
Why doesn't tensorflow accept np.float?

I am using Tensorflow to run a CNN deep-learning program, but it fails. I have converted my input data 'images' to np.float32, yet it still reports a dtype error:

E tensorflow/core/client/tensor_c_api.cc:485] You must feed a value for placeholder tensor 'Placeholder_2' with dtype float 
[[Node: Placeholder_2 = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]] 

The following is my code:

import dataset 
import numpy as np 
import tensorflow as tf 
from tensorflow.examples.tutorials.mnist import input_data 
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) 

class CNN(object): 

    def __init__(self): 
     self.x = tf.placeholder(tf.float32, [None, 784]) 
     self.y_ = tf.placeholder(tf.float32, [None, 10]) 


     # First Convolutional Layer 
     W_conv1 = self.weight_variable([5, 5, 1, 32]) 
     b_conv1 = self.bias_variable([32]) 

     x_image = tf.reshape(self.x, [-1, 28, 28, 1]) 

     h_conv1 = tf.nn.relu(self.conv2d(x_image, W_conv1) + b_conv1) 
     h_pool1 = self.max_pool_2x2(h_conv1) 

     # Second Convolutional Layer 
     W_conv2 = self.weight_variable([5, 5, 32, 64]) 
     b_conv2 = self.bias_variable([64]) 

     h_conv2 = tf.nn.relu(self.conv2d(h_pool1, W_conv2) + b_conv2) 
     h_pool2 = self.max_pool_2x2(h_conv2) 

     # Densely Connected Layer 
     W_fc1 = self.weight_variable([7 * 7 * 64, 1024]) 
     b_fc1 = self.bias_variable([1024]) 

     h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64]) 
     h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) 

     # Dropout 
     self.keep_prob = tf.placeholder(tf.float32) 
     h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) 

     # Readout Layer 
     W_fc2 = self.weight_variable([1024, 10]) 
     b_fc2 = self.bias_variable([10]) 

     self.y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 

     # Train and Evaluate the Model 
     self.cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(self.y_conv, self.y_)) 
     self.train_step = tf.train.AdamOptimizer(1e-4).minimize(self.cross_entropy) 
     self.correct_prediction = tf.equal(tf.argmax(self.y_conv, 1), tf.argmax(self.y_, 1)) 
     self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) 

     self.saver = tf.train.Saver() 
     self.sess = tf.Session() 
     self.sess.run(tf.initialize_all_variables()) 
     print("cnn initial finished!") 


    def weight_variable(self, shape): 
     initial = tf.truncated_normal(shape, stddev=0.1) 
     return tf.Variable(initial) 

    def bias_variable(self, shape): 
     initial = tf.constant(0.1, shape=shape) 
     return tf.Variable(initial) 

    def conv2d(self, x, W): 
     return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') 

    def max_pool_2x2(self, x): 
     return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 


    def train(self): 
     # initialize the data set 
     self.trainset = dataset.Train() 
     # mnist_softmax.py runs train_step and accuracy through sess.run 
     # mnist_cnn.py calls train_step.run and accuracy.eval directly, so the session argument must be passed in 

     for i in range(20000): 
      batch_xs, batch_ys = mnist.train.next_batch(50) 
      if i%100 == 0: 
       #print(batch_xs[0]) 
       #print(batch_ys[0]) 
       self.train_accuracy = self.accuracy.eval(session=self.sess, feed_dict={self.x: batch_xs, self.y_: batch_ys, self.keep_prob: 1.0}) 
       print("step %d, trainning accuracy %g" % (i, self.train_accuracy)) 
      self.train_step.run(session=self.sess, feed_dict={self.x: batch_xs, self.y_: batch_ys, self.keep_prob: 0.5}) 
     # Save the variables to disk. 
     save_path = self.saver.save(self.sess, "CNN_data/model.ckpt") 
     print("Model saved in file: %s" % save_path) 
     #print("test accuracy %g" % self.accuracy.eval(session=self.sess, feed_dict={self.x: mnist.test.images, self.y_: mnist.test.labels, self.keep_prob: 1.0})) 

    def predict(self, images): 
     images = np.reshape(images, (1, 784)) 
     images = images.astype(np.float32) 
     print(images) 
     ckpt = tf.train.get_checkpoint_state("CNN_data/") 
     if ckpt and ckpt.model_checkpoint_path: 
      self.saver.restore(self.sess, ckpt.model_checkpoint_path) 
     else: 
      print("No checkpoint found!") 

     predictions = self.sess.run(self.y_conv, feed_dict={self.x: images}) 

     return predictions 


if __name__ == '__main__': 
    cnn = CNN() 
    #cnn.train() 
    images = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] 
    cnn.predict(images) 

The train method is not the problem, but when predict is called there is always the placeholder dtype error shown above. I don't know why, because I have checked that my images have dtype float32.
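One way to see which graph node 'Placeholder_2' actually is (a sketch, relying on TensorFlow numbering unnamed placeholders in creation order, so in __init__ above x, y_ and keep_prob become Placeholder, Placeholder_1 and Placeholder_2):

print(cnn.x.name)          # e.g. 'Placeholder:0' 
print(cnn.y_.name)         # e.g. 'Placeholder_1:0' 
print(cnn.keep_prob.name)  # e.g. 'Placeholder_2:0' 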


Could there be a difference between 'tf.float32' and 'np.float32'? It may be the same 4 bytes, but the object wrapper is different? – hpaulj


Yes, but that doesn't matter! I fixed it by passing the self.keep_prob argument. – kitian

Answer


If it fails here:

predictions = self.sess.run(self.y_conv, feed_dict={self.x: images}) 

it is because you are not feeding in a value for self.keep_prob.
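A minimal sketch of the corrected call, assuming dropout should simply be disabled at prediction time by feeding keep_prob as 1.0:

predictions = self.sess.run(self.y_conv, feed_dict={self.x: images, self.keep_prob: 1.0}) 

That feeds all three placeholders the graph needs, so the 'Placeholder_2' error should disappear.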


Thank you very much for the answer!! I am new to CNNs and just ran the demo, so I didn't realize there was such a silly mistake! Thanks for the answer! – kitian
