
I am working on Ubuntu 14.04 and wrote code for letter recognition with TensorFlow v0.11. I created a source file based on the LeNet5 model; my code is below, but I cannot get it to run with TensorFlow:

```

import PIL 

import numpy 
import tensorflow as tf 
# from tensorflow.examples.tutorials.mnist import input_data 
import Input as input_data 
from tensorflow.python.framework.importer import import_graph_def 

from Resize import Resize_img 

# these functions help to optimize the accuracy of the MNIST training 
#from imp_image import imp_img 
import scipy.misc 


def weight_variable(shape): 
    initial = tf.truncated_normal(shape, stddev=0.1) 
    return tf.Variable(initial) 


def bias_variable(shape): 
    initial = tf.constant(0.1, shape=shape) 
    return tf.Variable(initial) 


def conv2d(x, W): 
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') 


def max_pool_2x2(x): 
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 


# ============================================================ End Functions part 

# mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) 

class MNIST: 

    def __init__(self): 

     # Open the computation session 
     self.sess = tf.InteractiveSession() 
     # Load the network 
     self.Deep_Network() 

    def Deep_Network(self): 

     # nodes for the input images and target output classes. 
     # supervised classifier 
     self.x = tf.placeholder(tf.float32, shape=[None, 784]) 
     self.y_ = tf.placeholder(tf.float32, shape=[None, 10]) 

     # First convolutionanal Layer ===================================== 
     # It will consist of convolution, followed by max pooling 
     # The convolution will compute 32 features for each 5x5 patch. 
     self.W_conv1 = weight_variable([5, 5, 1, 32]) 
     self.b_conv1 = bias_variable([32]) 

     # To apply the layer, we first reshape x to a 4d tensor, 
     # with the second and third dimensions corresponding to image width and height, 
     # and the final dimension corresponding to the number of color channels. 
     self.x_image = tf.reshape(self.x, [-1, 28, 28, 1]) 

     # We then convolve x_image with the weight tensor, add the bias, apply the ReLU function, and finally max pool. 
     self.h_conv1 = tf.nn.relu(conv2d(self.x_image, self.W_conv1) + self.b_conv1) 
     self.h_pool1 = max_pool_2x2(self.h_conv1) 

     # Second Convolutional Layer ===================================== 

     # In order to build a deep network, we stack several layers of this type. 
     # The second layer will have 64 features for each 5x5 patch. 

     self.W_conv2 = weight_variable([5, 5, 32, 64]) 
     self.b_conv2 = bias_variable([64]) 

     self.h_conv2 = tf.nn.relu(conv2d(self.h_pool1, self.W_conv2) + self.b_conv2) 
     self.h_pool2 = max_pool_2x2(self.h_conv2) 

     # Densely Connected Layer 

     # Now that the image size has been reduced to 7x7, we add a fully-connected layer with 1024 neurons 
     # to allow processing on the entire image. We reshape the tensor from the pooling layer into 
     # a batch of vectors, multiply by a weight matrix, add a bias, and apply a ReLU. 

     self.W_fc1 = weight_variable([7 * 7 * 64, 1024]) 
     self.b_fc1 = bias_variable([1024]) 

     self.h_pool2_flat = tf.reshape(self.h_pool2, [-1, 7 * 7 * 64]) 
     self.h_fc1 = tf.nn.relu(
      tf.matmul(self.h_pool2_flat, self.W_fc1) + self.b_fc1) # ReLU computes rectified linear: max(features, 0). 

     # Dropout 

     self.keep_prob = tf.placeholder(tf.float32) 
     self.h_fc1_drop = tf.nn.dropout(self.h_fc1, self.keep_prob) 

     # Readout Layer ======================================== 
     # Finally, we add a softmax layer, just like for the one layer softmax regression above. 

     self.W_fc2 = weight_variable([1024, 10]) 
     self.b_fc2 = bias_variable([10]) 

     self.y_conv = tf.nn.softmax(tf.matmul(self.h_fc1_drop, self.W_fc2) + self.b_fc2) 
     self.cross_entropy = -tf.reduce_sum(self.y_ * tf.log(self.y_conv)) 
     self.correct_prediction = tf.equal(tf.argmax(self.y_conv, 1), tf.argmax(self.y_, 1)) 
     self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) 

    def Prediction(self, imageName): 

     # Load the trained model 
     # Restore the model 
     # here I should create the model saver 
     Saved_model_dir = '/home/brm17/Desktop/PFE/' 
     saver = tf.train.Saver() 
     ckpt = tf.train.get_checkpoint_state(Saved_model_dir) 

     # verify whether the saved model exists or not 
     if ckpt and ckpt.model_checkpoint_path: 
      saver.restore(self.sess, ckpt.model_checkpoint_path) 
     else: 
      print '# No saved model found!' 
      exit() # exit the program 

     # image_test = 'number-3.jpg' 
     ResizedImage = Resize_img(imageName) 

     ImageInput = ResizedImage.mnist_image_input.reshape(1, -1) 

     print 'Predection > ', tf.argmax(self.y_conv, 1).eval(feed_dict={self.x: ImageInput, self.keep_prob: 1.0}) 

    # print("test accuracy %g"%accuracy.eval(feed_dict={x: myTestImg, y_: myLabel, keep_prob: 1.0})) 


def main(): 
    image = '/home/brm17/Desktop/PFE/n2.jpeg' 
    model = MNIST() 
    model.Prediction(image) 

if __name__ == "__main__": 
    main() 

```

When I run this code, it prints the following error:

```
[email protected]:~/Desktop/PFE$ python LeNet5.py 
Traceback (most recent call last): 
    File "LeNet5.py", line 137, in <module> 
    model.Prediction(image) 
    File "LeNet5.py", line 120, in Prediction 
    saver.restore(self.sess, ckpt.model_checkpoint_path) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/saver.py", line 1129, in restore 
    {self.saver_def.filename_tensor_name: save_path}) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 710, in run 
    run_metadata_ptr) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 908, in _run 
    feed_dict_string, options, run_metadata) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 958, in _do_run 
    target_list, options, run_metadata) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 978, in _do_call 
    raise type(e)(node_def, op, message) 
tensorflow.python.framework.errors.NotFoundError: Tensor name "Variable_1" not found in checkpoint files /home/brm17/Desktop/PFE/MNISTmodel-20000 
    [[Node: save/restore_slice_1 = RestoreSlice[dt=DT_FLOAT, preferred_shard=-1, _device="/job:localhost/replica:0/task:0/cpu:0"](_recv_save/Const_0, save/restore_slice_1/tensor_name, save/restore_slice_1/shape_and_slice)]] 
Caused by op u'save/restore_slice_1', defined at: 
    File "LeNet5.py", line 137, in <module> 
    model.Prediction(image) 
    File "LeNet5.py", line 115, in Prediction 
    saver = tf.train.Saver() 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/saver.py", line 861, in __init__ 
    restore_sequentially=restore_sequentially) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/saver.py", line 519, in build 
    filename_tensor, vars_to_save, restore_sequentially, reshape) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/saver.py", line 272, in _AddRestoreOps 
    values = self.restore_op(filename_tensor, vs, preferred_shard) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/saver.py", line 187, in restore_op 
    preferred_shard=preferred_shard) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/io_ops.py", line 203, in _restore_slice 
    preferred_shard, name=name) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_io_ops.py", line 359, in _restore_slice 
    preferred_shard=preferred_shard, name=name) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py", line 703, in apply_op 
    op_def=op_def) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2317, in create_op 
    original_op=self._default_original_op, op_def=op_def) 
    File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1239, in __init__ 
    self._traceback = _extract_stack() 
```

What is the problem, and how do I solve it?

Answer


Covißio,

I think the problem is as follows:

  • You created a network and saved it...
  • You then changed the network but did not delete the saved checkpoint.
  • Now you are trying to reload your network from that old version, but a variable you newly created does not exist in the checkpoint (a quick way to check this is sketched below).
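
The error message names the checkpoint file, so you can inspect it directly. Here is a minimal sketch (assuming the checkpoint prefix /home/brm17/Desktop/PFE/MNISTmodel-20000 from your traceback) that lists what the checkpoint actually contains:

```
import tensorflow as tf

# Path taken from the error message in the question.
checkpoint_path = '/home/brm17/Desktop/PFE/MNISTmodel-20000'

# Read the checkpoint directly and print every variable name stored in it.
reader = tf.train.NewCheckpointReader(checkpoint_path)
for name, shape in sorted(reader.get_variable_to_shape_map().items()):
    print name, shape

# Compare this list with tf.all_variables() after building the graph in
# LeNet5.py: any graph variable (here "Variable_1") that is missing from
# the list above is exactly what tf.train.Saver() fails to restore.
```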

Could you try one of the following:

  • Remove the saved state of the network and retrain it (a sketch for clearing the stale checkpoint follows this list).
  • Remove the saving and loading from your network and see whether that works. You can remove your network's saved state by deleting the checkpoint file in your folder /home/brm17/Desktop/PFE/.
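
For the first option, clearing the old state can look like this (a minimal sketch; it assumes the checkpoint files in that folder follow the MNISTmodel-* naming visible in your traceback):

```
import glob
import os

# Directory taken from the question; file prefix taken from the traceback.
saved_model_dir = '/home/brm17/Desktop/PFE/'
stale_files = glob.glob(os.path.join(saved_model_dir, 'MNISTmodel-*'))
stale_files.append(os.path.join(saved_model_dir, 'checkpoint'))

# Delete the bookkeeping file and every saved snapshot so that
# tf.train.get_checkpoint_state() no longer finds an outdated model.
for path in stale_files:
    if os.path.exists(path):
        print 'removing', path
        os.remove(path)
```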

Edit: I read the code thoroughly, and the problem is that when there is no checkpoint you never start training your network... Maybe start by writing the training code before you save, load, and change your network.
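
A sketch of what that training step could look like for your MNIST class (this is only an illustration: it assumes the standard tensorflow.examples.tutorials.mnist input pipeline, the MNISTmodel checkpoint prefix from your traceback, and the usual tutorial hyperparameters):

```
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

def train_and_save(model, saved_model_dir='/home/brm17/Desktop/PFE/', steps=20000):
    # Illustrative training loop for the MNIST class defined in LeNet5.py.
    mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)

    train_step = tf.train.AdamOptimizer(1e-4).minimize(model.cross_entropy)
    model.sess.run(tf.initialize_all_variables())
    saver = tf.train.Saver()

    for i in range(steps):
        batch = mnist.train.next_batch(50)
        if i % 1000 == 0:
            acc = model.accuracy.eval(feed_dict={
                model.x: batch[0], model.y_: batch[1], model.keep_prob: 1.0})
            print 'step %d, training accuracy %g' % (i, acc)
        train_step.run(feed_dict={
            model.x: batch[0], model.y_: batch[1], model.keep_prob: 0.5})

    # Saving right after training guarantees that the checkpoint contains
    # exactly the variables of the graph it will later be restored into.
    saver.save(model.sess, saved_model_dir + 'MNISTmodel', global_step=steps)
```

In main() you could then call train_and_save(model) whenever tf.train.get_checkpoint_state() finds nothing, instead of exiting, and only afterwards call Prediction.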

Good luck, and let me know whether this works!


Good question. See my edit. – rmeertens


But it prints: '# No saved model found!' –


That is exactly what you programmed. See my edit. – rmeertens
