I get Tensor Tensor("Placeholder:0", shape=(128, 784), dtype=float32) is not an element of this graph. when I try to train and test my graph. The stack trace is not really helpful; I can't get any more information out of it. I really don't understand how this error can happen: every dataset should automatically be added to graphFully via with graphFully.as_default():, and I later run that graph with with tf.Session(graph=graph) as session:.

Advice on simplifying the graph would also be welcome. I want to define and compare several graphs, hence the "complicated" structure.

My graph:

##fully connected with hidden layer
def createFullyConnected():

    graphFully = tf.Graph()
    with graphFully.as_default():

        def constructGraph(dataset, weights1, biases1, weights2, biases2):
            logits1 = tf.matmul(dataset, weights1) + biases1
            hiddenl = tf.nn.relu(logits1)
            logits2 = tf.matmul(hiddenl, weights2) + biases2
            return logits2

        def weight_variable(shape):
            initial = tf.truncated_normal(shape, stddev=0.01)
            return tf.Variable(initial, name='weights')

        def bias_variable(shape):
            initial = tf.constant(0.0, shape=shape)
            return tf.Variable(initial, name='biases')

        # Input data. For the training data, we use a placeholder that will be fed
        # at run time with a training minibatch.
        tf_train_dataset = tf.placeholder(tf.float32,
                                          shape=(batch_size, image_size * image_size), name='train_data')
        tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels), name='train_labels')
        tf_valid_dataset = tf.constant(validation[0], name='valid_labels')
        tf_test_dataset = tf.constant(test[0], name='test_labels')

        # Variables.
        with tf.name_scope('hidden') as scope:
            weights1 = weight_variable([image_size * image_size, 1024])
            biases1 = bias_variable([1024])
        weights2 = weight_variable([1024, num_labels])
        biases2 = bias_variable([num_labels])

        # Training computation.
        logits = constructGraph(tf_train_dataset, weights1, biases1, weights2, biases2)
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))

        # Optimizer.
        optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

        # Predictions for the training, validation, and test data.
        train_prediction = tf.nn.softmax(logits)
        valid_prediction = tf.nn.softmax(constructGraph(tf_valid_dataset, weights1, biases1, weights2, biases2))
        test_prediction = tf.nn.softmax(constructGraph(tf_test_dataset, weights1, biases1, weights2, biases2))
        # We write the graph out to the `logs` directory
        tf.summary.FileWriter("logs", graphFully).close()
        return (graphFully, optimizer, train_prediction, valid_prediction, test_prediction)

and the evaluation:

def evaluate(graph, optimizer, train_prediction, valid_prediction, test_prediction):
    num_steps = 3001
    train_dataset = train[0]
    train_labels = train[1]
    valid_labels = validation[1]
    test_labels = test[1]
    outlier_labels = outlier[1]

    with tf.Session(graph=graph) as session:
        tf.global_variables_initializer().run()
        print("Initialized")
        for step in range(num_steps):
            # Pick an offset within the training data, which has been randomized.
            # Note: we could use better randomization across epochs.
            offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
            # Generate a minibatch.
            batch_data = train_dataset[offset:(offset + batch_size), :]
            batch_labels = train_labels[offset:(offset + batch_size), :]
            # Prepare a dictionary telling the session where to feed the minibatch.
            # The key of the dictionary is the placeholder node of the graph to be fed,
            # and the value is the numpy array to feed to it.
            feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
            _, l, predictions = session.run(
                [optimizer, loss, train_prediction], feed_dict=feed_dict)
            if (step % 500 == 0):
                print("Minibatch loss at step %d: %f" % (step, l))
                print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
                print("Validation accuracy: %.1f%%" % accuracy(
                    valid_prediction.eval(), valid_labels))
        print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))

Answer


The error comes from the scope in which the two placeholders tf_train_dataset and tf_train_labels are resolved. Inside evaluate those names are not bound to anything belonging to graph; they resolve to placeholders created elsewhere (presumably module-level globals left over from building another graph), and feeding a tensor that is not part of the session's graph is rejected with exactly this message. You need to fetch the two tensors from the graph itself inside the evaluate function.
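A minimal sketch of that failure mode (the graphs g1 and g2 here are hypothetical, not taken from the question) reproduces the message:

import numpy as np
import tensorflow as tf

g1 = tf.Graph()
with g1.as_default():
    # This placeholder belongs to g1.
    x = tf.placeholder(tf.float32, shape=(2, 2), name='x')

g2 = tf.Graph()
with g2.as_default():
    y = tf.placeholder(tf.float32, shape=(2, 2), name='y')
    out = tf.identity(y)

with tf.Session(graph=g2) as session:
    # Feeding x, which lives in g1, into a session running g2 fails with
    # "Tensor ... is not an element of this graph."
    session.run(out, feed_dict={x: np.zeros((2, 2))})

The fix is to look the placeholders up by name on the graph object that evaluate receives; the names were assigned at construction time, and ':0' selects the first output of the named op: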

def evaluate(...):
    ...
    tf_train_dataset = graph.get_tensor_by_name('train_data:0')
    tf_train_labels = graph.get_tensor_by_name('train_labels:0')
    with tf.Session(graph=graph) as session:
        ...
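For completeness, here is a sketch of the reworked evaluate with those lookups in place. It assumes the globals from the question (batch_size, train, validation, test, accuracy), and it also assumes createFullyConnected is changed to return the loss tensor as well, since evaluate fetches loss in session.run but is never given it:

def evaluate(graph, optimizer, loss, train_prediction, valid_prediction, test_prediction):
    num_steps = 3001
    # Recover the placeholders from the graph by the names assigned in
    # createFullyConnected ('train_data' and 'train_labels').
    tf_train_dataset = graph.get_tensor_by_name('train_data:0')
    tf_train_labels = graph.get_tensor_by_name('train_labels:0')

    with tf.Session(graph=graph) as session:
        tf.global_variables_initializer().run()
        for step in range(num_steps):
            offset = (step * batch_size) % (train[1].shape[0] - batch_size)
            batch_data = train[0][offset:(offset + batch_size), :]
            batch_labels = train[1][offset:(offset + batch_size), :]
            # Both feed keys now belong to the session's graph.
            feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
            _, l, predictions = session.run(
                [optimizer, loss, train_prediction], feed_dict=feed_dict)
            if step % 500 == 0:
                print("Minibatch loss at step %d: %f" % (step, l))
                print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
                print("Validation accuracy: %.1f%%" % accuracy(
                    valid_prediction.eval(), validation[1]))
        print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test[1]))

An alternative that avoids get_tensor_by_name entirely is to return the placeholders from createFullyConnected alongside the other handles (for example in a dict), which also keeps things tidy when defining and comparing several graphs as you plan to.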