
I want to look at the probabilities of each test image, so I modified the code (cifar10_eval.py) of the TensorFlow CIFAR-10 evaluation example to inspect the softmax outputs:

def eval_once(saver, summary_writer, logits, labels, top_k_op, summary_op):
    ...............
    while step < num_iter and not coord.should_stop():
        result1, result2 = sess.run([logits, labels])
        print('Step:', step, 'result', result1, 'Label:', result2)
    ...............

and I run the Python code like this:

# python cifar10_eval.py --batch_size=1 --run_once=True 

The screen output looks like this:

Step: 0 result [[ 0.01539493 -0.00109618 -0.00364288 -0.00898853 -0.00086198 0.00587899 0.00981337 -0.00785329 -0.00282823 -0.00171288]] Label: [4] 
Step: 1 result [[ 0.01539471 -0.00109601 -0.00364273 -0.00898863 -0.00086192 0.005879 0.00981339 -0.00785322 -0.00282811 -0.00171296]] Label: [7] 
Step: 2 result [[ 0.01539475 -0.00109617 -0.00364274 -0.00898876 -0.00086183 0.00587886 0.00981328 -0.00785333 -0.00282814 -0.00171295]] Label: [8] 
Step: 3 result [[ 0.01539472 -0.00109597 -0.00364275 -0.0089886 -0.00086183 0.00587902 0.00981344 -0.00785326 -0.00282817 -0.00171299]] Label: [4] 
Step: 4 result [[ 0.01539488 -0.00109631 -0.00364294 -0.00898863 -0.00086199 0.00587896 0.00981327 -0.00785329 -0.00282809 -0.00171307]] Label: [0] 
Step: 5 result [[ 0.01539478 -0.00109607 -0.00364292 -0.00898858 -0.00086194 0.00587904 0.00981335 -0.0078533 -0.00282818 -0.00171321]] Label: [4] 
Step: 6 result [[ 0.01539493 -0.00109627 -0.00364277 -0.00898873 -0.0008618 0.00587892 0.00981339 -0.00785325 -0.00282807 -0.00171289]] Label: [9] 
Step: 7 result [[ 0.01539504 -0.00109619 -0.0036429 -0.00898865 -0.00086194 0.00587894 0.0098133 -0.00785331 -0.00282818 -0.00171294]] Label: [4] 
Step: 8 result [[ 0.01539493 -0.00109627 -0.00364286 -0.00898867 -0.00086183 0.00587899 0.00981332 -0.00785329 -0.00282825 -0.00171283]] Label: [8] 
Step: 9 result [[ 0.01539495 -0.00109617 -0.00364286 -0.00898852 -0.00086186 0.0058789 0.00981337 -0.00785326 -0.00282827 -0.00171287]] Label: [9] 

The label values look fine, but the logits outputs appear to be the same for every image! Why? Can anyone tell me the reason?

This is the modified source code of cifar10_eval.py:

from __future__ import absolute_import 
from __future__ import division 
from __future__ import print_function 

from datetime import datetime 
import math 
import time 

import numpy as np 
import tensorflow as tf 

#from tensorflow.models.image.cifar10 import cifar10 
import cifar10 

FLAGS = tf.app.flags.FLAGS 
tf.app.flags.DEFINE_string('eval_dir', '/tmp/cifar10_eval', 
          """Directory where to write event logs.""") 
tf.app.flags.DEFINE_string('eval_data', 'test', 
          """Either 'test' or 'train_eval'.""") 
tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/cifar10_train', 
          """Directory where to read model checkpoints.""") 
tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5, 
          """How often to run the eval.""") 
tf.app.flags.DEFINE_integer('num_examples', 10000, 
          """Number of examples to run.""") 
tf.app.flags.DEFINE_boolean('run_once', True, 
         """Whether to run eval only once.""") 

def eval_once(saver, summary_writer, logits, labels, top_k_op, summary_op):
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        else:
            print('No checkpoint file found')
            return

        # Start the queue runners.
        coord = tf.train.Coordinator()
        try:
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                                 start=True))

            #num_iter = int(math.ceil(FLAGS.num_examples/FLAGS.batch_size))
            #total_sample_count = num_iter * FLAGS.batch_size
            num_iter = FLAGS.num_examples
            total_sample_count = FLAGS.num_examples
            print(num_iter, FLAGS.batch_size, total_sample_count)
            true_count = 0  # Counts the number of correct predictions.
            step = 0
            time.sleep(1)
            while step < num_iter and not coord.should_stop():
                result1, result2 = sess.run([logits, labels])
                #label = sess.run(labels)
                print('Step:', step, 'result', result1, 'Label:', result2)
                step += 1
            precision = true_count / step

            print('Summary -- Step:', step, 'Accuracy:', true_count * 100.0 / step * 1.0)
            print('%s: total:%d true:%d precision @ 1 = %.3f' % (datetime.now(), total_sample_count, true_count, precision))

        except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)

        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)


def evaluate():
    """Eval CIFAR-10 for a number of steps."""
    with tf.Graph().as_default():
        # Get images and labels for CIFAR-10.
        eval_data = FLAGS.eval_data == 'test'
        images, labels = cifar10.inputs(eval_data=eval_data)

        # Build a Graph that computes the logits predictions from the
        # inference model. logits is softmax
        logits = cifar10.inference(images)

        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)

        # Restore the moving average version of the learned variables for eval.
        variable_averages = tf.train.ExponentialMovingAverage(
            cifar10.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
                                                graph_def=graph_def)

        while True:
            eval_once(saver, summary_writer, logits, labels, top_k_op, summary_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)


def main(argv=None):  # pylint: disable=unused-argument
    cifar10.maybe_download_and_extract()
    if tf.gfile.Exists(FLAGS.eval_dir):
        tf.gfile.DeleteRecursively(FLAGS.eval_dir)
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    print('Evaluate Start')
    evaluate()


if __name__ == '__main__':
    tf.app.run()
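As a side note: although the comment in evaluate() describes the logits as softmax, cifar10.inference() returns unnormalized logits. Since the stated goal is per-image probabilities, one option (a minimal sketch, not part of the original code) is to add a tf.nn.softmax op next to the logits and evaluate that instead:

# Sketch, assuming the same evaluate()/eval_once() structure as above:
# build the op once inside evaluate(), right after logits
probs = tf.nn.softmax(logits)
# then pass probs into eval_once() and fetch it in the loop, e.g.
#   result1, result2 = sess.run([probs, labels])
# each row of result1 sums to 1 and can be read as class probabilities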

Answer


I had only trained for 1k steps (accuracy around 10%). After training for 100k steps (accuracy around 86%), the result is:

................. 
Step: 9991 result: [[ 1.30259633 0.71064955 -2.6035285 -1.30183697 -4.1291523 -3.00246906 0.30873945 -4.02916574 13.05054665 -0.42556083]] Label: 8 
Step: 9992 result: [[-1.05670786 -1.86572766 0.28350741 1.78929067 0.03841069 1.23079467 2.97172165 -1.18722486 -1.17184007 -1.02505279]] Label: 6 
Step: 9993 result: [[ 1.50454926 2.34122658 -3.45632267 -0.55308843 -4.35214806 -2.28931832 -1.74908364 -4.71527719 11.44062901 1.72015083]] Label: 8 
Step: 9994 result: [[ 1.96891284 -2.57139373 0.29864013 1.30923986 1.72708285 0.95571399 -0.49331608 0.49454236 -2.26134181 -1.39561605]] Label: 0 
Step: 9995 result: [[-0.65523863 1.58577776 0.13226865 1.43122363 -2.34669352 0.18927786 -2.51019335 -1.70729315 -0.21297894 4.06098557]] Label: 9 
Step: 9996 result: [[-2.17944765 -3.22895575 2.29571438 2.63287306 0.46685112 4.42715979 -0.76104468 2.39603662 -3.21783161 -2.8433671 ]] Label: 2 
Step: 9997 result: [[ 4.26957560e+00 1.95574760e-03 1.91038296e-01 -8.00723195e-01 -2.36319876e+00 -2.12906289e+00 -3.35138845e+00 7.97132492e-01 6.60009801e-01 2.73786736e+00]] Label: 0 
Step: 9998 result: [[ 0.42694128 -2.07150149 0.47749567 2.62247086 1.11608386 3.05186462 -0.19805858 0.03386561 -2.87092948 -2.59781456]] Label: 5 
Step: 9999 result: [[ 0.23629765 -3.21540785 1.01075113 0.46802399 3.44423246 0.25743011 4.71304989 -1.12128389 -3.07727337 -2.7076664 ]] Label: 6 
2016-04-09 00:32:49.861650 Total:10000 True:8631: precision @ 1 = 0.863 
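After only 1k steps the network has barely learned anything, so its output is essentially independent of the input: every image yields almost the same logits, and the softmax over them is nearly uniform, which matches the chance-level ~10% accuracy. A quick check with numpy (a sketch, using the numbers printed at Step 0 in the question):

import numpy as np

logits = np.array([0.01539493, -0.00109618, -0.00364288, -0.00898853, -0.00086198,
                   0.00587899, 0.00981337, -0.00785329, -0.00282823, -0.00171288])
probs = np.exp(logits) / np.exp(logits).sum()  # softmax
print(probs)  # every entry is close to 0.1, i.e. an almost uniform distribution

After 100k steps the logits differ strongly between images, as shown above, and precision @ 1 reaches 0.863.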