tensorflow static_rnn error: inputs must be a sequence

I am trying to feed my own 3D data into an LSTM. The data has height = 365, width = 310, and an unknown/inconsistent time dimension; it consists of 0s and 1s, and each block of data that produces one output is stored in a separate file.

import tensorflow as tf 
import os 
from tensorflow.contrib import rnn 

filename = "C:/Kuliah/EmotionRecognition/Train1/D2N2Sur.txt" 

hm_epochs = 10 
n_classes = 12 
n_chunk = 443 
n_hidden = 500 

data = tf.placeholder(tf.bool, name='data') 
cat = tf.placeholder("float", [None, n_classes]) 

weights = { 
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes])) 
} 
biases = { 
    'out': tf.Variable(tf.random_normal([n_classes])) 
} 

def RNN(x, weights, biases): 
    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0) 
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32) 
    return tf.matmul(outputs[-1], weights['out']) + biases['out'] 

pred = RNN(data, weights, biases) 

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=cat)) 
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost) 

correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(cat, 1)) 
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 

saver = tf.train.Saver() 

temp = [[]] 
d3 = [[]] 
counter = 0 
with tf.Session() as sess: 
    #load 
    #saver.restore(sess, "C:/Kuliah/EmotionRecognition/model.ckpt") 
    sess.run(tf.global_variables_initializer()) 
    with open(filename) as inf:
        for line in inf:
            bla = list(line)
            bla.pop(len(bla) - 1)
            for index, item in enumerate(bla):
                if (item == '0'):
                    bla[index] = False
                else:
                    bla[index] = True
            temp.append(bla)
            counter += 1
            if counter%365==0: #height 365
                temp.pop(0)
                d3.append(temp)
                temp = [[]]
        temp.pop(0)
        d3.append(temp)

        batch_data = d3.reshape()
        sess.run(optimizer, feed_dict={data: d3, cat: 11})

        acc = sess.run(accuracy, feed_dict={data: d3, cat: 11})
        loss = sess.run(loss, feed_dict={data: d3, cat: 11})
        print(acc)
        print(loss)
        #save
        saver.save(sess, "C:/Kuliah/EmotionRecognition/model.ckpt")

This code gives me the following error:

Traceback (most recent call last): 
    File "C:/Kuliah/EmotionRecognition/Main", line 31, in <module> 
    pred = RNN(data, weights, biases) 
    File "C:/Kuliah/EmotionRecognition/Main", line 28, in RNN 
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32) 
    File "C:\Users\Anonymous\AppData\Roaming\Python\Python35\site-packages\tensorflow\python\ops\rnn.py", line 1119, in static_rnn 
    raise TypeError("inputs must be a sequence") 
TypeError: inputs must be a sequence 

Answer


When you call pred = RNN(data, weights, biases), the data argument should be a sequence whose length matches the length of the RNN. In your case, however, it is a single data = tf.placeholder(tf.bool, name='data').

You could try pred = RNN([data], weights, biases).
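
A minimal sketch of what that one-step version looks like, assuming the 310 columns of one row are the feature vector of a single time step (that mapping, and the switch from tf.bool to float32, are assumptions of mine; BasicLSTMCell's matrix multiplications will not accept a bool tensor):

import tensorflow as tf
from tensorflow.contrib import rnn

n_input = 310    # width of one binary row, taken from the question
n_hidden = 500

# a single time step: [batch_size, input_size]
data = tf.placeholder(tf.float32, [None, n_input], name='data')
lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

# static_rnn wants a Python list with one entry per time step,
# so wrapping the placeholder in a list of length 1 satisfies it
outputs, states = rnn.static_rnn(lstm_cell, [data], dtype=tf.float32)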

See the docstring of the method:

inputs: A length T list of inputs, each a Tensor of shape [batch_size, input_size] , or a nested tuple of such elements.
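
For a fixed-length sequence packed into one 3-D tensor, the usual way to meet that contract is tf.unstack along the time axis. A sketch, assuming 365 time steps of 310 features each (which of the question's axes is really the time axis is an assumption):

import tensorflow as tf
from tensorflow.contrib import rnn

n_steps, n_input, n_hidden = 365, 310, 500

x = tf.placeholder(tf.float32, [None, n_steps, n_input], name='data')
x_seq = tf.unstack(x, n_steps, axis=1)   # length-365 list of [batch, 310] tensors

cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
outputs, states = rnn.static_rnn(cell, x_seq, dtype=tf.float32)
# outputs is a list of 365 tensors, each of shape [batch, n_hidden]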

If the length of your RNN is unknown, you should consider using tf.nn.dynamic_rnn.
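
A sketch of that alternative: tf.nn.dynamic_rnn takes the 3-D tensor directly (no list) plus an optional sequence_length, so batches padded to different true lengths work. How the question's per-file blocks would be padded and batched is not shown, and the variable names here are illustrative:

import tensorflow as tf
from tensorflow.contrib import rnn

n_input, n_hidden, n_classes = 310, 500, 12

x = tf.placeholder(tf.float32, [None, None, n_input])   # [batch, max_time, input]
seq_len = tf.placeholder(tf.int32, [None])              # true length of each example

cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
outputs, state = tf.nn.dynamic_rnn(cell, x, sequence_length=seq_len, dtype=tf.float32)

# state.h holds the last valid hidden state of each sequence;
# project it to the 12 classes as in the question's RNN()
W = tf.Variable(tf.random_normal([n_hidden, n_classes]))
b = tf.Variable(tf.random_normal([n_classes]))
logits = tf.matmul(state.h, W) + b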
