
TypeError: 'NoneType' object is not iterable when implementing a perceptron, see code below

I have a problem with my perceptron code. I get the following when I run it. I checked both of my txt files and I am pretty sure they are definitely fine. Can anyone help? Thanks.

Traceback (most recent call last): 
    File "perceptron.py", line 160, in <module> 
    test() 
    File "perceptron.py", line 133, in test 
    w,k,i = p.perceptron_train('train.txt') 
TypeError: 'NoneType' object is not iterable 

Here is my code:

import numpy as np 
import matplotlib.pyplot as plt 

class Data(): 
    def __init__(self,x,y): 
     self.len = len(x) 
     self.x = x 
     self.y = y 

class Perceptron(): 
    def __init__(self,N,X): 
     self.w = np.array([]) 
     self.N = N 
     self.X =X 

    def prepare_training(self,file): 
     file = open(file,'r').readlines() 
     self.dic = set([]) 
     y = []    
     vocab = {} 

     for i in range(len(file)): 
      words = file[i].strip().split() 
      y.append(int(words[0])*2-1) 
      for w in set(words[1:]): 
       if w in vocab: 
        vocab[w].add(i) 
        if i < self.N and len(vocab[w]) >= self.X: 
         self.dic.add(w) 
       elif i < self.N: 
        vocab[w] = set([i]) 

     x = np.zeros((len(file),len(self.dic)))  
     self.dic = list(self.dic) 
     for i in range(len(self.dic)): 
      for j in vocab[self.dic[i]]: 
       x[j][i] = 1 
     self.training = Data(x[:self.N],y[:self.N]) 
     self.validation = Data(x[self.N:],y[self.N:]) 
     return x,y 

    def update_weight(self,x,y): 
     self.w = self.w + x * y 

    def perceptron_train(self,data): 
     x,y = self.prepare_training(data) 
     self.w = np.zeros(len(self.dic),int) 
     passes = 0 
     total_passes = 100 
     k = 0 

     while passes < total_passes: 
      print('passes:',passes) 
      mistake = 0 
      for i in range(self.N): 
       check = y[i] * np.dot(self.w,x[i]) 
       if (check == 0 and (not np.array_equal(x[i],np.zeros(len(self.dic),int)))) or (check < 0): 
         self.update_weight(x[i],y[i]) 
         mistake += 1 
         k += 1 
      passes += 1 
      print('mistake:',mistake) 
      if mistake == 0: 
       print('converge at pass:',passes) 
       print('total mistakes:', k) 
       return self.w, k, passes 

    def perceptron_error(self,w,data): 
     error = 0 
     for i in range(data.len): 
      if data.y[i] * np.dot(w,data.x[i]) < 0: 
       error += 1 
     return error/data.len 

    def test(self,report): 
     x = np.zeros(len(self.dic),int) 
     for i in range(len(self.dic)): 
      if self.dic[i] in report: 
       x[i] = 1 
     if np.dot(self.w,x) > 0: 
      return 1 
     else: 
      return 0 

    def perceptron_test(self,data): 
     test = open(data,'r').readlines() 
     y = [] 
     mistake = 0 
     for t in test: 
      y0 = int(t.strip().split()[0]) 
      report = set(t.strip().split()[1:]) 
      r = self.test(report) 
      y.append(r) 
      if (y0 != r): 
       mistake += 1 
     return y,mistake/len(test) 

    def predictive_words(self): 
     w2d = {} 
     for i in range(len(self.dic)): 
      try: 
       w2d[self.w[i]].append(self.dic[i] + " ") 
      except: 
       w2d[self.w[i]] = [self.dic[i] + " "] 
     key = list(w2d.keys()) 
     key.sort() 
     count = 0 
     most_positive = "" 
     most_negative = "" 
     for i in range(len(key)): 
      for j in range(len(w2d[key[i]])): 
       most_negative += w2d[key[i]][j] 
       count += 1 
       if count == 5: 
        break 
      if count == 5: 
       break 

     count = 0 
     for i in range(len(key)): 
      for j in range(len(w2d[key[len(key)-i-1]])): 
       most_positive += w2d[key[len(key)-i-1]][j] 
       count += 1 
       if count == 5: 
        break 
      if count == 5: 
       break 
     return most_positive,most_negative 


def test(): 
    p = Perceptron(500,30) 
    w,k,i = p.perceptron_train('train.txt') 
    print(p.perceptron_error(w,p.validation)) 
    normal,abnormal = p.predictive_words() 
    print('Normal:\n',normal) 
    print('Abnormal:\n',abnormal) 
    print(p.perceptron_test('test.txt')) 

def plot_error():  
    x = [100,200,400,500] 
    y = [] 
    for n in x: 
     p = Perceptron(n,10) 
     w,k,i = p.perceptron_train('train.txt') 
     y.append(p.perceptron_error(w,p.validation)) 
    plt.plot(x,y) 
    plt.show() 

def plot_converge():  
    x = [100,200,400,500] 
    y = [] 
    for n in x: 
     p = Perceptron(n,10) 
     w,k,i = p.perceptron_train('train.txt') 
     y.append(i) 
    plt.plot(x,y) 
    plt.show() 

test() 

Answer


perceptron_train implicitly returns None when mistake != 0 on every pass, so that is what you are seeing here.
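For reference, the pattern can be reproduced in isolation: a function whose only return statement sits inside a conditional falls off the end and implicitly returns None when that branch is never taken, and unpacking None into three names raises exactly the TypeError from the traceback. A minimal standalone sketch (not your actual code):

def train(total_passes=100):
    for passes in range(total_passes):
        mistake = 60                   # stand-in for a pass that never reaches 0 mistakes
        if mistake == 0:
            return 'w', 'k', passes    # only return statement: taken only on convergence
    # falling off the end of the function is an implicit `return None`

w, k, i = train()
# TypeError: 'NoneType' object is not iterable  (exact wording varies by Python version)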


The mistakes are always equal to 60. How should I change my code? It should be 0 – TonyShao


Also, returning something outside the while loop would make sense, or, if that makes no sense, raise an exception. See the sketch below. – renemilk
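A minimal sketch of that suggestion, meant as a drop-in replacement for perceptron_train inside the Perceptron class from the question (the loop body is copied from there, with the array_equal check rewritten as an equivalent x[i].any(); whether to return the current state or raise after a failed run is a design choice):

    def perceptron_train(self, data):
        x, y = self.prepare_training(data)
        self.w = np.zeros(len(self.dic), int)
        k = 0
        total_passes = 100
        for passes in range(1, total_passes + 1):
            mistake = 0
            for i in range(self.N):
                check = y[i] * np.dot(self.w, x[i])
                # x[i].any() is equivalent to "not np.array_equal(x[i], zeros)"
                if check < 0 or (check == 0 and x[i].any()):
                    self.update_weight(x[i], y[i])
                    mistake += 1
                    k += 1
            print('passes:', passes, 'mistakes:', mistake)
            if mistake == 0:
                print('converge at pass:', passes)
                print('total mistakes:', k)
                return self.w, k, passes
        # only reached when the loop ran out of passes without converging;
        # either return the current state so the caller can still unpack
        # three values, or raise if a non-converged model should be an error:
        # raise RuntimeError('no convergence after %d passes' % total_passes)
        return self.w, k, passes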


And of course investigate why you do not get the convergence you expect within the number of passes you are running. – renemilk
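One way to start that investigation (a sketch, assuming the Perceptron class and train.txt from the question are available in the same script): the perceptron loop can only reach mistake == 0 if the encoded training rows are linearly separable, and the most obvious violation is two rows with identical feature vectors but opposite labels.

p = Perceptron(500, 30)
x, y = p.prepare_training('train.txt')

seen = {}        # feature vector (as bytes) -> first label seen for it
conflicts = 0
for xi, yi in zip(x[:p.N], y[:p.N]):
    key = xi.tobytes()
    if key in seen and seen[key] != yi:
        conflicts += 1     # same encoding, opposite label: cannot be separated
    seen.setdefault(key, yi)
print('conflicting duplicate rows:', conflicts)
# a count > 0 proves the data, as encoded, can never converge; a count of 0
# does not prove separability, but it rules out the most common cause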
