Dies ist gestimmt Perzeptron-Algorithmus:abgestimmt perceptron Dual Form
#this is a pseudo code
#m is the number of examples
initialize k = 0, w1 := 0, c1 := 0
repeat for T epochs:
for i = 1 to i = m (this is one epoch)
if (x[i],y[i]) is classified correctly then
c[k] = c[k] + 1
otherwise:
w[k+1] = w[k] + y[i]x[i]
c[k+1]=1
k = k+1
Der Algorithmus bei http://curtis.ml.cmu.edu/w/courses/index.php/Voted_Perceptron
berichtet wird, ich will perceptron duale Form eine gestimmt machen. Das ist mein Pseudo-Code:
#m is the number of examples
Initialize k = 0, a1 := 0, c1 := 0
Repeat for T epochs:
for i = 1 to i = m
if (x[i], y[i]) is classified correctly then
c[k] = c[k] + 1
otherwise
k = k + 1
a[k][i] = a[k][i]+1
c[k] = 1
Der Ausgang ist die folgende Liste: (a 1, c1), (A_2, c2), ..., (a_k, ck), wo jeder a_i ein Vektor
Ist es richtig ? Muss ich die Verzerrung hinzufügen?
Hier berichte ich meine Python-Implementierung dieses Dual perceptron gestimmt:
Klasse Perceptron (Objekt):
def __init__(self, kernel = linear_kernel, epochs = 50):
self.kernel = kernel
self.epochs = epochs
def summation(self, a, y, x, xi, b):
s = 0
for j in range(0, len(x)):
s += (a[j]*y[j]*(self.kernel(x[j], xi))) + b
return s
def maxNorm(self, x):
v = np.linalg.norm(x[0])
for i in range(1, len(x)):
if (np.linalg.norm(x[i]) > v):
v = np.linalg.norm(x[i])
return v
def training(self, x, y):
k = 0
a = np.zeros(len(x), dtype=np.int)
alfa = []
alfa.append(a.copy())
print(alfa)
b = 0
bias = []
bias.append(0)
c = []
c.append(0)
for _ in range(self.epochs):
for i in range(len(y)):
if (y[i] * self.summation(a, y, x, x[i], b))<=0:
a[i] = a[i] + 1
alfa.append(a.copy())
b = b + (y[i]*(self.maxNorm(x)**2))
bias.append(b)
c.append(1)
k = k+1
else:
c[k] += 1
self.alfa = alfa
self.b = bias
self.c = c
print("b: ",len(self.b))
print("c: ",len(self.c))
def predict(self,xTest, yTest, xTrain, yTrain):
print("a: ",self.alfa)
print("\nc:", self.c)
print(yTest)
print(yTrain)
SumFin=0
Err = np.zeros(len(xTest))
nErr = 0
yPredict = []
for i in range(len(xTest)):
for j in range(len(self.c)):
print(self.c[j]*(np.sign(self.summation(self.alfa[i], yTrain, xTrain, xTest[i], self.b[i]))))
SumFin += (self.c[j]*(np.sign(self.summation(self.alfa[i], yTrain, xTrain, xTest[i], self.b[i]))))
yPredict.append(np.sign(SumFin))
for i in range(len(yTest)):
print("i = ",i," | yTest = ",yTest[i]," | yPredict = ",yPredict[i])
if(yTest[i] != yPredict[i]):
nErr += 1
Err[i] += 1
print("Error rate: ", ((100*nErr)/len(xTest)), "%")
self.Err = Err
Ich glaube nicht, diesen Code arbeiten, denn wenn ich den Trainingssatzes vorhersagen i erhalten 75% der Fehlerrate.
Kann mir jemand helfen? Dank