2016-04-06

Apache Spark MultilayerPerceptronClassifier fails with ArrayIndexOutOfBoundsException

I am using this code to try to make predictions:

import org.apache.spark.sql.functions.col 
import org.apache.spark.Logging 
import org.apache.spark.graphx._ 
import org.apache.spark.{ SparkConf, SparkContext } 
import org.apache.spark.SparkContext._ 
import org.apache.spark.sql.SQLContext._ 
import org.apache.log4j.Logger 
import org.apache.log4j.Level 
import org.apache.spark.ml.feature.VectorAssembler 

object NN extends App { 

Logger.getLogger("org").setLevel(Level.OFF) 
Logger.getLogger("akka").setLevel(Level.OFF) 

val sc = new SparkContext(new SparkConf().setMaster("local[2]") 
.setAppName("cs")) 

val sqlContext = new org.apache.spark.sql.SQLContext(sc) 
import sqlContext.implicits._ 

val df = sc.parallelize(Seq( 
  ("3", "1", "1"), 
  ("2", "1", "1"), 
  ("2", "3", "3"), 
  ("3", "3", "3"), 
  ("0", "1", "0"))) 
  .toDF("label", "feature1", "feature2") 

val numeric = df 
.select(df.columns.map(c => col(c).cast("double").alias(c)): _*) 

val assembler = new VectorAssembler() 
.setInputCols(Array("feature1", "feature2")) 
.setOutputCol("features") 

val data = assembler.transform(numeric) 

import org.apache.spark.ml.classification.MultilayerPerceptronClassifier 

val layers = Array[Int](2, 3, 5, 4) // Note 2 neurons in the input layer 
val trainer = new MultilayerPerceptronClassifier() 
.setLayers(layers) 
.setBlockSize(128) 
.setSeed(1234L) 
.setMaxIter(100) 

val model = trainer.fit(data) 
model.transform(data).show 


} 

For the DataFrame (df), if I use ("4", "1", "1") instead of ("3", "1", "1"), I get the following error:

Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=256m; support was removed in 8.0 
[info] Set current project to spark-applications1458853926-master (in build file:/C:/Users/Desktop/spark-applications1458853926-master/) 
[info] Compiling 1 Scala source to C:\Users\Desktop\spark-applications1458853926-master\target\scala-2.11\classes... 
[info] Running NN 
Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties 
16/04/06 12:42:11 INFO Remoting: Starting remoting 
16/04/06 12:42:11 INFO Remoting: Remoting started; listening on addresses :[akka.tcp://[email protected]:64056] 
[error] (run-main-0) org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 0.0 failed 1 times, most recent failure: Lost task 0.0 in stage 0.0 (TID 0, localhost): java.lang.ArrayIndexOutOfBoundsException: 4 
[error]   at org.apache.spark.ml.classification.LabelConverter$.encodeLabeledPoint(MultilayerPerceptronClassifier.scala:85) 
[error]   at org.apache.spark.ml.classification.MultilayerPerceptronClassifier$$anonfun$2.apply(MultilayerPerceptronClassifier.scala:165) 
[error]   at org.apache.spark.ml.classification.MultilayerPerceptronClassifier$$anonfun$2.apply(MultilayerPerceptronClassifier.scala:165) 
[error]   at scala.collection.Iterator$$anon$11.next(Iterator.scala:370) 
[error]   at scala.collection.Iterator$GroupedIterator.takeDestructively(Iterator.scala:934) 
[error]   at scala.collection.Iterator$GroupedIterator.go(Iterator.scala:949) 
[error]   at scala.collection.Iterator$GroupedIterator.fill(Iterator.scala:986) 
[error]   at scala.collection.Iterator$GroupedIterator.hasNext(Iterator.scala:990) 
[error]   at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:369) 
[error]   at org.apache.spark.util.Utils$.getIteratorSize(Utils.scala:1595) 
[error]   at org.apache.spark.rdd.RDD$$anonfun$count$1.apply(RDD.scala:1143) 
[error]   at org.apache.spark.rdd.RDD$$anonfun$count$1.apply(RDD.scala:1143) 
[error]   at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858) 
[error]   at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858) 
[error]   at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66) 
[error]   at org.apache.spark.scheduler.Task.run(Task.scala:89) 
[error]   at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213) 
[error]   at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) 
[error]   at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) 
[error]   at java.lang.Thread.run(Thread.java:745) 
[error] 
[error] Driver stacktrace: 
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 0.0 failed 1 times, most recent failure: Lost task 0.0 in stage 0.0 (TID 0, localhost): java.lang.ArrayIndexOutOfBoundsException: 4 
     at org.apache.spark.ml.classification.LabelConverter$.encodeLabeledPoint(MultilayerPerceptronClassifier.scala:85) 
     at org.apache.spark.ml.classification.MultilayerPerceptronClassifier$$anonfun$2.apply(MultilayerPerceptronClassifier.scala:165) 
     at org.apache.spark.ml.classification.MultilayerPerceptronClassifier$$anonfun$2.apply(MultilayerPerceptronClassifier.scala:165) 
     at scala.collection.Iterator$$anon$11.next(Iterator.scala:370) 
     at scala.collection.Iterator$GroupedIterator.takeDestructively(Iterator.scala:934) 
     at scala.collection.Iterator$GroupedIterator.go(Iterator.scala:949) 
     at scala.collection.Iterator$GroupedIterator.fill(Iterator.scala:986) 
     at scala.collection.Iterator$GroupedIterator.hasNext(Iterator.scala:990) 
     at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:369) 
     at org.apache.spark.util.Utils$.getIteratorSize(Utils.scala:1595) 
     at org.apache.spark.rdd.RDD$$anonfun$count$1.apply(RDD.scala:1143) 
     at org.apache.spark.rdd.RDD$$anonfun$count$1.apply(RDD.scala:1143) 
     at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858) 
     at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858) 
     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66) 
     at org.apache.spark.scheduler.Task.run(Task.scala:89) 
     at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213) 
     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) 
     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) 
     at java.lang.Thread.run(Thread.java:745) 

Driver stacktrace: 
     at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1431) 
     at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1419) 
     at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1418) 
     at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59) 
     at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48) 
     at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1418) 
     at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799) 
     at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799) 
     at scala.Option.foreach(Option.scala:257) 
     at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:799) 
     at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1640) 
     at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1599) 
     at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1588) 
     at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48) 
     at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:620) 
     at org.apache.spark.SparkContext.runJob(SparkContext.scala:1832) 
     at org.apache.spark.SparkContext.runJob(SparkContext.scala:1845) 
     at org.apache.spark.SparkContext.runJob(SparkContext.scala:1858) 
     at org.apache.spark.SparkContext.runJob(SparkContext.scala:1929) 
     at org.apache.spark.rdd.RDD.count(RDD.scala:1143) 
     at org.apache.spark.mllib.optimization.LBFGS$.runLBFGS(LBFGS.scala:170) 
     at org.apache.spark.mllib.optimization.LBFGS.optimize(LBFGS.scala:117) 
     at org.apache.spark.ml.ann.FeedForwardTrainer.train(Layer.scala:878) 
     at org.apache.spark.ml.classification.MultilayerPerceptronClassifier.train(MultilayerPerceptronClassifier.scala:170) 
     at org.apache.spark.ml.classification.MultilayerPerceptronClassifier.train(MultilayerPerceptronClassifier.scala:110) 
     at org.apache.spark.ml.Predictor.fit(Predictor.scala:90) 
     at NN$.delayedEndpoint$NN$1(NN.scala:56) 
     at NN$delayedInit$body.apply(NN.scala:15) 
     at scala.Function0$class.apply$mcV$sp(Function0.scala:34) 
     at scala.runtime.AbstractFunction0.apply$mcV$sp(AbstractFunction0.scala:12) 
     at scala.App$$anonfun$main$1.apply(App.scala:76) 
     at scala.App$$anonfun$main$1.apply(App.scala:76) 
     at scala.collection.immutable.List.foreach(List.scala:381) 
     at scala.collection.generic.TraversableForwarder$class.foreach(TraversableForwarder.scala:35) 
     at scala.App$class.main(App.scala:76) 
     at NN$.main(NN.scala:15) 
     at NN.main(NN.scala) 
     at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) 
     at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) 
     at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) 
     at java.lang.reflect.Method.invoke(Method.java:497) 
Caused by: java.lang.ArrayIndexOutOfBoundsException: 4 
     at org.apache.spark.ml.classification.LabelConverter$.encodeLabeledPoint(MultilayerPerceptronClassifier.scala:85) 
     at org.apache.spark.ml.classification.MultilayerPerceptronClassifier$$anonfun$2.apply(MultilayerPerceptronClassifier.scala:165) 
     at org.apache.spark.ml.classification.MultilayerPerceptronClassifier$$anonfun$2.apply(MultilayerPerceptronClassifier.scala:165) 
     at scala.collection.Iterator$$anon$11.next(Iterator.scala:370) 
     at scala.collection.Iterator$GroupedIterator.takeDestructively(Iterator.scala:934) 
     at scala.collection.Iterator$GroupedIterator.go(Iterator.scala:949) 
     at scala.collection.Iterator$GroupedIterator.fill(Iterator.scala:986) 
     at scala.collection.Iterator$GroupedIterator.hasNext(Iterator.scala:990) 
     at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:369) 
     at org.apache.spark.util.Utils$.getIteratorSize(Utils.scala:1595) 
     at org.apache.spark.rdd.RDD$$anonfun$count$1.apply(RDD.scala:1143) 
     at org.apache.spark.rdd.RDD$$anonfun$count$1.apply(RDD.scala:1143) 
     at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858) 
     at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858) 
     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66) 
     at org.apache.spark.scheduler.Task.run(Task.scala:89) 
     at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213) 
     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142) 
     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617) 
     at java.lang.Thread.run(Thread.java:745) 
[trace] Stack trace suppressed: run last compile:run for the full output. 
java.lang.RuntimeException: Nonzero exit code: 1 
     at scala.sys.package$.error(package.scala:27) 
[trace] Stack trace suppressed: run last compile:run for the full output. 
[error] (compile:run) Nonzero exit code: 1 
[error] Total time: 19 s, completed 06-Apr-2016 12:42:20 

Why am I getting an ArrayIndexOutOfBoundsException? Am I not setting up my labels correctly? Can labels not take any value, since they are just labels? In this example it seems they have to be in the range 0-3.

Answer


The output layer uses one-hot encoding; that is, a label of "3" is converted to (0,0,0,1), where the "third" (zero-indexed) element is 1 and the rest are 0. If you have 4 output nodes and a label of 4, the LabelConverter function (whose source is visible here) will fail. (labelCount is 4, labeledPoint.label.toInt is 4, hence your error.)

val output = Array.fill(labelCount)(0.0) // one slot per output neuron, all initialised to zero 
output(labeledPoint.label.toInt) = 1.0  // fails when the label is >= labelCount 
(labeledPoint.features, Vectors.dense(output)) 
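
This is easy to reproduce in plain Scala, independent of Spark. A minimal sketch of the same indexing problem, using the values from this question:

val labelCount = 4                       // one slot per output neuron, valid indices 0 to 3 
val output = Array.fill(labelCount)(0.0) 
val label = 4.0                          // the offending label value 
output(label.toInt) = 1.0                // throws java.lang.ArrayIndexOutOfBoundsException: 4 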

So change this line:

val layers = Array[Int](2, 3, 5, 4) // Note 2 neurons in the input layer 

to this:

val layers = Array[Int](2, 3, 5, 5) // Note 2 neurons in the input layer and 5 neurons in the output layer 

and I expect it to work.
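
If you prefer not to hard-code the output size, you can derive it from the data. A minimal sketch, assuming labels are the non-negative integers 0..maxLabel and reusing the numeric DataFrame from the question (maxLabel is just an illustrative name):

import org.apache.spark.sql.functions.max 

// size the output layer so every label value 0..maxLabel gets its own neuron 
val maxLabel = numeric.agg(max("label")).first().getDouble(0).toInt 
val layers = Array[Int](2, 3, 5, maxLabel + 1) // 2 input features, maxLabel + 1 output classes 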


So the number of neurons in the output layer must be greater than or equal to the number of training labels? –


@blue-sky: Yes. I think you are best off with an exact match; otherwise you have an extra neuron whose correct value is always zero, but you spend a lot of computation learning and enforcing that rule. –


The last layer appears (I am writing my first serious example myself) to represent the labels, rounded to int values, so by declaring 4 as that value you expect labels 0, 1, 2, 3. Apparently the code is designed to create a neural network that classifies the input into a set of output states; I am trying to work out how to write a tic-tac-toe player using this capability.
