
Spring Cloud Data Flow not working with Spark

I am working with Spring Cloud for a Spark program. I am using Spark 1.3.1 in a Hortonworks environment. The Spark application goes into the "Accepted" state and does not progress any further. I have run the same Spark program on the edge node, and it works fine there. Below is the complete program:

package org.springframework.cloud.task.sparkapp.yarn; 

import java.util.ArrayList; 
import java.util.Arrays; 
import java.util.List; 

import org.apache.hadoop.conf.Configuration; 
import org.apache.spark.SparkConf; 
import org.apache.spark.deploy.yarn.Client; 
import org.apache.spark.deploy.yarn.ClientArguments; 
import org.slf4j.Logger; 
import org.slf4j.LoggerFactory; 

import org.springframework.beans.factory.annotation.Autowired; 
import org.springframework.boot.CommandLineRunner; 
import org.springframework.util.StringUtils; 
import scala.Tuple2; 

/** 
* {@link CommandLineRunner} implementation that will run a Spark App in YARN mode using 
* configuration properties provided. 
* 
* @author Thomas Risberg 
*/ 
public class SparkAppYarnRunner implements CommandLineRunner { 

    private static final Logger logger = LoggerFactory.getLogger(SparkAppYarnRunner.class); 

    @Autowired 
    private Configuration hadoopConfiguration; 

    @Autowired 
    private SparkAppYarnTaskProperties config; 

    @Override 
    public void run(String... args) throws Exception { 
     SparkConf sparkConf = new SparkConf(); 
     sparkConf.set("spark.yarn.jar", config.getSparkAssemblyJar()); 
     sparkConf.set("HADOOP_HOME","file:///usr/hdp/current/hadoop-client"); 
     sparkConf.set("HADOOP_CONF_DIR","file:///etc/hadoop/conf"); 
     sparkConf.set("SPARK_HOME","file:///usr/hdp/current/spark-client"); 

     //sparkConf.set("spark.driver.extraJavaOptions","-Dhdp.version=2.6.0.2.2.9.2-1"); 
     //sparkConf.set("spark.yarn.am.extraJavaOptions", "-Dhdp.version=2.6.0.2.2.9.2-1"); 
     sparkConf.set("spark.application.properties.file", "hdfs://<NAMENODE>:8020/db/e2e/sparkstudy/sparkapp/spark-defaults.conf"); 
     List<String> submitArgs = new ArrayList<String>(); 
     if (StringUtils.hasText(config.getAppName())) { 
      submitArgs.add("--name"); 
      submitArgs.add(config.getAppName()); 
     } 
     submitArgs.add("--jar"); 
     submitArgs.add(config.getAppJar()); 
     submitArgs.add("--class"); 
     submitArgs.add(config.getAppClass()); 
     if (StringUtils.hasText(config.getResourceFiles())) { 
      submitArgs.add("--files"); 
      submitArgs.add(config.getResourceFiles()); 
     } 
     if (StringUtils.hasText(config.getResourceArchives())) { 
      submitArgs.add("--archives"); 
      submitArgs.add(config.getResourceArchives()); 
     } 
     submitArgs.add("--executor-memory"); 
     submitArgs.add(config.getExecutorMemory()); 
     submitArgs.add("--num-executors"); 
     submitArgs.add("" + config.getNumExecutors()); 
     submitArgs.add("--queue"); 
     submitArgs.add("" + config.getQueue()); 
     /*for (String arg : config.getAppConfs()) { 
      submitArgs.add("--conf"); 
      submitArgs.add(arg); 
     }*/ 
     for (String arg : config.getAppArgs()) { 
      submitArgs.add("--arg"); 
      submitArgs.add(arg); 
     } 
     logger.info("Submit App with args: " + Arrays.asList(submitArgs)); 

     ClientArguments clientArguments = 
       new ClientArguments(submitArgs.toArray(new String[submitArgs.size()]), sparkConf); 
     clientArguments.isClusterMode(); 
     Tuple2<String,String>[] sparkConfigs = sparkConf.getAll(); 
     List<String> sparkConfValues = new ArrayList<String>(); 
     for(Tuple2<String,String> tuple : sparkConfigs) { 
      sparkConfValues.add(tuple.toString()); 
     } 
     logger.info("All spark configs: " + Arrays.asList(sparkConfValues)); 
     Client client = new Client(clientArguments, hadoopConfiguration, sparkConf); 
     System.setProperty("SPARK_YARN_MODE", "true"); 
     //System.setProperty("HADOOP_CONF_DIR","file:///etc/hadoop/conf"); 
     //System.setProperty("HADOOP_HOME", "file:///usr/hdp/current/hadoop-client"); 
     //System.setProperty("JAVA_HOME","file:///usr/java/jdk1.7.0_45"); 

     try { 
      client.run(); 
     } 
     catch (Throwable t) { 
      logger.error("Spark Application failed: " + t.getMessage(), t); 
      throw new RuntimeException("Spark Application failed", t); 
     } 
    } 

} 
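For context, the SparkAppYarnTaskProperties bean the runner autowires is not shown above. A minimal sketch of what such a @ConfigurationProperties class could look like, inferred only from the getters called in the runner and from the SPRING_APPLICATION_JSON keys below (the actual class in the project may differ):

package org.springframework.cloud.task.sparkapp.yarn;

import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * Hypothetical sketch of the properties bean autowired by SparkAppYarnRunner.
 * Field names are inferred from the getters the runner calls; Spring Boot's
 * relaxed binding maps JSON keys such as "app-name" to appName, etc.
 */
@ConfigurationProperties
public class SparkAppYarnTaskProperties {

    private String appName;            // submitted as --name
    private String appClass;           // submitted as --class
    private String appJar;             // submitted as --jar
    private String sparkAssemblyJar;   // set as spark.yarn.jar
    private String resourceFiles;      // submitted as --files
    private String resourceArchives;   // submitted as --archives
    private String executorMemory = "1024M";
    private int numExecutors = 1;
    private String queue = "default";
    private String[] appArgs = new String[0];
    private String[] appConfs = new String[0];

    public String getAppName() { return appName; }
    public void setAppName(String appName) { this.appName = appName; }

    public String getAppClass() { return appClass; }
    public void setAppClass(String appClass) { this.appClass = appClass; }

    public String getAppJar() { return appJar; }
    public void setAppJar(String appJar) { this.appJar = appJar; }

    public String getSparkAssemblyJar() { return sparkAssemblyJar; }
    public void setSparkAssemblyJar(String sparkAssemblyJar) { this.sparkAssemblyJar = sparkAssemblyJar; }

    public String getResourceFiles() { return resourceFiles; }
    public void setResourceFiles(String resourceFiles) { this.resourceFiles = resourceFiles; }

    public String getResourceArchives() { return resourceArchives; }
    public void setResourceArchives(String resourceArchives) { this.resourceArchives = resourceArchives; }

    public String getExecutorMemory() { return executorMemory; }
    public void setExecutorMemory(String executorMemory) { this.executorMemory = executorMemory; }

    public int getNumExecutors() { return numExecutors; }
    public void setNumExecutors(int numExecutors) { this.numExecutors = numExecutors; }

    public String getQueue() { return queue; }
    public void setQueue(String queue) { this.queue = queue; }

    public String[] getAppArgs() { return appArgs; }
    public void setAppArgs(String[] appArgs) { this.appArgs = appArgs; }

    public String[] getAppConfs() { return appConfs; }
    public void setAppConfs(String[] appConfs) { this.appConfs = appConfs; }
}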

SPRING_APPLICATION_JSON used when running the task:

export SPRING_APPLICATION_JSON='{"app-name": "my-yarn-pi", "app-class": "org.apache.spark.examples.JavaSparkPi", "app-jar": "hdfs://:8020/db/d2e/sparkstudy/sparkapp/sparkapp-client-task-1.0.0.BUILD-SNAPSHOT-tests.jar", "spark-assembly-jar": "hdfs://:8020/db/d2e/sparkstudy/sparkapp/spark-assembly-1.3.1.2.2.9.2-1-hadoop2.6.0.2.2.9.2-1.jar", "queue": "dart", "app-args": ["10"], "spring": {"hadoop": {"fsUri": "hdfs://:8020", "resourceManagerHost": "", "resourceManagerPort": 8032, "jobHistoryAddress": ":10020"}}}'

Answer


These values don't look right, unless you obscured them intentionally:

{"hadoop": {"fsUri": "hdfs://:8020", "resourceManagerHost": "", "resourceManagerPort": 8032, "jobHistoryAddress": ":10020"} 

Also, on HDP I believe the default resourceManagerPort is 8050 rather than 8032.
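To verify which ResourceManager address and port the task actually resolves, one option is to log the relevant keys from the injected Hadoop Configuration before calling client.run(). A minimal sketch using standard Hadoop/YARN property names (the helper method and its placement in SparkAppYarnRunner are just an illustration):

import org.apache.hadoop.conf.Configuration;
import org.slf4j.Logger;

// Sketch: dump the YARN/HDFS endpoints Spring resolved, to verify host/port
// values such as the ResourceManager port (8050 on HDP vs. Hadoop's default 8032).
// Intended to be called from SparkAppYarnRunner.run() before client.run().
static void logYarnEndpoints(Configuration conf, Logger logger) {
    logger.info("fs.defaultFS = " + conf.get("fs.defaultFS"));
    logger.info("yarn.resourcemanager.address = " + conf.get("yarn.resourcemanager.address"));
    logger.info("yarn.resourcemanager.scheduler.address = " + conf.get("yarn.resourcemanager.scheduler.address"));
    logger.info("mapreduce.jobhistory.address = " + conf.get("mapreduce.jobhistory.address"));
}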
