Ich versuche, meinen Code, der einen HiveContext verwendet, auf dem Spark-Cluster auszuführen. Wie reiche ich eine in der IDE geschriebene Anwendung mit HiveContext per spark-submit ein?
./spark-submit --class com.dt.sparkSQL.DataFrameToHive --master spark://SparkMaster:7077 /root/Documents/DataFrameToHive.jar
Aber hier ist das Problem
17/08/13 10:29:46 INFO hive.metastore: Trying to connect to metastore with URI thrift://SparkMaster:9083
17/08/13 10:29:46 WARN hive.metastore: Failed to connect to the MetaStore Server...
17/08/13 10:29:46 INFO hive.metastore: Waiting 1 seconds before next connection attempt.
Exception in thread "main" java.lang.RuntimeException: java.lang.RuntimeException: Unable to instantiate org.apache.hadoop.hive.ql.metadata.SessionHiveMetaStoreClient
Wenn ich dagegen die spark-shell starte
./spark-shell --master spark://SparkMaster:7077
verbinde ich mich erfolgreich mit SparkMaster:9083. Hier ist meine spark/conf/hive-site.xml.
Meine Frage ist: Warum schlägt die Verbindung zu SparkMaster:9083 fehl, wenn ich das Programm per spark-submit einreiche, obwohl sie aus der spark-shell funktioniert? Was ist das Problem mit SparkMaster:9083? Hier ist der Code aus der IDE:
package com.dt.sparkSQL
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.HiveContext
object DataFrameToHive {

  /**
    * Loads two tab-delimited local files into Hive tables (`people`,
    * `peopleScores`), joins them on `name`, keeps rows with score > 90,
    * persists the result as Hive table `peopleResult`, and prints it.
    *
    * Fixes vs. original: the SparkContext is now stopped in a `finally`
    * block (it was never stopped, leaking cluster resources), and a
    * redundant second `use userdb` statement was removed.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    // NOTE(review): hard-coding the master here overrides the --master flag
    // passed to spark-submit. For cluster submission, prefer dropping
    // setMaster and letting spark-submit supply it.
    conf.setAppName("DataFrameToHive").setMaster("spark://SparkMaster:7077")
    val sc = new SparkContext(conf)
    try {
      val hiveContext = new HiveContext(sc)

      // All tables below live in the `userdb` database.
      hiveContext.sql("use userdb")

      // Rebuild the `people` table from the local input file.
      hiveContext.sql("DROP TABLE IF EXISTS people")
      hiveContext.sql("CREATE TABLE IF NOT EXISTS people(name STRING, age INT)ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t' LINES TERMINATED BY '\\n'")
      hiveContext.sql("LOAD DATA LOCAL INPATH '/root/Documents/people.txt' INTO TABLE people")

      // Rebuild the `peopleScores` table from the local input file.
      hiveContext.sql("DROP TABLE IF EXISTS peopleScores")
      hiveContext.sql("CREATE TABLE IF NOT EXISTS peopleScores(name STRING, score INT) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\\t' LINES TERMINATED BY '\\n'")
      hiveContext.sql("LOAD DATA LOCAL INPATH '/root/Documents/peopleScore.txt' INTO TABLE peopleScores")

      // Join people with their scores and keep only scores above 90.
      val resultDF = hiveContext.sql("select pi.name,pi.age,ps.score "
        + " from people pi join peopleScores ps on pi.name=ps.name"
        + " where ps.score>90")

      // Persist the filtered join result and show it for verification.
      hiveContext.sql("drop table if exists peopleResult")
      resultDF.saveAsTable("peopleResult")
      val dataframeHive = hiveContext.table("peopleResult")
      dataframeHive.show()
    } finally {
      // Release the driver's cluster resources even if a query fails.
      sc.stop()
    }
  }
}
`