package hive

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

object HiveDome {
  def fun1(): Unit = {
    val conf = new SparkConf().setAppName("hive")
    val spark = SparkSession.builder().appName("hive").config(conf).enableHiveSupport().getOrCreate()
    import spark.sql

    // create the Hive table
    sql("create table if not exists user(id int, value String) row format delimited fields terminated by ','")
    // load the data file; don't name it user.txt here, or the contents come out garbled
    sql("load data local inpath '/user.log' into table user")

    // sql("select * from user").show()
    // sql("select count(*) from user").show()
    //
    // val sqlDF = sql("select key,value from user where key<10 order BY key")
    // import spark.implicits._  // implicit conversions
    // val stringDS = sqlDF.map { case org.apache.spark.sql.Row(id: Int, value: String) => s"key:$id,value:$value" }
    // stringDS.show()

    sql("select value, id from user").write.mode("overwrite")
      .saveAsTable("spark_hive_test1")

    spark.stop()
  }

  def main(args: Array[String]): Unit = {
    fun1()
  }
}
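Once fun1 has been packaged and run on the cluster (see the steps below), the result can be verified by reading the written table back from Hive. A minimal sketch, not from the original post, assuming the spark_hive_test1 table created above:

package hive

import org.apache.spark.sql.SparkSession

// Verification sketch (assumed, not from the original post): read back the
// table that HiveDome.fun1 wrote with saveAsTable("spark_hive_test1").
object HiveCheck {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("hive-check")
      .enableHiveSupport() // needed so spark.table resolves against the Hive metastore
      .getOrCreate()

    spark.table("spark_hive_test1").show(10)

    spark.stop()
  }
}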
After the code is written, build it into a jar in IDEA and copy the jar to the root directory on Linux.
Then configure the environment:
1. Copy Hive's hive-site.xml into spark/conf:
   cp /usr/hive/apache-hive-2.3.3-bin/conf/hive-site.xml /usr/spark/spark-2.1.1-bin-hadoop2.7/conf/
2. Put the MySQL driver jar (mysql-connector-java-5.1.38.jar) into spark-2.1.1-bin-hadoop2.7/jars/.
3. Edit spark-env.sh and add:
   export SPARK_CLASSPATH=$SPARK_CLASSPATH:$SPARK_HOME/jars/mysql-connector-java-5.1.38.jar
4. Submit the job on the cluster (for the code above, the fully qualified class name is hive.HiveDome):
   spark-submit --master spark://zhiyou01:7077 --class <fully qualified class name> --total-executor-cores 2 --executor-memory 512m /path/to/the/jar
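To confirm that Spark is actually picking up hive-site.xml and talking to the Hive metastore (rather than its built-in in-memory catalog), a quick check can be run from spark-shell on the cluster. A minimal sketch, not from the original post:

// In spark-shell the `spark` session is already Hive-enabled once
// hive-site.xml sits in spark/conf; the existing Hive databases and the
// `user` table should show up if the metastore connection works.
spark.sql("show databases").show()
spark.sql("show tables").show()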
If the environment is not configured this way, the following error appears, because without hive-site.xml on Spark's classpath Spark falls back to its built-in in-memory catalog, which does not implement loadTable:
Exception in thread "main" java.lang.UnsupportedOperationException: loadTable is not implemented
Common bugs:
1> org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.ipc.StandbyException): Operation category READ is not supported in state standby
Cause: the HDFS request was sent to a NameNode that is currently in standby state; in an HA cluster only the active NameNode serves read requests, so either a failover has happened or the job is pointed at the standby NameNode's address instead of the HA nameservice.
2> An IO error saying that user.log does not exist on HDFS.
Cause: when I created ...
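One frequent cause of this kind of error (a general note, not necessarily what happened in the original post) is that load data local inpath reads from the local filesystem of the machine executing the statement, so on a cluster the file may not be where Hive looks for it. A hedged alternative is to upload user.log to HDFS first (e.g. hdfs dfs -put user.log /user.log) and drop the LOCAL keyword:

// Sketch of the load statement in fun1, assuming /user.log already exists on HDFS:
// without LOCAL, Hive resolves the path against HDFS instead of the local filesystem.
sql("load data inpath '/user.log' into table user")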
Original post: https://www.cnblogs.com/han-guang-xue/p/10041150.html