// 版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/u012719230/article/details/82492745
// (Copyright notice: original blog post by the author; do not repost without permission.)
package wondersgroup_0905_Test
import org.apache.spark.sql.SparkSession
object sparkHive {

  /** Reads a Hive table via a Hive-enabled SparkSession, prints the result
    * to stdout, and saves it to HDFS as a single output file.
    */
  def main(args: Array[String]): Unit = {
    // Hive database and table to read.
    val database = "hivetest"
    val table = "tb_his_zy_fee_detail"
    // Location of the Hive warehouse on HDFS.
    val warehouse = "/user/hive/warehouse"
    // HDFS output path for the query result.
    val path = "/user/hdfs/test/hivespark"

    val spark = SparkSession
      .builder()
      .appName("Spark Hive Example")
      .config("spark.sql.warehouse.dir", warehouse)
      .enableHiveSupport()
      .getOrCreate()

    try {
      // BUG FIX: the original statement ended with a dangling
      // "where tb_his_zy_fee_detail." (no column, no predicate), which is
      // invalid SQL and fails with a ParseException at runtime. Build the
      // query from the database/table vals instead of re-hardcoding them.
      val sql = s"select * from $database.$table"
      val data = spark.sql(sql)

      // Display the query result (first 20 rows by default).
      data.show()

      // Save the result to HDFS, coalesced to a single partition so one
      // file is produced. NOTE: coalesce(1) funnels all rows through a
      // single task — only appropriate for small result sets.
      data.coalesce(1).write.save(path)
    } finally {
      // Stop the SparkSession even if the query or the write fails.
      spark.stop()
    }
  }
}