
Spark on Hive: Writing CSV Files to HDFS
2019-02-08 12:17:07
Tags: spark, hive, hdfs, csv

Writing a DataFrame out as CSV from a Spark 1.6 job (Spark on Hive) fails with the following exception:
Exception in thread "main" java.lang.ClassNotFoundException: Failed to find data source: csv. Please find packages at http://spark-packages.org
at org.apache.spark.sql.execution.datasources.ResolvedDataSource$.lookupDataSource(ResolvedDataSource.scala:77)
at org.apache.spark.sql.execution.datasources.ResolvedDataSource$.apply(ResolvedDataSource.scala:219)
at org.apache.spark.sql.DataFrameWriter.dataSource$lzycompute$1(DataFrameWriter.scala:181)
at org.apache.spark.sql.DataFrameWriter.org$apache$spark$sql$DataFrameWriter$$dataSource$1(DataFrameWriter.scala:181)
at org.apache.spark.sql.DataFrameWriter$$anonfun$save$1.apply$mcV$sp(DataFrameWriter.scala:188)
at org.apache.spark.sql.DataFrameWriter.executeAndCallQEListener(DataFrameWriter.scala:154)
at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:188)
at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:172)
at xiangqi_spark.MysqlImport$.main(MysqlImport.scala:30)
at xiangqi_spark.MysqlImport.main(MysqlImport.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:730)
at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:181)
at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:206)
at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: java.lang.ClassNotFoundException: csv.DefaultSource
at java.net.URLClassLoader.findClass(URLClassLoader.java:381)
at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
at org.apache.spark.sql.execution.datasources.ResolvedDataSource$$anonfun$4$$anonfun$apply$1.apply(ResolvedDataSource.scala:62)
at org.apache.spark.sql.execution.datasources.ResolvedDataSource$$anonfun$4$$anonfun$apply$1.apply(ResolvedDataSource.scala:62)
at scala.util.Try$.apply(Try.scala:161)
at org.apache.spark.sql.execution.datasources.ResolvedDataSource$$anonfun$4.apply(ResolvedDataSource.scala:62)
at org.apache.spark.sql.execution.datasources.ResolvedDataSource$$anonfun$4.apply(ResolvedDataSource.scala:62)
at scala.util.Try.orElse(Try.scala:82)
at org.apache.spark.sql.execution.datasources.ResolvedDataSource$.lookupDataSource(ResolvedDataSource.scala:62)


Solution:

Spark 1.6 does not ship a built-in csv data source (CSV only became built-in with Spark 2.0), so the short name csv cannot be resolved and the lookup falls back to the non-existent class csv.DefaultSource. Add the external spark-csv package to the pom (Spark here is 1.6.0):

<!-- Spark on Hive: write CSV-format files -->
<dependency>
    <groupId>com.databricks</groupId>
    <artifactId>spark-csv_2.10</artifactId>
    <version>1.4.0</version>
</dependency>


 
Code:

package xiangqi_spark

import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}
import utils.TimeUtil

/**
  * Created by Administrator on 2017/10/24.
  */
object MysqlImport extends Serializable {

  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
      .setAppName("MysqlImport")
//      .setMaster("local")

    val sc: SparkContext = new SparkContext(conf)
    val sqlContext: SQLContext = new SQLContext(sc)
    val hiveContext: HiveContext = new HiveContext(sc)
//    hiveContext.sql("use bdp_dw")  // production database
    hiveContext.sql("use default")   // test database
    val date: String = TimeUtil.getOneDaysAgoTime(0) // current date as yyyyMMdd
    val resultsql: DataFrame = hiveContext.sql("select * from bi_device_under_vol")
//    production cluster
//    resultsql.write.format("csv").save("hdfs://10.27.227.160:8020/tmp/" + date)
    import com.databricks.spark.csv._
    resultsql.saveAsCsvFile("hdfs://10.27.227.160:8020/tmp/" + date)
//    test cluster
//    resultsql.repartition(1).saveAsCsvFile("hdfs://192.168.0.118:8020/tmp/" + date + "_1")
//    resultsql.write.format("csv").save("hdfs://192.168.0.118:8020/tmp/" + date + "_2")
//    resultsql.write.format("csv").save("D:\\tt")
    sc.stop()
  }
}
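
Besides the saveAsCsvFile implicit, the same write can go through the generic DataFrame writer by naming the data source with its fully qualified package name, which avoids the short-name lookup that failed above. A minimal sketch against Spark 1.6 with spark-csv 1.4.0; the header, delimiter and inferSchema options are standard spark-csv options, and the helper name is just for illustration:

import org.apache.spark.sql.{DataFrame, SQLContext}

// Write df as CSV via the fully qualified spark-csv source name,
// then read the files back through the same source.
def writeAndReadBack(sqlContext: SQLContext, df: DataFrame, path: String): DataFrame = {
  df.write
    .format("com.databricks.spark.csv") // full name, no short-name lookup needed
    .option("header", "true")           // emit a header row
    .option("delimiter", ",")           // the default, shown for clarity
    .save(path)

  sqlContext.read
    .format("com.databricks.spark.csv")
    .option("header", "true")      // treat the first line as a header
    .option("inferSchema", "true") // let spark-csv infer column types
    .load(path)
}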




