  • Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/hadoop/fs/CanUnbuffer

    While running Spark on Hive, the program fails at sql.show() with: Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/hadoop/fs/CanUnbuffer

    Full stack trace:

    Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/hadoop/fs/CanUnbuffer
    	at java.lang.ClassLoader.defineClass1(Native Method)
    	at java.lang.ClassLoader.defineClass(ClassLoader.java:763)
    	at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
    	at java.net.URLClassLoader.defineClass(URLClassLoader.java:467)
    	at java.net.URLClassLoader.access$100(URLClassLoader.java:73)
    	at java.net.URLClassLoader$1.run(URLClassLoader.java:368)
    	at java.net.URLClassLoader$1.run(URLClassLoader.java:362)
    	at java.security.AccessController.doPrivileged(Native Method)
    	at java.net.URLClassLoader.findClass(URLClassLoader.java:361)
    	at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
    	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:349)
    	at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
    	at org.apache.hadoop.hdfs.DistributedFileSystem.initialize(DistributedFileSystem.java:149)
    	at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2591)
    	at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:89)
    	at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2625)
    	at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2607)
    	at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:368)
    	at org.apache.hadoop.fs.Path.getFileSystem(Path.java:296)
    	at org.apache.hadoop.mapred.FileInputFormat.singleThreadedListStatus(FileInputFormat.java:256)
    	at org.apache.hadoop.mapred.FileInputFormat.listStatus(FileInputFormat.java:228)
    	at org.apache.hadoop.mapred.FileInputFormat.getSplits(FileInputFormat.java:313)
    	at org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:194)
    	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:252)
    	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:250)
    	at scala.Option.getOrElse(Option.scala:121)
    	at org.apache.spark.rdd.RDD.partitions(RDD.scala:250)
    	at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:252)
    	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:250)
    	at scala.Option.getOrElse(Option.scala:121)
    	at org.apache.spark.rdd.RDD.partitions(RDD.scala:250)
    	at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:252)
    	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:250)
    	at scala.Option.getOrElse(Option.scala:121)
    	at org.apache.spark.rdd.RDD.partitions(RDD.scala:250)
    	at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:252)
    	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:250)
    	at scala.Option.getOrElse(Option.scala:121)
    	at org.apache.spark.rdd.RDD.partitions(RDD.scala:250)
    	at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:252)
    	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:250)
    	at scala.Option.getOrElse(Option.scala:121)
    	at org.apache.spark.rdd.RDD.partitions(RDD.scala:250)
    	at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:314)
    	at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:38)
    	at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collectFromPlan(Dataset.scala:2853)
    	at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2153)
    	at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2153)
    	at org.apache.spark.sql.Dataset$$anonfun$55.apply(Dataset.scala:2837)
    	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:65)
    	at org.apache.spark.sql.Dataset.withAction(Dataset.scala:2836)
    	at org.apache.spark.sql.Dataset.head(Dataset.scala:2153)
    	at org.apache.spark.sql.Dataset.take(Dataset.scala:2366)
    	at org.apache.spark.sql.Dataset.showString(Dataset.scala:245)
    	at org.apache.spark.sql.Dataset.show(Dataset.scala:644)
    	at org.apache.spark.sql.Dataset.show(Dataset.scala:603)
    	at org.apache.spark.sql.Dataset.show(Dataset.scala:612)
    	at com.sparksql.spark_on_hive.main(spark_on_hive.java:35)
    Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.fs.CanUnbuffer
    	at java.net.URLClassLoader.findClass(URLClassLoader.java:381)
    	at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
    	at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:349)
    	at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
    	... 62 more
    

      Note: the program was being run locally from IDEA.
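
      For context, here is a minimal sketch of the kind of program involved, reconstructed from the bottom frame of the stack trace (com.sparksql.spark_on_hive); the table name and master setting are assumptions, not the original code:

        package com.sparksql;

        import org.apache.spark.sql.Dataset;
        import org.apache.spark.sql.Row;
        import org.apache.spark.sql.SparkSession;

        public class spark_on_hive {
            public static void main(String[] args) {
                // Run locally (as in IDEA) with Hive metastore support enabled.
                SparkSession spark = SparkSession.builder()
                        .appName("spark_on_hive")
                        .master("local[*]")
                        .enableHiveSupport()
                        .getOrCreate();

                // The query itself is lazy; show() is the action that opens the
                // table's files, initializes DistributedFileSystem, and is
                // therefore where the NoClassDefFoundError surfaces.
                Dataset<Row> sql = spark.sql("SELECT * FROM some_db.some_table");
                sql.show();
            }
        }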

      Problem: the cause was a complete mystery!

      Attempted fixes: many posts online blamed the Hadoop environment setup, but even after adding the output of hadoop classpath it still did not work.

      Final fix: I removed the HBase dependency from pom.xml (the program never actually used it) and, inexplicably, the error went away. Adding the original HBase dependency back made the error reappear.
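
      In hindsight, the conflict is visible without trial and error: Maven can print every place hadoop-common enters the dependency tree and which version wins. A sketch, run from the project root (-Dverbose, on plugin versions that support it, also prints the versions omitted by conflict mediation):

        mvn dependency:tree -Dincludes=org.apache.hadoop:hadoop-common -Dverbose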

           Earlier, the HBase dependency in my pom.xml came before the Hadoop dependency, which is what triggered the error; once I moved the HBase dependency after the Hadoop dependency, it worked.

      Guess: something pulled in transitively by the HBase dependency was shadowing a class from the Hadoop dependency. Maven mediates version conflicts by "nearest wins", and at equal depth by declaration order, so with HBase listed first its older transitive hadoop-common ended up on the classpath, and that older version predates org.apache.hadoop.fs.CanUnbuffer (added around Hadoop 2.7). Placing the Hadoop dependency before the HBase dependency resolved the conflict.
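
      A sketch of the ordering that worked, with typical artifact names and placeholder versions (the post does not show the actual pom.xml):

        <dependencies>
            <!-- Declared first: at equal depth, Maven's "first declaration wins"
                 mediation keeps this hadoop-common, which has CanUnbuffer. -->
            <dependency>
                <groupId>org.apache.hadoop</groupId>
                <artifactId>hadoop-client</artifactId>
                <version>2.7.3</version>
            </dependency>

            <!-- Declared after Hadoop: the older hadoop-common this pulls in
                 transitively is now the one Maven omits. -->
            <dependency>
                <groupId>org.apache.hbase</groupId>
                <artifactId>hbase-client</artifactId>
                <version>1.2.6</version>
            </dependency>
        </dependencies>

      A sturdier fix than relying on declaration order is an explicit <exclusion> of org.apache.hadoop:hadoop-common inside the HBase dependency, or pinning hadoop-common's version in <dependencyManagement>, so the build no longer changes behavior when dependencies are reordered.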
