Advertisement
Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
- org.apache.spark.sql.catalyst.errors.package$TreeNodeException: execute, tree:
- TungstenAggregate(key=[], functions=[(count(1),mode=Final,isDistinct=false)], output=[count#1L])
- +- TungstenExchange SinglePartition, None
- +- TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#4L])
- +- Project
- +- Scan ExistingRDD[_1#0]
- at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:49)
- at org.apache.spark.sql.execution.aggregate.TungstenAggregate.doExecute(TungstenAggregate.scala:80)
- at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:132)
- at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:130)
- at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
- at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:130)
- at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:166)
- at org.apache.spark.sql.execution.SparkPlan.executeCollectPublic(SparkPlan.scala:174)
- at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1538)
- at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1538)
- at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)
- at org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:2125)
- at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$execute$1(DataFrame.scala:1537)
- at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$collect(DataFrame.scala:1544)
- at org.apache.spark.sql.DataFrame$$anonfun$count$1.apply(DataFrame.scala:1554)
- at org.apache.spark.sql.DataFrame$$anonfun$count$1.apply(DataFrame.scala:1553)
- at org.apache.spark.sql.DataFrame.withCallback(DataFrame.scala:2138)
- at org.apache.spark.sql.DataFrame.count(DataFrame.scala:1553)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:32)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:37)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:39)
- at $iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:41)
- at $iwC$$iwC$$iwC$$iwC.<init>(<console>:43)
- at $iwC$$iwC$$iwC.<init>(<console>:45)
- at $iwC$$iwC.<init>(<console>:47)
- at $iwC.<init>(<console>:49)
- at <init>(<console>:51)
- at .<init>(<console>:55)
- at .<clinit>(<console>)
- at .<init>(<console>:7)
- at .<clinit>(<console>)
- at $print(<console>)
- at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
- at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
- at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
- at java.lang.reflect.Method.invoke(Method.java:498)
- at org.apache.spark.repl.SparkIMain$ReadEvalPrint.call(SparkIMain.scala:1065)
- at org.apache.spark.repl.SparkIMain$Request.loadAndRun(SparkIMain.scala:1346)
- at org.apache.spark.repl.SparkIMain.loadAndRunReq$1(SparkIMain.scala:840)
- at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:871)
- at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:819)
- at org.apache.spark.repl.SparkILoop.reallyInterpret$1(SparkILoop.scala:857)
- at org.apache.spark.repl.SparkILoop.interpretStartingWith(SparkILoop.scala:902)
- at org.apache.spark.repl.SparkILoop.command(SparkILoop.scala:814)
- at org.apache.spark.repl.SparkILoop.processLine$1(SparkILoop.scala:657)
- at org.apache.spark.repl.SparkILoop.innerLoop$1(SparkILoop.scala:665)
- at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$loop(SparkILoop.scala:670)
- at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply$mcZ$sp(SparkILoop.scala:997)
- at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
- at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
- at scala.tools.nsc.util.ScalaClassLoader$.savingContextLoader(ScalaClassLoader.scala:135)
- at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$process(SparkILoop.scala:945)
- at org.apache.spark.repl.SparkILoop.process(SparkILoop.scala:1059)
- at org.apache.spark.repl.Main$.main(Main.scala:31)
- at org.apache.spark.repl.Main.main(Main.scala)
- at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
- at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
- at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
- at java.lang.reflect.Method.invoke(Method.java:498)
- at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:731)
- at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:181)
- at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:206)
- at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
- at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
- Caused by: org.apache.spark.sql.catalyst.errors.package$TreeNodeException: execute, tree:
- TungstenExchange SinglePartition, None
- +- TungstenAggregate(key=[], functions=[(count(1),mode=Partial,isDistinct=false)], output=[count#4L])
- +- Project
- +- Scan ExistingRDD[_1#0]
- at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:49)
- at org.apache.spark.sql.execution.Exchange.doExecute(Exchange.scala:247)
- at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:132)
- at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:130)
- at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
- at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:130)
- at org.apache.spark.sql.execution.aggregate.TungstenAggregate$$anonfun$doExecute$1.apply(TungstenAggregate.scala:86)
- at org.apache.spark.sql.execution.aggregate.TungstenAggregate$$anonfun$doExecute$1.apply(TungstenAggregate.scala:80)
- at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:48)
- ... 63 more
- Caused by: org.apache.hadoop.mapred.InvalidInputException: Input path does not exist: ftp://user:pwd@192.168.1.5/brecht-d-m/input.nt
- at org.apache.hadoop.mapred.FileInputFormat.singleThreadedListStatus(FileInputFormat.java:285)
- at org.apache.hadoop.mapred.FileInputFormat.listStatus(FileInputFormat.java:228)
- at org.apache.hadoop.mapred.FileInputFormat.getSplits(FileInputFormat.java:313)
- at org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:199)
- at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
- at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
- at scala.Option.getOrElse(Option.scala:120)
- at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
- at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
- at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
- at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
- at scala.Option.getOrElse(Option.scala:120)
- at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
- at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
- at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
- at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
- at scala.Option.getOrElse(Option.scala:120)
- at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
- at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
- at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
- at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
- at scala.Option.getOrElse(Option.scala:120)
- at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
- at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
- at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
- at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
- at scala.Option.getOrElse(Option.scala:120)
- at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
- at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
- at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
- at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
- at scala.Option.getOrElse(Option.scala:120)
- at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
- at org.apache.spark.ShuffleDependency.<init>(Dependency.scala:91)
- at org.apache.spark.sql.execution.Exchange.prepareShuffleDependency(Exchange.scala:220)
- at org.apache.spark.sql.execution.Exchange$$anonfun$doExecute$1.apply(Exchange.scala:254)
- at org.apache.spark.sql.execution.Exchange$$anonfun$doExecute$1.apply(Exchange.scala:248)
- at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:48)
- ... 71 more
Advertisement
Add Comment
Please sign in to add a comment
Advertisement