- INFO 30-11 22:05:41,548 - Starting job: show at <console>:37
- INFO 30-11 22:05:41,548 - Got job 3 (show at <console>:37) with 1 output partitions
- INFO 30-11 22:05:41,548 - Final stage: ResultStage 4 (show at <console>:37)
- INFO 30-11 22:05:41,548 - Parents of final stage: List()
- INFO 30-11 22:05:41,548 - Missing parents: List()
- INFO 30-11 22:05:41,549 - Submitting ResultStage 4 (MapPartitionsRDD[21] at show at <console>:37), which has no missing parents
- INFO 30-11 22:05:41,552 - Block broadcast_6 stored as values in memory (estimated size 13.2 KB, free 25.6 KB)
- INFO 30-11 22:05:41,553 - Block broadcast_6_piece0 stored as bytes in memory (estimated size 6.7 KB, free 32.2 KB)
- INFO 30-11 22:05:41,553 - Added broadcast_6_piece0 in memory on localhost:53272 (size: 6.7 KB, free: 511.1 MB)
- INFO 30-11 22:05:41,554 - Created broadcast 6 from broadcast at DAGScheduler.scala:1006
- INFO 30-11 22:05:41,554 - Submitting 1 missing tasks from ResultStage 4 (MapPartitionsRDD[21] at show at <console>:37)
- INFO 30-11 22:05:41,554 - Adding task set 4.0 with 1 tasks
- INFO 30-11 22:05:41,555 - Starting task 0.0 in stage 4.0 (TID 6, localhost, partition 0,PROCESS_LOCAL, 2546 bytes)
- INFO 30-11 22:05:41,555 - Running task 0.0 in stage 4.0 (TID 6)
- INFO 30-11 22:05:41,584 - [Executor task launch worker-0][partitionID:table;queryID:10233263048484647] Query will be executed on table: test_table
- ERROR 30-11 22:05:41,594 - Exception in task 0.0 in stage 4.0 (TID 6)
- java.lang.InterruptedException:
- at org.apache.carbondata.hadoop.CarbonRecordReader.initialize(CarbonRecordReader.java:83)
- at org.apache.carbondata.spark.rdd.CarbonScanRDD.compute(CarbonScanRDD.scala:171)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
- at org.apache.spark.scheduler.Task.run(Task.scala:89)
- at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
- at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
- at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
- at java.lang.Thread.run(Thread.java:745)
- WARN 30-11 22:05:41,607 - Lost task 0.0 in stage 4.0 (TID 6, localhost): java.lang.InterruptedException:
- at org.apache.carbondata.hadoop.CarbonRecordReader.initialize(CarbonRecordReader.java:83)
- at org.apache.carbondata.spark.rdd.CarbonScanRDD.compute(CarbonScanRDD.scala:171)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
- at org.apache.spark.scheduler.Task.run(Task.scala:89)
- at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
- at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
- at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
- at java.lang.Thread.run(Thread.java:745)
- ERROR 30-11 22:05:41,608 - Task 0 in stage 4.0 failed 1 times; aborting job
- INFO 30-11 22:05:41,609 - Removed TaskSet 4.0, whose tasks have all completed, from pool
- INFO 30-11 22:05:41,612 - Cancelling stage 4
- INFO 30-11 22:05:41,614 - ResultStage 4 (show at <console>:37) failed in 0.059 s
- INFO 30-11 22:05:41,615 - Job 3 failed: show at <console>:37, took 0.066920 s
- org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 4.0 failed 1 times, most recent failure: Lost task 0.0 in stage 4.0 (TID 6, localhost): java.lang.InterruptedException:
- at org.apache.carbondata.hadoop.CarbonRecordReader.initialize(CarbonRecordReader.java:83)
- at org.apache.carbondata.spark.rdd.CarbonScanRDD.compute(CarbonScanRDD.scala:171)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
- at org.apache.spark.scheduler.Task.run(Task.scala:89)
- at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
- at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
- at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
- at java.lang.Thread.run(Thread.java:745)
- Driver stacktrace:
- at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1431)
- at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1419)
- at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1418)
- at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
- at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
- at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1418)
- at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)
- at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)
- at scala.Option.foreach(Option.scala:236)
- at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:799)
- at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1640)
- at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1599)
- at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1588)
- at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
- at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:620)
- at org.apache.spark.SparkContext.runJob(SparkContext.scala:1832)
- at org.apache.spark.SparkContext.runJob(SparkContext.scala:1845)
- at org.apache.spark.SparkContext.runJob(SparkContext.scala:1858)
- at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:212)
- at org.apache.spark.sql.execution.Limit.executeCollect(basicOperators.scala:165)
- at org.apache.spark.sql.execution.SparkPlan.executeCollectPublic(SparkPlan.scala:174)
- at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1499)
- at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1499)
- at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)
- at org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:2086)
- at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$execute$1(DataFrame.scala:1498)
- at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$collect(DataFrame.scala:1505)
- at org.apache.spark.sql.DataFrame$$anonfun$head$1.apply(DataFrame.scala:1375)
- at org.apache.spark.sql.DataFrame$$anonfun$head$1.apply(DataFrame.scala:1374)
- at org.apache.spark.sql.DataFrame.withCallback(DataFrame.scala:2099)
- at org.apache.spark.sql.DataFrame.head(DataFrame.scala:1374)
- at org.apache.spark.sql.DataFrame.take(DataFrame.scala:1456)
- at org.apache.spark.sql.DataFrame.showString(DataFrame.scala:170)
- at org.apache.spark.sql.DataFrame.show(DataFrame.scala:350)
- at org.apache.spark.sql.DataFrame.show(DataFrame.scala:311)
- at org.apache.spark.sql.DataFrame.show(DataFrame.scala:319)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:37)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:42)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:44)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:46)
- at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:48)
- at $iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:50)
- at $iwC$$iwC$$iwC$$iwC.<init>(<console>:52)
- at $iwC$$iwC$$iwC.<init>(<console>:54)
- at $iwC$$iwC.<init>(<console>:56)
- at $iwC.<init>(<console>:58)
- at <init>(<console>:60)
- at .<init>(<console>:64)
- at .<clinit>(<console>)
- at .<init>(<console>:7)
- at .<clinit>(<console>)
- at $print(<console>)
- at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
- at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
- at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
- at java.lang.reflect.Method.invoke(Method.java:498)
- at org.apache.spark.repl.SparkIMain$ReadEvalPrint.call(SparkIMain.scala:1065)
- at org.apache.spark.repl.SparkIMain$Request.loadAndRun(SparkIMain.scala:1346)
- at org.apache.spark.repl.SparkIMain.loadAndRunReq$1(SparkIMain.scala:840)
- at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:871)
- at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:819)
- at org.apache.spark.repl.SparkILoop.reallyInterpret$1(SparkILoop.scala:857)
- at org.apache.spark.repl.SparkILoop.interpretStartingWith(SparkILoop.scala:902)
- at org.apache.spark.repl.SparkILoop.command(SparkILoop.scala:814)
- at org.apache.spark.repl.SparkILoop.processLine$1(SparkILoop.scala:657)
- at org.apache.spark.repl.SparkILoop.innerLoop$1(SparkILoop.scala:665)
- at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$loop(SparkILoop.scala:670)
- at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply$mcZ$sp(SparkILoop.scala:997)
- at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
- at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
- at scala.tools.nsc.util.ScalaClassLoader$.savingContextLoader(ScalaClassLoader.scala:135)
- at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$process(SparkILoop.scala:945)
- at org.apache.spark.repl.SparkILoop.process(SparkILoop.scala:1059)
- at org.apache.spark.repl.Main$.main(Main.scala:31)
- at org.apache.spark.repl.Main.main(Main.scala)
- at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
- at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
- at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
- at java.lang.reflect.Method.invoke(Method.java:498)
- at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:731)
- at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:181)
- at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:206)
- at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
- at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
- Caused by: java.lang.InterruptedException:
- at org.apache.carbondata.hadoop.CarbonRecordReader.initialize(CarbonRecordReader.java:83)
- at org.apache.carbondata.spark.rdd.CarbonScanRDD.compute(CarbonScanRDD.scala:171)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
- at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
- at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
- at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
- at org.apache.spark.scheduler.Task.run(Task.scala:89)
- at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
- at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
- at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
- at java.lang.Thread.run(Thread.java:745)
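
For reference, the failing action in this log is a DataFrame.show() on the CarbonData table test_table, issued from the Spark shell ("show at <console>:37"). A minimal spark-shell snippet that matches that call pattern is sketched below, assuming the older Spark 1.x CarbonContext API implied by the stack trace; the store path is hypothetical and only the table name comes from the log.

    import org.apache.spark.sql.CarbonContext

    // Hypothetical store path; substitute the actual CarbonData store location.
    val cc = new CarbonContext(sc, "hdfs://namenode:9000/carbon/store")

    // "show at <console>:37" in the log corresponds to a call like this.
    // The scan of test_table is where CarbonRecordReader.initialize(:83)
    // raised the InterruptedException seen in the task stack trace.
    cc.sql("SELECT * FROM test_table").show()

The InterruptedException surfaces inside CarbonRecordReader.initialize before any rows are produced, and the job aborts after a single task failure, which is consistent with running in local mode (the task is scheduled on localhost).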