val df = spark.readStream
  .format("kafka")
  .option(...)
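For reference, the elided .option(...) call above would normally supply at least the Kafka bootstrap servers and a topic subscription, which the Kafka source requires. A minimal sketch of what the complete read might look like, assuming placeholder broker and topic names:

import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder()
  .appName("KafkaStreamExample")  // hypothetical application name
  .getOrCreate()

// "kafka.bootstrap.servers" and a subscription option are the two
// settings the Kafka source requires; both values here are placeholders.
val df = spark.readStream
  .format("kafka")
  .option("kafka.bootstrap.servers", "broker-1:9092")
  .option("subscribe", "example-topic")
  .load()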
17/05/30 11:05:23 WARN TaskSetManager: Lost task 23.0 in stage 77.0 (TID 3329, spark-worker-3, executor 0): java.lang.IllegalStateException: This consumer has already been closed.
    at org.apache.kafka.clients.consumer.KafkaConsumer.ensureNotClosed(KafkaConsumer.java:1611)
    at org.apache.kafka.clients.consumer.KafkaConsumer.acquire(KafkaConsumer.java:1622)
    at org.apache.kafka.clients.consumer.KafkaConsumer.seek(KafkaConsumer.java:1198)
    at org.apache.spark.sql.kafka010.CachedKafkaConsumer.seek(CachedKafkaConsumer.scala:278)
    at org.apache.spark.sql.kafka010.CachedKafkaConsumer.fetchData(CachedKafkaConsumer.scala:177)
    at org.apache.spark.sql.kafka010.CachedKafkaConsumer.get(CachedKafkaConsumer.scala:89)
    at org.apache.spark.sql.kafka010.KafkaSourceRDD$$anon$1.getNext(KafkaSourceRDD.scala:147)
    at org.apache.spark.sql.kafka010.KafkaSourceRDD$$anon$1.getNext(KafkaSourceRDD.scala:136)
    at org.apache.spark.util.NextIterator.hasNext(NextIterator.scala:73)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:377)
    at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:52)
    at org.apache.spark.sql.execution.streaming.ForeachSink$$anonfun$addBatch$1.apply(ForeachSink.scala:49)
    at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:925)
    at org.apache.spark.rdd.RDD$$anonfun$foreachPartition$1$$anonfun$apply$29.apply(RDD.scala:925)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1944)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1944)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    at org.apache.spark.scheduler.Task.run(Task.scala:99)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:282)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)

Still getting the same error.