Guest User

Untitled

a guest
Apr 21st, 2018
337
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 7.99 KB | None | 0 0
  1.  
  2. Traceback (most recent call last):
  3. File "/home/cluengo/Documents/Projects/vcfLoader/python/rdconnect/main.py", line 151, in <module>
  4. main(hc,sqlContext)
  5. File "/home/cluengo/Documents/Projects/vcfLoader/python/rdconnect/main.py", line 141, in main
  6. variantsRN.write.format("org.elasticsearch.spark.sql").options(**es_conf).option("es.nodes",configuration["elasticsearch"]["host"]).option("es.port",configuration["elasticsearch"]["port"] ).save(configuration["elasticsearch"]["index_name"]+"/"+configuration["version"],mode='append')
  7. File "/home/cluengo/spark/python/lib/pyspark.zip/pyspark/sql/readwriter.py", line 595, in save
  8. File "/home/cluengo/spark/python/lib/py4j-0.10.4-src.zip/py4j/java_gateway.py", line 1133, in __call__
  9. File "/home/cluengo/spark/python/lib/pyspark.zip/pyspark/sql/utils.py", line 63, in deco
  10.  
  11. File "/home/cluengo/spark/python/lib/py4j-0.10.4-src.zip/py4j/protocol.py", line 319, in get_return_value
  12. py4j.protocol.Py4JJavaError: An error occurred while calling o181.save.
  13. : org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 41.0 failed 1 times, most recent failure: Lost task 0.0 in stage 41.0 (TID 200, localhost, executor driver): java.lang.ClassCastException: scala.collection.mutable.WrappedArray$ofRef cannot be cast to scala.Tuple2
  14. at org.elasticsearch.spark.sql.DataFrameValueWriter.write(DataFrameValueWriter.scala:53)
  15. at org.elasticsearch.hadoop.serialization.bulk.AbstractBulkFactory$FieldWriter.doWrite(AbstractBulkFactory.java:152)
  16. at org.elasticsearch.hadoop.serialization.bulk.AbstractBulkFactory$FieldWriter.write(AbstractBulkFactory.java:118)
  17. at org.elasticsearch.hadoop.serialization.bulk.TemplatedBulk.writeTemplate(TemplatedBulk.java:80)
  18. at org.elasticsearch.hadoop.serialization.bulk.TemplatedBulk.write(TemplatedBulk.java:56)
  19. at org.elasticsearch.hadoop.rest.RestRepository.writeToIndex(RestRepository.java:168)
  20. at org.elasticsearch.spark.rdd.EsRDDWriter.write(EsRDDWriter.scala:67)
  21. at org.elasticsearch.spark.sql.EsSparkSQL$$anonfun$saveToEs$1.apply(EsSparkSQL.scala:101)
  22. at org.elasticsearch.spark.sql.EsSparkSQL$$anonfun$saveToEs$1.apply(EsSparkSQL.scala:101)
  23. at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
  24. at org.apache.spark.scheduler.Task.run(Task.scala:108)
  25. at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:335)
  26. at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
  27. at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
  28. at java.lang.Thread.run(Thread.java:748)
  29.  
  30. Driver stacktrace:
  31. at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1499)
  32. at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1487)
  33. at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1486)
  34. at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
  35. at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
  36. at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1486)
  37. at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:814)
  38. at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:814)
  39. at scala.Option.foreach(Option.scala:257)
  40. at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:814)
  41. at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1714)
  42. at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1669)
  43. at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1658)
  44. at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
  45. at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:630)
  46. at org.apache.spark.SparkContext.runJob(SparkContext.scala:2022)
  47. at org.apache.spark.SparkContext.runJob(SparkContext.scala:2043)
  48. at org.apache.spark.SparkContext.runJob(SparkContext.scala:2075)
  49. at org.elasticsearch.spark.sql.EsSparkSQL$.saveToEs(EsSparkSQL.scala:101)
  50. at org.elasticsearch.spark.sql.ElasticsearchRelation.insert(DefaultSource.scala:610)
  51. at org.elasticsearch.spark.sql.DefaultSource.createRelation(DefaultSource.scala:103)
  52. at org.apache.spark.sql.execution.datasources.DataSource.write(DataSource.scala:472)
  53. at org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:48)
  54. at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:58)
  55. at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:56)
  56. at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:74)
  57. at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:117)
  58. at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:117)
  59. at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:138)
  60. at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
  61. at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:135)
  62. at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:116)
  63. at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:92)
  64. at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:92)
  65. at org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:610)
  66. at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:233)
  67. at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:217)
  68. at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
  69. at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
  70. at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
  71. at java.lang.reflect.Method.invoke(Method.java:498)
  72. at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
  73. at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
  74. at py4j.Gateway.invoke(Gateway.java:280)
  75. at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
  76. at py4j.commands.CallCommand.execute(CallCommand.java:79)
  77. at py4j.GatewayConnection.run(GatewayConnection.java:214)
  78. at java.lang.Thread.run(Thread.java:748)
  79. Caused by: java.lang.ClassCastException: scala.collection.mutable.WrappedArray$ofRef cannot be cast to scala.Tuple2
  80. at org.elasticsearch.spark.sql.DataFrameValueWriter.write(DataFrameValueWriter.scala:53)
  81. at org.elasticsearch.hadoop.serialization.bulk.AbstractBulkFactory$FieldWriter.doWrite(AbstractBulkFactory.java:152)
  82. at org.elasticsearch.hadoop.serialization.bulk.AbstractBulkFactory$FieldWriter.write(AbstractBulkFactory.java:118)
  83. at org.elasticsearch.hadoop.serialization.bulk.TemplatedBulk.writeTemplate(TemplatedBulk.java:80)
  84. at org.elasticsearch.hadoop.serialization.bulk.TemplatedBulk.write(TemplatedBulk.java:56)
  85. at org.elasticsearch.hadoop.rest.RestRepository.writeToIndex(RestRepository.java:168)
  86. at org.elasticsearch.spark.rdd.EsRDDWriter.write(EsRDDWriter.scala:67)
  87. at org.elasticsearch.spark.sql.EsSparkSQL$$anonfun$saveToEs$1.apply(EsSparkSQL.scala:101)
  88. at org.elasticsearch.spark.sql.EsSparkSQL$$anonfun$saveToEs$1.apply(EsSparkSQL.scala:101)
  89. at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
  90. at org.apache.spark.scheduler.Task.run(Task.scala:108)
  91. at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:335)
  92. at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
  93. at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
  94. ... 1 more
Advertisement
Add Comment
Please, Sign In to add comment