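Console session: ADAM's transform command run under Spark 1.6.0 with -mark_duplicate_reads and -limit_projection. The job aborts in stage 1.0 with java.util.NoSuchElementException: None.get raised from MarkDuplicates; a short analysis and a hedged Scala sketch of the failure mode follow the log.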
[jp@jplap bin]$ ./adam-submit transform ~/adam_work/v1/data/run2 ~/adam_work/v1/data/run3 -mark_duplicate_reads -limit_projection
Using ADAM_MAIN=org.bdgenomics.adam.cli.ADAMMain
Using SPARK_SUBMIT=/home/jp/Apps/Spark/spark-1.6.0-bin-hadoop2.6/bin/spark-submit
2016-02-09 00:19:45 WARN NativeCodeLoader:62 - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
SLF4J: Defaulting to no-operation (NOP) logger implementation
SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
2016-02-09 00:21:43 ERROR Executor:74 - Managed memory leak detected; size = 76525545 bytes, TID = 48
2016-02-09 00:21:43 ERROR Executor:74 - Managed memory leak detected; size = 76525545 bytes, TID = 52
2016-02-09 00:21:43 ERROR Executor:95 - Exception in task 2.0 in stage 1.0 (TID 48)
java.util.NoSuchElementException: None.get
    at scala.None$.get(Option.scala:313)
    at scala.None$.get(Option.scala:311)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$.org$bdgenomics$adam$rdd$read$MarkDuplicates$$leftPositionAndLibrary$1(MarkDuplicates.scala:73)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$$anonfun$apply$3.apply(MarkDuplicates.scala:86)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$$anonfun$apply$3.apply(MarkDuplicates.scala:86)
    at org.apache.spark.rdd.RDD$$anonfun$groupBy$3$$anonfun$apply$19.apply(RDD.scala:650)
    at org.apache.spark.rdd.RDD$$anonfun$groupBy$3$$anonfun$apply$19.apply(RDD.scala:650)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:149)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
    at org.apache.spark.scheduler.Task.run(Task.scala:89)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
2016-02-09 00:21:43 ERROR Executor:95 - Exception in task 6.0 in stage 1.0 (TID 52)
java.util.NoSuchElementException: None.get
    at scala.None$.get(Option.scala:313)
    at scala.None$.get(Option.scala:311)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$.org$bdgenomics$adam$rdd$read$MarkDuplicates$$leftPositionAndLibrary$1(MarkDuplicates.scala:73)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$$anonfun$apply$3.apply(MarkDuplicates.scala:86)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$$anonfun$apply$3.apply(MarkDuplicates.scala:86)
    at org.apache.spark.rdd.RDD$$anonfun$groupBy$3$$anonfun$apply$19.apply(RDD.scala:650)
    at org.apache.spark.rdd.RDD$$anonfun$groupBy$3$$anonfun$apply$19.apply(RDD.scala:650)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:149)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
    at org.apache.spark.scheduler.Task.run(Task.scala:89)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
2016-02-09 00:21:44 WARN TaskSetManager:70 - Lost task 2.0 in stage 1.0 (TID 48, localhost): java.util.NoSuchElementException: None.get
    at scala.None$.get(Option.scala:313)
    at scala.None$.get(Option.scala:311)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$.org$bdgenomics$adam$rdd$read$MarkDuplicates$$leftPositionAndLibrary$1(MarkDuplicates.scala:73)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$$anonfun$apply$3.apply(MarkDuplicates.scala:86)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$$anonfun$apply$3.apply(MarkDuplicates.scala:86)
    at org.apache.spark.rdd.RDD$$anonfun$groupBy$3$$anonfun$apply$19.apply(RDD.scala:650)
    at org.apache.spark.rdd.RDD$$anonfun$groupBy$3$$anonfun$apply$19.apply(RDD.scala:650)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:149)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
    at org.apache.spark.scheduler.Task.run(Task.scala:89)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)

2016-02-09 00:21:44 ERROR TaskSetManager:74 - Task 2 in stage 1.0 failed 1 times; aborting job
Command body threw exception:
org.apache.spark.SparkException: Job aborted due to stage failure: Task 2 in stage 1.0 failed 1 times, most recent failure: Lost task 2.0 in stage 1.0 (TID 48, localhost): java.util.NoSuchElementException: None.get
    at scala.None$.get(Option.scala:313)
    at scala.None$.get(Option.scala:311)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$.org$bdgenomics$adam$rdd$read$MarkDuplicates$$leftPositionAndLibrary$1(MarkDuplicates.scala:73)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$$anonfun$apply$3.apply(MarkDuplicates.scala:86)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$$anonfun$apply$3.apply(MarkDuplicates.scala:86)
    at org.apache.spark.rdd.RDD$$anonfun$groupBy$3$$anonfun$apply$19.apply(RDD.scala:650)
    at org.apache.spark.rdd.RDD$$anonfun$groupBy$3$$anonfun$apply$19.apply(RDD.scala:650)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:149)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
    at org.apache.spark.scheduler.Task.run(Task.scala:89)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)

Driver stacktrace:
Exception in thread "main" 2016-02-09 00:21:44 ERROR Executor:74 - Managed memory leak detected; size = 79630424 bytes, TID = 53
2016-02-09 00:21:44 ERROR Executor:74 - Managed memory leak detected; size = 5452202 bytes, TID = 50
2016-02-09 00:21:44 ERROR Executor:74 - Managed memory leak detected; size = 37525956 bytes, TID = 49
org.apache.spark.SparkException: Job aborted due to stage failure: Task 2 in stage 1.0 failed 1 times, most recent failure: Lost task 2.0 in stage 1.0 (TID 48, localhost): java.util.NoSuchElementException: None.get
    at scala.None$.get(Option.scala:313)
    at scala.None$.get(Option.scala:311)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$.org$bdgenomics$adam$rdd$read$MarkDuplicates$$leftPositionAndLibrary$1(MarkDuplicates.scala:73)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$$anonfun$apply$3.apply(MarkDuplicates.scala:86)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$$anonfun$apply$3.apply(MarkDuplicates.scala:86)
    at org.apache.spark.rdd.RDD$$anonfun$groupBy$3$$anonfun$apply$19.apply(RDD.scala:650)
    at org.apache.spark.rdd.RDD$$anonfun$groupBy$3$$anonfun$apply$19.apply(RDD.scala:650)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:149)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
    at org.apache.spark.scheduler.Task.run(Task.scala:89)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)

Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1431)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1419)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1418)
    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1418)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)
    at scala.Option.foreach(Option.scala:236)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:799)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1640)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1599)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1588)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:620)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1832)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1845)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1922)
    at org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsNewAPIHadoopDataset$1.apply$mcV$sp(PairRDDFunctions.scala:1146)
    at org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsNewAPIHadoopDataset$1.apply(PairRDDFunctions.scala:1074)
    at org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsNewAPIHadoopDataset$1.apply(PairRDDFunctions.scala:1074)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:111)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:316)
    at org.apache.spark.rdd.PairRDDFunctions.saveAsNewAPIHadoopDataset(PairRDDFunctions.scala:1074)
    at org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsNewAPIHadoopFile$2.apply$mcV$sp(PairRDDFunctions.scala:994)
    at org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsNewAPIHadoopFile$2.apply(PairRDDFunctions.scala:985)
    at org.apache.spark.rdd.PairRDDFunctions$$anonfun$saveAsNewAPIHadoopFile$2.apply(PairRDDFunctions.scala:985)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:111)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:316)
    at org.apache.spark.rdd.PairRDDFunctions.saveAsNewAPIHadoopFile(PairRDDFunctions.scala:985)
    at org.apache.spark.rdd.InstrumentedPairRDDFunctions.saveAsNewAPIHadoopFile(InstrumentedPairRDDFunctions.scala:487)
    at org.bdgenomics.adam.rdd.ADAMRDDFunctions$$anonfun$adamParquetSave$1.apply$mcV$sp(ADAMRDDFunctions.scala:77)
    at org.bdgenomics.adam.rdd.ADAMRDDFunctions$$anonfun$adamParquetSave$1.apply(ADAMRDDFunctions.scala:61)
    at org.bdgenomics.adam.rdd.ADAMRDDFunctions$$anonfun$adamParquetSave$1.apply(ADAMRDDFunctions.scala:61)
    at org.apache.spark.rdd.Timer.time(Timer.scala:57)
    at org.bdgenomics.adam.rdd.ADAMRDDFunctions.adamParquetSave(ADAMRDDFunctions.scala:61)
    at org.bdgenomics.adam.rdd.ADAMRDDFunctions.adamParquetSave(ADAMRDDFunctions.scala:46)
    at org.bdgenomics.adam.rdd.read.AlignmentRecordRDDFunctions.saveAsParquet(AlignmentRecordRDDFunctions.scala:199)
    at org.bdgenomics.adam.rdd.read.AlignmentRecordRDDFunctions.adamSave(AlignmentRecordRDDFunctions.scala:251)
    at org.bdgenomics.adam.cli.Transform.run(Transform.scala:315)
    at org.bdgenomics.utils.cli.BDGSparkCommand$class.run(BDGCommand.scala:54)
    at org.bdgenomics.adam.cli.Transform.run(Transform.scala:119)
    at org.bdgenomics.adam.cli.ADAMMain.apply(ADAMMain.scala:136)
    at org.bdgenomics.adam.cli.ADAMMain$.main(ADAMMain.scala:76)
    at org.bdgenomics.adam.cli.ADAMMain.main(ADAMMain.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:497)
    at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:731)
    at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:181)
    at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:206)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: java.util.NoSuchElementException: None.get
    at scala.None$.get(Option.scala:313)
    at scala.None$.get(Option.scala:311)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$.org$bdgenomics$adam$rdd$read$MarkDuplicates$$leftPositionAndLibrary$1(MarkDuplicates.scala:73)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$$anonfun$apply$3.apply(MarkDuplicates.scala:86)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$$anonfun$apply$3.apply(MarkDuplicates.scala:86)
    at org.apache.spark.rdd.RDD$$anonfun$groupBy$3$$anonfun$apply$19.apply(RDD.scala:650)
    at org.apache.spark.rdd.RDD$$anonfun$groupBy$3$$anonfun$apply$19.apply(RDD.scala:650)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:149)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
    at org.apache.spark.scheduler.Task.run(Task.scala:89)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
2016-02-09 00:21:44 WARN TaskSetManager:70 - Lost task 1.0 in stage 1.0 (TID 47, localhost): TaskKilled (killed intentionally)
2016-02-09 00:21:44 WARN TaskSetManager:70 - Lost task 4.0 in stage 1.0 (TID 50, localhost): TaskKilled (killed intentionally)
2016-02-09 00:21:44 WARN TaskSetManager:70 - Lost task 8.0 in stage 1.0 (TID 54, localhost): TaskKilled (killed intentionally)
2016-02-09 00:21:44 WARN TaskSetManager:70 - Lost task 7.0 in stage 1.0 (TID 53, localhost): TaskKilled (killed intentionally)
2016-02-09 00:21:44 WARN TaskSetManager:70 - Lost task 0.0 in stage 1.0 (TID 46, localhost): TaskKilled (killed intentionally)
Feb 9, 2016 12:19:47 AM INFO: org.apache.parquet.hadoop.ParquetInputFormat: Total input paths to process : 46
2016-02-09 00:21:44 WARN TaskSetManager:70 - Lost task 3.0 in stage 1.0 (TID 49, localhost): TaskKilled (killed intentionally)
2016-02-09 00:21:44 ERROR Executor:74 - Managed memory leak detected; size = 81182856 bytes, TID = 51
2016-02-09 00:21:44 ERROR Executor:95 - Exception in task 5.0 in stage 1.0 (TID 51)
java.util.NoSuchElementException: None.get
    at scala.None$.get(Option.scala:313)
    at scala.None$.get(Option.scala:311)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$.org$bdgenomics$adam$rdd$read$MarkDuplicates$$leftPositionAndLibrary$1(MarkDuplicates.scala:73)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$$anonfun$apply$3.apply(MarkDuplicates.scala:86)
    at org.bdgenomics.adam.rdd.read.MarkDuplicates$$anonfun$apply$3.apply(MarkDuplicates.scala:86)
    at org.apache.spark.rdd.RDD$$anonfun$groupBy$3$$anonfun$apply$19.apply(RDD.scala:650)
    at org.apache.spark.rdd.RDD$$anonfun$groupBy$3$$anonfun$apply$19.apply(RDD.scala:650)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:149)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
    at org.apache.spark.scheduler.Task.run(Task.scala:89)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
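
Every task failure above is the same java.util.NoSuchElementException: None.get, raised from MarkDuplicates.scala line 73 (leftPositionAndLibrary): an Option that was expected to hold a value was None and was unwrapped with .get. A plausible cause, though the log alone cannot confirm it, is that reads in run2 carry no record group (and therefore no library name), which duplicate marking needs in order to group reads by position and library. The "Managed memory leak detected" errors are most likely a secondary symptom: when tasks die mid-shuffle, their managed memory is reported as unreleased, so they are noise rather than the root cause. Below is a minimal, self-contained Scala sketch of the failure mode and a defensive alternative; Read and libraryOf are hypothetical stand-ins for illustration, not ADAM's actual API.

object NoneGetSketch {
  // Hypothetical model of a read whose record-group library may be absent.
  case class Read(recordGroupLibrary: Option[String])

  // Stand-in for the library lookup; None models a read with no record group set.
  def libraryOf(read: Read): Option[String] = read.recordGroupLibrary

  def main(args: Array[String]): Unit = {
    val read = Read(recordGroupLibrary = None)

    // Unwrapping with .get reproduces the exception seen in the log:
    //   java.util.NoSuchElementException: None.get
    // val lib = libraryOf(read).get

    // Defensive alternative: fall back to a sentinel value instead of crashing.
    val lib = libraryOf(read).getOrElse("unknown-library")
    println(lib) // prints "unknown-library"
  }
}

If the input really does lack record group metadata, re-running transform without -mark_duplicate_reads, or regenerating run2 with record groups attached upstream, would sidestep the .get.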