2019-04-24T12:17:55.363+0000 SCALA 20190424121725g1lCs xcsf-pro processing xcsf-gl-krb-inr-ingresosefectivo-01 gl INFO org.apache.spark.internal.Logging$class: ResultStage 0 (zipWithIndex at OffsetTransformation.scala:56) failed in 4.744 s due to Job aborted due to stage failure: Task 5 in stage 0.0 failed 4 times, most recent failure: Lost task 5.3 in stage 0.0 (TID 9, 192.168.192.107, executor 1): java.lang.RuntimeException: Malformed CSV record
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser.org$apache$spark$sql$execution$datasources$csv$UnivocityParser$$convert(UnivocityParser.scala:213)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser.parse(UnivocityParser.scala:191)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$5.apply(UnivocityParser.scala:308)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$5.apply(UnivocityParser.scala:308)
    at org.apache.spark.sql.execution.datasources.FailureSafeParser.parse(FailureSafeParser.scala:60)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$parseIterator$1.apply(UnivocityParser.scala:312)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$parseIterator$1.apply(UnivocityParser.scala:312)
    at scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:434)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:105)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:395)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at org.apache.spark.util.Utils$.getIteratorSize(Utils.scala:1793)
    at org.apache.spark.rdd.ZippedWithIndexRDD$$anonfun$2.apply(ZippedWithIndexRDD.scala:52)
    at org.apache.spark.rdd.ZippedWithIndexRDD$$anonfun$2.apply(ZippedWithIndexRDD.scala:52)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2070)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2070)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    at org.apache.spark.scheduler.Task.run(Task.scala:108)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Driver stacktrace:
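
Note: the worker-side root cause above is Spark's CSV datasource rejecting a record whose field count does not match the declared schema. In the Spark release these frame numbers correspond to (2.2.x), UnivocityParser raises exactly this java.lang.RuntimeException: Malformed CSV record, and FailureSafeParser rethrows it when the reader runs in FAILFAST mode; PERMISSIVE, the default, would null the bad fields instead. A minimal sketch that reproduces the failure under those assumptions; the schema, column names, and path are hypothetical stand-ins, not the real t_endt_ingresos_en_efectivo.json schema:

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{StringType, StructField, StructType}

object MalformedCsvSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("malformed-csv-sketch").master("local[*]").getOrCreate()
    import spark.implicits._

    // Hypothetical two-column schema; the second data row below is missing a field.
    val schema = StructType(Seq(
      StructField("personal_id", StringType),
      StructField("paid_amount", StringType)))
    val path = "/tmp/malformed-sketch.csv" // hypothetical path
    Seq("personal_id;paid_amount", "A-1;1.234,56", "B-2")
      .toDS.coalesce(1).write.mode("overwrite").text(path)

    // FAILFAST surfaces java.lang.RuntimeException: Malformed CSV record on row "B-2".
    spark.read.schema(schema)
      .option("header", "true")
      .option("delimiter", ";")
      .option("mode", "FAILFAST")
      .csv(path)
      .count()
  }
}

Switching the reader to DROPMALFORMED, or PERMISSIVE with a corrupt-record column, tolerates the bad row; whether that is acceptable here depends on the pipeline's data-quality rules.
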
2019-04-24T12:17:55.368+0000 SCALA 20190424121725g1lCs xcsf-pro processing xcsf-gl-krb-inr-ingresosefectivo-01 gl INFO org.apache.spark.internal.Logging$class: Job 0 failed: zipWithIndex at OffsetTransformation.scala:56, took 4.906124 s
2019-04-24T12:17:55.370+0000 SCALA 20190424121725g1lCs xcsf-pro processing xcsf-gl-krb-inr-ingresosefectivo-01 gl ERROR com.datio.kirby.CheckFlow$class: Exception applying transformations: org.apache.spark.SparkException: Job aborted due to stage failure: Task 5 in stage 0.0 failed 4 times, most recent failure: Lost task 5.3 in stage 0.0 (TID 9, 192.168.192.107, executor 1): java.lang.RuntimeException: Malformed CSV record
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser.org$apache$spark$sql$execution$datasources$csv$UnivocityParser$$convert(UnivocityParser.scala:213)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser.parse(UnivocityParser.scala:191)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$5.apply(UnivocityParser.scala:308)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$5.apply(UnivocityParser.scala:308)
    at org.apache.spark.sql.execution.datasources.FailureSafeParser.parse(FailureSafeParser.scala:60)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$parseIterator$1.apply(UnivocityParser.scala:312)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$parseIterator$1.apply(UnivocityParser.scala:312)
    at scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:434)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:105)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:395)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at org.apache.spark.util.Utils$.getIteratorSize(Utils.scala:1793)
    at org.apache.spark.rdd.ZippedWithIndexRDD$$anonfun$2.apply(ZippedWithIndexRDD.scala:52)
    at org.apache.spark.rdd.ZippedWithIndexRDD$$anonfun$2.apply(ZippedWithIndexRDD.scala:52)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2070)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2070)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    at org.apache.spark.scheduler.Task.run(Task.scala:108)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1517)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1505)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1504)
    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1504)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:814)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:814)
    at scala.Option.foreach(Option.scala:257)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:814)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1732)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1687)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1676)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:630)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2030)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2051)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2070)
    at org.apache.spark.rdd.ZippedWithIndexRDD.<init>(ZippedWithIndexRDD.scala:50)
    at org.apache.spark.rdd.RDD$$anonfun$zipWithIndex$1.apply(RDD.scala:1294)
    at org.apache.spark.rdd.RDD$$anonfun$zipWithIndex$1.apply(RDD.scala:1294)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:362)
    at org.apache.spark.rdd.RDD.zipWithIndex(RDD.scala:1293)
    at com.bbva.cib.core.transformations.row.offset.OffsetTransformation.transform(OffsetTransformation.scala:56)
    at com.datio.kirby.CheckFlow$$anonfun$applyTransformations$1.apply(CheckFlow.scala:48)
    at com.datio.kirby.CheckFlow$$anonfun$applyTransformations$1.apply(CheckFlow.scala:42)
    at scala.collection.IndexedSeqOptimized$class.foldl(IndexedSeqOptimized.scala:57)
    at scala.collection.IndexedSeqOptimized$class.foldLeft(IndexedSeqOptimized.scala:66)
    at scala.collection.mutable.ArrayBuffer.foldLeft(ArrayBuffer.scala:48)
    at com.datio.kirby.CheckFlow$class.applyTransformations(CheckFlow.scala:42)
    at com.datio.kirby.Launcher$.applyTransformations(Launcher.scala:23)
    at com.datio.kirby.Launcher$$anonfun$1.apply$mcV$sp(Launcher.scala:73)
    at com.datio.kirby.Launcher$$anonfun$1.apply(Launcher.scala:60)
    at com.datio.kirby.Launcher$$anonfun$1.apply(Launcher.scala:60)
    at scala.util.Try$.apply(Try.scala:192)
    at com.datio.kirby.Launcher$.runProcess(Launcher.scala:60)
    at com.bbva.cib.cirby.CIBLauncherTrait$class.runProcess(CIBLauncher.scala:30)
    at com.bbva.cib.cirby.CIBLauncher$.runProcess(CIBLauncher.scala:44)
    at com.datio.spark.SparkLauncher$class.runTask(SparkLauncher.scala:65)
    at com.bbva.cib.cirby.CIBLauncher$.runTask(CIBLauncher.scala:44)
    at com.datio.spark.InitSpark$class.main(InitSpark.scala:19)
    at com.bbva.cib.cirby.CIBLauncher$.main(CIBLauncher.scala:44)
    at com.bbva.cib.cirby.CIBLauncher.main(CIBLauncher.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:809)
    at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:182)
    at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:207)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: java.lang.RuntimeException: Malformed CSV record
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser.org$apache$spark$sql$execution$datasources$csv$UnivocityParser$$convert(UnivocityParser.scala:213)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser.parse(UnivocityParser.scala:191)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$5.apply(UnivocityParser.scala:308)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$5.apply(UnivocityParser.scala:308)
    at org.apache.spark.sql.execution.datasources.FailureSafeParser.parse(FailureSafeParser.scala:60)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$parseIterator$1.apply(UnivocityParser.scala:312)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$parseIterator$1.apply(UnivocityParser.scala:312)
    at scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:434)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:105)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:395)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at org.apache.spark.util.Utils$.getIteratorSize(Utils.scala:1793)
    at org.apache.spark.rdd.ZippedWithIndexRDD$$anonfun$2.apply(ZippedWithIndexRDD.scala:52)
    at org.apache.spark.rdd.ZippedWithIndexRDD$$anonfun$2.apply(ZippedWithIndexRDD.scala:52)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2070)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2070)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    at org.apache.spark.scheduler.Task.run(Task.scala:108)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
2019-04-24T12:17:55.378+0000 SCALA 20190424121725g1lCs xcsf-pro processing xcsf-gl-krb-inr-ingresosefectivo-01 gl WARN com.datio.kirby.Launcher$: Input Args: Config(SimpleConfigObject({"InputCsvRegexReplaced":{},"kirby":{"input":{"class":"com.bbva.cib.core.input.csvregexreplaced.CsvRegexReplacedInput","encoding":"UTF-16","options":{"delimiter":";","header":true},"paths":["/in/staging/ratransmit/ptcib_es/endt/ENDT_D02_20190423_INGRESOSEFECTIVO.dat"],"regex":[{"regex2Find":"\\.","regex2Replace":""}],"schema":{"path":"/data/raw/xcsf/endt/schemas/current/t_endt_ingresos_en_efectivo.json"},"tmpPath":"/in/staging/ratransmit/ptcib_es/endt/tmp/","type":"custom","withoutSpark":true},"output":{"mode":"append","partition":["odate_date"],"path":"/data/raw/xcsf/endt/data/t_endt_cash_receipts","repartition":{"partitions":10},"schema":{"path":"/data/raw/xcsf/endt/schemas/current/t_endt_ingresos_en_efectivo.json"},"type":"avro"},"transformations":[{"class":"com.bbva.cib.core.transformations.row.offset.OffsetTransformation","type":"custom"},{"auditIdValue":"XCSF5000_GTB_NBO_T_ENDT_INGRESOS_EN_EFECTIVO_0022D","class":"com.bbva.cib.core.transformation.row.audit.AuditRawTransformation","type":"custom"},{"default":"20190423","defaultType":"string","field":"odate_date","type":"literal"},{"field":"personal_id","trimType":"right","type":"trim"},{"class":"com.bbva.cib.core.transformations.row.replace.ReplaceTransformation","columnPattern":".*_(per|amount)","findPattern":",","replacePattern":"\\.","type":"custom"}]},"sparkMetrics":{"listeners":["default"],"output":[{"auth":true,"disable-hostname-verification":true, ,"type":"rest","url":"https://monitoring-kibana-1.live01.daas.gl.igrupobbva:443/monitoring/argos-api/metrics"},{"type":"console"}],"streaming_listeners":["default"],"streaming_output":[{"auth":true,"disable-hostname-verification":true, ,"type":"rest","url":"https://monitoring-kibana-1.live01.daas.gl.igrupobbva:443/monitoring/argos-api/metrics"},{"type":"console"}]}}))
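
Note: the config dump above shows what the job was trying to do: read a semicolon-delimited, UTF-16, headered CSV (.dat) against a JSON schema, strip literal dots from the raw lines on input ("regex2Find":"\\."), then rewrite commas to dots in columns matching ".*_(per|amount)" — that is, normalize European-formatted numbers such as 1.234,56 to 1234.56 before writing Avro. A rough DataFrame-level sketch of that normalization; the real CsvRegexReplacedInput and ReplaceTransformation are internal classes, the input-side regex actually runs on raw lines rather than per column, and the column names here are assumptions:

import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions.regexp_replace

// Sketch only: approximates the configured regex + replace transformations
// with standard Spark SQL functions, applied per matching column.
def normalizeEuropeanDecimals(df: DataFrame): DataFrame = {
  val targets = df.columns.filter(_.matches(".*_(per|amount)"))
  targets.foldLeft(df) { (acc, name) =>
    // "1.234,56" -> "1234,56" (drop thousands separators) -> "1234.56" (decimal point).
    acc.withColumn(name,
      regexp_replace(regexp_replace(acc(name), "\\.", ""), ",", "."))
  }
}
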
2019-04-24T12:17:55.380+0000 SCALA 20190424121725g1lCs xcsf-pro processing xcsf-gl-krb-inr-ingresosefectivo-01 gl ERROR com.datio.kirby.Launcher$: Exception: com.datio.kirby.api.exceptions.KirbyException: 116 - Transformation Error: Fatal error in the transformations
    at com.datio.kirby.CheckFlow$class.applyTransformations(CheckFlow.scala:56)
    at com.datio.kirby.Launcher$.applyTransformations(Launcher.scala:23)
    at com.datio.kirby.Launcher$$anonfun$1.apply$mcV$sp(Launcher.scala:73)
    at com.datio.kirby.Launcher$$anonfun$1.apply(Launcher.scala:60)
    at com.datio.kirby.Launcher$$anonfun$1.apply(Launcher.scala:60)
    at scala.util.Try$.apply(Try.scala:192)
    at com.datio.kirby.Launcher$.runProcess(Launcher.scala:60)
    at com.bbva.cib.cirby.CIBLauncherTrait$class.runProcess(CIBLauncher.scala:30)
    at com.bbva.cib.cirby.CIBLauncher$.runProcess(CIBLauncher.scala:44)
    at com.datio.spark.SparkLauncher$class.runTask(SparkLauncher.scala:65)
    at com.bbva.cib.cirby.CIBLauncher$.runTask(CIBLauncher.scala:44)
    at com.datio.spark.InitSpark$class.main(InitSpark.scala:19)
    at com.bbva.cib.cirby.CIBLauncher$.main(CIBLauncher.scala:44)
    at com.bbva.cib.cirby.CIBLauncher.main(CIBLauncher.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:809)
    at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:182)
    at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:207)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 5 in stage 0.0 failed 4 times, most recent failure: Lost task 5.3 in stage 0.0 (TID 9, 192.168.192.107, executor 1): java.lang.RuntimeException: Malformed CSV record
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser.org$apache$spark$sql$execution$datasources$csv$UnivocityParser$$convert(UnivocityParser.scala:213)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser.parse(UnivocityParser.scala:191)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$5.apply(UnivocityParser.scala:308)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$5.apply(UnivocityParser.scala:308)
    at org.apache.spark.sql.execution.datasources.FailureSafeParser.parse(FailureSafeParser.scala:60)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$parseIterator$1.apply(UnivocityParser.scala:312)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$parseIterator$1.apply(UnivocityParser.scala:312)
    at scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:434)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:105)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:395)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at org.apache.spark.util.Utils$.getIteratorSize(Utils.scala:1793)
    at org.apache.spark.rdd.ZippedWithIndexRDD$$anonfun$2.apply(ZippedWithIndexRDD.scala:52)
    at org.apache.spark.rdd.ZippedWithIndexRDD$$anonfun$2.apply(ZippedWithIndexRDD.scala:52)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2070)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2070)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    at org.apache.spark.scheduler.Task.run(Task.scala:108)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1517)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1505)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1504)
    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1504)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:814)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:814)
    at scala.Option.foreach(Option.scala:257)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:814)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1732)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1687)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1676)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:630)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2030)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2051)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2070)
    at org.apache.spark.rdd.ZippedWithIndexRDD.<init>(ZippedWithIndexRDD.scala:50)
    at org.apache.spark.rdd.RDD$$anonfun$zipWithIndex$1.apply(RDD.scala:1294)
    at org.apache.spark.rdd.RDD$$anonfun$zipWithIndex$1.apply(RDD.scala:1294)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:362)
    at org.apache.spark.rdd.RDD.zipWithIndex(RDD.scala:1293)
    at com.bbva.cib.core.transformations.row.offset.OffsetTransformation.transform(OffsetTransformation.scala:56)
    at com.datio.kirby.CheckFlow$$anonfun$applyTransformations$1.apply(CheckFlow.scala:48)
    at com.datio.kirby.CheckFlow$$anonfun$applyTransformations$1.apply(CheckFlow.scala:42)
    at scala.collection.IndexedSeqOptimized$class.foldl(IndexedSeqOptimized.scala:57)
    at scala.collection.IndexedSeqOptimized$class.foldLeft(IndexedSeqOptimized.scala:66)
    at scala.collection.mutable.ArrayBuffer.foldLeft(ArrayBuffer.scala:48)
    at com.datio.kirby.CheckFlow$class.applyTransformations(CheckFlow.scala:42)
    ... 22 more
Caused by: java.lang.RuntimeException: Malformed CSV record
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser.org$apache$spark$sql$execution$datasources$csv$UnivocityParser$$convert(UnivocityParser.scala:213)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser.parse(UnivocityParser.scala:191)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$5.apply(UnivocityParser.scala:308)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$5.apply(UnivocityParser.scala:308)
    at org.apache.spark.sql.execution.datasources.FailureSafeParser.parse(FailureSafeParser.scala:60)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$parseIterator$1.apply(UnivocityParser.scala:312)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$parseIterator$1.apply(UnivocityParser.scala:312)
    at scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:434)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:105)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:395)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at org.apache.spark.util.Utils$.getIteratorSize(Utils.scala:1793)
    at org.apache.spark.rdd.ZippedWithIndexRDD$$anonfun$2.apply(ZippedWithIndexRDD.scala:52)
    at org.apache.spark.rdd.ZippedWithIndexRDD$$anonfun$2.apply(ZippedWithIndexRDD.scala:52)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2070)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2070)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    at org.apache.spark.scheduler.Task.run(Task.scala:108)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
2019-04-24T12:17:55.380+0000 SCALA 20190424121725g1lCs xcsf-pro processing xcsf-gl-krb-inr-ingresosefectivo-01 gl ERROR com.datio.spark.SparkLauncher$class: Exception message: 116 - Transformation Error: Fatal error in the transformations
2019-04-24T12:17:55.381+0000 SCALA 20190424121725g1lCs xcsf-pro processing xcsf-gl-krb-inr-ingresosefectivo-01 gl ERROR com.datio.spark.SparkLauncher$class: Exception cause: Job aborted due to stage failure: Task 5 in stage 0.0 failed 4 times, most recent failure: Lost task 5.3 in stage 0.0 (TID 9, 192.168.192.107, executor 1): java.lang.RuntimeException: Malformed CSV record
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser.org$apache$spark$sql$execution$datasources$csv$UnivocityParser$$convert(UnivocityParser.scala:213)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser.parse(UnivocityParser.scala:191)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$5.apply(UnivocityParser.scala:308)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$5.apply(UnivocityParser.scala:308)
    at org.apache.spark.sql.execution.datasources.FailureSafeParser.parse(FailureSafeParser.scala:60)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$parseIterator$1.apply(UnivocityParser.scala:312)
    at org.apache.spark.sql.execution.datasources.csv.UnivocityParser$$anonfun$parseIterator$1.apply(UnivocityParser.scala:312)
    at scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:434)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.hasNext(FileScanRDD.scala:105)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:395)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:408)
    at org.apache.spark.util.Utils$.getIteratorSize(Utils.scala:1793)
    at org.apache.spark.rdd.ZippedWithIndexRDD$$anonfun$2.apply(ZippedWithIndexRDD.scala:52)
    at org.apache.spark.rdd.ZippedWithIndexRDD$$anonfun$2.apply(ZippedWithIndexRDD.scala:52)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2070)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2070)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    at org.apache.spark.scheduler.Task.run(Task.scala:108)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
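
Note: the failure point, zipWithIndex at OffsetTransformation.scala:56, explains why the job dies in the transformation phase rather than at the Avro write: RDD.zipWithIndex launches an eager Spark job of its own to count every partition before it can assign global indices, so it is the first action that forces a full scan of the CSV. A minimal sketch of such a row-numbering step, assuming the internal OffsetTransformation behaves like RDD.zipWithIndex; the "row_index" column name is an assumption:

import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.types.{LongType, StructField}

// Sketch only: numbers rows the way RDD.zipWithIndex does.
def withRowIndex(df: DataFrame): DataFrame = {
  // zipWithIndex runs its own Spark job to count each partition before it can
  // hand out global indices, which is what forced the full CSV scan above.
  val indexed = df.rdd.zipWithIndex.map { case (row, idx) => Row.fromSeq(row.toSeq :+ idx) }
  df.sparkSession.createDataFrame(indexed, df.schema.add(StructField("row_index", LongType)))
}

Because that counting job is unavoidable, the only way past this stage is to fix the offending record in ENDT_D02_20190423_INGRESOSEFECTIVO.dat or relax the parse mode as sketched earlier.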