>>> try:
...     test_format.process_df(spark)
... except Exception as e:
...     print('col should be dropped: {}'.format(e))
...
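(Note that the job fails lazily: process_df only triggers execution when an action such as show() runs, so the ValueError surfaces in the executor logs below rather than at the call site. For reference, a minimal, hypothetical sketch of the kind of code in test_format.py that produces this failure mode. Only validate_format, process_df, and the bare raise ValueError at line 28 are taken from the traceback; every other name and the format check itself are assumptions:)

    # Hypothetical reconstruction of the failing pieces of test_format.py.
    from pyspark.sql.functions import col, udf
    from pyspark.sql.types import StringType

    def validate_format(value):
        # Assumed check; the real condition is unknown. The bare raise
        # matches line 28 of test_format.py in the traceback.
        if value is None or not value.isdigit():
            raise ValueError
        return value

    validate_format_udf = udf(validate_format, StringType())

    def process_df(spark):
        df = spark.createDataFrame([('123',), ('abc',)], ['col'])
        # show() is the action: it forces the UDF to run on the executors,
        # which is why the ValueError appears inside the Spark task logs.
        df.select(validate_format_udf(col('col')).alias('col')).show()
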
19/09/30 17:03:55 ERROR Executor: Exception in task 2.0 in stage 5.0 (TID 11)
org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/worker.py", line 377, in main
    process()
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/worker.py", line 372, in process
    serializer.dump_stream(func(split_index, iterator), outfile)
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/serializers.py", line 345, in dump_stream
    self.serializer.dump_stream(self._batched(iterator), stream)
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/serializers.py", line 141, in dump_stream
    for obj in iterator:
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/serializers.py", line 334, in _batched
    for item in iterator:
  File "<string>", line 1, in <lambda>
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/worker.py", line 85, in <lambda>
    return lambda *a: f(*a)
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/util.py", line 99, in wrapper
    return f(*args, **kwargs)
  File "/Users/aviralsrivastava/dev/data-engineer-fc-avisrivastava254084/test_format.py", line 28, in validate_format
    raise ValueError
ValueError

    at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:456)
    at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:81)
    at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:64)
    at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:410)
    at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage2.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$13$$anon$1.hasNext(WholeStageCodegenExec.scala:636)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:255)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:247)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:836)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:836)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:123)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
19/09/30 17:03:55 ERROR Executor: Exception in task 0.0 in stage 5.0 (TID 9)
org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/worker.py", line 377, in main
    process()
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/worker.py", line 372, in process
    serializer.dump_stream(func(split_index, iterator), outfile)
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/serializers.py", line 345, in dump_stream
    self.serializer.dump_stream(self._batched(iterator), stream)
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/serializers.py", line 141, in dump_stream
    for obj in iterator:
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/serializers.py", line 334, in _batched
    for item in iterator:
  File "<string>", line 1, in <lambda>
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/worker.py", line 85, in <lambda>
    return lambda *a: f(*a)
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/util.py", line 99, in wrapper
    return f(*args, **kwargs)
  File "/Users/aviralsrivastava/dev/data-engineer-fc-avisrivastava254084/test_format.py", line 28, in validate_format
    raise ValueError
ValueError

    at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:456)
    at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:81)
    at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:64)
    at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:410)
    at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage2.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$13$$anon$1.hasNext(WholeStageCodegenExec.scala:636)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:255)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:247)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:836)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:836)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:123)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
19/09/30 17:03:55 WARN TaskSetManager: Lost task 2.0 in stage 5.0 (TID 11, localhost, executor driver): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/worker.py", line 377, in main
    process()
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/worker.py", line 372, in process
    serializer.dump_stream(func(split_index, iterator), outfile)
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/serializers.py", line 345, in dump_stream
    self.serializer.dump_stream(self._batched(iterator), stream)
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/serializers.py", line 141, in dump_stream
    for obj in iterator:
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/serializers.py", line 334, in _batched
    for item in iterator:
  File "<string>", line 1, in <lambda>
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/worker.py", line 85, in <lambda>
    return lambda *a: f(*a)
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/util.py", line 99, in wrapper
    return f(*args, **kwargs)
  File "/Users/aviralsrivastava/dev/data-engineer-fc-avisrivastava254084/test_format.py", line 28, in validate_format
    raise ValueError
ValueError

    at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:456)
    at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:81)
    at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:64)
    at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:410)
    at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage2.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$13$$anon$1.hasNext(WholeStageCodegenExec.scala:636)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:255)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:247)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:836)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:836)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:123)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)

19/09/30 17:03:55 ERROR TaskSetManager: Task 2 in stage 5.0 failed 1 times; aborting job
col should be dropped: An error occurred while calling o215.showString.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 2 in stage 5.0 failed 1 times, most recent failure: Lost task 2.0 in stage 5.0 (TID 11, localhost, executor driver): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/worker.py", line 377, in main
    process()
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/worker.py", line 372, in process
    serializer.dump_stream(func(split_index, iterator), outfile)
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/serializers.py", line 345, in dump_stream
    self.serializer.dump_stream(self._batched(iterator), stream)
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/serializers.py", line 141, in dump_stream
    for obj in iterator:
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/serializers.py", line 334, in _batched
    for item in iterator:
  File "<string>", line 1, in <lambda>
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/worker.py", line 85, in <lambda>
    return lambda *a: f(*a)
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/util.py", line 99, in wrapper
    return f(*args, **kwargs)
  File "/Users/aviralsrivastava/dev/data-engineer-fc-avisrivastava254084/test_format.py", line 28, in validate_format
    raise ValueError
ValueError

    at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:456)
    at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:81)
    at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:64)
    at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:410)
    at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage2.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$13$$anon$1.hasNext(WholeStageCodegenExec.scala:636)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:255)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:247)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:836)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:836)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:123)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)

Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1889)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1877)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1876)
    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1876)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:926)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:926)
    at scala.Option.foreach(Option.scala:257)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:926)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2110)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2059)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2048)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:737)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2061)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2082)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2101)
    at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:365)
    at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:38)
    at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collectFromPlan(Dataset.scala:3389)
    at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2550)
    at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2550)
    at org.apache.spark.sql.Dataset$$anonfun$52.apply(Dataset.scala:3370)
    at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:78)
    at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:125)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:73)
    at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3369)
    at org.apache.spark.sql.Dataset.head(Dataset.scala:2550)
    at org.apache.spark.sql.Dataset.take(Dataset.scala:2764)
    at org.apache.spark.sql.Dataset.getRows(Dataset.scala:254)
    at org.apache.spark.sql.Dataset.showString(Dataset.scala:291)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
    at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
    at py4j.Gateway.invoke(Gateway.java:282)
    at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
    at py4j.commands.CallCommand.execute(CallCommand.java:79)
    at py4j.GatewayConnection.run(GatewayConnection.java:238)
    at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/worker.py", line 377, in main
    process()
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/worker.py", line 372, in process
    serializer.dump_stream(func(split_index, iterator), outfile)
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/serializers.py", line 345, in dump_stream
    self.serializer.dump_stream(self._batched(iterator), stream)
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/serializers.py", line 141, in dump_stream
    for obj in iterator:
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/serializers.py", line 334, in _batched
    for item in iterator:
  File "<string>", line 1, in <lambda>
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/worker.py", line 85, in <lambda>
    return lambda *a: f(*a)
  File "/usr/local/lib/python3.7/site-packages/pyspark/python/lib/pyspark.zip/pyspark/util.py", line 99, in wrapper
    return f(*args, **kwargs)
  File "/Users/aviralsrivastava/dev/data-engineer-fc-avisrivastava254084/test_format.py", line 28, in validate_format
    raise ValueError
ValueError

    at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:456)
    at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:81)
    at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:64)
    at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:410)
    at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
    at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage2.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$13$$anon$1.hasNext(WholeStageCodegenExec.scala:636)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:255)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:247)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:836)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:836)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:123)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    ... 1 more

19/09/30 17:03:55 WARN TaskSetManager: Lost task 1.0 in stage 5.0 (TID 10, localhost, executor driver): TaskKilled (Stage cancelled)
>>>
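
(One takeaway from the session above: the driver never sees the raw ValueError. Once the show() action forces evaluation, the executor-side Python exception comes back wrapped as a py4j.protocol.Py4JJavaError, which is why the printed message begins "An error occurred while calling o215.showString". A bare except Exception works, as above, but a narrower handler avoids masking unrelated errors. This is a hypothetical refinement, not part of the original session:)

    # Narrower variant of the try/except above; test_format and spark
    # are assumed to be defined exactly as in the session.
    from py4j.protocol import Py4JJavaError

    try:
        test_format.process_df(spark)
    except Py4JJavaError as e:
        # e.java_exception carries the wrapped Spark/Python stack trace
        # if more detail than the message string is needed.
        print('col should be dropped: {}'.format(e))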