pdf = df.toPandas()
---------------------------------------------------------------------------
Py4JJavaError                             Traceback (most recent call last)
<timed exec> in <module>
/usr/local/spark/python/pyspark/sql/dataframe.py in toPandas(self)
   2140
   2141 # Below is toPandas without Arrow optimization.
-> 2142 pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns)
   2143
   2144 dtype = {}
/usr/local/spark/python/pyspark/sql/dataframe.py in collect(self)
    531 """
    532 with SCCallSiteSync(self._sc) as css:
--> 533 sock_info = self._jdf.collectToPython()
    534 return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
    535
/usr/local/spark/python/lib/py4j-0.10.7-src.zip/py4j/java_gateway.py in __call__(self, *args)
   1255 answer = self.gateway_client.send_command(command)
   1256 return_value = get_return_value(
-> 1257 answer, self.gateway_client, self.target_id, self.name)
   1258
   1259 for temp_arg in temp_args:
/usr/local/spark/python/pyspark/sql/utils.py in deco(*a, **kw)
     61 def deco(*a, **kw):
     62 try:
---> 63 return f(*a, **kw)
     64 except py4j.protocol.Py4JJavaError as e:
     65 s = e.java_exception.toString()
/usr/local/spark/python/lib/py4j-0.10.7-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
    326 raise Py4JJavaError(
    327 "An error occurred while calling {0}{1}{2}.\n".
--> 328 format(target_id, ".", name), value)
    329 else:
    330 raise Py4JError(
Py4JJavaError: An error occurred while calling o99.collectToPython.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 64 in stage 6.0 failed 1 times, most recent failure: Lost task 64.0 in stage 6.0 (TID 780, localhost, executor driver): java.lang.OutOfMemoryError: Java heap space
Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:1889)
    at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:1877)
    at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:1876)
    at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
    at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1876)
    at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:926)
    at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:926)
    at scala.Option.foreach(Option.scala:274)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:926)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2110)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2059)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2048)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:737)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2061)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2082)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2101)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2126)
    at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:945)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:363)
    at org.apache.spark.rdd.RDD.collect(RDD.scala:944)
    at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:299)
    at org.apache.spark.sql.Dataset.$anonfun$collectToPython$1(Dataset.scala:3257)
    at org.apache.spark.sql.Dataset.$anonfun$withAction$2(Dataset.scala:3364)
    at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:78)
    at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:125)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:73)
    at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3364)
    at org.apache.spark.sql.Dataset.collectToPython(Dataset.scala:3254)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
    at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
    at py4j.Gateway.invoke(Gateway.java:282)
    at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
    at py4j.commands.CallCommand.execute(CallCommand.java:79)
    at py4j.GatewayConnection.run(GatewayConnection.java:238)
    at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.OutOfMemoryError: Java heap space
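
What the traceback shows: toPandas() calls collectToPython(), which materializes every row of the DataFrame in the driver JVM before handing it to pandas, so any DataFrame larger than the driver heap aborts with java.lang.OutOfMemoryError: Java heap space. Below is a minimal sketch of common workarounds, assuming `spark` is the active SparkSession and `df` is the DataFrame from the paste; the app name, memory size, row limit, and sample fraction are placeholders, not values taken from the original job.

# Sketch of ways to avoid the driver OOM seen above (placeholder values throughout).
from pyspark.sql import SparkSession

# 1) Give the driver JVM more heap. spark.driver.memory only takes effect when the
#    JVM starts, so set it when the session is first created (or via
#    spark-submit --driver-memory); changing it on an already-running session does nothing.
spark = (
    SparkSession.builder
    .appName("toPandas-oom-repro")        # hypothetical app name
    .config("spark.driver.memory", "8g")  # placeholder size
    .getOrCreate()
)

# 2) Don't collect everything: pull only as much as fits in driver memory.
pdf_head = df.limit(100_000).toPandas()           # first N rows only
pdf_sample = df.sample(fraction=0.01).toPandas()  # ~1% random sample

# 3) Optionally enable Arrow-based conversion, which reduces the Python-side cost of
#    toPandas; the collected data still has to fit on the driver. The config key is
#    spark.sql.execution.arrow.enabled on Spark 2.x (as in this traceback) and
#    spark.sql.execution.arrow.pyspark.enabled on Spark 3.x.
spark.conf.set("spark.sql.execution.arrow.enabled", "true")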