Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- Caused by: org.apache.spark.sql.catalyst.errors.package$TreeNodeException: execute, tree:
- Exchange rangepartitioning(merchant_id#8 ASC,200), None
- +- ConvertToSafe
- +- TungstenAggregate(key=[city#5,category#4,merchant_id#8,timestamp#12], functions=[], output=[city#5,timestamp#12,merchant_id#8])
- +- TungstenExchange hashpartitioning(city#5,category#4,merchant_id#8,timestamp#12,200), None
- +- TungstenAggregate(key=[city#5,category#4,merchant_id#8,timestamp#12], functions=[], output=[city#5,category#4,merchant_id#8,timestamp#12])
- +- Project [city#5,category#4,merchant_id#8,timestamp#12]
- +- Filter NOT (merchant_id#8 = )
- +- Scan ExistingRDD[entity_id#0,is_OMS_jingpin#1,wlt_ico#2,subcategory#3,category#4,city#5,is_OMS_ding#6,is_famousCompany#7,merchant_id#8,pageno#9,url#10,position#11,timestamp#12,hour#13]
- at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:49)
- at org.apache.spark.sql.execution.Exchange.doExecute(Exchange.scala:247)
- at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:132)
- at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:130)
- at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
- at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:130)
- at org.apache.spark.sql.execution.ConvertToUnsafe.doExecute(rowFormatConverters.scala:38)
- at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:132)
- at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:130)
- at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
- at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:130)
- at org.apache.spark.sql.execution.Sort.doExecute(Sort.scala:64)
- at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:132)
- at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:130)
- at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
- at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:130)
- at org.apache.spark.sql.execution.Project.doExecute(basicOperators.scala:46)
- at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:132)
- at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:130)
- at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150)
- at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:130)
- at org.apache.spark.sql.execution.aggregate.TungstenAggregate$$anonfun$doExecute$1.apply(TungstenAggregate.scala:86)
- at org.apache.spark.sql.execution.aggregate.TungstenAggregate$$anonfun$doExecute$1.apply(TungstenAggregate.scala:80)
- at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:48)
- ... 54 more
- Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 1.0 failed 4 times, most recent failure: Lost task 0.3 in stage 1.0 (TID 8, ip-172-31-44-106.us-west-2.compute.internal): ExecutorLostFailure (executor 4 exited caused by one of the running tasks) Reason: Container killed by YARN for exceeding memory limits. 5.5 GB of 5.5 GB physical memory used. Consider boosting spark.yarn.executor.memoryOverhead.
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement