```
import pyspark.sql.functions as fx
from pyspark.sql.types import *

# read in BGEN
bgen_path = "/mnt/data/home/jeremy/data/ukbb/imputed/ukb_imp_chr22_v3.bgen"
bgen_sample_path = '/mnt/data/home/jeremy/data/ukbb/imputed/ukb49662_imp_chr1_v3_s487317.sample'

df = spark.read.format("com.databricks.bgen").options(sampleIdColumn="ID_2", sampleFilePath=bgen_sample_path).load(bgen_path)


display(df.withColumn("firstGenotype", fx.expr("genotypes[0]")).drop("genotypes").limit(10))
```

# --------- NEWCELL
```
def add_position_bin(df, bin_width=10000000):
    """
    Add a bin for each variant, which will be used for partitioning the Delta Lake.
    The bin is derived from the variant's start position.

    :param df: dataframe
    :param bin_width: int, bin width, default = 10,000,000 (10Mb)
    """
    df = df.withColumn("bin_start", fx.col("start") - (fx.col("start") % bin_width)) \
           .withColumn("bin_end", fx.col("start") - (fx.col("start") % bin_width) + bin_width) \
           .withColumn("bin", fx.concat(fx.col("bin_start").cast(StringType()),
                                        fx.lit("-"),
                                        fx.col("bin_end").cast(StringType()))) \
           .drop("bin_start", "bin_end")
    return df

df = add_position_bin(df)
```
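
As a quick sanity check on the bin arithmetic, here is a minimal sketch. The `test_df` name and its rows are made up for illustration; it assumes the same `spark` session and the imports from the first cell:

```
# Hypothetical toy DataFrame; only contigName and start matter for binning.
test_df = spark.createDataFrame(
    [("22", 9999999), ("22", 12345678), ("22", 30000000)],
    ["contigName", "start"],
)

add_position_bin(test_df).show(truncate=False)
# Expected bin values with the default 10Mb bin_width:
#   start=9999999  -> bin "0-10000000"
#   start=12345678 -> bin "10000000-20000000"
#   start=30000000 -> bin "30000000-40000000"
```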

# --------- NEWCELL
```
df.write.format("delta").partitionBy("contigName", "bin").save("/mnt/data/home/jeremy/data/ukbb/delta/")
```

# this is where it explodes and results in the following traceback:
```
---------------------------------------------------------------------------
Py4JJavaError Traceback (most recent call last)
<command-4103967183024832> in <module>()
----> 1 df.write.format("delta").partitionBy("contigName", "bin").save("/mnt/data/home/jeremy/data/ukbb/delta/")

/databricks/spark/python/pyspark/sql/readwriter.py in save(self, path, format, mode, partitionBy, **options)
    736             self._jwrite.save()
    737         else:
--> 738             self._jwrite.save(path)
    739
    740     @since(1.4)

/databricks/spark/python/lib/py4j-0.10.7-src.zip/py4j/java_gateway.py in __call__(self, *args)
   1255         answer = self.gateway_client.send_command(command)
   1256         return_value = get_return_value(
-> 1257             answer, self.gateway_client, self.target_id, self.name)
   1258
   1259         for temp_arg in temp_args:

/databricks/spark/python/pyspark/sql/utils.py in deco(*a, **kw)
     61     def deco(*a, **kw):
     62         try:
---> 63             return f(*a, **kw)
     64         except py4j.protocol.Py4JJavaError as e:
     65             s = e.java_exception.toString()

/databricks/spark/python/lib/py4j-0.10.7-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
    326                 raise Py4JJavaError(
    327                     "An error occurred while calling {0}{1}{2}.\n".
--> 328                     format(target_id, ".", name), value)
    329             else:
    330                 raise Py4JError(

Py4JJavaError: An error occurred while calling o350.save.
: org.apache.spark.SparkException: Job aborted.
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:198)
	at com.databricks.sql.transaction.tahoe.files.TransactionalWriteEdge$$anonfun$writeFiles$1$$anonfun$apply$1.apply(TransactionalWriteEdge.scala:147)
	at com.databricks.sql.transaction.tahoe.files.TransactionalWriteEdge$$anonfun$writeFiles$1$$anonfun$apply$1.apply(TransactionalWriteEdge.scala:130)
	at org.apache.spark.sql.execution.SQLExecution$$anonfun$withCustomExecutionEnv$1.apply(SQLExecution.scala:111)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:240)
	at org.apache.spark.sql.execution.SQLExecution$.withCustomExecutionEnv(SQLExecution.scala:97)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:170)
	at com.databricks.sql.transaction.tahoe.files.TransactionalWriteEdge$$anonfun$writeFiles$1.apply(TransactionalWriteEdge.scala:130)
	at com.databricks.sql.transaction.tahoe.files.TransactionalWriteEdge$$anonfun$writeFiles$1.apply(TransactionalWriteEdge.scala:87)
	at com.databricks.logging.UsageLogging$$anonfun$recordOperation$1.apply(UsageLogging.scala:369)
	at com.databricks.logging.UsageLogging$$anonfun$withAttributionContext$1.apply(UsageLogging.scala:238)
	at scala.util.DynamicVariable.withValue(DynamicVariable.scala:58)
	at com.databricks.logging.UsageLogging$class.withAttributionContext(UsageLogging.scala:233)
	at com.databricks.spark.util.PublicDBLogging.withAttributionContext(DatabricksSparkUsageLogger.scala:18)
	at com.databricks.logging.UsageLogging$class.withAttributionTags(UsageLogging.scala:271)
	at com.databricks.spark.util.PublicDBLogging.withAttributionTags(DatabricksSparkUsageLogger.scala:18)
	at com.databricks.logging.UsageLogging$class.recordOperation(UsageLogging.scala:350)
	at com.databricks.spark.util.PublicDBLogging.recordOperation(DatabricksSparkUsageLogger.scala:18)
	at com.databricks.spark.util.PublicDBLogging.recordOperation0(DatabricksSparkUsageLogger.scala:55)
	at com.databricks.spark.util.DatabricksSparkUsageLogger.recordOperation(DatabricksSparkUsageLogger.scala:94)
	at com.databricks.spark.util.UsageLogger$class.recordOperation(UsageLogger.scala:66)
	at com.databricks.spark.util.DatabricksSparkUsageLogger.recordOperation(DatabricksSparkUsageLogger.scala:63)
	at com.databricks.spark.util.UsageLogging$class.recordOperation(UsageLogger.scala:297)
	at com.databricks.sql.transaction.tahoe.OptimisticTransaction.recordOperation(OptimisticTransaction.scala:79)
	at com.databricks.sql.transaction.tahoe.metering.DeltaLogging$class.recordDeltaOperation(DeltaLogging.scala:108)
	at com.databricks.sql.transaction.tahoe.OptimisticTransaction.recordDeltaOperation(OptimisticTransaction.scala:79)
	at com.databricks.sql.transaction.tahoe.files.TransactionalWriteEdge$class.writeFiles(TransactionalWriteEdge.scala:87)
	at com.databricks.sql.transaction.tahoe.OptimisticTransaction.writeFiles(OptimisticTransaction.scala:79)
	at com.databricks.sql.transaction.tahoe.files.TransactionalWrite$class.writeFiles(TransactionalWrite.scala:96)
	at com.databricks.sql.transaction.tahoe.OptimisticTransaction.writeFiles(OptimisticTransaction.scala:79)
	at com.databricks.sql.transaction.tahoe.commands.WriteIntoDelta.write(WriteIntoDelta.scala:110)
	at com.databricks.sql.transaction.tahoe.commands.WriteIntoDelta$$anonfun$run$1$$anonfun$apply$1.apply(WriteIntoDelta.scala:71)
	at com.databricks.sql.transaction.tahoe.commands.WriteIntoDelta$$anonfun$run$1$$anonfun$apply$1.apply(WriteIntoDelta.scala:70)
	at com.databricks.sql.transaction.tahoe.DeltaLog.withNewTransaction(DeltaLog.scala:382)
	at com.databricks.sql.transaction.tahoe.commands.WriteIntoDelta$$anonfun$run$1.apply(WriteIntoDelta.scala:70)
	at com.databricks.sql.transaction.tahoe.commands.WriteIntoDelta$$anonfun$run$1.apply(WriteIntoDelta.scala:69)
	at com.databricks.sql.acl.CheckPermissions$.trusted(CheckPermissions.scala:781)
	at com.databricks.sql.transaction.tahoe.commands.WriteIntoDelta.run(WriteIntoDelta.scala:69)
	at com.databricks.sql.transaction.tahoe.sources.DeltaDataSource.createRelation(DeltaDataSource.scala:143)
	at org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:45)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:72)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:70)
	at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:88)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:146)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:134)
	at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$5.apply(SparkPlan.scala:187)
	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
	at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:183)
	at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:134)
	at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:114)
	at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:114)
	at org.apache.spark.sql.DataFrameWriter$$anonfun$runCommand$1.apply(DataFrameWriter.scala:710)
	at org.apache.spark.sql.DataFrameWriter$$anonfun$runCommand$1.apply(DataFrameWriter.scala:710)
	at org.apache.spark.sql.execution.SQLExecution$$anonfun$withCustomExecutionEnv$1.apply(SQLExecution.scala:111)
	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:240)
	at org.apache.spark.sql.execution.SQLExecution$.withCustomExecutionEnv(SQLExecution.scala:97)
	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:170)
	at org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:710)
	at org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:306)
	at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:292)
	at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:235)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
	at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:380)
	at py4j.Gateway.invoke(Gateway.java:295)
	at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
	at py4j.commands.CallCommand.execute(CallCommand.java:79)
	at py4j.GatewayConnection.run(GatewayConnection.java:251)
	at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 84 in stage 178.0 failed 4 times, most recent failure: Lost task 84.3 in stage 178.0 (TID 21018, 10.26.246.237, executor 62): ExecutorLostFailure (executor 62 exited caused by one of the running tasks) Reason: Remote RPC client disassociated. Likely due to containers exceeding thresholds, or network issues. Check driver logs for WARN messages.
Driver stacktrace:
	at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:2355)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:2343)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:2342)
	at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2342)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:1096)
	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:1096)
	at scala.Option.foreach(Option.scala:257)
	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1096)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2574)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2522)
	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2510)
	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:893)
	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2243)
	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:167)
	... 71 more
```
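
# The root cause line is an ExecutorLostFailure during the partitioned Delta write, and the
# "containers exceeding thresholds" hint points at executor memory pressure. One thing that
# may be worth trying (an assumption on my part, not a confirmed fix for this job) is to
# repartition on the partition columns before writing, so each task holds open writers for
# only a few (contigName, bin) directories at a time:
```
# Hypothetical mitigation sketch, not verified against this failure.
df.repartition("contigName", "bin") \
  .write.format("delta") \
  .partitionBy("contigName", "bin") \
  .save("/mnt/data/home/jeremy/data/ukbb/delta/")
```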