// Build the DataFrame from the previously defined data (d) and schema, preview it, then overwrite test/df.json
var finalDF = spark_session.createDataFrame(d, schema)
finalDF.show(10, false)
finalDF.write.mode("overwrite").json("test/df.json")

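For reference, a minimal self-contained sketch of the same write pattern that triggers the stack trace below. The paste does not show how spark_session, d, or schema are defined, so the session builder, schema, and rows here are illustrative placeholders, not the original values.

import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.{StringType, StructField, StructType}

object WriteJsonSketch {
  def main(args: Array[String]): Unit = {
    // Local session standing in for the paste's spark_session (placeholder app name and master).
    val spark_session = SparkSession.builder()
      .appName("WriteJsonSketch")
      .master("local[*]")
      .getOrCreate()

    // Placeholder schema and rows standing in for the paste's `schema` and `d`.
    val schema = StructType(Seq(StructField("name", StringType, nullable = true)))
    val d = java.util.Arrays.asList(Row("alice"), Row("bob"))

    // Same calls as the paste: build the DataFrame, preview it, overwrite test/df.json.
    val finalDF = spark_session.createDataFrame(d, schema)
    finalDF.show(10, false)
    finalDF.write.mode("overwrite").json("test/df.json")

    spark_session.stop()
  }
}
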
ExitCodeException exitCode=-1073741515:
    at org.apache.hadoop.util.Shell.runCommand(Shell.java:575)
    at org.apache.hadoop.util.Shell.run(Shell.java:478)
    at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:766)
    at org.apache.hadoop.util.Shell.execCommand(Shell.java:859)
    at org.apache.hadoop.util.Shell.execCommand(Shell.java:842)
    at org.apache.hadoop.fs.RawLocalFileSystem.setPermission(RawLocalFileSystem.java:661)
    at org.apache.hadoop.fs.ChecksumFileSystem$1.apply(ChecksumFileSystem.java:501)
    at org.apache.hadoop.fs.ChecksumFileSystem$FsOperation.run(ChecksumFileSystem.java:482)
    at org.apache.hadoop.fs.ChecksumFileSystem.setPermission(ChecksumFileSystem.java:498)
    at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:467)
    at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:433)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:908)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:889)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:786)
    at org.apache.spark.sql.execution.datasources.CodecStreams$.createOutputStream(CodecStreams.scala:81)
    at org.apache.spark.sql.execution.datasources.CodecStreams$.createOutputStreamWriter(CodecStreams.scala:92)
    at org.apache.spark.sql.execution.datasources.json.JsonOutputWriter.<init>(JsonFileFormat.scala:140)
    at org.apache.spark.sql.execution.datasources.json.JsonFileFormat$$anon$1.newInstance(JsonFileFormat.scala:80)
    at org.apache.spark.sql.execution.datasources.FileFormatWriter$SingleDirectoryWriteTask.newOutputWriter(FileFormatWriter.scala:305)
    at org.apache.spark.sql.execution.datasources.FileFormatWriter$SingleDirectoryWriteTask.execute(FileFormatWriter.scala:314)
    at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask$3.apply(FileFormatWriter.scala:258)
    at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask$3.apply(FileFormatWriter.scala:256)
    at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1375)
    at org.apache.spark.sql.execution.datasources.FileFormatWriter$.org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask(FileFormatWriter.scala:261)
    at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$write$1$$anonfun$apply$mcV$sp$1.apply(FileFormatWriter.scala:191)
    at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$write$1$$anonfun$apply$mcV$sp$1.apply(FileFormatWriter.scala:190)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    at org.apache.spark.scheduler.Task.run(Task.scala:108)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:335)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
18/05/16 17:09:48 WARN FileUtil: Failed to delete file or dir [C:\Users\jsolano\IdeaProjects\Test2\test\df.json\_temporary\0\_temporary\attempt_20180516170948_0005_m_000000_0\.part-00000-ff4d215c-00f2-4585-89bb-d53426315539-c000.json.crc]: it still exists.
18/05/16 17:09:48 WARN FileUtil: Failed to delete file or dir [C:\Users\jsolano\IdeaProjects\Test2\test\df.json\_temporary\0\_temporary\attempt_20180516170948_0005_m_000000_0\part-00000-ff4d215c-00f2-4585-89bb-d53426315539-c000.json]: it still exists.
18/05/16 17:09:48 WARN FileOutputCommitter: Could not delete file:/C:/Users/jsolano/IdeaProjects/Test2/test/df.json/_temporary/0/_temporary/attempt_20180516170948_0005_m_000000_0
18/05/16 17:09:48 ERROR FileFormatWriter: Job job_20180516170948_0005 aborted.
18/05/16 17:09:48 ERROR Executor: Exception in task 0.0 in stage 5.0 (TID 4)
org.apache.spark.SparkException: Task failed while writing rows
    at org.apache.spark.sql.execution.datasources.FileFormatWriter$.org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask(FileFormatWriter.scala:272)
    at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$write$1$$anonfun$apply$mcV$sp$1.apply(FileFormatWriter.scala:191)
    at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$write$1$$anonfun$apply$mcV$sp$1.apply(FileFormatWriter.scala:190)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    at org.apache.spark.scheduler.Task.run(Task.scala:108)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:335)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Caused by: ExitCodeException exitCode=-1073741515:
    at org.apache.hadoop.util.Shell.runCommand(Shell.java:575)
    at org.apache.hadoop.util.Shell.run(Shell.java:478)
    at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:766)
    at org.apache.hadoop.util.Shell.execCommand(Shell.java:859)
    at org.apache.hadoop.util.Shell.execCommand(Shell.java:842)
    at org.apache.hadoop.fs.RawLocalFileSystem.setPermission(RawLocalFileSystem.java:661)
    at org.apache.hadoop.fs.ChecksumFileSystem$1.apply(ChecksumFileSystem.java:501)
    at org.apache.hadoop.fs.ChecksumFileSystem$FsOperation.run(ChecksumFileSystem.java:482)
    at org.apache.hadoop.fs.ChecksumFileSystem.setPermission(ChecksumFileSystem.java:498)
    at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:467)
    at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:433)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:908)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:889)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:786)
    at org.apache.spark.sql.execution.datasources.CodecStreams$.createOutputStream(CodecStreams.scala:81)
    at org.apache.spark.sql.execution.datasources.CodecStreams$.createOutputStreamWriter(CodecStreams.scala:92)
    at org.apache.spark.sql.execution.datasources.json.JsonOutputWriter.<init>(JsonFileFormat.scala:140)
    at org.apache.spark.sql.execution.datasources.json.JsonFileFormat$$anon$1.newInstance(JsonFileFormat.scala:80)
    at org.apache.spark.sql.execution.datasources.FileFormatWriter$SingleDirectoryWriteTask.newOutputWriter(FileFormatWriter.scala:305)
    at org.apache.spark.sql.execution.datasources.FileFormatWriter$SingleDirectoryWriteTask.execute(FileFormatWriter.scala:314)
    at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask$3.apply(FileFormatWriter.scala:258)
    at org.apache.spark.sql.execution.datasources.FileFormatWriter$$anonfun$org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask$3.apply(FileFormatWriter.scala:256)
    at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1375)
    at org.apache.spark.sql.execution.datasources.FileFormatWriter$.org$apache$spark$sql$execution$datasources$FileFormatWriter$$executeTask(FileFormatWriter.scala:261)
    ... 8 more