Error: org.kitesdk.data.DatasetOperationException: Failed to open appender ParquetAppender{path=maprfs:/tmp/default/.temp/job_1439821524094_0009/mr/attempt_1439821524094_0009_m_000000_0/.9535aee0-de39-4d9b-8d93-8368a5d00ff0.parquet.tmp, schema={"type":"record","name":"sqoop","doc":"Sqoop import of sqoop","fields":[{"name":"id","type":["null","int"],"default":null,"columnName":"id","sqlType":"4"}],"tableName":"sqoop"}, fileSystem=com.mapr.fs.MapRFileSystem@348d62d4, avroParquetWriter=null}
    at org.kitesdk.data.spi.filesystem.FileSystemWriter.initialize(FileSystemWriter.java:138)
    at org.kitesdk.data.spi.filesystem.FileSystemView.newWriter(FileSystemView.java:101)
    at org.kitesdk.data.mapreduce.DatasetKeyOutputFormat$DatasetRecordWriter.<init>(DatasetKeyOutputFormat.java:308)
    at org.kitesdk.data.mapreduce.DatasetKeyOutputFormat.getRecordWriter(DatasetKeyOutputFormat.java:445)
    at org.apache.hadoop.mapred.MapTask$NewDirectOutputCollector.<init>(MapTask.java:656)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:776)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:346)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:163)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1595)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:158)
Caused by: java.lang.IllegalArgumentException: maxCapacityHint can't be less than initialSlabSize 10485760 1048576
    at parquet.Preconditions.checkArgument(Preconditions.java:55)
    at parquet.bytes.CapacityByteArrayOutputStream.<init>(CapacityByteArrayOutputStream.java:118)
    at parquet.bytes.CapacityByteArrayOutputStream.<init>(CapacityByteArrayOutputStream.java:108)
    at parquet.hadoop.ColumnChunkPageWriteStore$ColumnChunkPageWriter.<init>(ColumnChunkPageWriteStore.java:64)
    at parquet.hadoop.ColumnChunkPageWriteStore$ColumnChunkPageWriter.<init>(ColumnChunkPageWriteStore.java:46)
    at parquet.hadoop.ColumnChunkPageWriteStore.getPageWriter(ColumnChunkPageWriteStore.java:153)
    at parquet.column.impl.ColumnWriteStoreImpl.newMemColumn(ColumnWriteStoreImpl.java:62)
    at parquet.column.impl.ColumnWriteStoreImpl.getColumnWriter(ColumnWriteStoreImpl.java:55)
    at parquet.io.MessageColumnIO$MessageColumnIORecordConsumer.<init>(MessageColumnIO.java:183)
    at parquet.io.MessageColumnIO.getRecordWriter(MessageColumnIO.java:375)
    at parquet.hadoop.InternalParquetRecordWriter.initStore(InternalParquetRecordWriter.java:106)
    at parquet.hadoop.InternalParquetRecordWriter.<init>(InternalParquetRecordWriter.java:92)
    at parquet.hadoop.ParquetWriter.<init>(ParquetWriter.java:181)
    at parquet.avro.AvroParquetWriter.<init>(AvroParquetWriter.java:93)
    at org.kitesdk.data.spi.filesystem.ParquetAppender.open(ParquetAppender.java:66)
    at org.kitesdk.data.spi.filesystem.FileSystemWriter.initialize(FileSystemWriter.java:135)
    ... 11 more

15/09/02 08:53:29 INFO mapreduce.Job: Task Id : attempt_1439821524094_0009_m_000000_1, Status : FAILED
Error: org.kitesdk.data.DatasetOperationException: Failed to open appender ParquetAppender{path=maprfs:/tmp/default/.temp/job_1439821524094_0009/mr/attempt_1439821524094_0009_m_000000_1/.c451ed54-a70d-432c-adc0-e4cb2ce14615.parquet.tmp, schema={"type":"record","name":"sqoop","doc":"Sqoop import of sqoop","fields":[{"name":"id","type":["null","int"],"default":null,"columnName":"id","sqlType":"4"}],"tableName":"sqoop"}, fileSystem=com.mapr.fs.MapRFileSystem@61149fb3, avroParquetWriter=null}
    at org.kitesdk.data.spi.filesystem.FileSystemWriter.initialize(FileSystemWriter.java:138)
    at org.kitesdk.data.spi.filesystem.FileSystemView.newWriter(FileSystemView.java:101)
    at org.kitesdk.data.mapreduce.DatasetKeyOutputFormat$DatasetRecordWriter.<init>(DatasetKeyOutputFormat.java:308)
    at org.kitesdk.data.mapreduce.DatasetKeyOutputFormat.getRecordWriter(DatasetKeyOutputFormat.java:445)
    at org.apache.hadoop.mapred.MapTask$NewDirectOutputCollector.<init>(MapTask.java:656)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:776)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:346)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:163)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1595)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:158)
Caused by: java.lang.IllegalArgumentException: maxCapacityHint can't be less than initialSlabSize 10485760 1048576
    at parquet.Preconditions.checkArgument(Preconditions.java:55)
    at parquet.bytes.CapacityByteArrayOutputStream.<init>(CapacityByteArrayOutputStream.java:118)
    at parquet.bytes.CapacityByteArrayOutputStream.<init>(CapacityByteArrayOutputStream.java:108)
    at parquet.hadoop.ColumnChunkPageWriteStore$ColumnChunkPageWriter.<init>(ColumnChunkPageWriteStore.java:64)
    at parquet.hadoop.ColumnChunkPageWriteStore$ColumnChunkPageWriter.<init>(ColumnChunkPageWriteStore.java:46)
    at parquet.hadoop.ColumnChunkPageWriteStore.getPageWriter(ColumnChunkPageWriteStore.java:153)
    at parquet.column.impl.ColumnWriteStoreImpl.newMemColumn(ColumnWriteStoreImpl.java:62)
    at parquet.column.impl.ColumnWriteStoreImpl.getColumnWriter(ColumnWriteStoreImpl.java:55)
    at parquet.io.MessageColumnIO$MessageColumnIORecordConsumer.<init>(MessageColumnIO.java:183)
    at parquet.io.MessageColumnIO.getRecordWriter(MessageColumnIO.java:375)
    at parquet.hadoop.InternalParquetRecordWriter.initStore(InternalParquetRecordWriter.java:106)
    at parquet.hadoop.InternalParquetRecordWriter.<init>(InternalParquetRecordWriter.java:92)
    at parquet.hadoop.ParquetWriter.<init>(ParquetWriter.java:181)
    at parquet.avro.AvroParquetWriter.<init>(AvroParquetWriter.java:93)
    at org.kitesdk.data.spi.filesystem.ParquetAppender.open(ParquetAppender.java:66)
    at org.kitesdk.data.spi.filesystem.FileSystemWriter.initialize(FileSystemWriter.java:135)
    ... 11 more

15/09/02 08:53:39 INFO mapreduce.Job: Task Id : attempt_1439821524094_0009_m_000000_2, Status : FAILED
Error: org.kitesdk.data.DatasetOperationException: Failed to open appender ParquetAppender{path=maprfs:/tmp/default/.temp/job_1439821524094_0009/mr/attempt_1439821524094_0009_m_000000_2/.abe9c49c-91b2-4774-8380-5a741625d307.parquet.tmp, schema={"type":"record","name":"sqoop","doc":"Sqoop import of sqoop","fields":[{"name":"id","type":["null","int"],"default":null,"columnName":"id","sqlType":"4"}],"tableName":"sqoop"}, fileSystem=com.mapr.fs.MapRFileSystem@180541a0, avroParquetWriter=null}
    at org.kitesdk.data.spi.filesystem.FileSystemWriter.initialize(FileSystemWriter.java:138)
    at org.kitesdk.data.spi.filesystem.FileSystemView.newWriter(FileSystemView.java:101)
    at org.kitesdk.data.mapreduce.DatasetKeyOutputFormat$DatasetRecordWriter.<init>(DatasetKeyOutputFormat.java:308)
    at org.kitesdk.data.mapreduce.DatasetKeyOutputFormat.getRecordWriter(DatasetKeyOutputFormat.java:445)
    at org.apache.hadoop.mapred.MapTask$NewDirectOutputCollector.<init>(MapTask.java:656)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:776)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:346)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:163)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1595)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:158)
Caused by: java.lang.IllegalArgumentException: maxCapacityHint can't be less than initialSlabSize 10485760 1048576
    at parquet.Preconditions.checkArgument(Preconditions.java:55)
    at parquet.bytes.CapacityByteArrayOutputStream.<init>(CapacityByteArrayOutputStream.java:118)
    at parquet.bytes.CapacityByteArrayOutputStream.<init>(CapacityByteArrayOutputStream.java:108)
    at parquet.hadoop.ColumnChunkPageWriteStore$ColumnChunkPageWriter.<init>(ColumnChunkPageWriteStore.java:64)
    at parquet.hadoop.ColumnChunkPageWriteStore$ColumnChunkPageWriter.<init>(ColumnChunkPageWriteStore.java:46)
    at parquet.hadoop.ColumnChunkPageWriteStore.getPageWriter(ColumnChunkPageWriteStore.java:153)
    at parquet.column.impl.ColumnWriteStoreImpl.newMemColumn(ColumnWriteStoreImpl.java:62)
    at parquet.column.impl.ColumnWriteStoreImpl.getColumnWriter(ColumnWriteStoreImpl.java:55)
    at parquet.io.MessageColumnIO$MessageColumnIORecordConsumer.<init>(MessageColumnIO.java:183)
    at parquet.io.MessageColumnIO.getRecordWriter(MessageColumnIO.java:375)
    at parquet.hadoop.InternalParquetRecordWriter.initStore(InternalParquetRecordWriter.java:106)
    at parquet.hadoop.InternalParquetRecordWriter.<init>(InternalParquetRecordWriter.java:92)
    at parquet.hadoop.ParquetWriter.<init>(ParquetWriter.java:181)
    at parquet.avro.AvroParquetWriter.<init>(AvroParquetWriter.java:93)
    at org.kitesdk.data.spi.filesystem.ParquetAppender.open(ParquetAppender.java:66)
    at org.kitesdk.data.spi.filesystem.FileSystemWriter.initialize(FileSystemWriter.java:135)
    ... 11 more

15/09/02 08:53:51 INFO mapreduce.Job: map 100% reduce 0%
15/09/02 08:53:51 INFO mapreduce.Job: Job job_1439821524094_0009 failed with state FAILED due to: Task failed task_1439821524094_0009_m_000000
Job failed as tasks failed. failedMaps:1 failedReduces:0

15/09/02 08:53:51 INFO mapreduce.Job: Counters: 9
    Job Counters
        Failed map tasks=4
        Launched map tasks=4
        Other local map tasks=4
        Total time spent by all maps in occupied slots (ms)=33490
        Total time spent by all reduces in occupied slots (ms)=0
        Total time spent by all map tasks (ms)=33490
        Total vcore-seconds taken by all map tasks=33490
        Total megabyte-seconds taken by all map tasks=34293760
        DISK_MILLIS_MAPS=16747
15/09/02 08:53:51 WARN mapreduce.Counters: Group FileSystemCounters is deprecated. Use org.apache.hadoop.mapreduce.FileSystemCounter instead
15/09/02 08:53:51 INFO mapreduce.ImportJobBase: Transferred 0 bytes in 74.3586 seconds (0 bytes/sec)
15/09/02 08:53:51 WARN mapreduce.Counters: Group org.apache.hadoop.mapred.Task$Counter is deprecated. Use org.apache.hadoop.mapreduce.TaskCounter instead
15/09/02 08:53:51 INFO mapreduce.ImportJobBase: Retrieved 0 records.
15/09/02 08:53:51 ERROR tool.ImportTool: Error during import: Import job failed!
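
Note on the failure: every map attempt dies on the same Caused by. The precondition in parquet.Preconditions.checkArgument refuses to build the Parquet column writer's output buffer because maxCapacityHint (1048576 bytes, 1 MB) is smaller than initialSlabSize (10485760 bytes, 10 MB); the check only passes when the capacity hint is at least as large as the first slab. The plain-Java sketch below is illustrative only and is not the Parquet code from the trace; the class and helper names are invented for the example, and it simply reproduces the failing invariant with the two values printed in the log. This symptom is commonly reported when mismatched parquet-* jar versions (for example, the cluster's bundled Parquet libraries versus the ones shipped with Sqoop/Kite) end up together on the task classpath, so aligning those versions is the usual place to start looking.

// Illustrative sketch only (plain Java, not the Parquet classes in the trace above).
// It reproduces the invariant enforced via parquet.Preconditions.checkArgument,
// using the two values printed in the log; class and method names are invented.
public class SlabSizeInvariantDemo {

    // Minimal stand-in for parquet.Preconditions.checkArgument.
    static void checkArgument(boolean valid, String message) {
        if (!valid) {
            throw new IllegalArgumentException(message);
        }
    }

    public static void main(String[] args) {
        int initialSlabSize = 10 * 1024 * 1024; // 10485760 bytes, as printed in the log
        int maxCapacityHint = 1024 * 1024;      // 1048576 bytes, as printed in the log

        // The capacity hint must be at least the size of the first slab the buffer
        // allocates. Here 1 MB < 10 MB, so this throws an IllegalArgumentException
        // carrying the same text seen in every failed map attempt above.
        checkArgument(maxCapacityHint >= initialSlabSize,
                "maxCapacityHint can't be less than initialSlabSize "
                        + initialSlabSize + " " + maxCapacityHint);
    }
}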