Error: org.kitesdk.data.DatasetOperationException: Failed to open appender ParquetAppender{path=maprfs:/tmp/default/.temp/job_1439821524094_0009/mr/attempt_1439821524094_0009_m_000000_0/.9535aee0-de39-4d9b-8d93-8368a5d00ff0.parquet.tmp, schema={"type":"record","name":"sqoop","doc":"Sqoop import of sqoop","fields":[{"name":"id","type":["null","int"],"default":null,"columnName":"id","sqlType":"4"}],"tableName":"sqoop"}, fileSystem=com.mapr.fs.MapRFileSystem@348d62d4, avroParquetWriter=null}
    at org.kitesdk.data.spi.filesystem.FileSystemWriter.initialize(FileSystemWriter.java:138)
    at org.kitesdk.data.spi.filesystem.FileSystemView.newWriter(FileSystemView.java:101)
    at org.kitesdk.data.mapreduce.DatasetKeyOutputFormat$DatasetRecordWriter.<init>(DatasetKeyOutputFormat.java:308)
    at org.kitesdk.data.mapreduce.DatasetKeyOutputFormat.getRecordWriter(DatasetKeyOutputFormat.java:445)
    at org.apache.hadoop.mapred.MapTask$NewDirectOutputCollector.<init>(MapTask.java:656)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:776)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:346)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:163)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1595)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:158)
Caused by: java.lang.IllegalArgumentException: maxCapacityHint can't be less than initialSlabSize 10485760 1048576
    at parquet.Preconditions.checkArgument(Preconditions.java:55)
    at parquet.bytes.CapacityByteArrayOutputStream.<init>(CapacityByteArrayOutputStream.java:118)
    at parquet.bytes.CapacityByteArrayOutputStream.<init>(CapacityByteArrayOutputStream.java:108)
    at parquet.hadoop.ColumnChunkPageWriteStore$ColumnChunkPageWriter.<init>(ColumnChunkPageWriteStore.java:64)
    at parquet.hadoop.ColumnChunkPageWriteStore$ColumnChunkPageWriter.<init>(ColumnChunkPageWriteStore.java:46)
    at parquet.hadoop.ColumnChunkPageWriteStore.getPageWriter(ColumnChunkPageWriteStore.java:153)
    at parquet.column.impl.ColumnWriteStoreImpl.newMemColumn(ColumnWriteStoreImpl.java:62)
    at parquet.column.impl.ColumnWriteStoreImpl.getColumnWriter(ColumnWriteStoreImpl.java:55)
    at parquet.io.MessageColumnIO$MessageColumnIORecordConsumer.<init>(MessageColumnIO.java:183)
    at parquet.io.MessageColumnIO.getRecordWriter(MessageColumnIO.java:375)
    at parquet.hadoop.InternalParquetRecordWriter.initStore(InternalParquetRecordWriter.java:106)
    at parquet.hadoop.InternalParquetRecordWriter.<init>(InternalParquetRecordWriter.java:92)
    at parquet.hadoop.ParquetWriter.<init>(ParquetWriter.java:181)
    at parquet.avro.AvroParquetWriter.<init>(AvroParquetWriter.java:93)
    at org.kitesdk.data.spi.filesystem.ParquetAppender.open(ParquetAppender.java:66)
    at org.kitesdk.data.spi.filesystem.FileSystemWriter.initialize(FileSystemWriter.java:135)
    ... 11 more
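
[Editor's note] The root cause above is the precondition check in parquet's CapacityByteArrayOutputStream: the writer is constructed with an initial slab of 10485760 bytes (10 MiB) but a capacity hint of only 1048576 bytes (1 MiB, parquet-mr's default parquet.page.size). One plausible explanation, not confirmed by this log, is mixed parquet-mr versions on the task classpath (for example, Sqoop's bundled parquet jars alongside the cluster's copy), since the buffer-allocation strategy of CapacityByteArrayOutputStream was reworked across parquet releases. A quick way to look for duplicate jars, sketched below with illustrative MapR paths (an assumption about the install layout, adjust as needed):

# Hedged diagnostic sketch: list the parquet jars visible to Sqoop and to the
# Hadoop task classpath; two different parquet-* versions showing up here would
# be consistent with the constructor failure above. Paths are illustrative.
ls /opt/mapr/sqoop/sqoop-*/lib/parquet-*.jar
ls /opt/mapr/hadoop/hadoop-*/share/hadoop/*/lib/parquet-*.jar 2>/dev/null
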
15/09/02 08:53:29 INFO mapreduce.Job: Task Id : attempt_1439821524094_0009_m_000000_1, Status : FAILED
Error: org.kitesdk.data.DatasetOperationException: Failed to open appender ParquetAppender{path=maprfs:/tmp/default/.temp/job_1439821524094_0009/mr/attempt_1439821524094_0009_m_000000_1/.c451ed54-a70d-432c-adc0-e4cb2ce14615.parquet.tmp, schema={"type":"record","name":"sqoop","doc":"Sqoop import of sqoop","fields":[{"name":"id","type":["null","int"],"default":null,"columnName":"id","sqlType":"4"}],"tableName":"sqoop"}, fileSystem=com.mapr.fs.MapRFileSystem@61149fb3, avroParquetWriter=null}
    at org.kitesdk.data.spi.filesystem.FileSystemWriter.initialize(FileSystemWriter.java:138)
    at org.kitesdk.data.spi.filesystem.FileSystemView.newWriter(FileSystemView.java:101)
    at org.kitesdk.data.mapreduce.DatasetKeyOutputFormat$DatasetRecordWriter.<init>(DatasetKeyOutputFormat.java:308)
    at org.kitesdk.data.mapreduce.DatasetKeyOutputFormat.getRecordWriter(DatasetKeyOutputFormat.java:445)
    at org.apache.hadoop.mapred.MapTask$NewDirectOutputCollector.<init>(MapTask.java:656)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:776)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:346)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:163)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1595)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:158)
Caused by: java.lang.IllegalArgumentException: maxCapacityHint can't be less than initialSlabSize 10485760 1048576
    at parquet.Preconditions.checkArgument(Preconditions.java:55)
    at parquet.bytes.CapacityByteArrayOutputStream.<init>(CapacityByteArrayOutputStream.java:118)
    at parquet.bytes.CapacityByteArrayOutputStream.<init>(CapacityByteArrayOutputStream.java:108)
    at parquet.hadoop.ColumnChunkPageWriteStore$ColumnChunkPageWriter.<init>(ColumnChunkPageWriteStore.java:64)
    at parquet.hadoop.ColumnChunkPageWriteStore$ColumnChunkPageWriter.<init>(ColumnChunkPageWriteStore.java:46)
    at parquet.hadoop.ColumnChunkPageWriteStore.getPageWriter(ColumnChunkPageWriteStore.java:153)
    at parquet.column.impl.ColumnWriteStoreImpl.newMemColumn(ColumnWriteStoreImpl.java:62)
    at parquet.column.impl.ColumnWriteStoreImpl.getColumnWriter(ColumnWriteStoreImpl.java:55)
    at parquet.io.MessageColumnIO$MessageColumnIORecordConsumer.<init>(MessageColumnIO.java:183)
    at parquet.io.MessageColumnIO.getRecordWriter(MessageColumnIO.java:375)
    at parquet.hadoop.InternalParquetRecordWriter.initStore(InternalParquetRecordWriter.java:106)
    at parquet.hadoop.InternalParquetRecordWriter.<init>(InternalParquetRecordWriter.java:92)
    at parquet.hadoop.ParquetWriter.<init>(ParquetWriter.java:181)
    at parquet.avro.AvroParquetWriter.<init>(AvroParquetWriter.java:93)
    at org.kitesdk.data.spi.filesystem.ParquetAppender.open(ParquetAppender.java:66)
    at org.kitesdk.data.spi.filesystem.FileSystemWriter.initialize(FileSystemWriter.java:135)
    ... 11 more
15/09/02 08:53:39 INFO mapreduce.Job: Task Id : attempt_1439821524094_0009_m_000000_2, Status : FAILED
Error: org.kitesdk.data.DatasetOperationException: Failed to open appender ParquetAppender{path=maprfs:/tmp/default/.temp/job_1439821524094_0009/mr/attempt_1439821524094_0009_m_000000_2/.abe9c49c-91b2-4774-8380-5a741625d307.parquet.tmp, schema={"type":"record","name":"sqoop","doc":"Sqoop import of sqoop","fields":[{"name":"id","type":["null","int"],"default":null,"columnName":"id","sqlType":"4"}],"tableName":"sqoop"}, fileSystem=com.mapr.fs.MapRFileSystem@180541a0, avroParquetWriter=null}
    at org.kitesdk.data.spi.filesystem.FileSystemWriter.initialize(FileSystemWriter.java:138)
    at org.kitesdk.data.spi.filesystem.FileSystemView.newWriter(FileSystemView.java:101)
    at org.kitesdk.data.mapreduce.DatasetKeyOutputFormat$DatasetRecordWriter.<init>(DatasetKeyOutputFormat.java:308)
    at org.kitesdk.data.mapreduce.DatasetKeyOutputFormat.getRecordWriter(DatasetKeyOutputFormat.java:445)
    at org.apache.hadoop.mapred.MapTask$NewDirectOutputCollector.<init>(MapTask.java:656)
    at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:776)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:346)
    at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:163)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1595)
    at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:158)
Caused by: java.lang.IllegalArgumentException: maxCapacityHint can't be less than initialSlabSize 10485760 1048576
    at parquet.Preconditions.checkArgument(Preconditions.java:55)
    at parquet.bytes.CapacityByteArrayOutputStream.<init>(CapacityByteArrayOutputStream.java:118)
    at parquet.bytes.CapacityByteArrayOutputStream.<init>(CapacityByteArrayOutputStream.java:108)
    at parquet.hadoop.ColumnChunkPageWriteStore$ColumnChunkPageWriter.<init>(ColumnChunkPageWriteStore.java:64)
    at parquet.hadoop.ColumnChunkPageWriteStore$ColumnChunkPageWriter.<init>(ColumnChunkPageWriteStore.java:46)
    at parquet.hadoop.ColumnChunkPageWriteStore.getPageWriter(ColumnChunkPageWriteStore.java:153)
    at parquet.column.impl.ColumnWriteStoreImpl.newMemColumn(ColumnWriteStoreImpl.java:62)
    at parquet.column.impl.ColumnWriteStoreImpl.getColumnWriter(ColumnWriteStoreImpl.java:55)
    at parquet.io.MessageColumnIO$MessageColumnIORecordConsumer.<init>(MessageColumnIO.java:183)
    at parquet.io.MessageColumnIO.getRecordWriter(MessageColumnIO.java:375)
    at parquet.hadoop.InternalParquetRecordWriter.initStore(InternalParquetRecordWriter.java:106)
    at parquet.hadoop.InternalParquetRecordWriter.<init>(InternalParquetRecordWriter.java:92)
    at parquet.hadoop.ParquetWriter.<init>(ParquetWriter.java:181)
    at parquet.avro.AvroParquetWriter.<init>(AvroParquetWriter.java:93)
    at org.kitesdk.data.spi.filesystem.ParquetAppender.open(ParquetAppender.java:66)
    at org.kitesdk.data.spi.filesystem.FileSystemWriter.initialize(FileSystemWriter.java:135)
    ... 11 more
15/09/02 08:53:51 INFO mapreduce.Job: map 100% reduce 0%
15/09/02 08:53:51 INFO mapreduce.Job: Job job_1439821524094_0009 failed with state FAILED due to: Task failed task_1439821524094_0009_m_000000
Job failed as tasks failed. failedMaps:1 failedReduces:0
15/09/02 08:53:51 INFO mapreduce.Job: Counters: 9
    Job Counters
        Failed map tasks=4
        Launched map tasks=4
        Other local map tasks=4
        Total time spent by all maps in occupied slots (ms)=33490
        Total time spent by all reduces in occupied slots (ms)=0
        Total time spent by all map tasks (ms)=33490
        Total vcore-seconds taken by all map tasks=33490
        Total megabyte-seconds taken by all map tasks=34293760
        DISK_MILLIS_MAPS=16747
15/09/02 08:53:51 WARN mapreduce.Counters: Group FileSystemCounters is deprecated. Use org.apache.hadoop.mapreduce.FileSystemCounter instead
15/09/02 08:53:51 INFO mapreduce.ImportJobBase: Transferred 0 bytes in 74.3586 seconds (0 bytes/sec)
15/09/02 08:53:51 WARN mapreduce.Counters: Group org.apache.hadoop.mapred.Task$Counter is deprecated. Use org.apache.hadoop.mapreduce.TaskCounter instead
15/09/02 08:53:51 INFO mapreduce.ImportJobBase: Retrieved 0 records.
15/09/02 08:53:51 ERROR tool.ImportTool: Error during import: Import job failed!
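
[Editor's note] Given the numbers in the root cause, one possible workaround (an assumption drawn from the message, not something this log confirms) is to align the parquet jar versions so only one copy is on the classpath, or to raise the Parquet page size above the 10 MiB slab so the maxCapacityHint >= initialSlabSize check can pass; the latter only helps if the Kite-backed writer actually honors the job configuration. A sketch of the page-size override follows; the JDBC URL, credentials, and target directory are placeholders:

# Possible workaround sketch (unverified for this cluster): push
# parquet.page.size to 16 MiB (16777216 > 10485760) via the generic -D option,
# which must come directly after "import". Connection details below are
# placeholders; the table name "sqoop" matches the schema in the log above.
sqoop import \
  -D parquet.page.size=16777216 \
  --connect jdbc:mysql://dbhost/sqoopdb \
  --username dbuser -P \
  --table sqoop \
  --as-parquetfile \
  --target-dir /tmp/sqoop_parquet

If duplicate parquet jars do turn up, removing or replacing the older copy so a single version serves both Sqoop and the MapReduce tasks is the cleaner fix.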