Config:

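The listing below is the complete set of Hive, Hadoop, and metastore (DataNucleus) properties in effect for a single Hive session (hive.session.id=ubuntu_201102042305; the property names are Hadoop 0.20-era). A dump in exactly this key=value form can be produced from the Hive CLI; a minimal sketch, assuming a Hive 0.x shell:

    hive> SET -v;

(SET -v prints every property in effect, Hadoop and DataNucleus defaults included; a plain SET prints only the properties overridden in the current session.)
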
Hive history file=/tmp/ubuntu/hive_job_log_ubuntu_201102042305_328681162.txt
datanucleus.transactionIsolation=read-committed
datanucleus.autoCreateSchema=true
datanucleus.autoStartMechanismMode=checked
datanucleus.plugin.pluginRegistryBundleCheck=LOG
datanucleus.storeManagerType=rdbms
datanucleus.validateColumns=false
datanucleus.validateConstraints=false
datanucleus.validateTables=false
datanucleus.cache.level2=true
datanucleus.cache.level2.type=SOFT
fs.automatic.close=true
fs.checkpoint.dir=${hadoop.tmp.dir}/dfs/namesecondary
fs.checkpoint.edits.dir=${fs.checkpoint.dir}
fs.checkpoint.period=3600
fs.checkpoint.size=67108864
fs.default.name=hdfs://<hostname hidden>:54310
fs.file.impl=org.apache.hadoop.fs.LocalFileSystem
fs.ftp.impl=org.apache.hadoop.fs.ftp.FTPFileSystem
fs.har.impl.disable.cache=true
fs.har.impl=org.apache.hadoop.fs.HarFileSystem
fs.hdfs.impl=org.apache.hadoop.hdfs.DistributedFileSystem
fs.hftp.impl=org.apache.hadoop.hdfs.HftpFileSystem
fs.hsftp.impl=org.apache.hadoop.hdfs.HsftpFileSystem
fs.kfs.impl=org.apache.hadoop.fs.kfs.KosmosFileSystem
fs.ramfs.impl=org.apache.hadoop.fs.InMemoryFileSystem
fs.s3.block.size=67108864
fs.s3.buffer.dir=${hadoop.tmp.dir}/s3
fs.s3.impl=org.apache.hadoop.fs.s3.S3FileSystem
fs.s3.maxRetries=4
fs.s3n.awsAccessKeyId=
fs.s3n.awsSecretAccessKey=
fs.s3n.block.size=67108864
fs.s3n.impl=org.apache.hadoop.fs.s3native.NativeS3FileSystem
fs.s3.sleepTimeSeconds=10
fs.trash.interval=0
hadoop.logfile.count=10
hadoop.logfile.size=10000000
hadoop.native.lib=true
hadoop.rpc.socket.factory.class.default=org.apache.hadoop.net.StandardSocketFactory
hadoop.security.authentication=simple
hadoop.security.authorization=false
hadoop.security.group.mapping=org.apache.hadoop.security.ShellBasedUnixGroupsMapping
hadoop.tmp.dir=/tmp/hadoop-${user.name}
hadoop.util.hash.type=murmur
hive.default.fileformat=TextFile
hive.exec.compress.intermediate=false
hive.exec.compress.output=false
hive.exec.parallel=false
hive.exec.reducers.bytes.per.reducer=1000000000
hive.exec.reducers.max=999
hive.exec.scratchdir=/tmp/hive-${user.name}
hive.exec.script.allow.partial.consumption=false
hive.exec.script.maxerrsize=100000
hive.fileformat.check=true
hive.groupby.mapaggr.checkinterval=100000
hive.groupby.skewindata=false
hive.heartbeat.interval=1000
hive.hwi.listen.host=0.0.0.0
hive.hwi.listen.port=9999
hive.hwi.war.file=/usr/lib/hive/lib/hive_hwi.war
hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat
hive.join.cache.size=25000
hive.join.emit.interval=1000
hive.map.aggr=false
hive.map.aggr.hash.min.reduction=0.5
hive.map.aggr.hash.percentmemory=0.5
hive.mapjoin.bucket.cache.size=100
hive.mapjoin.cache.numrows=25000
hive.mapjoin.maxsize=100000
hive.mapred.local.mem=0
hive.mapred.mode=nonstrict
hive.mapred.reduce.tasks.speculative.execution=true
hive.merge.mapfiles=true
hive.merge.mapredfiles=false
hive.merge.size.per.task=256000000
hive.metastore.connect.retries=5
hive.metastore.local=true
hive.metastore.metadb.dir=file:///var/metastore/metadb/
hive.metastore.rawstore.impl=org.apache.hadoop.hive.metastore.ObjectStore
hive.metastore.uris=file:///var/lib/hivevar/metastore/metadb/
hive.metastore.warehouse.dir=/user/hive/warehouse
hive.optimize.cp=true
hive.optimize.groupby=true
hive.optimize.ppd=true
hive.optimize.pruner=true
hive.script.auto.progress=false
hive.script.operator.id.env.var=HIVE_SCRIPT_OPERATOR_ID
hive.script.recordreader=org.apache.hadoop.hive.ql.exec.TextRecordReader
hive.script.recordwriter=org.apache.hadoop.hive.ql.exec.TextRecordWriter
hive.script.serde=org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
hive.session.id=ubuntu_201102042305
hive.test.mode=false
hive.test.mode.prefix=test_
hive.test.mode.samplefreq=32
hive.udtf.auto.progress=false
io.bytes.per.checksum=512
io.compression.codecs=org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec
io.file.buffer.size=4096
io.mapfile.bloom.error.rate=0.005
io.mapfile.bloom.size=1048576
io.map.index.skip=0
io.seqfile.compress.blocksize=1000000
io.seqfile.lazydecompress=true
io.seqfile.sorter.recordlimit=1000000
io.serializations=org.apache.hadoop.io.serializer.WritableSerialization
io.skip.checksum.errors=false
io.sort.factor=10
io.sort.mb=100
io.sort.record.percent=0.05
io.sort.spill.percent=0.80
ipc.client.connection.maxidletime=10000
ipc.client.connect.max.retries=10
ipc.client.idlethreshold=4000
ipc.client.kill.max=10
ipc.client.tcpnodelay=false
ipc.server.listen.queue.size=128
ipc.server.tcpnodelay=false
javax.jdo.option.ConnectionDriverName=org.apache.derby.jdbc.EmbeddedDriver
javax.jdo.option.ConnectionPassword=mine
javax.jdo.option.ConnectionURL=jdbc:derby:;databaseName=/var/lib/hive/metastore/${user.name}_db;create=true
javax.jdo.option.ConnectionUserName=APP
javax.jdo.option.DetachAllOnCommit=true
javax.jdo.option.NonTransactionalRead=true
javax.jdo.PersistenceManagerFactoryClass=org.datanucleus.jdo.JDOPersistenceManagerFactory
jobclient.completion.poll.interval=5000
jobclient.output.filter=FAILED
jobclient.progress.monitor.poll.interval=1000
job.end.retry.attempts=0
job.end.retry.interval=30000
keep.failed.task.files=false
local.cache.size=10737418240
map.output.compression.type=BLOCK
mapred.acls.enabled=false
mapred.child.java.opts=-Xmx500m
mapred.child.tmp=./tmp
mapred.compress.map.output=true
mapred.healthChecker.interval=60000
mapred.healthChecker.script.timeout=600000
mapred.heartbeats.in.second=100
mapred.inmem.merge.threshold=1000
mapred.job.queue.name=default
mapred.job.reduce.input.buffer.percent=0.0
mapred.job.reuse.jvm.num.tasks=1
mapred.job.shuffle.input.buffer.percent=0.70
mapred.job.shuffle.merge.percent=0.66
mapred.jobtracker.completeuserjobs.maximum=100
mapred.job.tracker=domU-12-31-39-00-CE-22:50030
mapred.job.tracker.handler.count=10
mapred.job.tracker.http.address=domU-12-31-39-00-CE-22:50040
mapred.jobtracker.instrumentation=org.apache.hadoop.mapred.JobTrackerMetricsInst
mapred.jobtracker.job.history.block.size=3145728
mapred.job.tracker.jobhistory.lru.cache.size=5
mapred.jobtracker.maxtasks.per.job=-1
mapred.job.tracker.persist.jobstatus.active=false
mapred.job.tracker.persist.jobstatus.dir=/jobtracker/jobsInfo
mapred.job.tracker.persist.jobstatus.hours=0
mapred.jobtracker.restart.recover=false
mapred.job.tracker.retiredjobs.cache.size=1000
mapred.jobtracker.taskScheduler=org.apache.hadoop.mapred.JobQueueTaskScheduler
mapred.line.input.format.linespermap=1
mapred.local.dir.minspacekill=0
mapred.local.dir.minspacestart=0
mapred.local.dir=/var/hadoop/mapred/local
mapred.map.max.attempts=4
mapred.map.output.compression.codec=org.apache.hadoop.io.compress.DefaultCodec
mapred.map.tasks=23
mapred.map.tasks.speculative.execution=true
mapred.max.maps.per.node=-1
mapred.max.reduces.per.node=-1
mapred.max.tracker.blacklists=4
mapred.max.tracker.failures=4
mapred.merge.recordsBeforeProgress=10000
mapred.min.split.size=0
mapred.output.compress=false
mapred.output.compression.codec=org.apache.hadoop.io.compress.DefaultCodec
mapred.output.compression.type=RECORD
mapred.queue.names=default
mapred.reduce.copy.backoff=300
mapred.reduce.max.attempts=4
mapred.reduce.parallel.copies=5
mapred.reduce.slowstart.completed.maps=0.05
mapred.reduce.tasks=-1
mapred.reduce.tasks.speculative.execution=true
mapred.running.map.limit=-1
mapred.running.reduce.limit=-1
mapred.skip.attempts.to.start.skipping=2
mapred.skip.map.auto.incr.proc.count=true
mapred.skip.map.max.skip.records=0
mapred.skip.reduce.auto.incr.proc.count=true
mapred.skip.reduce.max.skip.groups=0
mapred.submit.replication=10
mapred.system.dir=/var/hadoop/mapred/system
mapred.task.cache.levels=2
mapred.task.profile=false
mapred.task.profile.maps=0-2
mapred.task.profile.reduces=0-2
mapred.task.timeout=600000
mapred.tasktracker.dns.interface=default
mapred.tasktracker.dns.nameserver=default
mapred.tasktracker.expiry.interval=600000
mapred.task.tracker.http.address=0.0.0.0:50060
mapred.tasktracker.indexcache.mb=10
mapred.tasktracker.instrumentation=org.apache.hadoop.mapred.TaskTrackerMetricsInst
mapred.tasktracker.map.tasks.maximum=2
mapred.tasktracker.reduce.tasks.maximum=2
mapred.task.tracker.report.address=127.0.0.1:0
mapred.task.tracker.task-controller=org.apache.hadoop.mapred.DefaultTaskController
mapred.tasktracker.taskmemorymanager.monitoring-interval=5000
mapred.tasktracker.tasks.sleeptime-before-sigkill=5000
mapred.temp.dir=${hadoop.tmp.dir}/mapred/temp
mapreduce.job.acl-modify-job=
mapreduce.job.acl-view-job=
mapreduce.job.complete.cancel.delegation.tokens=true
mapreduce.job.split.metainfo.maxsize=10000000
mapreduce.jobtracker.staging.root.dir=${hadoop.tmp.dir}/mapred/staging
mapreduce.reduce.shuffle.connect.timeout=180000
mapreduce.reduce.shuffle.read.timeout=180000
mapreduce.tasktracker.outofband.heartbeat=false
mapred.userlog.limit.kb=0
mapred.userlog.retain.hours=24
map.sort.class=org.apache.hadoop.util.QuickSort
silent=off
tasktracker.http.threads=40
topology.node.switch.mapping.impl=org.apache.hadoop.net.ScriptBasedMapping
topology.script.number.args=100
webinterface.private.actions=false
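
Any of the keys above can be overridden per session from the same CLI (or permanently in hive-site.xml). A minimal sketch of session-level overrides; the property names are taken from the listing, but the values shown are illustrative only, not recommendations:

    hive> SET hive.exec.compress.output=true;
    hive> SET mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec;
    hive> SET mapred.reduce.tasks=8;

Overrides made with SET last only until the CLI exits. Note that mapred.reduce.tasks=-1 in the dump means Hive picks the reducer count itself, dividing the input size by hive.exec.reducers.bytes.per.reducer (1000000000 bytes here) and capping the result at hive.exec.reducers.max (999).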