  1. {
  2. "configurations" : [
  3. {
  4. "cluster-env" : {
  5. "properties_attributes" : { },
  6. "properties" : {
  7. "cluster_name" : "sandbox",
  8. "user_group" : "hadoop",
  9. "kerberos_domain" : "EXAMPLE.COM",
  10. "command_retry_max_time_in_sec" : "600",
  11. "smokeuser" : "ambari-qa",
  12. "security_enabled" : "false",
  13. "command_retry_enabled" : "true",
  14. "smokeuser_keytab" : "/etc/security/keytabs/smokeuser.headless.keytab",
  15. "ignore_groupsusers_create" : "false",
  16. "commands_to_retry" : "INSTALL,START"
  17. }
  18. }
  19. },
  20. {
  21. "ams-hbase-log4j" : {
  22. "properties_attributes" : { },
  23. "properties" : {
  24. "content" : "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# Define some default values that can be overridden by system properties\nhbase.root.logger=INFO,console\nhbase.security.logger=INFO,console\nhbase.log.dir=.\nhbase.log.file=hbase.log\n\n# Define the root logger to the system property \"hbase.root.logger\".\nlog4j.rootLogger=${hbase.root.logger}\n\n# Logging Threshold\nlog4j.threshold=ALL\n\n#\n# Daily Rolling File Appender\n#\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n# Rolling File Appender properties\nhbase.log.maxfilesize=256MB\nhbase.log.maxbackupindex=20\n\n# Rolling File Appender\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}\n\nlog4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}\nlog4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n#\n# Security audit appender\n#\nhbase.security.log.file=SecurityAuth.audit\nhbase.security.log.maxfilesize=256MB\nhbase.security.log.maxbackupindex=20\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}\nlog4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.category.SecurityLogger=${hbase.security.logger}\nlog4j.additivity.SecurityLogger=false\n#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE\n\n#\n# Null Appender\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n# Custom Logging levels\n\nlog4j.logger.org.apache.zookeeper=INFO\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.hbase=INFO\n# Make these two classes INFO-level. 
Make them DEBUG to see more zk debug.\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO\n#log4j.logger.org.apache.hadoop.dfs=DEBUG\n# Set this class to log INFO only otherwise its OTT\n# Enable this to get detailed connection error/retry logging.\n# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE\n\n\n# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)\n#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG\n\n# Uncomment the below if you want to remove logging of client region caching'\n# and scan of .META. messages\n# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO\n# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO\n\n "
  25. }
  26. }
  27. },
  28. {
  29. "hive-env" : {
  30. "properties_attributes" : { },
  31. "properties" : {
  32. "webhcat_user" : "hcat",
  33. "hive_txn_acid" : "off",
  34. "hcat_pid_dir" : "/var/run/webhcat",
  35. "hive_security_authorization" : "None",
  36. "hive_log_dir" : "/var/log/hive",
  37. "hive_user" : "hive",
  38. "hcat_log_dir" : "/var/log/webhcat",
  39. "hcat_user" : "hcat",
  40. "hive_ambari_database" : "MySQL",
  41. "content" : "\n if [ \"$SERVICE\" = \"cli\" ]; then\n if [ -z \"$DEBUG\" ]; then\n export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit\"\n else\n export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n fi\n fi\n\n# The heap size of the jvm stared by hive shell script can be controlled via:\n\n# Larger heap size may be required when running queries over large number of files or partitions.\n# By default hive shell scripts use a heap size of 256 (MB). Larger heap size would also be\n# appropriate for hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hive Configuration Directory can be controlled by:\nexport HIVE_CONF_DIR={{hive_config_dir}}\n\n# Folder containing extra libraries required for hive compilation/execution can be controlled by:\nif [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n if [ -f \"${HIVE_AUX_JARS_PATH}\" ]; then \n export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\n elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n fi\nelif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\nfi \n\nexport METASTORE_PORT={{hive_metastore_port}}\n ",
  42. "hive_database" : "New MySQL Database",
  43. "hive_exec_orc_storage_strategy" : "SPEED",
  44. "cost_based_optimizer" : "Off",
  45. "hive_pid_dir" : "/var/run/hive",
  46. "hive_timeline_logging_enabled" : "true",
  47. "hive_database_type" : "mysql",
  48. "hive_database_name" : "hive"
  49. }
  50. }
  51. },
  52. {
  53. "ams-site" : {
  54. "properties_attributes" : { },
  55. "properties" : {
  56. "timeline.metrics.daily.aggregator.minute.interval" : "86400",
  57. "timeline.metrics.cluster.aggregator.daily.disabled" : "false",
  58. "timeline.metrics.host.aggregator.minute.checkpointCutOffMultiplier" : "2",
  59. "timeline.metrics.host.aggregator.hourly.checkpointCutOffMultiplier" : "2",
  60. "timeline.metrics.cluster.aggregator.hourly.ttl" : "31536000",
  61. "timeline.metrics.service.webapp.address" : "0.0.0.0:6188",
  62. "timeline.metrics.service.rpc.address" : "0.0.0.0:60200",
  63. "timeline.metrics.cluster.aggregator.hourly.interval" : "3600",
  64. "timeline.metrics.host.aggregator.hourly.ttl" : "2592000",
  65. "timeline.metrics.hbase.compression.scheme" : "SNAPPY",
  66. "timeline.metrics.cluster.aggregator.hourly.checkpointCutOffMultiplier" : "2",
  67. "timeline.metrics.host.aggregator.hourly.disabled" : "false",
  68. "timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier" : "1",
  69. "timeline.metrics.cluster.aggregator.daily.interval" : "86400",
  70. "timeline.metrics.aggregator.checkpoint.dir" : "/var/lib/ambari-metrics-collector/checkpoint",
  71. "timeline.metrics.cluster.aggregator.minute.disabled" : "false",
  72. "timeline.metrics.service.operation.mode" : "embedded",
  73. "timeline.metrics.host.aggregator.minute.ttl" : "604800",
  74. "timeline.metrics.host.aggregator.minute.interval" : "120",
  75. "timeline.metrics.cluster.aggregator.minute.checkpointCutOffMultiplier" : "2",
  76. "timeline.metrics.cluster.aggregator.daily.ttl" : "63072000",
  77. "timeline.metrics.service.checkpointDelay" : "60",
  78. "timeline.metrics.hbase.data.block.encoding" : "FAST_DIFF",
  79. "phoenix.query.maxGlobalMemoryPercentage" : "25",
  80. "timeline.metrics.cluster.aggregator.minute.timeslice.interval" : "30",
  81. "timeline.metrics.service.default.result.limit" : "5760",
  82. "timeline.metrics.cluster.aggregator.minute.interval" : "120",
  83. "timeline.metrics.host.aggregator.hourly.interval" : "3600",
  84. "timeline.metrics.cluster.aggregator.hourly.disabled" : "false",
  85. "timeline.metrics.host.aggregator.minute.disabled" : "false",
  86. "timeline.metrics.host.aggregator.daily.ttl" : "31536000",
  87. "timeline.metrics.cluster.aggregator.minute.ttl" : "2592000",
  88. "timeline.metrics.service.resultset.fetchSize" : "2000",
  89. "timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier" : "1",
  90. "timeline.metrics.service.cluster.aggregator.appIds" : "datanode,nodemanager,hbase",
  91. "timeline.metrics.host.aggregator.daily.disabled" : "false",
  92. "timeline.metrics.host.aggregator.ttl" : "86400",
  93. "phoenix.spool.directory" : "/tmp"
  94. }
  95. }
  96. },
  97. {
  98. "yarn-log4j" : {
  99. "properties_attributes" : { },
  100. "properties" : {
  101. "content" : "\n#Relative to Yarn Log Dir Prefix\nyarn.log.dir=.\n#\n# Job Summary Appender\n#\n# Use following logger to send summary to separate file defined by\n# hadoop.mapreduce.jobsummary.log.file rolled daily:\n# hadoop.mapreduce.jobsummary.logger=INFO,JSA\n#\nhadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}\nhadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log\nlog4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender\n# Set the ResourceManager summary log filename\nyarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log\n# Set the ResourceManager summary log level and appender\nyarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}\n#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\n\n# To enable AppSummaryLogging for the RM,\n# set yarn.server.resourcemanager.appsummary.logger to\n# LEVEL,RMSUMMARY in hadoop-env.sh\n\n# Appender for ResourceManager Application Summary Log\n# Requires the following properties to be set\n# - hadoop.log.dir (Hadoop Log directory)\n# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)\n# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)\nlog4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender\nlog4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}\nlog4j.appender.RMSUMMARY.MaxFileSize=256MB\nlog4j.appender.RMSUMMARY.MaxBackupIndex=20\nlog4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.JSA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\nlog4j.appender.JSA.DatePattern=.yyyy-MM-dd\nlog4j.appender.JSA.layout=org.apache.log4j.PatternLayout\nlog4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}\nlog4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false\n\n# Appender for viewing information for errors and warnings\nyarn.ewma.cleanupInterval=300\nyarn.ewma.messageAgeLimitSeconds=86400\nyarn.ewma.maxUniqueMessages=250\nlog4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender\nlog4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}\nlog4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}\nlog4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}\n "
  102. }
  103. }
  104. },
  105. {
  106. "pig-log4j" : {
  107. "properties_attributes" : { },
  108. "properties" : {
  109. "content" : "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n# ***** Set root logger level to DEBUG and its only appender to A.\nlog4j.logger.org.apache.pig=info, A\n\n# ***** A is set to be a ConsoleAppender.\nlog4j.appender.A=org.apache.log4j.ConsoleAppender\n# ***** A uses PatternLayout.\nlog4j.appender.A.layout=org.apache.log4j.PatternLayout\nlog4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n\n "
  110. }
  111. }
  112. },
  113. {
  114. "hadoop-policy" : {
  115. "properties_attributes" : { },
  116. "properties" : {
  117. "security.client.protocol.acl" : "*",
  118. "security.job.client.protocol.acl" : "*",
  119. "security.inter.datanode.protocol.acl" : "*",
  120. "security.admin.operations.protocol.acl" : "hadoop",
  121. "security.client.datanode.protocol.acl" : "*",
  122. "security.job.task.protocol.acl" : "*",
  123. "security.refresh.usertogroups.mappings.protocol.acl" : "hadoop",
  124. "security.refresh.policy.protocol.acl" : "hadoop",
  125. "security.namenode.protocol.acl" : "*",
  126. "security.datanode.protocol.acl" : "*",
  127. "security.inter.tracker.protocol.acl" : "*"
  128. }
  129. }
  130. },
  131. {
  132. "spark-metrics-properties" : {
  133. "properties_attributes" : { },
  134. "properties" : {
  135. "content" : "\n# syntax: [instance].sink|source.[name].[options]=[value]\n\n# This file configures Spark's internal metrics system. The metrics system is\n# divided into instances which correspond to internal components.\n# Each instance can be configured to report its metrics to one or more sinks.\n# Accepted values for [instance] are \"master\", \"worker\", \"executor\", \"driver\",\n# and \"applications\". A wild card \"*\" can be used as an instance name, in\n# which case all instances will inherit the supplied property.\n#\n# Within an instance, a \"source\" specifies a particular set of grouped metrics.\n# there are two kinds of sources:\n# 1. Spark internal sources, like MasterSource, WorkerSource, etc, which will\n# collect a Spark component's internal state. Each instance is paired with a\n# Spark source that is added automatically.\n# 2. Common sources, like JvmSource, which will collect low level state.\n# These can be added through configuration options and are then loaded\n# using reflection.\n#\n# A \"sink\" specifies where metrics are delivered to. Each instance can be\n# assigned one or more sinks.\n#\n# The sink|source field specifies whether the property relates to a sink or\n# source.\n#\n# The [name] field specifies the name of source or sink.\n#\n# The [options] field is the specific property of this source or sink. The\n# source or sink is responsible for parsing this property.\n#\n# Notes:\n# 1. To add a new sink, set the \"class\" option to a fully qualified class\n# name (see examples below).\n# 2. Some sinks involve a polling period. The minimum allowed polling period\n# is 1 second.\n# 3. Wild card properties can be overridden by more specific properties.\n# For example, master.sink.console.period takes precedence over\n# *.sink.console.period.\n# 4. A metrics specific configuration\n# \"spark.metrics.conf=${SPARK_HOME}/conf/metrics.properties\" should be\n# added to Java properties using -Dspark.metrics.conf=xxx if you want to\n# customize metrics system. You can also put the file in ${SPARK_HOME}/conf\n# and it will be loaded automatically.\n# 5. MetricsServlet is added by default as a sink in master, worker and client\n# driver, you can send http request \"/metrics/json\" to get a snapshot of all the\n# registered metrics in json format. For master, requests \"/metrics/master/json\" and\n# \"/metrics/applications/json\" can be sent seperately to get metrics snapshot of\n# instance master and applications. 
MetricsServlet may not be configured by self.\n#\n\n## List of available sinks and their properties.\n\n# org.apache.spark.metrics.sink.ConsoleSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n\n# org.apache.spark.metrics.sink.CSVSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n# directory /tmp Where to store CSV files\n\n# org.apache.spark.metrics.sink.GangliaSink\n# Name: Default: Description:\n# host NONE Hostname or multicast group of Ganglia server\n# port NONE Port of Ganglia server(s)\n# period 10 Poll period\n# unit seconds Units of poll period\n# ttl 1 TTL of messages sent by Ganglia\n# mode multicast Ganglia network mode ('unicast' or 'multicast')\n\n# org.apache.spark.metrics.sink.JmxSink\n\n# org.apache.spark.metrics.sink.MetricsServlet\n# Name: Default: Description:\n# path VARIES* Path prefix from the web server root\n# sample false Whether to show entire set of samples for histograms ('false' or 'true')\n#\n# * Default path is /metrics/json for all instances except the master. The master has two paths:\n# /metrics/aplications/json # App information\n# /metrics/master/json # Master information\n\n# org.apache.spark.metrics.sink.GraphiteSink\n# Name: Default: Description:\n# host NONE Hostname of Graphite server\n# port NONE Port of Graphite server\n# period 10 Poll period\n# unit seconds Units of poll period\n# prefix EMPTY STRING Prefix to prepend to metric name\n\n## Examples\n# Enable JmxSink for all instances by class name\n#*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink\n\n# Enable ConsoleSink for all instances by class name\n#*.sink.console.class=org.apache.spark.metrics.sink.ConsoleSink\n\n# Polling period for ConsoleSink\n#*.sink.console.period=10\n\n#*.sink.console.unit=seconds\n\n# Master instance overlap polling period\n#master.sink.console.period=15\n\n#master.sink.console.unit=seconds\n\n# Enable CsvSink for all instances\n#*.sink.csv.class=org.apache.spark.metrics.sink.CsvSink\n\n# Polling period for CsvSink\n#*.sink.csv.period=1\n\n#*.sink.csv.unit=minutes\n\n# Polling directory for CsvSink\n#*.sink.csv.directory=/tmp/\n\n# Worker instance overlap polling period\n#worker.sink.csv.period=10\n\n#worker.sink.csv.unit=minutes\n\n# Enable jvm source for instance master, worker, driver and executor\n#master.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#worker.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n "
  136. }
  137. }
  138. },
  139. {
  140. "ranger-knox-plugin-properties" : {
  141. "properties_attributes" : { },
  142. "properties" : {
  143. "REPOSITORY_CONFIG_USERNAME" : "admin",
  144. "KNOX_HOME" : "/usr/hdp/current/knox-server",
  145. "ranger-knox-plugin-enabled" : "No",
  146. "common.name.for.certificate" : "",
  147. "policy_user" : "ambari-qa"
  148. }
  149. }
  150. },
  151. {
  152. "hive-exec-log4j" : {
  153. "properties_attributes" : { },
  154. "properties" : {
  155. "content" : "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Define some default values that can be overridden by system properties\n\nhive.log.threshold=ALL\nhive.root.logger=INFO,FA\nhive.log.dir=${java.io.tmpdir}/${user.name}\nhive.query.id=hadoop\nhive.log.file=${hive.query.id}.log\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hive.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=${hive.log.threshold}\n\n#\n# File Appender\n#\n\nlog4j.appender.FA=org.apache.log4j.FileAppender\nlog4j.appender.FA.File=${hive.log.dir}/${hive.log.file}\nlog4j.appender.FA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\nlog4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#custom logging levels\n#log4j.logger.xxx=DEBUG\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter\n\n\nlog4j.category.DataNucleus=ERROR,FA\nlog4j.category.Datastore=ERROR,FA\nlog4j.category.Datastore.Schema=ERROR,FA\nlog4j.category.JPOX.Datastore=ERROR,FA\nlog4j.category.JPOX.Plugin=ERROR,FA\nlog4j.category.JPOX.MetaData=ERROR,FA\nlog4j.category.JPOX.Query=ERROR,FA\nlog4j.category.JPOX.General=ERROR,FA\nlog4j.category.JPOX.Enhancer=ERROR,FA\n\n\n# Silence useless ZK logs\nlog4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,FA\nlog4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,FA\n\n "
  156. }
  157. }
  158. },
  159. {
  160. "sqoop-env" : {
  161. "properties_attributes" : { },
  162. "properties" : {
  163. "content" : "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-{{hbase_home}}}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-{{hive_home}}}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\"\n ",
  164. "jdbc_drivers" : " ",
  165. "sqoop_user" : "sqoop"
  166. }
  167. }
  168. },
  169. {
  170. "atlas-env" : {
  171. "properties_attributes" : { },
  172. "properties" : {
  173. "content" : "\n# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path\nexport JAVA_HOME={{java64_home}}\n\n# any additional java opts you want to set. This will apply to both client and server operations\nexport METADATA_OPTS={{metadata_opts}}\n\n# metadata configuration directory \nexport METADATA_CONF={{conf_dir}}\n\n# Where log files are stored. Defatult is logs directory under the base install location\nexport METADATA_LOG_DIR={{log_dir}}\n\n# additional classpath entries\nexport METADATACPPATH={{metadata_classpath}}\n\n# data dir\nexport METADATA_DATA_DIR={{data_dir}}\n\n# pid dir\nexport METADATA_PID_DIR={{pid_dir}}\n\n# Where do you want to expand the war file. By Default it is in /server/webapp dir under the base install dir.\nexport METADATA_EXPANDED_WEBAPP_DIR={{expanded_war_dir}}\n ",
  174. "metadata_pid_dir" : "/var/run/atlas",
  175. "metadata_port" : "21000",
  176. "metadata_data_dir" : "/var/lib/atlas/data",
  177. "metadata_opts" : " ",
  178. "metadata_log_dir" : "/var/log/atlas",
  179. "metadata_classpath" : " ",
  180. "metadata_user" : "atlas",
  181. "metadata_expanded_war_dir" : "./server/webapp"
  182. }
  183. }
  184. },
  185. {
  186. "ranger-kafka-audit" : {
  187. "properties_attributes" : { },
  188. "properties" : {
  189. "xasecure.audit.destination.solr.batch.filespool.dir" : "/var/log/kafka/audit/solr/spool",
  190. "xasecure.audit.destination.hdfs.dir" : "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit",
  191. "xasecure.audit.destination.solr.urls" : "{{ranger_audit_solr_urls}}",
  192. "xasecure.audit.destination.hdfs.batch.filespool.dir" : "/var/log/kafka/audit/hdfs/spool",
  193. "xasecure.audit.is.enabled" : "true",
  194. "xasecure.audit.destination.hdfs" : "true",
  195. "xasecure.audit.credential.provider.file" : "jceks://file{{credential_file}}",
  196. "xasecure.audit.provider.summary.enabled" : "true",
  197. "xasecure.audit.destination.db.jdbc.driver" : "{{jdbc_driver}}",
  198. "xasecure.audit.destination.db.batch.filespool.dir" : "/var/log/kafka/audit/db/spool",
  199. "xasecure.audit.destination.solr" : "true",
  200. "xasecure.audit.destination.db.user" : "{{xa_audit_db_user}}",
  201. "xasecure.audit.destination.db" : "false",
  202. "xasecure.audit.destination.db.jdbc.url" : "{{audit_jdbc_url}}",
  203. "xasecure.audit.destination.solr.zookeepers" : "none"
  204. }
  205. }
  206. },
  207. {
  208. "ranger-yarn-plugin-properties" : {
  209. "properties_attributes" : { },
  210. "properties" : {
  211. "REPOSITORY_CONFIG_USERNAME" : "yarn",
  212. "common.name.for.certificate" : "",
  213. "ranger-yarn-plugin-enabled" : "No",
  214. "policy_user" : "ambari-qa",
  215. "hadoop.rpc.protection" : ""
  216. }
  217. }
  218. },
  219. {
  220. "ssl-server" : {
  221. "properties_attributes" : { },
  222. "properties" : {
  223. "ssl.server.truststore.reload.interval" : "10000",
  224. "ssl.server.keystore.location" : "/etc/security/serverKeys/keystore.jks",
  225. "ssl.server.keystore.type" : "jks",
  226. "ssl.server.truststore.type" : "jks",
  227. "ssl.server.truststore.location" : "/etc/security/serverKeys/all.jks"
  228. }
  229. }
  230. },
  231. {
  232. "ranger-hdfs-audit" : {
  233. "properties_attributes" : { },
  234. "properties" : {
  235. "xasecure.audit.destination.solr.batch.filespool.dir" : "/var/log/hadoop/hdfs/audit/solr/spool",
  236. "xasecure.audit.destination.hdfs.dir" : "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit",
  237. "xasecure.audit.destination.solr.urls" : "{{ranger_audit_solr_urls}}",
  238. "xasecure.audit.destination.hdfs.batch.filespool.dir" : "/var/log/hadoop/hdfs/audit/hdfs/spool",
  239. "xasecure.audit.is.enabled" : "true",
  240. "xasecure.audit.destination.hdfs" : "true",
  241. "xasecure.audit.credential.provider.file" : "jceks://file{{credential_file}}",
  242. "xasecure.audit.provider.summary.enabled" : "false",
  243. "xasecure.audit.destination.db.jdbc.driver" : "{{jdbc_driver}}",
  244. "xasecure.audit.destination.db.batch.filespool.dir" : "/var/log/hadoop/hdfs/audit/db/spool",
  245. "xasecure.audit.destination.solr" : "false",
  246. "xasecure.audit.destination.db.user" : "{{xa_audit_db_user}}",
  247. "xasecure.audit.destination.db" : "false",
  248. "xasecure.audit.destination.db.jdbc.url" : "{{audit_jdbc_url}}",
  249. "xasecure.audit.destination.solr.zookeepers" : "none"
  250. }
  251. }
  252. },
  253. {
  254. "spark-defaults" : {
  255. "properties_attributes" : { },
  256. "properties" : {
  257. "spark.yarn.queue" : "default",
  258. "spark.history.kerberos.principal" : "none",
  259. "spark.yarn.preserve.staging.files" : "false",
  260. "spark.yarn.max.executor.failures" : "3",
  261. "spark.yarn.services" : "org.apache.spark.deploy.yarn.history.YarnHistoryService",
  262. "spark.yarn.am.extraJavaOptions" : "-Dhdp.version={{hdp_full_version}}",
  263. "spark.yarn.applicationMaster.waitTries" : "10",
  264. "spark.history.kerberos.keytab" : "none",
  265. "spark.driver.extraJavaOptions" : "-Dhdp.version={{hdp_full_version}}",
  266. "spark.yarn.scheduler.heartbeat.interval-ms" : "5000",
  267. "spark.history.provider" : "org.apache.spark.deploy.yarn.history.YarnHistoryProvider",
  268. "spark.yarn.driver.memoryOverhead" : "384",
  269. "spark.yarn.containerLauncherMaxThreads" : "25",
  270. "spark.yarn.historyServer.address" : "{{spark_history_server_host}}:{{spark_history_ui_port}}",
  271. "spark.yarn.submit.file.replication" : "3",
  272. "spark.yarn.executor.memoryOverhead" : "384",
  273. "spark.history.ui.port" : "18080"
  274. }
  275. }
  276. },
  277. {
  278. "storm-env" : {
  279. "properties_attributes" : { },
  280. "properties" : {
  281. "content" : "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\n\nexport STORM_CONF_DIR={{conf_dir}}\nexport STORM_HOME={{storm_component_home_dir}}\n ",
  282. "storm_log_dir" : "/var/log/storm",
  283. "nimbus_seeds_supported" : "true",
  284. "storm_user" : "storm",
  285. "storm_pid_dir" : "/var/run/storm",
  286. "jmxremote_port" : "56431"
  287. }
  288. }
  289. },
  290. {
  291. "falcon-runtime.properties" : {
  292. "properties_attributes" : { },
  293. "properties" : {
  294. "*.log.cleanup.frequency.days.retention" : "days(7)",
  295. "*.log.cleanup.frequency.minutes.retention" : "hours(6)",
  296. "*.log.cleanup.frequency.hours.retention" : "minutes(1)",
  297. "*.log.cleanup.frequency.months.retention" : "months(3)",
  298. "*.domain" : "${falcon.app.type}"
  299. }
  300. }
  301. },
  302. {
  303. "ams-hbase-security-site" : {
  304. "properties_attributes" : { },
  305. "properties" : {
  306. "hbase.zookeeper.property.kerberos.removeHostFromPrincipal" : "",
  307. "hbase.regionserver.kerberos.principal" : "",
  308. "hbase.zookeeper.property.authProvider.1" : "",
  309. "hbase.coprocessor.master.classes" : "",
  310. "hbase.zookeeper.property.kerberos.removeRealmFromPrincipal" : "",
  311. "hbase.security.authorization" : "",
  312. "zookeeper.znode.parent" : "",
  313. "ams.zookeeper.keytab" : "",
  314. "hbase.security.authentication" : "",
  315. "hbase.myclient.principal" : "",
  316. "hbase.master.kerberos.principal" : "",
  317. "hbase.master.keytab.file" : "",
  318. "hbase.coprocessor.region.classes" : "",
  319. "hadoop.security.authentication" : "",
  320. "hbase.zookeeper.property.jaasLoginRenew" : "",
  321. "hbase.regionserver.keytab.file" : "",
  322. "hbase.myclient.keytab" : "",
  323. "ams.zookeeper.principal" : ""
  324. }
  325. }
  326. },
  327. {
  328. "ranger-hdfs-plugin-properties" : {
  329. "properties_attributes" : { },
  330. "properties" : {
  331. "REPOSITORY_CONFIG_USERNAME" : "hadoop",
  332. "ranger-hdfs-plugin-enabled" : "No",
  333. "common.name.for.certificate" : "",
  334. "policy_user" : "ambari-qa",
  335. "hadoop.rpc.protection" : ""
  336. }
  337. }
  338. },
  339. {
  340. "ams-log4j" : {
  341. "properties_attributes" : { },
  342. "properties" : {
  343. "content" : "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# Define some default values that can be overridden by system properties\nams.log.dir=.\nams.log.file=ambari-metrics-collector.log\n\n# Root logger option\nlog4j.rootLogger=INFO,file\n\n# Direct log messages to a log file\nlog4j.appender.file=org.apache.log4j.RollingFileAppender\nlog4j.appender.file.File=${ams.log.dir}/${ams.log.file}\nlog4j.appender.file.MaxFileSize=80MB\nlog4j.appender.file.MaxBackupIndex=60\nlog4j.appender.file.layout=org.apache.log4j.PatternLayout\nlog4j.appender.file.layout.ConversionPattern=%d{ABSOLUTE} %5p [%t] %c{1}:%L - %m%n\n "
  344. }
  345. }
  346. },
  347. {
  348. "ranger-yarn-audit" : {
  349. "properties_attributes" : { },
  350. "properties" : {
  351. "xasecure.audit.destination.solr.batch.filespool.dir" : "/var/log/hadoop/yarn/audit/solr/spool",
  352. "xasecure.audit.destination.hdfs.dir" : "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit",
  353. "xasecure.audit.destination.solr.urls" : "{{ranger_audit_solr_urls}}",
  354. "xasecure.audit.destination.hdfs.batch.filespool.dir" : "/var/log/hadoop/yarn/audit/hdfs/spool",
  355. "xasecure.audit.is.enabled" : "true",
  356. "xasecure.audit.destination.hdfs" : "true",
  357. "xasecure.audit.credential.provider.file" : "jceks://file{{credential_file}}",
  358. "xasecure.audit.provider.summary.enabled" : "false",
  359. "xasecure.audit.destination.db.jdbc.driver" : "{{jdbc_driver}}",
  360. "xasecure.audit.destination.db.batch.filespool.dir" : "/var/log/hadoop/yarn/audit/db/spool",
  361. "xasecure.audit.destination.solr" : "false",
  362. "xasecure.audit.destination.db.user" : "{{xa_audit_db_user}}",
  363. "xasecure.audit.destination.db" : "false",
  364. "xasecure.audit.destination.db.jdbc.url" : "{{audit_jdbc_url}}",
  365. "xasecure.audit.destination.solr.zookeepers" : "none"
  366. }
  367. }
  368. },
  369. {
  370. "hive-site" : {
  371. "properties_attributes" : { },
  372. "properties" : {
  373. "hive.security.authenticator.manager" : "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator",
  374. "datanucleus.cache.level2.type" : "none",
  375. "hive.stats.autogather" : "true",
  376. "hive.optimize.index.filter" : "true",
  377. "hive.enforce.sorting" : "true",
  378. "hive.metastore.uris" : "thrift://%HOSTGROUP::host_group_1%:9083",
  379. "hive.stats.dbclass" : "fs",
  380. "hive.map.aggr.hash.force.flush.memory.threshold" : "0.9",
  381. "hive.server2.transport.mode" : "binary",
  382. "hive.compactor.worker.timeout" : "86400s",
  383. "hive.metastore.pre.event.listeners" : "org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener",
  384. "hive.convert.join.bucket.mapjoin.tez" : "false",
  385. "hive.tez.container.size" : "250",
  386. "javax.jdo.option.ConnectionURL" : "jdbc:mysql://%HOSTGROUP::host_group_1%/hive?createDatabaseIfNotExist=true",
  387. "hive.server2.enable.doAs" : "false",
  388. "hive.metastore.warehouse.dir" : "/apps/hive/warehouse",
  389. "hive.metastore.client.socket.timeout" : "1800s",
  390. "hive.exec.orc.default.compress" : "ZLIB",
  391. "hive.exec.post.hooks" : "org.apache.hadoop.hive.ql.hooks.ATSHook",
  392. "hive.exec.dynamic.partition" : "true",
  393. "hive.enforce.sortmergebucketmapjoin" : "true",
  394. "javax.jdo.option.ConnectionDriverName" : "com.mysql.jdbc.Driver",
  395. "hive.metastore.kerberos.principal" : "hive/_HOST@EXAMPLE.COM",
  396. "hive.server2.thrift.http.port" : "10001",
  397. "hive.metastore.client.connect.retry.delay" : "5s",
  398. "hive.exec.pre.hooks" : "org.apache.hadoop.hive.ql.hooks.ATSHook",
  399. "hive.exec.failure.hooks" : "org.apache.hadoop.hive.ql.hooks.ATSHook",
  400. "hive.prewarm.enabled" : "false",
  401. "hive.server2.tez.default.queues" : "default",
  402. "hive.optimize.metadataonly" : "true",
  403. "hive.exec.dynamic.partition.mode" : "nonstrict",
  404. "hive.compute.query.using.stats" : "true",
  405. "hive.server2.thrift.max.worker.threads" : "500",
  406. "hive.compactor.check.interval" : "300s",
  407. "hive.prewarm.numcontainers" : "3",
  408. "hive.metastore.kerberos.keytab.file" : "/etc/security/keytabs/hive.service.keytab",
  409. "hive.cluster.delegation.token.store.zookeeper.connectString" : "%HOSTGROUP::host_group_1%:2181",
  410. "hive.security.metastore.authorization.manager" : "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider",
  411. "hive.mapjoin.bucket.cache.size" : "10000",
  412. "hive.exec.orc.default.stripe.size" : "67108864",
  413. "hive.merge.rcfile.block.level" : "true",
  414. "hive.server2.table.type.mapping" : "CLASSIC",
  415. "hive.exec.orc.encoding.strategy" : "SPEED",
  416. "hive.exec.parallel.thread.number" : "8",
  417. "hive.tez.dynamic.partition.pruning" : "true",
  418. "hive.security.authorization.manager" : "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory",
  419. "hive.server2.logging.operation.log.location" : "${system:java.io.tmpdir}/${system:user.name}/operation_logs",
  420. "hive.metastore.failure.retries" : "24",
  421. "hive.server2.logging.operation.enabled" : "true",
  422. "hive.cluster.delegation.token.store.class" : "org.apache.hadoop.hive.thrift.ZooKeeperTokenStore",
  423. "hive.tez.max.partition.factor" : "2.0",
  424. "hive.optimize.null.scan" : "true",
  425. "hive.metastore.sasl.enabled" : "false",
  426. "hive.cluster.delegation.token.store.zookeeper.znode" : "/hive/cluster/delegation",
  427. "hive.server2.authentication" : "NONE",
  428. "hive.tez.java.opts" : "-server -Xmx200m -Djava.net.preferIPv4Stack=true",
  429. "hive.optimize.reducededuplication.min.reducer" : "4",
  430. "hive.optimize.bucketmapjoin.sortedmerge" : "false",
  431. "hive.mapjoin.optimized.hashtable" : "true",
  432. "datanucleus.autoCreateSchema" : "false",
  433. "hive.compactor.abortedtxn.threshold" : "1000",
  434. "hive.server2.zookeeper.namespace" : "hiveserver2",
  435. "hive.exec.max.dynamic.partitions.pernode" : "2000",
  436. "ambari.hive.db.schema.name" : "hive",
  437. "hive.merge.smallfiles.avgsize" : "16000000",
  438. "hive.conf.restricted.list" : "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role",
  439. "hive.map.aggr.hash.percentmemory" : "0.5",
  440. "hive.tez.dynamic.partition.pruning.max.event.size" : "1048576",
  441. "hive.security.metastore.authenticator.manager" : "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
  442. "hive.fetch.task.conversion" : "more",
  443. "hive.execution.engine" : "tez",
  444. "mapreduce.input.fileinputformat.split.minsize" : "1000000",
  445. "hive.fetch.task.conversion.threshold" : "1073741824",
  446. "hive.limit.optimize.enable" : "true",
  447. "hive.merge.size.per.task" : "256000000",
  448. "hive.server2.use.SSL" : "false",
  449. "hive.server2.thrift.sasl.qop" : "auth",
  450. "hive.exec.max.dynamic.partitions" : "5000",
  451. "javax.jdo.option.ConnectionUserName" : "hive",
  452. "hive.auto.convert.join.noconditionaltask" : "true",
  453. "hive.server2.tez.sessions.per.default.queue" : "1",
  454. "hive.server2.tez.initialize.default.sessions" : "false",
  455. "hive.metastore.authorization.storage.checks" : "false",
  456. "hive.map.aggr.hash.min.reduction" : "0.5",
  457. "hive.exec.reducers.max" : "1009",
  458. "hive.fetch.task.aggr" : "false",
  459. "hive.tez.min.partition.factor" : "0.25",
  460. "hive.mapred.reduce.tasks.speculative.execution" : "false",
  461. "hive.vectorized.groupby.flush.percent" : "0.1",
  462. "hive.server2.thrift.http.path" : "cliservice",
  463. "hive_metastore_user_passwd" : "hive",
  464. "hive.tez.smb.number.waves" : "0.5",
  465. "hive.limit.pushdown.memory.usage" : "0.04",
  466. "hive.server2.authentication.spnego.principal" : "/etc/security/keytabs/spnego.service.keytab",
  467. "hive.vectorized.groupby.maxentries" : "100000",
  468. "hive.users.in.admin.role" : "hue,hive",
  469. "hive.optimize.reducededuplication" : "true",
  470. "hive.exec.submit.local.task.via.child" : "true",
  471. "hive.exec.parallel" : "false",
  472. "hive.tez.auto.reducer.parallelism" : "false",
  473. "hive.auto.convert.join.noconditionaltask.size" : "52428800",
  474. "hive.support.concurrency" : "true",
  475. "hive.tez.dynamic.partition.pruning.max.data.size" : "104857600",
  476. "hive.server2.support.dynamic.service.discovery" : "true",
  477. "hive.exec.reducers.bytes.per.reducer" : "67108864",
  478. "hive.exec.compress.output" : "false",
  479. "hive.stats.fetch.column.stats" : "false",
  480. "hive.user.install.directory" : "/user/",
  481. "hive.exec.max.created.files" : "100000",
  482. "hive.tez.log.level" : "INFO",
  483. "hive.cbo.enable" : "true",
  484. "hive.tez.cpu.vcores" : "-1",
  485. "hive.auto.convert.join" : "true",
  486. "hive.merge.tezfiles" : "false",
  487. "hive.compactor.delta.pct.threshold" : "0.1f",
  488. "hive.stats.fetch.partition.stats" : "true",
  489. "hive.merge.mapfiles" : "true",
  490. "hive.exec.scratchdir" : "/tmp/hive",
  491. "hive.heapsize" : "250",
  492. "hive.optimize.constant.propagation" : "true",
  493. "hive.merge.orcfile.stripe.level" : "true",
  494. "hive.txn.manager" : "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager",
  495. "hive.vectorized.groupby.checkinterval" : "4096",
  496. "hive.vectorized.execution.reduce.enabled" : "false",
  497. "hive.server2.enable.impersonation" : "true",
  498. "hive.zookeeper.quorum" : "%HOSTGROUP::host_group_1%:2181",
  499. "hive.compactor.delta.num.threshold" : "10",
  500. "hive.cli.print.header" : "false",
  501. "hive.auto.convert.sortmerge.join" : "true",
  502. "hive.zookeeper.namespace" : "hive_zookeeper_namespace",
  503. "hive.orc.compute.splits.num.threads" : "10",
  504. "hive.orc.splits.include.file.footer" : "false",
  505. "hive.security.metastore.authorization.auth.reads" : "true",
  506. "hive.txn.max.open.batch" : "1000",
  507. "hive.map.aggr" : "true",
  508. "hive.auto.convert.sortmerge.join.to.mapjoin" : "false",
  509. "hive.server2.allow.user.substitution" : "true",
  510. "hive.compactor.initiator.on" : "true",
  511. "hive.optimize.sort.dynamic.partition" : "false",
  512. "hive.enforce.bucketing" : "true",
  513. "hive.default.fileformat" : "TextFile",
  514. "hive.vectorized.execution.enabled" : "true",
  515. "hive.txn.timeout" : "300",
  516. "hive.metastore.cache.pinobjtypes" : "Table,Database,Type,FieldSchema,Order",
  517. "hive.server2.authentication.spnego.keytab" : "HTTP/_HOST@EXAMPLE.COM",
  518. "hive.metastore.execute.setugi" : "true",
  519. "hive.server2.thrift.port" : "10000",
  520. "hive.tez.input.format" : "org.apache.hadoop.hive.ql.io.HiveInputFormat",
  521. "hive.optimize.bucketmapjoin" : "true",
  522. "hive.merge.mapredfiles" : "false",
  523. "hive.security.authorization.enabled" : "false",
  524. "hive.exec.compress.intermediate" : "false",
  525. "hive.smbjoin.cache.rows" : "10000",
  526. "hive.exec.orc.compression.strategy" : "SPEED",
  527. "hive.metastore.connect.retries" : "24",
  528. "hive.metastore.server.max.threads" : "100000",
  529. "hive.zookeeper.client.port" : "2181",
  530. "hive.exec.submitviachild" : "false",
  531. "hive.compactor.worker.threads" : "0"
  532. }
  533. }
  534. },
  535. {
  536. "ranger-storm-security" : {
  537. "properties_attributes" : { },
  538. "properties" : {
  539. "ranger.plugin.storm.policy.rest.ssl.config.file" : "/usr/hdp/current/storm-client/conf/ranger-policymgr-ssl.xml",
  540. "ranger.plugin.storm.policy.cache.dir" : "/etc/ranger/{{repo_name}}/policycache",
  541. "ranger.plugin.storm.policy.pollIntervalMs" : "30000",
  542. "ranger.plugin.storm.policy.source.impl" : "org.apache.ranger.admin.client.RangerAdminRESTClient",
  543. "ranger.plugin.storm.policy.rest.url" : "{{policymgr_mgr_url}}",
  544. "ranger.plugin.storm.service.name" : "{{repo_name}}"
  545. }
  546. }
  547. },
  548. {
  549. "spark-log4j-properties" : {
  550. "properties_attributes" : { },
  551. "properties" : {
  552. "content" : "\n# Set everything to be logged to the console\nlog4j.rootCategory=INFO, console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n# Settings to quiet third party logs that are too verbose\nlog4j.logger.org.eclipse.jetty=WARN\nlog4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR\nlog4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO\nlog4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO\n\n "
  553. }
  554. }
  555. },
  556. {
  557. "yarn-env" : {
  558. "properties_attributes" : { },
  559. "properties" : {
  560. "yarn_log_dir_prefix" : "/var/log/hadoop-yarn",
  561. "resourcemanager_heapsize" : "250",
  562. "content" : "\n export HADOOP_YARN_HOME={{hadoop_yarn_home}}\n export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\n export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\n export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n export JAVA_HOME={{java64_home}}\n\n # We need to add the EWMA appender for the yarn daemons only;\n # however, YARN_ROOT_LOGGER is shared by the yarn client and the\n # daemons. This is restrict the EWMA appender to daemons only.\n INVOKER=\"${0##*/}\"\n if [ \"$INVOKER\" == \"yarn-daemon.sh\" ]; then\n export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,EWMA,RFA}\n fi\n\n # User for YARN daemons\n export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n # resolve links - $0 may be a softlink\n export YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n # some Java parameters\n # export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n if [ \"$JAVA_HOME\" != \"\" ]; then\n #echo \"run java in $JAVA_HOME\"\n JAVA_HOME=$JAVA_HOME\n fi\n\n if [ \"$JAVA_HOME\" = \"\" ]; then\n echo \"Error: JAVA_HOME is not set.\"\n exit 1\n fi\n\n JAVA=$JAVA_HOME/bin/java\n JAVA_HEAP_MAX=-Xmx1000m\n\n # For setting YARN specific HEAP sizes please use this\n # Parameter and set appropriately\n YARN_HEAPSIZE={{yarn_heapsize}}\n\n # check envvars which might override default args\n if [ \"$YARN_HEAPSIZE\" != \"\" ]; then\n JAVA_HEAP_MAX=\"-Xmx\"\"$YARN_HEAPSIZE\"\"m\"\n fi\n\n # Resource Manager specific parameters\n\n # Specify the max Heapsize for the ResourceManager using a numerical value\n # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n # the value to 1000.\n # This value will be overridden by an Xmx setting specified in either YARN_OPTS\n # and/or YARN_RESOURCEMANAGER_OPTS.\n # If not specified, the default value will be picked from either YARN_HEAPMAX\n # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\n export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n # Specify the JVM options to be used when starting the ResourceManager.\n # These options will be appended to the options specified as YARN_OPTS\n # and therefore may override any similar flags set in YARN_OPTS\n #export YARN_RESOURCEMANAGER_OPTS=\n\n # Node Manager specific parameters\n\n # Specify the max Heapsize for the NodeManager using a numerical value\n # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n # the value to 1000.\n # This value will be overridden by an Xmx setting specified in either YARN_OPTS\n # and/or YARN_NODEMANAGER_OPTS.\n # If not specified, the default value will be picked from either YARN_HEAPMAX\n # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\n export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n # Specify the max Heapsize for the HistoryManager using a numerical value\n # in the scale of MB. 
For example, to specify an jvm option of -Xmx1000m, set\n # the value to 1024.\n # This value will be overridden by an Xmx setting specified in either YARN_OPTS\n # and/or YARN_HISTORYSERVER_OPTS.\n # If not specified, the default value will be picked from either YARN_HEAPMAX\n # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\n export YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n # Specify the JVM options to be used when starting the NodeManager.\n # These options will be appended to the options specified as YARN_OPTS\n # and therefore may override any similar flags set in YARN_OPTS\n #export YARN_NODEMANAGER_OPTS=\n\n # so that filenames w/ spaces are handled correctly in loops below\n IFS=\n\n\n # default log directory and file\n if [ \"$YARN_LOG_DIR\" = \"\" ]; then\n YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\n fi\n if [ \"$YARN_LOGFILE\" = \"\" ]; then\n YARN_LOGFILE='yarn.log'\n fi\n\n # default policy file for service-level authorization\n if [ \"$YARN_POLICYFILE\" = \"\" ]; then\n YARN_POLICYFILE=\"hadoop-policy.xml\"\n fi\n\n # restore ordinary behaviour\n unset IFS\n\n\n YARN_OPTS=\"$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR\"\n YARN_OPTS=\"$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR\"\n YARN_OPTS=\"$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE\"\n YARN_OPTS=\"$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE\"\n YARN_OPTS=\"$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME\"\n YARN_OPTS=\"$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING\"\n YARN_OPTS=\"$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\n YARN_OPTS=\"$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\n if [ \"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n YARN_OPTS=\"$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH\"\n fi\n YARN_OPTS=\"$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE\"\n ",
  563. "yarn_cgroups_enabled" : "false",
  564. "is_supported_yarn_ranger" : "true",
  565. "yarn_user" : "yarn",
  566. "yarn_pid_dir_prefix" : "/var/run/hadoop-yarn",
  567. "nodemanager_heapsize" : "250",
  568. "min_user_id" : "1000",
  569. "yarn_heapsize" : "250",
  570. "apptimelineserver_heapsize" : "250"
  571. }
  572. }
  573. },
  574. {
  575. "tez-site" : {
  576. "properties_attributes" : { },
  577. "properties" : {
  578. "tez.am.am-rm.heartbeat.interval-ms.max" : "250",
  579. "tez.runtime.io.sort.mb" : "150",
  580. "tez.runtime.compress" : "true",
  581. "tez.am.log.level" : "INFO",
  582. "tez.runtime.sorter.class" : "PIPELINED",
  583. "tez.lib.uris" : "/hdp/apps/${hdp.version}/tez/tez.tar.gz",
  584. "tez.grouping.split-waves" : "1.7",
  585. "tez.am.container.reuse.non-local-fallback.enabled" : "false",
  586. "tez.runtime.optimize.local.fetch" : "true",
  587. "tez.runtime.compress.codec" : "org.apache.hadoop.io.compress.SnappyCodec",
  588. "tez.runtime.unordered.output.buffer.size-mb" : "100",
  589. "tez.cluster.additional.classpath.prefix" : "/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
  590. "tez.am.container.reuse.enabled" : "true",
  591. "tez.task.get-task.sleep.interval-ms.max" : "200",
  592. "tez.task.generate.counters.per.io" : "true",
  593. "tez.task.launch.env" : "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
  594. "tez.history.logging.service.class" : "org.apache.tez.dag.history.logging.ats.ATSHistoryLoggingService",
  595. "tez.task.resource.memory.mb" : "1536",
  596. "tez.runtime.convert.user-payload.to.history-text" : "false",
  597. "tez.am.container.idle.release-timeout-max.millis" : "20000",
  598. "tez.counters.max" : "2000",
  599. "tez.am.launch.cmd-opts" : "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
  600. "tez.use.cluster.hadoop-libs" : "false",
  601. "tez.am.tez-ui.history-url.template" : "__HISTORY_URL_BASE__?viewPath=%2F%23%2Ftez-app%2F__APPLICATION_ID__",
  602. "tez.am.launch.cluster-default.cmd-opts" : "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
  603. "tez.am.container.idle.release-timeout-min.millis" : "10000",
  604. "yarn.app.mapreduce.am.command-opts" : "-Xmx200m",
  605. "tez.am.java.opts" : "-server -Xmx200m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC",
  606. "tez.am.resource.memory.mb" : "250",
  607. "tez.am.container.reuse.rack-fallback.enabled" : "true",
  608. "tez.shuffle-vertex-manager.min-src-fraction" : "0.2",
  609. "tez.am.maxtaskfailures.per.node" : "10",
  610. "tez.task.launch.cluster-default.cmd-opts" : "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
  611. "tez.grouping.min-size" : "16777216",
  612. "tez.am.view-acls" : "*",
  613. "tez.generate.debug.artifacts" : "false",
  614. "tez.grouping.max-size" : "1073741824",
  615. "tez.session.am.dag.submit.timeout.secs" : "300",
  616. "tez.task.am.heartbeat.counter.interval-ms.max" : "4000",
  617. "tez.task.launch.cmd-opts" : "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
  618. "tez.am.container.reuse.locality.delay-allocation-millis" : "250",
  619. "tez.dag.am.resource.memory.mb" : "250",
  620. "tez.am.max.app.attempts" : "2",
  621. "tez.staging-dir" : "/tmp/${user.name}/staging",
  622. "tez.am.launch.env" : "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
  623. "tez.counters.max.groups" : "1000",
  624. "tez.runtime.pipelined.sorter.sort.threads" : "2",
  625. "tez.session.client.timeout.secs" : "-1",
  626. "tez.task.max-events-per-heartbeat" : "500",
  627. "tez.shuffle-vertex-manager.max-src-fraction" : "0.4"
  628. }
  629. }
  630. },
  631. {
  632. "mapred-site" : {
  633. "properties_attributes" : { },
  634. "properties" : {
  635. "mapreduce.map.log.level" : "INFO",
  636. "mapreduce.jobhistory.bind-host" : "0.0.0.0",
  637. "mapreduce.shuffle.port" : "13562",
  638. "mapreduce.output.fileoutputformat.compress.type" : "BLOCK",
  639. "mapreduce.reduce.shuffle.merge.percent" : "0.66",
  640. "mapreduce.reduce.shuffle.input.buffer.percent" : "0.7",
  641. "mapreduce.task.timeout" : "300000",
  642. "mapreduce.job.emit-timeline-data" : "false",
  643. "mapreduce.task.io.sort.mb" : "64",
  644. "mapreduce.reduce.log.level" : "INFO",
  645. "mapreduce.task.io.sort.factor" : "100",
  646. "mapreduce.admin.reduce.child.java.opts" : "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
  647. "mapreduce.jobhistory.address" : "%HOSTGROUP::host_group_1%:10020",
  648. "mapreduce.am.max-attempts" : "2",
  649. "yarn.app.mapreduce.am.admin-command-opts" : "-Dhdp.version=${hdp.version}",
  650. "mapreduce.cluster.administrators" : " hadoop",
  651. "mapred.job.reduce.memory.mb" : "250",
  652. "mapreduce.jobhistory.done-dir" : "/mr-history/done",
  653. "mapreduce.framework.name" : "yarn",
  654. "mapreduce.reduce.shuffle.fetch.retry.timeout-ms" : "30000",
  655. "mapred.job.map.memory.mb" : "250",
  656. "mapreduce.reduce.shuffle.fetch.retry.interval-ms" : "1000",
  657. "mapreduce.reduce.java.opts" : "-Xmx200m",
  658. "mapreduce.reduce.shuffle.parallelcopies" : "30",
  659. "mapreduce.map.java.opts" : "-Xmx200m",
  660. "mapreduce.reduce.input.buffer.percent" : "0.0",
  661. "mapreduce.application.framework.path" : "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework",
  662. "yarn.app.mapreduce.am.command-opts" : "-Xmx200m",
  663. "mapreduce.map.memory.mb" : "250",
  664. "mapreduce.reduce.speculative" : "false",
  665. "mapreduce.application.classpath" : "$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
  666. "mapreduce.output.fileoutputformat.compress" : "false",
  667. "mapreduce.map.speculative" : "false",
  668. "yarn.app.mapreduce.am.log.level" : "INFO",
  669. "mapreduce.reduce.memory.mb" : "250",
  670. "mapreduce.jobhistory.intermediate-done-dir" : "/mr-history/tmp",
  671. "mapreduce.reduce.shuffle.fetch.retry.enabled" : "1",
  672. "mapreduce.job.counters.max" : "130",
  673. "mapreduce.map.output.compress" : "false",
  674. "yarn.app.mapreduce.am.staging-dir" : "/user",
  675. "mapreduce.map.sort.spill.percent" : "0.7",
  676. "mapred.child.java.opts" : "-Xmx200m",
  677. "mapreduce.jobhistory.webapp.address" : "%HOSTGROUP::host_group_1%:19888",
  678. "io.sort.mb" : "64",
  679. "yarn.app.mapreduce.am.resource.mb" : "250",
  680. "mapreduce.admin.user.env" : "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
  681. "mapreduce.job.reduce.slowstart.completedmaps" : "0.05",
  682. "mapreduce.admin.map.child.java.opts" : "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}"
  683. }
  684. }
  685. },
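Editorial aside (not part of the blueprint JSON): in the mapred-site block the JVM heap (-Xmx200m in mapreduce.map.java.opts / mapreduce.reduce.java.opts) is kept at roughly 80% of the YARN container size (mapreduce.map.memory.mb = mapreduce.reduce.memory.mb = 250) so the container has headroom for non-heap memory. A minimal helper that derives the opts string from a container size; the 0.8 factor is a common convention taken from these values, not something the blueprint states explicitly.

def child_java_opts(container_mb, heap_fraction=0.8):
    """Derive a -Xmx setting from a YARN container size in MB."""
    heap_mb = int(container_mb * heap_fraction)
    return "-Xmx{}m".format(heap_mb)

# Matches the values used in this blueprint: 250 MB container -> -Xmx200m.
assert child_java_opts(250) == "-Xmx200m"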
  686. {
  687. "ranger-kafka-plugin-properties" : {
  688. "properties_attributes" : { },
  689. "properties" : {
  690. "REPOSITORY_CONFIG_USERNAME" : "kafka",
  691. "zookeeper.connect" : "localhost:2181",
  692. "common.name.for.certificate" : "",
  693. "ranger-kafka-plugin-enabled" : "No",
  694. "policy_user" : "ambari-qa",
  695. "hadoop.rpc.protection" : ""
  696. }
  697. }
  698. },
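Editorial aside (not part of the blueprint JSON): the ranger-kafka-plugin block points at the cluster ZooKeeper ensemble via zookeeper.connect = localhost:2181; note that the AMS block further down runs its own embedded ZooKeeper on clientPort 61181, so the two should not be confused. A quick connectivity check using the third-party kazoo client (an assumed dependency, purely illustrative):

from kazoo.client import KazooClient

zk = KazooClient(hosts="localhost:2181")
zk.start(timeout=10)          # raises if the ensemble is unreachable
print(zk.get_children("/"))   # e.g. ['zookeeper', ...] on a fresh cluster
zk.stop()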
  699. {
  700. "ams-hbase-env" : {
  701. "properties_attributes" : { },
  702. "properties" : {
  703. "max_open_files_limit" : "32768",
  704. "content" : "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HBASE_HEAPSIZE={{hbase_heapsize}}\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport HBASE_OPTS=\"-XX:+UseConcMarkSweepGC -XX:ErrorFile={{hbase_log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{hbase_tmp_dir}}\"\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{hbase_log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n\n{% if java_version < 8 %}\nexport HBASE_MASTER_OPTS=\" -XX:PermSize=64m -XX:MaxPermSize={{hbase_master_maxperm_size}} -Xms{{hbase_heapsize}} -Xmx{{hbase_heapsize}} -Xmn{{hbase_master_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly\"\nexport HBASE_REGIONSERVER_OPTS=\"-XX:MaxPermSize=128m -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% else %}\nexport HBASE_MASTER_OPTS=\" -Xms{{hbase_heapsize}} -Xmx{{hbase_heapsize}} -Xmn{{hbase_master_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly\"\nexport HBASE_REGIONSERVER_OPTS=\" -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% endif %}\n\n\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{hbase_log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{hbase_pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. 
This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\nexport HBASE_ZOOKEEPER_OPTS=\"$HBASE_ZOOKEEPER_OPTS -Djava.security.auth.login.config={{ams_zookeeper_jaas_config_file}}\"\n{% endif %}\n\n# use embedded native libs\n_HADOOP_NATIVE_LIB=\"/usr/lib/ams-hbase/lib/hadoop-native/\"\nexport HBASE_OPTS=\"$HBASE_OPTS -Djava.library.path=${_HADOOP_NATIVE_LIB}\"\n\n# Unset HADOOP_HOME to avoid importing HADOOP installed cluster related configs like: /usr/hdp/2.2.0.0-2041/hadoop/conf/\nexport HADOOP_HOME={{ams_hbase_home_dir}}\n ",
  705. "hbase_master_maxperm_size" : "128m",
  706. "hbase_pid_dir" : "/var/run/ambari-metrics-collector/",
  707. "regionserver_xmn_size" : "256m",
  708. "hbase_master_heapsize" : "512m",
  709. "hbase_regionserver_heapsize" : "512m",
  710. "hbase_regionserver_xmn_max" : "256m",
  711. "hbase_log_dir" : "/var/log/ambari-metrics-collector",
  712. "hbase_regionserver_xmn_ratio" : "0.2",
  713. "hbase_master_xmn_size" : "256m"
  714. }
  715. }
  716. },
  717. {
  718. "knox-env" : {
  719. "properties_attributes" : { },
  720. "properties" : {
  721. "knox_master_secret" : "knox",
  722. "knox_group" : "knox",
  723. "knox_pid_dir" : "/var/run/knox",
  724. "knox_user" : "knox"
  725. }
  726. }
  727. },
  728. {
  729. "gateway-log4j" : {
  730. "properties_attributes" : { },
  731. "properties" : {
  732. "content" : "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\napp.log.dir=${launcher.dir}/../logs\napp.log.file=${launcher.name}.log\napp.audit.file=${launcher.name}-audit.log\n\nlog4j.rootLogger=ERROR, drfa\n\nlog4j.logger.org.apache.hadoop.gateway=INFO\n#log4j.logger.org.apache.hadoop.gateway=DEBUG\n\n#log4j.logger.org.eclipse.jetty=DEBUG\n#log4j.logger.org.apache.shiro=DEBUG\n#log4j.logger.org.apache.http=DEBUG\n#log4j.logger.org.apache.http.client=DEBUG\n#log4j.logger.org.apache.http.headers=DEBUG\n#log4j.logger.org.apache.http.wire=DEBUG\n\nlog4j.appender.stdout=org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\nlog4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.drfa.File=${app.log.dir}/${app.log.file}\nlog4j.appender.drfa.DatePattern=.yyyy-MM-dd\nlog4j.appender.drfa.layout=org.apache.log4j.PatternLayout\nlog4j.appender.drfa.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\nlog4j.logger.audit=INFO, auditfile\nlog4j.appender.auditfile=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.auditfile.File=${app.log.dir}/${app.audit.file}\nlog4j.appender.auditfile.Append = true\nlog4j.appender.auditfile.DatePattern = '.'yyyy-MM-dd\nlog4j.appender.auditfile.layout = org.apache.hadoop.gateway.audit.log4j.layout.AuditLayout"
  733. }
  734. }
  735. },
  736. {
  737. "topology" : {
  738. "properties_attributes" : { },
  739. "properties" : {
  740. "content" : "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!--\n Licensed to the Apache Software Foundation (ASF) under one or more\n contributor license agreements. See the NOTICE file distributed with\n this work for additional information regarding copyright ownership.\n The ASF licenses this file to You under the Apache License, Version 2.0\n (the \"License\"); you may not use this file except in compliance with\n the License. You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n-->\n<topology>\n\n <gateway>\n\n <provider>\n <role>authentication</role>\n <name>ShiroProvider</name>\n <enabled>true</enabled>\n <param>\n <!-- \n session timeout in minutes, this is really idle timeout,\n defaults to 30mins, if the property value is not defined,, \n current client authentication would expire if client idles contiuosly for more than this value\n -->\n <name>sessionTimeout</name>\n <value>30</value>\n </param>\n <param>\n <name>main.ldapRealm</name>\n <value>org.apache.shiro.realm.ldap.JndiLdapRealm</value>\n </param>\n <param>\n <name>main.ldapRealm.userDnTemplate</name>\n <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n </param>\n <param>\n <name>main.ldapRealm.contextFactory.url</name>\n <value>ldap://localhost:33389</value>\n </param>\n <param>\n <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n <value>simple</value>\n </param>\n <param>\n <name>urls./**</name>\n <value>authcBasic</value>\n </param>\n </provider>\n\n <provider>\n <role>identity-assertion</role>\n <name>Pseudo</name>\n <enabled>true</enabled>\n </provider>\n\n <!--\n Defines rules for mapping host names internal to a Hadoop cluster to externally accessible host names.\n For example, a hadoop service running in AWS may return a response that includes URLs containing the\n some AWS internal host name. If the client needs to make a subsequent request to the host identified\n in those URLs they need to be mapped to external host names that the client Knox can use to connect.\n\n If the external hostname and internal host names are same turn of this provider by setting the value of\n enabled parameter as false.\n\n The name parameter specifies the external host names in a comma separated list.\n The value parameter specifies corresponding internal host names in a comma separated list.\n\n Note that when you are using Sandbox, the external hostname needs to be localhost, as seen in out\n of box sandbox.xml. This is because Sandbox uses port mapping to allow clients to connect to the\n Hadoop services using localhost. 
In real clusters, external host names would almost never be localhost.\n -->\n <provider>\n <role>hostmap</role>\n <name>static</name>\n <enabled>true</enabled>\n <param><name>localhost</name><value>sandbox,sandbox.hortonworks.com</value></param>\n </provider>\n\n </gateway>\n\n <service>\n <role>NAMENODE</role>\n <url>hdfs://sandbox:8020</url>\n </service>\n\n <service>\n <role>JOBTRACKER</role>\n <url>rpc://sandbox:8050</url>\n </service>\n\n <service>\n <role>WEBHDFS</role>\n <url>http://sandbox:50070/webhdfs</url>\n </service>\n\n <service>\n <role>WEBHCAT</role>\n <url>http://sandbox:50111/templeton</url>\n </service>\n\n <service>\n <role>OOZIE</role>\n <url>http://sandbox:11000/oozie</url>\n </service>\n\n <service>\n <role>WEBHBASE</role>\n <url>http://sandbox:60080</url>\n </service>\n\n <service>\n <role>HIVE</role>\n <url>http://sandbox:10001/cliservice</url>\n </service>\n\n <service>\n <role>RESOURCEMANAGER</role>\n <url>http://sandbox:8088/ws</url>\n </service>\n\n\n</topology>"
  741. }
  742. }
  743. },
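Editorial aside (not part of the blueprint JSON): the topology above routes REST calls (for example WEBHDFS) through the Knox gateway, authenticating with HTTP Basic against the demo LDAP, and the hostmap provider maps localhost to the sandbox host. An illustrative request through the gateway; the port 8443 and the deployed topology name "default" are assumptions, since the blueprint does not state them, and the guest credentials come from the users-ldif block below.

import requests

GATEWAY = "https://localhost:8443/gateway/default"   # port/topology name assumed

resp = requests.get(
    GATEWAY + "/webhdfs/v1/tmp",
    params={"op": "LISTSTATUS"},
    auth=("guest", "guest-password"),   # sample account from users-ldif
    verify=False,                       # sandbox uses a self-signed certificate
)
print(resp.status_code, resp.json())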
  744. {
  745. "pig-env" : {
  746. "properties_attributes" : { },
  747. "properties" : {
  748. "content" : "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\nif [ -d \"/usr/lib/tez\" ]; then\n PIG_OPTS=\"$PIG_OPTS -Dmapreduce.framework.name=yarn\"\nfi\n "
  749. }
  750. }
  751. },
  752. {
  753. "zookeeper-env" : {
  754. "properties_attributes" : { },
  755. "properties" : {
  756. "zk_log_dir" : "/var/log/zookeeper",
  757. "content" : "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}\n ",
  758. "zk_pid_dir" : "/var/run/zookeeper",
  759. "zk_user" : "zookeeper"
  760. }
  761. }
  762. },
  763. {
  764. "hbase-policy" : {
  765. "properties_attributes" : { },
  766. "properties" : {
  767. "security.client.protocol.acl" : "*",
  768. "security.admin.protocol.acl" : "*",
  769. "security.masterregion.protocol.acl" : "*"
  770. }
  771. }
  772. },
  773. {
  774. "hbase-log4j" : {
  775. "properties_attributes" : { },
  776. "properties" : {
  777. "content" : "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# Define some default values that can be overridden by system properties\nhbase.root.logger=INFO,console\nhbase.security.logger=INFO,console\nhbase.log.dir=.\nhbase.log.file=hbase.log\n\n# Define the root logger to the system property \"hbase.root.logger\".\nlog4j.rootLogger=${hbase.root.logger}\n\n# Logging Threshold\nlog4j.threshold=ALL\n\n#\n# Daily Rolling File Appender\n#\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n# Rolling File Appender properties\nhbase.log.maxfilesize=256MB\nhbase.log.maxbackupindex=20\n\n# Rolling File Appender\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}\n\nlog4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}\nlog4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n#\n# Security audit appender\n#\nhbase.security.log.file=SecurityAuth.audit\nhbase.security.log.maxfilesize=256MB\nhbase.security.log.maxbackupindex=20\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}\nlog4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.category.SecurityLogger=${hbase.security.logger}\nlog4j.additivity.SecurityLogger=false\n#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE\n\n#\n# Null Appender\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n# Custom Logging levels\n\nlog4j.logger.org.apache.zookeeper=INFO\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.hbase=INFO\n# Make these two classes INFO-level. 
Make them DEBUG to see more zk debug.\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO\n#log4j.logger.org.apache.hadoop.dfs=DEBUG\n# Set this class to log INFO only otherwise its OTT\n# Enable this to get detailed connection error/retry logging.\n# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE\n\n\n# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)\n#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG\n\n# Uncomment the below if you want to remove logging of client region caching'\n# and scan of .META. messages\n# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO\n# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO\n\n "
  778. }
  779. }
  780. },
  781. {
  782. "ams-hbase-policy" : {
  783. "properties_attributes" : { },
  784. "properties" : {
  785. "security.client.protocol.acl" : "*",
  786. "security.admin.protocol.acl" : "*",
  787. "security.masterregion.protocol.acl" : "*"
  788. }
  789. }
  790. },
  791. {
  792. "users-ldif" : {
  793. "properties_attributes" : { },
  794. "properties" : {
  795. "content" : "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nversion: 1\n\n# Please replace with site specific values\ndn: dc=hadoop,dc=apache,dc=org\nobjectclass: organization\nobjectclass: dcObject\no: Hadoop\ndc: hadoop\n\n# Entry for a sample people container\n# Please replace with site specific values\ndn: ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:organizationalUnit\nou: people\n\n# Entry for a sample end user\n# Please replace with site specific values\ndn: uid=guest,ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:person\nobjectclass:organizationalPerson\nobjectclass:inetOrgPerson\ncn: Guest\nsn: User\nuid: guest\nuserPassword:guest-password\n\n# entry for sample user admin\ndn: uid=admin,ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:person\nobjectclass:organizationalPerson\nobjectclass:inetOrgPerson\ncn: Admin\nsn: Admin\nuid: admin\nuserPassword:admin-password\n\n# entry for sample user sam\ndn: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:person\nobjectclass:organizationalPerson\nobjectclass:inetOrgPerson\ncn: sam\nsn: sam\nuid: sam\nuserPassword:sam-password\n\n# entry for sample user tom\ndn: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:person\nobjectclass:organizationalPerson\nobjectclass:inetOrgPerson\ncn: tom\nsn: tom\nuid: tom\nuserPassword:tom-password\n\n# create FIRST Level groups branch\ndn: ou=groups,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass:organizationalUnit\nou: groups\ndescription: generic groups branch\n\n# create the analyst group under groups\ndn: cn=analyst,ou=groups,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass: groupofnames\ncn: analyst\ndescription:analyst group\nmember: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\nmember: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\n\n\n# create the scientist group under groups\ndn: cn=scientist,ou=groups,dc=hadoop,dc=apache,dc=org\nobjectclass:top\nobjectclass: groupofnames\ncn: scientist\ndescription: scientist group\nmember: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org"
  796. }
  797. }
  798. },
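Editorial aside (not part of the blueprint JSON): the LDIF above seeds the Knox demo LDAP that the ShiroProvider in the topology binds to at ldap://localhost:33389. A quick bind check using the third-party ldap3 package (an assumed dependency; only the DN, password, and port come from this blueprint):

from ldap3 import Server, Connection

server = Server("localhost", port=33389)
conn = Connection(
    server,
    user="uid=guest,ou=people,dc=hadoop,dc=apache,dc=org",
    password="guest-password",
)
print("bind ok:", conn.bind())   # True if the demo LDAP is up and seeded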
  799. {
  800. "ssl-client" : {
  801. "properties_attributes" : { },
  802. "properties" : {
  803. "ssl.client.truststore.location" : "/etc/security/clientKeys/all.jks",
  804. "ssl.client.truststore.type" : "jks",
  805. "ssl.client.truststore.reload.interval" : "10000",
  806. "ssl.client.keystore.location" : "/etc/security/clientKeys/keystore.jks",
  807. "ssl.client.keystore.type" : "jks"
  808. }
  809. }
  810. },
  811. {
  812. "ranger-hbase-audit" : {
  813. "properties_attributes" : { },
  814. "properties" : {
  815. "xasecure.audit.destination.solr.batch.filespool.dir" : "/var/log/hbase/audit/solr/spool",
  816. "xasecure.audit.destination.hdfs.dir" : "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit",
  817. "xasecure.audit.destination.solr.urls" : "{{ranger_audit_solr_urls}}",
  818. "xasecure.audit.destination.hdfs.batch.filespool.dir" : "/var/log/hbase/audit/hdfs/spool",
  819. "xasecure.audit.is.enabled" : "true",
  820. "xasecure.audit.destination.hdfs" : "true",
  821. "xasecure.audit.credential.provider.file" : "jceks://file{{credential_file}}",
  822. "xasecure.audit.provider.summary.enabled" : "true",
  823. "xasecure.audit.destination.db.jdbc.driver" : "{{jdbc_driver}}",
  824. "xasecure.audit.destination.db.batch.filespool.dir" : "/var/log/hbase/audit/db/spool",
  825. "xasecure.audit.destination.solr" : "false",
  826. "xasecure.audit.destination.db.user" : "{{xa_audit_db_user}}",
  827. "xasecure.audit.destination.db" : "false",
  828. "xasecure.audit.destination.db.jdbc.url" : "{{audit_jdbc_url}}",
  829. "xasecure.audit.destination.solr.zookeepers" : "none"
  830. }
  831. }
  832. },
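Editorial aside (not part of the blueprint JSON): values wrapped in double braces in the Ranger blocks, such as {{ranger_audit_solr_urls}} and {{credential_file}}, are template variables that Ambari resolves at deploy time, not literal settings. A minimal stand-in using jinja2 to show the substitution step; the sample value is invented purely for illustration.

from jinja2 import Template

rendered = Template("jceks://file{{credential_file}}").render(
    credential_file="/etc/ranger/hbase/cred.jceks"   # hypothetical value
)
print(rendered)   # jceks://file/etc/ranger/hbase/cred.jceks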
  833. {
  834. "ranger-hdfs-policymgr-ssl" : {
  835. "properties_attributes" : { },
  836. "properties" : {
  837. "xasecure.policymgr.clientssl.truststore.credential.file" : "jceks://file{{credential_file}}",
  838. "xasecure.policymgr.clientssl.keystore.credential.file" : "jceks://file{{credential_file}}",
  839. "xasecure.policymgr.clientssl.truststore" : "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
  840. "xasecure.policymgr.clientssl.keystore" : "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks"
  841. }
  842. }
  843. },
  844. {
  845. "ranger-kafka-security" : {
  846. "properties_attributes" : { },
  847. "properties" : {
  848. "ranger.plugin.kafka.policy.rest.url" : "{{policymgr_mgr_url}}",
  849. "ranger.plugin.kafka.policy.cache.dir" : "/etc/ranger/{{repo_name}}/policycache",
  850. "ranger.plugin.kafka.service.name" : "{{repo_name}}",
  851. "ranger.plugin.kafka.policy.rest.ssl.config.file" : "/etc/kafka/conf/ranger-policymgr-ssl.xml",
  852. "ranger.plugin.kafka.policy.source.impl" : "org.apache.ranger.admin.client.RangerAdminRESTClient",
  853. "ranger.plugin.kafka.policy.pollIntervalMs" : "30000"
  854. }
  855. }
  856. },
  857. {
  858. "ldap-log4j" : {
  859. "properties_attributes" : { },
  860. "properties" : {
  861. "content" : "\n # Licensed to the Apache Software Foundation (ASF) under one\n # or more contributor license agreements. See the NOTICE file\n # distributed with this work for additional information\n # regarding copyright ownership. The ASF licenses this file\n # to you under the Apache License, Version 2.0 (the\n # \"License\"); you may not use this file except in compliance\n # with the License. You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n\n app.log.dir=${launcher.dir}/../logs\n app.log.file=${launcher.name}.log\n\n log4j.rootLogger=ERROR, drfa\n log4j.logger.org.apache.directory.server.ldap.LdapServer=INFO\n log4j.logger.org.apache.directory=WARN\n\n log4j.appender.stdout=org.apache.log4j.ConsoleAppender\n log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender\n log4j.appender.drfa.File=${app.log.dir}/${app.log.file}\n log4j.appender.drfa.DatePattern=.yyyy-MM-dd\n log4j.appender.drfa.layout=org.apache.log4j.PatternLayout\n log4j.appender.drfa.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n "
  862. }
  863. }
  864. },
  865. {
  866. "flume-conf" : {
  867. "properties_attributes" : { },
  868. "properties" : {
  869. "content" : "\n# Flume agent config\n "
  870. }
  871. }
  872. },
  873. {
  874. "flume-env" : {
  875. "properties_attributes" : { },
  876. "properties" : {
  877. "content" : "\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# If this file is placed at FLUME_CONF_DIR/flume-env.sh, it will be sourced\n# during Flume startup.\n\n# Enviroment variables can be set here.\n\nexport JAVA_HOME={{java_home}}\n\n# Give Flume more memory and pre-allocate, enable remote monitoring via JMX\n# export JAVA_OPTS=\"-Xms100m -Xmx2000m -Dcom.sun.management.jmxremote\"\n\n# Note that the Flume conf directory is always included in the classpath.\n# Add flume sink to classpath\nif [ -e \"/usr/lib/flume/lib/ambari-metrics-flume-sink.jar\" ]; then\n export FLUME_CLASSPATH=$FLUME_CLASSPATH:/usr/lib/flume/lib/ambari-metrics-flume-sink.jar\nfi\n\nexport HIVE_HOME={{flume_hive_home}}\nexport HCAT_HOME={{flume_hcat_home}}\n ",
  878. "flume_user" : "flume",
  879. "flume_log_dir" : "/var/log/flume",
  880. "flume_conf_dir" : "/etc/flume/conf"
  881. }
  882. }
  883. },
  884. {
  885. "ranger-hbase-plugin-properties" : {
  886. "properties_attributes" : { },
  887. "properties" : {
  888. "REPOSITORY_CONFIG_USERNAME" : "hbase",
  889. "common.name.for.certificate" : "",
  890. "ranger-hbase-plugin-enabled" : "No",
  891. "policy_user" : "ambari-qa"
  892. }
  893. }
  894. },
  895. {
  896. "ams-hbase-site" : {
  897. "properties_attributes" : { },
  898. "properties" : {
  899. "hbase.hregion.memstore.flush.size" : "134217728",
  900. "phoenix.sequence.saltBuckets" : "2",
  901. "hbase.client.scanner.timeout.period" : "900000",
  902. "hbase.master.info.bindAddress" : "0.0.0.0",
  903. "hbase.local.dir" : "${hbase.tmp.dir}/local",
  904. "hbase.hstore.flusher.count" : "2",
  905. "hbase.regionserver.global.memstore.upperLimit" : "0.5",
  906. "hbase.master.port" : "61300",
  907. "hbase.hregion.majorcompaction" : "0",
  908. "hbase.zookeeper.leaderport" : "61388",
  909. "hbase.client.scanner.caching" : "10000",
  910. "hbase.regionserver.port" : "61320",
  911. "hbase.master.info.port" : "61310",
  912. "phoenix.query.spoolThresholdBytes" : "12582912",
  913. "hbase.cluster.distributed" : "false",
  914. "hbase.rootdir" : "file:///var/lib/ambari-metrics-collector/hbase",
  915. "phoenix.query.maxGlobalMemoryPercentage" : "15",
  916. "phoenix.query.timeoutMs" : "1200000",
  917. "hbase.regionserver.info.port" : "61330",
  918. "zookeeper.session.timeout.localHBaseCluster" : "20000",
  919. "hbase.replication" : "false",
  920. "hbase.regionserver.global.memstore.lowerLimit" : "0.4",
  921. "hbase.zookeeper.peerport" : "61288",
  922. "hbase.tmp.dir" : "/var/lib/ambari-metrics-collector/hbase-tmp",
  923. "hbase.regionserver.thread.compaction.small" : "3",
  924. "phoenix.groupby.maxCacheSize" : "307200000",
  925. "hbase.master.wait.on.regionservers.mintostart" : "1",
  926. "hbase.zookeeper.property.dataDir" : "${hbase.tmp.dir}/zookeeper",
  927. "hbase.snapshot.enabled" : "false",
  928. "hbase.hstore.blockingStoreFiles" : "200",
  929. "hfile.block.cache.size" : "0.3",
  930. "hbase.hregion.memstore.block.multiplier" : "4",
  931. "zookeeper.session.timeout" : "120000",
  932. "hbase.zookeeper.property.clientPort" : "61181",
  933. "hbase.regionserver.thread.compaction.large" : "2",
  934. "phoenix.spool.directory" : "${hbase.tmp.dir}/phoenix-spool",
  935. "hbase.zookeeper.quorum" : "{{zookeeper_quorum_hosts}}"
  936. }
  937. }
  938. },
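Editorial aside (not part of the blueprint JSON): the ams-hbase-site block runs the metrics HBase in standalone mode (hbase.cluster.distributed = false, file:// rootdir), and its memory split sits exactly at HBase's ceiling: the 0.5 memstore upper limit plus the 0.3 block cache equals the 0.8 of heap that HBase allows for the two combined. A small sanity check one might run against edited values; a sketch, not an official validator.

def check_ams_hbase_heap(memstore_upper=0.5, block_cache=0.3, ceiling=0.8):
    """Reject memstore + block cache fractions above the HBase ceiling."""
    total = memstore_upper + block_cache
    if total > ceiling:
        raise ValueError(
            "memstore + block cache = {:.2f} exceeds the {:.0%} limit".format(
                total, ceiling))
    return total

print(check_ams_hbase_heap())   # 0.8 -- at the limit, but still accepted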
  939. {
  940. "hcat-env" : {
  941. "properties_attributes" : { },
  942. "properties" : {
  943. "content" : "\n # Licensed to the Apache Software Foundation (ASF) under one\n # or more contributor license agreements. See the NOTICE file\n # distributed with this work for additional information\n # regarding copyright ownership. The ASF licenses this file\n # to you under the Apache License, Version 2.0 (the\n # \"License\"); you may not use this file except in compliance\n # with the License. You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n\n JAVA_HOME={{java64_home}}\n HCAT_PID_DIR={{hcat_pid_dir}}/\n HCAT_LOG_DIR={{hcat_log_dir}}/\n HCAT_CONF_DIR={{hcat_conf_dir}}\n HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n #DBROOT is the path where the connector jars are downloaded\n DBROOT={{hcat_dbroot}}\n USER={{hcat_user}}\n METASTORE_PORT={{hive_metastore_port}}\n "
  944. }
  945. }
  946. },
  947. {
  948. "ranger-storm-plugin-properties" : {
  949. "properties_attributes" : { },
  950. "properties" : {
  951. "REPOSITORY_CONFIG_USERNAME" : "stormtestuser@EXAMPLE.COM",
  952. "ranger-storm-plugin-enabled" : "No",
  953. "common.name.for.certificate" : "",
  954. "policy_user" : "storm"
  955. }
  956. }
  957. },
  958. {
  959. "ranger-knox-policymgr-ssl" : {
  960. "properties_attributes" : { },
  961. "properties" : {
  962. "xasecure.policymgr.clientssl.truststore.credential.file" : "jceks://file{{credential_file}}",
  963. "xasecure.policymgr.clientssl.keystore.credential.file" : "jceks://file{{credential_file}}",
  964. "xasecure.policymgr.clientssl.truststore" : "/usr/hdp/current/knox-server/conf/ranger-plugin-truststore.jks",
  965. "xasecure.policymgr.clientssl.keystore" : "/usr/hdp/current/knox-server/conf/ranger-plugin-keystore.jks"
  966. }
  967. }
  968. },
  969. {
  970. "ranger-hdfs-security" : {
  971. "properties_attributes" : { },
  972. "properties" : {
  973. "ranger.plugin.hdfs.policy.source.impl" : "org.apache.ranger.admin.client.RangerAdminRESTClient",
  974. "xasecure.add-hadoop-authorization" : "true",
  975. "ranger.plugin.hdfs.policy.rest.ssl.config.file" : "/etc/hadoop/conf/ranger-policymgr-ssl.xml",
  976. "ranger.plugin.hdfs.policy.pollIntervalMs" : "30000",
  977. "ranger.plugin.hdfs.policy.cache.dir" : "/etc/ranger/{{repo_name}}/policycache",
  978. "ranger.plugin.hdfs.service.name" : "{{repo_name}}",
  979. "ranger.plugin.hdfs.policy.rest.url" : "{{policymgr_mgr_url}}"
  980. }
  981. }
  982. },
  983. {
  984. "core-site" : {
  985. "properties_attributes" : {
  986. "final" : {
  987. "fs.defaultFS" : "true"
  988. }
  989. },
  990. "properties" : {
  991. "hadoop.proxyuser.root.groups" : "*",
  992. "hadoop.security.key.provider.path" : "",
  993. "hadoop.proxyuser.hbase.hosts" : "*",
  994. "hadoop.proxyuser.hive.groups" : "users",
  995. "hadoop.http.authentication.simple.anonymous.allowed" : "true",
  996. "net.topology.script.file.name" : "/etc/hadoop/conf/topology_script.py",
  997. "hadoop.proxyuser.hue.groups" : "*",
  998. "hadoop.proxyuser.hcat.groups" : "*",
  999. "ipc.client.idlethreshold" : "8000",
  1000. "hadoop.security.authorization" : "false",
  1001. "io.serializations" : "org.apache.hadoop.io.serializer.WritableSerialization",
  1002. "hadoop.proxyuser.hue.hosts" : "*",
  1003. "hadoop.proxyuser.hive.hosts" : "*",
  1004. "io.file.buffer.size" : "131072",
  1005. "hadoop.proxyuser.hcat.hosts" : "*",
  1006. "ipc.client.connection.maxidletime" : "30000",
  1007. "hadoop.proxyuser.falcon.hosts" : "*",
  1008. "hadoop.proxyuser.root.hosts" : "*",
  1009. "io.compression.codecs" : "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec",
  1010. "mapreduce.jobtracker.webinterface.trusted" : "false",
  1011. "hadoop.proxyuser.oozie.groups" : "*",
  1012. "fs.trash.interval" : "360",
  1013. "ha.failover-controller.active-standby-elector.zk.op.retries" : "120",
  1014. "hadoop.security.authentication" : "simple",
  1015. "hadoop.proxyuser.hbase.groups" : "users",
  1016. "fs.defaultFS" : "hdfs://%HOSTGROUP::host_group_1%:8020",
  1017. "ipc.server.tcpnodelay" : "true",
  1018. "hadoop.security.auth_to_local" : "DEFAULT",
  1019. "ipc.client.connect.max.retries" : "50",
  1020. "hadoop.proxyuser.falcon.groups" : "users",
  1021. "hadoop.proxyuser.oozie.hosts" : "*"
  1022. }
  1023. }
  1024. },
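Editorial aside (not part of the blueprint JSON): fs.defaultFS above, like the JobHistory addresses in mapred-site, uses Ambari's %HOSTGROUP::host_group_1% placeholder, which is resolved from the host mapping submitted alongside the blueprint. A sketch of that substitution for inspecting the export locally; the host name supplied here is hypothetical.

import re

def resolve_hostgroups(value, mapping):
    """Replace %HOSTGROUP::name% tokens using the given host-group mapping."""
    return re.sub(r"%HOSTGROUP::([^%]+)%",
                  lambda m: mapping[m.group(1)], value)

print(resolve_hostgroups("hdfs://%HOSTGROUP::host_group_1%:8020",
                         {"host_group_1": "sandbox.hortonworks.com"}))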
  1025. {
  1026. "hdfs-log4j" : {
  1027. "properties_attributes" : { },
  1028. "properties" : {
  1029. "content" : "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateChange=WARN\n "
  1030. }
  1031. }
  1032. },
  1033. {
  1034. "webhcat-log4j" : {
  1035. "properties_attributes" : { },
  1036. "properties" : {
  1037. "content" : "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# Define some default values that can be overridden by system properties\nwebhcat.root.logger = INFO, standard\nwebhcat.log.dir = .\nwebhcat.log.file = webhcat.log\n\nlog4j.rootLogger = ${webhcat.root.logger}\n\n# Logging Threshold\nlog4j.threshhold = DEBUG\n\nlog4j.appender.standard = org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.standard.File = ${webhcat.log.dir}/${webhcat.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern = .yyyy-MM-dd\n\nlog4j.appender.DRFA.layout = org.apache.log4j.PatternLayout\n\nlog4j.appender.standard.layout = org.apache.log4j.PatternLayout\nlog4j.appender.standard.layout.conversionPattern = %-5p | %d{DATE} | %c | %m%n\n\n# Class logging settings\nlog4j.logger.com.sun.jersey = DEBUG\nlog4j.logger.com.sun.jersey.spi.container.servlet.WebComponent = ERROR\nlog4j.logger.org.apache.hadoop = INFO\nlog4j.logger.org.apache.hadoop.conf = WARN\nlog4j.logger.org.apache.zookeeper = WARN\nlog4j.logger.org.eclipse.jetty = INFO\n\n "
  1038. }
  1039. }
  1040. },
  1041. {
  1042. "oozie-env" : {
  1043. "properties_attributes" : { },
  1044. "properties" : {
  1045. "oozie_derby_database" : "Derby",
  1046. "content" : "\n#!/bin/bash\n\nif [ -d \"/usr/lib/bigtop-tomcat\" ]; then\n export OOZIE_CONFIG=${OOZIE_CONFIG:-{{conf_dir}}}\n export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}\n export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}\n export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat\nfi\n\n#Set JAVA HOME\nexport JAVA_HOME={{java_home}}\n\nexport JRE_HOME=${JAVA_HOME}\n\n# Set Oozie specific environment variables here.\n\n# Settings for the Embedded Tomcat that runs Oozie\n# Java System properties for Oozie should be specified in this variable\n#\n{% if java_version < 8 %}\nexport CATALINA_OPTS=\"$CATALINA_OPTS -Xmx{{oozie_heapsize}} -XX:MaxPermSize={{oozie_permsize}}\"\n{% else %}\nexport CATALINA_OPTS=\"$CATALINA_OPTS -Xmx{{oozie_heapsize}}\"\n{% endif %}\n# Oozie configuration file to load from Oozie configuration directory\n#\n# export OOZIE_CONFIG_FILE=oozie-site.xml\n\n# Oozie logs directory\n#\nexport OOZIE_LOG={{oozie_log_dir}}\n\n# Oozie pid directory\n#\nexport CATALINA_PID={{pid_file}}\n\n#Location of the data for oozie\nexport OOZIE_DATA={{oozie_data_dir}}\n\n# Oozie Log4J configuration file to load from Oozie configuration directory\n#\n# export OOZIE_LOG4J_FILE=oozie-log4j.properties\n\n# Reload interval of the Log4J configuration file, in seconds\n#\n# export OOZIE_LOG4J_RELOAD=10\n\n# The port Oozie server runs\n#\nexport OOZIE_HTTP_PORT={{oozie_server_port}}\n\n# The admin port Oozie server runs\n#\nexport OOZIE_ADMIN_PORT={{oozie_server_admin_port}}\n\n# The host name Oozie server runs on\n#\n# export OOZIE_HTTP_HOSTNAME=`hostname -f`\n\n# The base URL for callback URLs to Oozie\n#\n# export OOZIE_BASE_URL=\"http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie\"\nexport JAVA_LIBRARY_PATH={{hadoop_lib_home}}/native/Linux-amd64-64\n\n# At least 1 minute of retry time to account for server downtime during\n# upgrade/downgrade\nexport OOZIE_CLIENT_OPTS=\"${OOZIE_CLIENT_OPTS} -Doozie.connection.retry.count=5 \"\n ",
  1047. "oozie_heapsize" : "2048m",
  1048. "oozie_data_dir" : "/hadoop/oozie/data",
  1049. "oozie_admin_port" : "11001",
  1050. "oozie_log_dir" : "/var/log/oozie",
  1051. "oozie_permsize" : "512m",
  1052. "oozie_database" : "New Derby Database",
  1053. "oozie_pid_dir" : "/var/run/oozie",
  1054. "oozie_user" : "oozie"
  1055. }
  1056. }
  1057. },
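Editorial aside (not part of the blueprint JSON): oozie-env, like ams-hbase-env earlier, branches its template on java_version: the -XX:MaxPermSize flag is only emitted for Java 7 and older, since the permanent generation was removed in Java 8. The sketch below mirrors that Jinja branch in plain Python using the oozie_heapsize and oozie_permsize values from this block; it is an illustration of the logic, not code Ambari runs.

def catalina_opts(java_version, heapsize="2048m", permsize="512m"):
    """Reproduce the CATALINA_OPTS branch from the oozie-env template."""
    opts = "-Xmx{}".format(heapsize)
    if java_version < 8:
        opts += " -XX:MaxPermSize={}".format(permsize)
    return opts

print(catalina_opts(7))   # -Xmx2048m -XX:MaxPermSize=512m
print(catalina_opts(8))   # -Xmx2048m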
  1058. {
  1059. "ranger-hive-security" : {
  1060. "properties_attributes" : { },
  1061. "properties" : {
  1062. "ranger.plugin.hive.policy.source.impl" : "org.apache.ranger.admin.client.RangerAdminRESTClient",
  1063. "ranger.plugin.hive.policy.rest.ssl.config.file" : "/usr/hdp/current/hive-server2/conf/ranger-policymgr-ssl.xml",
  1064. "ranger.plugin.hive.policy.rest.url" : "{{policymgr_mgr_url}}",
  1065. "ranger.plugin.hive.service.name" : "{{repo_name}}",
  1066. "ranger.plugin.hive.policy.cache.dir" : "/etc/ranger/{{repo_name}}/policycache",
  1067. "xasecure.hive.update.xapolicies.on.grant.revoke" : "true",
  1068. "ranger.plugin.hive.policy.pollIntervalMs" : "30000"
  1069. }
  1070. }
  1071. },
  1072. {
  1073. "ranger-hbase-security" : {
  1074. "properties_attributes" : { },
  1075. "properties" : {
  1076. "ranger.plugin.hbase.policy.pollIntervalMs" : "30000",
  1077. "ranger.plugin.hbase.policy.source.impl" : "org.apache.ranger.admin.client.RangerAdminRESTClient",
  1078. "ranger.plugin.hbase.policy.cache.dir" : "/etc/ranger/{{repo_name}}/policycache",
  1079. "ranger.plugin.hbase.service.name" : "{{repo_name}}",
  1080. "ranger.plugin.hbase.policy.rest.ssl.config.file" : "/etc/hbase/conf/ranger-policymgr-ssl.xml",
  1081. "xasecure.hbase.update.xapolicies.on.grant.revoke" : "true",
  1082. "ranger.plugin.hbase.policy.rest.url" : "{{policymgr_mgr_url}}"
  1083. }
  1084. }
  1085. },
  1086. {
  1087. "ranger-storm-policymgr-ssl" : {
  1088. "properties_attributes" : { },
  1089. "properties" : {
  1090. "xasecure.policymgr.clientssl.truststore.credential.file" : "jceks://file{{credential_file}}",
  1091. "xasecure.policymgr.clientssl.keystore.credential.file" : "jceks://file{{credential_file}}",
  1092. "xasecure.policymgr.clientssl.truststore" : "/usr/hdp/current/storm-client/conf/ranger-plugin-truststore.jks",
  1093. "xasecure.policymgr.clientssl.keystore" : "/usr/hdp/current/storm-client/conf/ranger-plugin-keystore.jks"
  1094. }
  1095. }
  1096. },
  1097. {
  1098. "spark-javaopts-properties" : {
  1099. "properties_attributes" : { },
  1100. "properties" : {
  1101. "content" : " "
  1102. }
  1103. }
  1104. },
  1105. {
  1106. "ranger-knox-security" : {
  1107. "properties_attributes" : { },
  1108. "properties" : {
  1109. "ranger.plugin.knox.service.name" : "{{repo_name}}",
  1110. "ranger.plugin.knox.policy.rest.url" : "{{policymgr_mgr_url}}",
  1111. "ranger.plugin.knox.policy.rest.ssl.config.file" : "/usr/hdp/current/knox-server/conf/ranger-policymgr-ssl.xml",
  1112. "ranger.plugin.knox.policy.pollIntervalMs" : "30000",
  1113. "ranger.plugin.knox.policy.source.impl" : "org.apache.ranger.admin.client.RangerAdminJersey2RESTClient",
  1114. "ranger.plugin.knox.policy.cache.dir" : "/etc/ranger/{{repo_name}}/policycache"
  1115. }
  1116. }
  1117. },
  1118. {
  1119. "application-properties" : {
  1120. "properties_attributes" : { },
  1121. "properties" : {
  1122. "atlas.lineage.hive.table.type.name" : "Table",
  1123. "atlas.authentication.method" : "simple",
  1124. "atlas.http.authentication.kerberos.name.rules" : "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\n DEFAULT",
  1125. "atlas.graph.index.search.directory" : "/var/lib/atlas/data/es",
  1126. "atlas.graph.index.search.elasticsearch.client-only" : "false",
  1127. "atlas.graph.storage.backend" : "berkeleyje",
  1128. "atlas.graph.index.search.elasticsearch.local-mode" : "true",
  1129. "atlas.graph.index.search.backend" : "elasticsearch",
  1130. "atlas.lineage.hive.process.type.name" : "LoadProcess",
  1131. "atlas.lineage.hive.column.type.name" : "Column",
  1132. "atlas.graph.storage.directory" : "/var/lib/atlas/data/berkeley",
  1133. "atlas.http.authentication.enabled" : "false",
  1134. "atlas.authentication.keytab" : "/etc/security/keytabs/atlas.service.keytab",
  1135. "atlas.lineage.hive.process.inputs.name" : "inputTables",
  1136. "atlas.server.bind.address" : "localhost",
  1137. "atlas.enableTLS" : "false",
  1138. "atlas.lineage.hive.table.column.name" : "columns",
  1139. "atlas.http.authentication.kerberos.keytab" : "/etc/security/keytabs/spnego.service.keytab",
  1140. "atlas.lineage.hive.process.outputs.name" : "outputTables",
  1141. "atlas.authentication.principal" : "atlas",
  1142. "atlas.http.authentication.type" : "simple",
  1143. "atlas.http.authentication.kerberos.principal" : "HTTP/_HOST@EXAMPLE.COM"
  1144. }
  1145. }
  1146. },
  1147. {
  1148. "pig-properties" : {
  1149. "properties_attributes" : { },
  1150. "properties" : {
  1151. "content" : "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# Pig configuration file. All values can be overwritten by command line\n# arguments; for a description of the properties, run\n#\n# pig -h properties\n#\n\n############################################################################\n#\n# == Logging properties\n#\n\n# Location of pig log file. If blank, a file with a timestamped slug\n# ('pig_1399336559369.log') will be generated in the current working directory.\n#\n# pig.logfile=\n# pig.logfile=/tmp/pig-err.log\n\n# Log4j configuration file. Set at runtime with the -4 parameter. The source\n# distribution has a ./conf/log4j.properties.template file you can rename and\n# customize.\n#\n# log4jconf=./conf/log4j.properties\n\n# Verbose Output.\n# * false (default): print only INFO and above to screen\n# * true: Print all log messages to screen\n#\n# verbose=false\n\n# Omit timestamps on log messages. (default: false)\n#\n# brief=false\n\n# Logging level. debug=OFF|ERROR|WARN|INFO|DEBUG (default: INFO)\n#\n# debug=INFO\n\n# Roll up warnings across tasks, so that when millions of mappers suddenly cry\n# out in error they are partially silenced. (default, recommended: true)\n#\n# aggregate.warning=true\n\n# Should DESCRIBE pretty-print its schema?\n# * false (default): print on a single-line, suitable for pasting back in to your script\n# * true (recommended): prints on multiple lines with indentation, much more readable\n#\n# pig.pretty.print.schema=false\n\n# === Profiling UDFs ===\n\n# Turn on UDF timers? This will cause two counters to be\n# tracked for every UDF and LoadFunc in your script: approx_microsecs measures\n# approximate time spent inside a UDF approx_invocations reports the approximate\n# number of times the UDF was invoked.\n#\n# * false (default): do not record timing information of UDFs.\n# * true: report UDF performance. Uses more counters, but gives more insight\n# into script operation\n#\n# pig.udf.profile=false\n\n# Specify frequency of profiling (default: every 100th).\n# pig.udf.profile.frequency=100\n\n############################################################################\n#\n# == Site-specific Properties\n#\n\n# Execution Mode. Local mode is much faster, but only suitable for small amounts\n# of data. Local mode interprets paths on the local file system; Mapreduce mode\n# on the HDFS. Read more under 'Execution Modes' within the Getting Started\n# documentation.\n#\n# * mapreduce (default): use the Hadoop cluster defined in your Hadoop config files\n# * local: use local mode\n# * tez: use Tez on Hadoop cluster\n# * tez_local: use Tez local mode\n#\n# exectype=mapreduce\n\n# Bootstrap file with default statements to execute in every Pig job, similar to\n# .bashrc. 
If blank, uses the file '.pigbootup' from your home directory; If a\n# value is supplied, that file is NOT loaded. This does not do tilde expansion\n# -- you must supply the full path to the file.\n#\n# pig.load.default.statements=\n# pig.load.default.statements=/home/bob/.pigrc\n\n# Kill all waiting/running MR jobs upon a MR job failure? (default: false) If\n# false, jobs that can proceed independently will do so unless a parent stage\n# fails. If true, the failure of any stage in the script kills all jobs.\n#\n# stop.on.failure=false\n\n# File containing the pig script to run. Rarely set in the properties file.\n# Commandline: -f\n#\n# file=\n\n# Jarfile to load, colon separated. Rarely used.\n#\n# jar=\n\n# Register additional .jar files to use with your Pig script.\n# Most typically used as a command line option (see http://pig.apache.org/docs/r0.12.0/basic.html#register):\n#\n# pig -Dpig.additional.jars=hdfs://nn.mydomain.com:9020/myjars/my.jar\n#\n# pig.additional.jars=<colon separated list of jars with optional wildcards>\n# pig.additional.jars=/usr/local/share/pig/pig/contrib/piggybank/java/piggybank.jar:/usr/local/share/pig/datafu/datafu-pig/build/libs/datafu-pig-1.2.1.jar\n\n# Specify potential packages to which a UDF or a group of UDFs belong,\n# eliminating the need to qualify the UDF on every call. See\n# http://pig.apache.org/docs/r0.12.0/udf.html#use-short-names\n#\n# Commandline use:\n#\n# pig \\\n# -Dpig.additional.jars=$PIG_HOME/contrib/piggybank/java/piggybank.jar:$PIG_HOME/../datafu/datafu-pig/build/libs/datafu-pig-1.2.1.jar \\\n# -Dudf.import.list=org.apache.pig.piggybank.evaluation:datafu.pig.util \\\n# happy_job.pig\n#\n# udf.import.list=<colon separated list of imports>\n# udf.import.list=org.apache.pig.piggybank.evaluation:datafu.pig.bags:datafu.pig.hash:datafu.pig.stats:datafu.pig.util\n\n#\n# Reuse jars across jobs run by the same user? (default: false) If enabled, jars\n# are placed in ${pig.user.cache.location}/${user.name}/.pigcache. Since most\n# jars change infrequently, this gives a minor speedup.\n#\n# pig.user.cache.enabled=false\n\n# Base path for storing jars cached by the pig.user.cache.enabled feature. (default: /tmp)\n#\n# pig.user.cache.location=/tmp\n\n# Replication factor for cached jars. If not specified mapred.submit.replication\n# is used, whose default is 10.\n#\n# pig.user.cache.replication=10\n\n# Default UTC offset. (default: the host's current UTC offset) Supply a UTC\n# offset in Java's timezone format: e.g., +08:00.\n#\n# pig.datetime.default.tz=\n\n############################################################################\n#\n# Memory impacting properties\n#\n\n# Amount of memory (as fraction of heap) allocated to bags before a spill is\n# forced. Default is 0.2, meaning 20% of available memory. Note that this memory\n# is shared across all large bags used by the application. See\n# http://pig.apache.org/docs/r0.12.0/perf.html#memory-management\n#\n# pig.cachedbag.memusage=0.2\n\n# Don't spill bags smaller than this size (bytes). Default: 5000000, or about\n# 5MB. Usually, the more spilling the longer runtime, so you might want to tune\n# it according to heap size of each task and so forth.\n#\n# pig.spill.size.threshold=5000000\n\n# EXPERIMENTAL: If a file bigger than this size (bytes) is spilled -- thus\n# freeing a bunch of ram -- tell the JVM to perform garbage collection. This\n# should help reduce the number of files being spilled, but causes more-frequent\n# garbage collection. 
Default: 40000000 (about 40 MB)\n#\n# pig.spill.gc.activation.size=40000000\n\n# Maximum amount of data to replicate using the distributed cache when doing\n# fragment-replicated join. (default: 1000000000, about 1GB) Consider increasing\n# this in a production environment, but carefully.\n#\n# pig.join.replicated.max.bytes=1000000000\n\n# Fraction of heap available for the reducer to perform a skewed join. A low\n# fraction forces Pig to use more reducers, but increases the copying cost. See\n# http://pig.apache.org/docs/r0.12.0/perf.html#skewed-joins\n#\n# pig.skewedjoin.reduce.memusage=0.3\n\n#\n# === SchemaTuple ===\n#\n# The SchemaTuple feature (PIG-2632) uses a tuple's schema (when known) to\n# generate a custom Java class to hold records. Otherwise, tuples are loaded as\n# a plain list that is unaware of its contents' schema -- and so each element\n# has to be wrapped as a Java object on its own. This can provide more efficient\n# CPU utilization, serialization, and most of all memory usage.\n#\n# This feature is considered experimental and is off by default. You can\n# selectively enable it for specific operations using pig.schematuple.udf,\n# pig.schematuple.load, pig.schematuple.fr_join and pig.schematuple.merge_join\n#\n\n# Enable the SchemaTuple optimization in all available cases? (default: false; recommended: true)\n#\n# pig.schematuple=false\n\n# EXPERIMENTAL: Use SchemaTuples with UDFs (default: value of pig.schematuple).\n# pig.schematuple.udf=false\n\n# EXPERIMENTAL, CURRENTLY NOT IMPLEMENTED, but in the future, LoadFunc's with\n# known schemas should output SchemaTuples. (default: value of pig.schematuple)\n# pig.schematuple.load=false\n\n# EXPERIMENTAL: Use SchemaTuples in replicated joins. The potential memory\n# saving here is significant. (default: value of pig.schematuple)\n# pig.schematuple.fr_join=false\n\n# EXPERIMENTAL: Use SchemaTuples in merge joins. (default: value of pig.schematuple).\n# pig.schematuple.merge_join=false\n\n############################################################################\n#\n# Serialization options\n#\n\n# Omit empty part files from the output? (default: false)\n#\n# * false (default): reducers generates an output file, even if output is empty\n# * true (recommended): do not generate zero-byte part files\n#\n# The default behavior of MapReduce is to generate an empty file for no data, so\n# Pig follows that. But many small files can cause annoying extra map tasks and\n# put load on the HDFS, so consider setting this to 'true'\n#\n# pig.output.lazy=false\n\n#\n# === Tempfile Handling\n#\n\n# EXPERIMENTAL: Storage format for temporary files generated by intermediate\n# stages of Pig jobs. This can provide significant speed increases for certain\n# codecs, as reducing the amount of data transferred to and from disk can more\n# than make up for the cost of compression/compression. 
Recommend that you set\n# up LZO compression in Hadoop and specify tfile storage.\n#\n# Compress temporary files?\n# * false (default): do not compress\n# * true (recommended): compress temporary files.\n#\n# pig.tmpfilecompression=false\n# pig.tmpfilecompression=true\n\n# Tempfile storage container type.\n#\n# * tfile (default, recommended): more efficient, but only supports supports gz(gzip) and lzo compression.\n# https://issues.apache.org/jira/secure/attachment/12396286/TFile%20Specification%2020081217.pdf\n# * seqfile: only supports gz(gzip), lzo, snappy, and bzip2 compression\n#\n# pig.tmpfilecompression.storage=tfile\n\n# Codec types for intermediate job files. tfile supports gz(gzip) and lzo;\n# seqfile support gz(gzip), lzo, snappy, bzip2\n#\n# * lzo (recommended with caveats): moderate compression, low cpu burden;\n# typically leads to a noticeable speedup. Best default choice, but you must\n# set up LZO independently due to license incompatibility\n# * snappy: moderate compression, low cpu burden; typically leads to a noticeable speedup..\n# * gz (default): higher compression, high CPU burden. Typically leads to a noticeable slowdown.\n# * bzip2: most compression, major CPU burden. Typically leads to a noticeable slowdown.\n#\n# pig.tmpfilecompression.codec=gzip\n\n#\n# === Split Combining\n#\n\n#\n# Should pig try to combine small files for fewer map tasks? This improves the\n# efficiency of jobs with many small input files, reduces the overhead on the\n# jobtracker, and reduces the number of output files a map-only job\n# produces. However, it only works with certain loaders and increases non-local\n# map tasks. See http://pig.apache.org/docs/r0.12.0/perf.html#combine-files\n#\n# * false (default, recommended): _do_ combine files\n# * true: do not combine files\n#\n# pig.noSplitCombination=false\n\n#\n# Size, in bytes, of data to be processed by a single map. Smaller files are\n# combined untill this size is reached. If unset, defaults to the file system's\n# default block size.\n#\n# pig.maxCombinedSplitSize=\n\n# ###########################################################################\n#\n# Execution options\n#\n\n# Should pig omit combiners? (default, recommended: false -- meaning pig _will_\n# use combiners)\n#\n# When combiners work well, they eliminate a significant amount of\n# data. However, if they do not eliminate much data -- say, a DISTINCT operation\n# that only eliminates 5% of the records -- they add a noticeable overhead to\n# the job. So the recommended default is false (use combiners), selectively\n# disabling them per-job:\n#\n# pig -Dpig.exec.nocombiner=true distinct_but_not_too_much.pig\n#\n# pig.exec.nocombiner=false\n\n# EXPERIMENTAL: Aggregate records in map task before sending to the combiner?\n# (default: false, 10; recommended: true, 10). In cases where there is a massive\n# reduction of data in the aggregation step, pig can do a first pass of\n# aggregation before the data even leaves the mapper, saving much serialization\n# overhead. It's off by default but can give a major improvement to\n# group-and-aggregate operations. Pig skips partial aggregation unless reduction\n# is better than a factor of minReduction (default: 10). See\n# http://pig.apache.org/docs/r0.12.0/perf.html#hash-based-aggregation\n#\n# pig.exec.mapPartAgg=false\n# pig.exec.mapPartAgg.minReduction=10\n\n#\n# === Control how many reducers are used.\n#\n\n# Estimate number of reducers naively using a fixed amount of data per\n# reducer. 
Optimally, you have both fewer reducers than available reduce slots,\n# and reducers that are neither getting too little data (less than a half-GB or\n# so) nor too much data (more than 2-3 times the reducer child process max heap\n# size). The default of 1000000000 (about 1GB) is probably low for a production\n# cluster -- however it's much worse to set this too high (reducers spill many\n# times over in group-sort) than too low (delay waiting for reduce slots).\n#\n# pig.exec.reducers.bytes.per.reducer=1000000000\n\n#\n# Don't ever use more than this many reducers. (default: 999)\n#\n# pig.exec.reducers.max=999\n\n#\n# === Local mode for small jobs\n#\n\n# EXPERIMENTAL: Use local mode for small jobs? If true, jobs with input data\n# size smaller than pig.auto.local.input.maxbytes bytes and one or no reducers\n# are run in local mode, which is much faster. Note that file paths are still\n# interpreted as pig.exectype implies.\n#\n# * true (recommended): allow local mode for small jobs, which is much faster.\n# * false (default): always use pig.exectype.\n#\n# pig.auto.local.enabled=false\n\n#\n# Definition of a small job for the pig.auto.local.enabled feature. Only jobs\n# with less than this may bytes are candidates to run locally (default:\n# 100000000 bytes, about 1GB)\n#\n# pig.auto.local.input.maxbytes=100000000\n\n############################################################################\n#\n# Security Features\n#\n\n# Comma-delimited list of commands/operators that are disallowed. This security\n# feature can be used by administrators to block use of certain commands by\n# users.\n#\n# * <blank> (default): all commands and operators are allowed.\n# * fs,set (for example): block all filesystem commands and config changes from pig scripts.\n#\n# pig.blacklist=\n# pig.blacklist=fs,set\n\n# Comma-delimited list of the only commands/operators that are allowed. This\n# security feature can be used by administrators to block use of certain\n# commands by users.\n#\n# * <blank> (default): all commands and operators not on the pig.blacklist are allowed.\n# * load,store,filter,group: only LOAD, STORE, FILTER, GROUP\n# from pig scripts. All other commands and operators will fail.\n#\n# pig.whitelist=\n# pig.whitelist=load,store,filter,group\n\n#####################################################################\n#\n# Advanced Site-specific Customizations\n#\n\n# Remove intermediate output files?\n#\n# * true (default, recommended): remove the files\n# * false: do NOT remove the files. You must clean them up yourself.\n#\n# Keeping them is useful for advanced debugging, but can be dangerous -- you\n# must clean them up yourself. Inspect the intermediate outputs with\n#\n# LOAD '/path/to/tmp/file' USING org.apache.pig.impl.io.TFileStorage();\n#\n# (Or ...SequenceFileInterStorage if pig.tmpfilecompression.storage is seqfile)\n#\n# pig.delete.temp.files=true\n\n# EXPERIMENTAL: A Pig Progress Notification Listener (PPNL) lets you wire pig's\n# progress into your visibility stack. To use a PPNL, supply the fully qualified\n# class name of a PPNL implementation. Note that only one PPNL can be set up, so\n# if you need several, write a PPNL that will chain them.\n#\n# See https://github.com/twitter/ambrose for a pretty awesome one of these\n#\n# pig.notification.listener=<fully qualified class name of a PPNL implementation>\n\n# String argument to pass to your PPNL constructor (optional). Only a single\n# string value is allowed. 
(default none)\n#\n# pig.notification.listener.arg=<somevalue>\n\n# EXPERIMENTAL: Class invoked to estimate the number of reducers to use.\n# (default: org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.InputSizeReducerEstimator)\n#\n# If you don't know how or why to write a PigReducerEstimator, you're unlikely\n# to use this. By default, the naive mapReduceLayer.InputSizeReducerEstimator is\n# used, but you can specify anything implementing the interface\n# org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigReducerEstimator\n#\n# pig.exec.reducer.estimator=<fully qualified class name of a PigReducerEstimator implementation>\n\n# Optional String argument to pass to your PigReducerEstimator. (default: none;\n# a single String argument is allowed).\n#\n# pig.exec.reducer.estimator.arg=<somevalue>\n\n# Class invoked to report the size of reducers output. By default, the reducers'\n# output is computed as the total size of output files. But not every storage is\n# file-based, and so this logic can be replaced by implementing the interface\n# org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigStatsOutputSizeReader\n# If you need to register more than one reader, you can register them as a comma\n# separated list. Every reader implements a boolean supports(POStore sto) method.\n# When there are more than one reader, they are consulted in order, and the\n# first one whose supports() method returns true will be used.\n#\n# pig.stats.output.size.reader=<fully qualified class name of a PigStatsOutputSizeReader implementation>\n# pig.stats.output.size.reader.unsupported=<comma separated list of StoreFuncs that are not supported by this reader>\n\n# By default, Pig retrieves TaskReports for every launched task to compute\n# various job statistics. But this can cause OOM if the number of tasks is\n# large. In such case, you can disable it by setting this property to true.\n# pig.stats.notaskreport=false\n\n#\n# Override hadoop configs programatically\n#\n# By default, Pig expects hadoop configs (hadoop-site.xml and core-site.xml)\n# to be present on the classpath. There are cases when these configs are\n# needed to be passed programatically, such as while using the PigServer API.\n# In such cases, you can override hadoop configs by setting the property\n# \"pig.use.overriden.hadoop.configs\".\n#\n# When this property is set to true, Pig ignores looking for hadoop configs\n# in the classpath and instead picks it up from Properties/Configuration\n# object passed to it.\n#\n# pig.use.overriden.hadoop.configs=false\n\n# Implied LoadFunc for the LOAD operation when no USING clause is\n# present. Supply the fully qualified class name of a LoadFunc\n# implementation. Note: setting this means you will have to modify most code\n# brought in from elsewhere on the web, as people generally omit the USING\n# clause for TSV files.\n#\n# * org.apache.pig.builtin.PigStorage (default): the traditional tab-separated-values LoadFunc\n# * my.custom.udfcollection.MyCustomLoadFunc (for example): use MyCustomLoadFunc instead\n#\n# pig.default.load.func=<fully qualified class name of a LoadFunc implementation>\n\n# The implied StoreFunc for STORE operations with no USING clause. 
Supply the\n# fully qualified class name of a StoreFunc implementation.\n#\n# * org.apache.pig.builtin.PigStorage (default): the traditional tab-separated-values StoreFunc.\n# * my.custom.udfcollection.MyCustomStoreFunc (for example): use MyCustomStoreFunc instead\n#\n# pig.default.store.func=<fully qualified class name of a StoreFunc implementation>\n\n# Recover jobs when the application master is restarted? (default: false). This\n# is a Hadoop 2 specific property; enable it to take advantage of AM recovery.\n#\n# pig.output.committer.recovery.support=true\n\n# Should scripts check to prevent multiple stores writing to the same location?\n# (default: false) When set to true, stops the execution of script right away.\n#\npig.location.check.strict=false\n\n# In addition to the fs-style commands (rm, ls, etc) Pig can now execute\n# SQL-style DDL commands, eg \"sql create table pig_test(name string, age int)\".\n# The only implemented backend is hcat, and luckily that's also the default.\n#\n# pig.sql.type=hcat\n\n# Path to the hcat executable, for use with pig.sql.type=hcat (default: null)\n#\nhcat.bin=/usr/local/hcat/bin/hcat\n\n###########################################################################\n#\n# Overrides for extreme environments\n#\n# (Most people won't have to adjust these parameters)\n#\n\n\n# Limit the pig script length placed in the jobconf xml. (default:10240)\n# Extremely long queries can waste space in the JobConf; since its contents are\n# only advisory, the default is fine unless you are retaining it for forensics.\n#\n# pig.script.max.size=10240\n\n# Disable use of counters by Pig. Note that the word 'counter' is singular here.\n#\n# * false (default, recommended): do NOT disable counters.\n# * true: disable counters. Set this to true only when your Pig job will\n# otherwise die because of using more counters than hadoop configured limit\n#\n# pig.disable.counter=true\n\n# Sample size (per-mapper, in number of rows) the ORDER..BY operation's\n# RandomSampleLoader uses to estimate how your data should be\n# partitioned. (default, recommended: 100 rows per task) Increase this if you\n# have exceptionally large input splits and are unhappy with the reducer skew.\n#\n# pig.random.sampler.sample.size=100\n\n# Process an entire script at once, reducing the amount of work and number of\n# tasks? (default, recommended: true) See http://pig.apache.org/docs/r0.12.0/perf.html#multi-query-execution\n#\n# MultiQuery optimization is very useful, and so the recommended default is\n# true. You may find a that a script fails to compile under MultiQuery. If so,\n# disable it at runtime:\n#\n# pig -no_multiquery script_that_makes_pig_sad.pig\n#\n# opt.multiquery=true\n\n# For small queries, fetch data directly from the HDFS. (default, recommended:\n# true). If you want to force Pig to launch a MR job, for example when you're\n# testing a live cluster, disable with the -N option. See PIG-3642.\n#\n# opt.fetch=true\n\n# Enable auto/grace parallelism in tez. These should be used by default unless\n# you encounter some bug in automatic parallelism. If pig.tez.auto.parallelism\n# to false, use 1 as default parallelism\npig.tez.auto.parallelism=true\npig.tez.grace.parallelism=true\n\n###########################################################################\n#\n# Streaming properties\n#\n\n# Define what properties will be set in the streaming environment. 
Just set this\n# property to a comma-delimited list of properties to set, and those properties\n# will be set in the environment.\n#\n# pig.streaming.environment=<comma-delimited list of properties>\n\n# Specify a comma-delimited list of local files to ship to distributed cache for\n# streaming job.\n#\n# pig.streaming.ship.files=<comma-delimited list of local files>\n\n# Specify a comma-delimited list of remote files to cache on distributed cache\n# for streaming job.\n#\n# pig.streaming.cache.files=<comma-delimited list of remote files>\n\n# Specify the python command to be used for python streaming udf. By default,\n# python is used, but you can overwrite it with a non-default version such as\n# python2.7.\n#\n# pig.streaming.udf.python.command=python\n\n "
  1152. }
  1153. }
  1154. },
  1155. {
  1156. "kafka-broker" : {
  1157. "properties_attributes" : {
  1158. "property" : {
  1159. "controller.socket.timeout.ms" : "The socket timeout for commands from the partition management controller to the replicas."
  1160. }
  1161. },
  1162. "properties" : {
  1163. "log.roll.hours" : "168",
  1164. "controlled.shutdown.max.retries" : "3",
  1165. "zookeeper.connection.timeout.ms" : "6000",
  1166. "kafka.timeline.metrics.reporter.enabled" : "true",
  1167. "offsets.load.buffer.size" : "5242880",
  1168. "controller.socket.timeout.ms" : "30000",
  1169. "zookeeper.session.timeout.ms" : "30000",
  1170. "authorizer.class.name" : "kafka.security.auth.SimpleAclAuthorizer",
  1171. "kafka.timeline.metrics.port" : "{{metric_collector_port}}",
  1172. "offsets.topic.num.partitions" : "50",
  1173. "controlled.shutdown.enable" : "true",
  1174. "num.recovery.threads.per.data.dir" : "1",
  1175. "message.max.bytes" : "1000000",
  1176. "replica.lag.time.max.ms" : "10000",
  1177. "kafka.ganglia.metrics.group" : "kafka",
  1178. "controller.message.queue.size" : "10",
  1179. "auto.create.topics.enable" : "true",
  1180. "kafka.ganglia.metrics.port" : "8671",
  1181. "zookeeper.sync.time.ms" : "2000",
  1182. "offsets.topic.compression.codec" : "0",
  1183. "replica.fetch.max.bytes" : "1048576",
  1184. "log.retention.hours" : "168",
  1185. "log.dirs" : "/kafka-logs",
  1186. "log.index.size.max.bytes" : "10485760",
  1187. "log.cleanup.interval.mins" : "10",
  1188. "fetch.purgatory.purge.interval.requests" : "10000",
  1189. "offsets.retention.minutes" : "86400000",
  1190. "offsets.commit.timeout.ms" : "5000",
  1191. "log.retention.bytes" : "-1",
  1192. "offset.metadata.max.bytes" : "4096",
  1193. "replica.fetch.wait.max.ms" : "500",
  1194. "leader.imbalance.check.interval.seconds" : "300",
  1195. "offsets.retention.check.interval.ms" : "600000",
  1196. "kafka.timeline.metrics.reporter.sendInterval" : "5900",
  1197. "log.segment.bytes" : "1073741824",
  1198. "producer.purgatory.purge.interval.requests" : "10000",
  1199. "min.insync.replicas" : "1",
  1200. "controlled.shutdown.retry.backoff.ms" : "5000",
  1201. "socket.receive.buffer.bytes" : "102400",
  1202. "log.flush.scheduler.interval.ms" : "3000",
  1203. "num.partitions" : "1",
  1204. "num.io.threads" : "8",
  1205. "leader.imbalance.per.broker.percentage" : "10",
  1206. "num.network.threads" : "3",
  1207. "kafka.timeline.metrics.maxRowCacheSize" : "10000",
  1208. "socket.request.max.bytes" : "104857600",
  1209. "replica.lag.max.messages" : "4000",
  1210. "zookeeper.connect" : "%HOSTGROUP::host_group_1%:2181",
  1211. "queued.max.requests" : "500",
  1212. "offsets.topic.replication.factor" : "3",
  1213. "replica.socket.timeout.ms" : "30000",
  1214. "offsets.topic.segment.bytes" : "104857600",
  1215. "replica.high.watermark.checkpoint.interval.ms" : "5000",
  1216. "listeners" : "PLAINTEXT://localhost:6667",
  1217. "socket.send.buffer.bytes" : "102400",
  1218. "offsets.commit.required.acks" : "-1",
  1219. "log.flush.interval.ms" : "3000",
  1220. "num.replica.fetchers" : "1",
  1221. "kafka.metrics.reporters" : "{{kafka_metrics_reporters}}",
  1222. "default.replication.factor" : "1",
  1223. "replica.socket.receive.buffer.bytes" : "65536",
  1224. "auto.leader.rebalance.enable" : "true",
  1225. "delete.topic.enable" : "false",
  1226. "log.index.interval.bytes" : "4096",
  1227. "kafka.ganglia.metrics.reporter.enabled" : "true",
  1228. "compression.type" : "producer",
  1229. "kafka.timeline.metrics.host" : "{{metric_collector_host}}",
  1230. "replica.fetch.min.bytes" : "1"
  1231. }
  1232. }
  1233. },
  1234. {
  1235. "hiveserver2-site" : {
  1236. "properties_attributes" : { },
  1237. "properties" : {
  1238. "hive.security.authenticator.manager" : "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator",
  1239. "hive.security.authorization.enabled" : "true",
  1240. "hive.security.authorization.manager" : "org.apache.ranger.authorization.hive.authorizer.RangerHiveAuthorizerFactory"
  1241. }
  1242. }
  1243. },
  1244. {
  1245. "slider-env" : {
  1246. "properties_attributes" : { },
  1247. "properties" : {
  1248. "content" : "\n# Set Slider-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java64_home}}\n# The hadoop conf directory. Optional as slider-client.xml can be edited to add properties.\nexport HADOOP_CONF_DIR={{hadoop_conf_dir}}\n "
  1249. }
  1250. }
  1251. },
  1252. {
  1253. "ranger-hive-policymgr-ssl" : {
  1254. "properties_attributes" : { },
  1255. "properties" : {
  1256. "xasecure.policymgr.clientssl.truststore.credential.file" : "jceks://file{{credential_file}}",
  1257. "xasecure.policymgr.clientssl.keystore.credential.file" : "jceks://file{{credential_file}}",
  1258. "xasecure.policymgr.clientssl.truststore" : "/usr/hdp/current/hive-server2/conf/ranger-plugin-truststore.jks",
  1259. "xasecure.policymgr.clientssl.keystore" : "/usr/hdp/current/hive-server2/conf/ranger-plugin-keystore.jks"
  1260. }
  1261. }
  1262. },
  1263. {
  1264. "ranger-knox-audit" : {
  1265. "properties_attributes" : { },
  1266. "properties" : {
  1267. "xasecure.audit.destination.solr.batch.filespool.dir" : "/var/log/knox/audit/solr/spool",
  1268. "xasecure.audit.destination.hdfs.dir" : "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit",
  1269. "xasecure.audit.destination.solr.urls" : "{{ranger_audit_solr_urls}}",
  1270. "xasecure.audit.destination.hdfs.batch.filespool.dir" : "/var/log/knox/audit/hdfs/spool",
  1271. "xasecure.audit.is.enabled" : "true",
  1272. "xasecure.audit.destination.hdfs" : "true",
  1273. "xasecure.audit.credential.provider.file" : "jceks://file{{credential_file}}",
  1274. "xasecure.audit.provider.summary.enabled" : "false",
  1275. "xasecure.audit.destination.db.jdbc.driver" : "{{jdbc_driver}}",
  1276. "xasecure.audit.destination.db.batch.filespool.dir" : "/var/log/knox/audit/db/spool",
  1277. "xasecure.audit.destination.solr" : "false",
  1278. "xasecure.audit.destination.db.user" : "{{xa_audit_db_user}}",
  1279. "xasecure.audit.destination.db" : "false",
  1280. "xasecure.audit.destination.db.jdbc.url" : "{{audit_jdbc_url}}",
  1281. "xasecure.audit.destination.solr.zookeepers" : "none"
  1282. }
  1283. }
  1284. },
  1285. {
  1286. "capacity-scheduler" : {
  1287. "properties_attributes" : { },
  1288. "properties" : {
  1289. "yarn.scheduler.capacity.root.queues" : "default",
  1290. "yarn.scheduler.capacity.root.default.acl_administer_jobs" : "*",
  1291. "yarn.scheduler.capacity.resource-calculator" : "org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator",
  1292. "yarn.scheduler.capacity.root.default.maximum-capacity" : "100",
  1293. "yarn.scheduler.capacity.node-locality-delay" : "40",
  1294. "yarn.scheduler.capacity.maximum-am-resource-percent" : "0.5",
  1295. "yarn.scheduler.capacity.root.default.acl_submit_applications" : "*",
  1296. "yarn.scheduler.capacity.root.default.state" : "RUNNING",
  1297. "yarn.scheduler.capacity.root.accessible-node-labels" : "*",
  1298. "yarn.scheduler.capacity.root.acl_administer_queue" : "*",
  1299. "yarn.scheduler.capacity.maximum-applications" : "10000",
  1300. "yarn.scheduler.capacity.root.default.maximum-am-resource-percent" : "0.5",
  1301. "yarn.scheduler.capacity.root.default.capacity" : "100",
  1302. "yarn.scheduler.capacity.root.default.user-limit-factor" : "1",
  1303. "yarn.scheduler.capacity.root.capacity" : "100",
  1304. "yarn.scheduler.capacity.default.minimum-user-limit-percent" : "100"
  1305. }
  1306. }
  1307. },
  1308. {
  1309. "kafka-log4j" : {
  1310. "properties_attributes" : { },
  1311. "properties" : {
  1312. "content" : "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\nkafka.logs.dir=logs\n\nlog4j.rootLogger=INFO, stdout\n\nlog4j.appender.stdout=org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log\nlog4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log\nlog4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log\nlog4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log\nlog4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log\nlog4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\n# Turn on all our debugging info\n#log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender\n#log4j.logger.kafka.client.ClientUtils=DEBUG, kafkaAppender\n#log4j.logger.kafka.perf=DEBUG, kafkaAppender\n#log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender\n#log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG\nlog4j.logger.kafka=INFO, kafkaAppender\nlog4j.logger.kafka.network.RequestChannel$=WARN, requestAppender\nlog4j.additivity.kafka.network.RequestChannel$=false\n\n#log4j.logger.kafka.network.Processor=TRACE, requestAppender\n#log4j.logger.kafka.server.KafkaApis=TRACE, 
requestAppender\n#log4j.additivity.kafka.server.KafkaApis=false\nlog4j.logger.kafka.request.logger=WARN, requestAppender\nlog4j.additivity.kafka.request.logger=false\n\nlog4j.logger.kafka.controller=TRACE, controllerAppender\nlog4j.additivity.kafka.controller=false\n\nlog4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender\nlog4j.additivity.kafka.log.LogCleaner=false\n\nlog4j.logger.state.change.logger=TRACE, stateChangeAppender\nlog4j.additivity.state.change.logger=false\n\n "
  1313. }
  1314. }
  1315. },
  1316. {
  1317. "hbase-env" : {
  1318. "properties_attributes" : { },
  1319. "properties" : {
  1320. "content" : "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. 
This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if java_version < 8 %}\nJDK_DEPENDED_OPTS=\"-XX:PermSize=128m -XX:MaxPermSize=128m\"\n{% endif %} \n \n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}} $JDK_DEPENDED_OPTS\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %} -Djava.security.auth.login.config={{regionserver_jaas_config_file}} $JDK_DEPENDED_OPTS\"\nexport PHOENIX_QUERYSERVER_OPTS=\"$PHOENIX_QUERYSERVER_OPTS -Djava.security.auth.login.config={{queryserver_jaas_config_file}}\"\n{% else %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} $JDK_DEPENDED_OPTS\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %} $JDK_DEPENDED_OPTS\"\n{% endif %}\n ",
  1321. "override_hbase_uid" : "true",
  1322. "hbase_pid_dir" : "/var/run/hbase",
  1323. "hbase_master_heapsize" : "4096m",
  1324. "hbase_max_direct_memory_size" : "",
  1325. "phoenix_sql_enabled" : "false",
  1326. "hbase_user" : "hbase",
  1327. "hbase_regionserver_heapsize" : "4096m",
  1328. "hbase_regionserver_xmn_max" : "512",
  1329. "hbase_log_dir" : "/var/log/hbase",
  1330. "hbase_regionserver_xmn_ratio" : "0.2"
  1331. }
  1332. }
  1333. },
  1334. {
  1335. "mapred-env" : {
  1336. "properties_attributes" : { },
  1337. "properties" : {
  1338. "content" : "\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n ",
  1339. "mapred_log_dir_prefix" : "/var/log/hadoop-mapreduce",
  1340. "mapred_pid_dir_prefix" : "/var/run/hadoop-mapreduce",
  1341. "jobhistory_heapsize" : "250",
  1342. "mapred_user" : "mapred"
  1343. }
  1344. }
  1345. },
  1346. {
  1347. "slider-log4j" : {
  1348. "properties_attributes" : { },
  1349. "properties" : {
  1350. "content" : "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# Define some default values that can be overridden by system properties\nlog4j.rootLogger=INFO,stdout\nlog4j.threshhold=ALL\nlog4j.appender.stdout=org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n\n# log layout skips stack-trace creation operations by avoiding line numbers and method\nlog4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n\n\n# debug edition is much more expensive\n#log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\nlog4j.appender.subprocess=org.apache.log4j.ConsoleAppender\nlog4j.appender.subprocess.layout=org.apache.log4j.PatternLayout\nlog4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n\n#log4j.logger.org.apache.slider.yarn.appmaster.SliderAppMasterer.master=INFO,subprocess\n\n# for debugging Slider\n#log4j.logger.org.apache.slider=DEBUG\n#log4j.logger.org.apache.slider=DEBUG\n\n# uncomment to debug service lifecycle issues\n#log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG\n#log4j.logger.org.apache.hadoop.yarn.service=DEBUG\n\n# uncomment for YARN operations\n#log4j.logger.org.apache.hadoop.yarn.client=DEBUG\n\n# uncomment this to debug security problems\n#log4j.logger.org.apache.hadoop.security=DEBUG\n\n#crank back on some noise\nlog4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR\nlog4j.logger.org.apache.hadoop.hdfs=WARN\n\n\nlog4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN\nlog4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN\nlog4j.logger.org.apache.zookeeper=WARN\n "
  1351. }
  1352. }
  1353. },
  1354. {
  1355. "falcon-env" : {
  1356. "properties_attributes" : { },
  1357. "properties" : {
  1358. "content" : "\n# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path\nexport JAVA_HOME={{java_home}}\n\n# any additional java opts you want to set. This will apply to both client and server operations\n#export FALCON_OPTS=\n\n# any additional java opts that you want to set for client only\n#export FALCON_CLIENT_OPTS=\n\n# java heap size we want to set for the client. Default is 1024MB\n#export FALCON_CLIENT_HEAP=\n\n# any additional opts you want to set for prisim service.\n#export FALCON_PRISM_OPTS=\n\n# java heap size we want to set for the prisim service. Default is 1024MB\n#export FALCON_PRISM_HEAP=\n\n# any additional opts you want to set for falcon service.\nexport FALCON_SERVER_OPTS=\"-Dfalcon.embeddedmq={{falcon_embeddedmq_enabled}} -Dfalcon.emeddedmq.port={{falcon_emeddedmq_port}}\"\n\n# java heap size we want to set for the falcon server. Default is 1024MB\n#export FALCON_SERVER_HEAP=\n\n# What is is considered as falcon home dir. Default is the base location of the installed software\n#export FALCON_HOME_DIR=\n\n# Where log files are stored. Defatult is logs directory under the base install location\nexport FALCON_LOG_DIR={{falcon_log_dir}}\n\n# Where pid files are stored. Defatult is logs directory under the base install location\nexport FALCON_PID_DIR={{falcon_pid_dir}}\n\n# where the falcon active mq data is stored. Defatult is logs/data directory under the base install location\nexport FALCON_DATA_DIR={{falcon_embeddedmq_data}}\n\n# Where do you want to expand the war file. By Default it is in /server/webapp dir under the base install dir.\n#export FALCON_EXPANDED_WEBAPP_DIR=\n ",
  1359. "falcon.emeddedmq.port" : "61616",
  1360. "supports_hive_dr" : "true",
  1361. "falcon_port" : "15000",
  1362. "falcon_store_uri" : "file:///hadoop/falcon/store",
  1363. "falcon_log_dir" : "/var/log/falcon",
  1364. "falcon.embeddedmq.data" : "/hadoop/falcon/embeddedmq/data",
  1365. "falcon_local_dir" : "/hadoop/falcon",
  1366. "falcon_pid_dir" : "/var/run/falcon",
  1367. "falcon_user" : "falcon",
  1368. "falcon.embeddedmq" : "true"
  1369. }
  1370. }
  1371. },
  1372. {
  1373. "yarn-site" : {
  1374. "properties_attributes" : { },
  1375. "properties" : {
  1376. "yarn.nodemanager.linux-container-executor.group" : "hadoop",
  1377. "yarn.timeline-service.client.retry-interval-ms" : "1000",
  1378. "yarn.nodemanager.vmem-check-enabled" : "false",
  1379. "yarn.resourcemanager.connect.max-wait.ms" : "900000",
  1380. "yarn.timeline-service.enabled" : "true",
  1381. "yarn.nodemanager.log.retain-second" : "604800",
  1382. "yarn.resourcemanager.zk-acl" : "world:anyone:rwcda ",
  1383. "yarn.resourcemanager.webapp.address" : "%HOSTGROUP::host_group_1%:8088",
  1384. "yarn.nodemanager.vmem-pmem-ratio" : "10",
  1385. "yarn.nodemanager.health-checker.interval-ms" : "135000",
  1386. "yarn.timeline-service.generic-application-history.store-class" : "org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore",
  1387. "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage" : "90",
  1388. "yarn.log.server.url" : "http://%HOSTGROUP::host_group_1%:19888/jobhistory/logs",
  1389. "yarn.nodemanager.resource.memory-mb" : "2250",
  1390. "yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds" : "-1",
  1391. "yarn.resourcemanager.scheduler.monitor.enable" : "false",
  1392. "yarn.resourcemanager.zk-retry-interval-ms" : "1000",
  1393. "yarn.http.policy" : "HTTP_ONLY",
  1394. "yarn.nodemanager.log-aggregation.debug-enabled" : "false",
  1395. "yarn.timeline-service.webapp.address" : "%HOSTGROUP::host_group_1%:8188",
  1396. "yarn.timeline-service.webapp.https.address" : "%HOSTGROUP::host_group_1%:8190",
  1397. "yarn.resourcemanager.webapp.https.address" : "%HOSTGROUP::host_group_1%:8090",
  1398. "yarn.resourcemanager.scheduler.class" : "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler",
  1399. "yarn.nodemanager.remote-app-log-dir-suffix" : "logs",
  1400. "yarn.client.nodemanager-connect.max-wait-ms" : "60000",
  1401. "yarn.resourcemanager.bind-host" : "0.0.0.0",
  1402. "yarn.resourcemanager.admin.address" : "%HOSTGROUP::host_group_1%:8141",
  1403. "yarn.resourcemanager.ha.enabled" : "false",
  1404. "yarn.resourcemanager.resource-tracker.address" : "%HOSTGROUP::host_group_1%:8025",
  1405. "yarn.resourcemanager.nodes.exclude-path" : "/etc/hadoop/conf/yarn.exclude",
  1406. "hadoop.registry.rm.enabled" : "false",
  1407. "yarn.timeline-service.http-authentication.simple.anonymous.allowed" : "true",
  1408. "yarn.resourcemanager.fs.state-store.retry-policy-spec" : "2000, 500",
  1409. "yarn.nodemanager.local-dirs" : "/hadoop/yarn/local",
  1410. "yarn.admin.acl" : "yarn",
  1411. "yarn.timeline-service.recovery.enabled" : "true",
  1412. "yarn.nodemanager.pmem-check-enabled" : "false",
  1413. "yarn.scheduler.minimum-allocation-mb" : "250",
  1414. "yarn.nodemanager.admin-env" : "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX",
  1415. "yarn.timeline-service.http-authentication.type" : "simple",
  1416. "yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage" : "false",
  1417. "yarn.log-aggregation.retain-seconds" : "2592000",
  1418. "yarn.nodemanager.address" : "0.0.0.0:45454",
  1419. "yarn.resourcemanager.hostname" : "%HOSTGROUP::host_group_1%",
  1420. "yarn.resourcemanager.zk-state-store.parent-path" : "/rmstore",
  1421. "yarn.nodemanager.container-monitor.interval-ms" : "3000",
  1422. "yarn.resourcemanager.webapp.proxyuser.hcat.hosts" : "*",
  1423. "yarn.timeline-service.address" : "%HOSTGROUP::host_group_1%:10200",
  1424. "yarn.nodemanager.container-executor.class" : "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor",
  1425. "yarn.nodemanager.recovery.enabled" : "true",
  1426. "yarn.nodemanager.recovery.dir" : "{{yarn_log_dir_prefix}}/nodemanager/recovery-state",
  1427. "yarn.resourcemanager.connect.retry-interval.ms" : "30000",
  1428. "yarn.nodemanager.log-aggregation.num-log-files-per-app" : "30",
  1429. "yarn.scheduler.minimum-allocation-vcores" : "1",
  1430. "yarn.timeline-service.leveldb-timeline-store.path" : "/hadoop/yarn/timeline",
  1431. "yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms" : "10000",
  1432. "yarn.resourcemanager.webapp.proxyuser.oozie.hosts" : "*",
  1433. "yarn.resourcemanager.store.class" : "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore",
  1434. "yarn.nodemanager.linux-container-executor.cgroups.mount" : "false",
  1435. "yarn.node-labels.fs-store.root-dir" : "/system/yarn/node-labels",
  1436. "yarn.nodemanager.linux-container-executor.cgroups.hierarchy" : "hadoop-yarn",
  1437. "yarn.timeline-service.state-store-class" : "org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore",
  1438. "yarn.nodemanager.resource.cpu-vcores" : "8",
  1439. "yarn.nodemanager.disk-health-checker.min-healthy-disks" : "0.25",
  1440. "yarn.resourcemanager.work-preserving-recovery.enabled" : "true",
  1441. "yarn.resourcemanager.zk-num-retries" : "1000",
  1442. "yarn.nodemanager.remote-app-log-dir" : "/app-logs",
  1443. "yarn.nodemanager.aux-services" : "mapreduce_shuffle",
  1444. "yarn.node-labels.fs-store.retry-policy-spec" : "2000, 500",
  1445. "yarn.timeline-service.client.max-retries" : "30",
  1446. "yarn.timeline-service.ttl-ms" : "2678400000",
  1447. "yarn.resourcemanager.webapp.proxyuser.hcat.groups" : "*",
  1448. "yarn.resourcemanager.webapp.proxyuser.oozie.groups" : "*",
  1449. "yarn.nodemanager.log-dirs" : "/hadoop/yarn/log",
  1450. "yarn.resourcemanager.fs.state-store.uri" : " ",
  1451. "yarn.acl.enable" : "false",
  1452. "yarn.timeline-service.leveldb-state-store.path" : "/hadoop/yarn/timeline",
  1453. "yarn.timeline-service.store-class" : "org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore",
  1454. "yarn.timeline-service.bind-host" : "0.0.0.0",
  1455. "yarn.timeline-service.ttl-enable" : "true",
  1456. "yarn.resourcemanager.scheduler.address" : "%HOSTGROUP::host_group_1%:8030",
  1457. "yarn.nodemanager.linux-container-executor.resources-handler.class" : "org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler",
  1458. "yarn.resourcemanager.zk-address" : "%HOSTGROUP::host_group_1%:2181",
  1459. "yarn.resourcemanager.state-store.max-completed-applications" : "${yarn.resourcemanager.max-completed-applications}",
  1460. "yarn.nodemanager.bind-host" : "0.0.0.0",
  1461. "yarn.resourcemanager.address" : "%HOSTGROUP::host_group_1%:8050",
  1462. "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size" : "10000",
  1463. "yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size" : "10",
  1464. "yarn.resourcemanager.recovery.enabled" : "true",
  1465. "yarn.application.classpath" : "$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*",
  1466. "yarn.node-labels.enabled" : "false",
  1467. "yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled" : "false",
  1468. "yarn.log-aggregation-enable" : "true",
  1469. "yarn.nodemanager.delete.debug-delay-sec" : "0",
  1470. "hadoop.registry.zk.quorum" : "%HOSTGROUP::host_group_1%:2181",
  1471. "yarn.resourcemanager.am.max-attempts" : "2",
  1472. "yarn.scheduler.maximum-allocation-mb" : "2250",
  1473. "yarn.nodemanager.resource.percentage-physical-cpu-limit" : "80",
  1474. "yarn.timeline-service.leveldb-timeline-store.read-cache-size" : "104857600",
  1475. "yarn.scheduler.maximum-allocation-vcores" : "8",
  1476. "yarn.resourcemanager.zk-timeout-ms" : "10000",
  1477. "yarn.nodemanager.aux-services.mapreduce_shuffle.class" : "org.apache.hadoop.mapred.ShuffleHandler",
  1478. "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size" : "10000",
  1479. "yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms" : "300000",
  1480. "yarn.nodemanager.health-checker.script.timeout-ms" : "60000",
  1481. "yarn.client.nodemanager-connect.retry-interval-ms" : "10000",
  1482. "yarn.nodemanager.log-aggregation.compression-type" : "gz",
  1483. "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb" : "1000",
  1484. "yarn.resourcemanager.system-metrics-publisher.enabled" : "true"
  1485. }
  1486. }
  1487. },
  1488. {
  1489. "gateway-site" : {
  1490. "properties_attributes" : { },
  1491. "properties" : {
  1492. "sun.security.krb5.debug" : "true",
  1493. "gateway.hadoop.kerberos.secured" : "false",
  1494. "gateway.path" : "gateway",
  1495. "gateway.gateway.conf.dir" : "deployments",
  1496. "java.security.auth.login.config" : "/etc/knox/conf/krb5JAASLogin.conf",
  1497. "java.security.krb5.conf" : "/etc/knox/conf/krb5.conf",
  1498. "gateway.port" : "8443"
  1499. }
  1500. }
  1501. },
  1502. {
  1503. "ranger-hive-plugin-properties" : {
  1504. "properties_attributes" : { },
  1505. "properties" : {
  1506. "jdbc.driverClassName" : "org.apache.hive.jdbc.HiveDriver",
  1507. "REPOSITORY_CONFIG_USERNAME" : "hive",
  1508. "common.name.for.certificate" : "",
  1509. "policy_user" : "ambari-qa"
  1510. }
  1511. }
  1512. },
  1513. {
  1514. "tez-env" : {
  1515. "properties_attributes" : { },
  1516. "properties" : {
  1517. "content" : "\n# Tez specific configuration\nexport TEZ_CONF_DIR={{config_dir}}\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\n ",
  1518. "tez_user" : "tez"
  1519. }
  1520. }
  1521. },
  1522. {
  1523. "kafka-env" : {
  1524. "properties_attributes" : { },
  1525. "properties" : {
  1526. "content" : "\n#!/bin/bash\n\n# Set KAFKA specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\nexport PATH=$PATH:$JAVA_HOME/bin\nexport PID_DIR={{kafka_pid_dir}}\nexport LOG_DIR={{kafka_log_dir}}\nexport KAFKA_KERBEROS_PARAMS={{kafka_kerberos_params}}\n# Add kafka sink to classpath and related depenencies\nif [ -e \"/usr/lib/ambari-metrics-kafka-sink/ambari-metrics-kafka-sink.jar\" ]; then\n export CLASSPATH=$CLASSPATH:/usr/lib/ambari-metrics-kafka-sink/ambari-metrics-kafka-sink.jar\n export CLASSPATH=$CLASSPATH:/usr/lib/ambari-metrics-kafka-sink/lib/*\nfi\nif [ -f /etc/kafka/conf/kafka-ranger-env.sh ]; then\n. /etc/kafka/conf/kafka-ranger-env.sh\nfi\n ",
  1527. "kafka_pid_dir" : "/var/run/kafka",
  1528. "is_supported_kafka_ranger" : "True",
  1529. "kafka_log_dir" : "/var/log/kafka",
  1530. "kafka_user" : "kafka"
  1531. }
  1532. }
  1533. },
  1534. {
  1535. "ranger-yarn-security" : {
  1536. "properties_attributes" : { },
  1537. "properties" : {
  1538. "ranger.plugin.yarn.service.name" : "{{repo_name}}",
  1539. "ranger.plugin.yarn.policy.rest.ssl.config.file" : "/etc/yarn/conf/ranger-policymgr-ssl.xml",
  1540. "ranger.plugin.yarn.policy.source.impl" : "org.apache.ranger.admin.client.RangerAdminRESTClient",
  1541. "ranger.plugin.yarn.policy.rest.url" : "{{policymgr_mgr_url}}",
  1542. "ranger.plugin.yarn.policy.pollIntervalMs" : "30000",
  1543. "ranger.plugin.yarn.policy.cache.dir" : "/etc/ranger/{{repo_name}}/policycache"
  1544. }
  1545. }
  1546. },
  1547. {
  1548. "hive-log4j" : {
  1549. "properties_attributes" : { },
  1550. "properties" : {
  1551. "content" : "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Define some default values that can be overridden by system properties\nhive.log.threshold=ALL\nhive.root.logger=INFO,DRFA\nhive.log.dir=${java.io.tmpdir}/${user.name}\nhive.log.file=hive.log\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hive.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshold=${hive.log.threshold}\n\n#\n# Daily Rolling File Appender\n#\n# Use the PidDailyerRollingFileAppend class instead if you want to use separate log files\n# for different CLI session.\n#\n# log4j.appender.DRFA=org.apache.hadoop.hive.ql.log.PidDailyRollingFileAppender\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\n\nlog4j.appender.DRFA.File=${hive.log.dir}/${hive.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n\nlog4j.appender.console.encoding=UTF-8\n\n#custom logging levels\n#log4j.logger.xxx=DEBUG\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter\n\n\nlog4j.category.DataNucleus=ERROR,DRFA\nlog4j.category.Datastore=ERROR,DRFA\nlog4j.category.Datastore.Schema=ERROR,DRFA\nlog4j.category.JPOX.Datastore=ERROR,DRFA\nlog4j.category.JPOX.Plugin=ERROR,DRFA\nlog4j.category.JPOX.MetaData=ERROR,DRFA\nlog4j.category.JPOX.Query=ERROR,DRFA\nlog4j.category.JPOX.General=ERROR,DRFA\nlog4j.category.JPOX.Enhancer=ERROR,DRFA\n\n\n# Silence useless ZK logs\nlog4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,DRFA\nlog4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,DRFA\n "
  1552. }
  1553. }
  1554. },
  1555. {
  1556. "zoo.cfg" : {
  1557. "properties_attributes" : { },
  1558. "properties" : {
  1559. "initLimit" : "10",
  1560. "clientPort" : "2181",
  1561. "autopurge.snapRetainCount" : "30",
  1562. "tickTime" : "2000",
  1563. "syncLimit" : "5",
  1564. "autopurge.purgeInterval" : "24",
  1565. "dataDir" : "/hadoop/zookeeper"
  1566. }
  1567. }
  1568. },
  1569. {
  1570. "falcon-startup.properties" : {
  1571. "properties_attributes" : { },
  1572. "properties" : {
  1573. "*.config.store.uri" : "file:///hadoop/falcon/store",
  1574. "*.falcon.security.authorization.admin.groups" : "falcon",
  1575. "*.falcon.http.authentication.type" : "simple",
  1576. "*.max.retry.failure.count" : "1",
  1577. "*.falcon.security.authorization.enabled" : "false",
  1578. "*.entity.topic" : "FALCON.ENTITY.TOPIC",
  1579. "*.falcon.graph.serialize.path" : "/hadoop/falcon/data/lineage",
  1580. "prism.application.services" : "org.apache.falcon.entity.store.ConfigurationStore",
  1581. "*.application.services" : "org.apache.falcon.security.AuthenticationInitializationService,\\\n org.apache.falcon.workflow.WorkflowJobEndNotificationService, \\\n org.apache.falcon.service.ProcessSubscriberService,\\\n org.apache.falcon.entity.store.ConfigurationStore,\\\n org.apache.falcon.rerun.service.RetryService,\\\n org.apache.falcon.rerun.service.LateRunService,\\\n org.apache.falcon.service.LogCleanupService,\\\n org.apache.falcon.metadata.MetadataMappingService\n ",
  1582. "*.journal.impl" : "org.apache.falcon.transaction.SharedFileSystemJournal",
  1583. "*.falcon.graph.preserve.history" : "false",
  1584. "*.configstore.listeners" : "org.apache.falcon.entity.v0.EntityGraph,\\\n org.apache.falcon.entity.ColoClusterRelation,\\\n org.apache.falcon.group.FeedGroupMap,\\\n org.apache.falcon.service.SharedLibraryHostingService\n ",
  1585. "*.internal.queue.size" : "1000",
  1586. "*.falcon.enableTLS" : "false",
  1587. "*.falcon.authentication.type" : "simple",
  1588. "*.falcon.security.authorization.provider" : "org.apache.falcon.security.DefaultAuthorizationProvider",
  1589. "*.falcon.http.authentication.kerberos.name.rules" : "DEFAULT",
  1590. "*.falcon.http.authentication.token.validity" : "36000",
  1591. "*.system.lib.location" : "${falcon.home}/server/webapp/${falcon.app.type}/WEB-INF/lib",
  1592. "*.falcon.graph.storage.directory" : "/hadoop/falcon/data/lineage/graphdb",
  1593. "*.retry.recorder.path" : "${falcon.log.dir}/retry",
  1594. "*.falcon.http.authentication.blacklisted.users" : "",
  1595. "*.falcon.http.authentication.cookie.domain" : "EXAMPLE.COM",
  1596. "*.falcon.cleanup.service.frequency" : "days(1)",
  1597. "*.falcon.security.authorization.admin.users" : "falcon,ambari-qa",
  1598. "*.ProcessInstanceManager.impl" : "org.apache.falcon.resource.InstanceManager",
  1599. "*.broker.ttlInMins" : "4320",
  1600. "*.shared.libs" : "activemq-core,ant,geronimo-j2ee-management,jms,json-simple,oozie-client,spring-jms,commons-lang3,commons-el",
  1601. "*.oozie.process.workflow.builder" : "org.apache.falcon.workflow.OozieProcessWorkflowBuilder",
  1602. "*.catalog.service.impl" : "org.apache.falcon.catalog.HiveCatalogService",
  1603. "*.falcon.http.authentication.simple.anonymous.allowed" : "true",
  1604. "*.ConfigSyncService.impl" : "org.apache.falcon.resource.ConfigSyncService",
  1605. "*.domain" : "${falcon.app.type}",
  1606. "*.broker.url" : "tcp://%HOSTGROUP::host_group_1%:61616",
  1607. "*.oozie.feed.workflow.builder" : "org.apache.falcon.workflow.OozieFeedWorkflowBuilder",
  1608. "*.falcon.graph.blueprints.graph" : "com.thinkaurelius.titan.core.TitanFactory",
  1609. "*.falcon.graph.storage.backend" : "berkeleyje",
  1610. "*.broker.impl.class" : "org.apache.activemq.ActiveMQConnectionFactory",
  1611. "*.SchedulableEntityManager.impl" : "org.apache.falcon.resource.SchedulableEntityManager",
  1612. "prism.configstore.listeners" : "org.apache.falcon.entity.v0.EntityGraph,\\\n org.apache.falcon.entity.ColoClusterRelation,\\\n org.apache.falcon.group.FeedGroupMap\n ",
  1613. "*.falcon.security.authorization.superusergroup" : "falcon",
  1614. "*.falcon.http.authentication.signature.secret" : "falcon",
  1615. "*.hive.shared.libs" : "hive-exec,hive-metastore,hive-common,hive-service,hive-hcatalog-server-extensions,\\\nhive-hcatalog-core,hive-jdbc,hive-webhcat-java-client",
  1616. "*.workflow.engine.impl" : "org.apache.falcon.workflow.engine.OozieWorkflowEngine"
  1617. }
  1618. }
  1619. },
  1620. {
  1621. "hadoop-env" : {
  1622. "properties_attributes" : { },
  1623. "properties" : {
  1624. "namenode_opt_permsize" : "128m",
  1625. "hadoop_heapsize" : "250",
  1626. "namenode_opt_maxnewsize" : "100m",
  1627. "namenode_heapsize" : "250m",
  1628. "keyserver_host" : " ",
  1629. "nfsgateway_heapsize" : "1024",
  1630. "dfs.datanode.data.dir.mount.file" : "/etc/hadoop/conf/dfs_data_dir_mount.hist",
  1631. "content" : "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\n\n{% if java_version < 8 %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n{% else %}\nSHARED_HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT\"\nexport HADOOP_NAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-namenode/bin/kill-name-node\\\" -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=\"${SHARED_HADOOP_NAMENODE_OPTS} -XX:OnOutOfMemoryError=\\\"/usr/hdp/current/hadoop-hdfs-secondarynamenode/bin/kill-secondary-name-node\\\" ${HADOOP_SECONDARYNAMENODE_OPTS}\"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n{% endif %}\n\nHADOOP_NFS3_OPTS=\"-Xmx{{nfsgateway_heapsize}}m -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_NFS3_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. 
See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n if [ -d \"/usr/hdp/current/tez-client/conf/\" ]; then\n # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/usr/hdp/current/tez-client/conf/\n fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n ",
  1632. "namenode_opt_newsize" : "50m",
  1633. "hdfs_user" : "hdfs",
  1634. "dtnode_heapsize" : "250m",
  1635. "namenode_opt_maxpermsize" : "256m",
  1636. "keyserver_port" : " ",
  1637. "hdfs_log_dir_prefix" : "/var/log/hadoop",
  1638. "hadoop_pid_dir_prefix" : "/var/run/hadoop",
  1639. "hadoop_root_logger" : "INFO,RFA",
  1640. "proxyuser_group" : "users"
  1641. }
  1642. }
  1643. },
  1644. {
  1645. "spark-env" : {
  1646. "properties_attributes" : { },
  1647. "properties" : {
  1648. "content" : "\n#!/usr/bin/env bash\n\n# This file is sourced when running various Spark programs.\n# Copy it as spark-env.sh and edit that to configure Spark for your site.\n\n# Options read in YARN client mode\n#SPARK_EXECUTOR_INSTANCES=\"2\" #Number of workers to start (Default: 2)\n#SPARK_EXECUTOR_CORES=\"1\" #Number of cores for the workers (Default: 1).\n#SPARK_EXECUTOR_MEMORY=\"1G\" #Memory per Worker (e.g. 1000M, 2G) (Default: 1G)\n#SPARK_DRIVER_MEMORY=\"512 Mb\" #Memory for Master (e.g. 1000M, 2G) (Default: 512 Mb)\n#SPARK_YARN_APP_NAME=\"spark\" #The name of your application (Default: Spark)\n#SPARK_YARN_QUEUE=\"~@~Xdefault~@~Y\" #The hadoop queue to use for allocation requests (Default: @~Xdefault~@~Y)\n#SPARK_YARN_DIST_FILES=\"\" #Comma separated list of files to be distributed with the job.\n#SPARK_YARN_DIST_ARCHIVES=\"\" #Comma separated list of archives to be distributed with the job.\n\n# Generic options for the daemons used in the standalone deploy mode\n\n# Alternate conf dir. (Default: ${SPARK_HOME}/conf)\nexport SPARK_CONF_DIR=${SPARK_HOME:-{{spark_home}}}/conf\n\n# Where log files are stored.(Default:${SPARK_HOME}/logs)\n#export SPARK_LOG_DIR=${SPARK_HOME:-{{spark_home}}}/logs\nexport SPARK_LOG_DIR={{spark_log_dir}}\n\n# Where the pid file is stored. (Default: /tmp)\nexport SPARK_PID_DIR={{spark_pid_dir}}\n\n# A string representing this instance of spark.(Default: $USER)\nSPARK_IDENT_STRING=$USER\n\n# The scheduling priority for daemons. (Default: 0)\nSPARK_NICENESS=0\n\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java_home}}\n\nif [ -d \"/etc/tez/conf/\" ]; then\n export TEZ_CONF_DIR=/etc/tez/conf\nelse\n export TEZ_CONF_DIR=\nfi\n\n",
  1649. "spark_pid_dir" : "/var/run/spark",
  1650. "spark_log_dir" : "/var/log/spark",
  1651. "spark_group" : "spark",
  1652. "spark_user" : "spark"
  1653. }
  1654. }
  1655. },
  1656. {
  1657. "hbase-site" : {
  1658. "properties_attributes" : { },
  1659. "properties" : {
  1660. "hbase.hregion.memstore.flush.size" : "134217728",
  1661. "hbase.hstore.compactionThreshold" : "3",
  1662. "hbase.local.dir" : "${hbase.tmp.dir}/local",
  1663. "hbase.security.authorization" : "true",
  1664. "hbase.region.server.rpc.scheduler.factory.class" : "",
  1665. "hbase.master.port" : "16000",
  1666. "hbase.rpc.engine" : "org.apache.hadoop.hbase.ipc.SecureRpcEngine",
  1667. "hbase.client.scanner.caching" : "100",
  1668. "hbase.regionserver.port" : "16020",
  1669. "hbase.coprocessor.region.classes" : "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor",
  1670. "dfs.domain.socket.path" : "/var/lib/hadoop-hdfs/dn_socket",
  1671. "hbase.rootdir" : "hdfs://%HOSTGROUP::host_group_1%:8020/apps/hbase/data",
  1672. "phoenix.query.timeoutMs" : "60000",
  1673. "hbase.regionserver.info.port" : "16030",
  1674. "hbase.hregion.majorcompaction.jitter" : "0.50",
  1675. "hbase.tmp.dir" : "/tmp/hbase-${user.name}",
  1676. "hbase.hregion.memstore.mslab.enabled" : "true",
  1677. "hbase.rpc.controllerfactory.class" : "",
  1678. "hbase.client.retries.number" : "35",
  1679. "hbase.defaults.for.version.skip" : "true",
  1680. "hbase.rpc.timeout" : "90000",
  1681. "zookeeper.session.timeout" : "90000",
  1682. "hbase.zookeeper.property.clientPort" : "2181",
  1683. "hbase.bulkload.staging.dir" : "/apps/hbase/staging",
  1684. "phoenix.functions.allowUserDefinedFunctions" : " ",
  1685. "hbase.master.info.bindAddress" : "0.0.0.0",
  1686. "hbase.hstore.compaction.max" : "10",
  1687. "zookeeper.znode.parent" : "/hbase-unsecure",
  1688. "hbase.coprocessor.regionserver.classes" : "",
  1689. "hbase.bucketcache.ioengine" : "",
  1690. "hbase.security.authentication" : "simple",
  1691. "hbase.regionserver.global.memstore.upperLimit" : "0.4",
  1692. "hbase.hregion.majorcompaction" : "604800000",
  1693. "hbase.client.keyvalue.maxsize" : "1048576",
  1694. "hbase.master.info.port" : "16010",
  1695. "hbase.cluster.distributed" : "true",
  1696. "hbase.regionserver.global.memstore.lowerLimit" : "0.38",
  1697. "hbase.regionserver.handler.count" : "30",
  1698. "hbase.bucketcache.size" : "",
  1699. "hbase.coprocessor.master.classes" : "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor",
  1700. "hbase.superuser" : "hbase",
  1701. "hbase.hstore.blockingStoreFiles" : "10",
  1702. "hfile.block.cache.size" : "0.40",
  1703. "hbase.zookeeper.useMulti" : "true",
  1704. "hbase_regionserver_heapsize" : "250",
  1705. "hbase.regionserver.global.memstore.size" : "${hbase.regionserver.global.memstore.upperLimit}",
  1706. "hbase.hregion.max.filesize" : "10737418240",
  1707. "hbase.hregion.memstore.block.multiplier" : "4",
  1708. "hbase.regionserver.wal.codec" : "org.apache.hadoop.hbase.regionserver.wal.WALCellCodec",
  1709. "hbase_master_heapsize" : "250",
  1710. "hbase.rpc.protection" : "PRIVACY",
  1711. "hbase.bucketcache.percentage.in.combinedcache" : "",
  1712. "hbase.zookeeper.quorum" : "%HOSTGROUP::host_group_1%"
  1713. }
  1714. }
  1715. },
  1716. {
  1717. "ranger-yarn-policymgr-ssl" : {
  1718. "properties_attributes" : { },
  1719. "properties" : {
  1720. "xasecure.policymgr.clientssl.truststore.credential.file" : "jceks://file{{credential_file}}",
  1721. "xasecure.policymgr.clientssl.keystore.credential.file" : "jceks://file{{credential_file}}",
  1722. "xasecure.policymgr.clientssl.truststore" : "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
  1723. "xasecure.policymgr.clientssl.keystore" : "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks"
  1724. }
  1725. }
  1726. },
  1727. {
  1728. "ams-env" : {
  1729. "properties_attributes" : { },
  1730. "properties" : {
  1731. "content" : "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# Collector Log directory for log4j\nexport AMS_COLLECTOR_LOG_DIR={{ams_collector_log_dir}}\n\n# Monitor Log directory for outfile\nexport AMS_MONITOR_LOG_DIR={{ams_monitor_log_dir}}\n\n# Collector pid directory\nexport AMS_COLLECTOR_PID_DIR={{ams_collector_pid_dir}}\n\n# Monitor pid directory\nexport AMS_MONITOR_PID_DIR={{ams_monitor_pid_dir}}\n\n# AMS HBase pid directory\nexport AMS_HBASE_PID_DIR={{hbase_pid_dir}}\n\n# AMS Collector heapsize\nexport AMS_COLLECTOR_HEAPSIZE={{metrics_collector_heapsize}}\n\n# AMS Collector options\nexport AMS_COLLECTOR_OPTS=\"-Djava.library.path=/usr/lib/ams-hbase/lib/hadoop-native -Xmx$AMS_COLLECTOR_HEAPSIZE \"\n{% if security_enabled %}\nexport AMS_COLLECTOR_OPTS=\"$AMS_COLLECTOR_OPTS -Djava.security.auth.login.config={{ams_collector_jaas_config_file}}\"\n{% endif %}\n\n ",
  1732. "metrics_monitor_log_dir" : "/var/log/ambari-metrics-monitor",
  1733. "ambari_metrics_user" : "ams",
  1734. "metrics_collector_heapsize" : "512m",
  1735. "metrics_collector_log_dir" : "/var/log/ambari-metrics-collector",
  1736. "metrics_monitor_pid_dir" : "/var/run/ambari-metrics-monitor",
  1737. "metrics_collector_pid_dir" : "/var/run/ambari-metrics-collector"
  1738. }
  1739. }
  1740. },
  1741. {
  1742. "hdfs-site" : {
  1743. "properties_attributes" : {
  1744. "final" : {
  1745. "dfs.support.append" : "true",
  1746. "dfs.datanode.data.dir" : "true",
  1747. "dfs.namenode.http-address" : "true",
  1748. "dfs.webhdfs.enabled" : "true",
  1749. "dfs.datanode.failed.volumes.tolerated" : "true",
  1750. "dfs.namenode.name.dir" : "true"
  1751. }
  1752. },
  1753. "properties" : {
  1754. "dfs.datanode.data.dir" : "/hadoop/hdfs/data",
  1755. "dfs.namenode.https-address" : "%HOSTGROUP::host_group_1%:50470",
  1756. "dfs.namenode.checkpoint.txns" : "1000000",
  1757. "dfs.datanode.max.xcievers" : "1024",
  1758. "dfs.namenode.audit.log.async" : "true",
  1759. "dfs.datanode.https.address" : "0.0.0.0:50475",
  1760. "dfs.journalnode.edits.dir" : "/hadoop/hdfs/journalnode",
  1761. "dfs.datanode.failed.volumes.tolerated" : "0",
  1762. "dfs.datanode.max.transfer.threads" : "1024",
  1763. "dfs.datanode.du.reserved" : "1073741824",
  1764. "dfs.support.append" : "true",
  1765. "dfs.namenode.handler.count" : "100",
  1766. "dfs.nfs.exports.allowed.hosts" : "* rw",
  1767. "dfs.namenode.checkpoint.dir" : "/hadoop/hdfs/namesecondary",
  1768. "nfs.exports.allowed.hosts" : "* rw",
  1769. "dfs.encryption.key.provider.uri" : "",
  1770. "dfs.domain.socket.path" : "/var/lib/hadoop-hdfs/dn_socket",
  1771. "dfs.namenode.name.dir.restore" : "true",
  1772. "dfs.namenode.write.stale.datanode.ratio" : "1.0f",
  1773. "dfs.block.size" : "34217472",
  1774. "dfs.namenode.checkpoint.period" : "21600",
  1775. "dfs.namenode.avoid.read.stale.datanode" : "true",
  1776. "dfs.block.access.token.enable" : "false",
  1777. "dfs.replication" : "1",
  1778. "dfs.journalnode.https-address" : "0.0.0.0:8481",
  1779. "dfs.namenode.stale.datanode.interval" : "30000",
  1780. "dfs.client.read.shortcircuit.streams.cache.size" : "4096",
  1781. "dfs.permissions.enabled" : "true",
  1782. "dfs.namenode.secondary.http-address" : "%HOSTGROUP::host_group_1%:50090",
  1783. "dfs.namenode.checkpoint.edits.dir" : "${dfs.namenode.checkpoint.dir}",
  1784. "dfs.journalnode.http-address" : "0.0.0.0:8480",
  1785. "dfs.datanode.balance.bandwidthPerSec" : "6250000",
  1786. "dfs.namenode.http-address" : "%HOSTGROUP::host_group_1%:50070",
  1787. "dfs.permissions.superusergroup" : "hdfs",
  1788. "dfs.namenode.rpc-address" : "%HOSTGROUP::host_group_1%:8020",
  1789. "dfs.namenode.fslock.fair" : "false",
  1790. "dfs.datanode.http.address" : "0.0.0.0:50075",
  1791. "dfs.namenode.avoid.write.stale.datanode" : "true",
  1792. "dfs.datanode.address" : "0.0.0.0:50010",
  1793. "dfs.namenode.startup.delay.block.deletion.sec" : "3600",
  1794. "dfs.datanode.data.dir.perm" : "750",
  1795. "dfs.nfs3.dump.dir" : "/tmp/.hdfs-nfs",
  1796. "dfs.http.policy" : "HTTP_ONLY",
  1797. "nfs.file.dump.dir" : "/tmp/.hdfs-nfs",
  1798. "dfs.heartbeat.interval" : "3",
  1799. "dfs.cluster.administrators" : " hdfs",
  1800. "dfs.namenode.name.dir" : "/hadoop/hdfs/namenode",
  1801. "dfs.hosts.exclude" : "/etc/hadoop/conf/dfs.exclude",
  1802. "dfs.permissions" : "true",
  1803. "dfs.webhdfs.enabled" : "true",
  1804. "dfs.blocksize" : "134217728",
  1805. "fs.permissions.umask-mode" : "022",
  1806. "dfs.namenode.safemode.threshold-pct" : "0.999",
  1807. "dfs.datanode.ipc.address" : "0.0.0.0:8010",
  1808. "dfs.client.retry.policy.enabled" : "true",
  1809. "dfs.blockreport.initialDelay" : "120",
  1810. "dfs.namenode.inode.attributes.provider.class" : "org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer",
  1811. "dfs.https.port" : "50470",
  1812. "dfs.client.read.shortcircuit" : "true",
  1813. "dfs.replication.max" : "50",
  1814. "dfs.namenode.accesstime.precision" : "3600000"
  1815. }
  1816. }
  1817. },
  1818. {
  1819. "ranger-storm-audit" : {
  1820. "properties_attributes" : { },
  1821. "properties" : {
  1822. "xasecure.audit.destination.solr.batch.filespool.dir" : "/var/log/storm/audit/solr/spool",
  1823. "xasecure.audit.destination.hdfs.dir" : "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit",
  1824. "xasecure.audit.destination.solr.urls" : "{{ranger_audit_solr_urls}}",
  1825. "xasecure.audit.destination.hdfs.batch.filespool.dir" : "/var/log/storm/audit/hdfs/spool",
  1826. "xasecure.audit.is.enabled" : "true",
  1827. "xasecure.audit.destination.hdfs" : "true",
  1828. "xasecure.audit.credential.provider.file" : "jceks://file{{credential_file}}",
  1829. "xasecure.audit.provider.summary.enabled" : "false",
  1830. "xasecure.audit.destination.db.jdbc.driver" : "{{jdbc_driver}}",
  1831. "xasecure.audit.destination.db.batch.filespool.dir" : "/var/log/storm/audit/db/spool",
  1832. "xasecure.audit.destination.solr" : "false",
  1833. "xasecure.audit.destination.db.user" : "{{xa_audit_db_user}}",
  1834. "xasecure.audit.destination.db" : "false",
  1835. "xasecure.audit.destination.db.jdbc.url" : "{{audit_jdbc_url}}",
  1836. "xasecure.audit.destination.solr.zookeepers" : "none"
  1837. }
  1838. }
  1839. },
  1840. {
  1841. "ranger-kafka-policymgr-ssl" : {
  1842. "properties_attributes" : { },
  1843. "properties" : {
  1844. "xasecure.policymgr.clientssl.truststore.credential.file" : "jceks://file/{{credential_file}}",
  1845. "xasecure.policymgr.clientssl.keystore.credential.file" : "jceks://file/{{credential_file}}",
  1846. "xasecure.policymgr.clientssl.truststore" : "/usr/hdp/current/kafka-broker/config/ranger-plugin-truststore.jks",
  1847. "xasecure.policymgr.clientssl.keystore" : "/usr/hdp/current/kafka-broker/config/ranger-plugin-keystore.jks"
  1848. }
  1849. }
  1850. },
  1851. {
  1852. "oozie-site" : {
  1853. "properties_attributes" : { },
  1854. "properties" : {
  1855. "oozie.service.ProxyUserService.proxyuser.hue.hosts" : "*",
  1856. "oozie.service.AuthorizationService.security.enabled" : "true",
  1857. "oozie.service.AuthorizationService.authorization.enabled" : "false",
  1858. "oozie.service.ProxyUserService.proxyuser.falcon.groups" : "*",
  1859. "oozie.authentication.simple.anonymous.allowed" : "true",
  1860. "oozie.service.HadoopAccessorService.kerberos.enabled" : "false",
  1861. "oozie.db.schema.name" : "oozie",
  1862. "oozie.authentication.type" : "simple",
  1863. "oozie.services.ext" : "\n org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService\n ",
  1864. "oozie.service.JPAService.jdbc.driver" : "org.apache.derby.jdbc.EmbeddedDriver",
  1865. "oozie.service.ProxyUserService.proxyuser.falcon.hosts" : "*",
  1866. "oozie.credentials.credentialclasses" : "hcat=org.apache.oozie.action.hadoop.HCatCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials",
  1867. "oozie.service.ProxyUserService.proxyuser.hue.groups" : "*",
  1868. "oozie.base.url" : "http://%HOSTGROUP::host_group_1%:11000/oozie",
  1869. "oozie.service.JPAService.jdbc.username" : "oozie",
  1870. "oozie.service.HadoopAccessorService.hadoop.configurations" : "*=/etc/hadoop/conf",
  1871. "oozie.authentication.kerberos.name.rules" : "\n ",
  1872. "oozie.service.URIHandlerService.uri.handlers" : "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler\n "
  1873. }
  1874. }
  1875. },
  1876. {
  1877. "zookeeper-log4j" : {
  1878. "properties_attributes" : { },
  1879. "properties" : {
  1880. "content" : "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE\n\n# Example with rolling log file\n#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n# Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j's NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n\n "
  1881. }
  1882. }
  1883. },
  1884. {
  1885. "oozie-log4j" : {
  1886. "properties_attributes" : { },
  1887. "properties" : {
  1888. "content" : "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License. See accompanying LICENSE file.\n#\n\n# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time\n# XLogService sets its value to '${oozie.home}/logs'\n\nlog4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.oozie.File=${oozie.log.dir}/oozie.log\nlog4j.appender.oozie.Append=true\nlog4j.appender.oozie.layout=org.apache.log4j.PatternLayout\nlog4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n\n\nlog4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.oozieops.DatePattern='.'yyyy-MM-dd\nlog4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log\nlog4j.appender.oozieops.Append=true\nlog4j.appender.oozieops.layout=org.apache.log4j.PatternLayout\nlog4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n\n\nlog4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd\nlog4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log\nlog4j.appender.oozieinstrumentation.Append=true\nlog4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout\nlog4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n\n\nlog4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd\nlog4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log\nlog4j.appender.oozieaudit.Append=true\nlog4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout\nlog4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n\n\nlog4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.openjpa.DatePattern='.'yyyy-MM-dd\nlog4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log\nlog4j.appender.openjpa.Append=true\nlog4j.appender.openjpa.layout=org.apache.log4j.PatternLayout\nlog4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n\n\nlog4j.logger.openjpa=INFO, openjpa\nlog4j.logger.oozieops=INFO, oozieops\nlog4j.logger.oozieinstrumentation=ALL, oozieinstrumentation\nlog4j.logger.oozieaudit=ALL, oozieaudit\nlog4j.logger.org.apache.oozie=INFO, oozie\nlog4j.logger.org.apache.hadoop=WARN, oozie\nlog4j.logger.org.mortbay=WARN, oozie\nlog4j.logger.org.hsqldb=WARN, oozie\nlog4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie\n "
  1889. }
  1890. }
  1891. },
  1892. {
  1893. "ranger-hbase-policymgr-ssl" : {
  1894. "properties_attributes" : { },
  1895. "properties" : {
  1896. "xasecure.policymgr.clientssl.truststore.credential.file" : "jceks://file{{credential_file}}",
  1897. "xasecure.policymgr.clientssl.keystore.credential.file" : "jceks://file{{credential_file}}",
  1898. "xasecure.policymgr.clientssl.truststore" : "/usr/hdp/current/hbase-client/conf/ranger-plugin-truststore.jks",
  1899. "xasecure.policymgr.clientssl.keystore" : "/usr/hdp/current/hbase-client/conf/ranger-plugin-keystore.jks"
  1900. }
  1901. }
  1902. },
  1903. {
  1904. "storm-site" : {
  1905. "properties_attributes" : { },
  1906. "properties" : {
  1907. "drpc.worker.threads" : "64",
  1908. "topology.state.synchronization.timeout.secs" : "60",
  1909. "topology.executor.send.buffer.size" : "1024",
  1910. "topology.worker.childopts" : "null",
  1911. "topology.min.replication.count.default" : "1",
  1912. "storm.messaging.netty.max_retries" : "30",
  1913. "topology.receiver.buffer.size" : "8",
  1914. "topology.max.spout.pending" : "null",
  1915. "storm.log.dir" : "{{log_dir}}",
  1916. "storm.zookeeper.retry.intervalceiling.millis" : "30000",
  1917. "zmq.hwm" : "0",
  1918. "topology.message.timeout.secs" : "30",
  1919. "topology.max.task.parallelism" : "null",
  1920. "topology.spout.wait.strategy" : "backtype.storm.spout.SleepSpoutWaitStrategy",
  1921. "supervisor.slots.ports" : "[6700, 6701]",
  1922. "storm.zookeeper.servers" : "['%HOSTGROUP::host_group_1%']",
  1923. "transactional.zookeeper.servers" : "null",
  1924. "storm.cluster.mode" : "distributed",
  1925. "ui.childopts" : "-Xmx220m",
  1926. "topology.disruptor.wait.strategy" : "com.lmax.disruptor.BlockingWaitStrategy",
  1927. "topology.max.replication.wait.time.sec.default" : "60",
  1928. "topology.enable.message.timeouts" : "true",
  1929. "logviewer.appender.name" : "A1",
  1930. "storm.local.mode.zmq" : "false",
  1931. "drpc.request.timeout.secs" : "600",
  1932. "storm.zookeeper.root" : "/storm",
  1933. "storm.thrift.transport" : "{{storm_thrift_transport}}",
  1934. "topology.debug" : "false",
  1935. "nimbus.thrift.port" : "6627",
  1936. "topology.sleep.spout.wait.strategy.time.ms" : "1",
  1937. "storm.zookeeper.connection.timeout" : "15000",
  1938. "nimbus.reassign" : "true",
  1939. "supervisor.heartbeat.frequency.secs" : "5",
  1940. "zmq.threads" : "1",
  1941. "topology.worker.shared.thread.pool.size" : "4",
  1942. "topology.acker.executors" : "null",
  1943. "dev.zookeeper.path" : "/tmp/dev-storm-zookeeper",
  1944. "transactional.zookeeper.root" : "/transactional",
  1945. "topology.max.replication.wait.time.sec" : "{{actual_topology_max_replication_wait_time_sec}}",
  1946. "worker.heartbeat.frequency.secs" : "1",
  1947. "storm.local.dir" : "/hadoop/storm",
  1948. "nimbus.thrift.max_buffer_size" : "1048576",
  1949. "nimbus.supervisor.timeout.secs" : "60",
  1950. "task.refresh.poll.secs" : "10",
  1951. "supervisor.worker.timeout.secs" : "30",
  1952. "topology.tick.tuple.freq.secs" : "null",
  1953. "storm.messaging.netty.client_worker_threads" : "1",
  1954. "nimbus.file.copy.expiration.secs" : "600",
  1955. "nimbus.task.timeout.secs" : "30",
  1956. "topology.builtin.metrics.bucket.size.secs" : "60",
  1957. "storm.messaging.transport" : "backtype.storm.messaging.netty.Context",
  1958. "topology.transfer.buffer.size" : "1024",
  1959. "drpc.childopts" : "-Xmx220m",
  1960. "topology.workers" : "1",
  1961. "transactional.zookeeper.port" : "null",
  1962. "supervisor.monitor.frequency.secs" : "3",
  1963. "storm.messaging.netty.server_worker_threads" : "1",
  1964. "topology.max.error.report.per.interval" : "5",
  1965. "storm.messaging.netty.buffer_size" : "5242880",
  1966. "storm.messaging.netty.min_wait_ms" : "100",
  1967. "nimbus.childopts" : "-Xmx220m -javaagent:/usr/hdp/current/storm-client/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=%HOSTGROUP::host_group_1%,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-client/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM",
  1968. "topology.skip.missing.kryo.registrations" : "false",
  1969. "topology.trident.batch.emit.interval.millis" : "500",
  1970. "topology.min.replication.count" : "{{actual_topology_min_replication_count}}",
  1971. "_storm.thrift.nonsecure.transport" : "backtype.storm.security.auth.SimpleTransportPlugin",
  1972. "topology.stats.sample.rate" : "0.05",
  1973. "storm.zookeeper.port" : "2181",
  1974. "supervisor.worker.start.timeout.secs" : "120",
  1975. "nimbus.cleanup.inbox.freq.secs" : "600",
  1976. "topology.kryo.factory" : "backtype.storm.serialization.DefaultKryoFactory",
  1977. "ui.filter" : "null",
  1978. "task.heartbeat.frequency.secs" : "3",
  1979. "topology.fall.back.on.java.serialization" : "true",
  1980. "logviewer.port" : "8005",
  1981. "ui.port" : "8744",
  1982. "storm.zookeeper.retry.times" : "5",
  1983. "topology.tuple.serializer" : "backtype.storm.serialization.types.ListDelegateSerializer",
  1984. "storm.messaging.netty.max_wait_ms" : "1000",
  1985. "topology.error.throttle.interval.secs" : "10",
  1986. "topology.optimize" : "true",
  1987. "nimbus.task.launch.secs" : "120",
  1988. "drpc.port" : "3772",
  1989. "storm.zookeeper.session.timeout" : "20000",
  1990. "drpc.invocations.port" : "3773",
  1991. "logviewer.childopts" : "-Xmx128m _JAAS_PLACEHOLDER",
  1992. "java.library.path" : "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm-client/lib",
  1993. "zmq.linger.millis" : "5000",
  1994. "nimbus.topology.validator" : "backtype.storm.nimbus.DefaultTopologyValidator",
  1995. "nimbus.monitor.freq.secs" : "10",
  1996. "_storm.min.ruid" : "null",
  1997. "topology.executor.receive.buffer.size" : "1024",
  1998. "storm.zookeeper.retry.interval" : "1000",
  1999. "_storm.thrift.secure.transport" : "backtype.storm.security.auth.kerberos.KerberosSaslTransportPlugin",
  2000. "nimbus.inbox.jar.expiration.secs" : "3600",
  2001. "drpc.queue.size" : "128",
  2002. "nimbus.seeds" : "[%HOSTGROUP::host_group_1%]"
  2003. }
  2004. }
  2005. },
  2006. {
  2007. "webhcat-env" : {
  2008. "properties_attributes" : { },
  2009. "properties" : {
  2010. "content" : "\n# The file containing the running pid\nPID_FILE={{webhcat_pid_file}}\n\nTEMPLETON_LOG_DIR={{templeton_log_dir}}/\n\n\nWEBHCAT_LOG_DIR={{templeton_log_dir}}/\n\n# The console error log\nERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log\n\n# The console log\nCONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log\n\n#TEMPLETON_JAR=templeton_jar_name\n\n#HADOOP_PREFIX=hadoop_prefix\n\n#HCAT_PREFIX=hive_prefix\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n "
  2011. }
  2012. }
  2013. },
  2014. {
  2015. "webhcat-site" : {
  2016. "properties_attributes" : { },
  2017. "properties" : {
  2018. "webhcat.proxyuser.hue.hosts" : "*",
  2019. "templeton.storage.class" : "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage",
  2020. "templeton.jar" : "/usr/hdp/current/hive-webhcat/share/webhcat/svr/lib/hive-webhcat-*.jar",
  2021. "templeton.sqoop.path" : "sqoop.tar.gz/sqoop/bin/sqoop",
  2022. "templeton.streaming.jar" : "hdfs:///hdp/apps/${hdp.version}/mapreduce/hadoop-streaming.jar",
  2023. "templeton.override.enabled" : "false",
  2024. "templeton.hive.home" : "hive.tar.gz/hive",
  2025. "webhcat.proxyuser.hue.groups" : "*",
  2026. "templeton.hive.extra.files" : "/etc/tez/conf/tez-site.xml,/usr/hdp/current/tez-client,/usr/hdp/current/tez-client/lib",
  2027. "templeton.zookeeper.hosts" : "%HOSTGROUP::host_group_1%:2181",
  2028. "templeton.hive.path" : "hive.tar.gz/hive/bin/hive",
  2029. "templeton.hadoop.conf.dir" : "/etc/hadoop/conf",
  2030. "webhcat.proxyuser.hcat.hosts" : "*",
  2031. "templeton.sqoop.archive" : "hdfs:///hdp/apps/${hdp.version}/sqoop/sqoop.tar.gz",
  2032. "templeton.hcat" : "/usr/hdp/current/hive-client/bin/hcat",
  2033. "webhcat.proxyuser.hcat.groups" : "*",
  2034. "templeton.pig.path" : "pig.tar.gz/pig/bin/pig",
  2035. "templeton.hive.properties" : "hive.metastore.local=false,hive.metastore.uris=thrift://%HOSTGROUP::host_group_1%:9083,hive.metastore.sasl.enabled=false,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse",
  2036. "templeton.exec.timeout" : "60000",
  2037. "templeton.hive.archive" : "hdfs:///hdp/apps/${hdp.version}/hive/hive.tar.gz",
  2038. "templeton.port" : "50111",
  2039. "templeton.sqoop.home" : "sqoop.tar.gz/sqoop",
  2040. "templeton.libjars" : "/usr/hdp/current/zookeeper-client/zookeeper.jar,/usr/hdp/current/hive-server2/lib/hive-common.jar",
  2041. "templeton.hadoop" : "/usr/hdp/current/hadoop-client/bin/hadoop",
  2042. "templeton.python" : "${env.PYTHON_CMD}",
  2043. "templeton.pig.archive" : "hdfs:///hdp/apps/${hdp.version}/pig/pig.tar.gz",
  2044. "templeton.hcat.home" : "hive.tar.gz/hive/hcatalog"
  2045. }
  2046. }
  2047. },
  2048. {
  2049. "ranger-hive-audit" : {
  2050. "properties_attributes" : { },
  2051. "properties" : {
  2052. "xasecure.audit.destination.solr.batch.filespool.dir" : "/var/log/hive/audit/solr/spool",
  2053. "xasecure.audit.destination.hdfs.dir" : "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit",
  2054. "xasecure.audit.destination.solr.urls" : "{{ranger_audit_solr_urls}}",
  2055. "xasecure.audit.destination.hdfs.batch.filespool.dir" : "/var/log/hive/audit/hdfs/spool",
  2056. "xasecure.audit.is.enabled" : "true",
  2057. "xasecure.audit.destination.hdfs" : "true",
  2058. "xasecure.audit.credential.provider.file" : "jceks://file{{credential_file}}",
  2059. "xasecure.audit.provider.summary.enabled" : "false",
  2060. "xasecure.audit.destination.db.jdbc.driver" : "{{jdbc_driver}}",
  2061. "xasecure.audit.destination.db.batch.filespool.dir" : "/var/log/hive/audit/db/spool",
  2062. "xasecure.audit.destination.solr" : "false",
  2063. "xasecure.audit.destination.db.user" : "{{xa_audit_db_user}}",
  2064. "xasecure.audit.destination.db" : "false",
  2065. "xasecure.audit.destination.db.jdbc.url" : "{{audit_jdbc_url}}",
  2066. "xasecure.audit.destination.solr.zookeepers" : "none"
  2067. }
  2068. }
  2069. }
  2070. ],
  2071. "host_groups" : [
  2072. {
  2073. "name" : "host_group_1",
  2074. "configurations" : [ ],
  2075. "components" : [
  2076. {
  2077. "name" : "ATLAS_SERVER"
  2078. },
  2079. {
  2080. "name" : "PIG"
  2081. },
  2082. {
  2083. "name" : "KAFKA_BROKER"
  2084. },
  2085. {
  2086. "name" : "HISTORYSERVER"
  2087. },
  2088. {
  2089. "name" : "HBASE_REGIONSERVER"
  2090. },
  2091. {
  2092. "name" : "OOZIE_CLIENT"
  2093. },
  2094. {
  2095. "name" : "HBASE_CLIENT"
  2096. },
  2097. {
  2098. "name" : "NAMENODE"
  2099. },
  2100. {
  2101. "name" : "SUPERVISOR"
  2102. },
  2103. {
  2104. "name" : "FALCON_SERVER"
  2105. },
  2106. {
  2107. "name" : "KNOX_GATEWAY"
  2108. },
  2109. {
  2110. "name" : "SPARK_JOBHISTORYSERVER"
  2111. },
  2112. {
  2113. "name" : "METRICS_MONITOR"
  2114. },
  2115. {
  2116. "name" : "SPARK_CLIENT"
  2117. },
  2118. {
  2119. "name" : "SLIDER"
  2120. },
  2121. {
  2122. "name" : "AMBARI_SERVER"
  2123. },
  2124. {
  2125. "name" : "APP_TIMELINE_SERVER"
  2126. },
  2127. {
  2128. "name" : "HDFS_CLIENT"
  2129. },
  2130. {
  2131. "name" : "HIVE_CLIENT"
  2132. },
  2133. {
  2134. "name" : "NODEMANAGER"
  2135. },
  2136. {
  2137. "name" : "METRICS_COLLECTOR"
  2138. },
  2139. {
  2140. "name" : "FLUME_HANDLER"
  2141. },
  2142. {
  2143. "name" : "DATANODE"
  2144. },
  2145. {
  2146. "name" : "RESOURCEMANAGER"
  2147. },
  2148. {
  2149. "name" : "WEBHCAT_SERVER"
  2150. },
  2151. {
  2152. "name" : "ZOOKEEPER_CLIENT"
  2153. },
  2154. {
  2155. "name" : "ZOOKEEPER_SERVER"
  2156. },
  2157. {
  2158. "name" : "HBASE_MASTER"
  2159. },
  2160. {
  2161. "name" : "STORM_UI_SERVER"
  2162. },
  2163. {
  2164. "name" : "HIVE_SERVER"
  2165. },
  2166. {
  2167. "name" : "OOZIE_SERVER"
  2168. },
  2169. {
  2170. "name" : "FALCON_CLIENT"
  2171. },
  2172. {
  2173. "name" : "SECONDARY_NAMENODE"
  2174. },
  2175. {
  2176. "name" : "TEZ_CLIENT"
  2177. },
  2178. {
  2179. "name" : "HIVE_METASTORE"
  2180. },
  2181. {
  2182. "name" : "SQOOP"
  2183. },
  2184. {
  2185. "name" : "YARN_CLIENT"
  2186. },
  2187. {
  2188. "name" : "MAPREDUCE2_CLIENT"
  2189. },
  2190. {
  2191. "name" : "MYSQL_SERVER"
  2192. },
  2193. {
  2194. "name" : "NIMBUS"
  2195. },
  2196. {
  2197. "name" : "DRPC_SERVER"
  2198. }
  2199. ],
  2200. "cardinality" : "1"
  2201. }
  2202. ],
  2203. "Blueprints" : {
  2204. "stack_name" : "HDP",
  2205. "stack_version" : "2.3"
  2206. }
  2207. }
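
Usage note: the JSON above is a complete Ambari blueprint for the HDP 2.3 stack; the %HOSTGROUP::host_group_1% tokens inside the configuration values are placeholders that Ambari substitutes with the real host name when a cluster is created from the blueprint. Below is a minimal sketch of how such a blueprint is typically submitted through Ambari's blueprint REST API. It assumes an Ambari server on localhost:8080 with the default admin/admin credentials, the blueprint saved locally as blueprint.json, and the names "sandbox-blueprint", "sandbox", the default_password value, and the host FQDN as placeholders to adjust for a real environment.

import json
import requests  # third-party HTTP client, assumed installed (pip install requests)

AMBARI = "http://localhost:8080/api/v1"   # Ambari server base URL (assumption)
AUTH = ("admin", "admin")                 # default credentials (assumption)
HEADERS = {"X-Requested-By": "ambari"}    # header required by the Ambari REST API

# Load the blueprint JSON shown above, saved locally as blueprint.json (assumed path).
with open("blueprint.json") as f:
    blueprint = json.load(f)

# 1) Register the blueprint under a chosen name.
resp = requests.post(AMBARI + "/blueprints/sandbox-blueprint",
                     auth=AUTH, headers=HEADERS, data=json.dumps(blueprint))
resp.raise_for_status()

# 2) Create a cluster from it by mapping host_group_1 to a concrete host;
#    Ambari resolves the %HOSTGROUP::host_group_1% tokens at this point.
cluster_template = {
    "blueprint": "sandbox-blueprint",
    "default_password": "hadoop",         # placeholder for required service passwords
    "host_groups": [
        {"name": "host_group_1", "hosts": [{"fqdn": "sandbox.hortonworks.com"}]}
    ],
}
resp = requests.post(AMBARI + "/clusters/sandbox",
                     auth=AUTH, headers=HEADERS, data=json.dumps(cluster_template))
resp.raise_for_status()
print(resp.json())                        # returns a request resource describing the provisioning job

Cluster creation is asynchronous: the request resource returned by the second POST can be polled with GET calls (same auth and X-Requested-By header) until the install and start tasks for the components listed in host_group_1 finish.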