  1. {
  2.     'sqoop-site': {},
  3.     'ranger-hdfs-audit': {
  4.         'xasecure.audit.destination.solr.zookeepers': 'NONE',
  5.         'xasecure.audit.destination.solr.urls': '',
  6.         'xasecure.audit.destination.solr': 'false',
  7.         'xasecure.audit.destination.hdfs.batch.filespool.dir': '/var/log/hadoop/hdfs/audit/hdfs/spool',
  8.         'xasecure.audit.destination.hdfs': 'true',
  9.         'xasecure.audit.destination.solr.batch.filespool.dir': '/var/log/hadoop/hdfs/audit/solr/spool',
  10.         'xasecure.audit.provider.summary.enabled': 'false',
  11.         'xasecure.audit.destination.hdfs.dir': 'hdfs://NAMENODE_HOSTNAME:8020/ranger/audit',
  12.         'xasecure.audit.is.enabled': 'true'
  13.     },
  14.     'webhcat-log4j': {
  15.         'content': '\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# Define some default values that can be overridden by system properties\nwebhcat.root.logger = INFO, standard\nwebhcat.log.dir = .\nwebhcat.log.file = webhcat.log\n\nlog4j.rootLogger = ${webhcat.root.logger}, ETW, FilterLog, FullPIILogs\n\n# Logging Threshold\nlog4j.threshhold = DEBUG\n\nlog4j.appender.standard  =  org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.standard.File = ${webhcat.log.dir}/${webhcat.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern = .yyyy-MM-dd\n\nlog4j.appender.DRFA.layout = org.apache.log4j.PatternLayout\n\nlog4j.appender.standard.layout = org.apache.log4j.PatternLayout\nlog4j.appender.standard.layout.conversionPattern = %-5p | %d{DATE} | %c | %m%n\n\n# Class logging settings\nlog4j.logger.com.sun.jersey = DEBUG, FullPIILogs\nlog4j.logger.com.sun.jersey.spi.container.servlet.WebComponent = ERROR, FullPIILogs\nlog4j.logger.org.apache.hadoop = INFO, FullPIILogs\nlog4j.logger.org.apache.hadoop.conf = WARN, FullPIILogs\nlog4j.logger.org.apache.zookeeper = WARN\nlog4j.logger.org.eclipse.jetty = INFO, FullPIILogs\n#EtwLog Appender\n\n#sends HDP service logs to customer storage account\nlog4j.appender.ETW=com.microsoft.log4jappender.EtwAppender\nlog4j.appender.ETW.source=HadoopServiceLog\nlog4j.appender.ETW.component=templeton\nlog4j.appender.ETW.layout=org.apache.log4j.TTCCLayout\nlog4j.appender.ETW.OSType=Linux\n\n# Full PII log Appender\n# Sends  PII HDP service logs to our storage account\nlog4j.appender.FullPIILogs=com.microsoft.log4jappender.FullPIILogAppender\nlog4j.appender.FullPIILogs.component=hive\nlog4j.appender.FullPIILogs.layout=org.apache.log4j.TTCCLayout\nlog4j.appender.FullPIILogs.Threshold=INFO\nlog4j.appender.FullPIILogs.source=CentralFullServicePIILogs\nlog4j.appender.FullPIILogs.OSType=Linux\nlog4j.appender.FullPIILogs.SuffixHadoopEntryType=true'
  16.     },
  17.     'ranger-yarn-plugin-properties': {
  18.         'hadoop.rpc.protection': '',
  19.         'ranger-yarn-plugin-enabled': 'No',
  20.         'common.name.for.certificate': '',
  21.         'REPOSITORY_CONFIG_USERNAME': 'yarn',
  22.         'REPOSITORY_CONFIG_PASSWORD': 'yarn',
  23.         'policy_user': 'ambari-qa'
  24.     },
  25.     'zeppelin-config': {
  26.         'zeppelin.notebook.s3.user': 'user',
  27.         'zeppelin.ssl.key.manager.password': 'change me',
  28.         'zeppelin.anonymous.allowed': 'true',
  29.         'zeppelin.ssl.truststore.password': 'change me',
  30.         'zeppelin.interpreters': 'org.apache.zeppelin.spark.SparkInterpreter,org.apache.zeppelin.spark.PySparkInterpreter,org.apache.zeppelin.spark.SparkSqlInterpreter,org.apache.zeppelin.spark.DepInterpreter,org.apache.zeppelin.markdown.Markdown,org.apache.zeppelin.angular.AngularInterpreter,org.apache.zeppelin.shell.ShellInterpreter,org.apache.zeppelin.jdbc.JDBCInterpreter,org.apache.zeppelin.phoenix.PhoenixInterpreter,org.apache.zeppelin.livy.LivySparkInterpreter,org.apache.zeppelin.livy.LivyPySparkInterpreter,org.apache.zeppelin.livy.LivySparkRInterpreter,org.apache.zeppelin.livy.LivySparkSQLInterpreter',
  31.         'zeppelin.ssl.truststore.type': 'JKS',
  32.         'zeppelin.ssl': 'false',
  33.         'zeppelin.ssl.truststore.path': 'conf/truststore',
  34.         'zeppelin.notebook.storage': 'org.apache.zeppelin.notebook.repo.VFSNotebookRepo',
  35.         'zeppelin.notebook.dir': 'notebook',
  36.         'zeppelin.ssl.keystore.password': 'change me',
  37.         'zeppelin.notebook.homescreen.hide': 'false',
  38.         'zeppelin.ssl.keystore.path': 'conf/keystore',
  39.         'zeppelin.websocket.max.text.message.size': '1024000',
  40.         'zeppelin.server.addr': '0.0.0.0',
  41.         'zeppelin.server.port': '9995',
  42.         'zeppelin.server.allowed.origins': '*',
  43.         'zeppelin.interpreter.connect.timeout': '30000',
  44.         'zeppelin.notebook.s3.bucket': 'zeppelin',
  45.         'zeppelin.ssl.client.auth': 'false',
  46.         'zeppelin.notebook.homescreen': ' ',
  47.         'zeppelin.interpreter.dir': 'interpreter',
  48.         'zeppelin.ssl.keystore.type': 'JKS'
  49.     },
  50.     'ranger-hdfs-policymgr-ssl': {
  51.         'xasecure.policymgr.clientssl.keystore': '/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks',
  52.         'xasecure.policymgr.clientssl.truststore': '/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks',
  53.         'xasecure.policymgr.clientssl.truststore.credential.file': 'jceks://file{{credential_file}}',
  54.         'xasecure.policymgr.clientssl.keystore.password': 'myKeyFilePassword',
  55.         'xasecure.policymgr.clientssl.truststore.password': 'changeit',
  56.         'xasecure.policymgr.clientssl.keystore.credential.file': 'jceks://file{{credential_file}}'
  57.     },
  58.     'pig-env': {
  59.         'content': '\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\nif [ -d "/usr/lib/tez" ]; then\n  PIG_OPTS="$PIG_OPTS -Dmapreduce.framework.name=yarn"\nfi'
  60.     },
  61.     'slider-env': {
  62.         'content': '\n# Set Slider-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java64_home}}\n# The hadoop conf directory.  Optional as slider-client.xml can be edited to add properties.\nexport HADOOP_CONF_DIR={{hadoop_conf_dir}}'
  63.     },
  64.     'ranger-hive-policymgr-ssl': {
  65.         'xasecure.policymgr.clientssl.keystore': '/usr/hdp/current/hive-server2/conf/ranger-plugin-keystore.jks',
  66.         'xasecure.policymgr.clientssl.truststore': '/usr/hdp/current/hive-server2/conf/ranger-plugin-truststore.jks',
  67.         'xasecure.policymgr.clientssl.truststore.credential.file': 'jceks://file{{credential_file}}',
  68.         'xasecure.policymgr.clientssl.keystore.password': 'myKeyFilePassword',
  69.         'xasecure.policymgr.clientssl.truststore.password': 'changeit',
  70.         'xasecure.policymgr.clientssl.keystore.credential.file': 'jceks://file{{credential_file}}'
  71.     },
  72.     'hivemetastore-site': {
  73.         'hive.service.metrics.hadoop2.component': 'hivemetastore',
  74.         'hive.metastore.metrics.enabled': 'true',
  75.         'hive.service.metrics.file.location': '/var/log/hive/hivemetastore-report.json',
  76.         'hive.service.metrics.reporter': 'JSON_FILE, JMX, HADOOP2'
  77.     },
  78.     'llap-cli-log4j2': {
  79.         'content': '\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nstatus = WARN\nname = LlapCliLog4j2\npackages = org.apache.hadoop.hive.ql.log\n\n# list of properties\nproperty.hive.log.level = INFO\nproperty.hive.root.logger = console\nproperty.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}\nproperty.hive.log.file = llap-cli.log\n\n# list of all appenders\nappenders = console, DRFA\n\n# console appender\nappender.console.type = Console\nappender.console.name = console\nappender.console.target = SYSTEM_ERR\nappender.console.layout.type = PatternLayout\nappender.console.layout.pattern = %p %c{2}: %m%n\n\n# daily rolling file appender\nappender.DRFA.type = RollingRandomAccessFile\nappender.DRFA.name = DRFA\nappender.DRFA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}\n# Use %pid in the filePattern to append process-id@host-name to the filename if you want separate log files for different CLI session\nappender.DRFA.filePattern = ${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}\nappender.DRFA.layout.type = PatternLayout\nappender.DRFA.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n\nappender.DRFA.policies.type = Policies\nappender.DRFA.policies.time.type = TimeBasedTriggeringPolicy\nappender.DRFA.policies.time.interval = 1\nappender.DRFA.policies.time.modulate = true\nappender.DRFA.strategy.type = DefaultRolloverStrategy\nappender.DRFA.strategy.max = 30\n\n# list of all loggers\nloggers = ZooKeeper, DataNucleus, Datastore, JPOX, HadoopConf\n\nlogger.ZooKeeper.name = org.apache.zookeeper\nlogger.ZooKeeper.level = WARN\n\nlogger.DataNucleus.name = DataNucleus\nlogger.DataNucleus.level = ERROR\n\nlogger.Datastore.name = Datastore\nlogger.Datastore.level = ERROR\n\nlogger.JPOX.name = JPOX\nlogger.JPOX.level = ERROR\n\nlogger.HadoopConf.name = org.apache.hadoop.conf.Configuration\nlogger.HadoopConf.level = ERROR\n\n# root logger\nrootLogger.level = ${sys:hive.log.level}\nrootLogger.appenderRefs = root, DRFA\nrootLogger.appenderRef.root.ref = ${sys:hive.root.logger}\nrootLogger.appenderRef.DRFA.ref = DRFA'
  80.     },
  81.     'ranger-hive-security': {
  82.         'ranger.plugin.hive.policy.source.impl': 'org.apache.ranger.admin.client.RangerAdminRESTClient',
  83.         'ranger.plugin.hive.policy.rest.ssl.config.file': '/usr/hdp/current/{{ranger_hive_component}}/conf/conf.server/ranger-policymgr-ssl.xml',
  84.         'xasecure.hive.update.xapolicies.on.grant.revoke': 'true',
  85.         'ranger.plugin.hive.service.name': '{{repo_name}}',
  86.         'ranger.plugin.hive.policy.rest.url': '{{policymgr_mgr_url}}',
  87.         'ranger.plugin.hive.policy.cache.dir': '/etc/ranger/{{repo_name}}/policycache',
  88.         'ranger.plugin.hive.policy.pollIntervalMs': '30000'
  89.     },
  90.     'spark2-metrics-properties': {
  91.         'content': '\n# syntax: [instance].sink|source.[name].[options]=[value]\n\n# This file configures Spark\'s internal metrics system. The metrics system is\n# divided into instances which correspond to internal components.\n# Each instance can be configured to report its metrics to one or more sinks.\n# Accepted values for [instance] are "master", "worker", "executor", "driver",\n# and "applications". A wild card "*" can be used as an instance name, in\n# which case all instances will inherit the supplied property.\n#\n# Within an instance, a "source" specifies a particular set of grouped metrics.\n# there are two kinds of sources:\n# 1. Spark internal sources, like MasterSource, WorkerSource, etc, which will\n# collect a Spark component\'s internal state. Each instance is paired with a\n# Spark source that is added automatically.\n# 2. Common sources, like JvmSource, which will collect low level state.\n# These can be added through configuration options and are then loaded\n# using reflection.\n#\n# A "sink" specifies where metrics are delivered to. Each instance can be\n# assigned one or more sinks.\n#\n# The sink|source field specifies whether the property relates to a sink or\n# source.\n#\n# The [name] field specifies the name of source or sink.\n#\n# The [options] field is the specific property of this source or sink. The\n# source or sink is responsible for parsing this property.\n#\n# Notes:\n# 1. To add a new sink, set the "class" option to a fully qualified class\n# name (see examples below).\n# 2. Some sinks involve a polling period. The minimum allowed polling period\n# is 1 second.\n# 3. Wild card properties can be overridden by more specific properties.\n# For example, master.sink.console.period takes precedence over\n# *.sink.console.period.\n# 4. A metrics specific configuration\n# "spark.metrics.conf=${SPARK_HOME}/conf/metrics.properties" should be\n# added to Java properties using -Dspark.metrics.conf=xxx if you want to\n# customize metrics system. You can also put the file in ${SPARK_HOME}/conf\n# and it will be loaded automatically.\n# 5. MetricsServlet is added by default as a sink in master, worker and client\n# driver, you can send http request "/metrics/json" to get a snapshot of all the\n# registered metrics in json format. For master, requests "/metrics/master/json" and\n# "/metrics/applications/json" can be sent seperately to get metrics snapshot of\n# instance master and applications. 
MetricsServlet may not be configured by self.\n#\n\n## List of available sinks and their properties.\n\n# org.apache.spark.metrics.sink.ConsoleSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n\n# org.apache.spark.metrics.sink.CSVSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n# directory /tmp Where to store CSV files\n\n# org.apache.spark.metrics.sink.GangliaSink\n# Name: Default: Description:\n# host NONE Hostname or multicast group of Ganglia server\n# port NONE Port of Ganglia server(s)\n# period 10 Poll period\n# unit seconds Units of poll period\n# ttl 1 TTL of messages sent by Ganglia\n# mode multicast Ganglia network mode (\'unicast\' or \'multicast\')\n\n# org.apache.spark.metrics.sink.JmxSink\n\n# org.apache.spark.metrics.sink.MetricsServlet\n# Name: Default: Description:\n# path VARIES* Path prefix from the web server root\n# sample false Whether to show entire set of samples for histograms (\'false\' or \'true\')\n#\n# * Default path is /metrics/json for all instances except the master. The master has two paths:\n# /metrics/aplications/json # App information\n# /metrics/master/json # Master information\n\n# org.apache.spark.metrics.sink.GraphiteSink\n# Name: Default: Description:\n# host NONE Hostname of Graphite server\n# port NONE Port of Graphite server\n# period 10 Poll period\n# unit seconds Units of poll period\n# prefix EMPTY STRING Prefix to prepend to metric name\n\n## Examples\n# Enable JmxSink for all instances by class name\n#*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink\n\n# Enable ConsoleSink for all instances by class name\n#*.sink.console.class=org.apache.spark.metrics.sink.ConsoleSink\n\n# Polling period for ConsoleSink\n#*.sink.console.period=10\n\n#*.sink.console.unit=seconds\n\n# Master instance overlap polling period\n#master.sink.console.period=15\n\n#master.sink.console.unit=seconds\n\n# Enable CsvSink for all instances\n#*.sink.csv.class=org.apache.spark.metrics.sink.CsvSink\n\n# Polling period for CsvSink\n#*.sink.csv.period=1\n\n#*.sink.csv.unit=minutes\n\n# Polling directory for CsvSink\n#*.sink.csv.directory=/tmp/\n\n# Worker instance overlap polling period\n#worker.sink.csv.period=10\n\n#worker.sink.csv.unit=minutes\n\n# Enable jvm source for instance master, worker, driver and executor\n#master.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#worker.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource'
  92.     },
  93.     'ams-hbase-security-site': {
  94.         'hbase.zookeeper.property.kerberos.removeHostFromPrincipal': '',
  95.         'hadoop.security.authentication': '',
  96.         'hbase.regionserver.keytab.file': '',
  97.         'ams.zookeeper.principal': '',
  98.         'hbase.regionserver.kerberos.principal': '',
  99.         'hbase.security.authentication': '',
  100.         'hbase.myclient.principal': '',
  101.         'hbase.myclient.keytab': '',
  102.         'ams.zookeeper.keytab': '',
  103.         'hbase.security.authorization': '',
  104.         'hbase.master.kerberos.principal': '',
  105.         'hbase.coprocessor.region.classes': '',
  106.         'hbase.zookeeper.property.authProvider.1': '',
  107.         'hbase.zookeeper.property.kerberos.removeRealmFromPrincipal': '',
  108.         'hbase.master.keytab.file': '',
  109.         'hbase.coprocessor.master.classes': '',
  110.         'hbase.zookeeper.property.jaasLoginRenew': ''
  111.     },
  112.     'oozie-env': {
  113.         'oozie_log_dir': '/var/log/oozie',
  114.         'oozie_user_nproc_limit': '16000',
  115.         'oozie_existing_mssql_server_host': 'df6ulzeuce.database.windows.net',
  116.         'oozie_admin_users': '{oozie_user}, oozie-admin',
  117.         'oozie_user_nofile_limit': '32000',
  118.         'oozie_database': 'Existing MSSQL Server database with SQL authentication',
  119.         'oozie_hostname': 'df6ulzeuce.database.windows.net',
  120.         'oozie_heapsize': '2048m',
  121.         'oozie_admin_port': '11001',
  122.         'oozie_tmp_dir': '/var/tmp/oozie',
  123.         'oozie_database_type': 'mssql',
  124.         'oozie_pid_dir': '/var/run/oozie',
  125.         'content': '\n#!/bin/bash\n\nif [ -d "/usr/lib/bigtop-tomcat" ]; then\n  export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}\n  export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}\n  export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}\n  export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat\nfi\n\n#Set JAVA HOME\nexport JAVA_HOME={{java_home}}\n\nexport JRE_HOME=${JAVA_HOME}\n\n# Set Oozie specific environment variables here.\n\n# Settings for the Embedded Tomcat that runs Oozie\n# Java System properties for Oozie should be specified in this variable\n#\n# export CATALINA_OPTS=\n\n# Oozie configuration file to load from Oozie configuration directory\n#\n# export OOZIE_CONFIG_FILE=oozie-site.xml\n\n# Oozie logs directory\n#\nexport OOZIE_LOG={{oozie_log_dir}}\n\n# Oozie pid directory\n#\nexport CATALINA_PID={{pid_file}}\n\n#Location of the data for oozie\nexport OOZIE_DATA={{oozie_data_dir}}\n\n# Oozie Log4J configuration file to load from Oozie configuration directory\n#\n# export OOZIE_LOG4J_FILE=oozie-log4j.properties\n\n# Reload interval of the Log4J configuration file, in seconds\n#\n# export OOZIE_LOG4J_RELOAD=10\n\n# The port Oozie server runs\n#\nexport OOZIE_HTTP_PORT={{oozie_server_port}}\n\n# The admin port Oozie server runs\n#\nexport OOZIE_ADMIN_PORT={{oozie_server_admin_port}}\n\n# The host name Oozie server runs on\n#\n# export OOZIE_HTTP_HOSTNAME=`hostname -f`\n\n# The base URL for callback URLs to Oozie\n#\n# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"\nexport JAVA_LIBRARY_PATH={{hadoop_lib_home}}/native/Linux-amd64-64\n\n# At least 1 minute of retry time to account for server downtime during\n# upgrade/downgrade\nexport OOZIE_CLIENT_OPTS="${OOZIE_CLIENT_OPTS} -Doozie.connection.retry.count=5 "\n\n# This is needed so that Oozie does not run into OOM or GC Overhead limit\n# exceeded exceptions. If the oozie server is handling large number of\n# workflows/coordinator jobs, the memory settings may need to be revised\nexport CATALINA_OPTS="${CATALINA_OPTS} -Xmx2048m -XX:MaxPermSize=256m -Dwhitelist.filename=core-whitelist.res,oozie-whitelist.res -Dcomponent=oozie"',
  126.         'oozie_user': 'oozie',
  127.         'oozie_data_dir': '/hadoop/oozie/data',
  128.         'oozie_existing_mssql_server_database': 'ambhviewlaf25094bc5a26ffe42769ae339b3f402be29ooziemetastore',
  129.         'oozie_permsize': '256m'
  130.     },
  131.     'mapred-site': {
  132.         'yarn.app.mapreduce.am.log.level': 'INFO',
  133.         'mapreduce.jobhistory.address': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:10020',
  134.         'mapreduce.cluster.administrators': ' hadoop',
  135.         'mapreduce.reduce.input.buffer.percent': '0.0',
  136.         'mapreduce.output.fileoutputformat.compress': 'false',
  137.         'mapreduce.framework.name': 'yarn',
  138.         'mapreduce.map.speculative': 'false',
  139.         'mapreduce.reduce.shuffle.merge.percent': '0.66',
  140.         'mapreduce.admin.user.env': 'LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64',
  141.         'mapreduce.fileoutputcommitter.algorithm.version': '2',
  142.         'yarn.app.mapreduce.client.job.max-retries': '30',
  143.         'mapreduce.map.java.opts': '-Xmx768M -Xms768M -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC',
  144.         'mapreduce.jobhistory.recovery.store.leveldb.path': '/hadoop/mapreduce/jhs',
  145.         'mapreduce.application.classpath': '$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure',
  146.         'mapreduce.job.reduce.slowstart.completedmaps': '0.05',
  147.         'mapreduce.jobhistory.intermediate-done-dir': '/mr-history/tmp',
  148.         'mapreduce.output.fileoutputformat.compress.type': 'BLOCK',
  149.         'mapreduce.reduce.speculative': 'false',
  150.         'mapreduce.reduce.java.opts': '-Xmx1536M -Xms1536M -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC',
  151.         'mapreduce.am.max-attempts': '2',
  152.         'yarn.app.mapreduce.am.admin-command-opts': '-Dhdp.version=${hdp.version}',
  153.         'mapreduce.reduce.log.level': 'INFO',
  154.         'mapreduce.map.sort.spill.percent': '0.7',
  155.         'mapreduce.job.emit-timeline-data': 'false',
  156.         'mapreduce.task.io.sort.mb': '409',
  157.         'mapreduce.task.timeout': '300000',
  158.         'mapreduce.map.memory.mb': '1024',
  159.         'mapreduce.task.io.sort.factor': '100',
  160.         'mapreduce.jobhistory.http.policy': 'HTTP_ONLY',
  161.         'mapreduce.reduce.memory.mb': '2048',
  162.         'mapreduce.jobhistory.recovery.enable': 'true',
  163.         'mapreduce.job.queuename': 'default',
  164.         'mapreduce.map.log.level': 'INFO',
  165.         'mapreduce.shuffle.port': '13562',
  166.         'mapreduce.job.counters.max': '130',
  167.         'mapreduce.reduce.shuffle.fetch.retry.timeout-ms': '30000',
  168.         'mapreduce.shuffle.transferTo.allowed': 'true',
  169.         'mapreduce.jobhistory.recovery.store.class': 'org.apache.hadoop.mapreduce.v2.hs.HistoryServerLeveldbStateStoreService',
  170.         'mapreduce.map.output.compress': 'false',
  171.         'yarn.app.mapreduce.am.staging-dir': '/user',
  172.         'mapreduce.reduce.shuffle.parallelcopies': '30',
  173.         'mapreduce.reduce.shuffle.input.buffer.percent': '0.7',
  174.         'mapreduce.jobhistory.webapp.address': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:19888',
  175.         'mapreduce.jobhistory.done-dir': '/mr-history/done',
  176.         'mapreduce.cluster.local.dir': '/mnt/resource/hadoop/mapred/local',
  177.         'mapreduce.admin.reduce.child.java.opts': '-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}',
  178.         'mapreduce.reduce.shuffle.fetch.retry.enabled': '1',
  179.         'mapreduce.application.framework.path': '/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework',
  180.         'yarn.app.mapreduce.am.command-opts': '-Xmx768M -Xms768M -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC',
  181.         'mapreduce.reduce.shuffle.fetch.retry.interval-ms': '1000',
  182.         'mapreduce.jobhistory.bind-host': '0.0.0.0',
  183.         'mapreduce.admin.map.child.java.opts': '-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}',
  184.         'yarn.app.mapreduce.am.resource.mb': '1024'
  185.     },
  186.     'hdfs-site': {
  187.         'dfs.namenode.datanode.registration.ip-hostname-check': 'false',
  188.         'dfs.namenode.checkpoint.period': '21600',
  189.         'dfs.namenode.avoid.write.stale.datanode': 'true',
  190.         'dfs.namenode.startup.delay.block.deletion.sec': '3600',
  191.         'dfs.namenode.checkpoint.txns': '1000000',
  192.         'dfs.content-summary.limit': '5000',
  193.         'dfs.support.append': 'true',
  194.         'dfs.datanode.address': '0.0.0.0:30010',
  195.         'dfs.cluster.administrators': ' hdfs',
  196.         'dfs.namenode.audit.log.async': 'true',
  197.         'dfs.datanode.balance.bandwidthPerSec': '6250000',
  198.         'dfs.namenode.safemode.threshold-pct': '0.999',
  199.         'dfs.namenode.checkpoint.edits.dir': '${dfs.namenode.checkpoint.dir}',
  200.         'dfs.namenode.rpc-address': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:8020',
  201.         'dfs.permissions.enabled': 'false',
  202.         'dfs.client.read.shortcircuit': 'true',
  203.         'dfs.https.port': '50470',
  204.         'dfs.namenode.https-address': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:30470',
  205.         'nfs.file.dump.dir': '/tmp/.hdfs-nfs',
  206.         'dfs.blocksize': '134217728',
  207.         'dfs.blockreport.initialDelay': '120',
  208.         'dfs.journalnode.edits.dir': '/hadoop/hdfs/journalnode',
  209.         'dfs.namenode.fslock.fair': 'false',
  210.         'dfs.datanode.max.transfer.threads': '1024',
  211.         'dfs.heartbeat.interval': '3',
  212.         'dfs.webhdfs.enabled': 'false',
  213.         'dfs.namenode.handler.count': '100',
  214.         'dfs.namenode.checkpoint.dir': '/hadoop/hdfs/namesecondary',
  215.         'fs.permissions.umask-mode': '022',
  216.         'dfs.namenode.stale.datanode.interval': '30000',
  217.         'dfs.datanode.ipc.address': '0.0.0.0:30020',
  218.         'dfs.datanode.failed.volumes.tolerated': '0',
  219.         'dfs.datanode.data.dir': '/mnt/resource/hadoop/hdfs/data',
  220.         'dfs.namenode.http-address': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:30070',
  221.         'dfs.replication': '3',
  222.         'dfs.encrypt.data.transfer.cipher.suites': 'AES/CTR/NoPadding',
  223.         'dfs.namenode.accesstime.precision': '0',
  224.         'dfs.datanode.https.address': '0.0.0.0:30475',
  225.         'dfs.namenode.write.stale.datanode.ratio': '1.0f',
  226.         'dfs.namenode.secondary.http-address': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:30090',
  227.         'nfs.exports.allowed.hosts': '* rw',
  228.         'dfs.datanode.http.address': '0.0.0.0:30075',
  229.         'dfs.datanode.du.reserved': '1073741824',
  230.         'dfs.client.read.shortcircuit.streams.cache.size': '4096',
  231.         'dfs.http.policy': 'HTTP_ONLY',
  232.         'dfs.block.access.token.enable': 'true',
  233.         'dfs.client.retry.policy.enabled': 'false',
  234.         'dfs.namenode.name.dir.restore': 'true',
  235.         'dfs.permissions.superusergroup': 'hdfs',
  236.         'dfs.journalnode.https-address': '0.0.0.0:8481',
  237.         'dfs.journalnode.http-address': '0.0.0.0:8480',
  238.         'dfs.domain.socket.path': '/var/lib/hadoop-hdfs/dn_socket',
  239.         'dfs.namenode.avoid.read.stale.datanode': 'true',
  240.         'dfs.hosts.exclude': '/etc/hadoop/conf/dfs.exclude',
  241.         'dfs.datanode.data.dir.perm': '750',
  242.         'dfs.encryption.key.provider.uri': '',
  243.         'dfs.replication.max': '50',
  244.         'dfs.namenode.name.dir': '/hadoop/hdfs/namenode'
  245.     },
  246.     'ams-env': {
  247.         'content': '\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# Collector Log directory for log4j\nexport AMS_COLLECTOR_LOG_DIR={{ams_collector_log_dir}}\n\n# Monitor Log directory for outfile\nexport AMS_MONITOR_LOG_DIR={{ams_monitor_log_dir}}\n\n# Collector pid directory\nexport AMS_COLLECTOR_PID_DIR={{ams_collector_pid_dir}}\n\n# Monitor pid directory\nexport AMS_MONITOR_PID_DIR={{ams_monitor_pid_dir}}\n\n# AMS HBase pid directory\nexport AMS_HBASE_PID_DIR={{hbase_pid_dir}}\n\n# AMS Collector heapsize\nexport AMS_COLLECTOR_HEAPSIZE={{metrics_collector_heapsize}}\n\n# HBase normalizer enabled\nexport AMS_HBASE_NORMALIZER_ENABLED={{ams_hbase_normalizer_enabled}}\n\n# HBase compaction policy enabled\nexport AMS_HBASE_FIFO_COMPACTION_ENABLED={{ams_hbase_fifo_compaction_enabled}}\n\n# HBase Tables Initialization check enabled\nexport AMS_HBASE_INIT_CHECK_ENABLED={{ams_hbase_init_check_enabled}}\n\n# AMS Collector options\nexport AMS_COLLECTOR_OPTS="-Djava.library.path=/usr/lib/ams-hbase/lib/hadoop-native"\n{% if security_enabled %}\nexport AMS_COLLECTOR_OPTS="$AMS_COLLECTOR_OPTS -Djava.security.auth.login.config={{ams_collector_jaas_config_file}}"\n{% endif %}\n\n# AMS Collector GC options\nexport AMS_COLLECTOR_GC_OPTS="-XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{ams_collector_log_dir}}/collector-gc.log-`date +\'%Y%m%d%H%M\'`"\nexport AMS_COLLECTOR_OPTS="$AMS_COLLECTOR_OPTS $AMS_COLLECTOR_GC_OPTS"',
  248.         'ambari_metrics_user': 'ams',
  249.         'metrics_monitor_log_dir': '/var/log/ambari-metrics-monitor',
  250.         'metrics_collector_log_dir': '/var/log/ambari-metrics-collector',
  251.         'metrics_collector_pid_dir': '/var/run/ambari-metrics-collector',
  252.         'metrics_monitor_pid_dir': '/var/run/ambari-metrics-monitor',
  253.         'metrics_collector_heapsize': '2048m'
  254.     },
  255.     'ams-site': {
  256.         'timeline.metrics.host.aggregator.minute.ttl': '604800',
  257.         'timeline.metrics.cluster.aggregate.splitpoints': ' ',
  258.         'timeline.metrics.service.webapp.address': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:6188',
  259.         'timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier': '2',
  260.         'timeline.metrics.sink.collection.period': '10',
  261.         'timeline.metrics.cluster.aggregator.second.timeslice.interval': '30',
  262.         'timeline.metrics.service.http.policy': 'HTTP_ONLY',
  263.         'timeline.metrics.service.checkpointDelay': '60',
  264.         'timeline.metrics.host.aggregator.hourly.disabled': 'false',
  265.         'timeline.metrics.cluster.aggregator.daily.interval': '86400',
  266.         'timeline.metrics.whitelist.file': '/etc/ambari-metrics-collector/conf/whitelistedmetrics.txt',
  267.         'timeline.metrics.cluster.aggregator.hourly.ttl': '31536000',
  268.         'timeline.metrics.hbase.fifo.compaction.enabled': 'false',
  269.         'timeline.metrics.cluster.aggregator.hourly.disabled': 'false',
  270.         'timeline.metrics.cluster.aggregator.interpolation.enabled': 'true',
  271.         'timeline.metrics.host.aggregator.daily.disabled': 'false',
  272.         'timeline.metrics.service.watcher.timeout': '30',
  273.         'timeline.metrics.hbase.compression.scheme': 'SNAPPY',
  274.         'timeline.metrics.host.aggregate.splitpoints': ' ',
  275.         'timeline.metrics.cluster.aggregator.hourly.interval': '3600',
  276.         'timeline.metrics.aggregators.skip.blockcache.enabled': 'false',
  277.         'phoenix.spool.directory': '/tmp',
  278.         'timeline.metrics.host.aggregator.ttl': '86400',
  279.         'timeline.metrics.sink.report.interval': '60',
  280.         'timeline.metrics.service.use.groupBy.aggregators': 'false',
  281.         'timeline.metrics.host.aggregator.daily.ttl': '31536000',
  282.         'timeline.metrics.cluster.aggregator.hourly.checkpointCutOffMultiplier': '2',
  283.         'timeline.metrics.service.cluster.aggregator.appIds': 'datanode,nodemanager,hbase',
  284.         'timeline.metrics.service.watcher.delay': '30',
  285.         'timeline.metrics.cluster.aggregator.daily.ttl': '63072000',
  286.         'timeline.metrics.host.aggregator.minute.interval': '300',
  287.         'timeline.metrics.aggregator.checkpoint.dir': '/var/lib/ambari-metrics-collector/checkpoint',
  288.         'timeline.metrics.hbase.data.block.encoding': 'FAST_DIFF',
  289.         'timeline.metrics.cluster.aggregator.minute.ttl': '2592000',
  290.         'timeline.metrics.cluster.aggregator.second.ttl': '259200',
  291.         'timeline.metrics.host.aggregator.minute.disabled': 'false',
  292.         'timeline.metrics.daily.aggregator.minute.interval': '86400',
  293.         'timeline.metrics.service.handler.thread.count': '20',
  294.         'timeline.metrics.cluster.aggregator.minute.interval': '300',
  295.         'timeline.metrics.cache.size': '150',
  296.         'phoenix.query.maxGlobalMemoryPercentage': '25',
  297.         'timeline.metrics.service.operation.mode': 'distributed',
  298.         'timeline.metrics.host.aggregator.minute.checkpointCutOffMultiplier': '2',
  299.         'timeline.metrics.cluster.aggregator.minute.checkpointCutOffMultiplier': '2',
  300.         'timeline.metrics.hbase.init.check.enabled': 'false',
  301.         'timeline.metrics.host.aggregator.hourly.checkpointCutOffMultiplier': '2',
  302.         'timeline.metrics.cluster.aggregator.daily.disabled': 'false',
  303.         'timeline.metrics.cluster.aggregator.minute.disabled': 'false',
  304.         'timeline.metrics.service.rpc.address': '0.0.0.0:30200',
  305.         'timeline.metrics.host.aggregator.hourly.ttl': '2592000',
  306.         'timeline.metrics.service.resultset.fetchSize': '2000',
  307.         'timeline.metrics.service.watcher.initial.delay': '600',
  308.         'timeline.metrics.cache.commit.interval': '3',
  309.         'timeline.metrics.service.default.result.limit': '15840',
  310.         'timeline.metrics.cache.enabled': 'true',
  311.         'timeline.metrics.cluster.aggregator.second.disabled': 'false',
  312.         'timeline.metrics.cluster.aggregator.second.checkpointCutOffMultiplier': '2',
  313.         'timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier': '2',
  314.         'timeline.metrics.service.watcher.disabled': 'true',
  315.         'timeline.metrics.cluster.aggregator.second.interval': '120',
  316.         'timeline.metrics.host.aggregator.hourly.interval': '3600'
  317.     },
  318.     'ams-hbase-policy': {
  319.         'security.masterregion.protocol.acl': '*',
  320.         'security.admin.protocol.acl': '*',
  321.         'security.client.protocol.acl': '*'
  322.     },
  323.     'zookeeper-log4j': {
  324.         'content': '\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n#\n# ZooKeeper Logging Configuration\n#\n\n# DEFAULT: console appender only\nlog4j.rootLogger=INFO, CONSOLE, ETW, FilterLog\n\n# Example with rolling log file\n#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE\n\n# Example with rolling log file and tracing\n#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE\n\n#\n# Log INFO level and above messages to the console\n#\nlog4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\nlog4j.appender.CONSOLE.Threshold=INFO\nlog4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n#\n# Add ROLLINGFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender\nlog4j.appender.ROLLINGFILE.Threshold=DEBUG\nlog4j.appender.ROLLINGFILE.File=zookeeper.log\n\n# Max log file size of 10MB\nlog4j.appender.ROLLINGFILE.MaxFileSize=10MB\n# uncomment the next line to limit number of backup files\n#log4j.appender.ROLLINGFILE.MaxBackupIndex=10\n\nlog4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout\nlog4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n\n\n\n#\n# Add TRACEFILE to rootLogger to get log file output\n#    Log DEBUG level and above messages to a log file\nlog4j.appender.TRACEFILE=org.apache.log4j.FileAppender\nlog4j.appender.TRACEFILE.Threshold=TRACE\nlog4j.appender.TRACEFILE.File=zookeeper_trace.log\n\nlog4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout\n### Notice we are including log4j\'s NDC here (%x)\nlog4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n\n\n\n#EtwLog Appender\n\n#sends HDP service logs to customer storage account\n\nlog4j.appender.ETW=com.microsoft.log4jappender.EtwAppender\nlog4j.appender.ETW.source=HadoopServiceLog\nlog4j.appender.ETW.component=${component}\nlog4j.appender.ETW.layout=org.apache.log4j.TTCCLayout\nlog4j.appender.ETW.OSType=Linux\n\n# FilterLog Appender\n# Sends filtered HDP service logs to our storage account\nlog4j.appender.FilterLog=com.microsoft.log4jappender.FilterLogAppender\nlog4j.appender.FilterLog.layout=org.apache.log4j.TTCCLayout\nlog4j.appender.FilterLog.source=CentralFilteredHadoopServiceLogs\nlog4j.appender.FilterLog.component=${component}\nlog4j.appender.FilterLog.whitelistFileName=${whitelist.filename}\nlog4j.appender.FilterLog.OSType=Linux'
  325.     },
  326.     'hadoop-policy': {
  327.         'security.job.client.protocol.acl': '*',
  328.         'security.job.task.protocol.acl': '*',
  329.         'security.datanode.protocol.acl': '*',
  330.         'security.namenode.protocol.acl': '*',
  331.         'security.client.datanode.protocol.acl': '*',
  332.         'security.inter.tracker.protocol.acl': '*',
  333.         'security.refresh.usertogroups.mappings.protocol.acl': 'hadoop',
  334.         'security.client.protocol.acl': '*',
  335.         'security.refresh.policy.protocol.acl': 'hadoop',
  336.         'security.admin.operations.protocol.acl': 'hadoop',
  337.         'security.inter.datanode.protocol.acl': '*'
  338.     },
  339.     'hdfs-log4j': {
  340.         'content': '\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\nhadoop.root.logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property "hadoop.root.logger".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter, ETW, FilterLog\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add "console" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=WARN\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes "deprecated" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#EtwLog Appender\n#sends HDP service logs to customer storage account\nlog4j.appender.ETW=com.microsoft.log4jappender.EtwAppender\nlog4j.appender.ETW.source=HadoopServiceLog\nlog4j.appender.ETW.component=${component}\nlog4j.appender.ETW.layout=org.apache.log4j.TTCCLayout\nlog4j.appender.ETW.OSType=Linux\n\n# FilterLog Appender\n# Sends filtered HDP service logs to our storage account\nlog4j.appender.FilterLog=com.microsoft.log4jappender.FilterLogAppender\nlog4j.appender.FilterLog.layout=org.apache.log4j.TTCCLayout\nlog4j.appender.FilterLog.source=CentralFilteredHadoopServiceLogs\nlog4j.appender.FilterLog.component=${component}\nlog4j.appender.FilterLog.whitelistFileName=NA\nlog4j.appender.FilterLog.OSType=Linux\n\n#\n# rm amlauncher logging\n#\n\nlog4j.logger.org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncher=${hadoop.root.logger}, ETW, 
AMFilterLog\nlog4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncher=false\nlog4j.appender.AMFilterLog=com.microsoft.log4jappender.FilterLogAppender\nlog4j.appender.AMFilterLog.layout=org.apache.log4j.TTCCLayout\nlog4j.appender.AMFilterLog.source=CentralFilteredHadoopServiceLogs\nlog4j.appender.AMFilterLog.component=${component}\nlog4j.appender.AMFilterLog.whitelistFileName=${whitelist.filename}\nlog4j.appender.AMFilterLog.OSType=Linux'
  341.     },
  342.     'hive-site': {
  343.         'javax.jdo.option.ConnectionDriverName': 'com.microsoft.sqlserver.jdbc.SQLServerDriver',
  344.         'hive.fetch.task.aggr': 'false',
  345.         'hive.execution.engine': 'mr',
  346.         'atlas.cluster.name': '{{cluster_name}}',
  347.         'hive.tez.java.opts': '-Xmx512M -Xms512M -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC',
  348.         'hive.server2.table.type.mapping': 'CLASSIC',
  349.         'hive.tez.min.partition.factor': '1f',
  350.         'hive.tez.cpu.vcores': '-1',
  351.         'hive.conf.restricted.list': 'hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role',
  352.         'hive.stats.dbclass': 'fs',
  353.         'tez.shuffle-vertex-manager.max-src-fraction': '0.95',
  354.         'hive.tez.auto.reducer.parallelism': 'true',
  355.         'hive.server2.thrift.http.path': '/',
  356.         'hive.exec.scratchdir': 'hdfs://hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:8020/tmp/hive',
  357.         'hive.exec.post.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
  358.         'hive.zookeeper.namespace': 'hive_zookeeper_namespace',
  359.         'hive.cbo.enable': 'true',
  360.         'hive.optimize.reducededuplication': 'true',
  361.         'hive.optimize.bucketmapjoin': 'true',
  362.         'hive.mapjoin.bucket.cache.size': '10000',
  363.         'hive.limit.optimize.enable': 'true',
  364.         'hive.server2.max.start.attempts': '5',
  365.         'hive.exec.max.dynamic.partitions': '5000',
  366.         'hive.hmshandler.retry.attempts': '5',
  367.         'hive.metastore.sasl.enabled': 'false',
  368.         'hive.txn.manager': 'org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager',
  369.         'hive.optimize.constant.propagation': 'true',
  370.         'hive.exec.submitviachild': 'false',
  371.         'hive.metastore.kerberos.principal': 'hive/_HOST@EXAMPLE.COM',
  372.         'hive.txn.max.open.batch': '1000',
  373.         'hive.exec.compress.output': 'false',
  374.         'hive.merge.size.per.task': '256000000',
  375.         'hive.security.authenticator.manager': 'org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator',
  376.         'hive.merge.mapfiles': 'true',
  377.         'hive.exec.parallel.thread.number': '8',
  378.         'hive.mapjoin.optimized.hashtable': 'true',
  379.         'hive.default.fileformat': 'TextFile',
  380.         'hive.optimize.metadataonly': 'true',
  381.         'hive.tez.dynamic.partition.pruning.max.event.size': '1048576',
  382.         'hive.cluster.delegation.token.store.class': 'org.apache.hadoop.hive.thrift.ZooKeeperTokenStore',
  383.         'hive.server2.thrift.max.worker.threads': '500',
  384.         'hive.optimize.sort.dynamic.partition': 'true',
  385.         'hive.server2.thrift.http.port': '10001',
  386.         'hive.metastore.pre.event.listeners': 'org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener',
  387.         'hive.metastore.failure.retries': '24',
  388.         'hive.merge.smallfiles.avgsize': '16000000',
  389.         'hive.tez.max.partition.factor': '3f',
  390.         'hive.server2.transport.mode': 'http',
  391.         'hive.tez.container.size': '4096',
  392.         'hive.optimize.bucketmapjoin.sortedmerge': 'false',
  393.         'hive.compactor.worker.threads': '0',
  394.         'hive.security.metastore.authorization.manager': 'org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly',
  395.         'hive.map.aggr.hash.percentmemory': '0.5',
  396.         'hive.user.install.directory': '/user',
  397.         'datanucleus.autoCreateSchema': 'false',
  398.         'hive.compute.query.using.stats': 'true',
  399.         'hive.merge.rcfile.block.level': 'true',
  400.         'hive.map.aggr': 'true',
  401.         'hive.metastore.client.connect.retry.delay': '5s',
  402.         'hive.security.authorization.enabled': 'false',
  403.         'atlas.hook.hive.minThreads': '1',
  404.         'hive.server2.tez.default.queues': 'default',
  405.         'hive.prewarm.enabled': 'false',
  406.         'hive.exec.reducers.max': '1009',
  407.         'hive.metastore.kerberos.keytab.file': '/etc/security/keytabs/hive.service.keytab',
  408.         'hive.stats.fetch.partition.stats': 'true',
  409.         'hive.cli.print.header': 'false',
  410.         'hive.server2.thrift.sasl.qop': 'auth',
  411.         'hive.server2.support.dynamic.service.discovery': 'true',
  412.         'hive.server2.thrift.port': '10000',
  413.         'hive.exec.reducers.bytes.per.reducer': '67108864',
  414.         'hive.fetch.task.conversion': 'more',
  415.         'hive.tez.dynamic.partition.pruning.max.data.size': '104857600',
  416.         'hive.metastore.warehouse.dir': '/hive/warehouse',
  417.         'hive.metastore.client.socket.timeout': '1800s',
  418.         'hive.server2.zookeeper.namespace': 'hiveserver2',
  419.         'hive.prewarm.numcontainers': '3',
  420.         'hive.server2.enable.doAs': 'false',
  421.         'hive.exim.uri.scheme.whitelist': 'wasb,hdfs,pfile',
  422.         'atlas.hook.hive.maxThreads': '1',
  423.         'hive.auto.convert.join': 'true',
  424.         'hive.enforce.bucketing': 'true',
  425.         'datanucleus.connectionPool.maxIdle': '2',
  426.         'hive.server2.authentication.spnego.keytab': 'HTTP/_HOST@EXAMPLE.COM',
  427.         'hive.mapred.reduce.tasks.speculative.execution': 'false',
  428.         'javax.jdo.option.ConnectionURL': 'jdbc:sqlserver://df6ulzeuce.database.windows.net;databaseName=ambhviewlaf25094bc5a26ffe42769ae339b3f402be29hivemetastore;',
  429.         'hive.tez.exec.print.summary': 'true',
  430.         'hive.exec.dynamic.partition.mode': 'nonstrict',
  431.         'tez.runtime.shuffle.connect.timeout': '30000',
  432.         'hive.zookeeper.quorum': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:2181',
  433.         'hive.security.authorization.manager': 'org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory',
  434.         'hive.exec.parallel': 'false',
  435.         'hive.exec.compress.intermediate': 'false',
  436.         'hive.enforce.sorting': 'true',
  437.         'hive.mv.files.thread': '128',
  438.         'hive.txn.timeout': '300',
  439.         'hive.metastore.authorization.storage.checks': 'false',
  440.         'hive.exec.orc.default.stripe.size': '67108864',
  441.         'hive.metastore.cache.pinobjtypes': 'Table,Database,Type,FieldSchema,Order',
  442.         'hive.server2.logging.operation.enabled': 'true',
  443.         'hive.merge.tezfiles': 'false',
  444.         'hive.compactor.initiator.on': 'false',
  445.         'hive.auto.convert.join.noconditionaltask': 'true',
  446.         'hive.compactor.worker.timeout': '86400L',
  447.         'hive.security.metastore.authenticator.manager': 'org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator',
  448.         'hive.optimize.null.scan': 'true',
  449.         'hive.server2.tez.initialize.default.sessions': 'false',
  450.         'datanucleus.cache.level2.type': 'none',
  451.         'hive.stats.autogather': 'true',
  452.         'hive.server2.use.SSL': 'false',
  453.         'hive.exec.submit.local.task.via.child': 'true',
  454.         'hive.hmshandler.retry.interval': '1000',
  455.         'hive.merge.mapredfiles': 'false',
  456.         'hive.vectorized.execution.enabled': 'true',
  457.         'hive.cluster.delegation.token.store.zookeeper.connectString': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:2181',
  458.         'hive.auto.convert.sortmerge.join.to.mapjoin': 'false',
  459.         'hive.tez.log.level': 'INFO',
  460.         'hive.server2.tez.sessions.per.default.queue': '1',
  461.         'hive.exec.max.dynamic.partitions.pernode': '2000',
  462.         'hive.tez.dynamic.partition.pruning': 'true',
  463.         'datanucleus.fixedDatastore': 'true',
  464.         'hive.compactor.abortedtxn.threshold': '1000',
  465.         'hive.limit.pushdown.memory.usage': '0.04',
  466.         'hive.security.metastore.authorization.auth.reads': 'true',
  467.         'ambari.hive.db.schema.name': 'hive',
  468.         'hive.vectorized.groupby.checkinterval': '4096',
  469.         'hive.smbjoin.cache.rows': '10000',
  470.         'hive.metastore.execute.setugi': 'false',
  471.         'hive.zookeeper.client.port': '2181',
  472.         'hive.vectorized.groupby.maxentries': '100000',
  473.         'hive.server2.authentication.spnego.principal': '/etc/security/keytabs/spnego.service.keytab',
  474.         'tez.shuffle-vertex-manager.min-src-fraction': '0.9',
  475.         'hive.cluster.delegation.token.store.zookeeper.znode': '/hive/cluster/delegation',
  476.         'javax.jdo.option.ConnectionPassword': 'vQRifF7lLFrvcxaJc7yhUG5Cj95Tnp84i9dBPpyLGqlSXJeEQ4t03NC8T4KniULcHTfCA8YUW4AfLnokqzgxjPci4jvBXyG0kadk2WyYWDmC19',
  477.         'hive.exec.max.created.files': '100000',
  478.         'hive.default.fileformat.managed': 'TextFile',
  479.         'hive.map.aggr.hash.min.reduction': '0.5',
  480.         'hive.fetch.task.conversion.threshold': '1073741824',
  481.         'hive.orc.splits.include.file.footer': 'false',
  482.         'hive.exec.pre.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
  483.         'hive.merge.orcfile.stripe.level': 'true',
  484.         'hive.exec.failure.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
  485.         'hive.server2.allow.user.substitution': 'true',
  486.         'hive.optimize.index.filter': 'true',
  487.         'hive.exec.orc.encoding.strategy': 'SPEED',
  488.         'hive.vectorized.execution.reduce.enabled': 'true',
  489.         'hive.metastore.connect.retries': '5',
  490.         'hive.metastore.server.max.threads': '100000',
  491.         'hive.exec.orc.compression.strategy': 'SPEED',
  492.         'hive.vectorized.groupby.flush.percent': '0.1',
  493.         'hive.metastore.uris': 'thrift://hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:9083',
  494.         'hive.enforce.sortmergebucketmapjoin': 'true',
  495.         'hive.scratchdir.lock': 'true',
  496.         'hive.auto.convert.join.noconditionaltask.size': '1275812904',
  497.         'javax.jdo.option.ConnectionUserName': 'ambhviewlaf25094bc5a26ffe42769ae339b3f402be29hivemetastoreLogin@df6ulzeuce.database.windows.net',
  498.         'hive.compactor.delta.num.threshold': '10',
  499.         'hive.exec.dynamic.partition': 'true',
  500.         'hive.server2.authentication': 'NONE',
  501.         'hive.stats.fetch.column.stats': 'true',
  502.         'hive.orc.compute.splits.num.threads': '10',
  503.         'hive.tez.smb.number.waves': '0.5',
  504.         'hive.convert.join.bucket.mapjoin.tez': 'false',
  505.         'hive.optimize.reducededuplication.min.reducer': '4',
  506.         'hive.auto.convert.sortmerge.join': 'true',
  507.         'hive.metastore.schema.verification': 'true',
  508.         'hive.server2.logging.operation.log.location': '/tmp/hive/operation_logs',
  509.         'hive.tez.input.format': 'org.apache.hadoop.hive.ql.io.HiveInputFormat',
  510.         'hive.exec.orc.default.compress': 'ZLIB',
  511.         'hive.support.concurrency': 'false',
  512.         'hive.compactor.check.interval': '300L',
  513.         'hive.compactor.delta.pct.threshold': '0.1f',
  514.         'hive.map.aggr.hash.force.flush.memory.threshold': '0.9'
  515.     },
  516.     'spark2-thrift-sparkconf': {
  517.         'spark.history.fs.logDirectory': '{{spark_history_dir}}',
  518.         'spark.dynamicAllocation.enabled': 'true',
  519.         'spark.yarn.queue': 'default',
  520.         'spark.dynamicAllocation.minExecutors': '0',
  521.         'spark.driver.extraLibraryPath': '{{spark_hadoop_lib_native}}',
  522.         'spark.hadoop.cacheConf': 'false',
  523.         'spark.master': '{{spark_thrift_master}}',
  524.         'spark.history.provider': 'org.apache.spark.deploy.history.FsHistoryProvider',
  525.         'spark.eventLog.dir': '{{spark_history_dir}}',
  526.         'spark.scheduler.mode': 'FAIR',
  527.         'spark.dynamicAllocation.initialExecutors': '0',
  528.         'spark.scheduler.allocation.file': '{{spark_conf}}/spark-thrift-fairscheduler.xml',
  529.         'spark.dynamicAllocation.maxExecutors': '10',
  530.         'spark.eventLog.enabled': 'true',
  531.         'spark.executor.extraLibraryPath': '{{spark_hadoop_lib_native}}',
  532.         'spark.shuffle.service.enabled': 'true'
  533.     },
  534.     'ranger-yarn-audit': {
  535.         'xasecure.audit.destination.solr.zookeepers': 'NONE',
  536.         'xasecure.audit.destination.solr.urls': '',
  537.         'xasecure.audit.destination.solr': 'false',
  538.         'xasecure.audit.destination.hdfs.batch.filespool.dir': '/var/log/hadoop/yarn/audit/hdfs/spool',
  539.         'xasecure.audit.destination.hdfs': 'true',
  540.         'xasecure.audit.destination.solr.batch.filespool.dir': '/var/log/hadoop/yarn/audit/solr/spool',
  541.         'xasecure.audit.provider.summary.enabled': 'false',
  542.         'xasecure.audit.destination.hdfs.dir': 'hdfs://NAMENODE_HOSTNAME:8020/ranger/audit',
  543.         'xasecure.audit.is.enabled': 'true'
  544.     },
  545.     'hive-interactive-env': {
  546.         'content': '\n      if [ "$SERVICE" = "cli" ]; then\n      if [ -z "$DEBUG" ]; then\n      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"\n      else\n      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"\n      fi\n      fi\n\n      # The heap size of the jvm stared by hive shell script can be controlled via:\n\n      if [ "$SERVICE" = "metastore" ]; then\n      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore\n      else\n      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n      fi\n\n      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"\n\n      # Larger heap size may be required when running queries over large number of files or partitions.\n      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n      # appropriate for hive server (hwi etc).\n\n\n      # Set HADOOP_HOME to point to a specific hadoop install directory\n      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n      # Hive Configuration Directory can be controlled by:\n      export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}}\n\n      # Add additional hcatalog jars\n      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then\n      export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\n      else\n      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar\n      fi\n\n      export METASTORE_PORT={{hive_metastore_port}}\n\n      # Spark assembly contains a conflicting copy of HiveConf from hive-1.2\n      export HIVE_SKIP_SPARK_ASSEMBLY=true',
  547.         'llap_heap_size': '7168',
  548.         'num_llap_nodes': '3',
  549.         'llap_queue_capacity': '80',
  550.         'llap_app_name': 'llap0',
  551.         'enable_hive_interactive': 'true',
  552.         'llap_java_opts': '-XX:+AlwaysPreTouch {% if java_version > 7 %}-XX:+UseG1GC -XX:TLABSize=8m -XX:+ResizeTLAB -XX:+UseNUMA -XX:+AggressiveOpts -XX:MetaspaceSize=1024m -XX:InitiatingHeapOccupancyPercent=80 -XX:MaxGCPauseMillis=200{% else %}-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC{% endif %}',
  553.         'num_retries_for_checking_llap_status': '10',
  554.         'slider_am_container_mb': '1024',
  555.         'hive_server_interactive_host': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net',
  556.         'hive_aux_jars': '/usr/lib/hdinsight-datalake/hadoop-azure-datalake-2.0.0-SNAPSHOT.jar,/usr/lib/hdinsight-datalake/azure-data-lake-store-sdk-2.0.4-SNAPSHOT.jar,/usr/lib/hdinsight-datalake/adls2-oauth2-token-provider-1.0.jar',
  557.         'llap_log_level': 'INFO'
  558.     },
  559.     'ranger-hdfs-plugin-properties': {
  560.         'common.name.for.certificate': '',
  561.         'hadoop.rpc.protection': 'authentication',
  562.         'ranger-hdfs-plugin-enabled': 'No',
  563.         'REPOSITORY_CONFIG_USERNAME': 'hadoop',
  564.         'REPOSITORY_CONFIG_PASSWORD': 'hadoop',
  565.         'policy_user': 'ambari-qa'
  566.     },
  567.     'pig-properties': {
  568.         'content': '\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# Pig configuration file. All values can be overwritten by command line\n# arguments; for a description of the properties, run\n#\n#     pig -h properties\n#\n\n############################################################################\n#\n# == Logging properties\n#\n\n# Location of pig log file. If blank, a file with a timestamped slug\n# (\'pig_1399336559369.log\') will be generated in the current working directory.\n#\n# pig.logfile=\n# pig.logfile=/tmp/pig-err.log\n\n# Log4j configuration file. Set at runtime with the -4 parameter. The source\n# distribution has a ./conf/log4j.properties.template file you can rename and\n# customize.\n#\n# log4jconf=./conf/log4j.properties\n\n# Verbose Output.\n# * false (default): print only INFO and above to screen\n# * true: Print all log messages to screen\n#\n# verbose=false\n\n# Omit timestamps on log messages. (default: false)\n#\n# brief=false\n\n# Logging level. debug=OFF|ERROR|WARN|INFO|DEBUG (default: INFO)\n#\n# debug=INFO\n\n# Roll up warnings across tasks, so that when millions of mappers suddenly cry\n# out in error they are partially silenced. (default, recommended: true)\n#\n# aggregate.warning=true\n\n# Should DESCRIBE pretty-print its schema?\n# * false (default): print on a single-line, suitable for pasting back in to your script\n# * true (recommended): prints on multiple lines with indentation, much more readable\n#\n# pig.pretty.print.schema=false\n\n# === Profiling UDFs  ===\n\n# Turn on UDF timers? This will cause two counters to be\n# tracked for every UDF and LoadFunc in your script: approx_microsecs measures\n# approximate time spent inside a UDF approx_invocations reports the approximate\n# number of times the UDF was invoked.\n#\n# * false (default): do not record timing information of UDFs.\n# * true: report UDF performance. Uses more counters, but gives more insight\n#   into script operation\n#\n# pig.udf.profile=false\n\n# Specify frequency of profiling (default: every 100th).\n# pig.udf.profile.frequency=100\n\n############################################################################\n#\n# == Site-specific Properties\n#\n\n# Execution Mode. Local mode is much faster, but only suitable for small amounts\n# of data. Local mode interprets paths on the local file system; Mapreduce mode\n# on the HDFS. Read more under \'Execution Modes\' within the Getting Started\n# documentation.\n#\n# * mapreduce (default): use the Hadoop cluster defined in your Hadoop config files\n# * local: use local mode\n# * tez: use Tez on Hadoop cluster\n# * tez_local: use Tez local mode\n#\n# exectype=mapreduce\n\n# Bootstrap file with default statements to execute in every Pig job, similar to\n# .bashrc.  If blank, uses the file \'.pigbootup\' from your home directory; If a\n# value is supplied, that file is NOT loaded.  This does not do tilde expansion\n# -- you must supply the full path to the file.\n#\n# pig.load.default.statements=\n# pig.load.default.statements=/home/bob/.pigrc\n\n# Kill all waiting/running MR jobs upon a MR job failure? (default: false) If\n# false, jobs that can proceed independently will do so unless a parent stage\n# fails. If true, the failure of any stage in the script kills all jobs.\n#\n# stop.on.failure=false\n\n# File containing the pig script to run. Rarely set in the properties file.\n# Commandline: -f\n#\n# file=\n\n# Jarfile to load, colon separated. Rarely used.\n#\n# jar=\n\n# Register additional .jar files to use with your Pig script.\n# Most typically used as a command line option (see http://pig.apache.org/docs/r0.12.0/basic.html#register):\n#\n#     pig -Dpig.additional.jars=hdfs://nn.mydomain.com:9020/myjars/my.jar\n#\n# pig.additional.jars=<colon separated list of jars with optional wildcards>\n# pig.additional.jars=/usr/local/share/pig/pig/contrib/piggybank/java/piggybank.jar:/usr/local/share/pig/datafu/datafu-pig/build/libs/datafu-pig-1.2.1.jar\n\n# Specify potential packages to which a UDF or a group of UDFs belong,\n# eliminating the need to qualify the UDF on every call. See\n# http://pig.apache.org/docs/r0.12.0/udf.html#use-short-names\n#\n# Commandline use:\n#\n#     pig \\\n#       -Dpig.additional.jars=$PIG_HOME/contrib/piggybank/java/piggybank.jar:$PIG_HOME/../datafu/datafu-pig/build/libs/datafu-pig-1.2.1.jar \\\n#       -Dudf.import.list=org.apache.pig.piggybank.evaluation:datafu.pig.util \\\n#       happy_job.pig\n#\n# udf.import.list=<colon separated list of imports>\n# udf.import.list=org.apache.pig.piggybank.evaluation:datafu.pig.bags:datafu.pig.hash:datafu.pig.stats:datafu.pig.util\n\n#\n# Reuse jars across jobs run by the same user? (default: false) If enabled, jars\n# are placed in ${pig.user.cache.location}/${user.name}/.pigcache. Since most\n# jars change infrequently, this gives a minor speedup.\n#\n# pig.user.cache.enabled=false\n\n# Base path for storing jars cached by the pig.user.cache.enabled feature. (default: /tmp)\n#\n# pig.user.cache.location=/tmp\n\n# Replication factor for cached jars. If not specified mapred.submit.replication\n# is used, whose default is 10.\n#\n# pig.user.cache.replication=10\n\n# Default UTC offset. (default: the host\'s current UTC offset) Supply a UTC\n# offset in Java\'s timezone format: e.g., +08:00.\n#\n# pig.datetime.default.tz=\n\n############################################################################\n#\n# Memory impacting properties\n#\n\n# Amount of memory (as fraction of heap) allocated to bags before a spill is\n# forced. Default is 0.2, meaning 20% of available memory. Note that this memory\n# is shared across all large bags used by the application. See\n# http://pig.apache.org/docs/r0.12.0/perf.html#memory-management\n#\n# pig.cachedbag.memusage=0.2\n\n# Don\'t spill bags smaller than this size (bytes). Default: 5000000, or about\n# 5MB. Usually, the more spilling the longer runtime, so you might want to tune\n# it according to heap size of each task and so forth.\n#\n# pig.spill.size.threshold=5000000\n\n# EXPERIMENTAL: If a file bigger than this size (bytes) is spilled -- thus\n# freeing a bunch of ram -- tell the JVM to perform garbage collection.  This\n# should help reduce the number of files being spilled, but causes more-frequent\n# garbage collection. Default: 40000000 (about 40 MB)\n#\n# pig.spill.gc.activation.size=40000000\n\n# Maximum amount of data to replicate using the distributed cache when doing\n# fragment-replicated join. (default: 1000000000, about 1GB) Consider increasing\n# this in a production environment, but carefully.\n#\n# pig.join.replicated.max.bytes=1000000000\n\n# Fraction of heap available for the reducer to perform a skewed join. A low\n# fraction forces Pig to use more reducers, but increases the copying cost. See\n# http://pig.apache.org/docs/r0.12.0/perf.html#skewed-joins\n#\n# pig.skewedjoin.reduce.memusage=0.3\n\n#\n# === SchemaTuple ===\n#\n# The SchemaTuple feature (PIG-2632) uses a tuple\'s schema (when known) to\n# generate a custom Java class to hold records. Otherwise, tuples are loaded as\n# a plain list that is unaware of its contents\' schema -- and so each element\n# has to be wrapped as a Java object on its own. This can provide more efficient\n# CPU utilization, serialization, and most of all memory usage.\n#\n# This feature is considered experimental and is off by default. You can\n# selectively enable it for specific operations using pig.schematuple.udf,\n# pig.schematuple.load, pig.schematuple.fr_join and pig.schematuple.merge_join\n#\n\n# Enable the SchemaTuple optimization in all available cases? (default: false; recommended: true)\n#\n# pig.schematuple=false\n\n# EXPERIMENTAL: Use SchemaTuples with UDFs (default: value of pig.schematuple).\n# pig.schematuple.udf=false\n\n# EXPERIMENTAL, CURRENTLY NOT IMPLEMENTED, but in the future, LoadFunc\'s with\n# known schemas should output SchemaTuples. (default: value of pig.schematuple)\n# pig.schematuple.load=false\n\n# EXPERIMENTAL: Use SchemaTuples in replicated joins. The potential memory\n# saving here is significant. (default: value of pig.schematuple)\n# pig.schematuple.fr_join=false\n\n# EXPERIMENTAL: Use SchemaTuples in merge joins. (default: value of pig.schematuple).\n# pig.schematuple.merge_join=false\n\n############################################################################\n#\n# Serialization options\n#\n\n# Omit empty part files from the output? (default: false)\n#\n# * false (default): reducers generates an output file, even if output is empty\n# * true (recommended): do not generate zero-byte part files\n#\n# The default behavior of MapReduce is to generate an empty file for no data, so\n# Pig follows that. But many small files can cause annoying extra map tasks and\n# put load on the HDFS, so consider setting this to \'true\'\n#\n# pig.output.lazy=false\n\n#\n# === Tempfile Handling\n#\n\n# EXPERIMENTAL: Storage format for temporary files generated by intermediate\n# stages of Pig jobs. This can provide significant speed increases for certain\n# codecs, as reducing the amount of data transferred to and from disk can more\n# than make up for the cost of compression/compression. Recommend that you set\n# up LZO compression in Hadoop and specify tfile storage.\n#\n# Compress temporary files?\n# * false (default): do not compress\n# * true (recommended): compress temporary files.\n#\n# pig.tmpfilecompression=false\n# pig.tmpfilecompression=true\n\n# Tempfile storage container type.\n#\n# * tfile (default, recommended): more efficient, but only supports supports gz(gzip) and lzo compression.\n#   https://issues.apache.org/jira/secure/attachment/12396286/TFile%20Specification%2020081217.pdf\n# * seqfile: only supports gz(gzip), lzo, snappy, and bzip2 compression\n#\n# pig.tmpfilecompression.storage=tfile\n\n# Codec types for intermediate job files. tfile supports gz(gzip) and lzo;\n# seqfile support gz(gzip), lzo, snappy, bzip2\n#\n# * lzo (recommended with caveats): moderate compression, low cpu burden;\n#   typically leads to a noticeable speedup. Best default choice, but you must\n#   set up LZO independently due to license incompatibility\n# * snappy: moderate compression, low cpu burden; typically leads to a noticeable speedup..\n# * gz (default): higher compression, high CPU burden. Typically leads to a noticeable slowdown.\n# * bzip2: most compression, major CPU burden. Typically leads to a noticeable slowdown.\n#\n# pig.tmpfilecompression.codec=gzip\n\n#\n# === Split Combining\n#\n\n#\n# Should pig try to combine small files for fewer map tasks? This improves the\n# efficiency of jobs with many small input files, reduces the overhead on the\n# jobtracker, and reduces the number of output files a map-only job\n# produces. However, it only works with certain loaders and increases non-local\n# map tasks. See http://pig.apache.org/docs/r0.12.0/perf.html#combine-files\n#\n# * false (default, recommended): _do_ combine files\n# * true: do not combine files\n#\n# pig.noSplitCombination=false\n\n#\n# Size, in bytes, of data to be processed by a single map. Smaller files are\n# combined untill this size is reached. If unset, defaults to the file system\'s\n# default block size.\n#\n# pig.maxCombinedSplitSize=\n\n# ###########################################################################\n#\n# Execution options\n#\n\n# Should pig omit combiners? (default, recommended: false -- meaning pig _will_\n# use combiners)\n#\n# When combiners work well, they eliminate a significant amount of\n# data. However, if they do not eliminate much data -- say, a DISTINCT operation\n# that only eliminates 5% of the records -- they add a noticeable overhead to\n# the job. So the recommended default is false (use combiners), selectively\n# disabling them per-job:\n#\n#     pig -Dpig.exec.nocombiner=true distinct_but_not_too_much.pig\n#\n# pig.exec.nocombiner=false\n\n# EXPERIMENTAL: Aggregate records in map task before sending to the combiner?\n# (default: false, 10; recommended: true, 10). In cases where there is a massive\n# reduction of data in the aggregation step, pig can do a first pass of\n# aggregation before the data even leaves the mapper, saving much serialization\n# overhead. It\'s off by default but can give a major improvement to\n# group-and-aggregate operations. Pig skips partial aggregation unless reduction\n# is better than a factor of minReduction (default: 10). See\n# http://pig.apache.org/docs/r0.12.0/perf.html#hash-based-aggregation\n#\n# pig.exec.mapPartAgg=false\n# pig.exec.mapPartAgg.minReduction=10\n\n#\n# === Control how many reducers are used.\n#\n\n# Estimate number of reducers naively using a fixed amount of data per\n# reducer. Optimally, you have both fewer reducers than available reduce slots,\n# and reducers that are neither getting too little data (less than a half-GB or\n# so) nor too much data (more than 2-3 times the reducer child process max heap\n# size). The default of 1000000000 (about 1GB) is probably low for a production\n# cluster -- however it\'s much worse to set this too high (reducers spill many\n# times over in group-sort) than too low (delay waiting for reduce slots).\n#\n# pig.exec.reducers.bytes.per.reducer=1000000000\n\n#\n# Don\'t ever use more than this many reducers. (default: 999)\n#\n# pig.exec.reducers.max=999\n\n#\n# === Local mode for small jobs\n#\n\n# EXPERIMENTAL: Use local mode for small jobs? If true, jobs with input data\n# size smaller than pig.auto.local.input.maxbytes bytes and one or no reducers\n# are run in local mode, which is much faster. Note that file paths are still\n# interpreted as pig.exectype implies.\n#\n# * true (recommended): allow local mode for small jobs, which is much faster.\n# * false (default): always use pig.exectype.\n#\n# pig.auto.local.enabled=false\n\n#\n# Definition of a small job for the pig.auto.local.enabled feature. Only jobs\n# with less than this may bytes are candidates to run locally (default:\n# 100000000 bytes, about 1GB)\n#\n# pig.auto.local.input.maxbytes=100000000\n\n############################################################################\n#\n# Security Features\n#\n\n# Comma-delimited list of commands/operators that are disallowed. This security\n# feature can be used by administrators to block use of certain commands by\n# users.\n#\n# * <blank> (default): all commands and operators are allowed.\n# * fs,set (for example): block all filesystem commands and config changes from pig scripts.\n#\n# pig.blacklist=\n# pig.blacklist=fs,set\n\n# Comma-delimited list of the only commands/operators that are allowed. This\n# security feature can be used by administrators to block use of certain\n# commands by users.\n#\n# * <blank> (default): all commands and operators not on the pig.blacklist are allowed.\n# * load,store,filter,group: only LOAD, STORE, FILTER, GROUP\n#   from pig scripts. All other commands and operators will fail.\n#\n# pig.whitelist=\n# pig.whitelist=load,store,filter,group\n\n#####################################################################\n#\n# Advanced Site-specific Customizations\n#\n\n# Remove intermediate output files?\n#\n# * true (default, recommended): remove the files\n# * false: do NOT remove the files. You must clean them up yourself.\n#\n# Keeping them is useful for advanced debugging, but can be dangerous -- you\n# must clean them up yourself.  Inspect the intermediate outputs with\n#\n#     LOAD \'/path/to/tmp/file\' USING org.apache.pig.impl.io.TFileStorage();\n#\n# (Or ...SequenceFileInterStorage if pig.tmpfilecompression.storage is seqfile)\n#\n# pig.delete.temp.files=true\n\n# EXPERIMENTAL: A Pig Progress Notification Listener (PPNL) lets you wire pig\'s\n# progress into your visibility stack. To use a PPNL, supply the fully qualified\n# class name of a PPNL implementation. Note that only one PPNL can be set up, so\n# if you need several, write a PPNL that will chain them.\n#\n# See https://github.com/twitter/ambrose for a pretty awesome one of these\n#\n# pig.notification.listener=<fully qualified class name of a PPNL implementation>\n\n# String argument to pass to your PPNL constructor (optional). Only a single\n# string value is allowed. (default none)\n#\n# pig.notification.listener.arg=<somevalue>\n\n# EXPERIMENTAL: Class invoked to estimate the number of reducers to use.\n# (default: org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.InputSizeReducerEstimator)\n#\n# If you don\'t know how or why to write a PigReducerEstimator, you\'re unlikely\n# to use this. By default, the naive mapReduceLayer.InputSizeReducerEstimator is\n# used, but you can specify anything implementing the interface\n# org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigReducerEstimator\n#\n# pig.exec.reducer.estimator=<fully qualified class name of a PigReducerEstimator implementation>\n\n# Optional String argument to pass to your PigReducerEstimator. (default: none;\n# a single String argument is allowed).\n#\n# pig.exec.reducer.estimator.arg=<somevalue>\n\n# Class invoked to report the size of reducers output. By default, the reducers\'\n# output is computed as the total size of output files. But not every storage is\n# file-based, and so this logic can be replaced by implementing the interface\n# org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigStatsOutputSizeReader\n# If you need to register more than one reader, you can register them as a comma\n# separated list. Every reader implements a boolean supports(POStore sto) method.\n# When there are more than one reader, they are consulted in order, and the\n# first one whose supports() method returns true will be used.\n#\n# pig.stats.output.size.reader=<fully qualified class name of a PigStatsOutputSizeReader implementation>\n# pig.stats.output.size.reader.unsupported=<comma separated list of StoreFuncs that are not supported by this reader>\n\n# By default, Pig retrieves TaskReports for every launched task to compute\n# various job statistics. But this can cause OOM if the number of tasks is\n# large. In such case, you can disable it by setting this property to true.\n# pig.stats.notaskreport=false\n\n#\n# Override hadoop configs programatically\n#\n# By default, Pig expects hadoop configs (hadoop-site.xml and core-site.xml)\n# to be present on the classpath. There are cases when these configs are\n# needed to be passed programatically, such as while using the PigServer API.\n# In such cases, you can override hadoop configs by setting the property\n# "pig.use.overriden.hadoop.configs".\n#\n# When this property is set to true, Pig ignores looking for hadoop configs\n# in the classpath and instead picks it up from Properties/Configuration\n# object passed to it.\n#\n# pig.use.overriden.hadoop.configs=false\n\n# Implied LoadFunc for the LOAD operation when no USING clause is\n# present. Supply the fully qualified class name of a LoadFunc\n# implementation. Note: setting this means you will have to modify most code\n# brought in from elsewhere on the web, as people generally omit the USING\n# clause for TSV files.\n#\n# * org.apache.pig.builtin.PigStorage (default): the traditional tab-separated-values LoadFunc\n# * my.custom.udfcollection.MyCustomLoadFunc (for example): use MyCustomLoadFunc instead\n#\n# pig.default.load.func=<fully qualified class name of a LoadFunc implementation>\n\n# The implied StoreFunc for STORE operations with no USING clause. Supply the\n# fully qualified class name of a StoreFunc implementation.\n#\n# * org.apache.pig.builtin.PigStorage (default): the traditional tab-separated-values StoreFunc.\n# * my.custom.udfcollection.MyCustomStoreFunc (for example): use MyCustomStoreFunc instead\n#\n# pig.default.store.func=<fully qualified class name of a StoreFunc implementation>\n\n# Recover jobs when the application master is restarted? (default: false). This\n# is a Hadoop 2 specific property; enable it to take advantage of AM recovery.\n#\n# pig.output.committer.recovery.support=true\n\n# Should scripts check to prevent multiple stores writing to the same location?\n# (default: false) When set to true, stops the execution of script right away.\n#\npig.location.check.strict=false\n\n# In addition to the fs-style commands (rm, ls, etc) Pig can now execute\n# SQL-style DDL commands, eg "sql create table pig_test(name string, age int)".\n# The only implemented backend is hcat, and luckily that\'s also the default.\n#\n# pig.sql.type=hcat\n\n# Path to the hcat executable, for use with pig.sql.type=hcat (default: null)\n#\nhcat.bin=/usr/local/hcat/bin/hcat\n\n###########################################################################\n#\n# Overrides for extreme environments\n#\n# (Most people won\'t have to adjust these parameters)\n#\n\n\n# Limit the pig script length placed in the jobconf xml. (default:10240)\n# Extremely long queries can waste space in the JobConf; since its contents are\n# only advisory, the default is fine unless you are retaining it for forensics.\n#\n# pig.script.max.size=10240\n\n# Disable use of counters by Pig. Note that the word \'counter\' is singular here.\n#\n# * false (default, recommended): do NOT disable counters.\n# * true: disable counters. Set this to true only when your Pig job will\n#   otherwise die because of using more counters than hadoop configured limit\n#\n# pig.disable.counter=true\n\n# Sample size (per-mapper, in number of rows) the ORDER..BY operation\'s\n# RandomSampleLoader uses to estimate how your data should be\n# partitioned. (default, recommended: 100 rows per task) Increase this if you\n# have exceptionally large input splits and are unhappy with the reducer skew.\n#\n# pig.random.sampler.sample.size=100\n\n# Process an entire script at once, reducing the amount of work and number of\n# tasks? (default, recommended: true) See http://pig.apache.org/docs/r0.12.0/perf.html#multi-query-execution\n#\n# MultiQuery optimization is very useful, and so the recommended default is\n# true. You may find a that a script fails to compile under MultiQuery. If so,\n# disable it at runtime:\n#\n#     pig -no_multiquery script_that_makes_pig_sad.pig\n#\n# opt.multiquery=true\n\n# For small queries, fetch data directly from the HDFS. (default, recommended:\n# true). If you want to force Pig to launch a MR job, for example when you\'re\n# testing a live cluster, disable with the -N option. See PIG-3642.\n#\n# opt.fetch=true\n\n# Enable auto/grace parallelism in tez. These should be used by default unless\n# you encounter some bug in automatic parallelism. If pig.tez.auto.parallelism\n# to false, use 1 as default parallelism\npig.tez.auto.parallelism=true\npig.tez.grace.parallelism=true\n\n###########################################################################\n#\n# Streaming properties\n#\n\n# Define what properties will be set in the streaming environment. Just set this\n# property to a comma-delimited list of properties to set, and those properties\n# will be set in the environment.\n#\n# pig.streaming.environment=<comma-delimited list of propertes>\n\n# Specify a comma-delimited list of local files to ship to distributed cache for\n# streaming job.\n#\n# pig.streaming.ship.files=<comma-delimited list of local files>\n\n# Specify a comma-delimited list of remote files to cache on distributed cache\n# for streaming job.\n#\n# pig.streaming.cache.files=<comma-delimited list of remote files>\n\n# Specify the python command to be used for python streaming udf. By default,\n# python is used, but you can overwrite it with a non-default version such as\n# python2.7.\n#\n# pig.streaming.udf.python.command=python'
  569.     },
  570.     'oozie-log4j': {
  571.         'content': '\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License. See accompanying LICENSE file.\n#\n\n# If the Java System property \'oozie.log.dir\' is not defined at Oozie start up time\n# XLogService sets its value to \'${oozie.home}/logs\'\n\nlog4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.oozie.DatePattern=\'.\'yyyy-MM-dd-HH\nlog4j.appender.oozie.File=${oozie.log.dir}/oozie.log\nlog4j.appender.oozie.Append=true\nlog4j.appender.oozie.layout=org.apache.log4j.PatternLayout\nlog4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n\n\nlog4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.oozieops.DatePattern=\'.\'yyyy-MM-dd\nlog4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log\nlog4j.appender.oozie.MaxFileSize=80MB\nlog4j.appender.oozie.MaxBackupIndex=10\nlog4j.appender.oozieops.Append=true\nlog4j.appender.oozieops.layout=org.apache.log4j.PatternLayout\nlog4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n\n\nlog4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.oozieinstrumentation.DatePattern=\'.\'yyyy-MM-dd\nlog4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log\nlog4j.appender.oozieinstrumentation.Append=true\nlog4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout\nlog4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n\n\nlog4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.oozieaudit.DatePattern=\'.\'yyyy-MM-dd\nlog4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log\nlog4j.appender.oozieaudit.Append=true\nlog4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout\nlog4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n\n\nlog4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.openjpa.DatePattern=\'.\'yyyy-MM-dd\nlog4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log\nlog4j.appender.openjpa.Append=true\nlog4j.appender.openjpa.layout=org.apache.log4j.PatternLayout\nlog4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n\n\nlog4j.logger.openjpa=INFO, openjpa, ETW, FullPIILogs\nlog4j.logger.oozieops=INFO, oozieops, ETW, FullPIILogs\nlog4j.logger.oozieinstrumentation=ALL, oozieinstrumentation, ETW, FullPIILogs\nlog4j.logger.oozieaudit=ALL, oozieaudit, ETW, FullPIILogs\nlog4j.logger.org.apache.oozie=INFO, oozie, ETW, FullPIILogs\nlog4j.logger.org.apache.hadoop=WARN, oozie, ETW, FullPIILogs\nlog4j.logger.org.mortbay=WARN, oozie, ETW, FullPIILogs\nlog4j.logger.org.hsqldb=WARN, oozie, ETW, FullPIILogs\nlog4j.logger.org.apache.oozie.action.hadoop=ALL, oozie, ETW, FullPIILogs\nlog4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie, ETW, FullPIILogs\nlog4j.rootLogger=INFO, ETW, FullPIILogs\n\n#EtwLog Appender\n\n#sends HDP service logs to customer storage account\nlog4j.appender.ETW=com.microsoft.log4jappender.EtwAppender\nlog4j.appender.ETW.source=HadoopServiceLog\nlog4j.appender.ETW.component=${component}\nlog4j.appender.ETW.layout=org.apache.log4j.TTCCLayout\nlog4j.appender.ETW.OSType=Linux\n\n# Full PII log Appender\n# Sends  PII HDP service logs to our storage account\nlog4j.appender.FullPIILogs=com.microsoft.log4jappender.FullPIILogAppender\nlog4j.appender.FullPIILogs.component=oozie\nlog4j.appender.FullPIILogs.layout=org.apache.log4j.TTCCLayout\nlog4j.appender.FullPIILogs.Threshold=INFO\nlog4j.appender.FullPIILogs.source=CentralFullServicePIILogs\nlog4j.appender.FullPIILogs.OSType=Linux\nlog4j.appender.FullPIILogs.SuffixHadoopEntryType=true'
  572.     },
  573.     'spark2-thrift-fairscheduler': {
  574.         'fairscheduler_content': '<?xml version="1.0"?>\n            <allocations>\n            <pool name="default">\n            <schedulingMode>FAIR</schedulingMode>\n            <weight>1</weight>\n            <minShare>2</minShare>\n            </pool>\n            </allocations>'
  575.     },
  576.     'ams-hbase-site': {
  577.         'hbase.master.info.bindAddress': '0.0.0.0',
  578.         'hbase.normalizer.enabled': 'false',
  579.         'phoenix.mutate.batchSize': '10000',
  580.         'hbase.zookeeper.property.tickTime': '6000',
  581.         'phoenix.query.keepAliveMs': '300000',
  582.         'hbase.master.wait.on.regionservers.mintostart': '1',
  583.         'hbase.replication': 'false',
  584.         'hbase.hregion.majorcompaction': '0',
  585.         'dfs.client.read.shortcircuit': 'true',
  586.         'hbase.regionserver.global.memstore.lowerLimit': '0.4',
  587.         'hbase.hregion.memstore.block.multiplier': '4',
  588.         'hbase.hregion.memstore.flush.size': '134217728',
  589.         'hbase.rootdir': '/ams/hbase',
  590.         'hbase.zookeeper.property.clientPort': '{{zookeeper_clientPort}}',
  591.         'phoenix.spool.directory': '${hbase.tmp.dir}/phoenix-spool',
  592.         'phoenix.query.rowKeyOrderSaltedTable': 'true',
  593.         'hbase.client.scanner.timeout.period': '300000',
  594.         'phoenix.groupby.maxCacheSize': '307200000',
  595.         'hbase.normalizer.period': '600000',
  596.         'hbase.snapshot.enabled': 'false',
  597.         'hbase.regionserver.global.memstore.upperLimit': '0.5',
  598.         'phoenix.query.spoolThresholdBytes': '20971520',
  599.         'zookeeper.session.timeout': '120000',
  600.         'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase-tmp',
  601.         'hfile.block.cache.size': '0.3',
  602.         'hbase.rpc.timeout': '300000',
  603.         'hbase.hregion.max.filesize': '4294967296',
  604.         'hbase.regionserver.port': '61320',
  605.         'hbase.regionserver.thread.compaction.small': '3',
  606.         'hbase.master.info.port': '61310',
  607.         'phoenix.coprocessor.maxMetaDataCacheSize': '20480000',
  608.         'phoenix.query.maxGlobalMemoryPercentage': '15',
  609.         'hbase.zookeeper.quorum': '{{zookeeper_quorum_hosts}}',
  610.         'hbase.regionserver.info.port': '61330',
  611.         'zookeeper.znode.parent': '/ams-hbase-unsecure',
  612.         'hbase.hstore.blockingStoreFiles': '200',
  613.         'hbase.master.port': '61300',
  614.         'hbase.zookeeper.leaderport': '61388',
  615.         'hbase.master.normalizer.class': 'org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer',
  616.         'hbase.regionserver.thread.compaction.large': '2',
  617.         'phoenix.query.timeoutMs': '300000',
  618.         'hbase.local.dir': '${hbase.tmp.dir}/local',
  619.         'hbase.cluster.distributed': 'true',
  620.         'zookeeper.session.timeout.localHBaseCluster': '120000',
  621.         'hbase.client.scanner.caching': '10000',
  622.         'hbase.zookeeper.property.dataDir': '${hbase.tmp.dir}/zookeeper',
  623.         'phoenix.sequence.saltBuckets': '2',
  624.         'phoenix.coprocessor.maxServerCacheTimeToLiveMs': '60000',
  625.         'hbase.hstore.flusher.count': '2',
  626.         'hbase.zookeeper.peerport': '61288'
  627.     },
  628.     'hive-env': {
  629.         'hive.heapsize': '10752',
  630.         'hive_user_nproc_limit': '16000',
  631.         'hive_txn_acid': 'off',
  632.         'hive_existing_mssql_server_database': 'ambhviewlaf25094bc5a26ffe42769ae339b3f402be29hivemetastore',
  633.         'hive_hostname': 'df6ulzeuce.database.windows.net',
  634.         'hive_existing_mssql_server_host': 'df6ulzeuce.database.windows.net',
  635.         'hive_database_name': 'ambhviewlaf25094bc5a26ffe42769ae339b3f402be29hivemetastore',
  636.         'hive_exec_orc_storage_strategy': 'SPEED',
  637.         'hive_pid_dir': '/var/run/hive',
  638.         'hive_user': 'hive',
  639.         'hcat_log_dir': '/var/log/webhcat',
  640.         'hive_database': 'Existing MSSQL Server database with SQL authentication',
  641.         'hive.client.heapsize': '512',
  642.         'hcat_pid_dir': '/var/run/webhcat',
  643.         'hive_ambari_database': 'MySQL',
  644.         'webhcat_user': 'hcat',
  645.         'hive_security_authorization': 'None',
  646.         'content': '\n      export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB\n      if [ "$SERVICE" = "cli" ]; then\n      if [ -z "$DEBUG" ]; then\n      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit"\n      else\n      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"\n      fi\n      fi\n\n      # The heap size of the jvm stared by hive shell script can be controlled via:\n\n      if [ "$SERVICE" = "metastore" ]; then\n      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore\n      else\n      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n      fi\n\n      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"\n\n      # Larger heap size may be required when running queries over large number of files or partitions.\n      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n      # appropriate for hive server (hwi etc).\n\n\n      # Set HADOOP_HOME to point to a specific hadoop install directory\n      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n      export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}}\n\n      # Hive Configuration Directory can be controlled by:\n      export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}\n\n      # Folder containing extra libraries required for hive compilation/execution can be controlled by:\n      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then\n      if [ -f "${HIVE_AUX_JARS_PATH}" ]; then\n      export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\n      elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then\n      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n      fi\n      elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then\n      export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n      fi\n\n      export METASTORE_PORT={{hive_metastore_port}}\n\n      {% if sqla_db_used or lib_dir_available %}\n      export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}"\n      export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}"\n      {% endif %}',
  647.         'hive_database_type': 'mssql',
  648.         'hive_timeline_logging_enabled': 'true',
  649.         'hive_user_nofile_limit': '32000',
  650.         'hive.metastore.heapsize': '3584',
  651.         'hcat_user': 'hcat',
  652.         'hive_log_dir': '/var/log/hive'
  653.     },
  654.     'spark2-log4j-properties': {
  655.         'content': '\n# Set everything to be logged to the console\nlog4j.rootCategory=INFO, console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n# Settings to quiet third party logs that are too verbose\nlog4j.logger.org.eclipse.jetty=WARN\nlog4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR\nlog4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO\nlog4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO'
  656.     },
  657.     'ranger-yarn-policymgr-ssl': {
  658.         'xasecure.policymgr.clientssl.keystore': '/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-keystore.jks',
  659.         'xasecure.policymgr.clientssl.truststore': '/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-truststore.jks',
  660.         'xasecure.policymgr.clientssl.truststore.credential.file': 'jceks://file{{credential_file}}',
  661.         'xasecure.policymgr.clientssl.keystore.password': 'myKeyFilePassword',
  662.         'xasecure.policymgr.clientssl.truststore.password': 'changeit',
  663.         'xasecure.policymgr.clientssl.keystore.credential.file': 'jceks://file{{credential_file}}'
  664.     },
  665.     'hadoop-env': {
  666.         'proxyuser_group': 'users',
  667.         'hdfs_user_nproc_limit': '65536',
  668.         'hdfs_log_dir_prefix': '/var/log/hadoop',
  669.         'keyserver_host': ' ',
  670.         'namenode_opt_maxnewsize': '200m',
  671.         'nfsgateway_heapsize': '1024',
  672.         'dtnode_heapsize': '1024m',
  673.         'namenode_heapsize': '1024m',
  674.         'namenode_opt_maxpermsize': '256m',
  675.         'namenode_opt_permsize': '128m',
  676.         'hdfs_tmp_dir': '/tmp',
  677.         'hdfs_user': 'hdfs',
  678.         'content': '\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}\n\n# Hadoop Configuration Directory\n#TODO: if env var set that can cause problems\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE="{{hadoop_heapsize}}"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +\'%Y%m%d%H%M\'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dwhitelist.filename=core-whitelist.res,coremanual-whitelist.res -Dcomponent=namenode ${HADOOP_NAMENODE_OPTS}"\nHADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +\'%Y%m%d%H%M\'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"\n\nHADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"\nHADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS -Dwhitelist.filename=core-whitelist.res,coremanual-whitelist.res -Dcomponent=datanode ${HADOOP_DATANODE_OPTS}"\nHADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +\'%Y%m%d%H%M\'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dwhitelist.filename=core-whitelist.res,coremanual-whitelist.res -Dcomponent=secondarynamenode ${HADOOP_SECONDARYNAMENODE_OPTS}"\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER={{hdfs_user}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync\'d from.  Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY -Dwhitelist.filename=core-whitelist.res,coremanual-whitelist.res -Dcomponent=resourcemanager"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See \'man nice\'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=""\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n#Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\nif [ -d "/usr/lib/tez" ]; then\n  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n#Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64',
  679.         'hdfs_user_nofile_limit': '128000',
  680.         'namenode_opt_newsize': '200m',
  681.         'keyserver_port': '',
  682.         'hadoop_root_logger': 'INFO,RFA',
  683.         'hadoop_heapsize': '1024',
  684.         'hadoop_pid_dir_prefix': '/var/run/hadoop'
  685.     },
  686.     'tez-interactive-site': {
  687.         'tez.runtime.pipelined.sorter.lazy-allocate.memory': 'true',
  688.         'tez.lib.uris': '/hdp/apps/${hdp.version}/tez_hive2/tez.tar.gz',
  689.         'tez.runtime.pipelined-shuffle.enabled': 'false',
  690.         'tez.grouping.node.local.only': 'true',
  691.         'tez.runtime.shuffle.memory.limit.percent': '0.25',
  692.         'tez.dag.recovery.enabled': 'false',
  693.         'tez.runtime.shuffle.fetch.buffer.percent': '0.6',
  694.         'tez.runtime.report.partition.stats': 'true',
  695.         'tez.runtime.shuffle.fetch.verify-disk-checksum': 'false',
  696.         'tez.session.am.dag.submit.timeout.secs': '3600',
  697.         'tez.am.resource.memory.mb': '1024'
  698.     },
  699.     'yarn-site': {
  700.         'yarn.timeline-service.sqldb-store.driver': 'com.microsoft.sqlserver.jdbc.SQLServerDriver',
  701.         'yarn.resourcemanager.hostname': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net',
  702.         'yarn.node-labels.enabled': 'false',
  703.         'yarn.resourcemanager.scheduler.monitor.enable': 'false',
  704.         'yarn.resourcemanager.zk-num-retries': '1000',
  705.         'yarn.nodemanager.webapp.address': '0.0.0.0:30060',
  706.         'yarn.timeline-service.bind-host': '0.0.0.0',
  707.         'yarn.resourcemanager.ha.enabled': 'false',
  708.         'yarn.nodemanager.linux-container-executor.cgroups.hierarchy': 'hadoop-yarn',
  709.         'yarn.timeline-service.generic-application-history.hdinsight.filter-container-meta-info': 'true',
  710.         'yarn.timeline-service.webapp.address': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:8188',
  711.         'yarn.resourcemanager.state-store.max-completed-applications': '${yarn.resourcemanager.max-completed-applications}',
  712.         'yarn.timeline-service.enabled': 'true',
  713.         'yarn.nodemanager.recovery.enabled': 'true',
  714.         'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath': '',
  715.         'yarn.timeline-service.http-authentication.type': 'simple',
  716.         'yarn.nodemanager.container-metrics.unregister-delay-ms': '60000',
  717.         'yarn.resourcemanager.webapp.https.address': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:8090',
  718.         'yarn.timeline-service.entity-group-fs-store.summary-store': 'org.apache.hadoop.yarn.server.timeline.SqlDBTimelineStore',
  719.         'yarn.nodemanager.aux-services.spark2_shuffle.classpath': '{{stack_root}}/${hdp.version}/spark2/aux/*',
  720.         'yarn.resourcemanager.am.max-attempts': '2',
  721.         'yarn.nodemanager.log-aggregation.debug-enabled': 'false',
  722.         'yarn.scheduler.maximum-allocation-vcores': '8',
  723.         'yarn.nodemanager.health-checker.script.timeout-ms': '60000',
  724.         'yarn.nodemanager.vmem-pmem-ratio': '2.1',
  725.         'yarn.resourcemanager.max-completed-applications': '1000',
  726.         'yarn.resourcemanager.nodes.exclude-path': '/etc/hadoop/conf/yarn.exclude',
  727.         'yarn.nodemanager.linux-container-executor.cgroups.mount': 'false',
  728.         'yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size': '10',
  729.         'yarn.nodemanager.aux-services.spark2_shuffle.class': 'org.apache.spark.network.yarn.YarnShuffleService',
  730.         'yarn.log.server.url': 'http://hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:19888/jobhistory/logs',
  731.         'yarn.application.classpath': '$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*',
  732.         'yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled': 'false',
  733.         'yarn.nodemanager.resource.cpu-vcores': '8',
  734.         'yarn.nodemanager.local-dirs': '/mnt/resource/hadoop/yarn/local',
  735.         'yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage': 'false',
  736.         'yarn.nodemanager.remote-app-log-dir-suffix': 'logs',
  737.         'yarn.resourcemanager.connect.max-wait.ms': '7200000',
  738.         'yarn.resourcemanager.address': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:8050',
  739.         'yarn.timeline-service.entity-file.fs-support-append': 'false',
  740.         'yarn.scheduler.maximum-allocation-mb': '25600',
  741.         'yarn.nodemanager.container-monitor.interval-ms': '3000',
  742.         'yarn.node-labels.fs-store.retry-policy-spec': '2000, 500',
  743.         'yarn.resourcemanager.zk-acl': 'world:anyone:rwcda',
  744.         'yarn.timeline-service.leveldb-state-store.path': '/hadoop/yarn/timeline',
  745.         'yarn.timeline-service.address': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:10200',
  746.         'yarn.log-aggregation-enable': 'true',
  747.         'yarn.nodemanager.delete.debug-delay-sec': '600',
  748.         'yarn.timeline-service.store-class': 'org.apache.hadoop.yarn.server.timeline.HdInsightEntityGroupFSTimelineStore',
  749.         'yarn.timeline-service.client.retry-interval-ms': '1000',
  750.         'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes': 'org.apache.tez.dag.history.logging.ats.TimelineCachePluginImpl',
  751.         'hadoop.registry.zk.quorum': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:2181',
  752.         'yarn.nodemanager.aux-services': 'mapreduce_shuffle,spark_shuffle,spark2_shuffle',
  753.         'yarn.nodemanager.aux-services.mapreduce_shuffle.class': 'org.apache.hadoop.mapred.ShuffleHandler',
  754.         'yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage': '90',
  755.         'yarn.resourcemanager.zk-timeout-ms': '10000',
  756.         'yarn.resourcemanager.fs.state-store.uri': ' ',
  757.         'yarn.nodemanager.linux-container-executor.group': 'hadoop',
  758.         'yarn.nodemanager.remote-app-log-dir': '/app-logs',
  759.         'yarn.nodemanager.aux-services.spark_shuffle.classpath': '{{stack_root}}/${hdp.version}/spark/aux/*',
  760.         'yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds': '3600',
  761.         'yarn.resourcemanager.fs.state-store.retry-policy-spec': '2000, 500',
  762.         'yarn.timeline-service.generic-application-history.store-class': 'org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore',
  763.         'yarn.timeline-service.http-authentication.proxyuser.root.groups': '*',
  764.         'yarn.app.mapreduce.am.create-intermediate-jh-base-dir': 'true',
  765.         'yarn.nodemanager.disk-health-checker.min-healthy-disks': '0.25',
  766.         'yarn.resourcemanager.work-preserving-recovery.enabled': 'false',
  767.         'yarn.resourcemanager.resource-tracker.address': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:8025',
  768.         'yarn.resourcemanager.system-metrics-publisher.enabled': 'true',
  769.         'yarn.resourcemanager.scheduler.class': 'org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler',
  770.         'yarn.nodemanager.resource.memory-mb': '25600',
  771.         'yarn.timeline-service.entity-group-fs-store.active-dir': '/atshistory/active',
  772.         'yarn.timeline-service.ttl-ms': '2678400000',
  773.         'yarn.nodemanager.resource.percentage-physical-cpu-limit': '80',
  774.         'yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb': '1000',
  775.         'yarn.timeline-service.sqldb-store.connection-url': 'jdbc:sqlserver://df6ulzeuce.database.windows.net;databaseName=ambhviewlaf25094bc5a26ffe42769ae339b3f402be29AmbariDb;',
  776.         'yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds': '3600',
  777.         'yarn.nodemanager.log.retain-second': '604800',
  778.         'yarn.timeline-service.state-store-class': 'org.apache.hadoop.yarn.server.timeline.recovery.SqlDBTimelineStateStore',
  779.         'yarn.nodemanager.log-dirs': '/mnt/resource/hadoop/yarn/log',
  780.         'yarn.resourcemanager.webapp.address': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:8088',
  781.         'yarn.timeline-service.client.max-retries': '30',
  782.         'yarn.nodemanager.health-checker.interval-ms': '135000',
  783.         'yarn.nodemanager.admin-env': 'MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX',
  784.         'yarn.nodemanager.vmem-check-enabled': 'false',
  785.         'yarn.acl.enable': 'false',
  786.         'yarn.timeline-service.leveldb-timeline-store.read-cache-size': '104857600',
  787.         'yarn.nodemanager.linux-container-executor.resources-handler.class': 'org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler',
  788.         'yarn.client.nodemanager-connect.max-wait-ms': '60000',
  789.         'yarn.timeline-service.http-authentication.simple.anonymous.allowed': 'true',
  790.         'yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size': '10000',
  791.         'yarn.resourcemanager.bind-host': '0.0.0.0',
  792.         'yarn.http.policy': 'HTTP_ONLY',
  793.         'yarn.timeline-service.version': '1.5',
  794.         'yarn.resourcemanager.zk-address': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:2181',
  795.         'yarn.nodemanager.recovery.dir': '{{yarn_log_dir_prefix}}/nodemanager/recovery-state',
  796.         'yarn.nodemanager.container-executor.class': 'org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor',
  797.         'yarn.resourcemanager.store.class': 'org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore',
  798.         'yarn.timeline-service.entity-group-fs-store.retain-seconds': '604800',
  799.         'yarn.scheduler.minimum-allocation-vcores': '1',
  800.         'yarn.timeline-service.sqldb-store.connection-username': 'ambhviewlaf25094bc5a26ffe42769ae339b3f402be29AmbariDbLogin@df6ulzeuce.database.windows.net',
  801.         'yarn.timeline-service.leveldb-timeline-store.path': '/hadoop/yarn/timeline',
  802.         'yarn.scheduler.minimum-allocation-mb': '1024',
  803.         'yarn.timeline-service.ttl-enable': 'true',
  804.         'yarn.resourcemanager.scheduler.address': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:8030',
  805.         'yarn.log-aggregation.retain-seconds': '2592000',
  806.         'yarn.nodemanager.address': '0.0.0.0:30050',
  807.         'hadoop.registry.rm.enabled': 'false',
  808.         'yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms': '43200000',
  809.         'yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms': '10000',
  810.         'yarn.resourcemanager.zk-state-store.parent-path': '/rmstore',
  811.         'yarn.resourcemanager.admin.address': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:8141',
  812.         'yarn.nodemanager.log-aggregation.compression-type': 'gz',
  813.         'yarn.timeline-service.sqldb-store.connection-password': 'COcykRe9ttVodCxJNWZtNBCsLOwY6jXrasFOq2qETmAk349VEYRV9P3rGe2fuFlGL79MnM1MX6YZIEKdbMSvxsHBZrWaLA5dNFbOWepYG8kOkY',
  814.         'yarn.nodemanager.log-aggregation.num-log-files-per-app': '30',
  815.         'yarn.resourcemanager.recovery.enabled': 'true',
  816.         'yarn.timeline-service.recovery.enabled': 'true',
  817.         'yarn.nodemanager.bind-host': '0.0.0.0',
  818.         'yarn.resourcemanager.zk-retry-interval-ms': '1000',
  819.         'yarn.admin.acl': 'yarn',
  820.         'yarn.timeline-service.http-authentication.proxyuser.root.hosts': 'hn0-094bc5',
  821.         'yarn.node-labels.fs-store.root-dir': '/system/yarn/node-labels',
  822.         'yarn.timeline-service.entity-group-fs-store.scan-interval-seconds': '60',
  823.         'yarn.timeline-service.entity-group-fs-store.done-dir': '/atshistory/done',
  824.         'yarn.nodemanager.aux-services.spark_shuffle.class': 'org.apache.spark.network.yarn.YarnShuffleService',
  825.         'yarn.client.nodemanager-connect.retry-interval-ms': '10000',
  826.         'yarn.timeline-service.generic-application-history.save-non-am-container-meta-info': 'true',
  827.         'yarn.timeline-service.webapp.https.address': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:8190',
  828.         'yarn.resourcemanager.connect.retry-interval.ms': '30000',
  829.         'yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size': '10000'
  830.     },
  831.     'ams-grafana-ini': {
  832.         'content': '\n##################### Grafana Configuration Example #####################\n#\n# Everything has defaults so you only need to uncomment things you want to\n# change\n\n# possible values : production, development\n; app_mode = production\n\n#################################### Paths ####################################\n[paths]\n# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)\n#\n;data = /var/lib/grafana\ndata = {{ams_grafana_data_dir}}\n#\n# Directory where grafana can store logs\n#\n;logs = /var/log/grafana\nlogs = {{ams_grafana_log_dir}}\n\n\n#################################### Server ####################################\n[server]\n# Protocol (http or https)\n;protocol = http\nprotocol = {{ams_grafana_protocol}}\n# The ip address to bind to, empty will bind to all interfaces\n;http_addr =\n\n# The http port  to use\n;http_port = 3000\nhttp_port = {{ams_grafana_port}}\n\n# The public facing domain name used to access grafana from a browser\n;domain = localhost\n\n# Redirect to correct domain if host header does not match domain\n# Prevents DNS rebinding attacks\n;enforce_domain = false\n\n# The full public facing url\n;root_url = %(protocol)s://%(domain)s:%(http_port)s/\n\n# Log web requests\n;router_logging = false\n\n# the path relative working path\n;static_root_path = public\nstatic_root_path = /usr/lib/ambari-metrics-grafana/public\n\n# enable gzip\n;enable_gzip = false\n\n# https certs & key file\n;cert_file =\n;cert_key =\ncert_file = {{ams_grafana_cert_file}}\ncert_key = {{ams_grafana_cert_key}}\n\n#################################### Database ####################################\n[database]\n# Either "mysql", "postgres" or "sqlite3", it\'s your choice\n;type = sqlite3\n;host = 127.0.0.1:3306\n;name = grafana\n;user = root\n;password =\n\n# For "postgres" only, either "disable", "require" or "verify-full"\n;ssl_mode = disable\n\n# For "sqlite3" only, path relative to data_path setting\n;path = grafana.db\n\n#################################### Session ####################################\n[session]\n# Either "memory", "file", "redis", "mysql", "postgres", default is "file"\n;provider = file\n\n# Provider config options\n# memory: not have any config yet\n# file: session dir path, is relative to grafana data_path\n# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`\n# mysql: go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1:3306)/database_name`\n# postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable\n;provider_config = sessions\n\n# Session cookie name\n;cookie_name = grafana_sess\n\n# If you use session in https only, default is false\n;cookie_secure = false\n\n# Session life time, default is 86400\n;session_life_time = 86400\n\n#################################### Analytics ####################################\n[analytics]\n# Server reporting, sends usage counters to stats.grafana.org every 24 hours.\n# No ip addresses are being tracked, only simple counters to track\n# running instances, dashboard and error counts. It is very helpful to us.\n# Change this option to false to disable reporting.\n;reporting_enabled = true\n\n# Google Analytics universal tracking code, only enabled if you specify an id here\n;google_analytics_ua_id =\n\n#################################### Security ####################################\n[security]\n# default admin user, created on startup\nadmin_user = {{ams_grafana_admin_user}}\n\n# default admin password, can be changed before first start of grafana,  or in profile settings\nadmin_password = {{ams_grafana_admin_pwd}}\n\n# used for signing\n;secret_key = SW2YcwTIb9zpOOhoPsMm\n\n# Auto-login remember days\n;login_remember_days = 7\n;cookie_username = grafana_user\n;cookie_remember_name = grafana_remember\n\n# disable gravatar profile images\n;disable_gravatar = false\n\n# data source proxy whitelist (ip_or_domain:port seperated by spaces)\n;data_source_proxy_whitelist =\n\n#################################### Users ####################################\n[users]\n# disable user signup / registration\n;allow_sign_up = true\n\n# Allow non admin users to create organizations\n;allow_org_create = true\n\n# Set to true to automatically assign new users to the default organization (id 1)\n;auto_assign_org = true\n\n# Default role new users will be automatically assigned (if disabled above is set to true)\n;auto_assign_org_role = Viewer\n\n# Background text for the user field on the login page\n;login_hint = email or username\n\n#################################### Anonymous Auth ##########################\n[auth.anonymous]\n# enable anonymous access\nenabled = true\n\n# specify organization name that should be used for unauthenticated users\norg_name = Main Org.\n\n# specify role for unauthenticated users\n;org_role = Admin\n\n#################################### Github Auth ##########################\n[auth.github]\n;enabled = false\n;allow_sign_up = false\n;client_id = some_id\n;client_secret = some_secret\n;scopes = user:email,read:org\n;auth_url = https://github.com/login/oauth/authorize\n;token_url = https://github.com/login/oauth/access_token\n;api_url = https://api.github.com/user\n;team_ids =\n;allowed_organizations =\n\n#################################### Google Auth ##########################\n[auth.google]\n;enabled = false\n;allow_sign_up = false\n;client_id = some_client_id\n;client_secret = some_client_secret\n;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email\n;auth_url = https://accounts.google.com/o/oauth2/auth\n;token_url = https://accounts.google.com/o/oauth2/token\n;api_url = https://www.googleapis.com/oauth2/v1/userinfo\n;allowed_domains =\n\n#################################### Auth Proxy ##########################\n[auth.proxy]\n;enabled = false\n;header_name = X-WEBAUTH-USER\n;header_property = username\n;auto_sign_up = true\n\n#################################### Basic Auth ##########################\n[auth.basic]\n;enabled = true\n\n#################################### Auth LDAP ##########################\n[auth.ldap]\n;enabled = false\n;config_file = /etc/grafana/ldap.toml\n\n#################################### SMTP / Emailing ##########################\n[smtp]\n;enabled = false\n;host = localhost:25\n;user =\n;password =\n;cert_file =\n;key_file =\n;skip_verify = false\n;from_address = admin@grafana.localhost\n\n[emails]\n;welcome_email_on_sign_up = false\n\n#################################### Logging ##########################\n[log]\n# Either "console", "file", default is "console"\n# Use comma to separate multiple modes, e.g. "console, file"\n;mode = console, file\n\n# Buffer length of channel, keep it as it is if you don\'t know what it is.\n;buffer_len = 10000\n\n# Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Trace"\n;level = Info\n\n# For "console" mode only\n[log.console]\n;level =\n\n# For "file" mode only\n[log.file]\n;level =\n# This enables automated log rotate(switch of following options), default is true\n;log_rotate = true\n\n# Max line number of single file, default is 1000000\n;max_lines = 1000000\n\n# Max size shift of single file, default is 28 means 1 << 28, 256MB\n;max_lines_shift = 28\n\n# Segment log daily, default is true\n;daily_rotate = true\n\n# Expired days of log file(delete after max days), default is 7\n;max_days = 7\n\n#################################### AMPQ Event Publisher ##########################\n[event_publisher]\n;enabled = false\n;rabbitmq_url = amqp://localhost/\n;exchange = grafana_events\n\n;#################################### Dashboard JSON files ##########################\n[dashboards.json]\n;enabled = false\n;path = /var/lib/grafana/dashboards\npath = /usr/lib/ambari-metrics-grafana/public/dashboards',
  833.         'cert_key': '/etc/ambari-metrics-grafana/conf/ams-grafana.key',
  834.         'protocol': 'http',
  835.         'port': '3000',
  836.         'cert_file': '/etc/ambari-metrics-grafana/conf/ams-grafana.crt'
  837.     },
  838.     'webhcat-site': {
  839.         'templeton.hive.properties': 'hive.metastore.local=false,hive.metastore.uris=thrift://hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:9083,hive.user.install.directory=/user',
  840.         'templeton.hadoop.conf.dir': '/etc/hadoop/conf',
  841.         'yarn.resourcemanager.connect.max-wait.ms': '40000',
  842.         'templeton.port': '30111',
  843.         'templeton.hive.home': 'hive.tar.gz/hive',
  844.         'templeton.libjars': '/usr/hdp/${hdp.version}/zookeeper/zookeeper.jar,/usr/hdp/${hdp.version}/hive/lib/hive-common.jar',
  845.         'fs.azure.skip.metrics': 'true',
  846.         'templeton.exec.timeout': '60000',
  847.         'templeton.hcat.home': 'hive.tar.gz/hive/hcatalog',
  848.         'templeton.mapper.memory.mb': '1024',
  849.         'templeton.sqoop.home': '/usr/hdp/${hdp.version}/sqoop',
  850.         'templeton.python': '${env.PYTHON_CMD}',
  851.         'templeton.sqoop.archive': 'wasb:///hdp/apps/${hdp.version}/sqoop/sqoop.tar.gz',
  852.         'templeton.hcat': '/usr/hdp/${hdp.version}/hive/bin/hcat',
  853.         'templeton.hadoop': '/usr/hdp/${hdp.version}/hadoop/bin/hadoop',
  854.         'templeton.override.enabled': 'false',
  855.         'templeton.jar': '/usr/hdp/${hdp.version}/hive/share/webhcat/svr/lib/hive-webhcat-*.jar',
  856.         'templeton.storage.class': 'org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage',
  857.         'templeton.hive.extra.files': '/usr/hdp/${hdp.version}/tez/conf/tez-site.xml,/usr/hdp/${hdp.version}/tez,/usr/hdp/${hdp.version}/tez/lib',
  858.         'yarn.app.mapreduce.client.max-retries': '2',
  859.         'webhcat.proxyuser.root.hosts': 'hn0-094bc5',
  860.         'webhcat.proxyuser.root.groups': '*',
  861.         'templeton.hive.path': 'hive.tar.gz/hive/bin/hive',
  862.         'templeton.pig.path': 'pig.tar.gz/pig/bin/pig',
  863.         'templeton.sqoop.path': '/usr/hdp/${hdp.version}/sqoop/bin/sqoop',
  864.         'templeton.zookeeper.hosts': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:2181',
  865.         'yarn.app.mapreduce.client.job.retry-interval': '1000',
  866.         'templeton.jobs.listorder': 'lexicographicaldesc',
  867.         'templeton.hive.archive': 'wasb:///hdp/apps/${hdp.version}/hive/hive.tar.gz',
  868.         'templeton.streaming.jar': 'wasb:///hdp/apps/${hdp.version}/mapreduce/hadoop-streaming.jar',
  869.         'templeton.pig.archive': 'wasb:///hdp/apps/${hdp.version}/pig/pig.tar.gz',
  870.         'yarn.resourcemanager.connect.retry-interval.ms': '10000',
  871.         'templeton.hadoop.queue.name': 'joblauncher'
  872.     },
  873.     'spark2-hive-site-override': {
  874.         'hive.server2.enable.doAs': 'false',
  875.         'hive.metastore.client.connect.retry.delay': '5',
  876.         'hive.server2.transport.mode': 'binary',
  877.         'hive.server2.thrift.port': '10016',
  878.         'hive.metastore.client.socket.timeout': '1800'
  879.     },
  880.     'hive-log4j': {
  881.         'content': '\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# Define some default values that can be overridden by system properties\nhive.log.threshold=ALL\nhive.root.logger=DEBUG,DRFA\nhive.log.dir=${java.io.tmpdir}/${user.name}\nhive.log.file=hive.log\n\n# Define the root logger to the system property "hadoop.root.logger".\nlog4j.rootLogger=${hive.root.logger}, EventCounter, ETW, FullPIILogs\n\n# Logging Threshold\nlog4j.threshold=${hive.log.threshold}\n\n#\n# Daily Rolling File Appender\n#\n# Use the PidDailyerRollingFileAppend class instead if you want to use separate log files\n# for different CLI session.\n#\n# log4j.appender.DRFA=org.apache.hadoop.hive.ql.log.PidDailyRollingFileAppender\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\n\nlog4j.appender.DRFA.File=${hive.log.dir}/${hive.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add "console" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n\nlog4j.appender.console.encoding=UTF-8\n\n#custom logging levels\n#log4j.logger.xxx=DEBUG\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter\n\n\nlog4j.category.DataNucleus=WARN,DRFA,ETW,FullPIILogs\nlog4j.category.Datastore=WARN,DRFA,ETW,FullPIILogs\nlog4j.category.Datastore.Schema=WARN,DRFA,ETW,FullPIILogs\nlog4j.category.JPOX=WARN,DRFA,ETW,FullPIILogs\nlog4j.category.JPOX.Datastore=WARN,DRFA,ETW,FullPIILogs\nlog4j.category.JPOX.Plugin=WARN,DRFA,ETW,FullPIILogs\nlog4j.category.JPOX.MetaData=WARN,DRFA,ETW,FullPIILogs\nlog4j.category.JPOX.Query=WARN,DRFA,ETW,FullPIILogs\nlog4j.category.JPOX.General=WARN,DRFA,ETW,FullPIILogs\nlog4j.category.JPOX.Enhancer=WARN,DRFA,ETW,FullPIILogs\n\n\n# Silence useless ZK logs\nlog4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,DRFA\nlog4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,DRFA\n\n#EtwLog Appender\n#sends HDP service logs to customer storage account\nlog4j.appender.ETW=com.microsoft.log4jappender.EtwAppender\nlog4j.appender.ETW.source=HadoopServiceLog\nlog4j.appender.ETW.component=hive\nlog4j.appender.ETW.layout=org.apache.log4j.TTCCLayout\nlog4j.appender.ETW.OSType=Linux\n\n# Full PII log Appender\n# Sends  PII HDP service logs to our storage account\nlog4j.appender.FullPIILogs=com.microsoft.log4jappender.FullPIILogAppender\nlog4j.appender.FullPIILogs.component=hive\nlog4j.appender.FullPIILogs.layout=org.apache.log4j.TTCCLayout\nlog4j.appender.FullPIILogs.Threshold=INFO\nlog4j.appender.FullPIILogs.source=CentralFullServicePIILogs\nlog4j.appender.FullPIILogs.OSType=Linux\nlog4j.appender.FullPIILogs.SuffixHadoopEntryType=true'
  882.     },
  883.     'ranger-hdfs-security': {
  884.         'ranger.plugin.hdfs.policy.source.impl': 'org.apache.ranger.admin.client.RangerAdminRESTClient',
  885.         'ranger.plugin.hdfs.policy.pollIntervalMs': '30000',
  886.         'ranger.plugin.hdfs.policy.rest.url': '{{policymgr_mgr_url}}',
  887.         'ranger.plugin.hdfs.policy.rest.ssl.config.file': '/etc/hadoop/conf/ranger-policymgr-ssl.xml',
  888.         'xasecure.add-hadoop-authorization': 'true',
  889.         'ranger.plugin.hdfs.service.name': '{{repo_name}}',
  890.         'ranger.plugin.hdfs.policy.cache.dir': '/etc/ranger/{{repo_name}}/policycache'
  891.     },
  892.     'hiveserver2-site': {
  893.         'hive.security.authenticator.manager': 'org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator',
  894.         'hive.security.authorization.manager': 'org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory',
  895.         'hive.metastore.metrics.enabled': 'true',
  896.         'hive.security.authorization.enabled': 'false',
  897.         'hive.service.metrics.file.location': '/var/log/hive/hiveserver2-report.json',
  898.         'hive.service.metrics.reporter': 'JSON_FILE, JMX, HADOOP2',
  899.         'hive.service.metrics.hadoop2.component': 'hiveserver2',
  900.         'hive.conf.restricted.list': 'hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role'
  901.     },
  902.     'sqoop-atlas-application.properties': {
  903.         'atlas.jaas.KafkaClient.option.renewTicket': 'true',
  904.         'atlas.jaas.KafkaClient.option.useTicketCache': 'true'
  905.     },
  906.     'mapred-env': {
  907.         'content': '\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\nexport HADOOP_JOB_HISTORYSERVER_OPTS="-Dwhitelist.filename=core-whitelist.res,coremanual-whitelist.res -Dcomponent=historyserver"\n#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.\nexport HADOOP_OPTS="-Dhdp.version=$HDP_VERSION $HADOOP_OPTS"',
  908.         'mapred_user_nofile_limit': '32768',
  909.         'jobhistory_heapsize': '900',
  910.         'mapred_log_dir_prefix': '/var/log/hadoop-mapreduce',
  911.         'mapred_user_nproc_limit': '65536',
  912.         'mapred_user': 'mapred',
  913.         'mapred_pid_dir_prefix': '/var/run/hadoop-mapreduce'
  914.     },
  915.     'ranger-hive-audit': {
  916.         'xasecure.audit.destination.solr.zookeepers': 'NONE',
  917.         'xasecure.audit.destination.solr.urls': '',
  918.         'xasecure.audit.destination.solr': 'false',
  919.         'xasecure.audit.destination.hdfs.batch.filespool.dir': '/var/log/hive/audit/hdfs/spool',
  920.         'xasecure.audit.destination.hdfs': 'true',
  921.         'xasecure.audit.destination.solr.batch.filespool.dir': '/var/log/hive/audit/solr/spool',
  922.         'xasecure.audit.provider.summary.enabled': 'false',
  923.         'xasecure.audit.destination.hdfs.dir': 'hdfs://NAMENODE_HOSTNAME:8020/ranger/audit',
  924.         'xasecure.audit.is.enabled': 'true'
  925.     },
  926.     'slider-client': {},
  927.     'ssl-client': {
  928.         'ssl.client.truststore.reload.interval': '10000',
  929.         'ssl.client.keystore.password': 'bigdata',
  930.         'ssl.client.keystore.location': '/etc/security/clientKeys/keystore.jks',
  931.         'ssl.client.truststore.password': 'bigdata',
  932.         'ssl.client.truststore.type': 'jks',
  933.         'ssl.client.keystore.type': 'jks'
  934.     },
  935.     'sqoop-env': {
  936.         'content': '\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-{{hbase_home}}}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-{{hive_home}}}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"',
  937.         'sqoop_user': 'sqoop',
  938.         'jdbc_drivers': ' '
  939.     },
  940.     'ams-grafana-env': {
  941.         'content': '\n# Set environment variables here.\n\n# AMS UI Server Home Dir\nexport AMS_GRAFANA_HOME_DIR={{ams_grafana_home_dir}}\n\n# AMS UI Server Data Dir\nexport AMS_GRAFANA_DATA_DIR={{ams_grafana_data_dir}}\n\n# AMS UI Server Log Dir\nexport AMS_GRAFANA_LOG_DIR={{ams_grafana_log_dir}}\n\n# AMS UI Server PID Dir\nexport AMS_GRAFANA_PID_DIR={{ams_grafana_pid_dir}}',
  942.         'metrics_grafana_log_dir': '/var/log/ambari-metrics-grafana',
  943.         'metrics_grafana_username': 'admin',
  944.         'metrics_grafana_password': 'HdpCli123!',
  945.         'metrics_grafana_pid_dir': '/var/run/ambari-metrics-grafana',
  946.         'metrics_grafana_data_dir': '/var/lib/ambari-metrics-grafana'
  947.     },
  948.     'spark2-defaults': {
  949.         'spark.eventLog.dir': 'hdfs:///spark2-history/',
  950.         'spark.history.kerberos.keytab': 'none',
  951.         'spark.history.fs.logDirectory': 'hdfs:///spark2-history/',
  952.         'spark.history.kerberos.principal': 'none',
  953.         'spark.yarn.historyServer.address': '{{spark_history_server_host}}:{{spark_history_ui_port}}',
  954.         'spark.eventLog.enabled': 'true',
  955.         'spark.yarn.queue': 'default',
  956.         'spark.driver.extraLibraryPath': '{{spark_hadoop_lib_native}}',
  957.         'spark.history.ui.port': '18081',
  958.         'spark.executor.extraLibraryPath': '{{spark_hadoop_lib_native}}',
  959.         'spark.history.provider': 'org.apache.spark.deploy.history.FsHistoryProvider'
  960.     },
  961.     'hive-log4j2': {
  962.         'content': '\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nstatus = INFO\nname = HiveLog4j2\npackages = org.apache.hadoop.hive.ql.log\n\n# list of properties\nproperty.hive.log.level = INFO\nproperty.hive.root.logger = DRFA\nproperty.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}\nproperty.hive.log.file = hive.log\n\n# list of all appenders\nappenders = console, DRFA\n\n# console appender\nappender.console.type = Console\nappender.console.name = console\nappender.console.target = SYSTEM_ERR\nappender.console.layout.type = PatternLayout\nappender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n\n\n# daily rolling file appender\nappender.DRFA.type = RollingFile\nappender.DRFA.name = DRFA\nappender.DRFA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}\n# Use %pid in the filePattern to append process-id@host-name to the filename if you want separate log files for different CLI session\nappender.DRFA.filePattern = ${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}.gz\nappender.DRFA.layout.type = PatternLayout\nappender.DRFA.layout.pattern = %d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n\nappender.DRFA.policies.type = Policies\nappender.DRFA.policies.time.type = TimeBasedTriggeringPolicy\nappender.DRFA.policies.time.interval = 1\nappender.DRFA.policies.time.modulate = true\nappender.DRFA.strategy.type = DefaultRolloverStrategy\nappender.DRFA.strategy.max = 30\n\n# list of all loggers\nloggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX\n\nlogger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn\nlogger.NIOServerCnxn.level = WARN\n\nlogger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO\nlogger.ClientCnxnSocketNIO.level = WARN\n\nlogger.DataNucleus.name = DataNucleus\nlogger.DataNucleus.level = ERROR\n\nlogger.Datastore.name = Datastore\nlogger.Datastore.level = ERROR\n\nlogger.JPOX.name = JPOX\nlogger.JPOX.level = ERROR\n\n# root logger\nrootLogger.level = ${sys:hive.log.level}\nrootLogger.appenderRefs = root\nrootLogger.appenderRef.root.ref = ${sys:hive.root.logger}'
  963.     },
  964.     'beeline-log4j2': {
  965.         'content': '\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nstatus = INFO\nname = BeelineLog4j2\npackages = org.apache.hadoop.hive.ql.log\n\n# list of properties\nproperty.hive.log.level = WARN\nproperty.hive.root.logger = console\n\n# list of all appenders\nappenders = console\n\n# console appender\nappender.console.type = Console\nappender.console.name = console\nappender.console.target = SYSTEM_ERR\nappender.console.layout.type = PatternLayout\nappender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n\n\n# list of all loggers\nloggers = HiveConnection\n\n# HiveConnection logs useful info for dynamic service discovery\nlogger.HiveConnection.name = org.apache.hive.jdbc.HiveConnection\nlogger.HiveConnection.level = INFO\n\n# root logger\nrootLogger.level = ${sys:hive.log.level}\nrootLogger.appenderRefs = root\nrootLogger.appenderRef.root.ref = ${sys:hive.root.logger}'
  966.     },
  967.     'zeppelin-env': {
  968.         'zeppelin.server.kerberos.keytab': '',
  969.         'shiro_ini_content': '\n[users]\n# List of users with their password allowed to access Zeppelin.\n# To use a different strategy (LDAP / Database / ...) check the shiro doc at http://shiro.apache.org/configuration.html#Configuration-INISections\n#admin = password1\n#user1 = password2, role1, role2\n#user2 = password3, role3\n#user3 = password4, role2\n\n# Sample LDAP configuration, for user Authentication, currently tested for single Realm\n[main]\n#activeDirectoryRealm = org.apache.zeppelin.server.ActiveDirectoryGroupRealm\n#activeDirectoryRealm.systemUsername = CN=Administrator,CN=Users,DC=HW,DC=EXAMPLE,DC=COM\n#activeDirectoryRealm.systemPassword = Password1!\n#activeDirectoryRealm.hadoopSecurityCredentialPath = jceks://user/zeppelin/zeppelin.jceks\n#activeDirectoryRealm.searchBase = CN=Users,DC=HW,DC=TEST,DC=COM\n#activeDirectoryRealm.url = ldap://ad-nano.test.example.com:389\n#activeDirectoryRealm.groupRolesMap = ""\n#activeDirectoryRealm.authorizationCachingEnabled = true\n\n#ldapRealm = org.apache.shiro.realm.ldap.JndiLdapRealm\n#ldapRealm.userDnTemplate = uid={0},cn=users,cn=accounts,dc=example,dc=com\n#ldapRealm.contextFactory.url = ldap://ldaphost:389\n#ldapRealm.contextFactory.authenticationMechanism = SIMPLE\n#sessionManager = org.apache.shiro.web.session.mgt.DefaultWebSessionManager\n#securityManager.sessionManager = $sessionManager\n# 86,400,000 milliseconds = 24 hour\n#securityManager.sessionManager.globalSessionTimeout = 86400000\nshiro.loginUrl = /api/login\n\n[urls]\n# anon means the access is anonymous.\n# authcBasic means Basic Auth Security\n# To enfore security, comment the line below and uncomment the next one\n/api/version = anon\n/** = anon\n#/** = authc',
  970.         'zeppelin.spark.jar.dir': '/apps/zeppelin',
  971.         'zeppelin.executor.mem': '512m',
  972.         'zeppelin_pid_dir': '/var/run/zeppelin',
  973.         'zeppelin.executor.instances': '2',
  974.         'log4j_properties_content': '\nlog4j.rootLogger = INFO, dailyfile\nlog4j.appender.stdout = org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.layout = org.apache.log4j.PatternLayout\nlog4j.appender.stdout.layout.ConversionPattern=%5p [%d] ({%t} %F[%M]:%L) - %m%n\nlog4j.appender.dailyfile.DatePattern=.yyyy-MM-dd\nlog4j.appender.dailyfile.Threshold = INFO\nlog4j.appender.dailyfile = org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.dailyfile.File = ${zeppelin.log.file}\nlog4j.appender.dailyfile.layout = org.apache.log4j.PatternLayout\nlog4j.appender.dailyfile.layout.ConversionPattern=%5p [%d] ({%t} %F[%M]:%L) - %m%n',
  975.         'zeppelin.server.kerberos.principal': '',
  976.         'zeppelin_user': 'zeppelin',
  977.         'zeppelin_env_content': '\n# Spark master url. eg. spark://master_addr:7077. Leave empty if you want to use local mode\nexport MASTER=yarn-client\nexport SPARK_YARN_JAR={{spark_jar}}\n\n\n# Where log files are stored.  PWD by default.\nexport ZEPPELIN_LOG_DIR={{zeppelin_log_dir}}\n\n# The pid files are stored. /tmp by default.\nexport ZEPPELIN_PID_DIR={{zeppelin_pid_dir}}\n\n\nexport JAVA_HOME={{java64_home}}\n\n# Additional jvm options. for example, export ZEPPELIN_JAVA_OPTS="-Dspark.executor.memory=8g -Dspark.cores.max=16"\nexport ZEPPELIN_JAVA_OPTS="-Dhdp.version={{full_stack_version}} -Dspark.executor.memory={{executor_mem}} -Dspark.executor.instances={{executor_instances}} -Dspark.yarn.queue={{spark_queue}}"\n\n\n# Zeppelin jvm mem options Default -Xmx1024m -XX:MaxPermSize=512m\n# export ZEPPELIN_MEM\n\n# zeppelin interpreter process jvm mem options. Defualt = ZEPPELIN_MEM\n# export ZEPPELIN_INTP_MEM\n\n# zeppelin interpreter process jvm options. Default = ZEPPELIN_JAVA_OPTS\n# export ZEPPELIN_INTP_JAVA_OPTS\n\n# Where notebook saved\n# export ZEPPELIN_NOTEBOOK_DIR\n\n# Id of notebook to be displayed in homescreen. ex) 2A94M5J1Z\n# export ZEPPELIN_NOTEBOOK_HOMESCREEN\n\n# hide homescreen notebook from list when this value set to "true". default "false"\n# export ZEPPELIN_NOTEBOOK_HOMESCREEN_HIDE\n\n# Bucket where notebook saved\n# export ZEPPELIN_NOTEBOOK_S3_BUCKET\n\n# User in bucket where notebook saved. For example bucket/user/notebook/2A94M5J1Z/note.json\n# export ZEPPELIN_NOTEBOOK_S3_USER\n\n# A string representing this instance of zeppelin. $USER by default\n# export ZEPPELIN_IDENT_STRING\n\n# The scheduling priority for daemons. Defaults to 0.\n# export ZEPPELIN_NICENESS\n\n\n#### Spark interpreter configuration ####\n\n## Use provided spark installation ##\n## defining SPARK_HOME makes Zeppelin run spark interpreter process using spark-submit\n##\n# (required) When it is defined, load it instead of Zeppelin embedded Spark libraries\nexport SPARK_HOME={{spark_home}}\n\n# (optional) extra options to pass to spark submit. eg) "--driver-memory 512M --executor-memory 1G".\n# export SPARK_SUBMIT_OPTIONS\n\n## Use embedded spark binaries ##\n## without SPARK_HOME defined, Zeppelin still able to run spark interpreter process using embedded spark binaries.\n## however, it is not encouraged when you can define SPARK_HOME\n##\n# Options read in YARN client mode\n# yarn-site.xml is located in configuration directory in HADOOP_CONF_DIR.\nexport HADOOP_CONF_DIR=/etc/hadoop/conf\n\n# Pyspark (supported with Spark 1.2.1 and above)\n# To configure pyspark, you need to set spark distribution\'s path to \'spark.home\' property in Interpreter setting screen in Zeppelin GUI\n# path to the python command. must be the same path on the driver(Zeppelin) and all workers.\n# export PYSPARK_PYTHON\n\nexport PYTHONPATH="${SPARK_HOME}/python:${SPARK_HOME}/python/lib/py4j-0.8.2.1-src.zip"\nexport SPARK_YARN_USER_ENV="PYTHONPATH=${PYTHONPATH}"\n\n## Spark interpreter options ##\n##\n# Use HiveContext instead of SQLContext if set true. true by default.\n# export ZEPPELIN_SPARK_USEHIVECONTEXT\n\n# Execute multiple SQL concurrently if set true. false by default.\n# export ZEPPELIN_SPARK_CONCURRENTSQL\n\n# Max number of SparkSQL result to display. 1000 by default.\n# export ZEPPELIN_SPARK_MAXRESULT',
  978.         'zeppelin_log_dir': '/var/log/zeppelin',
  979.         'zeppelin_group': 'zeppelin'
  980.     },
  981.     'ams-ssl-client': {
  982.         'ssl.client.truststore.password': 'bigdata',
  983.         'ssl.client.truststore.type': 'jks',
  984.         'ssl.client.truststore.location': '/etc/security/clientKeys/all.jks'
  985.     },
  986.     'ams-hbase-env': {
  987.         'hbase_pid_dir': '/var/run/ambari-metrics-collector/',
  988.         'regionserver_xmn_size': '128',
  989.         'hbase_master_maxperm_size': '128',
  990.         'hbase_regionserver_xmn_max': '512m',
  991.         'hbase_regionserver_xmn_ratio': '0.2',
  992.         'hbase_master_heapsize': '2048m',
  993.         'content': '\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nadditional_cp=/usr/hdp/current/hadoop-client/hadoop-azure.jar:/usr/hdp/current/hadoop-client/lib/azure-storage-4.2.0.jar:/usr/lib/hdinsight-datalake/*\n\nif [  -n "${HBASE_CLASSPATH}" ];\nthen\n  export HBASE_CLASSPATH=${HBASE_CLASSPATH}:$additional_cp\nelse\n  export HBASE_CLASSPATH=$additional_cp\nfi\n\n# The maximum amount of heap to use for hbase shell.\n export HBASE_SHELL_OPTS="-Xmx256m"\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{hbase_log_dir}}/hs_err_pid%p.log -Djava.io.tmpdir={{hbase_tmp_dir}}"\nexport SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{hbase_log_dir}}/gc.log-`date +\'%Y%m%d%H%M\'`"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"\n\n# Uncomment and adjust to enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"\n\n{% if java_version < 8 %}\nexport HBASE_MASTER_OPTS=" -XX:PermSize=64m -XX:MaxPermSize={{hbase_master_maxperm_size}} -Xms{{hbase_heapsize}} -Xmx{{hbase_heapsize}} -Xmn{{hbase_master_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"\nexport HBASE_REGIONSERVER_OPTS="-XX:MaxPermSize=128m -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"\n{% else %}\nexport HBASE_MASTER_OPTS=" -Xms{{hbase_heapsize}} -Xmx{{hbase_heapsize}} -Xmn{{hbase_master_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"\nexport HBASE_REGIONSERVER_OPTS=" -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"\n{% endif %}\n\n\n# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"\n# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{hbase_log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See \'man nice\'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{hbase_pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it\'s own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}"\nexport HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}"\nexport HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"\nexport HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS -Djava.security.auth.login.config={{ams_zookeeper_jaas_config_file}}"\n{% endif %}\n\n# use embedded native libs\n_HADOOP_NATIVE_LIB="/usr/lib/ams-hbase/lib/hadoop-native/"\nexport HBASE_OPTS="$HBASE_OPTS -Djava.library.path=${_HADOOP_NATIVE_LIB}"\n\n# Unset HADOOP_HOME to avoid importing HADOOP installed cluster related configs like: /usr/hdp/2.2.0.0-2041/hadoop/conf/\nexport HADOOP_HOME={{ams_hbase_home_dir}}\nexport HBASE_ROOT_LOGGER="${HBASE_ROOT_LOGGER:-"INFO,RFA"},ETW,FilterLog"',
  994.         'hbase_classpath_additional': '',
  995.         'hbase_regionserver_heapsize': '2048m',
  996.         'hbase_log_dir': '/var/log/ambari-metrics-collector',
  997.         'hbase_regionserver_shutdown_timeout': '30',
  998.         'hbase_master_xmn_size': '512m',
  999.         'max_open_files_limit': '32768'
  1000.     },
  1001.     'webhcat-env': {
  1002.         'content': '\n# The file containing the running pid\nPID_FILE={{webhcat_pid_file}}\n\nTEMPLETON_LOG_DIR={{templeton_log_dir}}/\n\n\nWEBHCAT_LOG_DIR={{templeton_log_dir}}/\n\n# The console error log\nERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log\n\n# The console log\nCONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log\n\n#TEMPLETON_JAR=templeton_jar_name\n\n#HADOOP_PREFIX=hadoop_prefix\n\n#HCAT_PREFIX=hive_prefix\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}'
  1003.     },
  1004.     'hive-atlas-application.properties': {
  1005.         'atlas.hook.hive.synchronous': 'false',
  1006.         'atlas.hook.hive.minThreads': '5',
  1007.         'atlas.hook.hive.queueSize': '1000',
  1008.         'atlas.hook.hive.numRetries': '3',
  1009.         'atlas.hook.hive.maxThreads': '5',
  1010.         'atlas.hook.hive.keepAliveTime': '10'
  1011.     },
  1012.     'hcat-env': {
  1013.         'content': '\n      # Licensed to the Apache Software Foundation (ASF) under one\n      # or more contributor license agreements. See the NOTICE file\n      # distributed with this work for additional information\n      # regarding copyright ownership. The ASF licenses this file\n      # to you under the Apache License, Version 2.0 (the\n      # "License"); you may not use this file except in compliance\n      # with the License. You may obtain a copy of the License at\n      #\n      # http://www.apache.org/licenses/LICENSE-2.0\n      #\n      # Unless required by applicable law or agreed to in writing, software\n      # distributed under the License is distributed on an "AS IS" BASIS,\n      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n      # See the License for the specific language governing permissions and\n      # limitations under the License.\n\n      JAVA_HOME={{java64_home}}\n      HCAT_PID_DIR={{hcat_pid_dir}}/\n      HCAT_LOG_DIR={{hcat_log_dir}}/\n      HCAT_CONF_DIR={{hcat_conf_dir}}\n      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n      #DBROOT is the path where the connector jars are downloaded\n      DBROOT={{hcat_dbroot}}\n      USER={{hcat_user}}\n      METASTORE_PORT={{hive_metastore_port}}'
  1014.     },
  1015.     'tez-site': {
  1016.         'tez.task.get-task.sleep.interval-ms.max': '200',
  1017.         'tez.dag.recovery.enabled': 'false',
  1018.         'tez.task.max-events-per-heartbeat': '500',
  1019.         'tez.task.launch.cmd-opts': '-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC',
  1020.         'tez.runtime.compress': 'true',
  1021.         'tez.runtime.io.sort.mb': '409',
  1022.         'tez.runtime.shuffle.fetch.buffer.percent': '0.6',
  1023.         'tez.runtime.convert.user-payload.to.history-text': 'false',
  1024.         'tez.generate.debug.artifacts': 'false',
  1025.         'tez.am.tez-ui.history-url.template': '__HISTORY_URL_BASE__?viewPath=%2F%23%2Ftez-app%2F__APPLICATION_ID__',
  1026.         'tez.am.view-acls': '*',
  1027.         'tez.am.log.level': 'INFO',
  1028.         'tez.counters.max.groups': '3000',
  1029.         'tez.counters.max': '10000',
  1030.         'tez.shuffle-vertex-manager.max-src-fraction': '0.4',
  1031.         'tez.runtime.unordered.output.buffer.size-mb': '100',
  1032.         'tez.queue.name': 'default',
  1033.         'tez.task.resource.memory.mb': '1536',
  1034.         'tez.history.logging.service.class': 'org.apache.tez.dag.history.logging.ats.ATSV15HistoryLoggingService',
  1035.         'tez.runtime.optimize.local.fetch': 'true',
  1036.         'tez.am.launch.cmd-opts': '-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC',
  1037.         'tez.task.am.heartbeat.counter.interval-ms.max': '4000',
  1038.         'tez.grouping.split-waves': '1.7',
  1039.         'tez.am.max.app.attempts': '1',
  1040.         'tez.am.launch.env': 'LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64',
  1041.         'tez.am.container.idle.release-timeout-max.millis': '20000',
  1042.         'tez.use.cluster.hadoop-libs': 'false',
  1043.         'tez.am.launch.cluster-default.cmd-opts': '-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}',
  1044.         'tez.am.container.idle.release-timeout-min.millis': '10000',
  1045.         'tez.am.java.opts': '-Xmx768M -Xms768M -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC',
  1046.         'tez.runtime.sorter.class': 'PIPELINED',
  1047.         'tez.runtime.compress.codec': 'org.apache.hadoop.io.compress.SnappyCodec',
  1048.         'tez.task.launch.cluster-default.cmd-opts': '-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}',
  1049.         'tez.task.launch.env': 'LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64',
  1050.         'tez.am.container.reuse.enabled': 'true',
  1051.         'tez.session.am.dag.submit.timeout.secs': '300',
  1052.         'tez.grouping.min-size': '16777216',
  1053.         'tez.grouping.max-size': '1073741824',
  1054.         'tez.session.client.timeout.secs': '-1',
  1055.         'tez.cluster.additional.classpath.prefix': '/usr/hdp/${hdp.version}/hadoop/hadoop-azure-2.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/usr/lib/hdinsight-logging/*',
  1056.         'tez.lib.uris': '/hdp/apps/${hdp.version}/tez/tez.tar.gz',
  1057.         'tez.staging-dir': '/tezstaging/${user.name}/staging',
  1058.         'tez.am.am-rm.heartbeat.interval-ms.max': '250',
  1059.         'tez.runtime.shuffle.memory.limit.percent': '0.25',
  1060.         'tez.task.generate.counters.per.io': 'true',
  1061.         'tez.am.maxtaskfailures.per.node': '10',
  1062.         'tez.am.container.reuse.non-local-fallback.enabled': 'false',
  1063.         'tez.am.container.reuse.rack-fallback.enabled': 'true',
  1064.         'tez.runtime.pipelined.sorter.sort.threads': '2',
  1065.         'tez.am.container.reuse.locality.delay-allocation-millis': '250',
  1066.         'tez.shuffle-vertex-manager.min-src-fraction': '0.2',
  1067.         'tez.am.resource.memory.mb': '1024'
  1068.     },
  1069.     'slider-log4j': {
  1070.         'content': '\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# Define some default values that can be overridden by system properties\nlog4j.rootLogger=INFO,stdout\nlog4j.threshhold=ALL\nlog4j.appender.stdout=org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n\n# log layout skips stack-trace creation operations by avoiding line numbers and method\nlog4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} - %m%n\n\n# debug edition is much more expensive\n#log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\nlog4j.appender.subprocess=org.apache.log4j.ConsoleAppender\nlog4j.appender.subprocess.layout=org.apache.log4j.PatternLayout\nlog4j.appender.subprocess.layout.ConversionPattern=[%c{1}]: %m%n\n#log4j.logger.org.apache.slider.yarn.appmaster.SliderAppMasterer.master=INFO,subprocess\n\n# for debugging Slider\n#log4j.logger.org.apache.slider=DEBUG\n#log4j.logger.org.apache.slider=DEBUG\n\n# uncomment to debug service lifecycle issues\n#log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG\n#log4j.logger.org.apache.hadoop.yarn.service=DEBUG\n\n# uncomment for YARN operations\n#log4j.logger.org.apache.hadoop.yarn.client=DEBUG\n\n# uncomment this to debug security problems\n#log4j.logger.org.apache.hadoop.security=DEBUG\n\n#crank back on some noise\nlog4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR\nlog4j.logger.org.apache.hadoop.hdfs=WARN\n\n\nlog4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN\nlog4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN\nlog4j.logger.org.apache.zookeeper=WARN'
  1071.     },
  1072.     'yarn-log4j': {
  1073.         'content': '\n#Relative to Yarn Log Dir Prefix\nyarn.log.dir=.\n#\n# Job Summary Appender\n#\n# Use following logger to send summary to separate file defined by\n# hadoop.mapreduce.jobsummary.log.file rolled daily:\n# hadoop.mapreduce.jobsummary.logger=INFO,JSA\n#\nhadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}\nhadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log\nlog4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender\n# Set the ResourceManager summary log filename\nyarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log\n# Set the ResourceManager summary log level and appender\nyarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}\n#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\n\n# To enable AppSummaryLogging for the RM,\n# set yarn.server.resourcemanager.appsummary.logger to\n# LEVEL,RMSUMMARY in hadoop-env.sh\n\n# Appender for ResourceManager Application Summary Log\n# Requires the following properties to be set\n#    - hadoop.log.dir (Hadoop Log directory)\n#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)\n#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)\nlog4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender\nlog4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}\nlog4j.appender.RMSUMMARY.MaxFileSize=256MB\nlog4j.appender.RMSUMMARY.MaxBackupIndex=20\nlog4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.JSA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\nlog4j.appender.JSA.DatePattern=.yyyy-MM-dd\nlog4j.appender.JSA.layout=org.apache.log4j.PatternLayout\nlog4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}\nlog4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false\n\n# Appender for viewing information for errors and warnings\nyarn.ewma.cleanupInterval=300\nyarn.ewma.messageAgeLimitSeconds=86400\nyarn.ewma.maxUniqueMessages=250\nlog4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender\nlog4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}\nlog4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}\nlog4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}\n\n# Audit logging for ResourceManager\nrm.audit.logger=${hadoop.root.logger}\nlog4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=${rm.audit.logger}\nlog4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger=false\nlog4j.appender.RMAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.RMAUDIT.File=${yarn.log.dir}/rm-audit.log\nlog4j.appender.RMAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.RMAUDIT.DatePattern=.yyyy-MM-dd\n\n# Audit logging for NodeManager\nnm.audit.logger=${hadoop.root.logger}\nlog4j.logger.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=${nm.audit.logger}\nlog4j.additivity.org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger=false\nlog4j.appender.NMAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.NMAUDIT.File=${yarn.log.dir}/nm-audit.log\nlog4j.appender.NMAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.NMAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.NMAUDIT.DatePattern=.yyyy-MM-dd'
  1074.     },
  1075.     'hive-exec-log4j': {
  1076.         'content': '\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Define some default values that can be overridden by system properties\n\nhive.log.threshold=ALL\nhive.root.logger=INFO,FA\nhive.log.dir=${java.io.tmpdir}/${user.name}\nhive.query.id=hadoop\nhive.log.file=${hive.query.id}.log\n\n# Define the root logger to the system property "hadoop.root.logger".\nlog4j.rootLogger=${hive.root.logger}, EventCounter,FullPIILogs\n\n# Logging Threshold\nlog4j.threshhold=${hive.log.threshold}\n\n#\n# File Appender\n#\n\nlog4j.appender.FA=org.apache.log4j.FileAppender\nlog4j.appender.FA.File=${hive.log.dir}/${hive.log.file}\nlog4j.appender.FA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\nlog4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add "console" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#custom logging levels\n#log4j.logger.xxx=DEBUG\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter\n\n\nlog4j.category.DataNucleus=WARN,FA,FullPIILogs\nlog4j.category.Datastore=WARN,FA,FullPIILogs\nlog4j.category.Datastore.Schema=WARN,FA,FullPIILogs\nlog4j.category.JPOX.Datastore=WARN,FA,FullPIILogs\nlog4j.category.JPOX.Plugin=WARN,FA,FullPIILogs\nlog4j.category.JPOX.MetaData=WARN,FA,FullPIILogs\nlog4j.category.JPOX.Query=WARN,FA,FullPIILogs\nlog4j.category.JPOX.General=WARN,FA,FullPIILogs\nlog4j.category.JPOX.Enhancer=WARN,FA,FullPIILogs\n\n\n# Silence useless ZK logs\nlog4j.logger.org.apache.zookeeper.server.NIOServerCnxn=ERROR,FA,FullPIILogs\nlog4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=ERROR,FA,FullPIILogs\n\n# Full PII log Appender\n# Sends  PII HDP service logs to our storage account\nlog4j.appender.FullPIILogs=com.microsoft.log4jappender.FullPIILogAppender\nlog4j.appender.FullPIILogs.component=hive\nlog4j.appender.FullPIILogs.layout=org.apache.log4j.TTCCLayout\nlog4j.appender.FullPIILogs.Threshold=INFO\nlog4j.appender.FullPIILogs.source=CentralFullServicePIILogs\nlog4j.appender.FullPIILogs.OSType=Linux\nlog4j.appender.FullPIILogs.SuffixHadoopEntryType=true'
  1077.     },
  1078.     'ranger-hive-plugin-properties': {
  1079.         'REPOSITORY_CONFIG_USERNAME': 'hive',
  1080.         'policy_user': 'ambari-qa',
  1081.         'common.name.for.certificate': '',
  1082.         'jdbc.driverClassName': 'org.apache.hive.jdbc.HiveDriver',
  1083.         'REPOSITORY_CONFIG_PASSWORD': 'hive'
  1084.     },
  1085.     'ams-ssl-server': {
  1086.         'ssl.server.keystore.location': '/etc/security/serverKeys/keystore.jks',
  1087.         'ssl.server.keystore.keypassword': 'bigdata',
  1088.         'ssl.server.truststore.location': '/etc/security/serverKeys/all.jks',
  1089.         'ssl.server.keystore.password': 'bigdata',
  1090.         'ssl.server.truststore.password': 'bigdata',
  1091.         'ssl.server.truststore.type': 'jks',
  1092.         'ssl.server.keystore.type': 'jks',
  1093.         'ssl.server.truststore.reload.interval': '10000'
  1094.     },
  1095.     'tez-env': {
  1096.         'content': '\n# Tez specific configuration\nexport TEZ_CONF_DIR={{config_dir}}\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}',
  1097.         'tez_user': 'tez'
  1098.     },
  1099.     'hive-interactive-site': {
  1100.         'hive.tez.input.generate.consistent.splits': 'true',
  1101.         'hive.llap.client.consistent.splits': 'true',
  1102.         'hive.llap.object.cache.enabled': 'true',
  1103.         'llap.shuffle.connection-keep-alive.enable': 'true',
  1104.         'hive.tez.bucket.pruning': 'true',
  1105.         'hive.vectorized.execution.reduce.enabled': 'true',
  1106.         'hive.mapjoin.hybridgrace.hashtable': 'false',
  1107.         'hive.llap.daemon.service.hosts': '@llap0',
  1108.         'hive.llap.task.scheduler.locality.delay': '-1',
  1109.         'hive.llap.io.memory.size': '11264',
  1110.         'hive.server2.thrift.http.port': '10001',
  1111.         'hive.llap.zk.sm.connectionString': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:2181',
  1112.         'hive.llap.daemon.task.scheduler.enable.preemption': 'true',
  1113.         'hive.llap.io.threadpool.size': '2',
  1114.         'hive.llap.daemon.vcpus.per.instance': '${hive.llap.daemon.num.executors}',
  1115.         'hive.optimize.dynamic.partition.hashjoin': 'true',
  1116.         'hive.llap.io.memory.mode': '',
  1117.         'hive.vectorized.execution.mapjoin.native.fast.hashtable.enabled': 'true',
  1118.         'hive.server2.tez.initialize.default.sessions': 'true',
  1119.         'hive.llap.auto.allow.uber': 'false',
  1120.         'hive.metastore.event.listeners': '',
  1121.         'hive.server2.tez.default.queues': 'default',
  1122.         'hive.prewarm.enabled': 'false',
  1123.         'hive.metastore.uris': '',
  1124.         'hive.llap.io.enabled': 'true',
  1125.         'hive.llap.daemon.yarn.container.mb': '19456',
  1126.         'hive.server2.thrift.port': '10000',
  1127.         'hive.server2.webui.use.ssl': 'false',
  1128.         'hive.vectorized.execution.mapjoin.native.enabled': 'true',
  1129.         'hive.driver.parallel.compilation': 'true',
  1130.         'hive.llap.daemon.num.executors': '2',
  1131.         'hive.vectorized.execution.mapjoin.minmax.enabled': 'true',
  1132.         'hive.server2.tez.sessions.per.default.queue': '2',
  1133.         'hive.server2.zookeeper.namespace': 'hiveserver2-hive2',
  1134.         'hive.llap.daemon.allow.permanent.fns': 'false',
  1135.         'hive.server2.enable.doAs': 'false',
  1136.         'hive.execution.engine': 'tez',
  1137.         'hive.server2.webui.port': '10502',
  1138.         'hive.llap.daemon.queue.name': 'default',
  1139.         'hive.exec.orc.split.strategy': 'HYBRID',
  1140.         'hive.tez.exec.print.summary': 'true',
  1141.         'hive.execution.mode': 'llap',
  1142.         'hive.llap.management.rpc.port': '15004',
  1143.         'hive.llap.io.use.lrf': 'true',
  1144.         'llap.shuffle.connection-keep-alive.timeout': '60',
  1145.         'hive.llap.daemon.yarn.shuffle.port': '15551',
  1146.         'hive.llap.execution.mode': 'all',
  1147.         'hive.llap.daemon.rpc.port': '15001'
  1148.     },
  1149.     'core-site': {
  1150.         'fs.azure.shellkeyprovider.script': '/usr/lib/python2.7/dist-packages/hdinsight_common/decrypt.sh',
  1151.         'net.topology.script.file.name': '/etc/hadoop/conf/topology_script.py',
  1152.         'hadoop.proxyuser.hdfs.groups': '*',
  1153.         'hadoop.proxyuser.hcat.hosts': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net',
  1154.         'hadoop.proxyuser.hcat.groups': '*',
  1155.         'fs.trash.interval': '360',
  1156.         'hadoop.proxyuser.hive.groups': '*',
  1157.         'hadoop.http.authentication.simple.anonymous.allowed': 'true',
  1158.         'io.compression.codecs': 'org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec',
  1159.         'hadoop.proxyuser.root.groups': '*',
  1160.         'ipc.client.idlethreshold': '8000',
  1161.         'fs.azure.page.blob.dir': '/atshistory,/tezstaging,/mapreducestaging,/ams/hbase/WALs,/ams/hbase/oldWALs,/ams/hbase/MasterProcWALs',
  1162.         'io.file.buffer.size': '131072',
  1163.         'io.serializations': 'org.apache.hadoop.io.serializer.WritableSerialization',
  1164.         'hadoop.security.authentication': 'simple',
  1165.         'hadoop.proxyuser.root.hosts': 'hn0-094bc5',
  1166.         'mapreduce.jobtracker.webinterface.trusted': 'false',
  1167.         'hadoop.proxyuser.hdfs.hosts': '*',
  1168.         'hadoop.proxyuser.hive.hosts': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net',
  1169.         'fs.defaultFS': 'wasb://ambhviewl-af25@humbtestings2jw.blob.core.windows.net',
  1170.         'fs.AbstractFileSystem.wasb.impl': 'org.apache.hadoop.fs.azure.Wasb',
  1171.         'hadoop.proxyuser.oozie.groups': '*',
  1172.         'ha.failover-controller.active-standby-elector.zk.op.retries': '120',
  1173.         'hadoop.security.key.provider.path': '',
  1174.         'fs.azure.account.keyprovider.humbtestings2jw.blob.core.windows.net': 'org.apache.hadoop.fs.azure.ShellDecryptionKeyProvider',
  1175.         'hadoop.security.authorization': 'false',
  1176.         'ipc.server.tcpnodelay': 'true',
  1177.         'ipc.client.connect.max.retries': '50',
  1178.         'hadoop.security.auth_to_local': 'DEFAULT',
  1179.         'hadoop.proxyuser.oozie.hosts': 'hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net',
  1180.         'fs.azure.account.key.humbtestings2jw.blob.core.windows.net': 'MIIB5gYJKoZIhvcNAQcDoIIB1zCCAdMCAQAxggFGMIIBQgIBADAqMBYxFDASBgNVBAMTC0lzb3RvcGUgRU1SAhAl6EFu3G2agURDb7lnZor/MA0GCSqGSIb3DQEBAQUABIIBAIIkVo460+5w+uLRcFJqc2IIB/iiQ58FSNP0BzowIDHBbqT0vJ+g9GnocHF1+4Zqyb+Cc5mvWPQmBLR4gTSRCrecMYtGu29dgxWooMR13ZVQDCzicSCnRT1nh2HavTk/q1i1Tg9zNKLVYMJvdzOHaXDtyb8C9d5CbKTlyH5v2dXoM36kQegVRgENyzdrPwl2eTehGb3geSIK+6n7SJE7v2th4zM9hPXW+Df6kLqY3duO7YPIAPK5N6ET4ARRLUaqSwNxzBp43+Q5JKPzIwlAbKZhdwYRpFYiw3F59hNuKAKBdDLIHLq4ntyrKy96tggK7DNugUwj+cJfcA7TrIvselYwgYMGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQICRmNmZD5S12AYDttmvjbhRx2dwwdp7TaCGhVWSzF4bXsZW8I1IWLIfE/sT8Ol0EwA9X1wfJfAIbdwf/MeLsB+dwM4Bj5zNtgKF9yWJXACGxsUow22Jsh7xjAWQfwj86Ho4OfwwFNcg2xQg==',
  1181.         'ipc.client.connection.maxidletime': '30000'
  1182.     },
  1183.     'yarn-env': {
  1184.         'yarn_heapsize': '1024',
  1185.         'yarn_pid_dir_prefix': '/var/run/hadoop-yarn',
  1186.         'yarn_user_nproc_limit': '65536',
  1187.         'yarn_log_dir_prefix': '/var/log/hadoop-yarn',
  1188.         'yarn_user_nofile_limit': '32768',
  1189.         'is_supported_yarn_ranger': 'true',
  1190.         'nodemanager_heapsize': '1024',
  1191.         'content': '\nexport HADOOP_YARN_HOME={{hadoop_yarn_home}}\nexport YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\nexport YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\nexport JAVA_HOME={{java64_home}}\n\n# User for YARN daemons\nexport HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n# resolve links - $0 may be a softlink\nexport YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"\n\n# some Java parameters\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\nif [ "$JAVA_HOME" != "" ]; then\n  #echo "run java in $JAVA_HOME"\n  JAVA_HOME=$JAVA_HOME\nfi\n\nif [ "$JAVA_HOME" = "" ]; then\n  echo "Error: JAVA_HOME is not set."\n  exit 1\nfi\n\nJAVA=$JAVA_HOME/bin/java\nJAVA_HEAP_MAX=-Xmx1000m\n\n# For setting YARN specific HEAP sizes please use this\n# Parameter and set appropriately\nYARN_HEAPSIZE={{yarn_heapsize}}\n\n# check envvars which might override default args\nif [ "$YARN_HEAPSIZE" != "" ]; then\n  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"\nfi\n\n# Resource Manager specific parameters\n\n# Specify the max Heapsize for the ResourceManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_RESOURCEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n# Specify the JVM options to be used when starting the ResourceManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_RESOURCEMANAGER_OPTS=\n\n# Node Manager specific parameters\n\n# Specify the max Heapsize for the NodeManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_NODEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n# Specify the max Heapsize for the HistoryManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1024.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_HISTORYSERVER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}\nexport YARN_TIMELINESERVER_OPTS="-Dwhitelist.filename=core-whitelist.res,coremanual-whitelist.res -Dcomponent=apptimelineserver"\n\n# Specify the JVM options to be used when starting the NodeManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\nexport YARN_NODEMANAGER_OPTS="-Dwhitelist.filename=core-whitelist.res,coremanual-whitelist.res -Dcomponent=nodemanager"\n\n# so that filenames w/ spaces are handled correctly in loops below\nIFS=\n\n\n# default log directory and file\nif [ "$YARN_LOG_DIR" = "" ]; then\n  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"\nfi\nif [ "$YARN_LOGFILE" = "" ]; then\n  YARN_LOGFILE=\'yarn.log\'\nfi\n\n# default policy file for service-level authorization\nif [ "$YARN_POLICYFILE" = "" ]; then\n  YARN_POLICYFILE="hadoop-policy.xml"\nfi\n\n# restore ordinary behaviour\nunset IFS\n\n\nYARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"\nYARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"\nYARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"\nYARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"\nYARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"\nYARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"\nYARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"\nYARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"\nif [ "x$JAVA_LIBRARY_PATH" != "x" ]; then\n  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"\nfi\nYARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"',
  1192.         'service_check.queue.name': 'default',
  1193.         'min_user_id': '1000',
  1194.         'yarn_cgroups_enabled': 'false',
  1195.         'yarn_user': 'yarn',
  1196.         'resourcemanager_heapsize': '1024',
  1197.         'apptimelineserver_heapsize': '1024'
  1198.     },
  1199.     'spark2-env': {
  1200.         'spark_pid_dir': '/var/run/spark2',
  1201.         'spark_daemon_memory': '1024',
  1202.         'hive_kerberos_keytab': '{{hive_kerberos_keytab}}',
  1203.         'spark_user': 'spark',
  1204.         'content': '\n#!/usr/bin/env bash\n\n# This file is sourced when running various Spark programs.\n# Copy it as spark-env.sh and edit that to configure Spark for your site.\n\n# Options read in YARN client mode\n#SPARK_EXECUTOR_INSTANCES="2" #Number of workers to start (Default: 2)\n#SPARK_EXECUTOR_CORES="1" #Number of cores for the workers (Default: 1).\n#SPARK_EXECUTOR_MEMORY="1G" #Memory per Worker (e.g. 1000M, 2G) (Default: 1G)\n#SPARK_DRIVER_MEMORY="512M" #Memory for Master (e.g. 1000M, 2G) (Default: 512 Mb)\n#SPARK_YARN_APP_NAME="spark" #The name of your application (Default: Spark)\n#SPARK_YARN_QUEUE="default" #The hadoop queue to use for allocation requests (Default: default)\n#SPARK_YARN_DIST_FILES="" #Comma separated list of files to be distributed with the job.\n#SPARK_YARN_DIST_ARCHIVES="" #Comma separated list of archives to be distributed with the job.\n\n# Generic options for the daemons used in the standalone deploy mode\n\n# Alternate conf dir. (Default: ${SPARK_HOME}/conf)\nexport SPARK_CONF_DIR=${SPARK_CONF_DIR:-{{spark_home}}/conf}\n\n# Where log files are stored.(Default:${SPARK_HOME}/logs)\n#export SPARK_LOG_DIR=${SPARK_HOME:-{{spark_home}}}/logs\nexport SPARK_LOG_DIR={{spark_log_dir}}\n\n# Where the pid file is stored. (Default: /tmp)\nexport SPARK_PID_DIR={{spark_pid_dir}}\n\n#Memory for Master, Worker and history server (default: 1024MB)\nexport SPARK_DAEMON_MEMORY={{spark_daemon_memory}}m\n\n# A string representing this instance of spark.(Default: $USER)\nSPARK_IDENT_STRING=$USER\n\n# The scheduling priority for daemons. (Default: 0)\nSPARK_NICENESS=0\n\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java_home}}',
  1205.         'spark_thrift_cmd_opts': '',
  1206.         'spark_log_dir': '/var/log/spark2',
  1207.         'spark_group': 'spark',
  1208.         'hive_kerberos_principal': '{{hive_kerberos_principal}}'
  1209.     },
  1210.     'hiveserver2-interactive-site': {
  1211.         'hive.service.metrics.hadoop2.component': 'hiveserver2',
  1212.         'hive.metastore.metrics.enabled': 'true',
  1213.         'hive.service.metrics.reporter': 'JSON_FILE, JMX, HADOOP2',
  1214.         'hive.service.metrics.file.location': '/var/log/hive/hiveserver2Interactive-report.json',
  1215.         'hive.async.log.enabled': 'false'
  1216.     },
  1217.     'ranger-yarn-security': {
  1218.         'ranger.plugin.yarn.policy.rest.ssl.config.file': '/etc/hadoop/conf/ranger-policymgr-ssl-yarn.xml',
  1219.         'ranger.plugin.yarn.policy.source.impl': 'org.apache.ranger.admin.client.RangerAdminRESTClient',
  1220.         'ranger.plugin.yarn.policy.pollIntervalMs': '30000',
  1221.         'ranger.plugin.yarn.service.name': '{{repo_name}}',
  1222.         'ranger.plugin.yarn.policy.rest.url': '{{policymgr_mgr_url}}',
  1223.         'ranger.plugin.yarn.policy.cache.dir': '/etc/ranger/{{repo_name}}/policycache'
  1224.     },
  1225.     'capacity-scheduler': {
  1226.         'yarn.scheduler.capacity.default.minimum-user-limit-percent': '100',
  1227.         'yarn.scheduler.capacity.root.default.maximum-capacity': '100',
  1228.         'yarn.scheduler.capacity.root.accessible-node-labels': '*',
  1229.         'yarn.scheduler.capacity.root.capacity': '100',
  1230.         'yarn.scheduler.capacity.maximum-am-resource-percent': '0.33',
  1231.         'yarn.scheduler.capacity.maximum-applications': '10000',
  1232.         'yarn.scheduler.capacity.root.default.user-limit-factor': '10',
  1233.         'yarn.scheduler.capacity.node-locality-delay': '0',
  1234.         'yarn.scheduler.capacity.root.default.acl_submit_applications': '*',
  1235.         'yarn.scheduler.capacity.root.default.state': 'RUNNING',
  1236.         'yarn.scheduler.capacity.root.default.capacity': '100',
  1237.         'yarn.scheduler.capacity.root.acl_administer_queue': '*',
  1238.         'yarn.scheduler.capacity.root.queues': 'default',
  1239.         'yarn.scheduler.capacity.root.default.acl_administer_jobs': '*',
  1240.         'yarn.scheduler.capacity.resource-calculator': 'org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator'
  1241.     },
  1242.     'oozie-site': {
  1243.         'oozie.service.JPAService.validate.db.connection.eviction.num': '10',
  1244.         'oozie.service.SparkConfigurationService.spark.configurations': '*=spark-conf',
  1245.         'oozie.service.JPAService.validate.db.connection': 'true',
  1246.         'oozie.service.HadoopAccessorService.supported.filesystems': 'hdfs,hftp,webhdfs,asv,wasb,asvs,wasbs',
  1247.         'oozie.base.url': 'http://hn0-094bc5.ambhviewl-af25-ssh.m8.internal.cloudapp.net:11000/oozie',
  1248.         'oozie.service.JPAService.jdbc.password': 'lebf9Sqf31i7lXTz5sYFTBCKqlg3aYfk0QwbZ5YrJ0epshD6nZn5HcOGw6Qm4EeSCqmy2vHzTGYsHmP7oYO5uptFf4dcC3F6ukSVga4wssZiuE',
  1249.         'oozie.service.JPAService.jdbc.username': 'ambhviewlaf25094bc5a26ffe42769ae339b3f402be29ooziemetastoreLogin',
  1250.         'oozie.service.AuthorizationService.security.enabled': 'false',
  1251.         'oozie.service.PurgeService.older.than': '3',
  1252.         'oozie.db.schema.name': 'ooziemetastore',
  1253.         'oozie.service.HadoopAccessorService.hadoop.configurations': '*={{hadoop_conf_dir}}',
  1254.         'oozie.service.JPAService.jdbc.url': 'jdbc:sqlserver://df6ulzeuce.database.windows.net;databaseName=ambhviewlaf25094bc5a26ffe42769ae339b3f402be29ooziemetastore;sendStringParametersAsUnicode=false;',
  1255.         'oozie.service.HadoopAccessorService.kerberos.enabled': 'false',
  1256.         'oozie.service.coord.push.check.requeue.interval': '30000',
  1257.         'oozie.credentials.credentialclasses': 'hcat=org.apache.oozie.action.hadoop.HCatCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials',
  1258.         'oozie.service.JPAService.create.db.schema': 'false',
  1259.         'oozie.services.ext': 'org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService',
  1260.         'oozie.authentication.simple.anonymous.allowed': 'true',
  1261.         'oozie.action.retry.interval': '30',
  1262.         'oozie.service.URIHandlerService.uri.handlers': 'org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler',
  1263.         'oozie.authentication.type': 'simple',
  1264.         'oozie.service.JPAService.jdbc.driver': 'com.microsoft.sqlserver.jdbc.SQLServerDriver',
  1265.         'oozie.service.JPAService.validate.db.connection.eviction.interval': '45000'
  1266.     },
  1267.     'ssl-server': {
  1268.         'ssl.server.keystore.location': '/etc/security/serverKeys/keystore.jks',
  1269.         'ssl.server.keystore.keypassword': 'bigdata',
  1270.         'ssl.server.truststore.location': '/etc/security/serverKeys/all.jks',
  1271.         'ssl.server.keystore.password': 'bigdata',
  1272.         'ssl.server.truststore.password': 'bigdata',
  1273.         'ssl.server.truststore.type': 'jks',
  1274.         'ssl.server.keystore.type': 'jks',
  1275.         'ssl.server.truststore.reload.interval': '10000'
  1276.     },
  1277.     'llap-daemon-log4j': {
  1278.         'content': '\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# This is the log4j2 properties file used by llap-daemons. There\'s several loggers defined, which\n# can be selected while configuring LLAP.\n# Based on the one selected - UI links etc need to be manipulated in the system.\n# Note: Some names and logic is common to this file and llap LogHelpers. Make sure to change that\n# as well, if changing this file.\n\nstatus = INFO\nname = LlapDaemonLog4j2\npackages = org.apache.hadoop.hive.ql.log\n\n# list of properties\nproperty.llap.daemon.log.level = INFO\nproperty.llap.daemon.root.logger = console\nproperty.llap.daemon.log.dir = .\nproperty.llap.daemon.log.file = llapdaemon.log\nproperty.llap.daemon.historylog.file = llapdaemon_history.log\nproperty.llap.daemon.log.maxfilesize = 256MB\nproperty.llap.daemon.log.maxbackupindex = 240\n\n# list of all appenders\nappenders = console, RFA, HISTORYAPPENDER, query-routing\n\n# console appender\nappender.console.type = Console\nappender.console.name = console\nappender.console.target = SYSTEM_ERR\nappender.console.layout.type = PatternLayout\nappender.console.layout.pattern = %d{ISO8601} %5p [%t (%X{fragmentId})] %c{2}: %m%n\n\n# rolling file appender\nappender.RFA.type = RollingRandomAccessFile\nappender.RFA.name = RFA\nappender.RFA.fileName = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.log.file}\nappender.RFA.filePattern = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.log.file}_%d{yyyy-MM-dd-HH}_%i.done\nappender.RFA.layout.type = PatternLayout\nappender.RFA.layout.pattern = %d{ISO8601} %-5p [%t (%X{fragmentId})] %c: %m%n\nappender.RFA.policies.type = Policies\nappender.RFA.policies.time.type = TimeBasedTriggeringPolicy\nappender.RFA.policies.time.interval = 1\nappender.RFA.policies.time.modulate = true\nappender.RFA.policies.size.type = SizeBasedTriggeringPolicy\nappender.RFA.policies.size.size = ${sys:llap.daemon.log.maxfilesize}\nappender.RFA.strategy.type = DefaultRolloverStrategy\nappender.RFA.strategy.max = ${sys:llap.daemon.log.maxbackupindex}\n\n# history file appender\nappender.HISTORYAPPENDER.type = RollingRandomAccessFile\nappender.HISTORYAPPENDER.name = HISTORYAPPENDER\nappender.HISTORYAPPENDER.fileName = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.historylog.file}\nappender.HISTORYAPPENDER.filePattern = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.historylog.file}_%d{yyyy-MM-dd}_%i.done\nappender.HISTORYAPPENDER.layout.type = PatternLayout\nappender.HISTORYAPPENDER.layout.pattern = %m%n\nappender.HISTORYAPPENDER.policies.type = Policies\nappender.HISTORYAPPENDER.policies.size.type = SizeBasedTriggeringPolicy\nappender.HISTORYAPPENDER.policies.size.size = ${sys:llap.daemon.log.maxfilesize}\nappender.HISTORYAPPENDER.policies.time.type = TimeBasedTriggeringPolicy\nappender.HISTORYAPPENDER.policies.time.interval = 1\nappender.HISTORYAPPENDER.policies.time.modulate = true\nappender.HISTORYAPPENDER.strategy.type = DefaultRolloverStrategy\nappender.HISTORYAPPENDER.strategy.max = ${sys:llap.daemon.log.maxbackupindex}\n\n# queryId based routing file appender\nappender.query-routing.type = Routing\nappender.query-routing.name = query-routing\nappender.query-routing.routes.type = Routes\nappender.query-routing.routes.pattern = $${ctx:queryId}\n#Purge polciy for query-based Routing Appender\nappender.query-routing.purgePolicy.type = LlapRoutingAppenderPurgePolicy\n# Note: Do not change this name without changing the corresponding entry in LlapConstants\nappender.query-routing.purgePolicy.name = llapLogPurgerQueryRouting\n# default route\nappender.query-routing.routes.route-default.type = Route\nappender.query-routing.routes.route-default.key = $${ctx:queryId}\nappender.query-routing.routes.route-default.ref = RFA\n# queryId based route\nappender.query-routing.routes.route-mdc.type = Route\nappender.query-routing.routes.route-mdc.file-mdc.type = LlapWrappedAppender\nappender.query-routing.routes.route-mdc.file-mdc.name = IrrelevantName-query-routing\nappender.query-routing.routes.route-mdc.file-mdc.app.type = RandomAccessFile\nappender.query-routing.routes.route-mdc.file-mdc.app.name = file-mdc\nappender.query-routing.routes.route-mdc.file-mdc.app.fileName = ${sys:llap.daemon.log.dir}/${ctx:queryId}-${ctx:dagId}.log\nappender.query-routing.routes.route-mdc.file-mdc.app.layout.type = PatternLayout\nappender.query-routing.routes.route-mdc.file-mdc.app.layout.pattern = %d{ISO8601} %5p [%t (%X{fragmentId})] %c{2}: %m%n\n\n# list of all loggers\nloggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX, HistoryLogger, LlapIoImpl, LlapIoOrc, LlapIoCache, LlapIoLocking\n\nlogger.LlapIoImpl.name = LlapIoImpl\nlogger.LlapIoImpl.level = INFO\n\nlogger.LlapIoOrc.name = LlapIoOrc\nlogger.LlapIoOrc.level = WARN\n\nlogger.LlapIoCache.name = LlapIoCache\nlogger.LlapIOCache.level = WARN\n\nlogger.LlapIoLocking.name = LlapIoLocking\nlogger.LlapIoLocking.level = WARN\n\nlogger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn\nlogger.NIOServerCnxn.level = WARN\n\nlogger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO\nlogger.ClientCnxnSocketNIO.level = WARN\n\nlogger.DataNucleus.name = DataNucleus\nlogger.DataNucleus.level = ERROR\n\nlogger.Datastore.name = Datastore\nlogger.Datastore.level = ERROR\n\nlogger.JPOX.name = JPOX\nlogger.JPOX.level = ERROR\n\nlogger.HistoryLogger.name = org.apache.hadoop.hive.llap.daemon.HistoryLogger\nlogger.HistoryLogger.level = INFO\nlogger.HistoryLogger.additivity = false\nlogger.HistoryLogger.appenderRefs = HistoryAppender\nlogger.HistoryLogger.appenderRef.HistoryAppender.ref = HISTORYAPPENDER\n\n# root logger\nrootLogger.level = ${sys:llap.daemon.log.level}\nrootLogger.appenderRefs = root\nrootLogger.appenderRef.root.ref = ${sys:llap.daemon.root.logger}'
  1279.     },
  1280.     'zoo.cfg': {
  1281.         'clientPort': '2181',
  1282.         'initLimit': '10',
  1283.         'autopurge.purgeInterval': '24',
  1284.         'syncLimit': '5',
  1285.         'tickTime': '2000',
  1286.         'dataDir': '/hadoop/zookeeper',
  1287.         'autopurge.snapRetainCount': '30'
  1288.     },
  1289.     'pig-log4j': {
  1290.         'content': '\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n\n# ***** Set root logger level to DEBUG and its only appender to A.\nlog4j.logger.org.apache.pig=info, A\n\n# ***** A is set to be a ConsoleAppender.\nlog4j.appender.A=org.apache.log4j.ConsoleAppender\n# ***** A uses PatternLayout.\nlog4j.appender.A.layout=org.apache.log4j.PatternLayout\nlog4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n'
  1291.     },
  1292.     'ams-log4j': {
  1293.         'content': '\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# Define some default values that can be overridden by system properties\nams.log.dir=.\nams.log.file=ambari-metrics-collector.log\n\n# Root logger option\nlog4j.rootLogger=INFO,file,ETW,FilterLog\n\n# Direct log messages to a log file\nlog4j.appender.file=org.apache.log4j.RollingFileAppender\nlog4j.appender.file.File=${ams.log.dir}/${ams.log.file}\nlog4j.appender.file.MaxFileSize=80MB\nlog4j.appender.file.MaxBackupIndex=10\nlog4j.appender.file.layout=org.apache.log4j.PatternLayout\nlog4j.appender.file.layout.ConversionPattern=%d{ABSOLUTE} %5p [%t] %c{1}:%L - %m%n\n\n#EtwLog Appender\n#sends HDP service logs to customer storage account\nlog4j.appender.ETW=com.microsoft.log4jappender.EtwAppender\nlog4j.appender.ETW.source=HadoopServiceLog\nlog4j.appender.ETW.component=ambari-metrics-collector\nlog4j.appender.ETW.layout=org.apache.log4j.TTCCLayout\nlog4j.appender.ETW.OSType=Linux\n\n# FilterLog Appender\n# Sends filtered HDP service logs to our storage account\nlog4j.appender.FilterLog=com.microsoft.log4jappender.FilterLogAppender\nlog4j.appender.FilterLog.layout=org.apache.log4j.TTCCLayout\nlog4j.appender.FilterLog.source=CentralFilteredHadoopServiceLogs\nlog4j.appender.FilterLog.component=ambari-metrics-collector\nlog4j.appender.FilterLog.whitelistFileName=NA\nlog4j.appender.FilterLog.OSType=Linux'
  1294.     },
  1295.     'hive-exec-log4j2': {
  1296.         'content': '\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nstatus = INFO\nname = HiveExecLog4j2\npackages = org.apache.hadoop.hive.ql.log\n\n# list of properties\nproperty.hive.log.level = INFO\nproperty.hive.root.logger = FA\nproperty.hive.query.id = hadoop\nproperty.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}\nproperty.hive.log.file = ${sys:hive.query.id}.log\n\n# list of all appenders\nappenders = console, FA\n\n# console appender\nappender.console.type = Console\nappender.console.name = console\nappender.console.target = SYSTEM_ERR\nappender.console.layout.type = PatternLayout\nappender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n\n\n# simple file appender\nappender.FA.type = File\nappender.FA.name = FA\nappender.FA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}\nappender.FA.layout.type = PatternLayout\nappender.FA.layout.pattern = %d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n\n\n# list of all loggers\nloggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX\n\nlogger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn\nlogger.NIOServerCnxn.level = WARN\n\nlogger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO\nlogger.ClientCnxnSocketNIO.level = WARN\n\nlogger.DataNucleus.name = DataNucleus\nlogger.DataNucleus.level = ERROR\n\nlogger.Datastore.name = Datastore\nlogger.Datastore.level = ERROR\n\nlogger.JPOX.name = JPOX\nlogger.JPOX.level = ERROR\n\n# root logger\nrootLogger.level = ${sys:hive.log.level}\nrootLogger.appenderRefs = root\nrootLogger.appenderRef.root.ref = ${sys:hive.root.logger}'
  1297.     },
  1298.     'zookeeper-env': {
  1299.         'zk_log_dir': '/var/log/zookeeper',
  1300.         'content': '\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS="{{zk_server_heapsize}}  -Dwhitelist.filename=NA -Dcomponent=zookeeper-server"\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\nexport CLIENT_JVMFLAGS="-Dwhitelist.filename=NA -Dcomponent=zookeeper-client"\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}} "\nexport CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"\n{% endif %}',
  1301.         'zk_server_heapsize': '1024m',
  1302.         'zk_pid_dir': '/var/run/zookeeper',
  1303.         'zk_user': 'zookeeper'
  1304.     },
  1305.     'ams-hbase-log4j': {
  1306.         'content': '\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# "License"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# Define some default values that can be overridden by system properties\nhbase.root.logger=INFO,console\nhbase.security.logger=INFO,console\nhbase.log.dir=.\nhbase.log.file=hbase.log\n\n# Define the root logger to the system property "hbase.root.logger".\nlog4j.rootLogger=${hbase.root.logger}\n\n# Logging Threshold\nlog4j.threshold=ALL\n\n#\n# Daily Rolling File Appender\n#\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 10-day backup\n#log4j.appender.DRFA.MaxBackupIndex=10\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n# Rolling File Appender properties\nhbase.log.maxfilesize=256MB\nhbase.log.maxbackupindex=20\n\n# Rolling File Appender\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}\n\nlog4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}\nlog4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n#\n# Security audit appender\n#\nhbase.security.log.file=SecurityAuth.audit\nhbase.security.log.maxfilesize=256MB\nhbase.security.log.maxbackupindex=20\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}\nlog4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.category.SecurityLogger=${hbase.security.logger}\nlog4j.additivity.SecurityLogger=false\n#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE\n\n#\n# Null Appender\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# console\n# Add "console" to rootlogger above if you want to use this\n#\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n# Custom Logging levels\n\nlog4j.logger.org.apache.zookeeper=INFO\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.hbase=INFO\n# Make these two classes INFO-level. Make them DEBUG to see more zk debug.\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO\n#log4j.logger.org.apache.hadoop.dfs=DEBUG\n# Set this class to log INFO only otherwise its OTT\n# Enable this to get detailed connection error/retry logging.\n# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE\n\n\n# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)\n#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG\n\n# Uncomment the below if you want to remove logging of client region caching\'\n# and scan of .META. messages\n# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO\n# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO\n\n#EtwLog Appender\n#sends HDP service logs to customer storage account\nlog4j.appender.ETW=com.microsoft.log4jappender.EtwAppender\nlog4j.appender.ETW.source=HadoopServiceLog\nlog4j.appender.ETW.component=ams-hbase\nlog4j.appender.ETW.layout=org.apache.log4j.TTCCLayout\nlog4j.appender.ETW.OSType=Linux\n\n# FilterLog Appender\n# Sends filtered HDP service logs to our storage account\nlog4j.appender.FilterLog=com.microsoft.log4jappender.FilterLogAppender\nlog4j.appender.FilterLog.layout=org.apache.log4j.TTCCLayout\nlog4j.appender.FilterLog.source=CentralFilteredHadoopServiceLogs\nlog4j.appender.FilterLog.component=ams-hbase\nlog4j.appender.FilterLog.whitelistFileName=NA\nlog4j.appender.FilterLog.OSType=Linux'
  1307.     },
  1308.     'cluster-env': {
  1309.         'security_enabled': 'false',
  1310.         'override_uid': 'true',
  1311.         'fetch_nonlocal_groups': 'true',
  1312.         'one_dir_per_partition': 'false',
  1313.         'commands_to_retry': 'INSTALL,START',
  1314.         'repo_ubuntu_template': '{{package_type}} {{base_url}} {{components}}',
  1315.         'hadoop-streaming_tar_destination_folder': 'wasb:///hdp/apps/{{ hdp_stack_version }}/mapreduce/',
  1316.         'ignore_groupsusers_create': 'false',
  1317.         'alerts_repeat_tolerance': '1',
  1318.         'smokeuser_keytab': '/etc/security/keytabs/smokeuser.headless.keytab',
  1319.         'kerberos_domain': 'EXAMPLE.COM',
  1320.         'hive_tar_destination_folder': 'wasb:///hdp/apps/{{ hdp_stack_version }}/hive/',
  1321.         'manage_dirs_on_root': 'true',
  1322.         'recovery_lifetime_max_count': '1024',
  1323.         'recovery_type': 'AUTO_START',
  1324.         'pig_tar_destination_folder': 'wasb:///hdp/apps/{{ hdp_stack_version }}/pig/',
  1325.         'ignore_bad_mounts': 'false',
  1326.         'recovery_window_in_minutes': '60',
  1327.         'tez_tar_destination_folder': 'wasb:///hdp/apps/{{ hdp_stack_version }}/tez/',
  1328.         'command_retry_enabled': 'true',
  1329.         'sqoop_tar_destination_folder': 'wasb:///hdp/apps/{{ hdp_stack_version }}/sqoop/',
  1330.         'stack_tools': '{\n  "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],\n  "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"]\n}',
  1331.         'recovery_retry_interval': '5',
  1332.         'command_retry_max_time_in_sec': '600',
  1333.         'stack_features': '{\n  "stack_features": [\n    {\n      "name": "snappy",\n      "description": "Snappy compressor/decompressor support",\n      "min_version": "2.0.0.0",\n      "max_version": "2.2.0.0"\n    },\n    {\n      "name": "lzo",\n      "description": "LZO libraries support",\n      "min_version": "2.2.1.0"\n    },\n    {\n      "name": "express_upgrade",\n      "description": "Express upgrade support",\n      "min_version": "2.1.0.0"\n    },\n    {\n      "name": "rolling_upgrade",\n      "description": "Rolling upgrade support",\n      "min_version": "2.2.0.0"\n    },\n    {\n      "name": "config_versioning",\n      "description": "Configurable versions support",\n      "min_version": "2.3.0.0"\n    },\n    {\n      "name": "datanode_non_root",\n      "description": "DataNode running as non-root support (AMBARI-7615)",\n      "min_version": "2.2.0.0"\n    },\n    {\n      "name": "remove_ranger_hdfs_plugin_env",\n      "description": "HDFS removes Ranger env files (AMBARI-14299)",\n      "min_version": "2.3.0.0"\n    },\n    {\n      "name": "ranger",\n      "description": "Ranger Service support",\n      "min_version": "2.2.0.0"\n    },\n    {\n      "name": "ranger_tagsync_component",\n      "description": "Ranger Tagsync component support (AMBARI-14383)",\n      "min_version": "2.5.0.0"\n    },\n    {\n      "name": "phoenix",\n      "description": "Phoenix Service support",\n      "min_version": "2.3.0.0"\n    },\n    {\n      "name": "nfs",\n      "description": "NFS support",\n      "min_version": "2.3.0.0"\n    },\n    {\n      "name": "tez_for_spark",\n      "description": "Tez dependency for Spark",\n      "min_version": "2.2.0.0",\n      "max_version": "2.3.0.0"\n    },\n    {\n      "name": "timeline_state_store",\n      "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",\n      "min_version": "2.2.0.0"\n    },\n    {\n      "name": "copy_tarball_to_hdfs",\n      "description": "Copy tarball to HDFS support (AMBARI-12113)",\n      "min_version": "2.2.0.0"\n    },\n    {\n      "name": "spark_16plus",\n      "description": "Spark 1.6+",\n      "min_version": "2.4.0.0"\n    },\n    {\n      "name": "spark_thriftserver",\n      "description": "Spark Thrift Server",\n      "min_version": "2.3.2.0"\n    },\n    {\n      "name": "storm_kerberos",\n      "description": "Storm Kerberos support (AMBARI-7570)",\n      "min_version": "2.2.0.0"\n    },\n    {\n      "name": "storm_ams",\n      "description": "Storm AMS integration (AMBARI-10710)",\n      "min_version": "2.2.0.0"\n    },\n    {\n      "name": "create_kafka_broker_id",\n      "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",\n      "min_version": "2.2.0.0",\n      "max_version": "2.3.0.0"\n    },\n    {\n      "name": "kafka_listeners",\n      "description": "Kafka listeners (AMBARI-10984)",\n      "min_version": "2.3.0.0"\n    },\n    {\n      "name": "kafka_kerberos",\n      "description": "Kafka Kerberos support (AMBARI-10984)",\n      "min_version": "2.3.0.0"\n    },\n    {\n      "name": "pig_on_tez",\n      "description": "Pig on Tez support (AMBARI-7863)",\n      "min_version": "2.2.0.0"\n    },\n    {\n      "name": "ranger_usersync_non_root",\n      "description": "Ranger Usersync as non-root user (AMBARI-10416)",\n      "min_version": "2.3.0.0"\n    },\n    {\n      "name": "ranger_audit_db_support",\n      "description": "Ranger Audit to DB support",\n      "min_version": "2.2.0.0",\n      "max_version": "2.5.0.0"\n    },\n    {\n      "name": "accumulo_kerberos_user_auth",\n      "description": "Accumulo Kerberos User Auth (AMBARI-10163)",\n      "min_version": "2.3.0.0"\n    },\n    {\n      "name": "knox_versioned_data_dir",\n      "description": "Use versioned data dir for Knox (AMBARI-13164)",\n      "min_version": "2.3.2.0"\n    },\n    {\n      "name": "knox_sso_topology",\n      "description": "Knox SSO Topology support (AMBARI-13975)",\n      "min_version": "2.3.8.0"\n    },\n    {\n      "name": "atlas_rolling_upgrade",\n      "description": "Rolling upgrade support for Atlas",\n      "min_version": "2.3.0.0"\n    },\n    {\n      "name": "oozie_admin_user",\n      "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",\n      "min_version": "2.2.0.0"\n    },\n    {\n      "name": "oozie_create_hive_tez_configs",\n      "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",\n      "min_version": "2.2.0.0"\n    },\n    {\n      "name": "oozie_setup_shared_lib",\n      "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",\n      "min_version": "2.2.0.0"\n    },\n    {\n      "name": "oozie_host_kerberos",\n      "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",\n      "min_version": "2.0.0.0",\n      "max_version": "2.2.0.0"\n    },\n    {\n      "name": "falcon_extensions",\n      "description": "Falcon Extension",\n      "min_version": "2.5.0.0"\n    },\n    {\n      "name": "hive_metastore_upgrade_schema",\n      "description": "Hive metastore upgrade schema support (AMBARI-11176)",\n      "min_version": "2.3.0.0"\n     },\n    {\n      "name": "hive_server_interactive",\n      "description": "Hive server interactive support (AMBARI-15573)",\n      "min_version": "2.5.0.0"\n     },\n    {\n      "name": "hive_webhcat_specific_configs",\n      "description": "Hive webhcat specific configurations support (AMBARI-12364)",\n      "min_version": "2.3.0.0"\n     },\n    {\n      "name": "hive_purge_table",\n      "description": "Hive purge table support (AMBARI-12260)",\n      "min_version": "2.3.0.0"\n     },\n    {\n      "name": "hive_server2_kerberized_env",\n      "description": "Hive server2 working on kerberized environment (AMBARI-13749)",\n      "min_version": "2.2.3.0",\n      "max_version": "2.2.5.0"\n     },\n    {\n      "name": "hive_env_heapsize",\n      "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",\n      "min_version": "2.2.0.0"\n    },\n    {\n      "name": "ranger_kms_hsm_support",\n      "description": "Ranger KMS HSM support (AMBARI-15752)",\n      "min_version": "2.5.0.0"\n    },\n    {\n      "name": "ranger_log4j_support",\n      "description": "Ranger supporting log-4j properties (AMBARI-15681)",\n      "min_version": "2.5.0.0"\n    },\n    {\n      "name": "ranger_kerberos_support",\n      "description": "Ranger Kerberos support",\n      "min_version": "2.5.0.0"\n    },\n    {\n      "name": "hive_metastore_site_support",\n      "description": "Hive Metastore site support",\n      "min_version": "2.5.0.0"\n    },\n    {\n      "name": "ranger_usersync_password_jceks",\n      "description": "Saving Ranger Usersync credentials in jceks",\n      "min_version": "2.5.0.0"\n    },\n    {\n      "name": "ranger_install_infra_client",\n      "description": "Ambari Infra Service support",\n      "min_version": "2.5.0.0"\n    },\n    {\n      "name": "falcon_atlas_support_2_3",\n      "description": "Falcon Atlas integration support for 2.3 stack",\n      "min_version": "2.3.99.0",\n      "max_version": "2.4.0.0"\n    },\n    {\n      "name": "falcon_atlas_support",\n      "description": "Falcon Atlas integration",\n      "min_version": "2.5.0.0"\n    },\n    {\n      "name": "hbase_home_directory",\n      "description": "Hbase home directory in HDFS needed for HBASE backup",\n      "min_version": "2.5.0.0"\n    },\n    {\n      "name": "spark_livy",\n      "description": "Livy as slave component of spark",\n      "min_version": "2.5.0.0"\n    },\n    {\n      "name": "atlas_ranger_plugin_support",\n      "description": "Atlas Ranger plugin support",\n      "min_version": "2.5.0.0"\n    },\n    {\n      "name": "atlas_conf_dir_in_path",\n      "description": "Prepend the Atlas conf dir (/etc/atlas/conf) to the classpath of Storm and Falcon",\n      "min_version": "2.3.0.0",\n      "max_version": "2.4.99.99"\n    },\n    {\n      "name": "atlas_upgrade_support",\n      "description": "Atlas supports express and rolling upgrades",\n      "min_version": "2.5.0.0"\n    },\n    {\n      "name": "atlas_hook_support",\n      "description": "Atlas support for hooks in Hive, Storm, Falcon, and Sqoop",\n      "min_version": "2.5.0.0"\n    },\n    {\n      "name": "ranger_pid_support",\n      "description": "Ranger Service support pid generation AMBARI-16756",\n      "min_version": "2.5.0.0"\n    },\n    {\n      "name": "ranger_kms_pid_support",\n      "description": "Ranger KMS Service support pid generation",\n      "min_version": "2.5.0.0"\n    },\n    {\n      "name": "ranger_admin_password_change",\n      "description": "Allow ranger admin credentials to be specified during cluster creation (AMBARI-17000)",\n      "min_version": "2.5.0.0"\n    },\n    {\n      "name": "storm_metrics_apache_classes",\n      "description": "Metrics sink for Storm that uses Apache class names",\n      "min_version": "2.5.0.0"\n    },\n    {\n      "name": "spark_java_opts_support",\n      "description": "Allow Spark to generate java-opts file",\n      "min_version": "2.2.0.0",\n      "max_version": "2.4.0.0"\n    },\n    {\n      "name": "atlas_hbase_setup",\n      "description": "Use script to create Atlas tables in Hbase and set permissions for Atlas user.",\n      "min_version": "2.5.0.0"\n    },\n    {\n      "name": "ranger_hive_plugin_jdbc_url",\n      "description": "Handle Ranger hive repo config jdbc url change for stack 2.5 (AMBARI-18386)",\n      "min_version": "2.5.0.0"\n    }\n  ]\n}',
  1334.         'recovery_enabled': 'true',
  1335.         'recovery_max_count': '6',
  1336.         'stack_root': '/usr/hdp',
  1337.         'repo_suse_rhel_template': '[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
  1338.         'mapreduce_tar_destination_folder': 'wasb:///hdp/apps/{{ hdp_stack_version }}/mapreduce/',
  1339.         'user_group': 'hadoop',
  1340.         'managed_hdfs_resource_property_names': '',
  1341.         'smokeuser': 'ambari-qa'
  1342.     }
  1343. }
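
The closing brace above ends the single top-level mapping: each top-level key is what appears to be an Ambari-style configuration type (core-site, yarn-env, capacity-scheduler, the various *-log4j and *-env entries, and so on), and each value maps property names to string values. Because keys and values are single-quoted, the dump is a Python-style dict literal rather than strict JSON, so a minimal sketch like the following could load it and read individual properties. The filename is hypothetical and not part of the paste; ast.literal_eval is used because json.loads would reject the single quotes.

# Minimal sketch (not part of the original paste): parse the dump above
# and look up a few properties. Assumes the dict literal was saved
# verbatim to 'cluster_configs.txt' (hypothetical filename).
import ast
from pathlib import Path

# literal_eval accepts the single-quoted Python-style literal;
# strict JSON parsing would fail on it.
configs = ast.literal_eval(Path("cluster_configs.txt").read_text())

print(sorted(configs))                        # all configuration types
print(configs["core-site"]["fs.defaultFS"])   # default (wasb://) filesystem
print(configs["yarn-env"]["yarn_heapsize"])   # '1024'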