<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/**
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-->
<configuration>
  <property>
    <name>hbase.rootdir</name>
    <value>file:///tmp/hbase-${user.name}/hbase</value>
    <description>The directory shared by region servers.
    Should be fully-qualified to include the filesystem to use.
    E.g: hdfs://NAMENODE_SERVER:PORT/HBASE_ROOTDIR
    </description>
  </property>
  <property>
    <name>hbase.master.port</name>
    <value>60000</value>
    <description>The port master should bind to.</description>
  </property>
  <property>
    <name>hbase.cluster.distributed</name>
    <value>false</value>
    <description>The mode the cluster will be in. Possible values are
    false: standalone and pseudo-distributed setups with managed Zookeeper
    true: fully-distributed with unmanaged Zookeeper Quorum (see hbase-env.sh)
    </description>
  </property>
  <property>
    <name>hbase.tmp.dir</name>
    <value>/tmp/hbase-${user.name}</value>
    <description>Temporary directory on the local filesystem.</description>
  </property>
  <property>
    <name>hbase.master.info.port</name>
    <value>60010</value>
    <description>The port for the hbase master web UI
    Set to -1 if you do not want the info server to run.
    </description>
  </property>
  <property>
    <name>hbase.master.info.bindAddress</name>
    <value>0.0.0.0</value>
    <description>The address for the hbase master web UI
    </description>
  </property>
  <property>
    <name>hbase.client.write.buffer</name>
    <value>2097152</value>
    <description>Size of the write buffer in bytes. A bigger buffer takes more
    memory -- on both the client and server side since server instantiates
    the passed write buffer to process it -- but reduces the number of RPC.
    For an estimate of server-side memory-used, evaluate
    hbase.client.write.buffer * hbase.regionserver.handler.count
    </description>
  </property>
  <property>
    <name>hbase.master.meta.thread.rescanfrequency</name>
    <value>60000</value>
    <description>How long the HMaster sleeps (in milliseconds) between scans of
    the root and meta tables.
    </description>
  </property>
  <property>
    <name>hbase.regionserver.port</name>
    <value>60020</value>
    <description>The port an HBase region server binds to.
    </description>
  </property>
  <property>
    <name>hbase.regionserver.info.port</name>
    <value>60030</value>
    <description>The port for the hbase regionserver web UI
    Set to -1 if you do not want the info server to run.
    </description>
  </property>
  <property>
    <name>hbase.regionserver.info.port.auto</name>
    <value>false</value>
    <description>Info server auto port bind. Enables automatic port
    search if hbase.regionserver.info.port is already in use.
    Useful for testing, turned off by default.
    </description>
  </property>
  <property>
    <name>hbase.regionserver.info.bindAddress</name>
    <value>0.0.0.0</value>
    <description>The address for the hbase regionserver web UI
    </description>
  </property>
  <property>
    <name>hbase.regionserver.class</name>
    <value>org.apache.hadoop.hbase.ipc.HRegionInterface</value>
    <description>An interface that is assignable to HRegionInterface. Used in HClient for
    opening proxy to remote region server.
    </description>
  </property>
  <property>
    <name>hbase.client.pause</name>
    <value>1000</value>
    <description>General client pause value. Used mostly as value to wait
    before running a retry of a failed get, region lookup, etc.</description>
  </property>
  <property>
    <name>hbase.client.retries.number</name>
    <value>10</value>
    <description>Maximum retries. Used as maximum for all retryable
    operations such as fetching of the root region from root region
    server, getting a cell's value, starting a row update, etc.
    Default: 10.
    </description>
  </property>
  <property>
    <name>hbase.client.scanner.caching</name>
    <value>1</value>
    <description>Number of rows that will be fetched when calling next
    on a scanner if it is not served from memory. Higher caching values
    will enable faster scanners but will eat up more memory and some
    calls of next may take longer and longer times when the cache is empty.
    </description>
  </property>
  <property>
    <name>hbase.client.keyvalue.maxsize</name>
    <value>10485760</value>
    <description>Specifies the combined maximum allowed size of a KeyValue
    instance. This is to set an upper boundary for a single entry saved in a
    storage file. Since they cannot be split it helps avoiding that a region
    cannot be split any further because the data is too large. It seems wise
    to set this to a fraction of the maximum region size. Setting it to zero
    or less disables the check.
    </description>
  </property>
  <property>
    <name>hbase.regionserver.lease.period</name>
    <value>60000</value>
    <description>HRegion server lease period in milliseconds. Default is
    60 seconds. Clients must report in within this period else they are
    considered dead.</description>
  </property>
  <property>
    <name>hbase.regionserver.handler.count</name>
    <value>25</value>
    <description>Count of RPC Server instances spun up on RegionServers
    Same property is used by the HMaster for count of master handlers.
    Default is 25.
    </description>
  </property>
  <property>
    <name>hbase.regionserver.msginterval</name>
    <value>1000</value>
    <description>Interval between messages from the RegionServer to HMaster
    in milliseconds. Use a high value like 3000 for clusters with more than 10
    nodes. Default is 1 second so that HBase seems more 'live'.
    </description>
  </property>
  <property>
    <name>hbase.regionserver.flushlogentries</name>
    <value>1</value>
    <description>Sync the HLog to the HDFS when it has accumulated this many
    entries. Default 1. Value is checked on every HLog.hflush
    </description>
  </property>
  <property>
    <name>hbase.regionserver.optionallogflushinterval</name>
    <value>1000</value>
    <description>Sync the HLog to the HDFS after this interval if it has not
    accumulated enough entries to trigger a sync. Default 1 second. Units:
    milliseconds.
    </description>
  </property>
  <property>
    <name>hbase.regionserver.regionSplitLimit</name>
    <value>2147483647</value>
    <description>Limit for the number of regions after which no more region
    splitting should take place. This is not a hard limit for the number of
    regions but acts as a guideline for the regionserver to stop splitting after
    a certain limit. Default is set to MAX_INT.
    </description>
  </property>
  <property>
    <name>hbase.regionserver.logroll.period</name>
    <value>3600000</value>
    <description>Period at which we will roll the commit log.</description>
  </property>
  <property>
    <name>hbase.regionserver.hlog.reader.impl</name>
    <value>org.apache.hadoop.hbase.regionserver.wal.SequenceFileLogReader</value>
    <description>The HLog file reader implementation.</description>
  </property>
  <property>
    <name>hbase.regionserver.hlog.writer.impl</name>
    <value>org.apache.hadoop.hbase.regionserver.wal.SequenceFileLogWriter</value>
    <description>The HLog file writer implementation.</description>
  </property>
  <property>
    <name>hbase.regionserver.thread.splitcompactcheckfrequency</name>
    <value>20000</value>
    <description>How often a region server runs the split/compaction check.
    </description>
  </property>
  <property>
    <name>hbase.regionserver.nbreservationblocks</name>
    <value>4</value>
    <description>The number of reservation blocks which are used to prevent
    unstable region servers caused by an OOME.
    </description>
  </property>
  <property>
    <name>hbase.zookeeper.dns.interface</name>
    <value>default</value>
    <description>The name of the Network Interface from which a ZooKeeper server
    should report its IP address.
    </description>
  </property>
  <property>
    <name>hbase.zookeeper.dns.nameserver</name>
    <value>default</value>
    <description>The host name or IP address of the name server (DNS)
    which a ZooKeeper server should use to determine the host name used by the
    master for communication and display purposes.
    </description>
  </property>
  <property>
    <name>hbase.regionserver.dns.interface</name>
    <value>default</value>
    <description>The name of the Network Interface from which a region server
    should report its IP address.
    </description>
  </property>
  <property>
    <name>hbase.regionserver.dns.nameserver</name>
    <value>default</value>
    <description>The host name or IP address of the name server (DNS)
    which a region server should use to determine the host name used by the
    master for communication and display purposes.
    </description>
  </property>
  <property>
    <name>hbase.master.dns.interface</name>
    <value>default</value>
    <description>The name of the Network Interface from which a master
    should report its IP address.
    </description>
  </property>
  <property>
    <name>hbase.master.dns.nameserver</name>
    <value>default</value>
    <description>The host name or IP address of the name server (DNS)
    which a master should use to determine the host name used
    for communication and display purposes.
    </description>
  </property>
  <property>
    <name>hbase.regionserver.global.memstore.upperLimit</name>
    <value>0.4</value>
    <description>Maximum size of all memstores in a region server before new
    updates are blocked and flushes are forced. Defaults to 40% of heap
    </description>
  </property>
  <property>
    <name>hbase.regionserver.global.memstore.lowerLimit</name>
    <value>0.35</value>
    <description>When memstores are being forced to flush to make room in
    memory, keep flushing until we hit this mark. Defaults to 35% of heap.
    This value equal to hbase.regionserver.global.memstore.upperLimit causes
    the minimum possible flushing to occur when updates are blocked due to
    memstore limiting.
    </description>
  </property>
  <property>
    <name>hbase.hbasemaster.maxregionopen</name>
    <value>120000</value>
    <description>Period to wait for a region open. If regionserver
    takes longer than this interval, assign to a new regionserver.
    </description>
  </property>
  <property>
    <name>hbase.master.logcleaner.ttl</name>
    <value>600000</value>
    <description>Maximum time a log can stay in the .oldlogdir directory,
    after which it will be cleaned by a master thread.
    </description>
  </property>
  <property>
    <name>hbase.master.logcleaner.plugins</name>
    <value>org.apache.hadoop.hbase.master.TimeToLiveLogCleaner,org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner</value>
    <description>A comma-separated list of LogCleanerDelegate that are used
    in LogsCleaner. These log cleaners are called in order, so put the log
    cleaner that prunes the most log files in the front. To implement your own
    LogCleanerDelegate, just put it in HBase's classpath and add the fully
    qualified class name here. Without special reason, you should always add
    the above default log cleaners in the list.
    </description>
  </property>
  <property>
    <name>hbase.regions.percheckin</name>
    <value>10</value>
    <description>Maximum number of regions that can be assigned in a single go
    to a region server.
    </description>
  </property>
  <property>
    <name>hbase.server.thread.wakefrequency</name>
    <value>10000</value>
    <description>Time to sleep in between searches for work (in milliseconds).
    Used as sleep interval by service threads such as META scanner and log roller.
    </description>
  </property>
  <property>
    <name>hbase.hregion.memstore.flush.size</name>
    <value>67108864</value>
    <description>
    Memstore will be flushed to disk if size of the memstore
    exceeds this number of bytes. Value is checked by a thread that runs
    every hbase.server.thread.wakefrequency.
    </description>
  </property>
  <property>
    <name>hbase.hregion.preclose.flush.size</name>
    <value>5242880</value>
    <description>
    If the memstores in a region are this size or larger when we go
    to close, run a "pre-flush" to clear out memstores before we put up
    the region closed flag and take the region offline. On close,
    a flush is run under the close flag up to empty memory. During
    this time the region is offline and we are not taking on any writes.
    If the memstore content is large, this flush could take a long time to
    complete. The preflush is meant to clean out the bulk of the memstore
    before putting up the close flag and taking the region offline so the
    flush that runs under the close flag has little to do.
    </description>
  </property>
  <property>
    <name>hbase.hregion.memstore.block.multiplier</name>
    <value>2</value>
    <description>
    Block updates if memstore has hbase.hregion.block.memstore
    times hbase.hregion.flush.size bytes. Useful preventing
    runaway memstore during spikes in update traffic. Without an
    upper-bound, memstore fills such that when it flushes the
    resultant flush files take a long time to compact or split, or
    worse, we OOME.
    </description>
  </property>
  <property>
    <name>hbase.hregion.max.filesize</name>
    <value>268435456</value>
    <description>
    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
    grown to exceed this value, the hosting HRegion is split in two.
    Default: 256M.
    </description>
  </property>
  <property>
    <name>hbase.hstore.compactionThreshold</name>
    <value>3</value>
    <description>
    If more than this number of HStoreFiles in any one HStore
    (one HStoreFile is written per flush of memstore) then a compaction
    is run to rewrite all HStoreFiles files as one. Larger numbers
    put off compaction but when it runs, it takes longer to complete.
    During a compaction, updates cannot be flushed to disk. Long
    compactions require memory sufficient to carry the logging of
    all updates across the duration of the compaction.
    If too large, clients timeout during compaction.
    </description>
  </property>
  <property>
    <name>hbase.hstore.blockingStoreFiles</name>
    <value>7</value>
    <description>
    If more than this number of StoreFiles in any one Store
    (one StoreFile is written per flush of MemStore) then updates are
    blocked for this HRegion until a compaction is completed, or
    until hbase.hstore.blockingWaitTime has been exceeded.
    </description>
  </property>
  <property>
    <name>hbase.hstore.blockingWaitTime</name>
    <value>90000</value>
    <description>
    The time an HRegion will block updates for after hitting the StoreFile
    limit defined by hbase.hstore.blockingStoreFiles.
    After this time has elapsed, the HRegion will stop blocking updates even
    if a compaction has not been completed. Default: 90 seconds.
    </description>
  </property>
  <property>
    <name>hbase.hstore.compaction.max</name>
    <value>10</value>
    <description>Max number of HStoreFiles to compact per 'minor' compaction.
    </description>
  </property>
  <property>
    <name>hbase.hregion.majorcompaction</name>
    <value>86400000</value>
    <description>The time (in milliseconds) between 'major' compactions of all
    HStoreFiles in a region. Default: 1 day.
    Set to 0 to disable automated major compactions.
    </description>
  </property>
  <property>
    <name>hbase.regions.slop</name>
    <value>0.3</value>
    <description>Rebalance if regionserver has average + (average * slop) regions.
    Default is 30% slop.
    </description>
  </property>
  <property>
    <name>hfile.min.blocksize.size</name>
    <value>65536</value>
    <description>Minimum store file block size. The smaller you make this, the
    bigger your index and the less you fetch on a random-access. Set size down
    if you have small cells and want faster random-access of individual cells.
    </description>
  </property>
  <property>
    <name>hfile.block.cache.size</name>
    <value>0.2</value>
    <description>
    Percentage of maximum heap (-Xmx setting) to allocate to block cache
    used by HFile/StoreFile. Default of 0.2 means allocate 20%.
    Set to 0 to disable.
    </description>
  </property>
  <property>
    <name>hbase.hash.type</name>
    <value>murmur</value>
    <description>The hashing algorithm for use in HashFunction. Two values are
    supported now: murmur (MurmurHash) and jenkins (JenkinsHash).
    </description>
  </property>
  <property>
    <name>zookeeper.session.timeout</name>
    <value>60000</value>
    <description>ZooKeeper session timeout.
    HBase passes this to the zk quorum as suggested maximum time for a
    session. See http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
    "The client sends a requested timeout, the server responds with the
    timeout that it can give the client. "
    In milliseconds.
    </description>
  </property>
  <property>
    <name>zookeeper.retries</name>
    <value>5</value>
    <description>How many times to retry connections to ZooKeeper. Used for
    reading/writing root region location. Used together with ${zookeeper.pause}
    in an exponential backoff fashion when making queries to ZooKeeper.
    </description>
  </property>
  <property>
    <name>zookeeper.pause</name>
    <value>2000</value>
    <description>Sleep time between retries to ZooKeeper. In milliseconds. Used
    together with ${zookeeper.retries} in an exponential backoff fashion when
    making queries to ZooKeeper.
    </description>
  </property>
  <property>
    <name>zookeeper.znode.parent</name>
    <value>/hbase</value>
    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
    files that are configured with a relative path will go under this node.
    By default, all of HBase's ZooKeeper file path are configured with a
    relative path, so they will all go under this directory unless changed.
    </description>
  </property>
  <property>
    <name>zookeeper.znode.rootserver</name>
    <value>root-region-server</value>
    <description>Path to ZNode holding root region location. This is written by
    the master and read by clients and region servers. If a relative path is
    given, the parent folder will be ${zookeeper.znode.parent}. By default,
    this means the root location is stored at /hbase/root-region-server.
    </description>
  </property>
  <!--
  The following three properties are used together to create the list of
  host:peer_port:leader_port quorum servers for ZooKeeper.
  -->
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>localhost</value>
    <description>Comma separated list of servers in the ZooKeeper Quorum.
    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
    By default this is set to localhost for local and pseudo-distributed modes
    of operation. For a fully-distributed setup, this should be set to a full
    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
    this is the list of servers which we will start/stop ZooKeeper on.
    </description>
  </property>
  <property>
    <name>hbase.zookeeper.peerport</name>
    <value>2888</value>
    <description>Port used by ZooKeeper peers to talk to each other.
    See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
    for more information.
    </description>
  </property>
  <property>
    <name>hbase.zookeeper.leaderport</name>
    <value>3888</value>
    <description>Port used by ZooKeeper for leader election.
    See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
    for more information.
    </description>
  </property>
  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
  <!--
  Beginning of properties that are directly mapped from ZooKeeper's zoo.cfg.
  All properties with an "hbase.zookeeper.property." prefix are converted for
  ZooKeeper's configuration. Hence, if you want to add an option from zoo.cfg,
  e.g. "initLimit=10" you would append the following to your configuration:
    <property>
      <name>hbase.zookeeper.property.initLimit</name>
      <value>10</value>
    </property>
  -->
  <property>
    <name>hbase.zookeeper.property.initLimit</name>
    <value>10</value>
    <description>Property from ZooKeeper's config zoo.cfg.
    The number of ticks that the initial synchronization phase can take.
    </description>
  </property>
  <property>
    <name>hbase.zookeeper.property.syncLimit</name>
    <value>5</value>
    <description>Property from ZooKeeper's config zoo.cfg.
    The number of ticks that can pass between sending a request and getting an
    acknowledgment.
    </description>
  </property>
  <property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>${hbase.tmp.dir}/zookeeper</value>
    <description>Property from ZooKeeper's config zoo.cfg.
    The directory where the snapshot is stored.
    </description>
  </property>
  <property>
    <name>hbase.zookeeper.property.clientPort</name>
    <value>2181</value>
    <description>Property from ZooKeeper's config zoo.cfg.
    The port at which the clients will connect.
    </description>
  </property>
  <property>
    <name>hbase.zookeeper.property.maxClientCnxns</name>
    <value>30</value>
    <description>Property from ZooKeeper's config zoo.cfg.
    Limit on number of concurrent connections (at the socket level) that a
    single client, identified by IP address, may make to a single member of
    the ZooKeeper ensemble. Set high to avoid zk connection issues running
    standalone and pseudo-distributed.
    </description>
  </property>
  <!-- End of properties that are directly mapped from ZooKeeper's zoo.cfg -->
</configuration>