######################## CrateDB Configuration File ##########################

# The default configuration offers the ability to use CrateDB right away.
# The purpose of this file is to give operators an overview of the various
# different configuration settings. To read the full documentation go to
# <https://crate.io/docs/>

# Use this file to fine-tune your CrateDB cluster. If you have any questions
# you are very welcome to contact us on our Community Channel on Slack
# <https://crate.io/docs/support/slackin/>.


################################ Quick Settings ##############################

# Recommended memory settings:
# - set the environment variable CRATE_HEAP_SIZE to half of your memory
#   (e.g. 26G, but not more than ~30G to benefit from CompressedOops).
#   Depending on the OS, update '/etc/default/crate' or '/etc/sysconfig/crate'
# - disable swapping
#bootstrap.memory_lock: true
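#
# For illustration only (the value is hypothetical, assuming a machine with
# 64G of RAM), '/etc/default/crate' could contain the single line:
#   CRATE_HEAP_SIZE=26G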

# Storage: CrateDB can utilize multiple volumes in parallel; make sure to set
# the owner to 'crate:crate'
#path.data: /path/to/data1,/path/to/data2

# Clustering: To avoid split-brain situations and data loss on recovery
#gateway.expected_nodes: 5                 # Total number of nodes
#gateway.recover_after_nodes: 3            # More than half of the nodes
#discovery.zen.minimum_master_nodes: 3     # More than half of the nodes

# Networking: Bind to an IP address or interface other than localhost.
# Be careful! Never expose an unprotected node to the internet.
# Choose from [IP Address], _local_, _site_, _global_ or _[networkInterface]_
#network.host: _site_

# Cluster discovery: Specify the hosts which will form the CrateDB cluster
#discovery.zen.ping:
#  unicast.hosts:
#    - host1:4300
#    - host2:4300

# Enterprise features: CrateDB ships by default with all enterprise features
# enabled. Get in touch with us to get a license: https://crate.io/enterprise/
#license.enterprise: true
#license.ident:


################################# Full Settings ##############################

# The quick settings above should be sufficient for most situations.
# However, all available configuration options are listed below.

# Any element in the configuration can be replaced with environment variables
# by placing them in ${...} notation. For example:
#
#node.rack: ${RACK_ENV_VAR}


############################# Enterprise Features ############################

# Setting this to `false` disables the Enterprise Edition of CrateDB.
#license.enterprise: true

# To enable or use any of the enterprise features, Crate.io must have given
# you permission to enable and use the Enterprise Edition of CrateDB (see
# https://crate.io/enterprise/) and you must have a valid Enterprise or
# Subscription Agreement with Crate.io. If you enable or use features that are
# part of the Enterprise Edition, you represent and warrant that you have a
# valid Enterprise or Subscription Agreement with Crate.io. Your use of
# features of the Enterprise Edition is governed by the terms and conditions
# of your Enterprise or Subscription Agreement with Crate.io.

#/////////////////////////// JMX Monitoring Plugin ///////////////////////////

# The JMX monitoring plugin requires that stat collection is enabled.
#stats.enabled: true

#//////////////////////// Database Administration ////////////////////////////

# To allow authenticated access to CrateDB from specific hosts, you must
# enable the host based authentication setting.
#auth.host_based.enabled: true

# Client access and authentication is then configured via the host based
# config, a map of remote client access entries. For example:
#auth:
#  host_based:
#    enabled: true
#    config:
#      0:
#        user: mike
#        address: 32.0.0.0/8
#        method: trust
#        protocol: pg
#      a:
#        user: barb
#        address: 172.16.0.0
#        protocol: pg
#      b:
#        user: crate
#        address: 32.0.0.0/8
#        method: trust
#      y:
#        user: eleven
#        protocol: pg
#      e:
#        user: dustin
#        address: 172.16.0.0
#        method: trust
#        protocol: http
#      z:
#        method: trust

# When CrateDB is started, the cluster contains one predefined superuser,
# called `crate`. To enable trust based authentication for the super user,
# `crate` must be specified in the host_based setting, as follows:
#auth:
#  host_based:
#    enabled: true
#    config:
#      0:
#        user: crate

# When trust based authentication is used, the server just takes the username
# provided by the client as is without further validation. The HTTP
# implementation takes the value of the `X-USER` request header as the
# username. Since a user is always required for trust based authentication, a
# default user (in case `X-USER` is not set) can be defined as follows:
#auth:
#  trust:
#    http_default_user: dustin
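#
# For illustration only (assuming the default HTTP port and the `_sql`
# endpoint; the username is hypothetical), an HTTP client could then pass the
# username via the request header, e.g.
#   curl -H "X-USER: dustin" -H "Content-Type: application/json" \
#        -X POST "http://localhost:4200/_sql" -d '{"stmt": "SELECT 1"}'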

#///////////////////////// User Defined Functions ////////////////////////////

# To enable the JavaScript language for user defined functions, you can set it
# as follows:
#lang.js.enabled: true

# The JavaScript language is an experimental feature and is not securely
# sandboxed, so it is disabled by default.

#///////////////////////////////// SSL //////////////////////////////////////

# Enable encrypted communication for the HTTP endpoints:
#ssl.http.enabled: true

# Enable encrypted communication for the PostgreSQL wire protocol:
#ssl.psql.enabled: true

# The full path to the node keystore file
#ssl.keystore_filepath: /path/to/keystore_file.jks

# The password used to decrypt the keystore_file.jks
#ssl.keystore_password: myKeyStorePasswd

# The password entered at the end of the keytool -genkey command if different
# than the keystore_password.
#ssl.keystore_key_password: myKeyStorePasswd

# Optional configuration for truststore

# The full path to the node truststore file
#ssl.truststore_filepath: /path/to/truststore_file.jks

# The password used to decrypt the truststore_file.jks
#ssl.truststore_password: myTrustStorePasswd
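#
# For illustration only (alias, paths and passwords are placeholders), a
# self-signed node certificate could be generated with the JDK keytool:
#   keytool -genkeypair -alias node1 -keyalg RSA -keysize 2048 -validity 365 \
#           -keystore /path/to/keystore_file.jks -storepass myKeyStorePasswd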


################################### Cluster ##################################

# Cluster name identifies your cluster for auto-discovery. If you're running
# multiple clusters on the same network, make sure you're using unique names.
#cluster.name: crate

# The graceful_stop namespace defines settings for a graceful stop
# procedure of nodes within the cluster.
#
# The minimum data availability the cluster needs to ensure when a certain
# node is shut down. By default the cluster ensures availability of primary
# shards but not replicas. Other options are "full" and "none".
#cluster.graceful_stop.min_availability: primaries
#
# The cluster may reallocate shards to ensure minimum data availability
# before a certain node shuts down.
#cluster.graceful_stop.reallocate: true
#
# The time to wait for the reallocation process to be finished.
#cluster.graceful_stop.timeout: 2h
#
# The force setting allows you to force a shutdown of a node when the graceful
# shutdown process runs into the cluster.graceful_stop.timeout.
#cluster.graceful_stop.force: false
#
# By default the regular stop procedure does not consider the graceful_stop
# settings; only sending the USR2 signal to the process does.

# In most cases you want to allow all kinds of shard allocations.
#cluster.routing.allocation.enable: all
#
# However, you can limit shard allocations to certain kinds of shards,
# for example if you perform a rolling cluster upgrade.
#cluster.routing.allocation.enable: new_primaries

#################################### Node ####################################

# Node names are generated dynamically on startup, so you're relieved
# from configuring them manually. You can tie this node to a specific name:
#node.name: "Piz Buin"

# Every node can be configured to allow or deny being eligible as the master,
# and to allow or deny storing data.
#
# Allow this node to be eligible as a master node (enabled by default):
#node.master: true
#
# Allow this node to store data (enabled by default):
#node.data: true

# You can exploit these settings to design advanced cluster topologies.
#
# 1. You want this node to never become a master node, only to hold data.
#    This will be the "workhorse" of your cluster.
#node.master: false
#node.data: true
#
# 2. You want this node to only serve as a master: to not store any data and
#    to have free resources. This will be the "coordinator" of your cluster.
#node.master: true
#node.data: false
#
# 3. You want this node to be neither master nor data node, but
#    to act as a "search load balancer" (fetching data from nodes,
#    aggregating results, etc.)
#node.master: false
#node.data: false

# Inspect the cluster state via GUI tools
# such as CrateDB Admin [http://localhost:4200/admin/].

# A node can have generic attributes associated with it, which can later be
# used for customized shard allocation filtering, or allocation awareness.
# An attribute is a simple key value pair, similar to node.key: value. Here is
# an example:
#node.rack: rack314

# By default, multiple nodes are not allowed to start from the same
# installation location. To allow it, set the following to a higher value:
#node.max_local_storage_nodes: 1

#################################### Table ###################################

# You can set a number of options (such as replica options, mapping
# or analyzer definitions, translog settings, ...) for tables globally,
# in this file.
#
# Note that it makes more sense to configure table settings specifically for
# a certain table when creating it, as shown in the example below.
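#
# For illustration only (table and column names are hypothetical), per-table
# settings can be supplied via SQL when the table is created:
#   CREATE TABLE my_table (id INTEGER, name STRING)
#     CLUSTERED INTO 6 SHARDS
#     WITH (number_of_replicas = 1);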

# Set the number of replicas (additional copies) of a table (1 by default):
#index.number_of_replicas: 1

# Set the refresh interval of a shard in milliseconds.
#index.refresh_interval: 1000

# Disable/enable read-only table.
#index.blocks.read_only: false

# Disable/enable all the read operations.
#index.blocks.read: false

# Disable/enable all the write operations.
#index.blocks.write: false

# Disable/enable the table settings modifications.
#index.blocks.metadata: false

# Set the number of operations before flushing.
#index.translog.flush_threshold_ops: unlimited

# Sets size of transaction log prior to flushing.
#index.translog.flush_threshold_size: 512mb

# Sets period of no flushing after which force flush occurs.
#index.translog.flush_threshold_period: 30m

# Disable/enable flushing.
# Recommended to use only for small periods of time.
#index.translog.disable_flush: false

# Controls shard allocation for a specific table.
#index.routing.allocation.enable: all

# Controls the total number of shards (replicas and primaries) allowed to be
# allocated on a single node.
#index.routing.allocation.total_shards_per_node: -1

# When using the local gateway a particular shard is recovered only if a
# quorum of its copies can be allocated in the cluster.
#index.recovery.initial_shards: quorum

# Disable/enable table warming. Table warming allows running registered
# queries to warm up the table before it is available.
#index.warmer.enabled: true

# These settings directly affect the performance of table and search
# operations in your cluster. Assuming you have enough machines to hold shards
# and replicas, the rule of thumb is:
#
# 1. Having more *shards* enhances the _indexing_ performance and allows to
#    _distribute_ a big table across machines.
# 2. Having more *replicas* enhances the _search_ performance and improves the
#    cluster _availability_.
#
# The "number_of_shards" is a one-time setting for a table.
#
# CrateDB takes care of load balancing, relocating, gathering the
# results from nodes, etc. Experiment with different settings to fine-tune
# your setup.

# Use the Table Status API (<http://localhost:4200/A/_status>) to inspect
# the table status.


#################################### Paths ###################################

# Path to directory containing configuration (this file and
# log4j2.properties):
#path.conf: /path/to/conf

# Path to directory where to store table data allocated for this node.
#path.data: /path/to/data
#
# Can optionally include more than one location, causing data to be striped
# across the locations (a la RAID 0) on a file level, favouring locations with
# most free space on creation. For example:
#path.data: /path/to/data1,/path/to/data2

# Path to log files:
#path.logs: /path/to/logs

# Path to where plugins are installed:
#path.plugins: /path/to/plugins

#path:
#  logs: /var/log/crate
#  data: /var/lib/crate

# BLOBS: Path to directory where to store blob data allocated for this node.
# By default blobs will be stored under the same path as normal data.
# A relative path value is relative to CRATE_HOME.
#blobs.path: /path/to/blobs


#################################### Plugin ##################################

# If a plugin listed here is not installed for the current node, the node
# will not start.
#plugin.mandatory: mapper-attachments,lang-groovy


################################### Memory ###################################

# CrateDB performs poorly when the JVM starts swapping: you should ensure
# that it _never_ swaps.
#
# Set this property to true to lock the memory:
#bootstrap.mlockall: true

# Make sure that the machine has enough memory to allocate for CrateDB,
# leaving enough memory for the operating system itself.
# You can allocate memory for CrateDB as follows:
# - Set the CRATE_MIN_MEM and CRATE_MAX_MEM environment variables
#   (we recommend setting MIN and MAX to the same value).
# - Set the CRATE_HEAP_SIZE environment variable. This sets MIN and MAX to
#   the same value for you.
#
# You should also make sure that the CrateDB process is allowed to lock
# the memory, e.g. by using `ulimit -l unlimited`.
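#
# For illustration only (assuming CrateDB runs as the 'crate' system user on
# Linux), the lock limit could be raised permanently via
# '/etc/security/limits.conf':
#   crate soft memlock unlimited
#   crate hard memlock unlimited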


############################## Network And HTTP ###############################

# CrateDB, by default, binds itself to the loopback addresses on the system,
# and listens on port [4200-4300] for HTTP traffic and on port [4300-4400] for
# node-to-node communication (the range means that if the port is busy, it
# will automatically try the next port).
# Apart from IPv4 and IPv6 addresses there are some special values that can be
# used:
# _local_              Any loopback addresses on the system, for example 127.0.0.1.
# _site_               Any site-local addresses on the system, for example 192.168.0.1.
# _global_             Any globally-scoped addresses on the system, for example 8.8.8.8.
# _[networkInterface]_ Addresses of a network interface, for example _en0_.

# Set the bind address specifically (IPv4, IPv6 or special value):
#network.bind_host: 192.168.0.1

# Set the address other nodes will use to communicate with this node. If not
# set, it is automatically derived. It must point to an actual IP address.
#network.publish_host: 192.168.0.1

# Set both 'bind_host' and 'publish_host':
#network.host: 192.168.0.1

# Set a custom port for the node-to-node communication (4300 by default):
#transport.tcp.port: 4300

# Enable compression for all communication between nodes (disabled by
# default):
#transport.tcp.compress: true

# Set a custom port to listen for HTTP traffic:
#http.port: 4200

# Set a custom allowed content length:
#http.max_content_length: 100mb

# Disable HTTP completely:
#http.enabled: false


################################### Gateway ##################################

# The gateway persists cluster meta data on disk every time the meta data
# changes. This data is stored persistently across full cluster restarts
# and recovered after nodes are started again.

# Defines the number of nodes that need to be started before any cluster
# state recovery will start.
#gateway.recover_after_nodes: 2

# Defines the time to wait before starting the recovery once the number
# of nodes defined in gateway.recover_after_nodes are started.
#gateway.recover_after_time: 5m

# Defines how many nodes need to be started for the cluster state to be
# recovered immediately. The value should be equal to the number of nodes
# in the cluster.
#gateway.expected_nodes: 3


############################ Recovery Throttling #############################

# These settings allow to control the process of shard allocation between
# nodes during initial recovery, replica allocation, rebalancing, or when
# adding and removing nodes.

# Set the number of concurrent recoveries happening on a node:
#
# 1. During the initial recovery
#cluster.routing.allocation.node_initial_primaries_recoveries: 4
#
# 2. During adding/removing nodes, rebalancing, etc.
#cluster.routing.allocation.node_concurrent_recoveries: 2

# Set to throttle throughput when recovering (e.g. 100mb, by default
# unlimited):
#indices.recovery.max_size_per_sec: 40mb

# Set to limit the number of open concurrent streams when recovering a shard
# from a peer:
#indices.recovery.concurrent_streams: 5

# Specifies the chunk size used to copy the shard data from the source shard.
#indices.recovery.file_chunk_size: 512kb

# Specifies how many transaction log lines should be transferred between
# shards in a single request during the recovery process. Ignored if
# indices.recovery.translog_size is reached first.
#indices.recovery.translog_ops: 1000

# Specifies how much data of the transaction log should be transferred
# between shards in a single request during the recovery process.
#indices.recovery.translog_size: 512kb

# Define if transferred data should be compressed during the
# recovery process.
#indices.recovery.compress: true

# Specifies the maximum number of bytes that can be transferred during shard
# recovery per second. Limiting can be disabled by setting it to `0`. Higher
# values may result in higher network utilization, but also a faster recovery
# process.
#indices.recovery.max_bytes_per_sec: 20mb

# Specifies the time to wait after an issue caused by cluster state syncing
# before retrying to recover.
#indices.recovery.retry_delay_state_sync: 500ms

# Specifies the time to wait after an issue caused by the network before
# retrying to recover.
#indices.recovery.retry_delay_network: 5s

# Specifies the interval after which idle recoveries will be failed.
#indices.recovery.retry_activity_timeout: 15m

# Specifies the timeout for internal requests made as part of the recovery.
#indices.recovery.retry_internal_action_timeout: 15m

# Specifies the timeout for internal requests made as part of the recovery
# that are expected to take a long time.
#indices.recovery.retry_internal_long_action_timeout: 30m


########################### Store Level Throttling ###########################

# Allows to throttle the `merge` (or `all`) process of the store module.
# Options are: `all | merge | none`
#indices.store.throttle.type: merge

# If throttling is enabled by indices.store.throttle.type, this setting
# specifies the maximum bytes per second a store module process can operate
# with.
#indices.store.throttle.max_bytes_per_sec: 20mb


################################# Discovery ##################################

# Discovery infrastructure ensures nodes can be found within a cluster
# and a master node is elected. Unicast discovery is the default.
#
# Unicast discovery allows to explicitly control which nodes will be used
# to discover the cluster by pinging them.
#discovery.zen.ping.unicast.hosts:
#  - host1:port
#  - host2:port
#
# If you want to debug the discovery process, you can set a logger in
# 'config/log4j2.properties' to help you do so.

# Set to ensure a node sees M other master eligible nodes to be considered
# operational within the cluster. It's recommended to set it to a higher value
# than 1 when running more than 2 nodes in the cluster.
#
# We highly recommend to set the minimum master nodes as follows:
# minimum_master_nodes: (N / 2) + 1 where N is the cluster size
# That will ensure a full recovery of the cluster state.
#discovery.zen.minimum_master_nodes: 1
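#
# For example, in a 5-node cluster: (5 / 2) + 1 = 3 (integer division), so at
# least three master eligible nodes must be visible to this node.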

# Set the time to wait for ping responses from other nodes when discovering.
# Set this option to a higher value on a slow or congested network to minimize
# discovery failures:
#discovery.zen.ping.timeout: 3s

# Time a node is waiting for responses from other nodes to a published
# cluster state.
#discovery.zen.publish_timeout: 30s

# Ping configuration
#discovery.zen.fd.ping_timeout: 30s
#discovery.zen.fd.ping_retries: 3
#discovery.zen.fd.ping_interval: 1s

#/////////////////////////// Discovery via DNS ///////////////////////////////

# Service discovery allows CrateDB to look up hosts for unicast discovery via
# SRV DNS records.

# To enable SRV discovery you need to set the discovery type to 'srv'.
#discovery.type: srv

# Service discovery requires a query that is used to look up SRV records;
# this is usually in the format _service._protocol.fqdn
#discovery.srv.query: _crate._srv.example.com

#////////////////////////////// EC2 Discovery ////////////////////////////////

# EC2 discovery allows CrateDB to look up hosts for unicast discovery via the
# AWS EC2 API.

# To enable EC2 discovery you need to set the discovery type to 'ec2'.
#discovery.type: ec2

# There are multiple ways to filter EC2 instances:
#
# Filter by security groups, either by id or name. Only instances with the
# given group will be used for unicast host discovery.
#discovery.ec2.groups: sg-example-1, sg-example-2
#
# Control whether all (`false`) or just any (`true`) security group must
# be present for the instance to be used for discovery.
#discovery.ec2.any_group: true
#
# Filter by availability zones. Only instances within the given availability
# zone will be used for unicast host discovery.
#discovery.ec2.availability_zones:
#  - us-east-1
#  - us-west-1
#  - us-west-2
#  - ap-southeast-1
#  - ap-southeast-2
#  - ap-northeast-1
#  - eu-west-1
#  - eu-central-1
#  - sa-east-1
#  - cn-north-1
#
# EC2 instances for discovery can also be filtered by tags using the
# `discovery.ec2.tag.` prefix plus the tag name.
# E.g. to filter instances that have the `environment` tag with the value
# `dev`, your setting will look like:
#discovery.ec2.tag.environment: dev
#discovery.ec2.tag.<name>: <value>
#
# If you have your own compatible implementation of the EC2 API service you
# can set the endpoint that should be used.
#cloud.aws.ec2.endpoint: http://example.com/endpoint

#/////////////////////////////// Azure Discovery /////////////////////////////

# Azure discovery allows CrateDB to look up hosts for unicast discovery via
# the Azure API.

# To enable Azure discovery you need to set the discovery type to 'azure'.
#discovery.type: azure

# You should provide the resource group name of your instances.
#cloud.azure.management.resourcegroup.name: myrg

# The following configuration values must be provided for Active Directory
# authentication.
#cloud.azure.management.subscription.id: xxxxx.xxxx.xxx.xxx
#cloud.azure.management.tenant.id: xxxxxxxxxxx
#cloud.azure.management.app.id: xxxxxxxxxx
#cloud.azure.management.app.secret: my_password

# There are two methods of discovery. Discovery method ``vnet`` will discover
# all VMs in the same virtual network.
# The use of method ``subnet`` will add all virtual machines in the same
# subnet to the cluster.
#discovery.azure.method: vnet


############################# Routing Allocation #############################

# Allows to control shard allocation.
# Options are: `all | new_primaries`
#
# - `all` allows all shard allocations, the cluster can allocate all kinds of
#   shards.
# - `new_primaries` allows allocations for new primary shards only. This means
#   that for example a newly added node will not allocate any replicas.
#   However it is still possible to allocate new primary shards for new
#   indices. Whenever you want to perform a zero downtime upgrade of your
#   cluster you need to set this value before gracefully stopping the first
#   node and reset it to `all` after starting the last updated node.
#cluster.routing.allocation.enable: all

# Allow to control when rebalancing will happen based on the total state of
# all the indices shards in the cluster. Defaulting to `indices_all_active`
# to reduce chatter during initial recovery.
# Options are: `always | indices_primary_active | indices_all_active`
#cluster.routing.allocation.allow_rebalance: indices_all_active

# Define how many concurrent rebalancing tasks are allowed cluster wide.
#cluster.routing.allocation.cluster_concurrent_rebalance: 2

# Define the number of initial recoveries of primaries that are allowed per
# node. Since most times local gateway is used, those should be fast and we
# can handle more of those per node without creating load.
#cluster.routing.allocation.node_initial_primaries_recoveries: 4

# How many concurrent recoveries are allowed to happen on a node.
#cluster.routing.allocation.node_concurrent_recoveries: 2


################################## Awareness #################################

# Cluster allocation awareness allows to configure shard and replica
# allocation across generic attributes associated with nodes.

# Define node attributes which will be used to do awareness based on the
# allocation of a shard and its replicas. Shards will relocate to even out the
# number of shards across the nodes, but a shard and its replica will not be
# allocated in the same `rack_id` value.
#
# The awareness attributes can hold several values.
#cluster.routing.allocation.awareness.attributes:

# Node attributes on which shard allocation will be forced.
# If you know in advance the number of values an awareness attribute can
# have, and you never want more replicas than needed allocated on a specific
# group of nodes with the same awareness attribute value, you can force
# awareness on specific attributes.
#cluster.routing.allocation.awareness.force.<attribute>.values:
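#
# For illustration only (the attribute name and values are hypothetical): if
# every node carries a custom attribute such as `node.rack_id: rack_one`,
# awareness and forced awareness could be configured as:
#cluster.routing.allocation.awareness.attributes: rack_id
#cluster.routing.allocation.awareness.force.rack_id.values: rack_one,rack_two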


############################### Balanced Shards ##############################

# Defines the weight factor for shards allocated on a node (float).
#cluster.routing.allocation.balance.shard: 0.45f

# Defines a factor to the number of shards per index allocated on a specific
# node (float).
#cluster.routing.allocation.balance.index: 0.5f

# Defines a weight factor for the number of primaries of a specific index
# allocated on a node (float).
#cluster.routing.allocation.balance.primary: 0.05f

# Minimal optimization value of operations that should be performed (non
# negative float).
#cluster.routing.allocation.balance.threshold: 1.0f


####################### Cluster-Wide Allocation Filtering ####################

# Place new shards only on nodes where one of the specified values matches the
# attribute.
#cluster.routing.allocation.include.<attribute>:

# Place new shards only on nodes where none of the specified values matches
# the attribute.
#cluster.routing.allocation.exclude.<attribute>:

# Used to specify a number of rules, which all MUST match for a node in order
# to allocate a shard on it.
# This is in contrast to include which will include a node if ANY rule
# matches.
#cluster.routing.allocation.require.<attribute>:
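#
# For illustration only (values are hypothetical), using the `node.rack`
# attribute from the Node section above:
#cluster.routing.allocation.include.rack: rack314,rack315
#cluster.routing.allocation.exclude.rack: rack316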


########################## Disk-based Shard Allocation #######################

# Prevent shard allocation on nodes depending on the disk usage.
#cluster.routing.allocation.disk.threshold_enabled: true

# Defines the lower disk threshold limit for shard allocations.
# New shards will not be allocated on nodes with disk usage greater than this
# value. It can also be set to an absolute bytes value (e.g. `500mb`).
#cluster.routing.allocation.disk.watermark.low: 85%

# Defines the higher disk threshold limit for shard allocations.
# The cluster will attempt to relocate existing shards to another node if the
# disk usage on a node rises above this value. It can also be set to an
# absolute bytes value (e.g. `500mb`).
#cluster.routing.allocation.disk.watermark.high: 90%


########################## Field Data Circuit Breaker #########################

# The field data circuit breaker allows estimation of the memory required to
# load field data into memory.

# Specifies the limit for the fielddata breaker.
#indices.fielddata.breaker.limit: 60%

# A constant that all field data estimations are multiplied with to
# determine a final estimation.
#indices.fielddata.breaker.overhead: 1.03


################################# Threadpools ################################

# Every node holds several thread pools to improve how threads are managed
# within a node. There are several pools, but the important ones include:
# index, search, bulk, refresh
#thread_pool.index.type: fixed
#thread_pool.index.queue_size: 200


################################## Metadata ##################################

# Defines how often the cluster collects metadata information (e.g. disk
# usage) if no concrete event is triggered.
#cluster.info.update.interval: 30s


################################## GC Logging ################################

#monitor.jvm.gc.collector.young.warn: 1000ms
#monitor.jvm.gc.collector.young.info: 700ms
#monitor.jvm.gc.collector.young.debug: 400ms

#monitor.jvm.gc.collector.old.warn: 10s
#monitor.jvm.gc.collector.old.info: 5s
#monitor.jvm.gc.collector.old.debug: 2s


###################################### SQL ####################################

# Defines whether SQL statements resulting in modification operations are
# allowed on this node.
#node.sql.read_only: false

# A SQL DML operation over a huge amount of rows (such as INSERT FROM
# SUBQUERY, UPDATE or COPY FROM) can take an enormous amount of time and
# cluster resources. To make such queries less likely to fail, even on slower
# hardware, increase this timeout.
#bulk.request_timeout: 1m


######################### SQL Query Circuit Breaker ##########################

# The query circuit breaker allows estimation of the memory required by
# a query.

# Specifies the limit for the query breaker.
#node.breaker.query.limit: 60%

# A constant that all query data estimations are multiplied with to
# determine a final estimation.
#node.breaker.query.overhead: 1.09


##################################### UDC ####################################

# Usage Data Collection
#
# If enabled, CrateDB will send usage data to the URL stored in the `udc.url`
# setting. The sent usage data doesn't contain any confidential information.

# Enable/disable usage data collection.
#udc.enabled: true

# The delay for the first ping after start-up.
#udc.initial_delay: 10m

# The interval at which a ping is sent.
#udc.interval: 24h

# The URL the ping is sent to.
#udc.url: https://udc.crate.io/


######################### ELASTICSEARCH HTTP REST API ########################

# By default the Elasticsearch HTTP REST API is disabled.

# WARNING: If you index data via Elasticsearch instead of inserting it via
# SQL, you might get inconsistent data when querying.

# Enable/disable the Elasticsearch HTTP REST API.
#es.api.enabled: false


############################# BACKUP / RESTORE ###############################

# Paths where repositories of type fs may be created:
#path.repo: /path/to/shared/fs,/other/shared/fs

# Whitelist of URLs that can be used with the url repository type.
# Wildcards are supported in the host, path, query and fragment parts.
#
# Supported protocols are: "http", "https", "ftp", "file" and "jar",
# but only "http", "https" and "ftp" need to be listed here for usage in
# URL repositories.
# "file" urls must be prefixed with an entry configured in ``path.repo``
#repositories.url.allowed_urls: ["http://example.org/root/*", "https://*.mydomain.com/*?*#*"]
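#
# For illustration only (the repository name is hypothetical), once a path is
# listed in `path.repo` a repository can be created via SQL, e.g.
#   CREATE REPOSITORY my_backups TYPE fs WITH (location = '/path/to/shared/fs');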


###################### POSTGRES WIRE PROTOCOL SUPPORT ########################

# CrateDB supports the PostgreSQL wire protocol v3 and emulates a PostgreSQL
# server v9.5. For further information read our documentation:
# https://crate.io/docs/reference/en/latest/protocols/postgres.html
#
# This allows you to connect to CrateDB using one of the many Postgres tools
# and libraries.
#psql.enabled: true
#psql.port: 5432
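#
# For illustration only (assuming default settings and the built-in `crate`
# superuser), a PostgreSQL client could connect like this:
#   psql -h localhost -p 5432 -U crate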