# If you are running more than one instance of graylog2-server you have to select one of these
# instances as master. The master will perform some periodic tasks that non-masters won't perform.
is_master = true
# The auto-generated node ID will be stored in this file and read after restarts. It is a good idea
# to use an absolute file path here if you are starting graylog2-server from init scripts or similar.
node_id_file = /etc/graylog2-server-node-id
# You MUST set a secret to secure/pepper the stored user passwords here. Use at least 64 characters.
# Generate one by using for example: pwgen -s 96
# Note that shell substitution does not work inside this file; generate the secret in a shell
# and splice it in, for example:
#   SECRET=$(pwgen -s 96 1)
#   sudo -E sed -i -e 's/password_secret =.*/password_secret = '$SECRET'/' /etc/graylog2.conf
password_secret =
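# A quick sanity check (a sketch; it assumes the key has been written to /etc/graylog2.conf
# as above) to confirm the stored secret is at least 64 characters long:
#   awk -F' = ' '/^password_secret/ { print length($2) }' /etc/graylog2.conf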
# the default root user is named 'admin'
# root_username = admin
# You MUST specify a hash password for the root user (which you only need to initially set up the
# system and in case you lose connectivity to your authentication backend).
# This password cannot be changed using the API or via the web interface. If you need to change it,
# modify it in this file.
# Create one by using for example: echo -n yourpassword | shasum -a 256
# and put the resulting hash value into the following line. As with password_secret, shell
# substitution does not work here; compute the hash in a shell and splice it in, for example
# (quote the password so shell metacharacters such as '#' are not interpreted):
#   PASSWORD=$(echo -n '#0danicri0#' | shasum -a 256 | awk '{print $1}')
#   sudo -E sed -i -e 's/root_password_sha2 =.*/root_password_sha2 = '$PASSWORD'/' /etc/graylog2.conf
root_password_sha2 =
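# Once the server is running, the root credentials can be verified against the REST API
# (a hedged example; it assumes the rest_listen_uri configured below and basic-auth access
# to the /system resource):
#   curl -u admin:yourpassword http://127.0.0.1:12900/system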
# Set plugin directory here (relative or absolute)
plugin_dir = plugin
# REST API listen URI. Must be reachable by other graylog2-server nodes if you run a cluster.
rest_listen_uri = http://127.0.0.1:12900/
# REST API transport address. Defaults to first non-loopback IPv4 system address and port 12900.
# This will be promoted in the cluster discovery APIs and other nodes may try to connect on this
# address. (see rest_listen_uri)
#rest_transport_uri = http://127.0.0.1:12900/
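# To see which non-loopback IPv4 address would be promoted as the transport address by
# default (a sketch using standard Linux tooling, not part of graylog2 itself):
#   ip -4 addr show scope global | awk '/inet / { print $2 }'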
# Enable CORS headers for the REST API. This is necessary for JS clients accessing the server directly.
# If these are disabled, modern browsers will not be able to retrieve resources from the server.
# This is disabled by default. Uncomment the next line to enable it.
#rest_enable_cors = true
# Enable GZIP support for the REST API. This compresses API responses and therefore helps to reduce
# overall round trip times. This is disabled by default. Uncomment the next line to enable it.
#rest_enable_gzip = true
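# Once enabled, both features can be spot-checked from a shell (a sketch; the Origin and
# Accept-Encoding headers are standard HTTP, the URI comes from rest_listen_uri above):
#   curl -si -H 'Origin: http://example.com' -H 'Accept-Encoding: gzip' http://127.0.0.1:12900/ | head -n 20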
# Embedded elasticsearch configuration file
# pay attention to the working directory of the server, maybe use an absolute path here
elasticsearch_config_file = /etc/elasticsearch/elasticsearch.yml
elasticsearch_max_docs_per_index = 20000000
# How many indices do you want to keep?
# elasticsearch_max_number_of_indices * elasticsearch_max_docs_per_index = total number of messages in your setup
elasticsearch_max_number_of_indices = 20
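# With the two values above the retention math works out to:
#   20,000,000 docs per index * 20 indices = 400,000,000 messages
# kept in total before the retention strategy below starts acting on the oldest index.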
# Decide what happens with the oldest indices when the maximum number of indices is reached.
# The following strategies are available:
#   - delete # Deletes the index completely (Default)
#   - close  # Closes the index and hides it from the system. Can be re-opened later.
retention_strategy = delete
# How many ElasticSearch shards and replicas should be used per index? Note that this only applies to newly created indices.
elasticsearch_shards = 1
elasticsearch_replicas = 0
elasticsearch_index_prefix = graylog2
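# Shard/replica counts of an existing index can be confirmed via elasticsearch's HTTP API
# (a sketch; the index name graylog2_0 assumes the prefix above and graylog2's usual
# numbering of newly created indices):
#   curl 'http://127.0.0.1:9200/graylog2_0/_settings?pretty'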
# Do you want to allow searches with leading wildcards? This can be extremely resource hungry and should only
# be enabled with care. See also: http://support.torch.sh/help/kb/graylog2-web-interface/the-search-bar-explained
allow_leading_wildcard_searches = false
# Do you want to allow searches to be highlighted? Depending on the size of your messages this can be memory hungry and
# should only be enabled after making sure your elasticsearch cluster has enough memory.
allow_highlighting = false
# The following settings are passed to elasticsearch's client (overriding those in the provided
# elasticsearch_config_file).
# This must be the same as for your elasticsearch cluster:
elasticsearch_cluster_name = graylog-production
# you could also leave this out, but it makes it easier to identify the graylog2 client instance
elasticsearch_node_name = graylog2-server
# Usually a graylog2 server node neither stores data nor acts as an elasticsearch master;
# note that both are enabled here, making the embedded node master-eligible and data-holding.
elasticsearch_node_master = true
elasticsearch_node_data = true
# use a different port if you run multiple elasticsearch nodes on one machine
# (note: elasticsearch's conventional transport port is 9300; 9200 is normally the HTTP port,
# so double-check that this value does not collide with another node's HTTP listener)
elasticsearch_transport_tcp_port = 9200
# the embedded HTTP server is enabled here; disable it if you don't need direct HTTP access to this node
elasticsearch_http_enabled = true
elasticsearch_discovery_zen_ping_multicast_enabled = true
elasticsearch_discovery_zen_ping_unicast_hosts = 192.168.217.67:9300
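# To check that the embedded node actually joined the cluster, query any cluster member
# (a sketch; it assumes the node at the unicast address above serves HTTP on port 9200):
#   curl 'http://192.168.217.67:9200/_cluster/health?pretty'
# The reported node count should grow by one once this graylog2 node is up.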
# The following settings allow you to change the bind addresses for the elasticsearch client in graylog2.
# These settings are empty by default, letting elasticsearch choose automatically.
# Override them here or in the 'elasticsearch_config_file' if you need to bind to a special address.
# Refer to http://www.elasticsearch.org/guide/en/elasticsearch/reference/0.90/modules-network.html for special values here.
# elasticsearch_network_host =
# elasticsearch_network_bind_host =
# elasticsearch_network_publish_host =
# Analyzer (tokenizer) to use for message and full_message field. The "standard" filter usually is a good idea.
# All supported analyzers are: standard, simple, whitespace, stop, keyword, pattern, language, snowball, custom
# ElasticSearch documentation: http://www.elasticsearch.org/guide/reference/index-modules/analysis/
# Note that this setting only takes effect on newly created indices.
elasticsearch_analyzer = standard
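# How an analyzer tokenizes a message can be previewed with elasticsearch's _analyze API
# (a sketch; it assumes HTTP access to a cluster node on the usual port 9200):
#   curl 'http://127.0.0.1:9200/_analyze?analyzer=standard&text=User+logged+in+from+10.0.0.1&pretty'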
# Batch size for all outputs. This is the maximum (!) number of messages an output module will get at once.
# For example, if this is set to 5000 (default), the ElasticSearch output will not index more than 5000 messages
# at once. After that index operation is performed, the next batch will be indexed. If there is only 1 message
# waiting, it will index just that single message. Raise this parameter only if your incoming message rate is
# so high that batches of 5000 are not enough to keep up. (Only at *really* high message rates.)
output_batch_size = 5000
# The number of parallel running processors.
# Raise this number if your buffers are filling up.
processbuffer_processors = 5
outputbuffer_processors = 5
# Wait strategy describing how buffer processors wait on a cursor sequence. (default: sleeping)
# Possible types:
#  - yielding
#    Compromise between performance and CPU usage.
#  - sleeping
#    Compromise between performance and CPU usage. Latency spikes can occur after quiet periods.
#  - blocking
#    High throughput, low latency, higher CPU usage.
#  - busy_spinning
#    Avoids syscalls which could introduce latency jitter. Best when threads can be bound to specific CPU cores.
processor_wait_strategy = blocking
# Size of internal ring buffers. Raise this if raising outputbuffer_processors does not help anymore.
# For optimum performance your LogMessage objects in the ring buffer should fit in your CPU L3 cache.
# Start server with --statistics flag to see buffer utilization.
# Must be a power of 2. (512, 1024, 2048, ...)
ring_size = 1024
# EXPERIMENTAL: Dead Letters
# Every failed indexing attempt is logged by default and made visible in the web interface. You can enable
# the experimental dead letters feature to write every message that was not successfully indexed into the
# MongoDB "dead_letters" collection to make sure that you never lose a message. The actual writing of dead
# letters should already work fine, but it is not heavily tested yet and will get more features in future
# releases.
dead_letters_enabled = false
# How many seconds to wait between marking the node as DEAD for possible load balancers and starting the actual
# shutdown process. Set to 0 if you have no status-checking load balancers in front.
lb_recognition_period_seconds = 3
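# This grace period applies to the load balancer status endpoint of the REST API, which
# can be polled like this (a hedged example; URI per rest_listen_uri above):
#   curl http://127.0.0.1:12900/system/lbstatus
# The response is ALIVE while the node is up and DEAD once shutdown has been initiated.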
# MongoDB Configuration
mongodb_useauth = false
#mongodb_user = grayloguser
#mongodb_password = 123
mongodb_host = 127.0.0.1
#mongodb_replica_set = localhost:27017,localhost:27018,localhost:27019
mongodb_database = graylog2
mongodb_port = 27017
# Raise this according to the maximum connections your MongoDB server can handle if you encounter MongoDB connection problems.
mongodb_max_connections = 100
# Number of threads allowed to be blocked by MongoDB connections multiplier. Default: 5
# If mongodb_max_connections is 100, and mongodb_threads_allowed_to_block_multiplier is 5, then 500 threads can block.
# More than that and an exception will be thrown.
# http://api.mongodb.org/java/current/com/mongodb/MongoOptions.html#threadsAllowedToBlockForConnectionMultiplier
mongodb_threads_allowed_to_block_multiplier = 5
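# Current MongoDB connection usage can be checked with the standard mongo shell
# (a sketch; host, port and database per the settings above):
#   mongo 127.0.0.1:27017/graylog2 --eval 'printjson(db.serverStatus().connections)'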
# Drools Rule File (use to rewrite incoming log messages)
# See: http://support.torch.sh/help/kb/graylog2-server/custom-message-rewritingprocessing
# rules_file = /etc/graylog2.drl
# Email transport
transport_email_enabled = false
transport_email_hostname = mail.example.com
transport_email_port = 587
transport_email_use_auth = true
# use_tls (STARTTLS, usually port 587) and use_ssl (SMTPS, usually port 465) are normally
# mutually exclusive; with port 587 you most likely want use_tls = true and use_ssl = false.
transport_email_use_tls = true
transport_email_use_ssl = true
transport_email_auth_username = you@example.com
transport_email_auth_password = secret
transport_email_subject_prefix = [graylog2]
transport_email_from_email = graylog2@example.com
# Specify and uncomment this if you want to include links to the stream in your stream alert mails.
# This should define the fully qualified base URL to your web interface exactly the same way as it is accessed by your users.
#
# transport_email_web_interface_url = https://graylog2.example.com
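# Before enabling the transport, SMTP reachability and STARTTLS support can be tested
# from a shell (a sketch; host and port taken from the settings above, openssl assumed
# to be installed):
#   openssl s_client -starttls smtp -connect mail.example.com:587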
# HTTP proxy for outgoing HTTP calls
#http_proxy_uri =