Kibana.yml
# Kibana is served by a back end server. This controls which port to use.
# server.port: 5601
# The host to bind the server to.
server.host: "192.168.164.155"
# 192.168.164.155 points to the Ubuntu server
# If you are running kibana behind a proxy, and want to mount it at a path,
# specify that path here. The basePath can't end in a slash.
# server.basePath: ""
# The maximum payload size in bytes on incoming server requests.
# server.maxPayloadBytes: 1048576
# The Elasticsearch instance to use for all your queries.
# elasticsearch.url: "http://localhost:9200"
# If set to true, Kibana will send the hostname specified in the `elasticsearch` URL setting. If you set it to false,
# then the host you use to connect to *this* Kibana instance will be sent instead.
elasticsearch.preserveHost: true
# Kibana uses an index in Elasticsearch to store saved searches, visualizations
# and dashboards. It will create a new index if it doesn't already exist.
# kibana.index: ".kibana"
# The default application to load.
# kibana.defaultAppId: "discover"
# If your Elasticsearch is protected with basic auth, these are the user credentials
# used by the Kibana server to perform maintenance on the kibana_index at startup. Your Kibana
# users will still need to authenticate with Elasticsearch (which is proxied through
# the Kibana server)
# elasticsearch.username: "user"
# elasticsearch.password: "pass"
# SSL settings for requests between the browser and the Kibana server (PEM formatted)
# server.ssl.cert: /path/to/your/server.crt
# server.ssl.key: /path/to/your/server.key
# Optional setting to validate that your Elasticsearch backend uses the same key files (PEM formatted)
# elasticsearch.ssl.cert: /path/to/your/client.crt
# elasticsearch.ssl.key: /path/to/your/client.key
# If you need to provide a CA certificate for your Elasticsearch instance, put
# the path of the pem file here.
# elasticsearch.ssl.ca: /path/to/your/CA.pem
# Set to false to have a complete disregard for the validity of the SSL
# certificate.
# elasticsearch.ssl.verify: true
# Time in milliseconds to wait for elasticsearch to respond to pings, defaults to
# request_timeout setting
# elasticsearch.pingTimeout: 1500
# Time in milliseconds to wait for responses from the back end or elasticsearch.
# This must be > 0
# elasticsearch.requestTimeout: 30000
# Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten
# by client-side headers.
# elasticsearch.customHeaders: {}
# Time in milliseconds for Elasticsearch to wait for responses from shards.
# Set to 0 to disable.
# elasticsearch.shardTimeout: 0
# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying
# elasticsearch.startupTimeout: 5000
# Set the path to where you would like the process id file to be created.
# pid.file: /var/run/kibana.pid
# If you would like to send the log output to a file you can set the path below.
# logging.dest: stdout
# Set this to true to suppress all logging output.
# logging.silent: false
# Set this to true to suppress all logging output except for error messages.
# logging.quiet: false
# Set this to true to log all events, including system usage information and all requests.
# logging.verbose: false
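For reference, stripped of comments this kibana.yml changes only two settings; everything else runs on the shipped defaults. A minimal equivalent sketch (the elasticsearch.url line is an assumption here, pointing at the Elasticsearch node configured further down in this paste; left commented, Kibana falls back to http://localhost:9200):

server.host: "192.168.164.155"
elasticsearch.preserveHost: true
# assumed, not actually set above:
# elasticsearch.url: "http://192.168.164.155:9200"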
logstash.yml
# Settings file in YAML
#
# Settings can be specified either in hierarchical form, e.g.:
#
# pipeline:
#   batch:
#     size: 125
#     delay: 5
#
# Or as flat keys:
#
# pipeline.batch.size: 125
# pipeline.batch.delay: 5
#
# ------------ Node identity ------------
#
# Use a descriptive name for the node:
#
# node.name: test
#
# If omitted the node name will default to the machine's host name
#
# ------------ Data path ------------------
#
# Which directory should be used by logstash and its plugins
# for any persistent needs. Defaults to LOGSTASH_HOME/data
#
path.data: /var/lib/logstash
#
# ------------ Pipeline Settings --------------
#
# The ID of the pipeline.
#
# pipeline.id: main
#
# Set the number of workers that will, in parallel, execute the filters+outputs
# stage of the pipeline.
#
# This defaults to the number of the host's CPU cores.
#
# pipeline.workers: 2
#
# How many events to retrieve from inputs before sending to filters+workers
#
# pipeline.batch.size: 125
#
# How long to wait in milliseconds while polling for the next event
# before dispatching an undersized batch to filters+outputs
#
# pipeline.batch.delay: 50
#
# Force Logstash to exit during shutdown even if there are still inflight
# events in memory. By default, logstash will refuse to quit until all
# received events have been pushed to the outputs.
#
# WARNING: enabling this can lead to data loss during shutdown
#
# pipeline.unsafe_shutdown: false
#
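Taken together, pipeline.workers and pipeline.batch.size bound how many events are in flight at once (their product). A sketch with illustrative values, not the defaults above:

pipeline.workers: 4        # four worker threads run the filters+outputs stage
pipeline.batch.size: 250   # each worker takes up to 250 events per batch
pipeline.batch.delay: 50   # dispatch an undersized batch after 50 ms
# => up to 4 * 250 = 1000 events in flight; size the JVM heap accordingly.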
# ------------ Pipeline Configuration Settings --------------
#
# Where to fetch the pipeline configuration for the main pipeline
#
# path.config:
#
# Pipeline configuration string for the main pipeline
#
# config.string:
#
# At startup, test if the configuration is valid and exit (dry run)
#
# config.test_and_exit: false
#
# Periodically check if the configuration has changed and reload the pipeline
# This can also be triggered manually through the SIGHUP signal
#
# config.reload.automatic: false
#
# How often to check if the pipeline configuration has changed (in seconds)
#
# config.reload.interval: 3s
#
# Show fully compiled configuration as debug log message
# NOTE: --log.level must be 'debug'
#
# config.debug: false
#
# When enabled, process escaped characters such as \n and \" in strings in the
# pipeline configuration files.
#
# config.support_escapes: false
#
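A sketch that turns the reload behaviour above on, with an illustrative (non-default) interval:

config.reload.automatic: true
config.reload.interval: 10s
# without automatic reload, a manual reload can still be triggered via SIGHUP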
# ------------ Module Settings ---------------
# Define modules here. Module definitions must be specified as an array:
# prefix each `name` with a `-`, and keep all of a module's variables nested
# under the `name` they belong to, above the next module, like this:
#
# modules:
#   - name: MODULE_NAME
#     var.PLUGINTYPE1.PLUGINNAME1.KEY1: VALUE
#     var.PLUGINTYPE1.PLUGINNAME1.KEY2: VALUE
#     var.PLUGINTYPE2.PLUGINNAME1.KEY1: VALUE
#     var.PLUGINTYPE3.PLUGINNAME3.KEY1: VALUE
#
# Module variable names must be in the format of
#
# var.PLUGIN_TYPE.PLUGIN_NAME.KEY
#
# modules:
#
# ------------ Cloud Settings ---------------
# Define Elastic Cloud settings here.
# Format of cloud.id is a base64 value e.g. dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyRub3RhcmVhbCRpZGVudGlmaWVy
# and it may have a label prefix e.g. staging:dXMtZ...
# This will overwrite 'var.elasticsearch.hosts' and 'var.kibana.host'
# cloud.id: <identifier>
#
# Format of cloud.auth is: <user>:<pass>
# This is optional
# If supplied this will overwrite 'var.elasticsearch.username' and 'var.elasticsearch.password'
# If supplied this will overwrite 'var.kibana.username' and 'var.kibana.password'
# cloud.auth: elastic:<password>
#
# ------------ Queuing Settings --------------
#
# Internal queuing model: "memory" for legacy in-memory based queuing and
# "persisted" for disk-based acked queueing. Default is memory
#
# queue.type: memory
#
# If using queue.type: persisted, the directory path where the data files will be stored.
# Default is path.data/queue
#
# path.queue:
#
# If using queue.type: persisted, the page data files size. The queue data consists of
# append-only data files separated into pages. Default is 64mb
#
# queue.page_capacity: 64mb
#
# If using queue.type: persisted, the maximum number of unread events in the queue.
# Default is 0 (unlimited)
#
# queue.max_events: 0
#
# If using queue.type: persisted, the total capacity of the queue in number of bytes.
# If you would like more unacked events to be buffered in Logstash, you can increase the
# capacity using this setting. Please make sure your disk drive has capacity greater than
# the size specified here. If both max_bytes and max_events are specified, Logstash will pick
# whichever criterion is reached first
# Default is 1024mb or 1gb
#
# queue.max_bytes: 1024mb
#
# If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint
# Default is 1024, 0 for unlimited
#
# queue.checkpoint.acks: 1024
#
# If using queue.type: persisted, the maximum number of written events before forcing a checkpoint
# Default is 1024, 0 for unlimited
#
# queue.checkpoint.writes: 1024
#
# If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page
# Default is 1000, 0 for no periodic checkpoint.
#
# queue.checkpoint.interval: 1000
#
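Putting the queueing settings together, a sketch that switches to the disk-backed queue (illustrative values, not recommendations; make sure the drive behind path.data has the headroom):

queue.type: persisted
queue.max_bytes: 4gb           # cap the on-disk queue at 4gb
queue.checkpoint.writes: 1024  # checkpoint after 1024 written events (the default)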
# ------------ Dead-Letter Queue Settings --------------
# Flag to turn on dead-letter queue.
#
# dead_letter_queue.enable: false
# If using dead_letter_queue.enable: true, the maximum size of each dead letter queue. Entries
# will be dropped if they would increase the size of the dead letter queue beyond this setting.
# Default is 1024mb
# dead_letter_queue.max_bytes: 1024mb
# If using dead_letter_queue.enable: true, the directory path where the data files will be stored.
# Default is path.data/dead_letter_queue
#
# path.dead_letter_queue:
#
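A matching sketch that enables the dead-letter queue with a smaller, illustrative cap (entries beyond the cap are dropped, as noted above):

dead_letter_queue.enable: true
dead_letter_queue.max_bytes: 256mb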
# ------------ Metrics Settings --------------
#
# Bind address for the metrics REST endpoint
#
# http.host: "127.0.0.1"
#
# Bind port for the metrics REST endpoint. This option also accepts a range
# (9600-9700), and logstash will pick the first available port.
#
# http.port: 9600-9700
#
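With the defaults above, the metrics endpoint answers on the first free port in the range; for example (a sketch, assuming a Logstash version that exposes the node stats API):

http.host: "127.0.0.1"
http.port: 9600-9700
# then e.g. GET http://127.0.0.1:9600/_node/stats returns pipeline and JVM metrics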
# ------------ Debugging Settings --------------
#
# Options for log.level:
#   * fatal
#   * error
#   * warn
#   * info (default)
#   * debug
#   * trace
#
# log.level: info
path.logs: /var/log/logstash
#
# ------------ Other Settings --------------
#
# Where to find custom plugins
# path.plugins: []
#
# ------------ X-Pack Settings (not applicable for OSS build)--------------
#
# X-Pack Monitoring
# https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html
#xpack.monitoring.enabled: false
#xpack.monitoring.elasticsearch.username: logstash_system
#xpack.monitoring.elasticsearch.password: password
#xpack.monitoring.elasticsearch.url: ["https://es1:9200", "https://es2:9200"]
#xpack.monitoring.elasticsearch.ssl.ca: [ "/path/to/ca.crt" ]
#xpack.monitoring.elasticsearch.ssl.truststore.path: path/to/file
#xpack.monitoring.elasticsearch.ssl.truststore.password: password
#xpack.monitoring.elasticsearch.ssl.keystore.path: /path/to/file
#xpack.monitoring.elasticsearch.ssl.keystore.password: password
#xpack.monitoring.elasticsearch.ssl.verification_mode: certificate
#xpack.monitoring.elasticsearch.sniffing: false
#xpack.monitoring.collection.interval: 10s
#xpack.monitoring.collection.pipeline.details.enabled: true
#
# X-Pack Management
# https://www.elastic.co/guide/en/logstash/current/logstash-centralized-pipeline-management.html
#xpack.management.enabled: false
#xpack.management.pipeline.id: ["main", "apache_logs"]
#xpack.management.elasticsearch.username: logstash_admin_user
#xpack.management.elasticsearch.password: password
#xpack.management.elasticsearch.url: ["https://es1:9200", "https://es2:9200"]
#xpack.management.elasticsearch.ssl.ca: [ "/path/to/ca.crt" ]
#xpack.management.elasticsearch.ssl.truststore.path: /path/to/file
#xpack.management.elasticsearch.ssl.truststore.password: password
#xpack.management.elasticsearch.ssl.keystore.path: /path/to/file
#xpack.management.elasticsearch.ssl.keystore.password: password
#xpack.management.elasticsearch.sniffing: false
#xpack.management.logstash.poll_interval: 5s
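As with the Kibana file, stripped of comments this logstash.yml only pins the two paths; every other setting runs on its default:

path.data: /var/lib/logstash
path.logs: /var/log/logstash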
30-elasticsearch-output.conf
output {
  elasticsearch {
    hosts => ["192.168.164.155:9200"]
    sniffing => true
    manage_template => false
    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
    document_type => "%{[@metadata][type]}"
  }
}
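The sprintf references in index and document_type resolve per event from Beats metadata, so a Filebeat event indexed on 4 May 2020 would land in filebeat-2020.05.04. A minimal companion input stage this output is usually paired with (a sketch assuming the stock beats input plugin; the port is the conventional default, not taken from this paste):

input {
  beats {
    port => 5044   # Filebeat's conventional Logstash port
  }
}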
elasticsearch.yml
# ======================== Elasticsearch Configuration =========================
#
# NOTE: Elasticsearch comes with reasonable defaults for most settings.
# Before you set out to tweak and tune the configuration, make sure you
# understand what you are trying to accomplish and the consequences.
#
# The primary way of configuring a node is via this file. This template lists
# the most important settings you may want to configure for a production cluster.
#
# Please see the documentation for further information on configuration options:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration.html>
#
# ---------------------------------- Cluster -----------------------------------
#
# Use a descriptive name for your cluster:
#
# cluster.name: my-application
#
# ------------------------------------ Node ------------------------------------
#
# Use a descriptive name for the node:
#
# node.name: node-1
#
# Add custom attributes to the node:
#
# node.rack: r1
#
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#
# path.data: /path/to/data
#
# Path to log files:
#
# path.logs: /path/to/logs
#
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
# bootstrap.memory_lock: true
#
# Make sure that the `ES_HEAP_SIZE` environment variable is set to about half the memory
# available on the system and that the owner of the process is allowed to use this limit.
#
# Elasticsearch performs poorly when the system is swapping the memory.
#
# ---------------------------------- Network -----------------------------------
#
# Set the bind address to a specific IP (IPv4 or IPv6):
#
network.host: 192.168.164.155
network.bind_host: 0
#
# Set a custom port for HTTP:
#
# http.port: 9200
#
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html>
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when a new node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
# discovery.zen.ping.unicast.hosts: ["host1", "host2"]
#
# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1,
# using integer division; e.g. 3 for a five-node cluster):
#
# discovery.zen.minimum_master_nodes: 3
#
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery.html>
#
# ---------------------------------- Gateway -----------------------------------
#
# Block initial recovery after a full cluster restart until N nodes are started:
#
# gateway.recover_after_nodes: 3
#
# For more information, see the documentation at:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-gateway.html>
#
# ---------------------------------- Various -----------------------------------
#
# Disable starting multiple nodes on a single system:
#
# node.max_local_storage_nodes: 1
#
# Require explicit names when deleting indices:
#
# action.destructive_requires_name: true
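Stripped of comments, this elasticsearch.yml changes only the network binding; a minimal equivalent sketch (the interpretation in the comments is an inference, not from the paste):

network.host: 192.168.164.155   # default bind and publish address
network.bind_host: 0            # overrides the bind side to all interfaces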
elasticsearch/logging.yml
# you can override this by setting a system property, for example -Des.logger.level=DEBUG
es.logger.level: INFO
rootLogger: ${es.logger.level}, console, file
logger:
  # log action execution errors for easier debugging
  action: DEBUG
  # deprecation logging, turn to DEBUG to see them
  deprecation: INFO, deprecation_log_file
  # reduce the logging for aws, too much is logged under the default INFO
  com.amazonaws: WARN
  # aws will try to do some sketchy JMX stuff, but it's not needed.
  com.amazonaws.jmx.SdkMBeanRegistrySupport: ERROR
  com.amazonaws.metrics.AwsSdkMetrics: ERROR
  org.apache.http: INFO
  # gateway
  #gateway: DEBUG
  #index.gateway: DEBUG
  # peer shard recovery
  #indices.recovery: DEBUG
  # discovery
  #discovery: TRACE
  index.search.slowlog: TRACE, index_search_slow_log_file
  index.indexing.slowlog: TRACE, index_indexing_slow_log_file
additivity:
  index.search.slowlog: false
  index.indexing.slowlog: false
  deprecation: false
appender:
  console:
    type: console
    layout:
      type: consolePattern
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
  file:
    type: dailyRollingFile
    file: ${path.logs}/${cluster.name}.log
    datePattern: "'.'yyyy-MM-dd"
    layout:
      type: pattern
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %.10000m%n"
  # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.
  # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html
  #file:
  #  type: extrasRollingFile
  #  file: ${path.logs}/${cluster.name}.log
  #  rollingPolicy: timeBased
  #  rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz
  #  layout:
  #    type: pattern
  #    conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
  deprecation_log_file:
    type: dailyRollingFile
    file: ${path.logs}/${cluster.name}_deprecation.log
    datePattern: "'.'yyyy-MM-dd"
    layout:
      type: pattern
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
  index_search_slow_log_file:
    type: dailyRollingFile
    file: ${path.logs}/${cluster.name}_index_search_slowlog.log
    datePattern: "'.'yyyy-MM-dd"
    layout:
      type: pattern
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
  index_indexing_slow_log_file:
    type: dailyRollingFile
    file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
    datePattern: "'.'yyyy-MM-dd"
    layout:
      type: pattern
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
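Note that the two slow-log appenders above only receive events once slow-log thresholds are also set on the index side; a typical pairing in elasticsearch.yml (a sketch with illustrative thresholds, not part of this paste):

index.search.slowlog.threshold.query.warn: 10s
index.search.slowlog.threshold.query.info: 5s
index.indexing.slowlog.threshold.index.warn: 10s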