  1. ######################### Filebeat Configuration #########################
  2.  
  3. # This file is a full configuration example documenting all non-deprecated
  4. # options in comments. For a shorter configuration example that contains only
  5. # the most common options, please see filebeat.yml in the same directory.
  6. #
  7. # You can find the full configuration reference here:
  8. # https://www.elastic.co/guide/en/beats/filebeat/index.html
  9.  
  10. #=========================== Filebeat prospectors =============================
  11.  
  12. # List of prospectors to fetch data.
  13. filebeat.prospectors:
  14. # Each - is a prospector. Most options can be set at the prospector level, so
  15. # you can use different prospectors for various configurations.
  16. # Below are the prospector specific configurations.
  17.  
  18. # Type of the files. Based on this the way the file is read is decided.
  19. # The different types cannot be mixed in one prospector
  20. #
  21. # Possible options are:
  22. # * log: Reads every line of the log file (default)
  23. # * stdin: Reads the standard in
  24.  
  25. #------------------------------ Log prospector --------------------------------
  26. - input_type: log
  27.  
  28.   # Paths that should be crawled and fetched. Glob based paths.
  29.   # To fetch all ".log" files from a specific level of subdirectories
  30.   # /var/log/*/*.log can be used.
  31.   # For each file found under this path, a harvester is started.
  32.   # Make sure no file is defined twice as this can lead to unexpected behaviour.
  33.   paths:
  34.     - /var/log/tomcat/tomcat.json
  35.     #- c:\programdata\elasticsearch\logs\*
  36.  
  37.   # Configure the file encoding for reading files with international characters
  38.   # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
  39.   # Some sample encodings:
  40.   #   plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk,
  41.   #    hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ...
  42.   #encoding: plain
  43.  
  44.  
  45.   # Exclude lines. A list of regular expressions to match. It drops the lines that are
  46.   # matching any regular expression from the list. The include_lines is called before
  47.   # exclude_lines. By default, no lines are dropped.
  48.   exclude_lines: ['^Request:ibe']
  49.  
  50.   # Include lines. A list of regular expressions to match. It exports the lines that are
  51.   # matching any regular expression from the list. The include_lines is called before
  52.   # exclude_lines. By default, all the lines are exported.
  53.   #include_lines: ["^ERR", "^WARN"]
  54.  
  55.   # Exclude files. A list of regular expressions to match. Filebeat drops the files that
  56.   # are matching any regular expression from the list. By default, no files are dropped.
  57.   #exclude_files: [".gz$"]
  58.  
  59.   # Optional additional fields. These field can be freely picked
  60.   # to add additional information to the crawled log files for filtering
  61.   #fields:
  62.   #  level: debug
  63.   #  review: 1
  64.  
  65.   # Set to true to store the additional fields as top level fields instead
  66.   # of under the "fields" sub-dictionary. In case of name conflicts with the
  67.   # fields added by Filebeat itself, the custom fields overwrite the default
  68.   # fields.
  69.   #fields_under_root: false
  70.  
  71.   # Ignore files which were modified more than the defined timespan in the past.
  72.   # ignore_older is disabled by default (set to 0), so no files are ignored.
  73.   # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
  74.   #ignore_older: 0
  75.  
  76.   # Type to be published in the 'type' field. For Elasticsearch output,
  77.   # the type defines the document type these entries should be stored
  78.   # in. Default: log
  79.   #document_type: log
  80.  
  81.   # How often the prospector checks for new files in the paths that are specified
  82.   # for harvesting. Specify 1s to scan the directory as frequently as possible
  83.   # without causing Filebeat to scan too frequently. Default: 10s.
  84.   #scan_frequency: 10s
  85.  
  86.   # Defines the buffer size every harvester uses when fetching the file
  87.   #harvester_buffer_size: 16384
  88.  
  89.   # Maximum number of bytes a single log event can have
  90.   # All bytes after max_bytes are discarded and not sent. The default is 10MB.
  91.   # This is especially useful for multiline log messages which can get large.
  92.   #max_bytes: 10485760
  93.  
  94.   ### JSON configuration
  95.  
  96.   # Decode JSON options. Enable this if your logs are structured in JSON.
  97.   # JSON key on which to apply the line filtering and multiline settings. This key
  98.   # must be top level and its value must be a string, otherwise it is ignored. If
  99.   # no text key is defined, the line filtering and multiline features cannot be used.
  100.   json.message_key: message
  101.  
  102.   # By default, the decoded JSON is placed under a "json" key in the output document.
  103.   # If you enable this setting, the keys are copied to the top level of the output document.
  104.   json.keys_under_root: true
  105.  
  106.   # If keys_under_root and this setting are enabled, then the values from the decoded
  107.   # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.)
  108.   # in case of conflicts.
  109.   # json.overwrite_keys: yes
  110.  
  111.   # If this setting is enabled, Filebeat adds a "json_error" key in case of JSON
  112.   # unmarshaling errors or when a text key is defined in the configuration but cannot
  113.   # be used.
  114.   #json.add_error_key: false
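  #
  # Illustrative sketch (hypothetical log line, not taken from this setup): with
  # json.message_key and json.keys_under_root set as above, a line such as
  #   {"message": "GET /health 200", "level": "INFO", "logger": "access"}
  # is decoded so that "level" and "logger" become top-level event fields, while
  # exclude_lines/include_lines and the multiline options below are applied to
  # the value of the "message" key.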
  115.  
  116.   ### Multiline options
  117.  
  118.   # Multiline can be used for log messages spanning multiple lines. This is common
  119.   # for Java Stack Traces or C-Line Continuation
  120.  
  121.   # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
  122.   #multiline.pattern: ^\[
  123.  
  124.   # Defines if the pattern set under pattern should be negated or not. Default is false.
  125.   #multiline.negate: false
  126.  
  127.   # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
  128.   # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
  129.   # Note: "after" is the equivalent of "previous" and "before" is the equivalent of "next" in Logstash
  130.   #multiline.match: after
  131.  
  132.   # The maximum number of lines that are combined to one event.
  133.   # In case there are more than max_lines, the additional lines are discarded.
  134.   # Default is 500
  135.   #multiline.max_lines: 500
  136.  
  137.   # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event
  138.   # Default is 5s.
  139.   #multiline.timeout: 5s
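  #
  # Sketch of a common combination for Java stack traces (assumes continuation
  # lines start with whitespace, "at ...", "..." or "Caused by:"; adjust the
  # pattern to your own log format):
  #multiline.pattern: '^[[:space:]]+(at|\.{3})\b|^Caused by:'
  #multiline.negate: false
  #multiline.match: after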
  140.  
  141.   # Setting tail_files to true means filebeat starts reading new files at the end
  142.   # instead of the beginning. If this is used in combination with log rotation
  143.   # this can mean that the first entries of a new file are skipped.
  144.   #tail_files: false
  145.  
  146.   # Experimental: If symlinks is enabled, symlinks are opened and harvested. The harvester opens the
  147.   # original file for harvesting but will report the symlink name as source.
  148.   #symlinks: false
  149.  
  150.   # Backoff values define how aggressively filebeat crawls new files for updates
  151.   # The default values can be used in most cases. Backoff defines how long Filebeat waits
  152.   # before checking a file again after EOF is reached. Default is 1s which means the file
  153.   # is checked every second for new lines. This leads to near real-time crawling.
  154.   # Every time a new line appears, backoff is reset to the initial value.
  155.   #backoff: 1s
  156.  
  157.   # Max backoff defines what the maximum backoff time is. After having backed off multiple times
  158.   # from checking the files, the waiting time will never exceed max_backoff independent of the
  159.   # backoff factor. Setting it to 10s means that in the worst case, after Filebeat has backed
  160.   # off multiple times, a newly added line is read within a maximum of 10s.
  161.   #max_backoff: 10s
  162.  
  163.   # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor,
  164.   # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen.
  165.   # The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached
  166.   #backoff_factor: 2
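  #
  # Worked example with the defaults shown above (backoff: 1s, backoff_factor: 2,
  # max_backoff: 10s): after EOF the wait between checks grows 1s -> 2s -> 4s ->
  # 8s -> 10s and then stays at 10s; as soon as a new line is read, the wait is
  # reset to 1s again.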
  167.  
  168.   # Experimental: Max number of harvesters that are started in parallel.
  169.   # Default is 0 which means unlimited
  170.   #harvester_limit: 0
  171.  
  172.   ### Harvester closing options
  173.  
  174.   # Close inactive closes the file handler after the predefined period.
  175.   # The period starts when the last line of the file was read, not the file ModTime.
  176.   # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
  177.   #close_inactive: 5m
  178.  
  179.   # Close renamed closes a file handler when the file is renamed or rotated.
  180.   # Note: Potential data loss. Make sure to read and understand the docs for this option.
  181.   #close_renamed: false
  182.  
  183.   # When enabling this option, a file handler is closed immediately in case a file can't be found
  184.   # any more. In case the file shows up again later, harvesting will continue at the last known position
  185.   # after scan_frequency.
  186.   #close_removed: true
  187.  
  188.   # Closes the file handler as soon as the harvester reaches the end of the file.
  189.   # By default this option is disabled.
  190.   # Note: Potential data loss. Make sure to read and understand the docs for this option.
  191.   #close_eof: false
  192.  
  193.   ### State options
  194.  
  195.   # If the modification time of a file is older than clean_inactive, the state for that file is removed from the registry.
  196.   # By default this is disabled.
  197.   #clean_inactive: 0
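  #
  # Sketch for rotated logs (values are illustrative only): clean_inactive must be
  # greater than ignore_older + scan_frequency, otherwise a state could be removed
  # while the file is still eligible for harvesting, e.g.:
  #ignore_older: 48h
  #clean_inactive: 72h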
  198.  
  199.   # Removes the state for files which cannot be found on disk anymore immediately
  200.   #clean_removed: true
  201.  
  202.   # Close timeout closes the harvester after the predefined time.
  203.   # This is independent of whether the harvester finished reading the file or not.
  204.   # By default this option is disabled.
  205.   # Note: Potential data loss. Make sure to read and understand the docs for this option.
  206.   #close_timeout: 0
  207.  
  208. #----------------------------- Stdin prospector -------------------------------
  209. # Configuration to use stdin input
  210. #- input_type: stdin
  211.  
  212. #========================= Filebeat global options ============================
  213.  
  214. # Event count spool threshold - forces network flush if exceeded
  215. #filebeat.spool_size: 2048
  216.  
  217. # Enable async publisher pipeline in filebeat (Experimental!)
  218. #filebeat.publish_async: false
  219.  
  220. # Defines how often the spooler is flushed. After idle_timeout the spooler is
  221. # flushed even though spool_size is not reached.
  222. #filebeat.idle_timeout: 5s
  223.  
  224. # Name of the registry file. If a relative path is used, it is considered relative to the
  225. # data path.
  226. #filebeat.registry_file: ${path.data}/registry
  227.  
  228. #
  229. # These config files must have the full filebeat config part inside, but only
  230. # the prospector part is processed. All global options like spool_size are ignored.
  231. # The config_dir MUST point to a different directory than the one the main filebeat config file is in.
  232. #filebeat.config_dir:
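#
# Sketch (hypothetical paths): if config_dir were set to e.g. /etc/filebeat/conf.d,
# a file such as /etc/filebeat/conf.d/nginx.yml would itself contain only a
# prospector definition:
#   filebeat.prospectors:
#   - input_type: log
#     paths: ["/var/log/nginx/*.log"]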
  233.  
  234. # How long filebeat waits on shutdown for the publisher to finish.
  235. # Default is 0, not waiting.
  236. #filebeat.shutdown_timeout: 0
  237.  
  238. #================================ General =====================================
  239.  
  240. # The name of the shipper that publishes the network data. It can be used to group
  241. # all the transactions sent by a single shipper in the web interface.
  242. # If this option is not defined, the hostname is used.
  243. #name:
  244.  
  245. # The tags of the shipper are included in their own field with each
  246. # transaction published. Tags make it easy to group servers by different
  247. # logical properties.
  248. #tags: ["service-X", "web-tier"]
  249.  
  250. # Optional fields that you can specify to add additional information to the
  251. # output. Fields can be scalar values, arrays, dictionaries, or any nested
  252. # combination of these.
  253. #fields:
  254. #  env: staging
  255.  
  256. # If this option is set to true, the custom fields are stored as top-level
  257. # fields in the output document instead of being grouped under a fields
  258. # sub-dictionary. Default is false.
  259. #fields_under_root: false
  260.  
  261. # Internal queue size for single events in processing pipeline
  262. #queue_size: 1000
  263.  
  264. # The internal queue size for bulk events in the processing pipeline.
  265. # Do not modify this value.
  266. #bulk_queue_size: 0
  267.  
  268. # Sets the maximum number of CPUs that can be executing simultaneously. The
  269. # default is the number of logical CPUs available in the system.
  270. #max_procs:
  271.  
  272. #================================ Processors =====================================
  273.  
  274. # Processors are used to reduce the number of fields in the exported event or to
  275. # enhance the event with external meta data. This section defines a list of processors
  276. # that are applied one by one and the first one receives the initial event:
  277. #
  278. #   event -> filter1 -> event1 -> filter2 -> event2 ...
  279. #
  280. # Supported processors: drop_fields, drop_event, include_fields
  281. #
  282. # For example, you can use the following processors to keep
  283. # the fields that contain CPU load percentages, but remove the fields that
  284. # contain CPU ticks values:
  285. #
  286. #processors:
  287. #- include_fields:
  288. #    fields: ["cpu"]
  289. #- drop_fields:
  290. #    fields: ["cpu.user", "cpu.system"]
  291. #
  292. # The following example drops the events that have the HTTP response code 200:
  293. #
  294. #processors:
  295. #- drop_event:
  296. #    when:
  297. #       equals:
  298. #           http.code: 200
  299. #
  300.  
  301. #================================ Outputs =====================================
  302.  
  303. # Configure what outputs to use when sending the data collected by the beat.
  304. # Multiple outputs may be used.
  305.  
  306. #-------------------------- Elasticsearch output ------------------------------
  307. #output.elasticsearch:
  308.   # Boolean flag to enable or disable the output module.
  309.   #enabled: true
  310.  
  311.   # Array of hosts to connect to.
  312.   # Scheme and port can be left out and will be set to the default (http and 9200)
  313.   # In case you specify an additional path, the scheme is required: http://localhost:9200/path
  314.   # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
  315.   #hosts: ["localhost:9200"]
  316.  
  317.   # Set gzip compression level.
  318.   #compression_level: 0
  319.  
  320.   # Optional protocol and basic auth credentials.
  321.   #protocol: "https"
  322.   #username: "elastic"
  323.   #password: "changeme"
  324.  
  325.   # Dictionary of HTTP parameters to pass within the url with index operations.
  326.   #parameters:
  327.     #param1: value1
  328.     #param2: value2
  329.  
  330.   # Number of workers per Elasticsearch host.
  331.   #worker: 1
  332.  
  333.   # Optional index name. The default is "filebeat" plus date
  334.   # and generates [filebeat-]YYYY.MM.DD keys.
  335.   #index: "filebeat-%{+yyyy.MM.dd}"
  336.  
  337.   # Optional ingest node pipeline. By default no pipeline will be used.
  338.   #pipeline: ""
  339.  
  340.   # Optional HTTP Path
  341.   #path: "/elasticsearch"
  342.  
  343.   # Proxy server url
  344.   #proxy_url: http://proxy:3128
  345.  
  346.   # The number of times a particular Elasticsearch index operation is attempted. If
  347.   # the indexing operation doesn't succeed after this many retries, the events are
  348.   # dropped. The default is 3.
  349.   #max_retries: 3
  350.  
  351.   # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
  352.   # The default is 50.
  353.   #bulk_max_size: 50
  354.  
  355.   # Configure http request timeout before failing a request to Elasticsearch.
  356.   #timeout: 90
  357.  
  358.   # The number of seconds to wait for new events between two bulk API index requests.
  359.   # If `bulk_max_size` is reached before this interval expires, additional bulk index
  360.   # requests are made.
  361.   #flush_interval: 1s
  362.  
  363.   # A template is used to set the mapping in Elasticsearch
  364.   # By default template loading is enabled and the template is loaded.
  365.   # These settings can be adjusted to load your own template or overwrite existing ones.
  366.  
  367.   # Set to false to disable template loading.
  368.   #template.enabled: true
  369.  
  370.   # Template name. By default the template name is filebeat.
  371.   #template.name: "filebeat"
  372.  
  373.   # Path to template file
  374.   #template.path: "${path.config}/filebeat.template.json"
  375.  
  376.   # Overwrite existing template
  377.   #template.overwrite: false
  378.  
  379.   # If set to true, filebeat checks the Elasticsearch version at connect time, and if it
  380.   # is 2.x, it loads the file specified by the template.versions.2x.path setting. The
  381.   # default is true.
  382.   #template.versions.2x.enabled: true
  383.  
  384.   # Path to the Elasticsearch 2.x version of the template file.
  385.   #template.versions.2x.path: "${path.config}/filebeat.template-es2x.json"
  386.  
  387.   # Use SSL settings for HTTPS. Default is true.
  388.   #ssl.enabled: true
  389.  
  390.   # Configure SSL verification mode. If `none` is configured, all server hosts
  391.   # and certificates will be accepted. In this mode, SSL based connections are
  392.   # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  393.   # `full`.
  394.   #ssl.verification_mode: full
  395.  
  396.   # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  397.   # 1.2 are enabled.
  398.   #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
  399.  
  400.   # Optional SSL configuration options. SSL is off by default.
  401.   # List of root certificates for HTTPS server verifications
  402.   #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
  403.  
  404.   # Certificate for SSL client authentication
  405.   #ssl.certificate: "/etc/pki/client/cert.pem"
  406.  
  407.   # Client Certificate Key
  408.   #ssl.key: "/etc/pki/client/cert.key"
  409.  
  410.   # Optional passphrase for decrypting the Certificate Key.
  411.   #ssl.key_passphrase: ''
  412.  
  413.   # Configure cipher suites to be used for SSL connections
  414.   #ssl.cipher_suites: []
  415.  
  416.   # Configure curve types for ECDHE based cipher suites
  417.   #ssl.curve_types: []
  418.  
  419.  
  420. #----------------------------- Logstash output --------------------------------
  421. #output.logstash:
  422.   # Boolean flag to enable or disable the output module.
  423.   #enabled: true
  424.  
  425.   # The Logstash hosts
  426.   #hosts: ["localhost:5044"]
  427.  
  428.   # Number of workers per Logstash host.
  429.   #worker: 1
  430.  
  431.   # Set gzip compression level.
  432.   #compression_level: 3
  433.  
  434.   # Optionally load balance the events between the Logstash hosts
  435.   #loadbalance: true
  436.  
  437.   # Number of batches to be sent asynchronously to Logstash while processing
  438.   # new batches.
  439.   #pipelining: 0
  440.  
  441.   # Optional index name. The default index name is set to the name of the beat
  442.   # in all lowercase.
  443.   #index: 'filebeat'
  444.  
  445.   # SOCKS5 proxy server URL
  446.   #proxy_url: socks5://user:password@socks5-server:2233
  447.  
  448.   # Resolve names locally when using a proxy server. Defaults to false.
  449.   #proxy_use_local_resolver: false
  450.  
  451.   # Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
  452.   #ssl.enabled: true
  453.  
  454.   # Configure SSL verification mode. If `none` is configured, all server hosts
  455.   # and certificates will be accepted. In this mode, SSL based connections are
  456.   # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  457.   # `full`.
  458.   #ssl.verification_mode: full
  459.  
  460.   # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  461.   # 1.2 are enabled.
  462.   #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
  463.  
  464.   # Optional SSL configuration options. SSL is off by default.
  465.   # List of root certificates for HTTPS server verifications
  466.   #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
  467.  
  468.   # Certificate for SSL client authentication
  469.   #ssl.certificate: "/etc/pki/client/cert.pem"
  470.  
  471.   # Client Certificate Key
  472.   #ssl.key: "/etc/pki/client/cert.key"
  473.  
  474.   # Optional passphrase for decrypting the Certificate Key.
  475.   #ssl.key_passphrase: ''
  476.  
  477.   # Configure cipher suites to be used for SSL connections
  478.   #ssl.cipher_suites: []
  479.  
  480.   # Configure curve types for ECDHE based cipher suites
  481.   #ssl.curve_types: []
  482.  
  483. #------------------------------- Kafka output ---------------------------------
  484. #output.kafka:
  485.   # Boolean flag to enable or disable the output module.
  486.   #enabled: true
  487.  
  488.   # The list of Kafka broker addresses from where to fetch the cluster metadata.
  489.   # The cluster metadata contain the actual Kafka brokers events are published
  490.   # to.
  491.   #hosts: ["localhost:9092"]
  492.  
  493.   # The Kafka topic used for produced events. The setting can be a format string
  494.   # using any event field. To set the topic from document type use `%{[type]}`.
  495.   #topic: beats
  496.  
  497.   # The Kafka event key setting. Use format string to create unique event key.
  498.   # By default no event key will be generated.
  499.   #key: ''
  500.  
  501.   # The Kafka event partitioning strategy. Default hashing strategy is `hash`
  502.   # using the `output.kafka.key` setting or randomly distributes events if
  503.   # `output.kafka.key` is not configured.
  504.   #partition.hash:
  505.     # If enabled, events will only be published to partitions with reachable
  506.     # leaders. Default is false.
  507.     #reachable_only: false
  508.  
  509.     # Configure alternative event field names used to compute the hash value.
  510.     # If empty `output.kafka.key` setting will be used.
  511.     # Default value is empty list.
  512.     #hash: []
  513.  
  514.   # Authentication details. Password is required if username is set.
  515.   #username: ''
  516.   #password: ''
  517.  
  518.   # Kafka version filebeat is assumed to run against. Defaults to the oldest
  519.   # supported stable version (currently version 0.8.2.0)
  520.   #version: 0.8.2
  521.  
  522.   # Metadata update configuration. The metadata contains the leader information
  523.   # used to decide which broker to publish to.
  524.   #metadata:
  525.     # Max metadata request retry attempts when cluster is in middle of leader
  526.     # election. Defaults to 3 retries.
  527.     #retry.max: 3
  528.  
  529.     # Waiting time between retries during leader elections. Default is 250ms.
  530.     #retry.backoff: 250ms
  531.  
  532.     # Refresh metadata interval. Defaults to every 10 minutes.
  533.     #refresh_frequency: 10m
  534.  
  535.   # The number of concurrent load-balanced Kafka output workers.
  536.   #worker: 1
  537.  
  538.   # The number of times to retry publishing an event after a publishing failure.
  539.   # After the specified number of retries, the events are typically dropped.
  540.   # Some Beats, such as Filebeat, ignore the max_retries setting and retry until
  541.   # all events are published.  Set max_retries to a value less than 0 to retry
  542.   # until all events are published. The default is 3.
  543.   #max_retries: 3
  544.  
  545.   # The maximum number of events to bulk in a single Kafka request. The default
  546.   # is 2048.
  547.   #bulk_max_size: 2048
  548.  
  549.   # The number of seconds to wait for responses from the Kafka brokers before
  550.   # timing out. The default is 30s.
  551.   #timeout: 30s
  552.  
  553.   # The maximum duration a broker will wait for the number of required ACKs. The
  554.   # default is 10s.
  555.   #broker_timeout: 10s
  556.  
  557.   # The number of messages buffered for each Kafka broker. The default is 256.
  558.   #channel_buffer_size: 256
  559.  
  560.   # The keep-alive period for an active network connection. If 0s, keep-alives
  561.   # are disabled. The default is 0 seconds.
  562.   #keep_alive: 0
  563.  
  564.   # Sets the output compression codec. Must be one of none, snappy and gzip. The
  565.   # default is gzip.
  566.   #compression: gzip
  567.  
  568.   # The maximum permitted size of JSON-encoded messages. Bigger messages will be
  569.   # dropped. The default value is 1000000 (bytes). This value should be equal to
  570.   # or less than the broker's message.max.bytes.
  571.   #max_message_bytes: 1000000
  572.  
  573.   # The ACK reliability level required from broker. 0=no response, 1=wait for
  574.   # local commit, -1=wait for all replicas to commit. The default is 1.  Note:
  575.   # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently
  576.   # on error.
  577.   #required_acks: 1
  578.  
  579.   # The number of seconds to wait for new events between two producer API calls.
  580.   #flush_interval: 1s
  581.  
  582.   # The configurable ClientID used for logging, debugging, and auditing
  583.   # purposes.  The default is "beats".
  584.   #client_id: beats
  585.  
  586.   # Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
  587.   #ssl.enabled: true
  588.  
  589.   # Optional SSL configuration options. SSL is off by default.
  590.   # List of root certificates for HTTPS server verifications
  591.   #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
  592.  
  593.   # Configure SSL verification mode. If `none` is configured, all server hosts
  594.   # and certificates will be accepted. In this mode, SSL based connections are
  595.   # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  596.   # `full`.
  597.   #ssl.verification_mode: full
  598.  
  599.   # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  600.   # 1.2 are enabled.
  601.   #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
  602.  
  603.   # Certificate for SSL client authentication
  604.   #ssl.certificate: "/etc/pki/client/cert.pem"
  605.  
  606.   # Client Certificate Key
  607.   #ssl.key: "/etc/pki/client/cert.key"
  608.  
  609.   # Optional passphrase for decrypting the Certificate Key.
  610.   #ssl.key_passphrase: ''
  611.  
  612.   # Configure cipher suites to be used for SSL connections
  613.   #ssl.cipher_suites: []
  614.  
  615.   # Configure curve types for ECDHE based cipher suites
  616.   #ssl.curve_types: []
  617.  
  618. #------------------------------- Redis output ---------------------------------
  619. output.redis:
  620.   # Boolean flag to enable or disable the output module.
  621.   #enabled: true
  622.  
  623.   # The list of Redis servers to connect to. If load balancing is enabled, the
  624.   # events are distributed to the servers in the list. If one server becomes
  625.   # unreachable, the events are distributed to the reachable servers only.
  626.   hosts: ["172.27.30.102:6379"]
  627.   #hosts: ["localhost:6379"]
  628.  
  629.   # The Redis port to use if hosts does not contain a port number. The default
  630.   # is 6379.
  631.   #port: 6379
  632.  
  633.   # The name of the Redis list or channel the events are published to. The
  634.   # default is filebeat.
  635.   #key: filebeat
  636.  
  637.   # The password to authenticate with. The default is no authentication.
  638.   #password:
  639.  
  640.   # The Redis database number where the events are published. The default is 0.
  641.   #db: 0
  642.  
  643.   # The Redis data type to use for publishing events. If the data type is list,
  644.   # the Redis RPUSH command is used. If the data type is channel, the Redis
  645.   # PUBLISH command is used. The default value is list.
  646.   #datatype: list
  647.  
  648.   # The number of workers to use for each host configured to publish events to
  649.   # Redis. Use this setting along with the loadbalance option. For example, if
  650.   # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each
  651.   # host).
  652.   #worker: 1
  653.  
  654.   # If set to true and multiple hosts or workers are configured, the output
  655.   # plugin load balances published events onto all Redis hosts. If set to false,
  656.   # the output plugin sends all events to only one host (determined at random)
  657.   # and will switch to another host if the currently selected one becomes
  658.   # unreachable. The default value is true.
  659.   #loadbalance: true
  660.  
  661.   # The Redis connection timeout in seconds. The default is 5 seconds.
  662.   timeout: 300s
  663.  
  664.   # The number of times to retry publishing an event after a publishing failure.
  665.   # After the specified number of retries, the events are typically dropped.
  666.   # Some Beats, such as Filebeat, ignore the max_retries setting and retry until
  667.   # all events are published. Set max_retries to a value less than 0 to retry
  668.   # until all events are published. The default is 3.
  669.   #max_retries: 3
  670.  
  671.   # The maximum number of events to bulk in a single Redis request or pipeline.
  672.   # The default is 2048.
  673.   #bulk_max_size: 2048
  674.  
  675.   # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The
  676.   # value must be a URL with a scheme of socks5://.
  677.   #proxy_url:
  678.  
  679.   # This option determines whether Redis hostnames are resolved locally when
  680.   # using a proxy. The default value is false, which means that name resolution
  681.   # occurs on the proxy server.
  682.   #proxy_use_local_resolver: false
  683.  
  684.   # Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
  685.   #ssl.enabled: true
  686.  
  687.   # Configure SSL verification mode. If `none` is configured, all server hosts
  688.   # and certificates will be accepted. In this mode, SSL based connections are
  689.   # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  690.   # `full`.
  691.   #ssl.verification_mode: full
  692.  
  693.   # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  694.   # 1.2 are enabled.
  695.   #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
  696.  
  697.   # Optional SSL configuration options. SSL is off by default.
  698.   # List of root certificates for HTTPS server verifications
  699.   #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
  700.  
  701.   # Certificate for SSL client authentication
  702.   #ssl.certificate: "/etc/pki/client/cert.pem"
  703.  
  704.   # Client Certificate Key
  705.   #ssl.key: "/etc/pki/client/cert.key"
  706.  
  707.   # Optional passphrase for decrypting the Certificate Key.
  708.   #ssl.key_passphrase: ''
  709.  
  710.   # Configure cipher suites to be used for SSL connections
  711.   #ssl.cipher_suites: []
  712.  
  713.   # Configure curve types for ECDHE based cipher suites
  714.   #ssl.curve_types: []
  715.  
  716.  
  717. #------------------------------- File output ----------------------------------
  718. #output.file:
  719.   # Boolean flag to enable or disable the output module.
  720.   #enabled: true
  721.  
  722.   # Path to the directory where to save the generated files. The option is
  723.   # mandatory.
  724.   #path: "/tmp/filebeat"
  725.  
  726.   # Name of the generated files. The default is `filebeat` and it generates
  727.   # files: `filebeat`, `filebeat.1`, `filebeat.2`, etc.
  728.   #filename: filebeat
  729.  
  730.   # Maximum size in kilobytes of each file. When this size is reached, and on
  731.   # every filebeat restart, the files are rotated. The default value is 10240
  732.   # kB.
  733.   #rotate_every_kb: 10000
  734.  
  735.   # Maximum number of files under path. When this number of files is reached,
  736.   # the oldest file is deleted and the rest are shifted from last to first. The
  737.   # default is 7 files.
  738.   #number_of_files: 7
  739.  
  740.  
  741. #----------------------------- Console output ---------------------------------
  742. #output.console:
  743.   # Boolean flag to enable or disable the output module.
  744.   #enabled: true
  745.  
  746.   # Pretty print json event
  747.   #pretty: false
  748.  
  749. #================================= Paths ======================================
  750.  
  751. # The home path for the filebeat installation. This is the default base path
  752. # for all other path settings and for miscellaneous files that come with the
  753. # distribution (for example, the sample dashboards).
  754. # If not set by a CLI flag or in the configuration file, the default for the
  755. # home path is the location of the binary.
  756. #path.home:
  757.  
  758. # The configuration path for the filebeat installation. This is the default
  759. # base path for configuration files, including the main YAML configuration file
  760. # and the Elasticsearch template file. If not set by a CLI flag or in the
  761. # configuration file, the default for the configuration path is the home path.
  762. #path.config: ${path.home}
  763.  
  764. # The data path for the filebeat installation. This is the default base path
  765. # for all the files in which filebeat needs to store its data. If not set by a
  766. # CLI flag or in the configuration file, the default for the data path is a data
  767. # subdirectory inside the home path.
  768. #path.data: ${path.home}/data
  769.  
  770. # The logs path for a filebeat installation. This is the default location for
  771. # the Beat's log files. If not set by a CLI flag or in the configuration file,
  772. # the default for the logs path is a logs subdirectory inside the home path.
  773. #path.logs: ${path.home}/logs
  774.  
  775. #================================ Logging =====================================
  776. # There are three options for the log output: syslog, file, stderr.
  777. # On Windows systems, the log output is sent to files by default;
  778. # on all other systems it goes to syslog by default.
  779.  
  780. # Sets log level. The default log level is info.
  781. # Available log levels are: critical, error, warning, info, debug
  782. #logging.level: info
  783.  
  784. # Enable debug output for selected components. To enable all selectors use ["*"]
  785. # Other available selectors are "beat", "publish", "service"
  786. # Multiple selectors can be chained.
  787. #logging.selectors: [ ]
  788.  
  789. # Send all logging output to syslog. The default is false.
  790. #logging.to_syslog: true
  791.  
  792. # If enabled, filebeat periodically logs its internal metrics that have changed
  793. # in the last period. For each metric that changed, the delta from the value at
  794. # the beginning of the period is logged. Also, the total values for
  795. # all non-zero internal metrics are logged on shutdown. The default is true.
  796. #logging.metrics.enabled: true
  797.  
  798. # The period after which to log the internal metrics. The default is 30s.
  799. #logging.metrics.period: 30s
  800.  
  801. # Logging to rotating files. Set logging.to_files to false to disable logging to
  802. # files.
  803. logging.to_files: true
  804. logging.files:
  805.   # Configure the path where the logs are written. The default is the logs directory
  806.   # under the home path (the binary location).
  807.   path: /var/log/filebeat
  808.  
  809.   # The name of the files where the logs are written to.
  810.   #name: filebeat
  811.  
  812.   # Configure log file size limit. If limit is reached, log file will be
  813.   # automatically rotated
  814.   #rotateeverybytes: 10485760 # = 10MB
  815.  
  816.   # Number of rotated log files to keep. Oldest files will be deleted first.
  817.   #keepfiles: 7