######################## Filebeat Configuration ############################

# This file is a full configuration example documenting all non-deprecated
# options in comments. For a shorter configuration example that contains only
# the most common options, please see filebeat.yml in the same directory.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html


#========================== Modules configuration ============================
filebeat.modules:

#------------------------------- System Module -------------------------------
#- module: system
  # Syslog
  #syslog:
    #enabled: true

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Prospector configuration (advanced). Any prospector configuration option
    # can be added under this section.
    #prospector:

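# A sketch only, not active config: enabling the syslog fileset with an
# explicit path might look like this; the path is illustrative.
#- module: system
#  syslog:
#    enabled: true
#    var.paths: ["/var/log/syslog*"]
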
#------------------------------- Apache2 Module ------------------------------
#- module: apache2
  # Access logs
  #access:
    #enabled: true

    # Ingest Node pipeline to use. Options are `with_plugins` (default)
    # and `no_plugins`. Use `no_plugins` if you don't have the geoip or
    # the user agent Node ingest plugins installed.
    #var.pipeline: with_plugins

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Prospector configuration (advanced). Any prospector configuration option
    # can be added under this section.
    #prospector:

  # Error logs
  #error:
    #enabled: true

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Prospector configuration (advanced). Any prospector configuration option
    # can be added under this section.
    #prospector:

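# Sketch, not active config: the apache2 module with both filesets enabled and
# the plugin-free pipeline selected, for clusters without the geoip/user-agent
# ingest plugins (values illustrative).
#- module: apache2
#  access:
#    enabled: true
#    var.pipeline: no_plugins
#  error:
#    enabled: true
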
#------------------------------- Auditd Module -------------------------------
#- module: auditd
  #log:
    #enabled: true

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Prospector configuration (advanced). Any prospector configuration option
    # can be added under this section.
    #prospector:

#-------------------------------- MySQL Module -------------------------------
#- module: mysql
  # Error logs
  #error:
    #enabled: true

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Prospector configuration (advanced). Any prospector configuration option
    # can be added under this section.
    #prospector:

  # Slow logs
  #slowlog:
    #enabled: true

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Prospector configuration (advanced). Any prospector configuration option
    # can be added under this section.
    #prospector:

#-------------------------------- Nginx Module -------------------------------
#- module: nginx
  # Access logs
  #access:
    #enabled: true

    # Ingest Node pipeline to use. Options are `with_plugins` (default)
    # and `no_plugins`. Use `no_plugins` if you don't have the geoip or
    # the user agent Node ingest plugins installed.
    #var.pipeline: with_plugins

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Prospector configuration (advanced). Any prospector configuration option
    # can be added under this section.
    #prospector:

  # Error logs
  #error:
    #enabled: true

    # Set custom paths for the log files. If left empty,
    # Filebeat will choose the paths depending on your OS.
    #var.paths:

    # Prospector configuration (advanced). Any prospector configuration option
    # can be added under this section.
    #prospector:


#=========================== Filebeat prospectors =============================

# List of prospectors to fetch data.
filebeat.prospectors:
# Each - is a prospector. Most options can be set at the prospector level, so
# you can use different prospectors for various configurations.
# Below are the prospector specific configurations.

# Type of the files. Based on this, the way the file is read is decided.
# The different types cannot be mixed in one prospector.
#
# Possible options are:
# * log: Reads every line of the log file (default)
# * stdin: Reads the standard in

#------------------------------ Log prospector --------------------------------
- input_type: log
  # Note: document_type must be a key of this prospector, not a new list item
  # (a leading "-" here would start a second, path-less prospector).
  document_type: log

  # Paths that should be crawled and fetched. Glob based paths.
  # To fetch all ".log" files from a specific level of subdirectories
  # /var/log/*/*.log can be used.
  # For each file found under this path, a harvester is started.
  # Make sure no file is defined twice as this can lead to unexpected behaviour.
  paths:
    - C:\MAMP\logs\ApacheLogs\*.log
    #- c:\programdata\elasticsearch\logs\*

  # Configure the file encoding for reading files with international characters
  # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
  # Some sample encodings:
  #   plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk,
  #   hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ...
  #encoding: plain

  # Exclude lines. A list of regular expressions to match. It drops the lines
  # that match any regular expression from the list. include_lines is called
  # before exclude_lines. By default, no lines are dropped.
  #exclude_lines: ["^DBG"]

  # Include lines. A list of regular expressions to match. It exports the lines
  # that match any regular expression from the list. include_lines is called
  # before exclude_lines. By default, all lines are exported.
  #include_lines: ["^ERR", "^WARN"]

  # Exclude files. A list of regular expressions to match. Filebeat drops the
  # files that match any regular expression from the list. By default, no
  # files are dropped.
  #exclude_files: [".gz$"]

  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering.
  #fields:
  #  level: debug
  #  review: 1

  # Set to true to store the additional fields as top level fields instead
  # of under the "fields" sub-dictionary. In case of name conflicts with the
  # fields added by Filebeat itself, the custom fields overwrite the default
  # fields.
  #fields_under_root: false

  # Ignore files which were modified more than the defined timespan in the
  # past. ignore_older is disabled by default, so no files are ignored by
  # setting it to 0. Time strings like 2h (2 hours) and 5m (5 minutes) can
  # be used.
  #ignore_older: 0

  # Type to be published in the 'type' field. For Elasticsearch output,
  # the type defines the document type these entries should be stored
  # in. Default: log
  #document_type: log

  # How often the prospector checks for new files in the paths that are
  # specified for harvesting. Specify 1s to scan the directory as frequently
  # as possible without causing Filebeat to scan too frequently. Default: 10s.
  #scan_frequency: 10s

  # Defines the buffer size every harvester uses when fetching the file.
  #harvester_buffer_size: 16384

  # Maximum number of bytes a single log event can have.
  # All bytes after max_bytes are discarded and not sent. The default is 10MB.
  # This is especially useful for multiline log messages, which can get large.
  #max_bytes: 10485760

  ### JSON configuration

  # Decode JSON options. Enable this if your logs are structured in JSON.
  # JSON key on which to apply the line filtering and multiline settings. This
  # key must be top level and its value must be a string, otherwise it is
  # ignored. If no text key is defined, the line filtering and multiline
  # features cannot be used.
  #json.message_key:

  # By default, the decoded JSON is placed under a "json" key in the output
  # document. If you enable this setting, the keys are copied to the top level
  # of the output document.
  #json.keys_under_root: false

  # If keys_under_root and this setting are enabled, then the values from the
  # decoded JSON object overwrite the fields that Filebeat normally adds
  # (type, source, offset, etc.) in case of conflicts.
  #json.overwrite_keys: false

  # If this setting is enabled, Filebeat adds a "json_error" key in case of
  # JSON unmarshaling errors or when a text key is defined in the configuration
  # but cannot be used.
  #json.add_error_key: false

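  # Sketch (illustrative, not active): decoding one-JSON-object-per-line logs,
  # keeping keys at top level and filtering on an assumed "message" key.
  #json.message_key: message
  #json.keys_under_root: true
  #json.add_error_key: true
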
  ### Multiline options

  # Multiline can be used for log messages spanning multiple lines. This is
  # common for Java stack traces or C line continuations.

  # The regexp pattern that has to be matched. The example pattern matches all
  # lines starting with [
  #multiline.pattern: ^\[

  # Defines if the pattern set under pattern should be negated or not.
  # Default is false.
  #multiline.negate: false

  # Match can be set to "after" or "before". It is used to define if lines
  # should be appended to a pattern that was (not) matched before or after,
  # or as long as a pattern is not matched based on negate.
  # Note: "after" is the equivalent of "previous" and "before" is the
  # equivalent of "next" in Logstash.
  #multiline.match: after

  # The maximum number of lines that are combined into one event.
  # In case there are more than max_lines, the additional lines are discarded.
  # Default is 500.
  #multiline.max_lines: 500

  # After the defined timeout, a multiline event is sent even if no new
  # pattern was found to start a new event. Default is 5s.
  #multiline.timeout: 5s

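  # Sketch (illustrative): a common Java stack-trace grouping, appending
  # indented and "Caused by:" continuation lines to the preceding line.
  #multiline.pattern: '^[[:space:]]+|^Caused by:'
  #multiline.negate: false
  #multiline.match: after
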
  # Setting tail_files to true means filebeat starts reading new files at the
  # end instead of the beginning. If this is used in combination with log
  # rotation, this can mean that the first entries of a new file are skipped.
  #tail_files: false

  # The Ingest Node pipeline ID associated with this prospector. If this is
  # set, it overwrites the pipeline option from the Elasticsearch output.
  #pipeline:

  # If symlinks is enabled, symlinks are opened and harvested. The harvester
  # is opening the original for harvesting but will report the symlink name
  # as source.
  #symlinks: false

  # Backoff values define how aggressively filebeat crawls new files for
  # updates. The default values can be used in most cases. Backoff defines how
  # long Filebeat waits before checking a file again after EOF is reached.
  # Default is 1s, which means the file is checked every second for new lines.
  # This leads to near real-time crawling. Every time a new line appears,
  # backoff is reset to the initial value.
  #backoff: 1s

  # Max backoff defines the maximum backoff time. After backing off multiple
  # times from checking a file, the waiting time will never exceed max_backoff,
  # independent of the backoff factor. With 10s, in the worst case a line
  # appended after multiple backoffs is read after at most 10 seconds.
  #max_backoff: 10s

  # The backoff factor defines how fast the algorithm backs off. The bigger
  # the backoff factor, the faster the max_backoff value is reached. If this
  # value is set to 1, no backoff will happen. The backoff value is multiplied
  # by backoff_factor each time until max_backoff is reached.
  #backoff_factor: 2

  # Max number of harvesters that are started in parallel.
  # Default is 0, which means unlimited.
  #harvester_limit: 0

  ### Harvester closing options

  # Close inactive closes the file handler after the predefined period.
  # The period starts when the last line of the file was read, not from the
  # file's modification time (ModTime).
  # Time strings like 2h (2 hours) and 5m (5 minutes) can be used.
  #close_inactive: 5m

  # Close renamed closes a file handler when the file is renamed or rotated.
  # Note: Potential data loss. Make sure to read and understand the docs for
  # this option.
  #close_renamed: false

  # When enabling this option, a file handler is closed immediately in case a
  # file can't be found any more. In case the file shows up again later,
  # harvesting will continue at the last known position after scan_frequency.
  #close_removed: true

  # Closes the file handler as soon as the harvester reaches the end of the
  # file. By default this option is disabled.
  # Note: Potential data loss. Make sure to read and understand the docs for
  # this option.
  #close_eof: false

  ### State options

  # If the modification time of a file is older than clean_inactive, the
  # file's state is removed from the registry. By default this is disabled.
  #clean_inactive: 0

  # Removes the state for files which cannot be found on disk anymore
  # immediately.
  #clean_removed: true

  # Close timeout closes the harvester after the predefined time.
  # This is independent of whether the harvester has finished reading the
  # file or not. By default this option is disabled.
  # Note: Potential data loss. Make sure to read and understand the docs for
  # this option.
  #close_timeout: 0

  # Defines if the prospector is enabled.
  #enabled: true

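# Sketch of a second, self-contained log prospector (path, type and fields are
# illustrative): most of the options above can be repeated per prospector.
#- input_type: log
#  paths:
#    - C:\MAMP\logs\mysql_error.log
#  document_type: mysqlerror
#  fields:
#    env: dev
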
#----------------------------- Stdin prospector -------------------------------
# Configuration to use stdin input
#- input_type: stdin

#========================= Filebeat global options ============================

# Event count spool threshold - forces network flush if exceeded.
#filebeat.spool_size: 2048

# Enable async publisher pipeline in filebeat (Experimental!)
#filebeat.publish_async: false

# Defines how often the spooler is flushed. After idle_timeout the spooler is
# flushed even though spool_size is not reached.
#filebeat.idle_timeout: 5s

# Name of the registry file. If a relative path is used, it is considered
# relative to the data path.
#filebeat.registry_file: ${path.data}/registry

# Full path to a directory with additional prospector configuration files.
# These config files must have the full filebeat config part inside, but only
# the prospector part is processed. All global options like spool_size are
# ignored. The config_dir MUST point to a different directory than the one
# the main filebeat config file is in.
#filebeat.config_dir:

# How long filebeat waits on shutdown for the publisher to finish.
# Default is 0, not waiting.
#filebeat.shutdown_timeout: 0

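# Sketch (values illustrative): a larger spool flushed at least every 10s,
# with a 5s grace period for in-flight events on shutdown.
#filebeat.spool_size: 4096
#filebeat.idle_timeout: 10s
#filebeat.shutdown_timeout: 5s
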
#================================ General ======================================

# The name of the shipper that publishes the network data. It can be used to
# group all the transactions sent by a single shipper in the web interface.
# If this option is not defined, the hostname is used.
#name:

# The tags of the shipper are included in their own field with each
# transaction published. Tags make it easy to group servers by different
# logical properties.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output. Fields can be scalar values, arrays, dictionaries, or any nested
# combination of these.
#fields:
#  env: staging

# If this option is set to true, the custom fields are stored as top-level
# fields in the output document instead of being grouped under a fields
# sub-dictionary. Default is false.
#fields_under_root: false

# Internal queue size for single events in the processing pipeline.
#queue_size: 1000

# The internal queue size for bulk events in the processing pipeline.
# Do not modify this value.
#bulk_queue_size: 0

# Sets the maximum number of CPUs that can be executing simultaneously. The
# default is the number of logical CPUs available in the system.
#max_procs:

#================================ Processors ===================================

# Processors are used to reduce the number of fields in the exported event or
# to enhance the event with external metadata. This section defines a list of
# processors that are applied one by one and the first one receives the
# initial event:
#
#   event -> filter1 -> event1 -> filter2 -> event2 ...
#
# The supported processors are drop_fields, drop_event, include_fields, and
# add_cloud_metadata.
#
# For example, you can use the following processors to keep the fields that
# contain CPU load percentages, but remove the fields that contain CPU ticks
# values:
#
#processors:
#- include_fields:
#    fields: ["cpu"]
#- drop_fields:
#    fields: ["cpu.user", "cpu.system"]
#
# The following example drops the events that have the HTTP response code 200:
#
#processors:
#- drop_event:
#    when:
#      equals:
#        http.code: 200
#
# The following example enriches each event with metadata from the cloud
# provider about the host machine. It works on EC2, GCE, and DigitalOcean.
#
#processors:
#- add_cloud_metadata:
#

#================================ Outputs ======================================

# Configure what outputs to use when sending the data collected by the beat.
# Multiple outputs may be used.

#-------------------------- Elasticsearch output -------------------------------
# Disabled here: this configuration ships to Logstash (see the Logstash output
# below). Enable this section instead to send directly to Elasticsearch.
#output.elasticsearch:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Array of hosts to connect to.
  # Scheme and port can be left out and will be set to the default (http and 9200).
  # In case you specify an additional path, the scheme is required: http://localhost:9200/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
  #hosts: ["localhost:9200"]

  # Set gzip compression level.
  #compression_level: 0

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"

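  # Sketch (placeholders, not active): a TLS-secured Elasticsearch output with
  # basic auth.
  #hosts: ["https://es1.example.com:9200"]
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
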
  # Dictionary of HTTP parameters to pass within the url with index operations.
  #parameters:
    #param1: value1
    #param2: value2

  # Number of workers per Elasticsearch host.
  #worker: 1

  # Optional index name. The default is "filebeat" plus date
  # and generates [filebeat-]YYYY.MM.DD keys.
  #index: "filebeat-%{+yyyy.MM.dd}"

  # Optional ingest node pipeline. By default no pipeline will be used.
  #pipeline: ""

  # Optional HTTP Path
  #path: "/elasticsearch"

  # Custom HTTP headers to add to each request
  #headers:
  #  X-My-Header: Contents of the header

  # Proxy server url
  #proxy_url: http://proxy:3128

  # The number of times a particular Elasticsearch index operation is
  # attempted. If the indexing operation doesn't succeed after this many
  # retries, the events are dropped. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Elasticsearch bulk API
  # index request. The default is 50.
  #bulk_max_size: 50

  # Configure http request timeout before failing a request to Elasticsearch.
  #timeout: 90

  # The number of seconds to wait for new events between two bulk API index
  # requests. If `bulk_max_size` is reached before this interval expires,
  # additional bulk index requests are made.
  #flush_interval: 1s

  # A template is used to set the mapping in Elasticsearch.
  # By default template loading is enabled and the template is loaded.
  # These settings can be adjusted to load your own template or overwrite
  # existing ones.

  # Set to false to disable template loading.
  #template.enabled: true

  # Template name. By default the template name is filebeat.
  #template.name: "filebeat"

  # Path to template file.
  #template.path: "${path.config}/filebeat.template.json"

  # Overwrite existing template.
  #template.overwrite: false

  # If set to true, filebeat checks the Elasticsearch version at connect time,
  # and if it is 2.x, it loads the file specified by the
  # template.versions.2x.path setting. The default is true.
  #template.versions.2x.enabled: true

  # Path to the Elasticsearch 2.x version of the template file.
  #template.versions.2x.path: "${path.config}/filebeat.template-es2x.json"

  # If set to true, filebeat checks the Elasticsearch version at connect time,
  # and if it is 6.x, it loads the file specified by the
  # template.versions.6x.path setting. The default is true.
  #template.versions.6x.enabled: true

  # Path to the Elasticsearch 6.x version of the template file.
  #template.versions.6x.path: "${path.config}/filebeat.template-es6x.json"

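  # Sketch (file name illustrative): loading a custom template and replacing
  # any template already installed under the same name.
  #template.name: "filebeat"
  #template.path: "${path.config}/my-filebeat.template.json"
  #template.overwrite: true
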
  # Use SSL settings for HTTPS. Default is true.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # SSL configuration. By default is off.
  # List of root certificates for HTTPS server verifications.
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication.
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the Certificate Key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections.
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE based cipher suites.
  #ssl.curve_types: []


#----------------------------- Logstash output ---------------------------------
# Active output: this configuration ships events to Logstash on localhost:5044.
output.logstash:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # The Logstash hosts
  hosts: ["localhost:5044"]

  # Number of workers per Logstash host.
  #worker: 1

  # Set gzip compression level.
  #compression_level: 3

  # Optional load balance the events between the Logstash hosts.
  #loadbalance: true

  # Number of batches to be sent asynchronously to Logstash while processing
  # new batches.
  #pipelining: 0

  # Optional index name. The default index name is set to the name of the beat
  # in all lowercase.
  #index: 'filebeat'

  # SOCKS5 proxy server URL
  #proxy_url: socks5://user:password@socks5-server:2233

  # Resolve names locally when using a proxy server. Defaults to false.
  #proxy_use_local_resolver: false

  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verifications.
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication.
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the Certificate Key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections.
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE based cipher suites.
  #ssl.curve_types: []

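# Sketch (hostnames illustrative): shipping to two load-balanced Logstash
# hosts instead of the single local one above.
#output.logstash:
#  hosts: ["ls1.example.com:5044", "ls2.example.com:5044"]
#  loadbalance: true
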
#------------------------------- Kafka output ----------------------------------
#output.kafka:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # The list of Kafka broker addresses from where to fetch the cluster
  # metadata. The cluster metadata contain the actual Kafka brokers events
  # are published to.
  #hosts: ["localhost:9092"]

  # The Kafka topic used for produced events. The setting can be a format
  # string using any event field. To set the topic from the document type,
  # use `%{[type]}`.
  #topic: beats

  # The Kafka event key setting. Use a format string to create a unique event
  # key. By default no event key will be generated.
  #key: ''

  # The Kafka event partitioning strategy. The default strategy is `hash`,
  # which uses the `output.kafka.key` setting, or randomly distributes events
  # if `output.kafka.key` is not configured.
  #partition.hash:
    # If enabled, events will only be published to partitions with reachable
    # leaders. Default is false.
    #reachable_only: false

    # Configure alternative event field names used to compute the hash value.
    # If empty, the `output.kafka.key` setting will be used.
    # Default value is an empty list.
    #hash: []

  # Authentication details. Password is required if username is set.
  #username: ''
  #password: ''

  # Kafka version filebeat is assumed to run against. Defaults to the oldest
  # supported stable version (currently version 0.8.2.0).
  #version: 0.8.2

  # Metadata update configuration. Metadata contains the leader information
  # that decides which broker to use when publishing.
  #metadata:
    # Max metadata request retry attempts when the cluster is in the middle
    # of a leader election. Defaults to 3 retries.
    #retry.max: 3

    # Waiting time between retries during leader elections. Default is 250ms.
    #retry.backoff: 250ms

    # Refresh metadata interval. Defaults to every 10 minutes.
    #refresh_frequency: 10m

  # The number of concurrent load-balanced Kafka output workers.
  #worker: 1

  # The number of times to retry publishing an event after a publishing
  # failure. After the specified number of retries, the events are typically
  # dropped. Some Beats, such as Filebeat, ignore the max_retries setting and
  # retry until all events are published. Set max_retries to a value less
  # than 0 to retry until all events are published. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Kafka request. The
  # default is 2048.
  #bulk_max_size: 2048

  # The number of seconds to wait for responses from the Kafka brokers before
  # timing out. The default is 30s.
  #timeout: 30s

  # The maximum duration a broker will wait for the number of required ACKs.
  # The default is 10s.
  #broker_timeout: 10s

  # The number of messages buffered for each Kafka broker. The default is 256.
  #channel_buffer_size: 256

  # The keep-alive period for an active network connection. If 0s, keep-alives
  # are disabled. The default is 0 seconds.
  #keep_alive: 0

  # Sets the output compression codec. Must be one of none, snappy and gzip.
  # The default is gzip.
  #compression: gzip

  # The maximum permitted size of JSON-encoded messages. Bigger messages will
  # be dropped. The default value is 1000000 (bytes). This value should be
  # equal to or less than the broker's message.max.bytes.
  #max_message_bytes: 1000000

  # The ACK reliability level required from the broker. 0=no response, 1=wait
  # for local commit, -1=wait for all replicas to commit. The default is 1.
  # Note: If set to 0, no ACKs are returned by Kafka. Messages might be lost
  # silently on error.
  #required_acks: 1

  # The number of seconds to wait for new events between two producer API
  # calls.
  #flush_interval: 1s

  # The configurable ClientID used for logging, debugging, and auditing
  # purposes. The default is "beats".
  #client_id: beats

  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
  #ssl.enabled: true

  # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verifications.
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # Certificate for SSL client authentication.
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the Certificate Key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections.
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE based cipher suites.
  #ssl.curve_types: []

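# Sketch (broker address illustrative): routing events to a per-type topic
# using the `%{[type]}` format string mentioned above.
#output.kafka:
#  hosts: ["kafka1.example.com:9092"]
#  topic: '%{[type]}'
#  required_acks: 1
#  compression: gzip
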
#------------------------------- Redis output ----------------------------------
#output.redis:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # The list of Redis servers to connect to. If load balancing is enabled, the
  # events are distributed to the servers in the list. If one server becomes
  # unreachable, the events are distributed to the reachable servers only.
  #hosts: ["localhost:6379"]

  # The Redis port to use if hosts does not contain a port number. The default
  # is 6379.
  #port: 6379

  # The name of the Redis list or channel the events are published to. The
  # default is filebeat.
  #key: filebeat

  # The password to authenticate with. The default is no authentication.
  #password:

  # The Redis database number where the events are published. The default is 0.
  #db: 0

  # The Redis data type to use for publishing events. If the data type is
  # list, the Redis RPUSH command is used. If the data type is channel, the
  # Redis PUBLISH command is used. The default value is list.
  #datatype: list

  # The number of workers to use for each host configured to publish events
  # to Redis. Use this setting along with the loadbalance option. For example,
  # if you have 2 hosts and 3 workers, in total 6 workers are started (3 for
  # each host).
  #worker: 1

  # If set to true and multiple hosts or workers are configured, the output
  # plugin load balances published events onto all Redis hosts. If set to
  # false, the output plugin sends all events to only one host (determined at
  # random) and will switch to another host if the currently selected one
  # becomes unreachable. The default value is true.
  #loadbalance: true

  # The Redis connection timeout in seconds. The default is 5 seconds.
  #timeout: 5s

  # The number of times to retry publishing an event after a publishing
  # failure. After the specified number of retries, the events are typically
  # dropped. Some Beats, such as Filebeat, ignore the max_retries setting and
  # retry until all events are published. Set max_retries to a value less
  # than 0 to retry until all events are published. The default is 3.
  #max_retries: 3

  # The maximum number of events to bulk in a single Redis request or
  # pipeline. The default is 2048.
  #bulk_max_size: 2048

  # The URL of the SOCKS5 proxy to use when connecting to the Redis servers.
  # The value must be a URL with a scheme of socks5://.
  #proxy_url:

  # This option determines whether Redis hostnames are resolved locally when
  # using a proxy. The default value is false, which means that name
  # resolution occurs on the proxy server.
  #proxy_use_local_resolver: false

  # Enable SSL support. SSL is automatically enabled if any SSL setting is set.
  #ssl.enabled: true

  # Configure SSL verification mode. If `none` is configured, all server hosts
  # and certificates will be accepted. In this mode, SSL based connections are
  # susceptible to man-in-the-middle attacks. Use only for testing. Default is
  # `full`.
  #ssl.verification_mode: full

  # List of supported/valid TLS versions. By default all TLS versions 1.0 up to
  # 1.2 are enabled.
  #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]

  # Optional SSL configuration options. SSL is off by default.
  # List of root certificates for HTTPS server verifications.
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication.
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

  # Optional passphrase for decrypting the Certificate Key.
  #ssl.key_passphrase: ''

  # Configure cipher suites to be used for SSL connections.
  #ssl.cipher_suites: []

  # Configure curve types for ECDHE based cipher suites.
  #ssl.curve_types: []

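# Sketch (host illustrative): publishing to a Redis list that a downstream
# Logstash instance could consume with its redis input.
#output.redis:
#  hosts: ["redis.example.com:6379"]
#  key: filebeat
#  datatype: list
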

#------------------------------- File output -----------------------------------
#output.file:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Path to the directory where to save the generated files. The option is
  # mandatory.
  #path: "/tmp/filebeat"

  # Name of the generated files. The default is `filebeat` and it generates
  # files: `filebeat`, `filebeat.1`, `filebeat.2`, etc.
  #filename: filebeat

  # Maximum size in kilobytes of each file. When this size is reached, and on
  # every filebeat restart, the files are rotated. The default value is 10240
  # kB.
  #rotate_every_kb: 10000

  # Maximum number of files under path. When this number of files is reached,
  # the oldest file is deleted and the rest are shifted from last to first.
  # The default is 7 files.
  #number_of_files: 7


#----------------------------- Console output ---------------------------------
#output.console:
  # Boolean flag to enable or disable the output module.
  #enabled: true

  # Pretty print json event
  #pretty: false

#================================= Paths ======================================

# The home path for the filebeat installation. This is the default base path
# for all other path settings and for miscellaneous files that come with the
# distribution (for example, the sample dashboards).
# If not set by a CLI flag or in the configuration file, the default for the
# home path is the location of the binary.
#path.home:

# The configuration path for the filebeat installation. This is the default
# base path for configuration files, including the main YAML configuration file
# and the Elasticsearch template file. If not set by a CLI flag or in the
# configuration file, the default for the configuration path is the home path.
#path.config: ${path.home}

# The data path for the filebeat installation. This is the default base path
# for all the files in which filebeat needs to store its data. If not set by a
# CLI flag or in the configuration file, the default for the data path is a data
# subdirectory inside the home path.
#path.data: ${path.home}/data

# The logs path for a filebeat installation. This is the default location for
# the Beat's log files. If not set by a CLI flag or in the configuration file,
# the default for the logs path is a logs subdirectory inside the home path.
#path.logs: ${path.home}/logs

#============================== Dashboards =====================================
# These settings control loading the sample dashboards to the Kibana index.
# Loading the dashboards is disabled by default and can be enabled either by
# setting the options here, or by using the `-setup` CLI flag.
#dashboards.enabled: false

# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For
# released versions, this URL points to the dashboard archive on the
# artifacts.elastic.co website.
#dashboards.url:

# The directory from where to read the dashboards. It is used instead of the
# URL when it has a value.
#dashboards.directory:

# The file archive (zip file) from where to read the dashboards. It is used
# instead of the URL when it has a value.
#dashboards.file:

# If this option is enabled, the snapshot URL is used instead of the default
# URL.
#dashboards.snapshot: false

# The URL from where to download the snapshot version of the dashboards. By
# default this has a value which is computed based on the Beat name and
# version.
#dashboards.snapshot_url:

# In case the archive contains the dashboards from multiple Beats, this lets
# you select which one to load. You can load all the dashboards in the archive
# by setting this to the empty string.
#dashboards.beat: filebeat

# The name of the Kibana index to use for setting the configuration. Default
# is ".kibana".
#dashboards.kibana_index: .kibana

# The Elasticsearch index name. This overwrites the index name defined in the
# dashboards and index pattern. Example: testbeat-*
#dashboards.index:

#================================ Logging ======================================
# There are three options for the log output: syslog, file, stderr.
# On Windows systems, logs go to the file output by default; on all other
# systems, to syslog by default.

# Sets the log level. The default log level is info.
# Available log levels are: critical, error, warning, info, debug
logging.level: warning

# Enable debug output for selected components. To enable all selectors use ["*"].
# Other available selectors are "beat", "publish", "service".
# Multiple selectors can be chained.
#logging.selectors: [ ]

# Send all logging output to syslog. The default is false.
#logging.to_syslog: true

# If enabled, filebeat periodically logs its internal metrics that have changed
# in the last period. For each metric that changed, the delta from the value at
# the beginning of the period is logged. Also, the total values for
# all non-zero internal metrics are logged on shutdown. The default is true.
#logging.metrics.enabled: true

# The period after which to log the internal metrics. The default is 30s.
#logging.metrics.period: 30s

# Logging to rotating files. Set logging.to_files to false to disable logging
# to files.
logging.to_files: true
logging.files:
  # Configure the path where the logs are written. The default is the logs
  # directory under the home path (the binary location).
  path: C:\Program Files (x86)\filebeat

  # The name of the files where the logs are written to.
  #name: filebeat

  # Configure log file size limit. If the limit is reached, the log file will
  # be automatically rotated.
  #rotateeverybytes: 10485760 # = 10MB

  # Number of rotated log files to keep. Oldest files will be deleted first.
  #keepfiles: 7
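
# Sketch pulling the file-logging options above together (the path is the one
# already configured in this file; other values illustrative).
#logging.level: info
#logging.to_files: true
#logging.files:
#  path: C:\Program Files (x86)\filebeat
#  name: filebeat
#  rotateeverybytes: 10485760
#  keepfiles: 7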