################### Filebeat Configuration Example #########################

############################# Filebeat ######################################
filebeat:
  # List of prospectors to fetch data.
  prospectors:
    # Each - is a prospector. Below are the prospector specific configurations
    -
      # Paths that should be crawled and fetched. Glob based paths.
      # To fetch all ".log" files from a specific level of subdirectories
      # /var/log/*/*.log can be used.
      # For each file found under this path, a harvester is started.
      # Make sure no file is defined twice as this can lead to unexpected behaviour.
      paths:
        #- /var/log/*.log
        -
      input_type: log
      document_type: mail

      # Configure the file encoding for reading files with international characters
      # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
      # Some sample encodings:
      #   plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk,
      #   hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ...
      #encoding: plain

      # Type of the files. Based on this the way the file is read is decided.
      # The different types cannot be mixed in one prospector.
      #
      # Possible options are:
      # * log: Reads every line of the log file (default)
      # * stdin: Reads the standard in

      # Exclude lines. A list of regular expressions to match. It drops the lines that are
      # matching any regular expression from the list. The include_lines is called before
      # exclude_lines. By default, no lines are dropped.
      # exclude_lines: ["^DBG"]

      # Include lines. A list of regular expressions to match. It exports the lines that are
      # matching any regular expression from the list. The include_lines is called before
      # exclude_lines. By default, all the lines are exported.
      # include_lines: ["^ERR", "^WARN"]

      # Exclude files. A list of regular expressions to match. Filebeat drops the files that
      # are matching any regular expression from the list. By default, no files are dropped.
      # exclude_files: [".gz$"]

      # Optional additional fields. These fields can be freely picked
      # to add additional information to the crawled log files for filtering.
      #fields:
      #  ip_host: "your server IP"
      #  review: 1

      # Set to true to store the additional fields as top level fields instead
      # of under the "fields" sub-dictionary. In case of name conflicts with the
      # fields added by Filebeat itself, the custom fields overwrite the default
      # fields.
      #fields_under_root: true
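
      # Illustration (assumed example, not part of the original config): with the
      # custom fields above, a published event carries
      #   "fields": {"ip_host": "your server IP", "review": 1}
      # by default, while fields_under_root: true would place ip_host and review
      # at the top level of the event instead.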

      # Ignore files which were modified more than the defined timespan in the past.
      # In case all files on your system must be read you can set this value very large.
      # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
      #ignore_older: 0

      # Close older closes the file handler for files which were not modified
      # for longer than close_older.
      # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
      #close_older: 1h

      # Type to be published in the 'type' field. For Elasticsearch output,
      # the type defines the document type these entries should be stored
      # in. Default: log
      #document_type: log

      # Scan frequency in seconds.
      # How often these files should be checked for changes. In case it is set
      # to 0s, it is done as often as possible. Default: 10s
      #scan_frequency: 10s

      # Defines the buffer size every harvester uses when fetching the file
      #harvester_buffer_size: 16384

      # Maximum number of bytes a single log event can have.
      # All bytes after max_bytes are discarded and not sent. The default is 10MB.
      # This is especially useful for multiline log messages which can get large.
      #max_bytes: 10485760

      # Multiline can be used for log messages spanning multiple lines. This is common
      # for Java stack traces or C line continuations.
      #multiline:

        # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
        #pattern: ^\[

        # Defines if the pattern set under pattern should be negated or not. Default is false.
        #negate: false

        # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
        # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
        # Note: After is the equivalent to previous and before is the equivalent to next in Logstash.
        #match: after

        # The maximum number of lines that are combined to one event.
        # In case there are more than max_lines the additional lines are discarded.
        # Default is 500
        #max_lines: 500

        # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event.
        # Default is 5s.
        #timeout: 5s
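
      # A minimal sketch (assumed example, not part of the original paste) of a
      # multiline setup that joins Java stack-trace lines onto the preceding
      # log line that starts with [:
      #multiline:
      #  pattern: '^\['   # lines starting with [ begin a new event
      #  negate: true     # lines NOT matching the pattern ...
      #  match: after     # ... are appended to the previous event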

      # Setting tail_files to true means filebeat starts reading new files at the end
      # instead of the beginning. If this is used in combination with log rotation
      # this can mean that the first entries of a new file are skipped.
      #tail_files: false

      # Backoff values define how aggressively filebeat crawls new files for updates.
      # The default values can be used in most cases. Backoff defines how long it is waited
      # to check a file again after EOF is reached. Default is 1s which means the file
      # is checked every second if new lines were added. This leads to a near real time crawling.
      # Every time a new line appears, backoff is reset to the initial value.
      #backoff: 1s

      # Max backoff defines what the maximum backoff time is. After having backed off multiple times
      # from checking the files, the waiting time will never exceed max_backoff independent of the
      # backoff factor. Having it set to 10s means in the worst case a new line can be added to a log
      # file after having backed off multiple times, it takes a maximum of 10s to read the new line.
      #max_backoff: 10s

      # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor,
      # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen.
      # The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached.
      #backoff_factor: 2
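
      # Worked illustration (not part of the original paste): with backoff: 1s,
      # backoff_factor: 2 and max_backoff: 10s, the wait between checks of an
      # idle file grows 1s -> 2s -> 4s -> 8s -> 10s and then stays capped at 10s
      # until a new line appears, which resets it back to 1s.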

      # This option closes a file, as soon as the file name changes.
      # This config option is recommended on windows only. Filebeat keeps the files it's reading open. This can cause
      # issues when the file is removed, as the file will not be fully removed until also Filebeat closes
      # the reading. Filebeat closes the file handler after ignore_older. During this time no new file with the
      # same name can be created. Turning this feature on, on the other hand, can lead to loss of data
      # on rotated files. It can happen that after file rotation the beginning of the new
      # file is skipped, as the reading starts at the end. We recommend leaving this option set to false
      # but lowering the ignore_older value to release files faster.
      #force_close_files: false

    # Additional prospector
    -
      paths:
        #- /var/log/*.log
      input_type: log
      document_type: iis-access
      # Configuration to use stdin input
      #input_type: stdin

  # General filebeat configuration options
  #
  # Event count spool threshold - forces network flush if exceeded
  #spool_size: 2048

  # Enable async publisher pipeline in filebeat (Experimental!)
  #publish_async: false

  # Defines how often the spooler is flushed. After idle_timeout the spooler is
  # flushed even though spool_size is not reached.
  #idle_timeout: 5s

  # Name of the registry file. Per default it is put in the current working
  # directory. If the working directory is changed when filebeat is run again,
  # indexing starts from the beginning again.
  registry_file: "C:/ProgramData/filebeat/registry"

  # Full Path to directory with additional prospector configuration files. Each file must end with .yml.
  # These config files must have the full filebeat config part inside, but only
  # the prospector part is processed. All global options like spool_size are ignored.
  # The config_dir MUST point to a different directory than the one the main filebeat config file is in.
  #config_dir:
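
  # A minimal sketch of such a drop-in file (hypothetical path conf.d/extra.yml,
  # not part of the original paste); only its prospector part would be read:
  #filebeat:
  #  prospectors:
  #    -
  #      paths:
  #        - /var/log/extra/*.log
  #      input_type: log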

###############################################################################
############################# Libbeat Config ##################################
# Base config file used by all other beats for using libbeat features

############################# Output ##########################################

# Configure what outputs to use when sending the data collected by the beat.
# Multiple outputs may be used.
output:

  ### Elasticsearch as output
  #elasticsearch:
    # Array of hosts to connect to.
    # Scheme and port can be left out and will be set to the default (http and 9200).
    # In case you specify an additional path, the scheme is required: http://localhost:9200/path
    # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
    #hosts: ["localhost:9200"]

    # Optional protocol and basic auth credentials.
    #protocol: "https"
    #username: "admin"
    #password: "s3cr3t"

    # Number of workers per Elasticsearch host.
    #worker: 1

    # Optional index name. The default is "filebeat" and generates
    # [filebeat-]YYYY.MM.DD keys.
    #index: "filebeat"

    # A template is used to set the mapping in Elasticsearch.
    # By default template loading is disabled and no template is loaded.
    # These settings can be adjusted to load your own template or overwrite existing ones.
    #template:

      # Template name. By default the template name is filebeat.
      #name: "filebeat"

      # Path to template file
      #path: "filebeat.template.json"

      # Overwrite existing template
      #overwrite: false

    # Optional HTTP Path
    #path: "/elasticsearch"

    # Proxy server url
    #proxy_url: http://proxy:3128

    # The number of times a particular Elasticsearch index operation is attempted. If
    # the indexing operation doesn't succeed after this many retries, the events are
    # dropped. The default is 3.
    #max_retries: 3

    # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
    # The default is 50.
    #bulk_max_size: 50

    # Configure http request timeout before failing a request to Elasticsearch.
    #timeout: 90

    # The number of seconds to wait for new events between two bulk API index requests.
    # If `bulk_max_size` is reached before this interval expires, additional bulk index
    # requests are made.
    #flush_interval: 1

    # Boolean that sets if the topology is kept in Elasticsearch. The default is
    # false. This option makes sense only for Packetbeat.
    #save_topology: false

    # The time to live in seconds for the topology information that is stored in
    # Elasticsearch. The default is 15 seconds.
    #topology_expire: 15

    # TLS configuration. By default it is off.
    #tls:
      # List of root certificates for HTTPS server verifications
      #certificate_authorities: ["/etc/pki/root/ca.pem"]

      # Certificate for TLS client authentication
      #certificate: "/etc/pki/client/cert.pem"

      # Client Certificate Key
      #certificate_key: "/etc/pki/client/cert.key"

      # Controls whether the client verifies server certificates and host name.
      # If insecure is set to true, all server host names and certificates will be
      # accepted. In this mode TLS based connections are susceptible to
      # man-in-the-middle attacks. Use only for testing.
      #insecure: true

      # Configure cipher suites to be used for TLS connections
      #cipher_suites: []

      # Configure curve types for ECDHE based cipher suites
      #curve_types: []

      # Configure minimum TLS version allowed for connection to Elasticsearch
      #min_version: 1.0

      # Configure maximum TLS version allowed for connection to Elasticsearch
      #max_version: 1.2


  ### Logstash as output
  logstash:
    # The Logstash hosts
    hosts: ["10.1.6.246:5044","10.1.6.247:5044"]

    # Number of workers per Logstash host.
    worker: 3

    # Set gzip compression level.
    compression_level: 3

    # Optionally load balance the events between the Logstash hosts.
    loadbalance: true

    # Optional index name. The default index name depends on each beat.
    # For Packetbeat, the default is set to packetbeat, for Topbeat
    # to topbeat and for Filebeat to filebeat.
    index: fb-mail

    # Optional TLS. By default it is off.
    #tls:
      # List of root certificates for HTTPS server verifications
      #certificate_authorities: ["/etc/pki/root/ca.pem"]

      # Certificate for TLS client authentication
      #certificate: "/etc/pki/client/cert.pem"

      # Client Certificate Key
      #certificate_key: "/etc/pki/client/cert.key"

      # Controls whether the client verifies server certificates and host name.
      # If insecure is set to true, all server host names and certificates will be
      # accepted. In this mode TLS based connections are susceptible to
      # man-in-the-middle attacks. Use only for testing.
      #insecure: true

      # Configure cipher suites to be used for TLS connections
      #cipher_suites: []

      # Configure curve types for ECDHE based cipher suites
      #curve_types: []


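  # For orientation (assumption, not part of the original paste): the two hosts
  # above are expected to run Logstash with a beats input listening on the same
  # port, roughly
  #   input { beats { port => 5044 } }
  # so that the mail and iis-access documents published here can be received.
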
  ### File as output
  #file:
    # Path to the directory where to save the generated files. The option is mandatory.
    #path: "/tmp/filebeat"

    # Name of the generated files. The default is `filebeat` and it generates files: `filebeat`, `filebeat.1`, `filebeat.2`, etc.
    #filename: filebeat

    # Maximum size in kilobytes of each file. When this size is reached, the files are
    # rotated. The default value is 10 MB.
    #rotate_every_kb: 10000

    # Maximum number of files under path. When this number of files is reached, the
    # oldest file is deleted and the rest are shifted from last to first. The default
    # is 7 files.
    #number_of_files: 7


  ### Console output
  #console:
    # Pretty print json event
    #pretty: false


############################# Shipper #########################################

shipper:
  # The name of the shipper that publishes the network data. It can be used to group
  # all the transactions sent by a single shipper in the web interface.
  # If this option is not defined, the hostname is used.
  #name:

  # The tags of the shipper are included in their own field with each
  # transaction published. Tags make it easy to group servers by different
  # logical properties.
  #tags: ["service-X", "web-tier"]

  # Uncomment the following if you want to ignore transactions created
  # by the server on which the shipper is installed. This option is useful
  # to remove duplicates if shippers are installed on multiple servers.
  #ignore_outgoing: true

  # How often (in seconds) shippers are publishing their IPs to the topology map.
  # The default is 10 seconds.
  #refresh_topology_freq: 10

  # Expiration time (in seconds) of the IPs published by a shipper to the topology map.
  # All the IPs will be deleted afterwards. Note, that the value must be higher than
  # refresh_topology_freq. The default is 15 seconds.
  #topology_expire: 15

  # Internal queue size for single events in processing pipeline
  #queue_size: 1000

  # Configure local GeoIP database support.
  # If no paths are configured, geoip is disabled.
  #geoip:
    #paths:
    #  - "/usr/share/GeoIP/GeoLiteCity.dat"
    #  - "/usr/local/var/GeoIP/GeoLiteCity.dat"


############################# Logging #########################################

# There are three options for the log output: syslog, file, stderr.
# Under Windows systems, the log files are per default sent to the file output,
# under all other systems per default to syslog.
logging:

  # Send all logging output to syslog. On Windows default is false, otherwise
  # default is true.
  #to_syslog: true

  # Write all logging output to files. Beats automatically rotate files if rotateeverybytes
  # limit is reached.
  #to_files: false

  # To enable logging to files, the to_files option has to be set to true.
  files:
    # The directory where the log files will be written to.
    #path: /var/log/mybeat

    # The name of the files where the logs are written to.
    #name: mybeat

    # Configure log file size limit. If limit is reached, log file will be
    # automatically rotated.
    rotateeverybytes: 10485760 # = 10MB

    # Number of rotated log files to keep. Oldest files will be deleted first.
    #keepfiles: 7

  # Enable debug output for selected components. To enable all selectors use ["*"].
  # Other available selectors are beat, publish, service.
  # Multiple selectors can be chained.
  #selectors: [ ]

  # Sets log level. The default log level is error.
  # Available log levels are: critical, error, warning, info, debug
  #level: error
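
# Usage note (assumption, not part of the original paste): on the Windows host
# implied by the registry_file path above, the config is typically checked and
# run with something like
#   filebeat.exe -c filebeat.yml -configtest
#   filebeat.exe -c filebeat.yml -e
# where -c points at this file, -configtest only validates the configuration,
# and -e logs to stderr instead of the configured logging output.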