akhfa

TA: Konfigurasi Filebeat

Mar 20th, 2016
83
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
  1. ################### Filebeat Configuration Example #########################
  2.  
  3. ############################# Filebeat ######################################
  4. filebeat:
  5. # List of prospectors to fetch data.
  6. prospectors:
  7. # Each - is a prospector. Below are the prospector specific configurations
  8. -
  9. # Paths that should be crawled and fetched. Glob based paths.
  10. # To fetch all ".log" files from a specific level of subdirectories
  11. # /var/log/*/*.log can be used.
  12. # For each file found under this path, a harvester is started.
  13. # Make sure not file is defined twice as this can lead to unexpected behaviour.
  14. paths:
  15. - /var/log/secure
  16. - /var/log/messages
  17.  
  18. # Configure the file encoding for reading files with international characters
  19. # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
  20. # Some sample encodings:
  21. # plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk,
  22. # hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ...
  23. #encoding: plain
  24.  
  25. # Type of the files. Based on this the way the file is read is decided.
  26. # The different types cannot be mixed in one prospector
  27. #
  28. # Possible options are:
  29. # * log: Reads every line of the log file (default)
  30. # * stdin: Reads the standard in
  31. input_type: log
  32.  
  33. # Exclude lines. A list of regular expressions to match. It drops the lines that are
  34. # matching any regular expression from the list. The include_lines is called before
  35. # exclude_lines. By default, no lines are dropped.
  36. # exclude_lines: ["^DBG"]
  37.  
  38. # Include lines. A list of regular expressions to match. It exports the lines that are
  39. # matching any regular expression from the list. The include_lines is called before
  40. # exclude_lines. By default, all the lines are exported.
  41. # include_lines: ["^ERR", "^WARN"]
  42.  
  43. # Exclude files. A list of regular expressions to match. Filebeat drops the files that
  44. # are matching any regular expression from the list. By default, no files are dropped.
  45. # exclude_files: [".gz$"]
  46.  
  47. # Optional additional fields. These field can be freely picked
  48. # to add additional information to the crawled log files for filtering
  49. #fields:
  50. # level: debug
  51. # review: 1
  52.  
  53. # Set to true to store the additional fields as top level fields instead
  54. # of under the "fields" sub-dictionary. In case of name conflicts with the
  55. # fields added by Filebeat itself, the custom fields overwrite the default
  56. # fields.
  57. #fields_under_root: false
  58.  
  59. # Ignore files which were modified more then the defined timespan in the past
  60. # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
  61. #ignore_older: 24h
  62.  
  63. # Type to be published in the 'type' field. For Elasticsearch output,
  64. # the type defines the document type these entries should be stored
  65. # in. Default: log
  66. document_type: syslog
  67.  
  68. # Scan frequency in seconds.
  69. # How often these files should be checked for changes. In case it is set
  70. # to 0s, it is done as often as possible. Default: 10s
  71. #scan_frequency: 10s
  72.  
  73. # Defines the buffer size every harvester uses when fetching the file
  74. #harvester_buffer_size: 16384
  75.  
  76. # Maximum number of bytes a single log event can have
  77. # All bytes after max_bytes are discarded and not sent. The default is 10MB.
  78. # This is especially useful for multiline log messages which can get large.
  79. #max_bytes: 10485760
  80.  
  81. # Mutiline can be used for log messages spanning multiple lines. This is common
  82. # for Java Stack Traces or C-Line Continuation
  83. #multiline:
  84.  
  85. # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
  86. #pattern: ^\[
  87.  
  88. # Defines if the pattern set under pattern should be negated or not. Default is false.
  89. #negate: false
  90.  
  91. # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern
  92. # that was (not) matched before or after or as long as a pattern is not matched based on negate.
  93. # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash
  94. #match: after
  95.  
  96. # The maximum number of lines that are combined to one event.
  97. # In case there are more the max_lines the additional lines are discarded.
  98. # Default is 500
  99. #max_lines: 500
  100.  
  101. # After the defined timeout, an multiline event is sent even if no new pattern was found to start a new event
  102. # Default is 5s.
  103. #timeout: 5s
  104.  
  105. # Setting tail_files to true means filebeat starts readding new files at the end
  106. # instead of the beginning. If this is used in combination with log rotation
  107. # this can mean that the first entries of a new file are skipped.
  108. #tail_files: false
  109.  
  110. # Backoff values define how agressively filebeat crawls new files for updates
  111. # The default values can be used in most cases. Backoff defines how long it is waited
  112. # to check a file again after EOF is reached. Default is 1s which means the file
  113. # is checked every second if new lines were added. This leads to a near real time crawling.
  114. # Every time a new line appears, backoff is reset to the initial value.
  115. #backoff: 1s
  116.  
  117. # Max backoff defines what the maximum backoff time is. After having backed off multiple times
  118. # from checking the files, the waiting time will never exceed max_backoff idenependent of the
  119. # backoff factor. Having it set to 10s means in the worst case a new line can be added to a log
  120. # file after having backed off multiple times, it takes a maximum of 10s to read the new line
  121. #max_backoff: 10s
  122.  
  123. # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor,
  124. # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen.
  125. # The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached
  126. #backoff_factor: 2
  127.  
  128. # This option closes a file, as soon as the file name changes.
  129. # This config option is recommended on windows only. Filebeat keeps the files it's reading open. This can cause
  130. # issues when the file is removed, as the file will not be fully removed until also Filebeat closes
  131. # the reading. Filebeat closes the file handler after ignore_older. During this time no new file with the
  132. # same name can be created. Turning this feature on the other hand can lead to loss of data
  133. # on rotate files. It can happen that after file rotation the beginning of the new
  134. # file is skipped, as the reading starts at the end. We recommend to leave this option on false
  135. # but lower the ignore_older value to release files faster.
  136. #force_close_files: false
  137.  
  138. # Additional prospector
  139. #-
  140. # Configuration to use stdin input
  141. #input_type: stdin
  142.  
  143. # General filebeat configuration options
  144. #
  145. # Event count spool threshold - forces network flush if exceeded
  146. #spool_size: 2048
  147.  
  148. # Defines how often the spooler is flushed. After idle_timeout the spooler is
  149. # Flush even though spool_size is not reached.
  150. #idle_timeout: 5s
  151.  
  152. # Name of the registry file. Per default it is put in the current working
  153. # directory. In case the working directory is changed after when running
  154. # filebeat again, indexing starts from the beginning again.
  155. registry_file: /var/lib/filebeat/registry
  156.  
  157. # Full Path to directory with additional prospector configuration files. Each file must end with .yml
  158. # These config files must have the full filebeat config part inside, but only
  159. # the prospector part is processed. All global options like spool_size are ignored.
  160. # The config_dir MUST point to a different directory then where the main filebeat config file is in.
  161. #config_dir:
  162.  
  163. ###############################################################################
  164. ############################# Libbeat Config ##################################
  165. # Base config file used by all other beats for using libbeat features
  166.  
  167. ############################# Output ##########################################
  168.  
  169. # Configure what outputs to use when sending the data collected by the beat.
  170. # Multiple outputs may be used.
  171. output:
  172.  
  173. ### Elasticsearch as output
  174. #elasticsearch:
  175. # Array of hosts to connect to.
  176. # Scheme and port can be left out and will be set to the default (http and 9200)
  177. # In case you specify and additional path, the scheme is required: http://localhost:9200/path
  178. # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
  179. #hosts: ["localhost:9200"]
  180.  
  181. # Optional protocol and basic auth credentials.
  182. #protocol: "https"
  183. #username: "admin"
  184. #password: "s3cr3t"
  185.  
  186. # Number of workers per Elasticsearch host.
  187. #worker: 1
  188.  
  189. # Optional index name. The default is "filebeat" and generates
  190. # [filebeat-]YYYY.MM.DD keys.
  191. #index: "filebeat"
  192.  
  193. # Optional HTTP Path
  194. #path: "/elasticsearch"
  195.  
  196. # Proxy server url
  197. #proxy_url: http://proxy:3128
  198.  
  199. # The number of times a particular Elasticsearch index operation is attempted. If
  200. # the indexing operation doesn't succeed after this many retries, the events are
  201. # dropped. The default is 3.
  202. #max_retries: 3
  203.  
  204. # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
  205. # The default is 50.
  206. #bulk_max_size: 50
  207.  
  208. # Configure http request timeout before failing an request to Elasticsearch.
  209. #timeout: 90
  210.  
  211. # The number of seconds to wait for new events between two bulk API index requests.
  212. # If `bulk_max_size` is reached before this interval expires, addition bulk index
  213. # requests are made.
  214. #flush_interval: 1
  215.  
  216. # Boolean that sets if the topology is kept in Elasticsearch. The default is
  217. # false. This option makes sense only for Packetbeat.
  218. #save_topology: false
  219.  
  220. # The time to live in seconds for the topology information that is stored in
  221. # Elasticsearch. The default is 15 seconds.
  222. #topology_expire: 15
  223.  
  224. # tls configuration. By default is off.
  225. #tls:
  226. # List of root certificates for HTTPS server verifications
  227. #certificate_authorities: ["/etc/pki/root/ca.pem"]
  228.  
  229. # Certificate for TLS client authentication
  230. #certificate: "/etc/pki/client/cert.pem"
  231.  
  232. # Client Certificate Key
  233. #certificate_key: "/etc/pki/client/cert.key"
  234.  
  235. # Controls whether the client verifies server certificates and host name.
  236. # If insecure is set to true, all server host names and certificates will be
  237. # accepted. In this mode TLS based connections are susceptible to
  238. # man-in-the-middle attacks. Use only for testing.
  239. #insecure: true
  240.  
  241. # Configure cipher suites to be used for TLS connections
  242. #cipher_suites: []
  243.  
  244. # Configure curve types for ECDHE based cipher suites
  245. #curve_types: []
  246.  
  247. # Configure minimum TLS version allowed for connection to logstash
  248. #min_version: 1.0
  249.  
  250. # Configure maximum TLS version allowed for connection to logstash
  251. #max_version: 1.2
  252.  
  253.  
  254. ### Logstash as output
  255. logstash:
  256. # The Logstash hosts
  257. hosts: ["1.1.1.1:5044"]
  258. bulk_max_size: 1024
  259.  
  260. # Number of workers per Logstash host.
  261. #worker: 1
  262.  
  263. # Set gzip compression level.
  264. #compression_level: 3
  265.  
  266. # Optional load balance the events between the Logstash hosts
  267. #loadbalance: true
  268.  
  269. # Optional index name. The default index name depends on the each beat.
  270. # For Packetbeat, the default is set to packetbeat, for Topbeat
  271. # top topbeat and for Filebeat to filebeat.
  272. index: filebeat
  273.  
  274. # Optional TLS. By default is off.
  275. tls:
  276. # List of root certificates for HTTPS server verifications
  277. certificate_authorities: ["/etc/pki/tls/certs/logstash-forwarder.crt"]
  278.  
  279. # Certificate for TLS client authentication
  280. #certificate: "/etc/pki/client/cert.pem"
  281.  
  282. # Client Certificate Key
  283. #certificate_key: "/etc/pki/client/cert.key"
  284.  
  285. # Controls whether the client verifies server certificates and host name.
  286. # If insecure is set to true, all server host names and certificates will be
  287. # accepted. In this mode TLS based connections are susceptible to
  288. # man-in-the-middle attacks. Use only for testing.
  289. #insecure: true
  290.  
  291. # Configure cipher suites to be used for TLS connections
  292. #cipher_suites: []
  293.  
  294. # Configure curve types for ECDHE based cipher suites
  295. #curve_types: []
  296.  
  297.  
  298. ### File as output
  299. #file:
  300. # Path to the directory where to save the generated files. The option is mandatory.
  301. #path: "/tmp/filebeat"
  302.  
  303. # Name of the generated files. The default is `filebeat` and it generates files: `filebeat`, `filebeat.1`, `filebeat.2`, etc.
  304. #filename: filebeat
  305.  
  306. # Maximum size in kilobytes of each file. When this size is reached, the files are
  307. # rotated. The default value is 10 MB.
  308. #rotate_every_kb: 10000
  309.  
  310. # Maximum number of files under path. When this number of files is reached, the
  311. # oldest file is deleted and the rest are shifted from last to first. The default
  312. # is 7 files.
  313. #number_of_files: 7
  314.  
  315.  
  316. ### Console output
  317. # console:
  318. # Pretty print json event
  319. #pretty: false
  320.  
  321.  
  322. ############################# Shipper #########################################
  323.  
  324. shipper:
  325. # The name of the shipper that publishes the network data. It can be used to group
  326. # all the transactions sent by a single shipper in the web interface.
  327. # If this options is not defined, the hostname is used.
  328. #name:
  329.  
  330. # The tags of the shipper are included in their own field with each
  331. # transaction published. Tags make it easy to group servers by different
  332. # logical properties.
  333. #tags: ["service-X", "web-tier"]
  334.  
  335. # Uncomment the following if you want to ignore transactions created
  336. # by the server on which the shipper is installed. This option is useful
  337. # to remove duplicates if shippers are installed on multiple servers.
  338. #ignore_outgoing: true
  339.  
  340. # How often (in seconds) shippers are publishing their IPs to the topology map.
  341. # The default is 10 seconds.
  342. #refresh_topology_freq: 10
  343.  
  344. # Expiration time (in seconds) of the IPs published by a shipper to the topology map.
  345. # All the IPs will be deleted afterwards. Note, that the value must be higher than
  346. # refresh_topology_freq. The default is 15 seconds.
  347. #topology_expire: 15
  348.  
  349. # Internal queue size for single events in processing pipeline
  350. #queue_size: 1000
  351.  
  352. # Configure local GeoIP database support.
  353. # If no paths are not configured geoip is disabled.
  354. #geoip:
  355. #paths:
  356. # - "/usr/share/GeoIP/GeoLiteCity.dat"
  357. # - "/usr/local/var/GeoIP/GeoLiteCity.dat"
  358.  
  359.  
  360. ############################# Logging #########################################
  361.  
  362. # There are three options for the log ouput: syslog, file, stderr.
  363. # Under Windos systems, the log files are per default sent to the file output,
  364. # under all other system per default to syslog.
  365. logging:
  366.  
  367. # Send all logging output to syslog. On Windows default is false, otherwise
  368. # default is true.
  369. #to_syslog: true
  370.  
  371. # Write all logging output to files. Beats automatically rotate files if rotateeverybytes
  372. # limit is reached.
  373. #to_files: false
  374.  
  375. # To enable logging to files, to_files option has to be set to true
  376. files:
  377. # The directory where the log files will written to.
  378. path: /var/log/mybeat
  379.  
  380. # The name of the files where the logs are written to.
  381. name: mybeat
  382.  
  383. # Configure log file size limit. If limit is reached, log file will be
  384. # automatically rotated
  385. rotateeverybytes: 10485760 # = 10MB
  386.  
  387. # Number of rotated log files to keep. Oldest files will be deleted first.
  388. #keepfiles: 7
  389.  
  390. # Enable debug output for selected components. To enable all selectors use ["*"]
  391. # Other available selectors are beat, publish, service
  392. # Multiple selectors can be chained.
  393. #selectors: [ ]
  394.  
  395. # Sets log level. The default log level is error.
  396. # Available log levels are: critical, error, warning, info, debug
  397. level: error
RAW Paste Data