###################### Filebeat Configuration Example #########################

# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html

# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.

#=========================== Filebeat inputs =============================

filebeat.inputs:
# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input-specific configurations.

- type: log
  ignore_older: 120h
  close_inactive: 120h
  scan_frequency: 5s

  # Change to true to enable this input configuration.
  enabled: true

  # Paths that should be crawled and fetched. Glob-based paths.
  paths:
    - /var/log/pmta/*.csv
  #- c:\programdata\elasticsearch\logs\*

  # Exclude lines. A list of regular expressions to match. It drops the lines that
  # match any regular expression from the list.
  #exclude_lines: ['^DBG']

  # Include lines. A list of regular expressions to match. It exports the lines that
  # match any regular expression from the list.
  #include_lines: ['^ERR', '^WARN']

  # Exclude files. A list of regular expressions to match. Filebeat drops the files
  # that match any regular expression from the list. By default, no files are dropped.
  #exclude_files: ['.gz$']

  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering.
  #fields:
  #  level: debug
  #  review: 1
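  # For example, a commented-out sketch that would tag events read from the PMTA
  # CSV files above with a custom field; the field name and value here are
  # illustrative only and are not part of the original configuration.
  #fields:
  #  log_type: pmta_accounting
  #fields_under_root: false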

  ### Multiline options

  # Multiline can be used for log messages spanning multiple lines. This is common
  # for Java stack traces or C line continuations.

  # The regexp pattern that has to be matched. The example pattern matches all lines starting with [
  #multiline.pattern: ^\[

  # Defines if the pattern set under pattern should be negated or not. Default is false.
  #multiline.negate: false

  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
  # that was (not) matched before or after, or as long as a pattern is not matched, based on negate.
  # Note: "after" is the equivalent of "previous" and "before" is the equivalent of "next" in Logstash.
  #multiline.match: after
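
  # As an illustration (not part of the original configuration), the following
  # commented-out sketch, adapted from the Filebeat multiline documentation,
  # would join Java stack trace lines onto the event that precedes them:
  #multiline.pattern: '^[[:space:]]+(at|\.{3})[[:space:]]+\b|^Caused by:'
  #multiline.negate: false
  #multiline.match: after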
#============================= Filebeat modules ===============================

#filebeat.config.modules:
  # Glob pattern for configuration loading
  #path: ${path.config}/modules.d/*.yml

  # Set to true to enable config reloading
  #reload.enabled: false

  # Period on which files under path should be checked for changes
  #reload.period: 10s

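# As a commented-out illustration (not part of the original configuration),
# modules can also be enabled directly in this file, for example the system
# module with its syslog fileset:
#filebeat.modules:
#  - module: system
#    syslog:
#      enabled: true
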
#==================== Elasticsearch template setting ==========================

#setup.template.settings:
  #index.number_of_shards: 3
  #index.codec: best_compression
  #_source.enabled: false

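# For example, a commented-out sketch that would give the loaded index template
# a custom name and pattern; the values below are illustrative only and are not
# part of the original configuration:
#setup.template.name: "pmta"
#setup.template.pattern: "pmta-*"
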
#================================ General =====================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
name: server.mta8.filebeat2

# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output.
#fields:
#  env: staging

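# As a commented-out illustration (values are hypothetical, not from the
# original configuration), tags could mark events from this shipper:
#tags: ["pmta", "mta8"]
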
#============================== Dashboards =====================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here, or by using the `-setup` CLI flag or the `setup` command.
#setup.dashboards.enabled: false

# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:

#============================== Kibana =====================================

# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
#setup.kibana:

  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601).
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  #host: "localhost:5601"

  # Kibana Space ID
  # ID of the Kibana Space into which the dashboards should be loaded. By default,
  # the Default Space will be used.
  #space.id:

#============================= Elastic Cloud ==================================

# These settings simplify using filebeat with the Elastic Cloud (https://cloud.elastic.co/).

# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:

# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:

#================================ Outputs =====================================

# Configure what output to use when sending the data collected by the beat.

#-------------------------- Elasticsearch output ------------------------------
#output.elasticsearch:
  # Array of hosts to connect to.
  #hosts: ["localhost:9200"]

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"

#----------------------------- Logstash output --------------------------------
output.logstash:
  # The Logstash hosts
  hosts: ["remote.server.com:5044"]

  worker: 8 # Number of workers per Logstash host.
  bulk_max_size: 2048 # The maximum number of events to bulk in a single Logstash request.
  compression_level: 7 # Set gzip compression level.

  # Optional SSL. By default it is off.
  # List of root certificates for HTTPS server verifications
  ssl.certificate_authorities: ["/etc/pki/tls/certs/logstash-forwarder-remote.server.com.crt"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

#================================ Processors =====================================

# Configure processors to enhance or manipulate events generated by the beat.

#processors:
#  - add_host_metadata: ~
#  - add_cloud_metadata: ~

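# For example, a commented-out sketch (not part of the original configuration)
# that would drop events whose message matches a pattern, such as a repeated
# CSV header line; the regular expression here is illustrative only:
#processors:
#  - drop_event:
#      when:
#        regexp:
#          message: "^type,"
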
#================================ Logging =====================================

# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
logging.level: info
logging.to_syslog: false
logging.to_files: true
logging.files:
  path: /var/log/filebeat
  name: filebeat.log
  rotateeverybytes: 26214400 # = 25MB
  keepfiles: 3

# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
#logging.selectors: ["*"]

#============================== Xpack Monitoring ===============================
# Filebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.

# Set to true to enable the monitoring reporter.
#xpack.monitoring.enabled: false

# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well. Any setting that is not set is
# automatically inherited from the Elasticsearch output configuration, so if you
# have the Elasticsearch output configured, you can simply uncomment the
# following line.
#xpack.monitoring.elasticsearch:
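# Since this configuration ships to Logstash rather than Elasticsearch, the
# monitoring cluster would need to be given explicitly here. For example
# (placeholder host and credentials, not real values):
#  hosts: ["monitoring.example.com:9200"]
#  username: "beats_system"
#  password: "changeme"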