[root@server150 ~]# cat /etc/filebeat/filebeat.yml
###################### Filebeat Configuration Example #########################

# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html

# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.

#=========================== Filebeat inputs =============================

filebeat.inputs:

# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input specific configurations.

- type: log

  # Change to true to enable this input configuration.
  enabled: true

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - /home/cowrie/cowrie/var/log/cowrie/cowrie.json*
    #- c:\programdata\elasticsearch\logs\*
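
  # Added note: the trailing * in the glob above also picks up Cowrie's
  # rotated logs (assuming Cowrie's default date-suffixed rotation, e.g.
  # cowrie.json.2020-07-16), not just the live cowrie.json file.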

  # Exclude lines. A list of regular expressions to match. It drops the lines
  # that match any regular expression from the list.
  #exclude_lines: ['^DBG']

  # Include lines. A list of regular expressions to match. It exports the lines
  # that match any regular expression from the list.
  #include_lines: ['^ERR', '^WARN']

  # Exclude files. A list of regular expressions to match. Filebeat drops the
  # files that match any regular expression from the list. By default, no files are dropped.
  #exclude_files: ['.gz$']

  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering.
  #fields:
  #  level: debug
  #  review: 1

  ### Multiline options

  # Multiline can be used for log messages spanning multiple lines. This is common
  # for Java stack traces or C-style line continuations.

  # The regexp pattern that has to be matched. The example pattern matches all lines starting with [
  #multiline.pattern: ^\[

  # Defines if the pattern set under pattern should be negated or not. Default is false.
  #multiline.negate: false

  # Match can be set to "after" or "before". It defines whether lines are appended to a pattern
  # that was (not) matched before or after, or as long as a pattern is not matched, based on negate.
  # Note: "after" is equivalent to "previous" and "before" is equivalent to "next" in Logstash.
  #multiline.match: after
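
  # Added example (a sketch, not part of the original paste): to fold
  # continuation lines into the preceding event -- assuming each new event
  # starts with a date such as 2020-07-17 -- the three options combine as:
  #multiline.pattern: '^\d{4}-\d{2}-\d{2}'
  #multiline.negate: true
  #multiline.match: after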

#============================= Filebeat modules ===============================

filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml

  # Set to true to enable config reloading
  reload.enabled: false

  # Period on which files under path should be checked for changes
  #reload.period: 10s
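
# Added note: individual modules can be switched on from the command line with
# `filebeat modules enable <name>` (standard Filebeat CLI); none appear to be
# needed here, since this setup reads Cowrie's logs via the plain log input above.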

#==================== Elasticsearch template setting ==========================

setup.template.settings:
  index.number_of_shards: 1
  #index.codec: best_compression
  #_source.enabled: false

#================================ General =====================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:

# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output.
#fields:
#  env: staging


#============================== Dashboards =====================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here or by using the `setup` command.
#setup.dashboards.enabled: false

# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:

#============================== Kibana =====================================

# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:

  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  #host: "*public-server-ip*:5601"

  # Kibana Space ID
  # ID of the Kibana Space into which the dashboards should be loaded. By default,
  # the Default Space will be used.
  #space.id:

#============================= Elastic Cloud ==================================

# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/).

# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:

# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:

#================================ Outputs =====================================

# Configure what output to use when sending the data collected by the beat.

#-------------------------- Elasticsearch output ------------------------------
# The Elasticsearch output is declared but disabled; events go to Logstash below.
# (In the original paste `output.elasticsearch:` was commented out while
# `enabled: false` was not, leaving an invalid top-level key.)
output.elasticsearch:
  enabled: false
  # Array of hosts to connect to.
  #hosts: ["*public-server-ip*:9200"]

  # Protocol - either `http` (default) or `https`.
  #protocol: "https"

  # Authentication credentials - either API key or username/password.
  #api_key: "id:api_key"
  #username: "elastic"
  #password: "changeme"

#----------------------------- Logstash output --------------------------------
output.logstash:
  enabled: true
  # The Logstash hosts
  hosts: ["*public-server-ip*:5044"]

  # Optional SSL. By default it is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"
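
# Added note: after editing, the shipping path can be sanity-checked with
# Filebeat's built-in test commands (standard CLI, not from the original paste):
#   filebeat test config -c /etc/filebeat/filebeat.yml
#   filebeat test output -c /etc/filebeat/filebeat.yml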

#================================ Processors =====================================

# Configure processors to enhance or manipulate events generated by the beat.

processors:
  #- decode_json_fields:
  #    fields: ["message"]
  #    process_array: false
  #    max_depth: 1
  #    target: ""
  #    overwrite_keys: false
  #    add_error_key: true
  - add_host_metadata: ~
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~
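
# Added note: the decode_json_fields block above is presumably left disabled
# because the Cowrie JSON carried in the `message` field is parsed downstream
# in Logstash; enabling it here would expand the JSON inside Filebeat instead.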

#================================ Logging =====================================

# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: debug

# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
#logging.selectors: ["*"]

#============================== X-Pack Monitoring ===============================
# Filebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.

# Set to true to enable the monitoring reporter.
#monitoring.enabled: false

# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:

# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:

#================================= Migration ==================================

# Uncomment to enable the 6.7 migration aliases.
#migration.6_to_7.enabled: true

setup.template.enabled: false
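
# Added note: with setup.template.enabled set to false, Filebeat skips loading
# its index template into Elasticsearch (it could not do so through the Logstash
# output anyway), so the setup.template.settings block above has no effect;
# index templates are presumably managed on the Logstash/Elasticsearch side here.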