SHARE
TWEET

Untitled

a guest Nov 8th, 2019 79 Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})


  17. # Global tags can be specified here in key="value" format.
  18. [global_tags]
  19.   # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  20.   # rack = "1a"
  21.   ## Environment variables can be used as tags, and throughout the config file
  22.   # user = "$USER"
  23.  
  24.  
  25. # Configuration for telegraf agent
  26. [agent]
  27.   ## Default data collection interval for all inputs
  28.   interval = "10s"
  29.   ## Rounds collection interval to 'interval'
  30.   ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  31.   round_interval = true
  32.  
  33.   ## Telegraf will send metrics to outputs in batches of at most
  34.   ## metric_batch_size metrics.
  35.   ## This controls the size of writes that Telegraf sends to output plugins.
  36.   metric_batch_size = 1000
  37.  
  38.   ## Maximum number of unwritten metrics per output.
  39.   metric_buffer_limit = 10000
  40.  
  41.   ## Collection jitter is used to jitter the collection by a random amount.
  42.   ## Each plugin will sleep for a random time within jitter before collecting.
  43.   ## This can be used to avoid many plugins querying things like sysfs at the
  44.   ## same time, which can have a measurable effect on the system.
  45.   collection_jitter = "0s"
  46.  
  47.   ## Default flushing interval for all outputs. Maximum flush_interval will be
  48.   ## flush_interval + flush_jitter
  49.   flush_interval = "10s"
  50.   ## Jitter the flush interval by a random amount. This is primarily to avoid
  51.   ## large write spikes for users running a large number of telegraf instances.
  52.   ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  53.   flush_jitter = "0s"
  54.  
  55.   ## By default or when set to "0s", precision will be set to the same
  56.   ## timestamp order as the collection interval, with the maximum being 1s.
  57.   ##   ie, when interval = "10s", precision will be "1s"
  58.   ##       when interval = "250ms", precision will be "1ms"
  59.   ## Precision will NOT be used for service inputs. It is up to each individual
  60.   ## service input to set the timestamp at the appropriate precision.
  61.   ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
  62.   precision = ""
  63.  
  64.   ## Log at debug level.
  65.   # debug = false
  66.   ## Log only error level messages.
  67.   # quiet = false
  68.  
  69.   ## Log file name, the empty string means to log to stderr.
  70.   # logfile = ""
  71.  
  72.   ## The logfile will be rotated after the time interval specified.  When set
  73.   ## to 0 no time based rotation is performed.  Logs are rotated only when
  74.   ## written to, if there is no log activity rotation may be delayed.
  75.   # logfile_rotation_interval = "0d"
  76.  
  77.   ## The logfile will be rotated when it becomes larger than the specified
  78.   ## size.  When set to 0 no size based rotation is performed.
  79.   # logfile_rotation_max_size = "0MB"
  80.  
  81.   ## Maximum number of rotated archives to keep, any older logs are deleted.
  82.   ## If set to -1, no archives are removed.
  83.   # logfile_rotation_max_archives = 5
  84.  
  85.   ## Override default hostname, if empty use os.Hostname()
  86.   hostname = ""
  87.   ## If set to true, do no set the "host" tag in the telegraf agent.
  88.   omit_hostname = false
  89.  
  90.  
###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################


  96. # Configuration for sending metrics to InfluxDB
  97. [[outputs.influxdb]]
  98.   ## The full HTTP or UDP URL for your InfluxDB instance.
  99.   ##
  100.   ## Multiple URLs can be specified for a single cluster, only ONE of the
  101.   ## urls will be written to each interval.
  102.   # urls = ["unix:///var/run/influxdb.sock"]
  103.   # urls = ["udp://127.0.0.1:8089"]
  104.   # urls = ["http://127.0.0.1:8086"]
  105.   urls = ["https://influxdb.my-home.giorgosdimtsas.net"]
  106.  
  107.   ## The target database for metrics; will be created as needed.
  108.   ## For UDP url endpoint database needs to be configured on server side.
  109.   database = "telegraf"
  110.  
  111.   ## The value of this tag will be used to determine the database.  If this
  112.   ## tag is not set the 'database' option is used as the default.
  113.   # database_tag = ""
  114.  
  115.   ## If true, the database tag will not be added to the metric.
  116.   # exclude_database_tag = false
  117.  
  118.   ## If true, no CREATE DATABASE queries will be sent.  Set to true when using
  119.   ## Telegraf with a user without permissions to create databases or when the
  120.   ## database already exists.
  121.   # skip_database_creation = false
  122.  
  123.   ## Name of existing retention policy to write to.  Empty string writes to
  124.   ## the default retention policy.  Only takes effect when using HTTP.
  125.   # retention_policy = ""
  126.  
  127.   ## Write consistency (clusters only), can be: "any", "one", "quorum", "all".
  128.   ## Only takes effect when using HTTP.
  129.   # write_consistency = "any"
  130.  
  131.   ## Timeout for HTTP messages.
  132.   # timeout = "5s"
  133.  
  134.   ## HTTP Basic Auth
  135.   # username = "telegraf"
  136.   # password = "metricsmetricsmetricsmetrics"
  137.  
  138.   ## HTTP User-Agent
  139.   # user_agent = "telegraf"
  140.  
  141.   ## UDP payload size is the maximum packet size to send.
  142.   # udp_payload = "512B"
  143.  
  144.   ## Optional TLS Config for use on HTTP connections.
  145.   # tls_ca = "/etc/telegraf/ca.pem"
  146.   # tls_cert = "/etc/telegraf/cert.pem"
  147.   # tls_key = "/etc/telegraf/key.pem"
  148.   ## Use TLS but skip chain & host verification
  149.   # insecure_skip_verify = false
  150.  
  151.   ## HTTP Proxy override, if unset values the standard proxy environment
  152.   ## variables are consulted to determine which proxy, if any, should be used.
  153.   # http_proxy = "http://corporate.proxy:3128"
  154.  
  155.   ## Additional HTTP headers
  156.   # http_headers = {"X-Special-Header" = "Special-Value"}
  157.  
  158.   ## HTTP Content-Encoding for write request body, can be set to "gzip" to
  159.   ## compress body or "identity" to apply no encoding.
  160.   # content_encoding = "identity"
  161.  
  162.   ## When true, Telegraf will output unsigned integers as unsigned values,
  163.   ## i.e.: "42u".  You will need a version of InfluxDB supporting unsigned
  164.   ## integer values.  Enabling this option will result in field type errors if
  165.   ## existing data has been written.
  166.   # influx_uint_support = false
  167.  
  168.  
# # Configuration for Amon Server to send metrics to.
# [[outputs.amon]]
#   ## Amon Server Key
#   server_key = "my-server-key" # required.
#
#   ## Amon Instance URL
#   amon_instance = "https://youramoninstance" # required
#
#   ## Connection timeout.
#   # timeout = "5s"
  180.  
  181. # # Publishes metrics to an AMQP broker
  182. # [[outputs.amqp]]
  183. #   ## Broker to publish to.
  184. #   ##   deprecated in 1.7; use the brokers option
  185. #   # url = "amqp://localhost:5672/influxdb"
  186. #
  187. #   ## Brokers to publish to.  If multiple brokers are specified a random broker
  188. #   ## will be selected anytime a connection is established.  This can be
  189. #   ## helpful for load balancing when not using a dedicated load balancer.
  190. #   brokers = ["amqp://localhost:5672/influxdb"]
  191. #
  192. #   ## Maximum messages to send over a connection.  Once this is reached, the
  193. #   ## connection is closed and a new connection is made.  This can be helpful for
  194. #   ## load balancing when not using a dedicated load balancer.
  195. #   # max_messages = 0
  196. #
  197. #   ## Exchange to declare and publish to.
  198. #   exchange = "telegraf"
  199. #
  200. #   ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
  201. #   # exchange_type = "topic"
  202. #
  203. #   ## If true, exchange will be passively declared.
  204. #   # exchange_passive = false
  205. #
  206. #   ## Exchange durability can be either "transient" or "durable".
  207. #   # exchange_durability = "durable"
  208. #
  209. #   ## Additional exchange arguments.
  210. #   # exchange_arguments = { }
  211. #   # exchange_arguments = {"hash_propery" = "timestamp"}
  212. #
  213. #   ## Authentication credentials for the PLAIN auth_method.
  214. #   # username = ""
  215. #   # password = ""
  216. #
  217. #   ## Auth method. PLAIN and EXTERNAL are supported
  218. #   ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
  219. #   ## described here: https://www.rabbitmq.com/plugins.html
  220. #   # auth_method = "PLAIN"
  221. #
  222. #   ## Metric tag to use as a routing key.
  223. #   ##   ie, if this tag exists, its value will be used as the routing key
  224. #   # routing_tag = "host"
  225. #
  226. #   ## Static routing key.  Used when no routing_tag is set or as a fallback
  227. #   ## when the tag specified in routing tag is not found.
  228. #   # routing_key = ""
  229. #   # routing_key = "telegraf"
  230. #
  231. #   ## Delivery Mode controls if a published message is persistent.
  232. #   ##   One of "transient" or "persistent".
  233. #   # delivery_mode = "transient"
  234. #
  235. #   ## InfluxDB database added as a message header.
  236. #   ##   deprecated in 1.7; use the headers option
  237. #   # database = "telegraf"
  238. #
  239. #   ## InfluxDB retention policy added as a message header
  240. #   ##   deprecated in 1.7; use the headers option
  241. #   # retention_policy = "default"
  242. #
  243. #   ## Static headers added to each published message.
  244. #   # headers = { }
  245. #   # headers = {"database" = "telegraf", "retention_policy" = "default"}
  246. #
  247. #   ## Connection timeout.  If not provided, will default to 5s.  0s means no
  248. #   ## timeout (not recommended).
  249. #   # timeout = "5s"
  250. #
  251. #   ## Optional TLS Config
  252. #   # tls_ca = "/etc/telegraf/ca.pem"
  253. #   # tls_cert = "/etc/telegraf/cert.pem"
  254. #   # tls_key = "/etc/telegraf/key.pem"
  255. #   ## Use TLS but skip chain & host verification
  256. #   # insecure_skip_verify = false
  257. #
  258. #   ## If true use batch serialization format instead of line based delimiting.
  259. #   ## Only applies to data formats which are not line based such as JSON.
  260. #   ## Recommended to set to true.
  261. #   # use_batch_format = false
  262. #
  263. #   ## Content encoding for message payloads, can be set to "gzip" to or
  264. #   ## "identity" to apply no encoding.
  265. #   ##
  266. #   ## Please note that when use_batch_format = false each amqp message contains only
  267. #   ## a single metric, it is recommended to use compression with batch format
  268. #   ## for best results.
  269. #   # content_encoding = "identity"
  270. #
  271. #   ## Data format to output.
  272. #   ## Each data format has its own unique set of configuration options, read
  273. #   ## more about them here:
  274. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  275. #   # data_format = "influx"
  276.  
  277.  
  278. # # Send metrics to Azure Application Insights
  279. # [[outputs.application_insights]]
  280. #   ## Instrumentation key of the Application Insights resource.
  281. #   instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"
  282. #
  283. #   ## Timeout for closing (default: 5s).
  284. #   # timeout = "5s"
  285. #
  286. #   ## Enable additional diagnostic logging.
  287. #   # enable_diagnostic_logging = false
  288. #
  289. #   ## Context Tag Sources add Application Insights context tags to a tag value.
  290. #   ##
  291. #   ## For list of allowed context tag keys see:
  292. #   ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
  293. #   # [outputs.application_insights.context_tag_sources]
  294. #   #   "ai.cloud.role" = "kubernetes_container_name"
  295. #   #   "ai.cloud.roleInstance" = "kubernetes_pod_name"
  296.  
  297.  
  298. # # Send aggregate metrics to Azure Monitor
  299. # [[outputs.azure_monitor]]
  300. #   ## Timeout for HTTP writes.
  301. #   # timeout = "20s"
  302. #
  303. #   ## Set the namespace prefix, defaults to "Telegraf/<input-name>".
  304. #   # namespace_prefix = "Telegraf/"
  305. #
  306. #   ## Azure Monitor doesn't have a string value type, so convert string
  307. #   ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows
  308. #   ## a maximum of 10 dimensions so Telegraf will only send the first 10
  309. #   ## alphanumeric dimensions.
  310. #   # strings_as_dimensions = false
  311. #
  312. #   ## Both region and resource_id must be set or be available via the
  313. #   ## Instance Metadata service on Azure Virtual Machines.
  314. #   #
  315. #   ## Azure Region to publish metrics against.
  316. #   ##   ex: region = "southcentralus"
  317. #   # region = ""
  318. #   #
  319. #   ## The Azure Resource ID against which metric will be logged, e.g.
  320. #   ##   ex: resource_id = "/subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>"
  321. #   # resource_id = ""
  322. #
  323. #   ## Optionally, if in Azure US Government, China or other sovereign
  324. #   ## cloud environment, set appropriate REST endpoint for receiving
  325. #   ## metrics. (Note: region may be unused in this context)
  326. #   # endpoint_url = "https://monitoring.core.usgovcloudapi.net"
  327.  
  328.  
  329. # # Publish Telegraf metrics to a Google Cloud PubSub topic
  330. # [[outputs.cloud_pubsub]]
  331. #   ## Required. Name of Google Cloud Platform (GCP) Project that owns
  332. #   ## the given PubSub topic.
  333. #   project = "my-project"
  334. #
  335. #   ## Required. Name of PubSub topic to publish metrics to.
  336. #   topic = "my-topic"
  337. #
  338. #   ## Required. Data format to consume.
  339. #   ## Each data format has its own unique set of configuration options.
  340. #   ## Read more about them here:
  341. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  342. #   data_format = "influx"
  343. #
  344. #   ## Optional. Filepath for GCP credentials JSON file to authorize calls to
  345. #   ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
  346. #   ## Application Default Credentials, which is preferred.
  347. #   # credentials_file = "path/to/my/creds.json"
  348. #
  349. #   ## Optional. If true, will send all metrics per write in one PubSub message.
  350. #   # send_batched = true
  351. #
  352. #   ## The following publish_* parameters specifically configures batching
  353. #   ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read
  354. #   ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings
  355. #
  356. #   ## Optional. Send a request to PubSub (i.e. actually publish a batch)
  357. #   ## when it has this many PubSub messages. If send_batched is true,
  358. #   ## this is ignored and treated as if it were 1.
  359. #   # publish_count_threshold = 1000
  360. #
  361. #   ## Optional. Send a request to PubSub (i.e. actually publish a batch)
  362. #   ## when it has this many PubSub messages. If send_batched is true,
  363. #   ## this is ignored and treated as if it were 1
  364. #   # publish_byte_threshold = 1000000
  365. #
  366. #   ## Optional. Specifically configures requests made to the PubSub API.
  367. #   # publish_num_go_routines = 2
  368. #
  369. #   ## Optional. Specifies a timeout for requests to the PubSub API.
  370. #   # publish_timeout = "30s"
  371. #
  372. #   ## Optional. If true, published PubSub message data will be base64-encoded.
  373. #   # base64_data = false
  374. #
  375. #   ## Optional. PubSub attributes to add to metrics.
  376. #   # [[inputs.pubsub.attributes]]
  377. #   #   my_attr = "tag_value"
  378.  
  379.  
  380. # # Configuration for AWS CloudWatch output.
  381. # [[outputs.cloudwatch]]
  382. #   ## Amazon REGION
  383. #   region = "us-east-1"
  384. #
  385. #   ## Amazon Credentials
  386. #   ## Credentials are loaded in the following order
  387. #   ## 1) Assumed credentials via STS if role_arn is specified
  388. #   ## 2) explicit credentials from 'access_key' and 'secret_key'
  389. #   ## 3) shared profile from 'profile'
  390. #   ## 4) environment variables
  391. #   ## 5) shared credentials file
  392. #   ## 6) EC2 Instance Profile
  393. #   #access_key = ""
  394. #   #secret_key = ""
  395. #   #token = ""
  396. #   #role_arn = ""
  397. #   #profile = ""
  398. #   #shared_credential_file = ""
  399. #
  400. #   ## Endpoint to make request against, the correct endpoint is automatically
  401. #   ## determined and this option should only be set if you wish to override the
  402. #   ## default.
  403. #   ##   ex: endpoint_url = "http://localhost:8000"
  404. #   # endpoint_url = ""
  405. #
  406. #   ## Namespace for the CloudWatch MetricDatums
  407. #   namespace = "InfluxData/Telegraf"
  408. #
  409. #   ## If you have a large amount of metrics, you should consider to send statistic
  410. #   ## values instead of raw metrics which could not only improve performance but
  411. #   ## also save AWS API cost. If enable this flag, this plugin would parse the required
  412. #   ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch.
  413. #   ## You could use basicstats aggregator to calculate those fields. If not all statistic
  414. #   ## fields are available, all fields would still be sent as raw metrics.
  415. #   # write_statistics = false
  416.  
  417.  
# # Configuration for CrateDB to send metrics to.
# [[outputs.cratedb]]
#   # A github.com/jackc/pgx connection string.
#   # See https://godoc.org/github.com/jackc/pgx#ParseDSN
#   url = "postgres://user:password@localhost/schema?sslmode=disable"
#   # Timeout for all CrateDB queries.
#   timeout = "5s"
#   # Name of the table to store metrics in.
#   table = "metrics"
#   # If true, and the metrics table does not exist, create it automatically.
#   table_create = true

  430.  
# # Configuration for DataDog API to send metrics to.
# [[outputs.datadog]]
#   ## Datadog API key
#   apikey = "my-secret-key" # required.
#
#   # The base endpoint URL can optionally be specified but it defaults to:
#   #url = "https://app.datadoghq.com/api/v1/series"
#
#   ## Connection timeout.
#   # timeout = "5s"

  442.  
# # Send metrics to nowhere at all
# [[outputs.discard]]
#   # no configuration

  447.  
  448. # # Configuration for Elasticsearch to send metrics to.
  449. # [[outputs.elasticsearch]]
  450. #   ## The full HTTP endpoint URL for your Elasticsearch instance
  451. #   ## Multiple urls can be specified as part of the same cluster,
  452. #   ## this means that only ONE of the urls will be written to each interval.
  453. #   urls = [ "http://node1.es.example.com:9200" ] # required.
  454. #   ## Elasticsearch client timeout, defaults to "5s" if not set.
  455. #   timeout = "5s"
  456. #   ## Set to true to ask Elasticsearch a list of all cluster nodes,
  457. #   ## thus it is not necessary to list all nodes in the urls config option.
  458. #   enable_sniffer = false
  459. #   ## Set the interval to check if the Elasticsearch nodes are available
  460. #   ## Setting to "0s" will disable the health check (not recommended in production)
  461. #   health_check_interval = "10s"
  462. #   ## HTTP basic authentication details
  463. #   # username = "telegraf"
  464. #   # password = "mypassword"
  465. #
  466. #   ## Index Config
  467. #   ## The target index for metrics (Elasticsearch will create if it not exists).
  468. #   ## You can use the date specifiers below to create indexes per time frame.
  469. #   ## The metric timestamp will be used to decide the destination index name
  470. #   # %Y - year (2016)
  471. #   # %y - last two digits of year (00..99)
  472. #   # %m - month (01..12)
  473. #   # %d - day of month (e.g., 01)
  474. #   # %H - hour (00..23)
  475. #   # %V - week of the year (ISO week) (01..53)
  476. #   ## Additionally, you can specify a tag name using the notation {{tag_name}}
  477. #   ## which will be used as part of the index name. If the tag does not exist,
  478. #   ## the default tag value will be used.
  479. #   # index_name = "telegraf-{{host}}-%Y.%m.%d"
  480. #   # default_tag_value = "none"
  481. #   index_name = "telegraf-%Y.%m.%d" # required.
  482. #
  483. #   ## Optional TLS Config
  484. #   # tls_ca = "/etc/telegraf/ca.pem"
  485. #   # tls_cert = "/etc/telegraf/cert.pem"
  486. #   # tls_key = "/etc/telegraf/key.pem"
  487. #   ## Use TLS but skip chain & host verification
  488. #   # insecure_skip_verify = false
  489. #
  490. #   ## Template Config
  491. #   ## Set to true if you want telegraf to manage its index template.
  492. #   ## If enabled it will create a recommended index template for telegraf indexes
  493. #   manage_template = true
  494. #   ## The template name used for telegraf indexes
  495. #   template_name = "telegraf"
  496. #   ## Set to true if you want telegraf to overwrite an existing template
  497. #   overwrite_template = false
  498.  
  499.  
# # Send metrics to command as input over stdin
# [[outputs.exec]]
#   ## Command to ingest metrics via stdin.
#   command = ["tee", "-a", "/dev/null"]
#
#   ## Timeout for command to complete.
#   # timeout = "5s"
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   # data_format = "influx"

  514.  
  515. # # Send telegraf metrics to file(s)
  516. # [[outputs.file]]
  517. #   ## Files to write to, "stdout" is a specially handled file.
  518. #   files = ["stdout", "/tmp/metrics.out"]
  519. #
  520. #   ## The file will be rotated after the time interval specified.  When set
  521. #   ## to 0 no time based rotation is performed.
  522. #   # rotation_interval = "0d"
  523. #
  524. #   ## The logfile will be rotated when it becomes larger than the specified
  525. #   ## size.  When set to 0 no size based rotation is performed.
  526. #   # rotation_max_size = "0MB"
  527. #
  528. #   ## Maximum number of rotated archives to keep, any older logs are deleted.
  529. #   ## If set to -1, no archives are removed.
  530. #   # rotation_max_archives = 5
  531. #
  532. #   ## Data format to output.
  533. #   ## Each data format has its own unique set of configuration options, read
  534. #   ## more about them here:
  535. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  536. #   data_format = "influx"
  537.  
  538.  
  539. # # Configuration for Graphite server to send metrics to
  540. # [[outputs.graphite]]
  541. #   ## TCP endpoint for your graphite instance.
  542. #   ## If multiple endpoints are configured, output will be load balanced.
  543. #   ## Only one of the endpoints will be written to with each iteration.
  544. #   servers = ["localhost:2003"]
  545. #   ## Prefix metrics name
  546. #   prefix = ""
  547. #   ## Graphite output template
  548. #   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  549. #   template = "host.tags.measurement.field"
  550. #
  551. #   ## Enable Graphite tags support
  552. #   # graphite_tag_support = false
  553. #
  554. #   ## timeout in seconds for the write connection to graphite
  555. #   timeout = 2
  556. #
  557. #   ## Optional TLS Config
  558. #   # tls_ca = "/etc/telegraf/ca.pem"
  559. #   # tls_cert = "/etc/telegraf/cert.pem"
  560. #   # tls_key = "/etc/telegraf/key.pem"
  561. #   ## Use TLS but skip chain & host verification
  562. #   # insecure_skip_verify = false
  563.  
  564.  
# # Send telegraf metrics to graylog(s)
# [[outputs.graylog]]
#   ## UDP endpoint for your graylog instance.
#   servers = ["127.0.0.1:12201", "192.168.1.1:12201"]

  570.  
  571. # # Configurable HTTP health check resource based on metrics
  572. # [[outputs.health]]
  573. #   ## Address and port to listen on.
  574. #   ##   ex: service_address = "http://localhost:8080"
  575. #   ##       service_address = "unix:///var/run/telegraf-health.sock"
  576. #   # service_address = "http://:8080"
  577. #
  578. #   ## The maximum duration for reading the entire request.
  579. #   # read_timeout = "5s"
  580. #   ## The maximum duration for writing the entire response.
  581. #   # write_timeout = "5s"
  582. #
  583. #   ## Username and password to accept for HTTP basic authentication.
  584. #   # basic_username = "user1"
  585. #   # basic_password = "secret"
  586. #
  587. #   ## Allowed CA certificates for client certificates.
  588. #   # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
  589. #
  590. #   ## TLS server certificate and private key.
  591. #   # tls_cert = "/etc/telegraf/cert.pem"
  592. #   # tls_key = "/etc/telegraf/key.pem"
  593. #
  594. #   ## One or more check sub-tables should be defined, it is also recommended to
  595. #   ## use metric filtering to limit the metrics that flow into this output.
  596. #   ##
  597. #   ## When using the default buffer sizes, this example will fail when the
  598. #   ## metric buffer is half full.
  599. #   ##
  600. #   ## namepass = ["internal_write"]
  601. #   ## tagpass = { output = ["influxdb"] }
  602. #   ##
  603. #   ## [[outputs.health.compares]]
  604. #   ##   field = "buffer_size"
  605. #   ##   lt = 5000.0
  606. #   ##
  607. #   ## [[outputs.health.contains]]
  608. #   ##   field = "buffer_size"
  609.  
  610.  
  611. # # A plugin that can transmit metrics over HTTP
  612. # [[outputs.http]]
  613. #   ## URL is the address to send metrics to
  614. #   url = "http://127.0.0.1:8080/telegraf"
  615. #
  616. #   ## Timeout for HTTP message
  617. #   # timeout = "5s"
  618. #
  619. #   ## HTTP method, one of: "POST" or "PUT"
  620. #   # method = "POST"
  621. #
  622. #   ## HTTP Basic Auth credentials
  623. #   # username = "username"
  624. #   # password = "pa$$word"
  625. #
  626. #   ## OAuth2 Client Credentials Grant
  627. #   # client_id = "clientid"
  628. #   # client_secret = "secret"
  629. #   # token_url = "https://indentityprovider/oauth2/v1/token"
  630. #   # scopes = ["urn:opc:idm:__myscopes__"]
  631. #
  632. #   ## Optional TLS Config
  633. #   # tls_ca = "/etc/telegraf/ca.pem"
  634. #   # tls_cert = "/etc/telegraf/cert.pem"
  635. #   # tls_key = "/etc/telegraf/key.pem"
  636. #   ## Use TLS but skip chain & host verification
  637. #   # insecure_skip_verify = false
  638. #
  639. #   ## Data format to output.
  640. #   ## Each data format has it's own unique set of configuration options, read
  641. #   ## more about them here:
  642. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  643. #   # data_format = "influx"
  644. #
  645. #   ## HTTP Content-Encoding for write request body, can be set to "gzip" to
  646. #   ## compress body or "identity" to apply no encoding.
  647. #   # content_encoding = "identity"
  648. #
  649. #   ## Additional HTTP headers
  650. #   # [outputs.http.headers]
  651. #   #   # Should be set manually to "application/json" for json data_format
  652. #   #   Content-Type = "text/plain; charset=utf-8"
  653.  
  654.  
  655. # # Configuration for sending metrics to InfluxDB
  656. # [[outputs.influxdb_v2]]
  657. #   ## The URLs of the InfluxDB cluster nodes.
  658. #   ##
  659. #   ## Multiple URLs can be specified for a single cluster, only ONE of the
  660. #   ## urls will be written to each interval.
  661. #   urls = ["http://127.0.0.1:9999"]
  662. #
  663. #   ## Token for authentication.
  664. #   token = ""
  665. #
  666. #   ## Organization is the name of the organization you wish to write to; must exist.
  667. #   organization = ""
  668. #
  669. #   ## Destination bucket to write into.
  670. #   bucket = ""
  671. #
  672. #   ## The value of this tag will be used to determine the bucket.  If this
  673. #   ## tag is not set the 'bucket' option is used as the default.
  674. #   # bucket_tag = ""
  675. #
  676. #   ## If true, the bucket tag will not be added to the metric.
  677. #   # exclude_bucket_tag = false
  678. #
  679. #   ## Timeout for HTTP messages.
  680. #   # timeout = "5s"
  681. #
  682. #   ## Additional HTTP headers
  683. #   # http_headers = {"X-Special-Header" = "Special-Value"}
  684. #
  685. #   ## HTTP Proxy override, if unset values the standard proxy environment
  686. #   ## variables are consulted to determine which proxy, if any, should be used.
  687. #   # http_proxy = "http://corporate.proxy:3128"
  688. #
  689. #   ## HTTP User-Agent
  690. #   # user_agent = "telegraf"
  691. #
  692. #   ## Content-Encoding for write request body, can be set to "gzip" to
  693. #   ## compress body or "identity" to apply no encoding.
  694. #   # content_encoding = "gzip"
  695. #
  696. #   ## Enable or disable uint support for writing uints influxdb 2.0.
  697. #   # influx_uint_support = false
  698. #
  699. #   ## Optional TLS Config for use on HTTP connections.
  700. #   # tls_ca = "/etc/telegraf/ca.pem"
  701. #   # tls_cert = "/etc/telegraf/cert.pem"
  702. #   # tls_key = "/etc/telegraf/key.pem"
  703. #   ## Use TLS but skip chain & host verification
  704. #   # insecure_skip_verify = false
  705.  
  706.  
# # Configuration for sending metrics to an Instrumental project
# [[outputs.instrumental]]
#   ## Project API Token (required)
#   api_token = "API Token" # required
#   ## Prefix the metrics with a given name
#   prefix = ""
#   ## Stats output template (Graphite formatting)
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
#   template = "host.tags.measurement.field"
#   ## Timeout in seconds to connect
#   timeout = "2s"
#   ## Display Communication to Instrumental
#   debug = false

  721.  
  722. # # Configuration for the Kafka server to send metrics to
  723. # [[outputs.kafka]]
  724. #   ## URLs of kafka brokers
  725. #   brokers = ["localhost:9092"]
  726. #   ## Kafka topic for producer messages
  727. #   topic = "telegraf"
  728. #
  729. #   ## Optional Client id
  730. #   # client_id = "Telegraf"
  731. #
  732. #   ## Set the minimal supported Kafka version.  Setting this enables the use of new
  733. #   ## Kafka features and APIs.  Of particular interest, lz4 compression
  734. #   ## requires at least version 0.10.0.0.
  735. #   ##   ex: version = "1.1.0"
  736. #   # version = ""
  737. #
  738. #   ## Optional topic suffix configuration.
  739. #   ## If the section is omitted, no suffix is used.
  740. #   ## Following topic suffix methods are supported:
  741. #   ##   measurement - suffix equals to separator + measurement's name
  742. #   ##   tags        - suffix equals to separator + specified tags' values
  743. #   ##                 interleaved with separator
  744. #
  745. #   ## Suffix equals to "_" + measurement name
  746. #   # [outputs.kafka.topic_suffix]
  747. #   #   method = "measurement"
  748. #   #   separator = "_"
  749. #
  750. #   ## Suffix equals to "__" + measurement's "foo" tag value.
  751. #   ##   If there's no such tag, suffix equals to an empty string
  752. #   # [outputs.kafka.topic_suffix]
  753. #   #   method = "tags"
  754. #   #   keys = ["foo"]
  755. #   #   separator = "__"
  756. #
  757. #   ## Suffix equals to "_" + measurement's "foo" and "bar"
  758. #   ##   tag values, separated by "_". If there are no such tags,
  759. #   ##   their values are treated as empty strings.
  760. #   # [outputs.kafka.topic_suffix]
  761. #   #   method = "tags"
  762. #   #   keys = ["foo", "bar"]
  763. #   #   separator = "_"
  764. #
  765. #   ## Telegraf tag to use as a routing key
  766. #   ##  ie, if this tag exists, its value will be used as the routing key
  767. #   routing_tag = "host"
  768. #
  769. #   ## Static routing key.  Used when no routing_tag is set or as a fallback
  770. #   ## when the tag specified in routing tag is not found.  If set to "random",
  771. #   ## a random value will be generated for each message.
  772. #   ##   ex: routing_key = "random"
  773. #   ##       routing_key = "telegraf"
  774. #   # routing_key = ""
  775. #
  776. #   ## CompressionCodec represents the various compression codecs recognized by
  777. #   ## Kafka in messages.
  778. #   ##  0 : No compression
  779. #   ##  1 : Gzip compression
  780. #   ##  2 : Snappy compression
  781. #   ##  3 : LZ4 compression
  782. #   # compression_codec = 0
  783. #
  784. #   ##  RequiredAcks is used in Produce Requests to tell the broker how many
  785. #   ##  replica acknowledgements it must see before responding
  786. #   ##   0 : the producer never waits for an acknowledgement from the broker.
  787. #   ##       This option provides the lowest latency but the weakest durability
  788. #   ##       guarantees (some data will be lost when a server fails).
  789. #   ##   1 : the producer gets an acknowledgement after the leader replica has
  790. #   ##       received the data. This option provides better durability as the
  791. #   ##       client waits until the server acknowledges the request as successful
  792. #   ##       (only messages that were written to the now-dead leader but not yet
  793. #   ##       replicated will be lost).
  794. #   ##   -1: the producer gets an acknowledgement after all in-sync replicas have
  795. #   ##       received the data. This option provides the best durability, we
  796. #   ##       guarantee that no messages will be lost as long as at least one in
  797. #   ##       sync replica remains.
  798. #   # required_acks = -1
  799. #
  800. #   ## The maximum number of times to retry sending a metric before failing
  801. #   ## until the next flush.
  802. #   # max_retry = 3
  803. #
  804. #   ## The maximum permitted size of a message. Should be set equal to or
  805. #   ## smaller than the broker's 'message.max.bytes'.
  806. #   # max_message_bytes = 1000000
  807. #
  808. #   ## Optional TLS Config
  809. #   # tls_ca = "/etc/telegraf/ca.pem"
  810. #   # tls_cert = "/etc/telegraf/cert.pem"
  811. #   # tls_key = "/etc/telegraf/key.pem"
  812. #   ## Use TLS but skip chain & host verification
  813. #   # insecure_skip_verify = false
  814. #
  815. #   ## Optional SASL Config
  816. #   # sasl_username = "kafka"
  817. #   # sasl_password = "secret"
  818. #
  819. #   ## Data format to output.
  820. #   ## Each data format has its own unique set of configuration options, read
  821. #   ## more about them here:
  822. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  823. #   # data_format = "influx"
  824.  
  825.  
  826. # # Configuration for the AWS Kinesis output.
  827. # [[outputs.kinesis]]
  828. #   ## Amazon REGION of kinesis endpoint.
  829. #   region = "ap-southeast-2"
  830. #
  831. #   ## Amazon Credentials
  832. #   ## Credentials are loaded in the following order
  833. #   ## 1) Assumed credentials via STS if role_arn is specified
  834. #   ## 2) explicit credentials from 'access_key' and 'secret_key'
  835. #   ## 3) shared profile from 'profile'
  836. #   ## 4) environment variables
  837. #   ## 5) shared credentials file
  838. #   ## 6) EC2 Instance Profile
  839. #   #access_key = ""
  840. #   #secret_key = ""
  841. #   #token = ""
  842. #   #role_arn = ""
  843. #   #profile = ""
  844. #   #shared_credential_file = ""
  845. #
  846. #   ## Endpoint to make request against, the correct endpoint is automatically
  847. #   ## determined and this option should only be set if you wish to override the
  848. #   ## default.
  849. #   ##   ex: endpoint_url = "http://localhost:8000"
  850. #   # endpoint_url = ""
  851. #
  852. #   ## Kinesis StreamName must exist prior to starting telegraf.
  853. #   streamname = "StreamName"
  854. #   ## DEPRECATED: PartitionKey is used for sharding data.
  855. #   partitionkey = "PartitionKey"
  856. #   ## DEPRECATED: If set, the partitionKey will be a random UUID on every put.
  857. #   ## This allows for scaling across multiple shards in a stream.
  858. #   ## This will cause issues with ordering.
  859. #   use_random_partitionkey = false
  860. #   ## The partition key can be calculated using one of several methods:
  861. #   ##
  862. #   ## Use a static value for all writes:
  863. #   #  [outputs.kinesis.partition]
  864. #   #    method = "static"
  865. #   #    key = "howdy"
  866. #   #
  867. #   ## Use a random partition key on each write:
  868. #   #  [outputs.kinesis.partition]
  869. #   #    method = "random"
  870. #   #
  871. #   ## Use the measurement name as the partition key:
  872. #   #  [outputs.kinesis.partition]
  873. #   #    method = "measurement"
  874. #   #
  875. #   ## Use the value of a tag for all writes, if the tag is not set the empty
  876. #   ## default option will be used. When no default, defaults to "telegraf"
  877. #   #  [outputs.kinesis.partition]
  878. #   #    method = "tag"
  879. #   #    key = "host"
  880. #   #    default = "mykey"
  881. #
  882. #
  883. #   ## Data format to output.
  884. #   ## Each data format has its own unique set of configuration options, read
  885. #   ## more about them here:
  886. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  887. #   data_format = "influx"
  888. #
  889. #   ## debug will show upstream aws messages.
  890. #   debug = false
  891.  
  892.  
  893. # # Configuration for Librato API to send metrics to.
  894. # [[outputs.librato]]
  895. #   ## Librato API Docs
  896. #   ## http://dev.librato.com/v1/metrics-authentication
  897. #   ## Librato API user
  898. #   api_user = "telegraf@influxdb.com" # required.
  899. #   ## Librato API token
  900. #   api_token = "my-secret-token" # required.
  901. #   ## Debug
  902. #   # debug = false
  903. #   ## Connection timeout.
  904. #   # timeout = "5s"
  905. #   ## Output source Template (same as graphite buckets)
  906. #   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
  907. #   ## This template is used in librato's source (not metric's name)
  908. #   template = "host"
  909. #
  910.  
  911.  
  912. # # Configuration for MQTT server to send metrics to
  913. # [[outputs.mqtt]]
  914. #   servers = ["localhost:1883"] # required.
  915. #
  916. #   ## MQTT outputs send metrics to this topic format
  917. #   ##    "<topic_prefix>/<hostname>/<pluginname>/"
  918. #   ##   ex: prefix/web01.example.com/mem
  919. #   topic_prefix = "telegraf"
  920. #
  921. #   ## QoS policy for messages
  922. #   ##   0 = at most once
  923. #   ##   1 = at least once
  924. #   ##   2 = exactly once
  925. #   # qos = 2
  926. #
  927. #   ## username and password to connect MQTT server.
  928. #   # username = "telegraf"
  929. #   # password = "metricsmetricsmetricsmetrics"
  930. #
  931. #   ## client ID, if not set a random ID is generated
  932. #   # client_id = ""
  933. #
  934. #   ## Timeout for write operations. default: 5s
  935. #   # timeout = "5s"
  936. #
  937. #   ## Optional TLS Config
  938. #   # tls_ca = "/etc/telegraf/ca.pem"
  939. #   # tls_cert = "/etc/telegraf/cert.pem"
  940. #   # tls_key = "/etc/telegraf/key.pem"
  941. #   ## Use TLS but skip chain & host verification
  942. #   # insecure_skip_verify = false
  943. #
  944. #   ## When true, metrics will be sent in one MQTT message per flush.  Otherwise,
  945. #   ## metrics are written one metric per MQTT message.
  946. #   # batch = false
  947. #
  948. #   ## When true, metric will have RETAIN flag set, making broker cache entries until someone
  949. #   ## actually reads it
  950. #   # retain = false
  951. #
  952. #   ## Data format to output.
  953. #   ## Each data format has its own unique set of configuration options, read
  954. #   ## more about them here:
  955. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  956. #   data_format = "influx"
  957.  
  958.  
  959. # # Send telegraf measurements to NATS
  960. # [[outputs.nats]]
  961. #   ## URLs of NATS servers
  962. #   servers = ["nats://localhost:4222"]
  963. #   ## Optional credentials
  964. #   # username = ""
  965. #   # password = ""
  966. #   ## NATS subject for producer messages
  967. #   subject = "telegraf"
  968. #
  969. #   ## Use Transport Layer Security
  970. #   # secure = false
  971. #
  972. #   ## Optional TLS Config
  973. #   # tls_ca = "/etc/telegraf/ca.pem"
  974. #   # tls_cert = "/etc/telegraf/cert.pem"
  975. #   # tls_key = "/etc/telegraf/key.pem"
  976. #   ## Use TLS but skip chain & host verification
  977. #   # insecure_skip_verify = false
  978. #
  979. #   ## Data format to output.
  980. #   ## Each data format has its own unique set of configuration options, read
  981. #   ## more about them here:
  982. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  983. #   data_format = "influx"
  984.  
  985.  
  986. # # Send telegraf measurements to NSQD
  987. # [[outputs.nsq]]
  988. #   ## Location of nsqd instance listening on TCP
  989. #   server = "localhost:4150"
  990. #   ## NSQ topic for producer messages
  991. #   topic = "telegraf"
  992. #
  993. #   ## Data format to output.
  994. #   ## Each data format has its own unique set of configuration options, read
  995. #   ## more about them here:
  996. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
  997. #   data_format = "influx"
  998.  
  999.  
  1000. # # Configuration for OpenTSDB server to send metrics to
  1001. # [[outputs.opentsdb]]
  1002. #   ## prefix for metrics keys
  1003. #   prefix = "my.specific.prefix."
  1004. #
  1005. #   ## DNS name of the OpenTSDB server
  1006. #   ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
  1007. #   ## telnet API. "http://opentsdb.example.com" will use the Http API.
  1008. #   host = "opentsdb.example.com"
  1009. #
  1010. #   ## Port of the OpenTSDB server
  1011. #   port = 4242
  1012. #
  1013. #   ## Number of data points to send to OpenTSDB in Http requests.
  1014. #   ## Not used with telnet API.
  1015. #   http_batch_size = 50
  1016. #
  1017. #   ## URI Path for Http requests to OpenTSDB.
  1018. #   ## Used in cases where OpenTSDB is located behind a reverse proxy.
  1019. #   http_path = "/api/put"
  1020. #
  1021. #   ## Debug true - Prints OpenTSDB communication
  1022. #   debug = false
  1023. #
  1024. #   ## Separator separates measurement name from field
  1025. #   separator = "_"
  1026.  
  1027.  
  1028. # # Configuration for the Prometheus client to spawn
  1029. # [[outputs.prometheus_client]]
  1030. #   ## Address to listen on
  1031. #   listen = ":9273"
  1032. #
  1033. #   ## Use HTTP Basic Authentication.
  1034. #   # basic_username = "Foo"
  1035. #   # basic_password = "Bar"
  1036. #
  1037. #   ## If set, the IP Ranges which are allowed to access metrics.
  1038. #   ##   ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"]
  1039. #   # ip_range = []
  1040. #
  1041. #   ## Path to publish the metrics on.
  1042. #   # path = "/metrics"
  1043. #
  1044. #   ## Expiration interval for each metric. 0 == no expiration
  1045. #   # expiration_interval = "60s"
  1046. #
  1047. #   ## Collectors to enable, valid entries are "gocollector" and "process".
  1048. #   ## If unset, both are enabled.
  1049. #   # collectors_exclude = ["gocollector", "process"]
  1050. #
  1051. #   ## Send string metrics as Prometheus labels.
  1052. #   ## Unless set to false all string metrics will be sent as labels.
  1053. #   # string_as_label = true
  1054. #
  1055. #   ## If set, enable TLS with the given certificate.
  1056. #   # tls_cert = "/etc/ssl/telegraf.crt"
  1057. #   # tls_key = "/etc/ssl/telegraf.key"
  1058. #
  1059. #   ## Set one or more allowed client CA certificate file names to
  1060. #   ## enable mutually authenticated TLS connections
  1061. #   # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
  1062. #
  1063. #   ## Export metric collection time.
  1064. #   # export_timestamp = false
  1065.  
  1066.  
  1067. # # Configuration for the Riemann server to send metrics to
  1068. # [[outputs.riemann]]
  1069. #   ## The full TCP or UDP URL of the Riemann server
  1070. #   url = "tcp://localhost:5555"
  1071. #
  1072. #   ## Riemann event TTL, floating-point time in seconds.
  1073. #   ## Defines how long that an event is considered valid for in Riemann
  1074. #   # ttl = 30.0
  1075. #
  1076. #   ## Separator to use between measurement and field name in Riemann service name
  1077. #   ## This does not have any effect if 'measurement_as_attribute' is set to 'true'
  1078. #   separator = "/"
  1079. #
  1080. #   ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name
  1081. #   # measurement_as_attribute = false
  1082. #
  1083. #   ## Send string metrics as Riemann event states.
  1084. #   ## Unless enabled all string metrics will be ignored
  1085. #   # string_as_state = false
  1086. #
  1087. #   ## A list of tag keys whose values get sent as Riemann tags.
  1088. #   ## If empty, all Telegraf tag values will be sent as tags
  1089. #   # tag_keys = ["telegraf","custom_tag"]
  1090. #
  1091. #   ## Additional Riemann tags to send.
  1092. #   # tags = ["telegraf-output"]
  1093. #
  1094. #   ## Description for Riemann event
  1095. #   # description_text = "metrics collected from telegraf"
  1096. #
  1097. #   ## Riemann client write timeout, defaults to "5s" if not set.
  1098. #   # timeout = "5s"
  1099.  
  1100.  
  1101. # # Configuration for the Riemann server to send metrics to
  1102. # [[outputs.riemann_legacy]]
  1103. #   ## URL of server
  1104. #   url = "localhost:5555"
  1105. #   ## transport protocol to use either tcp or udp
  1106. #   transport = "tcp"
  1107. #   ## separator to use between input name and field name in Riemann service name
  1108. #   separator = " "
  1109.  
  1110.  
  1111. # # Generic socket writer capable of handling multiple socket types.
  1112. # [[outputs.socket_writer]]
  1113. #   ## URL to connect to
  1114. #   # address = "tcp://127.0.0.1:8094"
  1115. #   # address = "tcp://example.com:http"
  1116. #   # address = "tcp4://127.0.0.1:8094"
  1117. #   # address = "tcp6://127.0.0.1:8094"
  1118. #   # address = "tcp6://[2001:db8::1]:8094"
  1119. #   # address = "udp://127.0.0.1:8094"
  1120. #   # address = "udp4://127.0.0.1:8094"
  1121. #   # address = "udp6://127.0.0.1:8094"
  1122. #   # address = "unix:///tmp/telegraf.sock"
  1123. #   # address = "unixgram:///tmp/telegraf.sock"
  1124. #
  1125. #   ## Optional TLS Config
  1126. #   # tls_ca = "/etc/telegraf/ca.pem"
  1127. #   # tls_cert = "/etc/telegraf/cert.pem"
  1128. #   # tls_key = "/etc/telegraf/key.pem"
  1129. #   ## Use TLS but skip chain & host verification
  1130. #   # insecure_skip_verify = false
  1131. #
  1132. #   ## Period between keep alive probes.
  1133. #   ## Only applies to TCP sockets.
  1134. #   ## 0 disables keep alive probes.
  1135. #   ## Defaults to the OS configuration.
  1136. #   # keep_alive_period = "5m"
  1137. #
  1138. #   ## Data format to generate.
  1139. #   ## Each data format has its own unique set of configuration options, read
  1140. #   ## more about them here:
  1141. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  1142. #   # data_format = "influx"
  1143.  
  1144.  
  1145. # # Configuration for Google Cloud Stackdriver to send metrics to
  1146. # [[outputs.stackdriver]]
  1147. #   ## GCP Project
  1148. #   project = "erudite-bloom-151019"
  1149. #
  1150. #   ## The namespace for the metric descriptor
  1151. #   namespace = "telegraf"
  1152. #
  1153. #   ## Custom resource type
  1154. #   # resource_type = "generic_node"
  1155. #
  1156. #   ## Additional resource labels
  1157. #   # [outputs.stackdriver.resource_labels]
  1158. #   #   node_id = "$HOSTNAME"
  1159. #   #   namespace = "myapp"
  1160. #   #   location = "eu-north0"
  1161.  
  1162.  
  1163. # # Configuration for Syslog server to send metrics to
  1164. # [[outputs.syslog]]
  1165. #   ## URL to connect to
  1166. #   ## ex: address = "tcp://127.0.0.1:8094"
  1167. #   ## ex: address = "tcp4://127.0.0.1:8094"
  1168. #   ## ex: address = "tcp6://127.0.0.1:8094"
  1169. #   ## ex: address = "tcp6://[2001:db8::1]:8094"
  1170. #   ## ex: address = "udp://127.0.0.1:8094"
  1171. #   ## ex: address = "udp4://127.0.0.1:8094"
  1172. #   ## ex: address = "udp6://127.0.0.1:8094"
  1173. #   address = "tcp://127.0.0.1:8094"
  1174. #
  1175. #   ## Optional TLS Config
  1176. #   # tls_ca = "/etc/telegraf/ca.pem"
  1177. #   # tls_cert = "/etc/telegraf/cert.pem"
  1178. #   # tls_key = "/etc/telegraf/key.pem"
  1179. #   ## Use TLS but skip chain & host verification
  1180. #   # insecure_skip_verify = false
  1181. #
  1182. #   ## Period between keep alive probes.
  1183. #   ## Only applies to TCP sockets.
  1184. #   ## 0 disables keep alive probes.
  1185. #   ## Defaults to the OS configuration.
  1186. #   # keep_alive_period = "5m"
  1187. #
  1188. #   ## The framing technique with which it is expected that messages are
  1189. #   ## transported (default = "octet-counting").  Whether the messages come
  1190. #   ## using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
  1191. #   ## or the non-transparent framing technique (RFC6587#section-3.4.2).  Must
  1192. #   ## be one of "octet-counting", "non-transparent".
  1193. #   # framing = "octet-counting"
  1194. #
  1195. #   ## The trailer to be expected in case of non-transparent framing (default = "LF").
  1196. #   ## Must be one of "LF", or "NUL".
  1197. #   # trailer = "LF"
  1198. #
  1199. #   ## SD-PARAMs settings
  1200. #   ## Syslog messages can contain key/value pairs within zero or more
  1201. #   ## structured data sections.  For each unrecognised metric tag/field a
  1202. #   ## SD-PARAMS is created.
  1203. #   ##
  1204. #   ## Example:
  1205. #   ##   [[outputs.syslog]]
  1206. #   ##     sdparam_separator = "_"
  1207. #   ##     default_sdid = "default@32473"
  1208. #   ##     sdids = ["foo@123", "bar@456"]
  1209. #   ##
  1210. #   ##   input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1
  1211. #   ##   output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y]
  1212. #
  1213. #   ## SD-PARAMs separator between the sdid and tag/field key (default = "_")
  1214. #   # sdparam_separator = "_"
  1215. #
  1216. #   ## Default sdid used for tags/fields that don't contain a prefix defined in
  1217. #   ## the explicit sdids setting below. If no default is specified, no SD-PARAMs
  1218. #   ## will be used for unrecognised fields.
  1219. #   # default_sdid = "default@32473"
  1220. #
  1221. #   ## List of explicit prefixes to extract from tag/field keys and use as the
  1222. #   ## SDID, if they match (see above example for more details):
  1223. #   # sdids = ["foo@123", "bar@456"]
  1224. #
  1225. #   ## Default severity value. Severity and Facility are used to calculate the
  1226. #   ## message PRI value (RFC5424#section-6.2.1).  Used when no metric field
  1227. #   ## with key "severity_code" is defined.  If unset, 5 (notice) is the default
  1228. #   # default_severity_code = 5
  1229. #
  1230. #   ## Default facility value. Facility and Severity are used to calculate the
  1231. #   ## message PRI value (RFC5424#section-6.2.1).  Used when no metric field with
  1232. #   ## key "facility_code" is defined.  If unset, 1 (user-level) is the default
  1233. #   # default_facility_code = 1
  1234. #
  1235. #   ## Default APP-NAME value (RFC5424#section-6.2.5)
  1236. #   ## Used when no metric tag with key "appname" is defined.
  1237. #   ## If unset, "Telegraf" is the default
  1238. #   # default_appname = "Telegraf"
  1239.  
  1240.  
  1241. # # Configuration for Wavefront server to send metrics to
  1242. # [[outputs.wavefront]]
  1243. #   ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy
  1244. #   ## If using Wavefront Proxy, also specify port. example: http://proxyserver:2878
  1245. #   url = "https://metrics.wavefront.com"
  1246. #
  1247. #   ## Authentication Token for Wavefront. Only required if using Direct Ingestion
  1248. #   #token = "DUMMY_TOKEN"
  1249. #
  1250. #   ## DNS name of the wavefront proxy server. Do not use if url is specified
  1251. #   #host = "wavefront.example.com"
  1252. #
  1253. #   ## Port that the Wavefront proxy server listens on. Do not use if url is specified
  1254. #   #port = 2878
  1255. #
  1256. #   ## prefix for metrics keys
  1257. #   #prefix = "my.specific.prefix."
  1258. #
  1259. #   ## whether to use "value" for name of simple fields. default is false
  1260. #   #simple_fields = false
  1261. #
  1262. #   ## character to use between metric and field name.  default is . (dot)
  1263. #   #metric_separator = "."
  1264. #
  1265. #   ## Convert metric name paths to use metricSeparator character
  1266. #   ## When true will convert all _ (underscore) characters in final metric name. default is true
  1267. #   #convert_paths = true
  1268. #
  1269. #   ## Use Strict rules to sanitize metric and tag names from invalid characters
  1270. #   ## When enabled, forward slash (/) and comma (,) will be accepted
  1271. #   #use_strict = false
  1272. #
  1273. #   ## Use Regex to sanitize metric and tag names from invalid characters
  1274. #   ## Regex is more thorough, but significantly slower. default is false
  1275. #   #use_regex = false
  1276. #
  1277. #   ## point tags to use as the source name for Wavefront (if none found, host will be used)
  1278. #   #source_override = ["hostname", "address", "agent_host", "node_host"]
  1279. #
  1280. #   ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true
  1281. #   #convert_bool = true
  1282. #
  1283. #   ## Define a mapping, namespaced by metric prefix, from string values to numeric values
  1284. #   ##   deprecated in 1.9; use the enum processor plugin
  1285. #   #[[outputs.wavefront.string_to_number.elasticsearch]]
  1286. #   #  green = 1.0
  1287. #   #  yellow = 0.5
  1288. #   #  red = 0.0
  1289.  
  1290.  
  1291. ###############################################################################
  1292. #                            PROCESSOR PLUGINS                                #
  1293. ###############################################################################
  1294.  
  1295.  
  1296. # # Convert values to another metric value type
  1297. # [[processors.converter]]
  1298. #   ## Tags to convert
  1299. #   ##
  1300. #   ## The table key determines the target type, and the array of key-values
  1301. #   ## select the keys to convert.  The array may contain globs.
  1302. #   ##   <target-type> = [<tag-key>...]
  1303. #   [processors.converter.tags]
  1304. #     string = []
  1305. #     integer = []
  1306. #     unsigned = []
  1307. #     boolean = []
  1308. #     float = []
  1309. #
  1310. #   ## Fields to convert
  1311. #   ##
  1312. #   ## The table key determines the target type, and the array of key-values
  1313. #   ## select the keys to convert.  The array may contain globs.
  1314. #   ##   <target-type> = [<field-key>...]
  1315. #   [processors.converter.fields]
  1316. #     tag = []
  1317. #     string = []
  1318. #     integer = []
  1319. #     unsigned = []
  1320. #     boolean = []
  1321. #     float = []
  1322.  
  1323.  
  1324. # # Dates measurements, tags, and fields that pass through this filter.
  1325. # [[processors.date]]
  1326. #   ## New tag to create
  1327. #   tag_key = "month"
  1328. #
  1329. #   ## Date format string, must be a representation of the Go "reference time"
  1330. #   ## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
  1331. #   date_format = "Jan"
  1332.  
  1333.  
  1334. # # Map enum values according to given table.
  1335. # [[processors.enum]]
  1336. #   [[processors.enum.mapping]]
  1337. #     ## Name of the field to map
  1338. #     field = "status"
  1339. #
  1340. #     ## Name of the tag to map
  1341. #     # tag = "status"
  1342. #
  1343. #     ## Destination tag or field to be used for the mapped value.  By default the
  1344. #     ## source tag or field is used, overwriting the original value.
  1345. #     dest = "status_code"
  1346. #
  1347. #     ## Default value to be used for all values not contained in the mapping
  1348. #     ## table.  When unset, the unmodified value for the field will be used if no
  1349. #     ## match is found.
  1350. #     # default = 0
  1351. #
  1352. #     ## Table of mappings
  1353. #     [processors.enum.mapping.value_mappings]
  1354. #       green = 1
  1355. #       amber = 2
  1356. #       red = 3
  1357.  
  1358.  
  1359. # # Apply metric modifications using override semantics.
  1360. # [[processors.override]]
  1361. #   ## All modifications on inputs and aggregators can be overridden:
  1362. #   # name_override = "new_name"
  1363. #   # name_prefix = "new_name_prefix"
  1364. #   # name_suffix = "new_name_suffix"
  1365. #
  1366. #   ## Tags to be added (all values must be strings)
  1367. #   # [processors.override.tags]
  1368. #   #   additional_tag = "tag_value"
  1369.  
  1370.  
  1371. # # Parse a value in a specified field/tag(s) and add the result in a new metric
  1372. # [[processors.parser]]
  1373. #   ## The name of the fields whose value will be parsed.
  1374. #   parse_fields = []
  1375. #
  1376. #   ## If true, incoming metrics are not emitted.
  1377. #   drop_original = false
  1378. #
  1379. #   ## If set to override, emitted metrics will be merged by overriding the
  1380. #   ## original metric using the newly parsed metrics.
  1381. #   merge = "override"
  1382. #
  1383. #   ## The dataformat to be read from files
  1384. #   ## Each data format has its own unique set of configuration options, read
  1385. #   ## more about them here:
  1386. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  1387. #   data_format = "influx"
  1388.  
  1389.  
  1390. # # Rotate a single valued metric into a multi field metric
  1391. # [[processors.pivot]]
  1392. #   ## Tag to use for naming the new field.
  1393. #   tag_key = "name"
  1394. #   ## Field to use as the value of the new field.
  1395. #   value_key = "value"
  1396.  
  1397.  
  1398. # # Print all metrics that pass through this filter.
  1399. # [[processors.printer]]
  1400.  
  1401.  
  1402. # # Transforms tag and field values with regex pattern
  1403. # [[processors.regex]]
  1404. #   ## Tag and field conversions defined in a separate sub-tables
  1405. #   # [[processors.regex.tags]]
  1406. #   #   ## Tag to change
  1407. #   #   key = "resp_code"
  1408. #   #   ## Regular expression to match on a tag value
  1409. #   #   pattern = "^(\\d)\\d\\d$"
  1410. #   #   ## Matches of the pattern will be replaced with this string.  Use ${1}
  1411. #   #   ## notation to use the text of the first submatch.
  1412. #   #   replacement = "${1}xx"
  1413. #
  1414. #   # [[processors.regex.fields]]
  1415. #   #   ## Field to change
  1416. #   #   key = "request"
  1417. #   #   ## All the power of the Go regular expressions available here
  1418. #   #   ## For example, named subgroups
  1419. #   #   pattern = "^/api(?P<method>/[\\w/]+)\\S*"
  1420. #   #   replacement = "${method}"
  1421. #   #   ## If result_key is present, a new field will be created
  1422. #   #   ## instead of changing existing field
  1423. #   #   result_key = "method"
  1424. #
  1425. #   ## Multiple conversions may be applied for one field sequentially
  1426. #   ## Let's extract one more value
  1427. #   # [[processors.regex.fields]]
  1428. #   #   key = "request"
  1429. #   #   pattern = ".*category=(\\w+).*"
  1430. #   #   replacement = "${1}"
  1431. #   #   result_key = "search_category"
  1432.  
  1433.  
  1434. # # Rename measurements, tags, and fields that pass through this filter.
  1435. # [[processors.rename]]
  1436.  
  1437.  
  1438. # # Perform string processing on tags, fields, and measurements
  1439. # [[processors.strings]]
  1440. #   ## Convert a tag value to uppercase
  1441. #   # [[processors.strings.uppercase]]
  1442. #   #   tag = "method"
  1443. #
  1444. #   ## Convert a field value to lowercase and store in a new field
  1445. #   # [[processors.strings.lowercase]]
  1446. #   #   field = "uri_stem"
  1447. #   #   dest = "uri_stem_normalised"
  1448. #
  1449. #   ## Trim leading and trailing whitespace using the default cutset
  1450. #   # [[processors.strings.trim]]
  1451. #   #   field = "message"
  1452. #
  1453. #   ## Trim leading characters in cutset
  1454. #   # [[processors.strings.trim_left]]
  1455. #   #   field = "message"
  1456. #   #   cutset = "\t"
  1457. #
  1458. #   ## Trim trailing characters in cutset
  1459. #   # [[processors.strings.trim_right]]
  1460. #   #   field = "message"
  1461. #   #   cutset = "\r\n"
  1462. #
  1463. #   ## Trim the given prefix from the field
  1464. #   # [[processors.strings.trim_prefix]]
  1465. #   #   field = "my_value"
  1466. #   #   prefix = "my_"
  1467. #
  1468. #   ## Trim the given suffix from the field
  1469. #   # [[processors.strings.trim_suffix]]
  1470. #   #   field = "read_count"
  1471. #   #   suffix = "_count"
  1472. #
  1473. #   ## Replace all non-overlapping instances of old with new
  1474. #   # [[processors.strings.replace]]
  1475. #   #   measurement = "*"
  1476. #   #   old = ":"
  1477. #   #   new = "_"
  1478. #
  1479. #   ## Trims strings based on width
  1480. #   # [[processors.strings.left]]
  1481. #   #   field = "message"
  1482. #   #   width = 10
  1483.  
  1484.  
  1485. # # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.
  1486. # [[processors.tag_limit]]
  1487. #   ## Maximum number of tags to preserve
  1488. #   limit = 10
  1489. #
  1490. #   ## List of tags to preferentially preserve
  1491. #   keep = ["foo", "bar", "baz"]
  1492.  
  1493.  
  1494. # # Aggregate metrics and pass through only the top k series per period.
  1495. # [[processors.topk]]
  1496. #   ## How many seconds between aggregations
  1497. #   # period = 10
  1498. #
  1499. #   ## How many top metrics to return
  1500. #   # k = 10
  1501. #
  1502. #   ## Over which tags should the aggregation be done. Globs can be specified, in
  1503. #   ## which case any tag matching the glob will be aggregated over. If set to an
  1504. #   ## empty list, no aggregation over tags is done
  1505. #   # group_by = ['*']
  1506. #
  1507. #   ## Over which fields are the top k are calculated
  1508. #   # fields = ["value"]
  1509. #
  1510. #   ## What aggregation to use. Options: sum, mean, min, max
  1511. #   # aggregation = "mean"
  1512. #
  1513. #   ## Instead of the top k largest metrics, return the bottom k lowest metrics
  1514. #   # bottomk = false
  1515. #
  1516. #   ## The plugin assigns each metric a GroupBy tag generated from its name and
  1517. #   ## tags. If this setting is different than "" the plugin will add a
  1518. #   ## tag (which name will be the value of this setting) to each metric with
  1519. #   ## the value of the calculated GroupBy tag. Useful for debugging
  1520. #   # add_groupby_tag = ""
  1521. #
  1522. #   ## These settings provide a way to know the position of each metric in
  1523. #   ## the top k. The 'add_rank_fields' setting allows you to specify for which
  1524. #   ## fields the position is required. If the list is non empty, then a field
  1525. #   ## will be added to each and every metric for each string present in this
  1526. #   ## setting. This field will contain the ranking of the group that
  1527. #   ## the metric belonged to when aggregated over that field.
  1528. #   ## The name of the field will be set to the name of the aggregation field,
  1529. #   ## suffixed with the string '_topk_rank'
  1530. #   # add_rank_fields = []
  1531. #
  1532. #   ## These settings provide a way to know what values the plugin is generating
  1533. #   ## when aggregating metrics. The 'add_aggregate_fields' setting allows you to
  1534. #   ## specify for which fields the final aggregation value is required. If the
  1535. #   ## list is non empty, then a field will be added to each and every metric for
  1536. #   ## each field present in this setting. This field will contain
  1537. #   ## the computed aggregation for the group that the metric belonged to when
  1538. #   ## aggregated over that field.
  1539. #   ## The name of the field will be set to the name of the aggregation field,
  1540. #   ## suffixed with the string '_topk_aggregate'
  1541. #   # add_aggregate_fields = []
  1542.  
  1543.  
  1544. # # Rotate multi field metric into several single field metrics
  1545. # [[processors.unpivot]]
  1546. #   ## Tag to use for the name.
  1547. #   tag_key = "name"
  1548. #   ## Field to use for the name of the value.
  1549. #   value_key = "value"
  1550.  
  1551.  
  1552. ###############################################################################
  1553. #                            AGGREGATOR PLUGINS                               #
  1554. ###############################################################################
  1555.  
  1556.  
  1557. # # Keep the aggregate basicstats of each metric passing through.
  1558. # [[aggregators.basicstats]]
  1559. #   ## The period on which to flush & clear the aggregator.
  1560. #   period = "30s"
  1561. #   ## If true, the original metric will be dropped by the
  1562. #   ## aggregator and will not get sent to the output plugins.
  1563. #   drop_original = false
  1564. #
  1565. #   ## Configures which basic stats to push as fields
  1566. #   # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]
  1567.  
  1568.  
  1569. # # Report the final metric of a series
  1570. # [[aggregators.final]]
  1571. #   ## The period on which to flush & clear the aggregator.
  1572. #   period = "30s"
  1573. #   ## If true, the original metric will be dropped by the
  1574. #   ## aggregator and will not get sent to the output plugins.
  1575. #   drop_original = false
  1576. #
  1577. #   ## The time that a series is not updated until considering it final.
  1578. #   series_timeout = "5m"
  1579.  
  1580.  
  1581. # # Create aggregate histograms.
  1582. # [[aggregators.histogram]]
  1583. #   ## The period in which to flush the aggregator.
  1584. #   period = "30s"
  1585. #
  1586. #   ## If true, the original metric will be dropped by the
  1587. #   ## aggregator and will not get sent to the output plugins.
  1588. #   drop_original = false
  1589. #
  1590. #   ## If true, the histogram will be reset on flush instead
  1591. #   ## of accumulating the results.
  1592. #   reset = false
  1593. #
  1594. #   ## Example config that aggregates all fields of the metric.
  1595. #   # [[aggregators.histogram.config]]
  1596. #   #   ## The set of buckets.
  1597. #   #   buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
  1598. #   #   ## The name of metric.
  1599. #   #   measurement_name = "cpu"
  1600. #
  1601. #   ## Example config that aggregates only specific fields of the metric.
  1602. #   # [[aggregators.histogram.config]]
  1603. #   #   ## The set of buckets.
  1604. #   #   buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
  1605. #   #   ## The name of metric.
  1606. #   #   measurement_name = "diskio"
  1607. #   #   ## The concrete fields of metric
  1608. #   #   fields = ["io_time", "read_time", "write_time"]
  1609.  
  1610.  
  1611. # # Keep the aggregate min/max of each metric passing through.
  1612. # [[aggregators.minmax]]
  1613. #   ## General Aggregator Arguments:
  1614. #   ## The period on which to flush & clear the aggregator.
  1615. #   period = "30s"
  1616. #   ## If true, the original metric will be dropped by the
  1617. #   ## aggregator and will not get sent to the output plugins.
  1618. #   drop_original = false
  1619.  
  1620.  
  1621. # # Count the occurrence of values in fields.
  1622. # [[aggregators.valuecounter]]
  1623. #   ## General Aggregator Arguments:
  1624. #   ## The period on which to flush & clear the aggregator.
  1625. #   period = "30s"
  1626. #   ## If true, the original metric will be dropped by the
  1627. #   ## aggregator and will not get sent to the output plugins.
  1628. #   drop_original = false
  1629. #   ## The fields for which the values will be counted
  1630. #   fields = []
  1631.  
  1632.  
  1633. ###############################################################################
  1634. #                            INPUT PLUGINS                                    #
  1635. ###############################################################################
  1636.  
  1637.  
  1638. # Read metrics about cpu usage
  1639. [[inputs.cpu]]
  1640.   ## Whether to report per-cpu stats or not
  1641.   percpu = true
  1642.   ## Whether to report total system cpu stats or not
  1643.   totalcpu = true
  1644.   ## If true, collect raw CPU time metrics.
  1645.   collect_cpu_time = false
  1646.   ## If true, compute and report the sum of all non-idle CPU states.
  1647.   report_active = false
  1648.  
  1649.  
  1650. # Read metrics about disk usage by mount point
  1651. [[inputs.disk]]
  1652.   ## By default stats will be gathered for all mount points.
  1653.   ## Set mount_points will restrict the stats to only the specified mount points.
  1654.   # mount_points = ["/"]
  1655.  
  1656.   ## Ignore mount points by filesystem type.
  1657.   ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
  1658.  
  1659.  
  1660. # Read metrics about disk IO by device
  1661. [[inputs.diskio]]
  1662.   ## By default, telegraf will gather stats for all devices including
  1663.   ## disk partitions.
  1664.   ## Setting devices will restrict the stats to the specified devices.
  1665.   # devices = ["sda", "sdb", "vd*"]
  1666.   ## Uncomment the following line if you need disk serial numbers.
  1667.   # skip_serial_number = false
  1668.   #
  1669.   ## On systems which support it, device metadata can be added in the form of
  1670.   ## tags.
  1671.   ## Currently only Linux is supported via udev properties. You can view
  1672.   ## available properties for a device by running:
  1673.   ## 'udevadm info -q property -n /dev/sda'
  1674.   ## Note: Most, but not all, udev properties can be accessed this way. Properties
  1675.   ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH.
  1676.   # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
  1677.   #
  1678.   ## Using the same metadata source as device_tags, you can also customize the
  1679.   ## name of the device via templates.
  1680.   ## The 'name_templates' parameter is a list of templates to try and apply to
  1681.   ## the device. The template may contain variables in the form of '$PROPERTY' or
  1682.   ## '${PROPERTY}'. The first template which does not contain any variables not
  1683.   ## present for the device is used as the device name tag.
  1684.   ## The typical use case is for LVM volumes, to get the VG/LV name instead of
  1685.   ## the near-meaningless DM-0 name.
  1686.   # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
  1687.  
  1688.  
  1689. # Get kernel statistics from /proc/stat
  1690. [[inputs.kernel]]
  1691.   # no configuration
  1692.  
  1693.  
  1694. # Read metrics about memory usage
  1695. [[inputs.mem]]
  1696.   # no configuration
  1697.  
  1698.  
  1699. # Get the number of processes and group them by status
  1700. [[inputs.processes]]
  1701.   # no configuration
  1702.  
  1703.  
  1704. # Read metrics about swap memory usage
  1705. [[inputs.swap]]
  1706.   # no configuration
  1707.  
  1708.  
  1709. # Read metrics about system load & uptime
  1710. [[inputs.system]]
  1711.   ## Uncomment to remove deprecated metrics.
  1712.   # fielddrop = ["uptime_format"]
  1713.  
  1714.  
  1715. # # Gather ActiveMQ metrics
  1716. # [[inputs.activemq]]
  1717. #   ## ActiveMQ WebConsole URL
  1718. #   url = "http://127.0.0.1:8161"
  1719. #
  1720. #   ## Required ActiveMQ Endpoint
  1721. #   ##   deprecated in 1.11; use the url option
  1722. #   # server = "127.0.0.1"
  1723. #   # port = 8161
  1724. #
  1725. #   ## Credentials for basic HTTP authentication
  1726. #   # username = "admin"
  1727. #   # password = "admin"
  1728. #
  1729. #   ## Required ActiveMQ webadmin root path
  1730. #   # webadmin = "admin"
  1731. #
  1732. #   ## Maximum time to receive response.
  1733. #   # response_timeout = "5s"
  1734. #
  1735. #   ## Optional TLS Config
  1736. #   # tls_ca = "/etc/telegraf/ca.pem"
  1737. #   # tls_cert = "/etc/telegraf/cert.pem"
  1738. #   # tls_key = "/etc/telegraf/key.pem"
  1739. #   ## Use TLS but skip chain & host verification
  1740. #   # insecure_skip_verify = false
  1741.  
  1742.  
  1743. # # Read stats from aerospike server(s)
  1744. # [[inputs.aerospike]]
  1745. #   ## Aerospike servers to connect to (with port)
  1746. #   ## This plugin will query all namespaces the aerospike
  1747. #   ## server has configured and get stats for them.
  1748. #   servers = ["localhost:3000"]
  1749. #
  1750. #   # username = "telegraf"
  1751. #   # password = "pa$$word"
  1752. #
  1753. #   ## Optional TLS Config
  1754. #   # enable_tls = false
  1755. #   # tls_ca = "/etc/telegraf/ca.pem"
  1756. #   # tls_cert = "/etc/telegraf/cert.pem"
  1757. #   # tls_key = "/etc/telegraf/key.pem"
  1758. #   ## If false, skip chain & host verification
  1759. #   # insecure_skip_verify = true
  1760.  
  1761.  
  1762. # # Read Apache status information (mod_status)
  1763. # [[inputs.apache]]
  1764. #   ## An array of URLs to gather from, must be directed at the machine
  1765. #   ## readable version of the mod_status page including the auto query string.
  1766. #   ## Default is "http://localhost/server-status?auto".
  1767. #   urls = ["http://localhost/server-status?auto"]
  1768. #
  1769. #   ## Credentials for basic HTTP authentication.
  1770. #   # username = "myuser"
  1771. #   # password = "mypassword"
  1772. #
  1773. #   ## Maximum time to receive response.
  1774. #   # response_timeout = "5s"
  1775. #
  1776. #   ## Optional TLS Config
  1777. #   # tls_ca = "/etc/telegraf/ca.pem"
  1778. #   # tls_cert = "/etc/telegraf/cert.pem"
  1779. #   # tls_key = "/etc/telegraf/key.pem"
  1780. #   ## Use TLS but skip chain & host verification
  1781. #   # insecure_skip_verify = false
  1782.  
  1783.  
  1784. # # Monitor APC UPSes connected to apcupsd
  1785. # [[inputs.apcupsd]]
  1786. #   # A list of running apcupsd server to connect to.
  1787. #   # If not provided will default to tcp://127.0.0.1:3551
  1788. #   servers = ["tcp://127.0.0.1:3551"]
  1789. #
  1790. #   ## Timeout for dialing server.
  1791. #   timeout = "5s"
  1792.  
  1793.  
  1794. # # Gather metrics from Apache Aurora schedulers
  1795. # [[inputs.aurora]]
  1796. #   ## Schedulers are the base addresses of your Aurora Schedulers
  1797. #   schedulers = ["http://127.0.0.1:8081"]
  1798. #
  1799. #   ## Set of role types to collect metrics from.
  1800. #   ##
  1801. #   ## The scheduler roles are checked each interval by contacting the
  1802. #   ## scheduler nodes; zookeeper is not contacted.
  1803. #   # roles = ["leader", "follower"]
  1804. #
  1805. #   ## Timeout is the max time for total network operations.
  1806. #   # timeout = "5s"
  1807. #
  1808. #   ## Username and password are sent using HTTP Basic Auth.
  1809. #   # username = "username"
  1810. #   # password = "pa$$word"
  1811. #
  1812. #   ## Optional TLS Config
  1813. #   # tls_ca = "/etc/telegraf/ca.pem"
  1814. #   # tls_cert = "/etc/telegraf/cert.pem"
  1815. #   # tls_key = "/etc/telegraf/key.pem"
  1816. #   ## Use TLS but skip chain & host verification
  1817. #   # insecure_skip_verify = false
  1818.  
  1819.  
  1820. # # Read metrics of bcache from stats_total and dirty_data
  1821. # [[inputs.bcache]]
  1822. #   ## Bcache sets path
  1823. #   ## If not specified, then default is:
  1824. #   bcachePath = "/sys/fs/bcache"
  1825. #
  1826. #   ## By default, telegraf gathers stats for all bcache devices
  1827. #   ## Setting devices will restrict the stats to the specified
  1828. #   ## bcache devices.
  1829. #   bcacheDevs = ["bcache0"]
  1830.  
  1831.  
  1832. # # Collects Beanstalkd server and tubes stats
  1833. # [[inputs.beanstalkd]]
  1834. #   ## Server to collect data from
  1835. #   server = "localhost:11300"
  1836. #
  1837. #   ## List of tubes to gather stats about.
  1838. #   ## If no tubes specified then data gathered for each tube on server reported by list-tubes command
  1839. #   tubes = ["notifications"]
  1840.  
  1841.  
  1842. # # Read BIND nameserver XML statistics
  1843. # [[inputs.bind]]
  1844. #   ## An array of BIND XML statistics URI to gather stats.
  1845. #   ## Default is "http://localhost:8053/xml/v3".
  1846. #   # urls = ["http://localhost:8053/xml/v3"]
  1847. #   # gather_memory_contexts = false
  1848. #   # gather_views = false
  1849.  
  1850.  
  1851. # # Collect bond interface status, slaves statuses and failures count
  1852. # [[inputs.bond]]
  1853. #   ## Sets 'proc' directory path
  1854. #   ## If not specified, then default is /proc
  1855. #   # host_proc = "/proc"
  1856. #
  1857. #   ## By default, telegraf gathers stats for all bond interfaces
  1858. #   ## Setting interfaces will restrict the stats to the specified
  1859. #   ## bond interfaces.
  1860. #   # bond_interfaces = ["bond0"]
  1861.  
  1862.  
  1863. # # Collect Kafka topics and consumers status from Burrow HTTP API.
  1864. # [[inputs.burrow]]
  1865. #   ## Burrow API endpoints in format "schema://host:port".
  1866. #   ## Default is "http://localhost:8000".
  1867. #   servers = ["http://localhost:8000"]
  1868. #
  1869. #   ## Override Burrow API prefix.
  1870. #   ## Useful when Burrow is behind reverse-proxy.
  1871. #   # api_prefix = "/v3/kafka"
  1872. #
  1873. #   ## Maximum time to receive response.
  1874. #   # response_timeout = "5s"
  1875. #
  1876. #   ## Limit per-server concurrent connections.
  1877. #   ## Useful in case of large number of topics or consumer groups.
  1878. #   # concurrent_connections = 20
  1879. #
  1880. #   ## Filter clusters, default is no filtering.
  1881. #   ## Values can be specified as glob patterns.
  1882. #   # clusters_include = []
  1883. #   # clusters_exclude = []
  1884. #
  1885. #   ## Filter consumer groups, default is no filtering.
  1886. #   ## Values can be specified as glob patterns.
  1887. #   # groups_include = []
  1888. #   # groups_exclude = []
  1889. #
  1890. #   ## Filter topics, default is no filtering.
  1891. #   ## Values can be specified as glob patterns.
  1892. #   # topics_include = []
  1893. #   # topics_exclude = []
  1894. #
  1895. #   ## Credentials for basic HTTP authentication.
  1896. #   # username = ""
  1897. #   # password = ""
  1898. #
  1899. #   ## Optional SSL config
  1900. #   # ssl_ca = "/etc/telegraf/ca.pem"
  1901. #   # ssl_cert = "/etc/telegraf/cert.pem"
  1902. #   # ssl_key = "/etc/telegraf/key.pem"
  1903. #   # insecure_skip_verify = false
  1904.  
  1905.  
  1906. # # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
  1907. # [[inputs.ceph]]
  1908. #   ## This is the recommended interval to poll.  Too frequent and you will lose
  1909. #   ## data points due to timeouts during rebalancing and recovery
  1910. #   interval = '1m'
  1911. #
  1912. #   ## All configuration values are optional, defaults are shown below
  1913. #
  1914. #   ## location of ceph binary
  1915. #   ceph_binary = "/usr/bin/ceph"
  1916. #
  1917. #   ## directory in which to look for socket files
  1918. #   socket_dir = "/var/run/ceph"
  1919. #
  1920. #   ## prefix of MON and OSD socket files, used to determine socket type
  1921. #   mon_prefix = "ceph-mon"
  1922. #   osd_prefix = "ceph-osd"
  1923. #
  1924. #   ## suffix used to identify socket files
  1925. #   socket_suffix = "asok"
  1926. #
  1927. #   ## Ceph user to authenticate as
  1928. #   ceph_user = "client.admin"
  1929. #
  1930. #   ## Ceph configuration to use to locate the cluster
  1931. #   ceph_config = "/etc/ceph/ceph.conf"
  1932. #
  1933. #   ## Whether to gather statistics via the admin socket
  1934. #   gather_admin_socket_stats = true
  1935. #
  1936. #   ## Whether to gather statistics via ceph commands
  1937. #   gather_cluster_stats = false
  1938.  
  1939.  
  1940. # # Read specific statistics per cgroup
  1941. # [[inputs.cgroup]]
  1942. #   ## Directories in which to look for files, globs are supported.
  1943. #   ## Consider restricting paths to the set of cgroups you really
  1944. #   ## want to monitor if you have a large number of cgroups, to avoid
  1945. #   ## any cardinality issues.
  1946. #   # paths = [
  1947. #   #   "/cgroup/memory",
  1948. #   #   "/cgroup/memory/child1",
  1949. #   #   "/cgroup/memory/child2/*",
  1950. #   # ]
  1951. #   ## cgroup stat fields, as file names, globs are supported.
  1952. #   ## these file names are appended to each path from above.
  1953. #   # files = ["memory.*usage*", "memory.limit_in_bytes"]
  1954.  
  1955.  
  1956. # # Get standard chrony metrics, requires chronyc executable.
  1957. # [[inputs.chrony]]
  1958. #   ## If true, chronyc tries to perform a DNS lookup for the time server.
  1959. #   # dns_lookup = false
  1960.  
  1961.  
  1962. # # Pull Metric Statistics from Amazon CloudWatch
  1963. # [[inputs.cloudwatch]]
  1964. #   ## Amazon Region
  1965. #   region = "us-east-1"
  1966. #
  1967. #   ## Amazon Credentials
  1968. #   ## Credentials are loaded in the following order
  1969. #   ## 1) Assumed credentials via STS if role_arn is specified
  1970. #   ## 2) explicit credentials from 'access_key' and 'secret_key'
  1971. #   ## 3) shared profile from 'profile'
  1972. #   ## 4) environment variables
  1973. #   ## 5) shared credentials file
  1974. #   ## 6) EC2 Instance Profile
  1975. #   # access_key = ""
  1976. #   # secret_key = ""
  1977. #   # token = ""
  1978. #   # role_arn = ""
  1979. #   # profile = ""
  1980. #   # shared_credential_file = ""
  1981. #
  1982. #   ## Endpoint to make request against, the correct endpoint is automatically
  1983. #   ## determined and this option should only be set if you wish to override the
  1984. #   ## default.
  1985. #   ##   ex: endpoint_url = "http://localhost:8000"
  1986. #   # endpoint_url = ""
  1987. #
  1988. #   # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all
  1989. #   # metrics are made available to the 1 minute period. Some are collected at
  1990. #   # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.
  1991. #   # Note that if a period is configured that is smaller than the minimum for a
  1992. #   # particular metric, that metric will not be returned by the Cloudwatch API
  1993. #   # and will not be collected by Telegraf.
  1994. #   #
  1995. #   ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
  1996. #   period = "5m"
  1997. #
  1998. #   ## Collection Delay (required - must account for metrics availability via CloudWatch API)
  1999. #   delay = "5m"
  2000. #
  2001. #   ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
  2002. #   ## gaps or overlap in pulled data
  2003. #   interval = "5m"
  2004. #
  2005. #   ## Configure the TTL for the internal cache of metrics.
  2006. #   # cache_ttl = "1h"
  2007. #
  2008. #   ## Metric Statistic Namespace (required)
  2009. #   namespace = "AWS/ELB"
  2010. #
  2011. #   ## Maximum requests per second. Note that the global default AWS rate limit is
  2012. #   ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a
  2013. #   ## maximum of 50.
  2014. #   ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html
  2015. #   # ratelimit = 25
  2016. #
  2017. #   ## Namespace-wide statistic filters. These allow fewer queries to be made to
  2018. #   ## cloudwatch.
  2019. #   # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
  2020. #   # statistic_exclude = []
  2021. #
  2022. #   ## Metrics to Pull
  2023. #   ## Defaults to all Metrics in Namespace if nothing is provided
  2024. #   ## Refreshes Namespace available metrics every 1h
  2025. #   #[[inputs.cloudwatch.metrics]]
  2026. #   #  names = ["Latency", "RequestCount"]
  2027. #   #
  2028. #   #  ## Statistic filters for Metric.  These allow for retrieving specific
  2029. #   #  ## statistics for an individual metric.
  2030. #   #  # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ]
  2031. #   #  # statistic_exclude = []
  2032. #   #
  2033. #   #  ## Dimension filters for Metric.  All dimensions defined for the metric names
  2034. #   #  ## must be specified in order to retrieve the metric statistics.
  2035. #   #  [[inputs.cloudwatch.metrics.dimensions]]
  2036. #   #    name = "LoadBalancerName"
  2037. #   #    value = "p-example"
  2038.  
  2039.  
  2040. # Collects conntrack stats from the configured directories and files.
  2041. [[inputs.conntrack]]
  2042.    ## The following defaults would work with multiple versions of conntrack.
  2043.    ## Note the nf_ and ip_ filename prefixes are mutually exclusive across
  2044.    ## kernel versions, as are the directory locations.
  2045.  
  2046.    ## Superset of filenames to look for within the conntrack dirs.
  2047.    ## Missing files will be ignored.
  2048.    files = ["ip_conntrack_count","ip_conntrack_max",
  2049.             "nf_conntrack_count","nf_conntrack_max"]
  2050.  
  2051.    ## Directories to search within for the conntrack files above.
  2052.    ## Missing directories will be ignored.
  2053.    dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
  2054.  
  2055.  
  2056. # # Gather health check statuses from services registered in Consul
  2057. # [[inputs.consul]]
  2058. #   ## Consul server address
  2059. #   # address = "localhost"
  2060. #
  2061. #   ## URI scheme for the Consul server, one of "http", "https"
  2062. #   # scheme = "http"
  2063. #
  2064. #   ## ACL token used in every request
  2065. #   # token = ""
  2066. #
  2067. #   ## HTTP Basic Authentication username and password.
  2068. #   # username = ""
  2069. #   # password = ""
  2070. #
  2071. #   ## Data center to query the health checks from
  2072. #   # datacenter = ""
  2073. #
  2074. #   ## Optional TLS Config
  2075. #   # tls_ca = "/etc/telegraf/ca.pem"
  2076. #   # tls_cert = "/etc/telegraf/cert.pem"
  2077. #   # tls_key = "/etc/telegraf/key.pem"
  2078. #   ## Use TLS but skip chain & host verification
  2079. #   # insecure_skip_verify = true
  2080. #
  2081. #   ## Consul checks' tag splitting
  2082. #   # When tags are formatted like "key:value" with ":" as a delimiter then
  2083. #   # they will be split and reported as proper key:value in Telegraf
  2084. #   # tag_delimiter = ":"
  2085.  
  2086.  
  2087. # # Read metrics from one or many couchbase clusters
  2088. # [[inputs.couchbase]]
  2089. #   ## specify servers via a url matching:
  2090. #   ##  [protocol://][:password]@address[:port]
  2091. #   ##  e.g.
  2092. #   ##    http://couchbase-0.example.com/
  2093. #   ##    http://admin:secret@couchbase-0.example.com:8091/
  2094. #   ##
  2095. #   ## If no servers are specified, then localhost is used as the host.
  2096. #   ## If no protocol is specified, HTTP is used.
  2097. #   ## If no port is specified, 8091 is used.
  2098. #   servers = ["http://localhost:8091"]
  2099.  
  2100.  
  2101. # # Read CouchDB Stats from one or more servers
  2102. # [[inputs.couchdb]]
  2103. #   ## Works with CouchDB stats endpoints out of the box
  2104. #   ## Multiple Hosts from which to read CouchDB stats:
  2105. #   hosts = ["http://localhost:8086/_stats"]
  2106. #
  2107. #   ## Use HTTP Basic Authentication.
  2108. #   # basic_username = "telegraf"
  2109. #   # basic_password = "p@ssw0rd"
  2110.  
  2111.  
  2112. # # Input plugin for DC/OS metrics
  2113. # [[inputs.dcos]]
  2114. #   ## The DC/OS cluster URL.
  2115. #   cluster_url = "https://dcos-ee-master-1"
  2116. #
  2117. #   ## The ID of the service account.
  2118. #   service_account_id = "telegraf"
  2119. #   ## The private key file for the service account.
  2120. #   service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem"
  2121. #
  2122. #   ## Path containing login token.  If set, will read on every gather.
  2123. #   # token_file = "/home/dcos/.dcos/token"
  2124. #
  2125. #   ## In all filter options if both include and exclude are empty all items
  2126. #   ## will be collected.  Arrays may contain glob patterns.
  2127. #   ##
  2128. #   ## Node IDs to collect metrics from.  If a node is excluded, no metrics will
  2129. #   ## be collected for its containers or apps.
  2130. #   # node_include = []
  2131. #   # node_exclude = []
  2132. #   ## Container IDs to collect container metrics from.
  2133. #   # container_include = []
  2134. #   # container_exclude = []
  2135. #   ## Container IDs to collect app metrics from.
  2136. #   # app_include = []
  2137. #   # app_exclude = []
  2138. #
  2139. #   ## Maximum concurrent connections to the cluster.
  2140. #   # max_connections = 10
  2141. #   ## Maximum time to receive a response from cluster.
  2142. #   # response_timeout = "20s"
  2143. #
  2144. #   ## Optional TLS Config
  2145. #   # tls_ca = "/etc/telegraf/ca.pem"
  2146. #   # tls_cert = "/etc/telegraf/cert.pem"
  2147. #   # tls_key = "/etc/telegraf/key.pem"
  2148. #   ## If false, skip chain & host verification
  2149. #   # insecure_skip_verify = true
  2150. #
  2151. #   ## Recommended filtering to reduce series cardinality.
  2152. #   # [inputs.dcos.tagdrop]
  2153. #   #   path = ["/var/lib/mesos/slave/slaves/*"]
  2154.  
  2155.  
  2156. # # Read metrics from one or many disque servers
  2157. # [[inputs.disque]]
  2158. #   ## An array of URI to gather stats about. Specify an ip or hostname
  2159. #   ## with optional port and password.
  2160. #   ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
  2161. #   ## If no servers are specified, then localhost is used as the host.
  2162. #   servers = ["localhost"]
  2163.  
  2164.  
  2165. # # Provide a native collection for dmsetup based statistics for dm-cache
  2166. # [[inputs.dmcache]]
  2167. #   ## Whether to report per-device stats or not
  2168. #   per_device = true
  2169.  
  2170.  
  2171. # # Query given DNS server and gives statistics
  2172. # [[inputs.dns_query]]
  2173. #   ## servers to query
  2174. #   servers = ["8.8.8.8"]
  2175. #
  2176. #   ## Network is the network protocol name.
  2177. #   # network = "udp"
  2178. #
  2179. #   ## Domains or subdomains to query.
  2180. #   # domains = ["."]
  2181. #
  2182. #   ## Query record type.
  2183. #   ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
  2184. #   # record_type = "A"
  2185. #
  2186. #   ## Dns server port.
  2187. #   # port = 53
  2188. #
  2189. #   ## Query timeout in seconds.
  2190. #   # timeout = 2
  2191.  
  2192.  
  2193. # Read metrics about docker containers
  2194. [[inputs.docker]]
  2195.   ## Docker Endpoint
  2196.   ##   To use TCP, set endpoint = "tcp://[ip]:[port]"
  2197.   ##   To use environment variables (ie, docker-machine), set endpoint = "ENV"
  2198.   endpoint = "unix:///var/run/docker.sock"
  2199.  
  2200.   ## Set to true to collect Swarm metrics(desired_replicas, running_replicas)
  2201.   gather_services = false
  2202.  
  2203.   ## Only collect metrics for these containers, collect all if empty
  2204.   container_names = []
  2205.  
  2206.   ## Containers to include and exclude. Globs accepted.
  2207.   ## Note that an empty array for both will include all containers
  2208.   container_name_include = []
  2209.   container_name_exclude = []
  2210.  
  2211.   ## Container states to include and exclude. Globs accepted.
  2212.   ## When empty only containers in the "running" state will be captured.
  2213.   ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
  2214.   ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
  2215.   # container_state_include = []
  2216.   # container_state_exclude = []
  2217.  
  2218.   ## Timeout for docker list, info, and stats commands
  2219.   timeout = "5s"
  2220.  
  2221.   ## Whether to report for each container per-device blkio (8:0, 8:1...) and
  2222.   ## network (eth0, eth1, ...) stats or not
  2223.   perdevice = true
  2224.   ## Whether to report for each container total blkio and network stats or not
  2225.   total = false
  2226.   ## Which environment variables should we use as a tag
  2227.   # tag_env = ["JAVA_HOME", "HEAP_SIZE"]
  2228.  
  2229.   ## docker labels to include and exclude as tags.  Globs accepted.
  2230.   ## Note that an empty array for both will include all labels as tags
  2231.   docker_label_include = []
  2232.   docker_label_exclude = []
  2233.  
  2234.   ## Optional TLS Config
  2235.   # tls_ca = "/etc/telegraf/ca.pem"
  2236.   # tls_cert = "/etc/telegraf/cert.pem"
  2237.   # tls_key = "/etc/telegraf/key.pem"
  2238.   ## Use TLS but skip chain & host verification
  2239.   # insecure_skip_verify = false
  2240.  
  2241.  
  2242. # # Read statistics from one or many dovecot servers
  2243. # [[inputs.dovecot]]
  2244. #   ## specify dovecot servers via an address:port list
  2245. #   ##  e.g.
  2246. #   ##    localhost:24242
  2247. #   ##
  2248. #   ## If no servers are specified, then localhost is used as the host.
  2249. #   servers = ["localhost:24242"]
  2250. #   ## Type is one of "user", "domain", "ip", or "global"
  2251. #   type = "global"
  2252. #   ## Wildcard matches like "*.com". An empty string "" is same as "*"
  2253. #   ## If type = "ip" filters should be <IP/network>
  2254. #   filters = [""]
  2255.  
  2256.  
  2257. # # Read metrics about docker containers from Fargate/ECS v2 meta endpoints.
  2258. # [[inputs.ecs]]
  2259. #   ## ECS metadata url
  2260. #   # endpoint_url = "http://169.254.170.2"
  2261. #
  2262. #   ## Containers to include and exclude. Globs accepted.
  2263. #   ## Note that an empty array for both will include all containers
  2264. #   # container_name_include = []
  2265. #   # container_name_exclude = []
  2266. #
  2267. #   ## Container states to include and exclude. Globs accepted.
  2268. #   ## When empty only containers in the "RUNNING" state will be captured.
  2269. #   ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING",
  2270. #   ## "RESOURCES_PROVISIONED", "STOPPED".
  2271. #   # container_status_include = []
  2272. #   # container_status_exclude = []
  2273. #
  2274. #   ## ecs labels to include and exclude as tags.  Globs accepted.
  2275. #   ## Note that an empty array for both will include all labels as tags
  2276. #   ecs_label_include = [ "com.amazonaws.ecs.*" ]
  2277. #   ecs_label_exclude = []
  2278. #
  2279. #   ## Timeout for queries.
  2280. #   # timeout = "5s"
  2281.  
  2282.  
  2283. # # Read stats from one or more Elasticsearch servers or clusters
  2284. # [[inputs.elasticsearch]]
  2285. #   ## specify a list of one or more Elasticsearch servers
  2286. #   # you can add username and password to your url to use basic authentication:
  2287. #   # servers = ["http://user:pass@localhost:9200"]
  2288. #   servers = ["http://localhost:9200"]
  2289. #
  2290. #   ## Timeout for HTTP requests to the elastic search server(s)
  2291. #   http_timeout = "5s"
  2292. #
  2293. #   ## When local is true (the default), the node will read only its own stats.
  2294. #   ## Set local to false when you want to read the node stats from all nodes
  2295. #   ## of the cluster.
  2296. #   local = true
  2297. #
  2298. #   ## Set cluster_health to true when you want to also obtain cluster health stats
  2299. #   cluster_health = false
  2300. #
  2301. #   ## Adjust cluster_health_level when you want to also obtain detailed health stats
  2302. #   ## The options are
  2303. #   ##  - indices (default)
  2304. #   ##  - cluster
  2305. #   # cluster_health_level = "indices"
  2306. #
  2307. #   ## Set cluster_stats to true when you want to also obtain cluster stats.
  2308. #   cluster_stats = false
  2309. #
  2310. #   ## Only gather cluster_stats from the master node. To work this requires local = true
  2311. #   cluster_stats_only_from_master = true
  2312. #
  2313. #   ## Indices to collect; can be one or more indices names or _all
  2314. #   indices_include = ["_all"]
  2315. #
  2316. #   ## One of "shards", "cluster", "indices"
  2317. #   indices_level = "shards"
  2318. #
  2319. #   ## node_stats is a list of sub-stats that you want to have gathered. Valid options
  2320. #   ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
  2321. #   ## "breaker". Per default, all stats are gathered.
  2322. #   # node_stats = ["jvm", "http"]
  2323. #
  2324. #   ## HTTP Basic Authentication username and password.
  2325. #   # username = ""
  2326. #   # password = ""
  2327. #
  2328. #   ## Optional TLS Config
  2329. #   # tls_ca = "/etc/telegraf/ca.pem"
  2330. #   # tls_cert = "/etc/telegraf/cert.pem"
  2331. #   # tls_key = "/etc/telegraf/key.pem"
  2332. #   ## Use TLS but skip chain & host verification
  2333. #   # insecure_skip_verify = false
  2334.  
  2335.  
  2336. # # Read metrics from one or more commands that can output to stdout
  2337. # [[inputs.exec]]
  2338. #   ## Commands array
  2339. #   commands = [
  2340. #     "/tmp/test.sh",
  2341. #     "/usr/bin/mycollector --foo=bar",
  2342. #     "/tmp/collect_*.sh"
  2343. #   ]
  2344. #
  2345. #   ## Timeout for each command to complete.
  2346. #   timeout = "5s"
  2347. #
  2348. #   ## measurement name suffix (for separating different commands)
  2349. #   name_suffix = "_mycollector"
  2350. #
  2351. #   ## Data format to consume.
  2352. #   ## Each data format has its own unique set of configuration options, read
  2353. #   ## more about them here:
  2354. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  2355. #   data_format = "influx"
  2356.  
  2357.  
  2358. # # Read metrics from fail2ban.
  2359. # [[inputs.fail2ban]]
  2360. #   ## Use sudo to run fail2ban-client
  2361. #   use_sudo = false
  2362.  
  2363.  
  2364. # # Read devices value(s) from a Fibaro controller
  2365. # [[inputs.fibaro]]
  2366. #   ## Required Fibaro controller address/hostname.
  2367. #   ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available
  2368. #   url = "http://<controller>:80"
  2369. #
  2370. #   ## Required credentials to access the API (http://<controller>/api/<component>)
  2371. #   username = "<username>"
  2372. #   password = "<password>"
  2373. #
  2374. #   ## Amount of time allowed to complete the HTTP request
  2375. #   # timeout = "5s"
  2376.  
  2377.  
  2378. # # Reload and gather from file[s] on telegraf's interval.
  2379. # [[inputs.file]]
  2380. #   ## Files to parse each interval.
  2381. #   ## These accept standard unix glob matching rules, but with the addition of
  2382. #   ## ** as a "super asterisk". ie:
  2383. #   ##   /var/log/**.log     -> recursively find all .log files in /var/log
  2384. #   ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
  2385. #   ##   /var/log/apache.log -> only read the apache log file
  2386. #   files = ["/var/log/apache/access.log"]
  2387. #
  2388. #   ## The dataformat to be read from files
  2389. #   ## Each data format has its own unique set of configuration options, read
  2390. #   ## more about them here:
  2391. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  2392. #   data_format = "influx"
  2393.  
  2394.  
  2395. # # Count files in a directory
  2396. # [[inputs.filecount]]
  2397. #   ## Directory to gather stats about.
  2398. #   ##   deprecated in 1.9; use the directories option
  2399. #   # directory = "/var/cache/apt/archives"
  2400. #
  2401. #   ## Directories to gather stats about.
  2402. #   ## This accepts standard unix glob matching rules, but with the addition of
  2403. #   ## ** as a "super asterisk". ie:
  2404. #   ##   /var/log/**    -> recursively find all directories in /var/log and count files in each directories
  2405. #   ##   /var/log/*/*   -> find all directories with a parent dir in /var/log and count files in each directories
  2406. #   ##   /var/log       -> count all files in /var/log and all of its subdirectories
  2407. #   directories = ["/var/cache/apt/archives"]
  2408. #
  2409. #   ## Only count files that match the name pattern. Defaults to "*".
  2410. #   name = "*.deb"
  2411. #
  2412. #   ## Count files in subdirectories. Defaults to true.
  2413. #   recursive = false
  2414. #
  2415. #   ## Only count regular files. Defaults to true.
  2416. #   regular_only = true
  2417. #
  2418. #   ## Only count files that are at least this size. If size is
  2419. #   ## a negative number, only count files that are smaller than the
  2420. #   ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ...
  2421. #   ## Without quotes and units, interpreted as size in bytes.
  2422. #   size = "0B"
  2423. #
  2424. #   ## Only count files that have not been touched for at least this
  2425. #   ## duration. If mtime is negative, only count files that have been
  2426. #   ## touched in this duration. Defaults to "0s".
  2427. #   mtime = "0s"
  2428.  
  2429.  
  2430. # # Read stats about given file(s)
  2431. # [[inputs.filestat]]
  2432. #   ## Files to gather stats about.
  2433. #   ## These accept standard unix glob matching rules, but with the addition of
  2434. #   ## ** as a "super asterisk". ie:
  2435. #   ##   "/var/log/**.log"  -> recursively find all .log files in /var/log
  2436. #   ##   "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
  2437. #   ##   "/var/log/apache.log" -> just tail the apache log file
  2438. #   ##
  2439. #   ## See https://github.com/gobwas/glob for more examples
  2440. #   ##
  2441. #   files = ["/var/log/**.log"]
  2442. #   ## If true, read the entire file and calculate an md5 checksum.
  2443. #   md5 = false
  2444.  
  2445.  
  2446. # # Read real time temps from fireboard.io servers
  2447. # [[inputs.fireboard]]
  2448. #   ## Specify auth token for your account
  2449. #   auth_token = "invalidAuthToken"
  2450. #   ## You can override the fireboard server URL if necessary
  2451. #   # url = "https://fireboard.io/api/v1/devices.json"
  2452. #   ## You can set a different http_timeout if you need to
  2453. #   ## You should set a string using a number and time indicator
  2454. #   ## for example "12s" for 12 seconds.
  2455. #   # http_timeout = "4s"
  2456.  
  2457.  
  2458. # # Read metrics exposed by fluentd in_monitor plugin
  2459. # [[inputs.fluentd]]
  2460. #   ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
  2461. #   ##
  2462. #   ## Endpoint:
  2463. #   ## - only one URI is allowed
  2464. #   ## - https is not supported
  2465. #   endpoint = "http://localhost:24220/api/plugins.json"
  2466. #
  2467. #   ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
  2468. #   exclude = [
  2469. #     "monitor_agent",
  2470. #     "dummy",
  2471. #   ]
  2472.  
  2473.  
  2474. # # Gather repository information from GitHub hosted repositories.
  2475. # [[inputs.github]]
  2476. #   ## List of repositories to monitor.
  2477. #   repositories = [
  2478. #    "influxdata/telegraf",
  2479. #    "influxdata/influxdb"
  2480. #   ]
  2481. #
  2482. #   ## Github API access token.  Unauthenticated requests are limited to 60 per hour.
  2483. #   # access_token = ""
  2484. #
  2485. #   ## Github API enterprise url. Github Enterprise accounts must specify their base url.
  2486. #   # enterprise_base_url = ""
  2487. #
  2488. #   ## Timeout for HTTP requests.
  2489. #   # http_timeout = "5s"
  2490.  
  2491.  
  2492. # # Read flattened metrics from one or more GrayLog HTTP endpoints
  2493. # [[inputs.graylog]]
  2494. #   ## API endpoint, currently supported API:
  2495. #   ##
  2496. #   ##   - multiple  (Ex http://<host>:12900/system/metrics/multiple)
  2497. #   ##   - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace})
  2498. #   ##
  2499. #   ## For namespace endpoint, the metrics array will be ignored for that call.
  2500. #   ## Endpoint can contain namespace and multiple type calls.
  2501. #   ##
  2502. #   ## Please check http://[graylog-server-ip]:12900/api-browser for full list
  2503. #   ## of endpoints
  2504. #   servers = [
  2505. #     "http://[graylog-server-ip]:12900/system/metrics/multiple",
  2506. #   ]
  2507. #
  2508. #   ## Metrics list
  2509. #   ## List of metrics can be found on Graylog webservice documentation.
  2510. #   ## Or by hitting the web service api at:
  2511. #   ##   http://[graylog-host]:12900/system/metrics
  2512. #   metrics = [
  2513. #     "jvm.cl.loaded",
  2514. #     "jvm.memory.pools.Metaspace.committed"
  2515. #   ]
  2516. #
  2517. #   ## Username and password
  2518. #   username = ""
  2519. #   password = ""
  2520. #
  2521. #   ## Optional TLS Config
  2522. #   # tls_ca = "/etc/telegraf/ca.pem"
  2523. #   # tls_cert = "/etc/telegraf/cert.pem"
  2524. #   # tls_key = "/etc/telegraf/key.pem"
  2525. #   ## Use TLS but skip chain & host verification
  2526. #   # insecure_skip_verify = false
  2527.  
  2528.  
  2529. # # Read metrics of haproxy, via socket or csv stats page
  2530. # [[inputs.haproxy]]
  2531. #   ## An array of addresses to gather stats about. Specify an ip or hostname
  2532. #   ## with optional port. ie localhost, 10.10.3.33:1936, etc.
  2533. #   ## Make sure you specify the complete path to the stats endpoint
  2534. #   ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
  2535. #
  2536. #   ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
  2537. #   servers = ["http://myhaproxy.com:1936/haproxy?stats"]
  2538. #
  2539. #   ## Credentials for basic HTTP authentication
  2540. #   # username = "admin"
  2541. #   # password = "admin"
  2542. #
  2543. #   ## You can also use local socket with standard wildcard globbing.
  2544. #   ## Server address not starting with 'http' will be treated as a possible
  2545. #   ## socket, so both examples below are valid.
  2546. #   # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
  2547. #
  2548. #   ## By default, some of the fields are renamed from what haproxy calls them.
  2549. #   ## Setting this option to true results in the plugin keeping the original
  2550. #   ## field names.
  2551. #   # keep_field_names = false
  2552. #
  2553. #   ## Optional TLS Config
  2554. #   # tls_ca = "/etc/telegraf/ca.pem"
  2555. #   # tls_cert = "/etc/telegraf/cert.pem"
  2556. #   # tls_key = "/etc/telegraf/key.pem"
  2557. #   ## Use TLS but skip chain & host verification
  2558. #   # insecure_skip_verify = false
  2559.  
  2560.  
  2561. # # Monitor disks' temperatures using hddtemp
  2562. # [[inputs.hddtemp]]
  2563. #   ## By default, telegraf gathers temps data from all disks detected by the
  2564. #   ## hddtemp.
  2565. #   ##
  2566. #   ## Only collect temps from the selected disks.
  2567. #   ##
  2568. #   ## A * as the device name will return the temperature values of all disks.
  2569. #   ##
  2570. #   # address = "127.0.0.1:7634"
  2571. #   # devices = ["sda", "*"]
  2572.  
  2573.  
  2574. # # Read formatted metrics from one or more HTTP endpoints
  2575. # [[inputs.http]]
  2576. #   ## One or more URLs from which to read formatted metrics
  2577. #   urls = [
  2578. #     "http://localhost/metrics"
  2579. #   ]
  2580. #
  2581. #   ## HTTP method
  2582. #   # method = "GET"
  2583. #
  2584. #   ## Optional HTTP headers
  2585. #   # headers = {"X-Special-Header" = "Special-Value"}
  2586. #
  2587. #   ## Optional HTTP Basic Auth Credentials
  2588. #   # username = "username"
  2589. #   # password = "pa$$word"
  2590. #
  2591. #   ## HTTP entity-body to send with POST/PUT requests.
  2592. #   # body = ""
  2593. #
  2594. #   ## HTTP Content-Encoding for write request body, can be set to "gzip" to
  2595. #   ## compress body or "identity" to apply no encoding.
  2596. #   # content_encoding = "identity"
  2597. #
  2598. #   ## Optional TLS Config
  2599. #   # tls_ca = "/etc/telegraf/ca.pem"
  2600. #   # tls_cert = "/etc/telegraf/cert.pem"
  2601. #   # tls_key = "/etc/telegraf/key.pem"
  2602. #   ## Use TLS but skip chain & host verification
  2603. #   # insecure_skip_verify = false
  2604. #
  2605. #   ## Amount of time allowed to complete the HTTP request
  2606. #   # timeout = "5s"
  2607. #
  2608. #   ## Data format to consume.
  2609. #   ## Each data format has its own unique set of configuration options, read
  2610. #   ## more about them here:
  2611. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  2612. #   # data_format = "influx"
  2613.  
  2614.  
  2615. # # HTTP/HTTPS request given an address a method and a timeout
  2616. # [[inputs.http_response]]
  2617. #   ## Deprecated in 1.12, use 'urls'
  2618. #   ## Server address (default http://localhost)
  2619. #   # address = "http://localhost"
  2620. #
  2621. #   ## List of urls to query.
  2622. #   # urls = ["http://localhost"]
  2623. #
  2624. #   ## Set http_proxy (telegraf uses the system wide proxy settings if it is not set)
  2625. #   # http_proxy = "http://localhost:8888"
  2626. #
  2627. #   ## Set response_timeout (default 5 seconds)
  2628. #   # response_timeout = "5s"
  2629. #
  2630. #   ## HTTP Request Method
  2631. #   # method = "GET"
  2632. #
  2633. #   ## Whether to follow redirects from the server (defaults to false)
  2634. #   # follow_redirects = false
  2635. #
  2636. #   ## Optional HTTP Request Body
  2637. #   # body = '''
  2638. #   # {'fake':'data'}
  2639. #   # '''
  2640. #
  2641. #   ## Optional substring or regex match in body of the response
  2642. #   # response_string_match = "\"service_status\": \"up\""
  2643. #   # response_string_match = "ok"
  2644. #   # response_string_match = "\".*_status\".?:.?\"up\""
  2645. #
  2646. #   ## Optional TLS Config
  2647. #   # tls_ca = "/etc/telegraf/ca.pem"
  2648. #   # tls_cert = "/etc/telegraf/cert.pem"
  2649. #   # tls_key = "/etc/telegraf/key.pem"
  2650. #   ## Use TLS but skip chain & host verification
  2651. #   # insecure_skip_verify = false
  2652. #
  2653. #   ## HTTP Request Headers (all values must be strings)
  2654. #   # [inputs.http_response.headers]
  2655. #   #   Host = "github.com"
  2656. #
  2657. #   ## Interface to use when dialing an address
  2658. #   # interface = "eth0"
  2659.  
  2660.  
  2661. # # Read flattened metrics from one or more JSON HTTP endpoints
  2662. # [[inputs.httpjson]]
  2663. #   ## NOTE This plugin only reads numerical measurements, strings and booleans
  2664. #   ## will be ignored.
  2665. #
  2666. #   ## Name for the service being polled.  Will be appended to the name of the
  2667. #   ## measurement e.g. httpjson_webserver_stats
  2668. #   ##
  2669. #   ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.
  2670. #   name = "webserver_stats"
  2671. #
  2672. #   ## URL of each server in the service's cluster
  2673. #   servers = [
  2674. #     "http://localhost:9999/stats/",
  2675. #     "http://localhost:9998/stats/",
  2676. #   ]
  2677. #   ## Set response_timeout (default 5 seconds)
  2678. #   response_timeout = "5s"
  2679. #
  2680. #   ## HTTP method to use: GET or POST (case-sensitive)
  2681. #   method = "GET"
  2682. #
  2683. #   ## List of tag names to extract from top-level of JSON server response
  2684. #   # tag_keys = [
  2685. #   #   "my_tag_1",
  2686. #   #   "my_tag_2"
  2687. #   # ]
  2688. #
  2689. #   ## Optional TLS Config
  2690. #   # tls_ca = "/etc/telegraf/ca.pem"
  2691. #   # tls_cert = "/etc/telegraf/cert.pem"
  2692. #   # tls_key = "/etc/telegraf/key.pem"
  2693. #   ## Use TLS but skip chain & host verification
  2694. #   # insecure_skip_verify = false
  2695. #
  2696. #   ## HTTP parameters (all values must be strings).  For "GET" requests, data
  2697. #   ## will be included in the query.  For "POST" requests, data will be included
  2698. #   ## in the request body as "x-www-form-urlencoded".
  2699. #   # [inputs.httpjson.parameters]
  2700. #   #   event_type = "cpu_spike"
  2701. #   #   threshold = "0.75"
  2702. #
  2703. #   ## HTTP Headers (all values must be strings)
  2704. #   # [inputs.httpjson.headers]
  2705. #   #   X-Auth-Token = "my-xauth-token"
  2706. #   #   apiVersion = "v1"
  2707.  
  2708.  
  2709. # # Gather Icinga2 status
  2710. # [[inputs.icinga2]]
  2711. #   ## Required Icinga2 server address (default: "https://localhost:5665")
  2712. #   # server = "https://localhost:5665"
  2713. #
  2714. #   ## Required Icinga2 object type ("services" or "hosts", default "services")
  2715. #   # object_type = "services"
  2716. #
  2717. #   ## Credentials for basic HTTP authentication
  2718. #   # username = "admin"
  2719. #   # password = "admin"
  2720. #
  2721. #   ## Maximum time to receive response.
  2722. #   # response_timeout = "5s"
  2723. #
  2724. #   ## Optional TLS Config
  2725. #   # tls_ca = "/etc/telegraf/ca.pem"
  2726. #   # tls_cert = "/etc/telegraf/cert.pem"
  2727. #   # tls_key = "/etc/telegraf/key.pem"
  2728. #   ## Use TLS but skip chain & host verification
  2729. #   # insecure_skip_verify = true
  2730.  
  2731.  
  2732. # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
  2733. [[inputs.influxdb]]
  2734.   ## Works with InfluxDB debug endpoints out of the box,
  2735.   ## but other services can use this format too.
  2736.   ## See the influxdb plugin's README for more details.
  2737.  
  2738.   ## Multiple URLs from which to read InfluxDB-formatted JSON
  2739.   ## Default is "http://localhost:8086/debug/vars".
  2740.   urls = [
  2741.     "http://influxdb.my-home.giorgosdimtsas.net/debug/vars"
  2742.   ]
  2743.  
  2744.   ## Optional TLS Config
  2745.   # tls_ca = "/etc/telegraf/ca.pem"
  2746.   # tls_cert = "/etc/telegraf/cert.pem"
  2747.   # tls_key = "/etc/telegraf/key.pem"
  2748.   ## Use TLS but skip chain & host verification
  2749.   # insecure_skip_verify = false
  2750.  
  2751.   ## http request & header timeout
  2752.   timeout = "5s"
  2753.  
  2754.  
  2755. # # Collect statistics about itself
  2756. # [[inputs.internal]]
  2757. #   ## If true, collect telegraf memory stats.
  2758. #   # collect_memstats = true
  2759.  
  2760.  
  2761. # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
  2762. [[inputs.interrupts]]
  2763.   ## When set to true, cpu metrics are tagged with the cpu.  Otherwise cpu is
  2764.   ## stored as a field.
  2765.   ##
  2766.   ## The default is false for backwards compatibility, and will be changed to
  2767.   ## true in a future version.  It is recommended to set to true on new
  2768.   ## deployments.
  2769.   # cpu_as_tag = false
  2770.  
  2771.   ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
  2772.   # [inputs.interrupts.tagdrop]
  2773.   #   irq = [ "NET_RX", "TASKLET" ]
  2774.  
  2775.  
  2776. # # Read metrics from the bare metal servers via IPMI
  2777. # [[inputs.ipmi_sensor]]
  2778. #   ## optionally specify the path to the ipmitool executable
  2779. #   # path = "/usr/bin/ipmitool"
  2780. #   ##
  2781. #   ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR
  2782. #   # privilege = "ADMINISTRATOR"
  2783. #   ##
  2784. #   ## optionally specify one or more servers via a url matching
  2785. #   ##  [username[:password]@][protocol[(address)]]
  2786. #   ##  e.g.
  2787. #   ##    root:passwd@lan(127.0.0.1)
  2788. #   ##
  2789. #   ## if no servers are specified, local machine sensor stats will be queried
  2790. #   ##
  2791. #   # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
  2792. #
  2793. #   ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
  2794. #   ## gaps or overlap in pulled data
  2795. #   interval = "30s"
  2796. #
  2797. #   ## Timeout for the ipmitool command to complete
  2798. #   timeout = "20s"
  2799. #
  2800. #   ## Schema Version: (Optional, defaults to version 1)
  2801. #   metric_version = 2
  2802.  
  2803.  
  2804. # # Gather packets and bytes counters from Linux ipsets
  2805. # [[inputs.ipset]]
  2806. #   ## By default, we only show sets which have already matched at least 1 packet.
  2807. #   ## set include_unmatched_sets = true to gather them all.
  2808. #   include_unmatched_sets = false
  2809. #   ## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
  2810. #   use_sudo = false
  2811. #   ## The default timeout of 1s for ipset execution can be overridden here:
  2812. #   # timeout = "1s"
  2813.  
  2814.  
  2815. # # Gather packets and bytes throughput from iptables
  2816. # [[inputs.iptables]]
  2817. #   ## iptables require root access on most systems.
  2818. #   ## Setting 'use_sudo' to true will make use of sudo to run iptables.
  2819. #   ## Users must configure sudo to allow telegraf user to run iptables with no password.
  2820. #   ## iptables can be restricted to only list command "iptables -nvL".
  2821. #   use_sudo = false
  2822. #   ## Setting 'use_lock' to true runs iptables with the "-w" option.
  2823. #   ## Adjust your sudo settings appropriately if using this option ("iptables -w 5 -nvL")
  2824. #   use_lock = false
  2825. #   ## Define an alternate executable, such as "ip6tables". Default is "iptables".
  2826. #   # binary = "ip6tables"
  2827. #   ## defines the table to monitor:
  2828. #   table = "filter"
  2829. #   ## defines the chains to monitor.
  2830. #   ## NOTE: iptables rules without a comment will not be monitored.
  2831. #   ## Read the plugin documentation for more information.
  2832. #   chains = [ "INPUT" ]
  2833.  
  2834.  
  2835. # # Collect virtual and real server stats from Linux IPVS
  2836. # [[inputs.ipvs]]
  2837. #   # no configuration
  2838.  
  2839.  
  2840. # # Read jobs and cluster metrics from Jenkins instances
  2841. # [[inputs.jenkins]]
  2842. #   ## The Jenkins URL
  2843. #   url = "http://my-jenkins-instance:8080"
  2844. #   # username = "admin"
  2845. #   # password = "admin"
  2846. #
  2847. #   ## Set response_timeout
  2848. #   response_timeout = "5s"
  2849. #
  2850. #   ## Optional TLS Config
  2851. #   # tls_ca = "/etc/telegraf/ca.pem"
  2852. #   # tls_cert = "/etc/telegraf/cert.pem"
  2853. #   # tls_key = "/etc/telegraf/key.pem"
  2854. #   ## Use SSL but skip chain & host verification
  2855. #   # insecure_skip_verify = false
  2856. #
  2857. #   ## Optional Max Job Build Age filter
  2858. #   ## Default 1 hour, ignore builds older than max_build_age
  2859. #   # max_build_age = "1h"
  2860. #
  2861. #   ## Optional Sub Job Depth filter
  2862. #   ## Jenkins can have unlimited layer of sub jobs
  2863. #   ## This config will limit the layers of pulling, default value 0 means
  2864. #   ## unlimited pulling until no more sub jobs
  2865. #   # max_subjob_depth = 0
  2866. #
  2867. #   ## Optional Sub Job Per Layer
  2868. #   ## In workflow-multibranch-plugin, each branch will be created as a sub job.
  2869. #   ## This config will limit to call only the latest branches in each layer,
  2870. #   ## empty will use default value 10
  2871. #   # max_subjob_per_layer = 10
  2872. #
  2873. #   ## Jobs to exclude from gathering
  2874. #   # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"]
  2875. #
  2876. #   ## Nodes to exclude from gathering
  2877. #   # node_exclude = [ "node1", "node2" ]
  2878. #
  2879. #   ## Worker pool for jenkins plugin only
  2880. #   ## Empty this field will use default value 5
  2881. #   # max_connections = 5
  2882.  
  2883.  
  2884. # # Read JMX metrics through Jolokia
  2885. # [[inputs.jolokia]]
  2886. #   # DEPRECATED: the jolokia plugin has been deprecated in favor of the
  2887. #   # jolokia2 plugin
  2888. #   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
  2889. #
  2890. #   ## This is the context root used to compose the jolokia url
  2891. #   ## NOTE that Jolokia requires a trailing slash at the end of the context root
  2892. #   ## NOTE that your jolokia security policy must allow for POST requests.
  2893. #   context = "/jolokia/"
  2894. #
  2895. #   ## This specifies the mode used
  2896. #   # mode = "proxy"
  2897. #   #
  2898. #   ## When in proxy mode this section is used to specify further
  2899. #   ## proxy address configurations.
  2900. #   ## Remember to change host address to fit your environment.
  2901. #   # [inputs.jolokia.proxy]
  2902. #   #   host = "127.0.0.1"
  2903. #   #   port = "8080"
  2904. #
  2905. #   ## Optional http timeouts
  2906. #   ##
  2907. #   ## response_header_timeout, if non-zero, specifies the amount of time to wait
  2908. #   ## for a server's response headers after fully writing the request.
  2909. #   # response_header_timeout = "3s"
  2910. #   ##
  2911. #   ## client_timeout specifies a time limit for requests made by this client.
  2912. #   ## Includes connection time, any redirects, and reading the response body.
  2913. #   # client_timeout = "4s"
  2914. #
  2915. #   ## Attribute delimiter
  2916. #   ##
  2917. #   ## When multiple attributes are returned for a single
  2918. #   ## [inputs.jolokia.metrics], the field name is a concatenation of the metric
  2919. #   ## name, and the attribute name, separated by the given delimiter.
  2920. #   # delimiter = "_"
  2921. #
  2922. #   ## List of servers exposing jolokia read service
  2923. #   [[inputs.jolokia.servers]]
  2924. #     name = "as-server-01"
  2925. #     host = "127.0.0.1"
  2926. #     port = "8080"
  2927. #     # username = "myuser"
  2928. #     # password = "mypassword"
  2929. #
  2930. #   ## List of metrics collected on above servers
  2931. #   ## Each metric consists in a name, a jmx path and either
  2932. #   ## a pass or drop slice attribute.
  2933. #   ## This collect all heap memory usage metrics.
  2934. #   [[inputs.jolokia.metrics]]
  2935. #     name = "heap_memory_usage"
  2936. #     mbean  = "java.lang:type=Memory"
  2937. #     attribute = "HeapMemoryUsage"
  2938. #
  2939. #   ## This collect thread counts metrics.
  2940. #   [[inputs.jolokia.metrics]]
  2941. #     name = "thread_count"
  2942. #     mbean  = "java.lang:type=Threading"
  2943. #     attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
  2944. #
  2945. #   ## This collect number of class loaded/unloaded counts metrics.
  2946. #   [[inputs.jolokia.metrics]]
  2947. #     name = "class_count"
  2948. #     mbean  = "java.lang:type=ClassLoading"
  2949. #     attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
  2950.  
  2951.  
  2952. # # Read JMX metrics from a Jolokia REST agent endpoint
  2953. # [[inputs.jolokia2_agent]]
  2954. #   # default_tag_prefix      = ""
  2955. #   # default_field_prefix    = ""
  2956. #   # default_field_separator = "."
  2957. #
  2958. #   # Add agents URLs to query
  2959. #   urls = ["http://localhost:8080/jolokia"]
  2960. #   # username = ""
  2961. #   # password = ""
  2962. #   # response_timeout = "5s"
  2963. #
  2964. #   ## Optional TLS config
  2965. #   # tls_ca   = "/var/private/ca.pem"
  2966. #   # tls_cert = "/var/private/client.pem"
  2967. #   # tls_key  = "/var/private/client-key.pem"
  2968. #   # insecure_skip_verify = false
  2969. #
  2970. #   ## Add metrics to read
  2971. #   [[inputs.jolokia2_agent.metric]]
  2972. #     name  = "java_runtime"
  2973. #     mbean = "java.lang:type=Runtime"
  2974. #     paths = ["Uptime"]
  2975.  
  2976.  
  2977. # # Read JMX metrics from a Jolokia REST proxy endpoint
  2978. # [[inputs.jolokia2_proxy]]
  2979. #   # default_tag_prefix      = ""
  2980. #   # default_field_prefix    = ""
  2981. #   # default_field_separator = "."
  2982. #
  2983. #   ## Proxy agent
  2984. #   url = "http://localhost:8080/jolokia"
  2985. #   # username = ""
  2986. #   # password = ""
  2987. #   # response_timeout = "5s"
  2988. #
  2989. #   ## Optional TLS config
  2990. #   # tls_ca   = "/var/private/ca.pem"
  2991. #   # tls_cert = "/var/private/client.pem"
  2992. #   # tls_key  = "/var/private/client-key.pem"
  2993. #   # insecure_skip_verify = false
  2994. #
  2995. #   ## Add proxy targets to query
  2996. #   # default_target_username = ""
  2997. #   # default_target_password = ""
  2998. #   [[inputs.jolokia2_proxy.target]]
  2999. #     url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
  3000. #     # username = ""
  3001. #     # password = ""
  3002. #
  3003. #   ## Add metrics to read
  3004. #   [[inputs.jolokia2_proxy.metric]]
  3005. #     name  = "java_runtime"
  3006. #     mbean = "java.lang:type=Runtime"
  3007. #     paths = ["Uptime"]
  3008.  
  3009.  
  3010. # # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints
  3011. # [[inputs.kapacitor]]
  3012. #   ## Multiple URLs from which to read Kapacitor-formatted JSON
  3013. #   ## Default is "http://localhost:9092/kapacitor/v1/debug/vars".
  3014. #   urls = [
  3015. #     "http://localhost:9092/kapacitor/v1/debug/vars"
  3016. #   ]
  3017. #
  3018. #   ## Time limit for http requests
  3019. #   timeout = "5s"
  3020. #
  3021. #   ## Optional TLS Config
  3022. #   # tls_ca = "/etc/telegraf/ca.pem"
  3023. #   # tls_cert = "/etc/telegraf/cert.pem"
  3024. #   # tls_key = "/etc/telegraf/key.pem"
  3025. #   ## Use TLS but skip chain & host verification
  3026. #   # insecure_skip_verify = false
  3027.  
  3028.  
  3029. # # Get kernel statistics from /proc/vmstat
  3030. # [[inputs.kernel_vmstat]]
  3031. #   # no configuration
  3032.  
  3033.  
  3034. # # Read status information from one or more Kibana servers
  3035. # [[inputs.kibana]]
  3036. #   ## specify a list of one or more Kibana servers
  3037. #   servers = ["http://localhost:5601"]
  3038. #
  3039. #   ## Timeout for HTTP requests
  3040. #   timeout = "5s"
  3041. #
  3042. #   ## HTTP Basic Auth credentials
  3043. #   # username = "username"
  3044. #   # password = "pa$$word"
  3045. #
  3046. #   ## Optional TLS Config
  3047. #   # tls_ca = "/etc/telegraf/ca.pem"
  3048. #   # tls_cert = "/etc/telegraf/cert.pem"
  3049. #   # tls_key = "/etc/telegraf/key.pem"
  3050. #   ## Use TLS but skip chain & host verification
  3051. #   # insecure_skip_verify = false
  3052.  
  3053.  
  3054. # # Read metrics from the Kubernetes api
  3055. # [[inputs.kube_inventory]]
  3056. #   ## URL for the Kubernetes API
  3057. #   url = "https://127.0.0.1"
  3058. #
  3059. #   ## Namespace to use. Set to "" to use all namespaces.
  3060. #   # namespace = "default"
  3061. #
  3062. #   ## Use bearer token for authorization. ('bearer_token' takes priority)
  3063. #   # bearer_token = "/path/to/bearer/token"
  3064. #   ## OR
  3065. #   # bearer_token_string = "abc_123"
  3066. #
  3067. #   ## Set response_timeout (default 5 seconds)
  3068. #   # response_timeout = "5s"
  3069. #
  3070. #   ## Optional Resources to exclude from gathering
  3071. #   ## Leave them blank to gather everything available.
  3072. #   ## Values can be - "daemonsets", "deployments", "endpoints", "ingress", "nodes",
  3073. #   ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets"
  3074. #   # resource_exclude = [ "deployments", "nodes", "statefulsets" ]
  3075. #
  3076. #   ## Optional Resources to include when gathering
  3077. #   ## Overrides resource_exclude if both set.
  3078. #   # resource_include = [ "deployments", "nodes", "statefulsets" ]
  3079. #
  3080. #   ## Optional TLS Config
  3081. #   # tls_ca = "/path/to/cafile"
  3082. #   # tls_cert = "/path/to/certfile"
  3083. #   # tls_key = "/path/to/keyfile"
  3084. #   ## Use TLS but skip chain & host verification
  3085. #   # insecure_skip_verify = false
  3086.  
  3087.  
  3088. # # Read metrics from the kubernetes kubelet api
  3089. # [[inputs.kubernetes]]
  3090. #   ## URL for the kubelet
  3091. #   url = "http://127.0.0.1:10255"
  3092. #
  3093. #   ## Use bearer token for authorization. ('bearer_token' takes priority)
  3094. #   # bearer_token = "/path/to/bearer/token"
  3095. #   ## OR
  3096. #   # bearer_token_string = "abc_123"
  3097. #
  3098. #   ## Set response_timeout (default 5 seconds)
  3099. #   # response_timeout = "5s"
  3100. #
  3101. #   ## Optional TLS Config
  3102. #   # tls_ca = "/path/to/cafile"
  3103. #   # tls_cert = "/path/to/certfile"
  3104. #   # tls_key = "/path/to/keyfile"
  3105. #   ## Use TLS but skip chain & host verification
  3106. #   # insecure_skip_verify = false
  3107.  
  3108.  
  3109. # # Read metrics from a LeoFS Server via SNMP
  3110. # [[inputs.leofs]]
  3111. #   ## An array of URLs of the form:
  3112. #   ##   host [ ":" port]
  3113. #   servers = ["127.0.0.1:4020"]
  3114.  
  3115.  
  3116. # # Provides Linux sysctl fs metrics
  3117. # [[inputs.linux_sysctl_fs]]
  3118. #   # no configuration
  3119.  
  3120.  
  3121. # # Read metrics exposed by Logstash
  3122. # [[inputs.logstash]]
  3123. #   ## The URL of the exposed Logstash API endpoint.
  3124. #   url = "http://127.0.0.1:9600"
  3125. #
  3126. #   ## Use Logstash 5 single pipeline API, set to true when monitoring
  3127. #   ## Logstash 5.
  3128. #   # single_pipeline = false
  3129. #
  3130. #   ## Enable optional collection components.  Can contain
  3131. #   ## "pipelines", "process", and "jvm".
  3132. #   # collect = ["pipelines", "process", "jvm"]
  3133. #
  3134. #   ## Timeout for HTTP requests.
  3135. #   # timeout = "5s"
  3136. #
  3137. #   ## Optional HTTP Basic Auth credentials.
  3138. #   # username = "username"
  3139. #   # password = "pa$$word"
  3140. #
  3141. #   ## Optional TLS Config.
  3142. #   # tls_ca = "/etc/telegraf/ca.pem"
  3143. #   # tls_cert = "/etc/telegraf/cert.pem"
  3144. #   # tls_key = "/etc/telegraf/key.pem"
  3145. #
  3146. #   ## Use TLS but skip chain & host verification.
  3147. #   # insecure_skip_verify = false
  3148. #
  3149. #   ## Optional HTTP headers.
  3150. #   # [inputs.logstash.headers]
  3151. #   #   "X-Special-Header" = "Special-Value"
  3152.  
  3153.  
  3154. # # Read metrics from local Lustre service on OST, MDS
  3155. # [[inputs.lustre2]]
  3156. #   ## An array of /proc globs to search for Lustre stats
  3157. #   ## If not specified, the default will work on Lustre 2.5.x
  3158. #   ##
  3159. #   # ost_procfiles = [
  3160. #   #   "/proc/fs/lustre/obdfilter/*/stats",
  3161. #   #   "/proc/fs/lustre/osd-ldiskfs/*/stats",
  3162. #   #   "/proc/fs/lustre/obdfilter/*/job_stats",
  3163. #   # ]
  3164. #   # mds_procfiles = [
  3165. #   #   "/proc/fs/lustre/mdt/*/md_stats",
  3166. #   #   "/proc/fs/lustre/mdt/*/job_stats",
  3167. #   # ]
  3168.  
  3169.  
  3170. # # Gathers metrics from the /3.0/reports MailChimp API
  3171. # [[inputs.mailchimp]]
  3172. #   ## MailChimp API key
  3173. #   ## get from https://admin.mailchimp.com/account/api/
  3174. #   api_key = "" # required
  3175. #   ## Reports for campaigns sent more than days_old ago will not be collected.
  3176. #   ## 0 means collect all.
  3177. #   days_old = 0
  3178. #   ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old
  3179. #   # campaign_id = ""
  3180.  
  3181.  
  3182. # # Retrieves information on a specific host in a MarkLogic Cluster
  3183. # [[inputs.marklogic]]
  3184. #   ## Base URL of the MarkLogic HTTP Server.
  3185. #   url = "http://localhost:8002"
  3186. #
  3187. #   ## List of specific hostnames to retrieve information. At least (1) required.
  3188. #   # hosts = ["hostname1", "hostname2"]
  3189. #
  3190. #   ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges
  3191. #   # username = "myuser"
  3192. #   # password = "mypassword"
  3193. #
  3194. #   ## Optional TLS Config
  3195. #   # tls_ca = "/etc/telegraf/ca.pem"
  3196. #   # tls_cert = "/etc/telegraf/cert.pem"
  3197. #   # tls_key = "/etc/telegraf/key.pem"
  3198. #   ## Use TLS but skip chain & host verification
  3199. #   # insecure_skip_verify = false
  3200.  
  3201.  
  3202. # # Read metrics from one or many mcrouter servers
  3203. # [[inputs.mcrouter]]
  3204. #   ## An array of addresses to gather stats about. Specify an ip or hostname
  3205. #   ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc.
  3206. #   servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"]
  3207. #
  3208. #   ## Timeout for metric collections from all servers.  Minimum timeout is "1s".
  3209. #   # timeout = "5s"
  3210.  
  3211.  
  3212. # # Read metrics from one or many memcached servers
  3213. # [[inputs.memcached]]
  3214. #   ## An array of addresses to gather stats about. Specify an ip or hostname
  3215. #   ## with optional port. ie localhost, 10.0.0.1:11211, etc.
  3216. #   servers = ["localhost:11211"]
  3217. #   # unix_sockets = ["/var/run/memcached.sock"]
  3218.  
  3219.  
  3220. # # Telegraf plugin for gathering metrics from N Mesos masters
  3221. # [[inputs.mesos]]
  3222. #   ## Timeout, in ms.
  3223. #   timeout = 100
  3224. #   ## A list of Mesos masters.
  3225. #   masters = ["http://localhost:5050"]
  3226. #   ## Master metrics groups to be collected, by default, all enabled.
  3227. #   master_collections = [
  3228. #     "resources",
  3229. #     "master",
  3230. #     "system",
  3231. #     "agents",
  3232. #     "frameworks",
  3233. #     "framework_offers",
  3234. #     "tasks",
  3235. #     "messages",
  3236. #     "evqueue",
  3237. #     "registrar",
  3238. #     "allocator",
  3239. #   ]
  3240. #   ## A list of Mesos slaves, default is []
  3241. #   # slaves = []
  3242. #   ## Slave metrics groups to be collected, by default, all enabled.
  3243. #   # slave_collections = [
  3244. #   #   "resources",
  3245. #   #   "agent",
  3246. #   #   "system",
  3247. #   #   "executors",
  3248. #   #   "tasks",
  3249. #   #   "messages",
  3250. #   # ]
  3251. #
  3252. #   ## Optional TLS Config
  3253. #   # tls_ca = "/etc/telegraf/ca.pem"
  3254. #   # tls_cert = "/etc/telegraf/cert.pem"
  3255. #   # tls_key = "/etc/telegraf/key.pem"
  3256. #   ## Use TLS but skip chain & host verification
  3257. #   # insecure_skip_verify = false
  3258.  
  3259.  
  3260. # # Collects scores from a Minecraft server's scoreboard using the RCON protocol
  3261. # [[inputs.minecraft]]
  3262. #   ## Address of the Minecraft server.
  3263. #   # server = "localhost"
  3264. #
  3265. #   ## Server RCON Port.
  3266. #   # port = "25575"
  3267. #
  3268. #   ## Server RCON Password.
  3269. #   password = ""
  3270. #
  3271. #   ## Uncomment to remove deprecated metric components.
  3272. #   # tagdrop = ["server"]
  3273.  
  3274.  
  3275. # # Read metrics from one or many MongoDB servers
  3276. # [[inputs.mongodb]]
  3277. #   ## An array of URLs of the form:
  3278. #   ##   "mongodb://" [user ":" pass "@"] host [ ":" port]
  3279. #   ## For example:
  3280. #   ##   mongodb://user:auth_key@10.10.3.30:27017,
  3281. #   ##   mongodb://10.10.3.33:18832,
  3282. #   servers = ["mongodb://127.0.0.1:27017"]
  3283. #
  3284. #   ## When true, collect per database stats
  3285. #   # gather_perdb_stats = false
  3286. #
  3287. #   ## When true, collect per collection stats
  3288. #   # gather_col_stats = false
  3289. #
  3290. #   ## List of db where collections stats are collected
  3291. #   ## If empty, all db are concerned
  3292. #   # col_stats_dbs = ["local"]
  3293. #
  3294. #   ## Optional TLS Config
  3295. #   # tls_ca = "/etc/telegraf/ca.pem"
  3296. #   # tls_cert = "/etc/telegraf/cert.pem"
  3297. #   # tls_key = "/etc/telegraf/key.pem"
  3298. #   ## Use TLS but skip chain & host verification
  3299. #   # insecure_skip_verify = false
  3300.  
  3301.  
  3302. # # Aggregates the contents of multiple files into a single point
  3303. # [[inputs.multifile]]
  3304. #   ## Base directory where telegraf will look for files.
  3305. #   ## Omit this option to use absolute paths.
  3306. #   base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0"
  3307. #
  3308. #   ## If true, Telegraf discard all data when a single file can't be read.
  3309. #   ## Else, Telegraf omits the field generated from this file.
  3310. #   # fail_early = true
  3311. #
  3312. #   ## Files to parse each interval.
  3313. #   [[inputs.multifile.file]]
  3314. #     file = "in_pressure_input"
  3315. #     dest = "pressure"
  3316. #     conversion = "float"
  3317. #   [[inputs.multifile.file]]
  3318. #     file = "in_temp_input"
  3319. #     dest = "temperature"
  3320. #     conversion = "float(3)"
  3321. #   [[inputs.multifile.file]]
  3322. #     file = "in_humidityrelative_input"
  3323. #     dest = "humidityrelative"
  3324. #     conversion = "float(3)"
  3325.  
  3326.  
  3327. # # Read metrics from one or many mysql servers
  3328. # [[inputs.mysql]]
  3329. #   ## specify servers via a url matching:
  3330. #   ##  [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
  3331. #   ##  see https://github.com/go-sql-driver/mysql#dsn-data-source-name
  3332. #   ##  e.g.
  3333. #   ##    servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
  3334. #   ##    servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
  3335. #   #
  3336. #   ## If no servers are specified, then localhost is used as the host.
  3337. #   servers = ["tcp(127.0.0.1:3306)/"]
  3338. #
  3339. #   ## Selects the metric output format.
  3340. #   ##
  3341. #   ## This option exists to maintain backwards compatibility, if you have
  3342. #   ## existing metrics do not set or change this value until you are ready to
  3343. #   ## migrate to the new format.
  3344. #   ##
  3345. #   ## If you do not have existing metrics from this plugin set to the latest
  3346. #   ## version.
  3347. #   ##
  3348. #   ## Telegraf >=1.6: metric_version = 2
  3349. #   ##           <1.6: metric_version = 1 (or unset)
  3350. #   metric_version = 2
  3351. #
  3352. #   ## the limits for metrics form perf_events_statements
  3353. #   perf_events_statements_digest_text_limit  = 120
  3354. #   perf_events_statements_limit              = 250
  3355. #   perf_events_statements_time_limit         = 86400
  3356. #   #
  3357. #   ## if the list is empty, then metrics are gathered from all database tables
  3358. #   table_schema_databases                    = []
  3359. #   #
  3360. #   ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list
  3361. #   gather_table_schema                       = false
  3362. #   #
  3363. #   ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
  3364. #   gather_process_list                       = true
  3365. #   #
  3366. #   ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
  3367. #   gather_user_statistics                    = true
  3368. #   #
  3369. #   ## gather auto_increment columns and max values from information schema
  3370. #   gather_info_schema_auto_inc               = true
  3371. #   #
  3372. #   ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
  3373. #   gather_innodb_metrics                     = true
  3374. #   #
  3375. #   ## gather metrics from SHOW SLAVE STATUS command output
  3376. #   gather_slave_status                       = true
  3377. #   #
  3378. #   ## gather metrics from SHOW BINARY LOGS command output
  3379. #   gather_binary_logs                        = false
  3380. #   #
  3381. #   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
  3382. #   gather_table_io_waits                     = false
  3383. #   #
  3384. #   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
  3385. #   gather_table_lock_waits                   = false
  3386. #   #
  3387. #   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
  3388. #   gather_index_io_waits                     = false
  3389. #   #
  3390. #   ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
  3391. #   gather_event_waits                        = false
  3392. #   #
  3393. #   ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
  3394. #   gather_file_events_stats                  = false
  3395. #   #
  3396. #   ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
  3397. #   gather_perf_events_statements             = false
  3398. #   #
  3399. #   ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
  3400. #   interval_slow                   = "30m"
  3401. #
  3402. #   ## Optional TLS Config (will be used if tls=custom parameter specified in server uri)
  3403. #   # tls_ca = "/etc/telegraf/ca.pem"
  3404. #   # tls_cert = "/etc/telegraf/cert.pem"
  3405. #   # tls_key = "/etc/telegraf/key.pem"
  3406. #   ## Use TLS but skip chain & host verification
  3407. #   # insecure_skip_verify = false
  3408.  
  3409.  
  3410. # # Provides metrics about the state of a NATS server
  3411. # [[inputs.nats]]
  3412. #   ## The address of the monitoring endpoint of the NATS server
  3413. #   server = "http://localhost:8222"
  3414. #
  3415. #   ## Maximum time to receive response
  3416. #   # response_timeout = "5s"
  3417.  
  3418.  
  3419. # # Neptune Apex data collector
  3420. # [[inputs.neptune_apex]]
  3421. #   ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex.
  3422. #   ## Measurements will be logged under "apex".
  3423. #
  3424. #   ## The base URL of the local Apex(es). If you specify more than one server, they will
  3425. #   ## be differentiated by the "source" tag.
  3426. #   servers = [
  3427. #     "http://apex.local",
  3428. #   ]
  3429. #
  3430. #   ## The response_timeout specifies how long to wait for a reply from the Apex.
  3431. #   #response_timeout = "5s"
  3432.  
  3433.  
  3434. # Read metrics about network interface usage
  3435. [[inputs.net]]
  3436.   ## By default, telegraf gathers stats from any up interface (excluding loopback)
  3437.   ## Setting interfaces will tell it to gather these explicit interfaces,
  3438.   ## regardless of status.
  3439.   ##
  3440.   # interfaces = ["eth0"]
  3441.   ##
  3442.   ## On linux systems telegraf also collects protocol stats.
  3443.   ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
  3444.   ##
  3445.   # ignore_protocol_stats = false
  3446.   ##
  3447.  
  3448.  
  3449. # # Collect response time of a TCP or UDP connection
  3450. # [[inputs.net_response]]
  3451. #   ## Protocol, must be "tcp" or "udp"
  3452. #   ## NOTE: because the "udp" protocol does not respond to requests, it requires
  3453. #   ## a send/expect string pair (see below).
  3454. #   protocol = "tcp"
  3455. #   ## Server address (default localhost)
  3456. #   address = "localhost:80"
  3457. #
  3458. #   ## Set timeout
  3459. #   # timeout = "1s"
  3460. #
  3461. #   ## Set read timeout (only used if expecting a response)
  3462. #   # read_timeout = "1s"
  3463. #
  3464. #   ## The following options are required for UDP checks. For TCP, they are
  3465. #   ## optional. The plugin will send the given string to the server and then
  3466. #   ## expect to receive the given 'expect' string back.
  3467. #   ## string sent to the server
  3468. #   # send = "ssh"
  3469. #   ## expected string in answer
  3470. #   # expect = "ssh"
  3471. #
  3472. #   ## Uncomment to remove deprecated fields
  3473. #   # fielddrop = ["result_type", "string_found"]
  3474.  
  3475.  
  3476. # Read TCP metrics such as established, time wait and sockets counts.
  3477. [[inputs.netstat]]
  3478.   # no configuration
  3479.  
  3480.  
  3481. # # Read Nginx's basic status information (ngx_http_stub_status_module)
  3482. # [[inputs.nginx]]
  3483. #   # An array of Nginx stub_status URI to gather stats.
  3484. #   urls = ["http://localhost/server_status"]
  3485. #
  3486. #   ## Optional TLS Config
  3487. #   tls_ca = "/etc/telegraf/ca.pem"
  3488. #   tls_cert = "/etc/telegraf/cert.cer"
  3489. #   tls_key = "/etc/telegraf/key.key"
  3490. #   ## Use TLS but skip chain & host verification
  3491. #   insecure_skip_verify = false
  3492. #
  3493. #   # HTTP response timeout (default: 5s)
  3494. #   response_timeout = "5s"
  3495.  
  3496.  
  3497. # # Read Nginx Plus' full status information (ngx_http_status_module)
  3498. # [[inputs.nginx_plus]]
  3499. #   ## An array of ngx_http_status_module or status URI to gather stats.
  3500. #   urls = ["http://localhost/status"]
  3501. #
  3502. #   # HTTP response timeout (default: 5s)
  3503. #   response_timeout = "5s"
  3504. #
  3505. #   ## Optional TLS Config
  3506. #   # tls_ca = "/etc/telegraf/ca.pem"
  3507. #   # tls_cert = "/etc/telegraf/cert.pem"
  3508. #   # tls_key = "/etc/telegraf/key.pem"
  3509. #   ## Use TLS but skip chain & host verification
  3510. #   # insecure_skip_verify = false
  3511.  
  3512.  
  3513. # # Read Nginx Plus Api documentation
  3514. # [[inputs.nginx_plus_api]]
  3515. #   ## An array of API URI to gather stats.
  3516. #   urls = ["http://localhost/api"]
  3517. #
  3518. #   # Nginx API version, default: 3
  3519. #   # api_version = 3
  3520. #
  3521. #   # HTTP response timeout (default: 5s)
  3522. #   response_timeout = "5s"
  3523. #
  3524. #   ## Optional TLS Config
  3525. #   # tls_ca = "/etc/telegraf/ca.pem"
  3526. #   # tls_cert = "/etc/telegraf/cert.pem"
  3527. #   # tls_key = "/etc/telegraf/key.pem"
  3528. #   ## Use TLS but skip chain & host verification
  3529. #   # insecure_skip_verify = false
  3530.  
  3531.  
  3532. # # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module)
  3533. # [[inputs.nginx_upstream_check]]
  3534. #   ## An URL where Nginx Upstream check module is enabled
  3535. #   ## It should be set to return a JSON formatted response
  3536. #   url = "http://127.0.0.1/status?format=json"
  3537. #
  3538. #   ## HTTP method
  3539. #   # method = "GET"
  3540. #
  3541. #   ## Optional HTTP headers
  3542. #   # headers = {"X-Special-Header" = "Special-Value"}
  3543. #
  3544. #   ## Override HTTP "Host" header
  3545. #   # host_header = "check.example.com"
  3546. #
  3547. #   ## Timeout for HTTP requests
  3548. #   timeout = "5s"
  3549. #
  3550. #   ## Optional HTTP Basic Auth credentials
  3551. #   # username = "username"
  3552. #   # password = "pa$$word"
  3553. #
  3554. #   ## Optional TLS Config
  3555. #   # tls_ca = "/etc/telegraf/ca.pem"
  3556. #   # tls_cert = "/etc/telegraf/cert.pem"
  3557. #   # tls_key = "/etc/telegraf/key.pem"
  3558. #   ## Use TLS but skip chain & host verification
  3559. #   # insecure_skip_verify = false
  3560.  
  3561.  
  3562. # # Read Nginx virtual host traffic status module information (nginx-module-vts)
  3563. # [[inputs.nginx_vts]]
  3564. #   ## An array of ngx_http_status_module or status URI to gather stats.
  3565. #   urls = ["http://localhost/status"]
  3566. #
  3567. #   ## HTTP response timeout (default: 5s)
  3568. #   response_timeout = "5s"
  3569. #
  3570. #   ## Optional TLS Config
  3571. #   # tls_ca = "/etc/telegraf/ca.pem"
  3572. #   # tls_cert = "/etc/telegraf/cert.pem"
  3573. #   # tls_key = "/etc/telegraf/key.pem"
  3574. #   ## Use TLS but skip chain & host verification
  3575. #   # insecure_skip_verify = false
  3576.  
  3577.  
  3578. # # Read NSQ topic and channel statistics.
  3579. # [[inputs.nsq]]
  3580. #   ## An array of NSQD HTTP API endpoints
  3581. #   endpoints  = ["http://localhost:4151"]
  3582. #
  3583. #   ## Optional TLS Config
  3584. #   # tls_ca = "/etc/telegraf/ca.pem"
  3585. #   # tls_cert = "/etc/telegraf/cert.pem"
  3586. #   # tls_key = "/etc/telegraf/key.pem"
  3587. #   ## Use TLS but skip chain & host verification
  3588. #   # insecure_skip_verify = false
  3589.  
  3590.  
  3591. # # Collect kernel snmp counters and network interface statistics
  3592. # [[inputs.nstat]]
  3593. #   ## file paths for proc files. If empty default paths will be used:
  3594. #   ##    /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
  3595. #   ## These can also be overridden with env variables, see README.
  3596. #   proc_net_netstat = "/proc/net/netstat"
  3597. #   proc_net_snmp = "/proc/net/snmp"
  3598. #   proc_net_snmp6 = "/proc/net/snmp6"
  3599. #   ## dump metrics with 0 values too
  3600. #   dump_zeros       = true
  3601.  
  3602.  
  3603. # # Get standard NTP query metrics, requires ntpq executable.
  3604. # [[inputs.ntpq]]
  3605. #   ## If false, set the -n ntpq flag. Can reduce metric gather time.
  3606. #   dns_lookup = true
  3607.  
  3608.  
  3609. # # Pulls statistics from nvidia GPUs attached to the host
  3610. # [[inputs.nvidia_smi]]
  3611. #   ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath
  3612. #   # bin_path = "/usr/bin/nvidia-smi"
  3613. #
  3614. #   ## Optional: timeout for GPU polling
  3615. #   # timeout = "5s"
  3616.  
  3617.  
  3618. # # OpenLDAP cn=Monitor plugin
  3619. # [[inputs.openldap]]
  3620. #   host = "localhost"
  3621. #   port = 389
  3622. #
  3623. #   # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
  3624. #   # note that port will likely need to be changed to 636 for ldaps
  3625. #   # valid options: "" | "starttls" | "ldaps"
  3626. #   tls = ""
  3627. #
  3628. #   # skip peer certificate verification. Default is false.
  3629. #   insecure_skip_verify = false
  3630. #
  3631. #   # Path to PEM-encoded Root certificate to use to verify server certificate
  3632. #   tls_ca = "/etc/ssl/certs.pem"
  3633. #
  3634. #   # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
  3635. #   bind_dn = ""
  3636. #   bind_password = ""
  3637. #
  3638. #   # Reverse metric names so they sort more naturally. Recommended.
  3639. #   # This defaults to false if unset, but is set to true when generating a new config
  3640. #   reverse_metric_names = true
  3641.  
  3642.  
  3643. # # Get standard NTP query metrics from OpenNTPD.
  3644. # [[inputs.openntpd]]
  3645. #   ## Run ntpctl binary with sudo.
  3646. #   # use_sudo = false
  3647. #
  3648. #   ## Location of the ntpctl binary.
  3649. #   # binary = "/usr/sbin/ntpctl"
  3650. #
  3651. #   ## Maximum time the ntpctl binary is allowed to run.
  3652. #   # timeout = "5ms"
  3653.  
  3654.  
  3655. # # A plugin to collect stats from OpenSMTPD - a free implementation of the server-side SMTP protocol
  3656. # [[inputs.opensmtpd]]
  3657. #   ## If running as a restricted user you can prepend sudo for additional access:
  3658. #   #use_sudo = false
  3659. #
  3660. #   ## The default location of the smtpctl binary can be overridden with:
  3661. #   binary = "/usr/sbin/smtpctl"
  3662. #
  3663. #   ## The default timeout of 1000ms can be overridden with (in milliseconds):
  3664. #   timeout = 1000
  3665.  
  3666.  
  3667. # # Read current weather and forecasts data from openweathermap.org
  3668. # [[inputs.openweathermap]]
  3669. #   ## OpenWeatherMap API key.
  3670. #   app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
  3671. #
  3672. #   ## City ID's to collect weather data from.
  3673. #   city_id = ["5391959"]
  3674. #
  3675. #   ## APIs to fetch; can contain "weather" or "forecast".
  3676. #   fetch = ["weather", "forecast"]
  3677. #
  3678. #   ## OpenWeatherMap base URL
  3679. #   # base_url = "https://api.openweathermap.org/"
  3680. #
  3681. #   ## Timeout for HTTP response.
  3682. #   # response_timeout = "5s"
  3683. #
  3684. #   ## Preferred unit system for temperature and wind speed. Can be one of
  3685. #   ## "metric", "imperial", or "standard".
  3686. #   # units = "metric"
  3687. #
  3688. #   ## Query interval; OpenWeatherMap updates their weather data every 10
  3689. #   ## minutes.
  3690. #   interval = "10m"
  3691.  
  3692.  
  3693. # # Read metrics of passenger using passenger-status
  3694. # [[inputs.passenger]]
  3695. #   ## Path of passenger-status.
  3696. #   ##
  3697. #   ## Plugin gather metric via parsing XML output of passenger-status
  3698. #   ## More information about the tool:
  3699. #   ##   https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
  3700. #   ##
  3701. #   ## If no path is specified, then the plugin simply execute passenger-status
  3702. #   ## hopefully it can be found in your PATH
  3703. #   command = "passenger-status -v --show=xml"
  3704.  
  3705.  
  3706. # # Gather counters from PF
  3707. # [[inputs.pf]]
  3708. #   ## PF require root access on most systems.
  3709. #   ## Setting 'use_sudo' to true will make use of sudo to run pfctl.
  3710. #   ## Users must configure sudo to allow telegraf user to run pfctl with no password.
  3711. #   ## pfctl can be restricted to only list command "pfctl -s info".
  3712. #   use_sudo = false
  3713.  
  3714.  
  3715. # # Read metrics of phpfpm, via HTTP status page or socket
  3716. # [[inputs.phpfpm]]
  3717. #   ## An array of addresses to gather stats about. Specify an ip or hostname
  3718. #   ## with optional port and path
  3719. #   ##
  3720. #   ## Plugin can be configured in three modes (either can be used):
  3721. #   ##   - http: the URL must start with http:// or https://, ie:
  3722. #   ##       "http://localhost/status"
  3723. #   ##       "http://192.168.130.1/status?full"
  3724. #   ##
  3725. #   ##   - unixsocket: path to fpm socket, ie:
  3726. #   ##       "/var/run/php5-fpm.sock"
  3727. #   ##      or using a custom fpm status path:
  3728. #   ##       "/var/run/php5-fpm.sock:fpm-custom-status-path"
  3729. #   ##
  3730. #   ##   - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
  3731. #   ##       "fcgi://10.0.0.12:9000/status"
  3732. #   ##       "cgi://10.0.10.12:9001/status"
  3733. #   ##
  3734. #   ## Example of multiple gathering from local socket and remote host
  3735. #   ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
  3736. #   urls = ["http://localhost/status"]
  3737. #
  3738. #   ## Duration allowed to complete HTTP requests.
  3739. #   # timeout = "5s"
  3740. #
  3741. #   ## Optional TLS Config
  3742. #   # tls_ca = "/etc/telegraf/ca.pem"
  3743. #   # tls_cert = "/etc/telegraf/cert.pem"
  3744. #   # tls_key = "/etc/telegraf/key.pem"
  3745. #   ## Use TLS but skip chain & host verification
  3746. #   # insecure_skip_verify = false
  3747.  
  3748.  
  3749. # # Ping given url(s) and return statistics
  3750. # [[inputs.ping]]
  3751. #   ## List of urls to ping
  3752. #   urls = ["example.org"]
  3753. #
  3754. #   ## Number of pings to send per collection (ping -c <COUNT>)
  3755. #   # count = 1
  3756. #
  3757. #   ## Interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
  3758. #   # ping_interval = 1.0
  3759. #
  3760. #   ## Per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
  3761. #   # timeout = 1.0
  3762. #
  3763. #   ## Total-ping deadline, in s. 0 == no deadline (ping -w <DEADLINE>)
  3764. #   # deadline = 10
  3765. #
  3766. #   ## Interface or source address to send ping from (ping -I[-S] <INTERFACE/SRC_ADDR>)
  3767. #   # interface = ""
  3768. #
  3769. #   ## How to ping. "native" doesn't have external dependencies, while "exec" depends on 'ping'.
  3770. #   # method = "exec"
  3771. #
  3772. #   ## Specify the ping executable binary, default is "ping"
  3773. #   # binary = "ping"
  3774. #
  3775. #   ## Arguments for ping command. When arguments is not empty, system binary will be used and
  3776. #   ## other options (ping_interval, timeout, etc) will be ignored.
  3777. #   # arguments = ["-c", "3"]
  3778. #
  3779. #   ## Use only ipv6 addresses when resolving hostnames.
  3780. #   # ipv6 = false
  3781.  
  3782.  
  3783. # # Measure postfix queue statistics
  3784. # [[inputs.postfix]]
  3785. #   ## Postfix queue directory. If not provided, telegraf will try to use
  3786. #   ## 'postconf -h queue_directory' to determine it.
  3787. #   # queue_directory = "/var/spool/postfix"
  3788.  
  3789.  
  3790. # # Read metrics from one or many PowerDNS servers
  3791. # [[inputs.powerdns]]
  3792. #   ## An array of sockets to gather stats about.
  3793. #   ## Specify a path to unix socket.
  3794. #   unix_sockets = ["/var/run/pdns.controlsocket"]
  3795.  
  3796.  
  3797. # # Read metrics from one or many PowerDNS Recursor servers
  3798. # [[inputs.powerdns_recursor]]
  3799. #   ## Path to the Recursor control socket.
  3800. #   unix_sockets = ["/var/run/pdns_recursor.controlsocket"]
  3801. #
  3802. #   ## Directory to create receive socket.  This default is likely not writable,
  3803. #   ## please reference the full plugin documentation for a recommended setup.
  3804. #   # socket_dir = "/var/run/"
  3805. #   ## Socket permissions for the receive socket.
  3806. #   # socket_mode = "0666"
  3807.  
  3808.  
  3809. # # Monitor process cpu and memory usage
  3810. # [[inputs.procstat]]
  3811. #   ## PID file to monitor process
  3812. #   pid_file = "/var/run/nginx.pid"
  3813. #   ## executable name (ie, pgrep <exe>)
  3814. #   # exe = "nginx"
  3815. #   ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
  3816. #   # pattern = "nginx"
  3817. #   ## user as argument for pgrep (ie, pgrep -u <user>)
  3818. #   # user = "nginx"
  3819. #   ## Systemd unit name
  3820. #   # systemd_unit = "nginx.service"
  3821. #   ## CGroup name or path
  3822. #   # cgroup = "systemd/system.slice/nginx.service"
  3823. #
  3824. #   ## Windows service name
  3825. #   # win_service = ""
  3826. #
  3827. #   ## override for process_name
  3828. #   ## This is optional; default is sourced from /proc/<pid>/status
  3829. #   # process_name = "bar"
  3830. #
  3831. #   ## Field name prefix
  3832. #   # prefix = ""
  3833. #
  3834. #   ## When true add the full cmdline as a tag.
  3835. #   # cmdline_tag = false
  3836. #
  3837. #   ## Add PID as a tag instead of a field; useful to differentiate between
  3838. #   ## processes whose tags are otherwise the same.  Can create a large number
  3839. #   ## of series, use judiciously.
  3840. #   # pid_tag = false
  3841. #
  3842. #   ## Method to use when finding process IDs.  Can be one of 'pgrep', or
  3843. #   ## 'native'.  The pgrep finder calls the pgrep executable in the PATH while
  3844. #   ## the native finder performs the search directly in a manner dependent on the
  3845. #   ## platform.  Default is 'pgrep'
  3846. #   # pid_finder = "pgrep"
  3847.  
  3848.  
  3849. # # Reads last_run_summary.yaml file and converts to measurements
  3850. # [[inputs.puppetagent]]
  3851. #   ## Location of puppet last run summary file
  3852. #   location = "/var/lib/puppet/state/last_run_summary.yaml"
  3853.  
  3854.  
  3855. # # Reads metrics from RabbitMQ servers via the Management Plugin
  3856. # [[inputs.rabbitmq]]
  3857. #   ## Management Plugin url. (default: http://localhost:15672)
  3858. #   # url = "http://localhost:15672"
  3859. #   ## Tag added to rabbitmq_overview series; deprecated: use tags
  3860. #   # name = "rmq-server-1"
  3861. #   ## Credentials
  3862. #   # username = "guest"
  3863. #   # password = "guest"
  3864. #
  3865. #   ## Optional TLS Config
  3866. #   # tls_ca = "/etc/telegraf/ca.pem"
  3867. #   # tls_cert = "/etc/telegraf/cert.pem"
  3868. #   # tls_key = "/etc/telegraf/key.pem"
  3869. #   ## Use TLS but skip chain & host verification
  3870. #   # insecure_skip_verify = false
  3871. #
  3872. #   ## Optional request timeouts
  3873. #   ##
  3874. #   ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait
  3875. #   ## for a server's response headers after fully writing the request.
  3876. #   # header_timeout = "3s"
  3877. #   ##
  3878. #   ## client_timeout specifies a time limit for requests made by this client.
  3879. #   ## Includes connection time, any redirects, and reading the response body.
  3880. #   # client_timeout = "4s"
  3881. #
  3882. #   ## A list of nodes to gather as the rabbitmq_node measurement. If not
  3883. #   ## specified, metrics for all nodes are gathered.
  3884. #   # nodes = ["rabbit@node1", "rabbit@node2"]
  3885. #
  3886. #   ## A list of queues to gather as the rabbitmq_queue measurement. If not
  3887. #   ## specified, metrics for all queues are gathered.
  3888. #   # queues = ["telegraf"]
  3889. #
  3890. #   ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not
  3891. #   ## specified, metrics for all exchanges are gathered.
  3892. #   # exchanges = ["telegraf"]
  3893. #
  3894. #   ## Queues to include and exclude. Globs accepted.
  3895. #   ## Note that an empty array for both will include all queues
  3896. #   queue_name_include = []
  3897. #   queue_name_exclude = []
  3898.  
  3899.  
  3900. # # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
  3901. # [[inputs.raindrops]]
  3902. #   ## An array of raindrops middleware URI to gather stats.
  3903. #   urls = ["http://localhost:8080/_raindrops"]
  3904.  
  3905.  
  3906. # # Read metrics from one or many redis servers
  3907. # [[inputs.redis]]
  3908. #   ## specify servers via a url matching:
  3909. #   ##  [protocol://][:password]@address[:port]
  3910. #   ##  e.g.
  3911. #   ##    tcp://localhost:6379
  3912. #   ##    tcp://:password@192.168.99.100
  3913. #   ##    unix:///var/run/redis.sock
  3914. #   ##
  3915. #   ## If no servers are specified, then localhost is used as the host.
  3916. #   ## If no port is specified, 6379 is used
  3917. #   servers = ["tcp://localhost:6379"]
  3918. #
  3919. #   ## specify server password
  3920. #   # password = "s#cr@t%"
  3921. #
  3922. #   ## Optional TLS Config
  3923. #   # tls_ca = "/etc/telegraf/ca.pem"
  3924. #   # tls_cert = "/etc/telegraf/cert.pem"
  3925. #   # tls_key = "/etc/telegraf/key.pem"
  3926. #   ## Use TLS but skip chain & host verification
  3927. #   # insecure_skip_verify = true
  3928.  
  3929.  
  3930. # # Read metrics from one or many RethinkDB servers
  3931. # [[inputs.rethinkdb]]
  3932. #   ## An array of URI to gather stats about. Specify an ip or hostname
  3933. #   ## with optional port and password. ie,
  3934. #   ##   rethinkdb://user:auth_key@10.10.3.30:28105,
  3935. #   ##   rethinkdb://10.10.3.33:18832,
  3936. #   ##   10.0.0.1:10000, etc.
  3937. #   servers = ["127.0.0.1:28015"]
  3938. #   ##
  3939. #   ## If you use a RethinkDB version > 2.3.0 with username/password authorization,
  3940. #   ## the protocol has to be named "rethinkdb2" - it will use the 1_0 handshake protocol.
  3941. #   # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
  3942. #   ##
  3943. #   ## If you use older versions of rethinkdb (<2.2) with auth_key, the protocol
  3944. #   ## has to be named "rethinkdb".
  3945. #   # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]
  3946.  
  3947.  
  3948. # # Read metrics from one or many Riak servers
  3949. # [[inputs.riak]]
  3950. #   # Specify a list of one or more riak http servers
  3951. #   servers = ["http://localhost:8098"]
  3952.  
  3953.  
  3954. # # Read API usage and limits for a Salesforce organisation
  3955. # [[inputs.salesforce]]
  3956. #   ## specify your credentials
  3957. #   ##
  3958. #   username = "your_username"
  3959. #   password = "your_password"
  3960. #   ##
  3961. #   ## (optional) security token
  3962. #   # security_token = "your_security_token"
  3963. #   ##
  3964. #   ## (optional) environment type (sandbox or production)
  3965. #   ## default is: production
  3966. #   ##
  3967. #   # environment = "production"
  3968. #   ##
  3969. #   ## (optional) API version (default: "39.0")
  3970. #   ##
  3971. #   # version = "39.0"
  3972.  
  3973.  
  3974. # # Monitor sensors, requires lm-sensors package
  3975. # [[inputs.sensors]]
  3976. #   ## Remove numbers from field names.
  3977. #   ## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
  3978. #   # remove_numbers = true
  3979. #
  3980. #   ## Timeout is the maximum amount of time that the sensors command can run.
  3981. #   # timeout = "5s"
  3982.  
  3983.  
  3984. # # Read metrics from storage devices supporting S.M.A.R.T.
  3985. # [[inputs.smart]]
  3986. #   ## Optionally specify the path to the smartctl executable
  3987. #   # path = "/usr/bin/smartctl"
  3988. #
  3989. #   ## On most platforms smartctl requires root access.
  3990. #   ## Setting 'use_sudo' to true will make use of sudo to run smartctl.
  3991. #   ## Sudo must be configured to allow the telegraf user to run smartctl
  3992. #   ## without a password.
  3993. #   # use_sudo = false
  3994. #
  3995. #   ## Skip checking disks in this power mode. Defaults to
  3996. #   ## "standby" to not wake up disks that have stopped rotating.
  3997. #   ## See --nocheck in the man pages for smartctl.
  3998. #   ## smartctl version 5.41 and 5.42 have faulty detection of
  3999. #   ## power mode and might require changing this value to
  4000. #   ## "never" depending on your disks.
  4001. #   # nocheck = "standby"
  4002. #
  4003. #   ## Gather all returned S.M.A.R.T. attribute metrics and the detailed
  4004. #   ## information from each drive into the 'smart_attribute' measurement.
  4005. #   # attributes = false
  4006. #
  4007. #   ## Optionally specify devices to exclude from reporting.
  4008. #   # excludes = [ "/dev/pass6" ]
  4009. #
  4010. #   ## Optionally specify devices and device type, if unset
  4011. #   ## a scan (smartctl --scan) for S.M.A.R.T. devices will be
  4012. #   ## done and all found will be included except for those
  4013. #   ## excluded in excludes.
  4014. #   # devices = [ "/dev/ada0 -d atacam" ]
  4015. #
  4016. #   ## Timeout for the smartctl command to complete.
  4017. #   # timeout = "30s"
  4018.  
  4019.  
  4020. # # Retrieves SNMP values from remote agents
  4021. # [[inputs.snmp]]
  4022. #   agents = [ "127.0.0.1:161" ]
  4023. #   ## Timeout for each SNMP query.
  4024. #   timeout = "5s"
  4025. #   ## Number of retries to attempt within timeout.
  4026. #   retries = 3
  4027. #   ## SNMP version, values can be 1, 2, or 3
  4028. #   version = 2
  4029. #
  4030. #   ## SNMP community string.
  4031. #   community = "public"
  4032. #
  4033. #   ## The GETBULK max-repetitions parameter
  4034. #   max_repetitions = 10
  4035. #
  4036. #   ## SNMPv3 auth parameters
  4037. #   #sec_name = "myuser"
  4038. #   #auth_protocol = "md5"      # Values: "MD5", "SHA", ""
  4039. #   #auth_password = "pass"
  4040. #   #sec_level = "authNoPriv"   # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
  4041. #   #context_name = ""
  4042. #   #priv_protocol = ""         # Values: "DES", "AES", ""
  4043. #   #priv_password = ""
  4044. #
  4045. #   ## measurement name
  4046. #   name = "system"
  4047. #   [[inputs.snmp.field]]
  4048. #     name = "hostname"
  4049. #     oid = ".1.0.0.1.1"
  4050. #   [[inputs.snmp.field]]
  4051. #     name = "uptime"
  4052. #     oid = ".1.0.0.1.2"
  4053. #   [[inputs.snmp.field]]
  4054. #     name = "load"
  4055. #     oid = ".1.0.0.1.3"
  4056. #   [[inputs.snmp.field]]
  4057. #     oid = "HOST-RESOURCES-MIB::hrMemorySize"
  4058. #
  4059. #   [[inputs.snmp.table]]
  4060. #     ## measurement name
  4061. #     name = "remote_servers"
  4062. #     inherit_tags = [ "hostname" ]
  4063. #     [[inputs.snmp.table.field]]
  4064. #       name = "server"
  4065. #       oid = ".1.0.0.0.1.0"
  4066. #       is_tag = true
  4067. #     [[inputs.snmp.table.field]]
  4068. #       name = "connections"
  4069. #       oid = ".1.0.0.0.1.1"
  4070. #     [[inputs.snmp.table.field]]
  4071. #       name = "latency"
  4072. #       oid = ".1.0.0.0.1.2"
  4073. #
  4074. #   [[inputs.snmp.table]]
  4075. #     ## auto populate table's fields using the MIB
  4076. #     oid = "HOST-RESOURCES-MIB::hrNetworkTable"
  4077.  
  4078.  
  4079. # # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
  4080. # [[inputs.snmp_legacy]]
  4081. #   ## Use 'oids.txt' file to translate oids to names
  4082. #   ## To generate 'oids.txt' you need to run:
  4083. #   ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
  4084. #   ## Or if you have an other MIB folder with custom MIBs
  4085. #   ##   snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
  4086. #   snmptranslate_file = "/tmp/oids.txt"
  4087. #   [[inputs.snmp.host]]
  4088. #     address = "192.168.2.2:161"
  4089. #     # SNMP community
  4090. #     community = "public" # default public
  4091. #     # SNMP version (1, 2 or 3)
  4092. #     # Version 3 not supported yet
  4093. #     version = 2 # default 2
  4094. #     # SNMP response timeout
  4095. #     timeout = 2.0 # default 2.0
  4096. #     # SNMP request retries
  4097. #     retries = 2 # default 2
  4098. #     # Which get/bulk do you want to collect for this host
  4099. #     collect = ["mybulk", "sysservices", "sysdescr"]
  4100. #     # Simple list of OIDs to get, in addition to "collect"
  4101. #     get_oids = []
  4102. #
  4103. #   [[inputs.snmp.host]]
  4104. #     address = "192.168.2.3:161"
  4105. #     community = "public"
  4106. #     version = 2
  4107. #     timeout = 2.0
  4108. #     retries = 2
  4109. #     collect = ["mybulk"]
  4110. #     get_oids = [
  4111. #         "ifNumber",
  4112. #         ".1.3.6.1.2.1.1.3.0",
  4113. #     ]
  4114. #
  4115. #   [[inputs.snmp.get]]
  4116. #     name = "ifnumber"
  4117. #     oid = "ifNumber"
  4118. #
  4119. #   [[inputs.snmp.get]]
  4120. #     name = "interface_speed"
  4121. #     oid = "ifSpeed"
  4122. #     instance = "0"
  4123. #
  4124. #   [[inputs.snmp.get]]
  4125. #     name = "sysuptime"
  4126. #     oid = ".1.3.6.1.2.1.1.3.0"
  4127. #     unit = "second"
  4128. #
  4129. #   [[inputs.snmp.bulk]]
  4130. #     name = "mybulk"
  4131. #     max_repetition = 127
  4132. #     oid = ".1.3.6.1.2.1.1"
  4133. #
  4134. #   [[inputs.snmp.bulk]]
  4135. #     name = "ifoutoctets"
  4136. #     max_repetition = 127
  4137. #     oid = "ifOutOctets"
  4138. #
  4139. #   [[inputs.snmp.host]]
  4140. #     address = "192.168.2.13:161"
  4141. #     #address = "127.0.0.1:161"
  4142. #     community = "public"
  4143. #     version = 2
  4144. #     timeout = 2.0
  4145. #     retries = 2
  4146. #     #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
  4147. #     collect = ["sysuptime" ]
  4148. #     [[inputs.snmp.host.table]]
  4149. #       name = "iftable3"
  4150. #       include_instances = ["enp5s0", "eth1"]
  4151. #
  4152. #   # SNMP TABLEs
  4153. #   # table without mapping neither subtables
  4154. #   [[inputs.snmp.table]]
  4155. #     name = "iftable1"
  4156. #     oid = ".1.3.6.1.2.1.31.1.1.1"
  4157. #
  4158. #   # table without mapping but with subtables
  4159. #   [[inputs.snmp.table]]
  4160. #     name = "iftable2"
  4161. #     oid = ".1.3.6.1.2.1.31.1.1.1"
  4162. #     sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
  4163. #
  4164. #   # table with mapping but without subtables
  4165. #   [[inputs.snmp.table]]
  4166. #     name = "iftable3"
  4167. #     oid = ".1.3.6.1.2.1.31.1.1.1"
  4168. #     # if empty. get all instances
  4169. #     mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
  4170. #     # if empty, get all subtables
  4171. #
  4172. #   # table with both mapping and subtables
  4173. #   [[inputs.snmp.table]]
  4174. #     name = "iftable4"
  4175. #     oid = ".1.3.6.1.2.1.31.1.1.1"
  4176. #     # if empty get all instances
  4177. #     mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
  4178. #     # if empty get all subtables
  4179. #     # sub_tables could be not "real subtables"
  4180. #     sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
  4181.  
  4182.  
  4183. # # Read stats from one or more Solr servers or cores
  4184. # [[inputs.solr]]
  4185. #   ## specify a list of one or more Solr servers
  4186. #   servers = ["http://localhost:8983"]
  4187. #
  4188. #   ## specify a list of one or more Solr cores (default - all)
  4189. #   # cores = ["main"]
  4190. #
  4191. #   ## Optional HTTP Basic Auth Credentials
  4192. #   # username = "username"
  4193. #   # password = "pa$$word"
  4194.  
  4195.  
  4196. # # Read metrics from Microsoft SQL Server
  4197. # [[inputs.sqlserver]]
  4198. #   ## Specify instances to monitor with a list of connection strings.
  4199. #   ## All connection parameters are optional.
  4200. #   ## By default, the host is localhost, listening on default port, TCP 1433.
  4201. #   ##   for Windows, the user is the currently running AD user (SSO).
  4202. #   ##   See https://github.com/denisenkom/go-mssqldb for detailed connection
  4203. #   ##   parameters.
  4204. #   # servers = [
  4205. #   #  "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
  4206. #   # ]
  4207. #
  4208. #   ## Optional parameter, setting this to 2 will use a new version
  4209. #   ## of the collection queries that break compatibility with the original
  4210. #   ## dashboards.
  4211. #   query_version = 2
  4212. #
  4213. #   ## If you are using AzureDB, setting this to true will gather resource utilization metrics
  4214. #   # azuredb = false
  4215. #
  4216. #   ## If you would like to exclude some of the metrics queries, list them here
  4217. #   ## Possible choices:
  4218. #   ## - PerformanceCounters
  4219. #   ## - WaitStatsCategorized
  4220. #   ## - DatabaseIO
  4221. #   ## - DatabaseProperties
  4222. #   ## - CPUHistory
  4223. #   ## - DatabaseSize
  4224. #   ## - DatabaseStats
  4225. #   ## - MemoryClerk
  4226. #   ## - VolumeSpace
  4227. #   ## - PerformanceMetrics
  4228. #   ## - Schedulers
  4229. #   ## - AzureDBResourceStats
  4230. #   ## - AzureDBResourceGovernance
  4231. #   ## - SqlRequests
  4232. #   exclude_query = [ 'Schedulers' ]
  4233.  
  4234.  
  4235. # # Gather timeseries from Google Cloud Platform v3 monitoring API
  4236. # [[inputs.stackdriver]]
  4237. #   ## GCP Project
  4238. #   project = "erudite-bloom-151019"
  4239. #
  4240. #   ## Include timeseries that start with the given metric type.
  4241. #   metric_type_prefix_include = [
  4242. #     "compute.googleapis.com/",
  4243. #   ]
  4244. #
  4245. #   ## Exclude timeseries that start with the given metric type.
  4246. #   # metric_type_prefix_exclude = []
  4247. #
  4248. #   ## Many metrics are updated once per minute; it is recommended to override
  4249. #   ## the agent level interval with a value of 1m or greater.
  4250. #   interval = "1m"
  4251. #
  4252. #   ## Maximum number of API calls to make per second.  The quota for accounts
  4253. #   ## varies, it can be viewed on the API dashboard:
  4254. #   ##   https://cloud.google.com/monitoring/quotas#quotas_and_limits
  4255. #   # rate_limit = 14
  4256. #
  4257. #   ## The delay and window options control the number of points selected on
  4258. #   ## each gather.  When set, metrics are gathered between:
  4259. #   ##   start: now() - delay - window
  4260. #   ##   end:   now() - delay
  4261. #   #
  4262. #   ## Collection delay; if set too low metrics may not yet be available.
  4263. #   # delay = "5m"
  4264. #   #
  4265. #   ## If unset, the window will start at 1m and be updated dynamically to span
  4266. #   ## the time between calls (approximately the length of the plugin interval).
  4267. #   # window = "1m"
  4268. #
  4269. #   ## TTL for cached list of metric types.  This is the maximum amount of time
  4270. #   ## it may take to discover new metrics.
  4271. #   # cache_ttl = "1h"
  4272. #
  4273. #   ## If true, raw bucket counts are collected for distribution value types.
  4274. #   ## For a more lightweight collection, you may wish to disable and use
  4275. #   ## distribution_aggregation_aligners instead.
  4276. #   # gather_raw_distribution_buckets = true
  4277. #
  4278. #   ## Aggregate functions to be used for metrics whose value type is
  4279. #   ## distribution.  These aggregate values are recorded in addition to raw
  4280. #   ## bucket counts; if they are enabled.
  4281. #   ##
  4282. #   ## For a list of aligner strings see:
  4283. #   ##   https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner
  4284. #   # distribution_aggregation_aligners = [
  4285. #   #   "ALIGN_PERCENTILE_99",
  4286. #   #   "ALIGN_PERCENTILE_95",
  4287. #   #   "ALIGN_PERCENTILE_50",
  4288. #   # ]
  4289. #
  4290. #   ## Filters can be added to reduce the number of time series matched.  All
  4291. #   ## functions are supported: starts_with, ends_with, has_substring, and
  4292. #   ## one_of.  Only the '=' operator is supported.
  4293. #   ##
  4294. #   ## The logical operators when combining filters are defined statically using
  4295. #   ## the following values:
  4296. #   ##   filter ::= <resource_labels> {AND <metric_labels>}
  4297. #   ##   resource_labels ::= <resource_labels> {OR <resource_label>}
  4298. #   ##   metric_labels ::= <metric_labels> {OR <metric_label>}
  4299. #   ##
  4300. #   ## For more details, see https://cloud.google.com/monitoring/api/v3/filters
  4301. #   #
  4302. #   ## Resource labels refine the time series selection with the following expression:
  4303. #   ##   resource.labels.<key> = <value>
  4304. #   # [[inputs.stackdriver.filter.resource_labels]]
  4305. #   #   key = "instance_name"
  4306. #   #   value = 'starts_with("localhost")'
  4307. #   #
  4308. #   ## Metric labels refine the time series selection with the following expression:
  4309. #   ##   metric.labels.<key> = <value>
  4310. #   #  [[inputs.stackdriver.filter.metric_labels]]
  4311. #   #    key = "device_name"
  4312. #   #    value = 'one_of("sda", "sdb")'
  4313.  
  4314.  
  4315. # # Sysstat metrics collector
  4316. # [[inputs.sysstat]]
  4317. #   ## Path to the sadc command.
  4318. #   #
  4319. #   ## Common Defaults:
  4320. #   ##   Debian/Ubuntu: /usr/lib/sysstat/sadc
  4321. #   ##   Arch:          /usr/lib/sa/sadc
  4322. #   ##   RHEL/CentOS:   /usr/lib64/sa/sadc
  4323. #   sadc_path = "/usr/lib/sa/sadc" # required
  4324. #   #
  4325. #   #
  4326. #   ## Path to the sadf command, if it is not in PATH
  4327. #   # sadf_path = "/usr/bin/sadf"
  4328. #   #
  4329. #   #
  4330. #   ## Activities is a list of activities, that are passed as argument to the
  4331. #   ## sadc collector utility (e.g: DISK, SNMP etc...)
  4332. #   ## The more activities that are added, the more data is collected.
  4333. #   # activities = ["DISK"]
  4334. #   #
  4335. #   #
  4336. #   ## Group metrics to measurements.
  4337. #   ##
  4338. #   ## If group is false each metric will be prefixed with a description
  4339. #   ## and represents itself a measurement.
  4340. #   ##
  4341. #   ## If Group is true, corresponding metrics are grouped to a single measurement.
  4342. #   # group = true
  4343. #   #
  4344. #   #
  4345. #   ## Options for the sadf command. The values on the left represent the sadf
  4346. #   ## options and the values on the right their description (which are used for
  4347. #   ## grouping and prefixing metrics).
  4348. #   ##
  4349. #   ## Run 'sar -h' or 'man sar' to find out the supported options for your
  4350. #   ## sysstat version.
  4351. #   [inputs.sysstat.options]
  4352. #     -C = "cpu"
  4353. #     -B = "paging"
  4354. #     -b = "io"
  4355. #     -d = "disk"             # requires DISK activity
  4356. #     "-n ALL" = "network"
  4357. #     "-P ALL" = "per_cpu"
  4358. #     -q = "queue"
  4359. #     -R = "mem"
  4360. #     -r = "mem_util"
  4361. #     -S = "swap_util"
  4362. #     -u = "cpu_util"
  4363. #     -v = "inode"
  4364. #     -W = "swap"
  4365. #     -w = "task"
  4366. #   #  -H = "hugepages"        # only available for newer linux distributions
  4367. #   #  "-I ALL" = "interrupts" # requires INT activity
  4368. #   #
  4369. #   #
  4370. #   ## Device tags can be used to add additional tags for devices.
  4371. #   ## For example the configuration below adds a tag vg with value rootvg for
  4372. #   ## all metrics with sda devices.
  4373. #   # [[inputs.sysstat.device_tags.sda]]
  4374. #   #  vg = "rootvg"
  4375.  
  4376.  
  4377. # # Reads metrics from a Teamspeak 3 Server via ServerQuery
  4378. # [[inputs.teamspeak]]
  4379. #   ## Server address for Teamspeak 3 ServerQuery
  4380. #   # server = "127.0.0.1:10011"
  4381. #   ## Username for ServerQuery
  4382. #   username = "serverqueryuser"
  4383. #   ## Password for ServerQuery
  4384. #   password = "secret"
  4385. #   ## Array of virtual servers
  4386. #   # virtual_servers = [1]
  4387.  
  4388.  
  4389. # # Read metrics about temperature
  4390. # [[inputs.temp]]
  4391. #   # no configuration
  4392.  
  4393.  
  4394. # # Read Tengine's basic status information (ngx_http_reqstat_module)
  4395. # [[inputs.tengine]]
  4396. #   # An array of Tengine reqstat module URI to gather stats.
  4397. #   urls = ["http://127.0.0.1/us"]
  4398. #
  4399. #   # HTTP response timeout (default: 5s)
  4400. #   # response_timeout = "5s"
  4401. #
  4402. #   ## Optional TLS Config
  4403. #   # tls_ca = "/etc/telegraf/ca.pem"
  4404. #   # tls_cert = "/etc/telegraf/cert.cer"
  4405. #   # tls_key = "/etc/telegraf/key.key"
  4406. #   ## Use TLS but skip chain & host verification
  4407. #   # insecure_skip_verify = false
  4408.  
  4409.  
  4410. # # Gather metrics from the Tomcat server status page.
  4411. # [[inputs.tomcat]]
  4412. #   ## URL of the Tomcat server status
  4413. #   # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
  4414. #
  4415. #   ## HTTP Basic Auth Credentials
  4416. #   # username = "tomcat"
  4417. #   # password = "s3cret"
  4418. #
  4419. #   ## Request timeout
  4420. #   # timeout = "5s"
  4421. #
  4422. #   ## Optional TLS Config
  4423. #   # tls_ca = "/etc/telegraf/ca.pem"
  4424. #   # tls_cert = "/etc/telegraf/cert.pem"
  4425. #   # tls_key = "/etc/telegraf/key.pem"
  4426. #   ## Use TLS but skip chain & host verification
  4427. #   # insecure_skip_verify = false
  4428.  
  4429.  
  4430. # # Inserts sine and cosine waves for demonstration purposes
  4431. # [[inputs.trig]]
  4432. #   ## Set the amplitude
  4433. #   amplitude = 10.0
  4434.  
  4435.  
  4436. # # Read Twemproxy stats data
  4437. # [[inputs.twemproxy]]
  4438. #   ## Twemproxy stats address and port (no scheme)
  4439. #   addr = "localhost:22222"
  4440. #   ## Monitor pool name
  4441. #   pools = ["redis_pool", "mc_pool"]
  4442.  
  4443.  
  4444. # # A plugin to collect stats from the Unbound DNS resolver
  4445. # [[inputs.unbound]]
  4446. #   ## Address of server to connect to, read from unbound conf default, optionally ':port'
  4447. #   ## Will lookup IP if given a hostname
  4448. #   server = "127.0.0.1:8953"
  4449. #
  4450. #   ## If running as a restricted user you can prepend sudo for additional access:
  4451. #   # use_sudo = false
  4452. #
  4453. #   ## The default location of the unbound-control binary can be overridden with:
  4454. #   # binary = "/usr/sbin/unbound-control"
  4455. #
  4456. #   ## The default timeout of 1s can be overridden with:
  4457. #   # timeout = "1s"
  4458. #
  4459. #   ## When set to true, thread metrics are tagged with the thread id.
  4460. #   ##
  4461. #   ## The default is false for backwards compatibility, and will be changed to
  4462. #   ## true in a future version.  It is recommended to set to true on new
  4463. #   ## deployments.
  4464. #   thread_as_tag = false
  4465.  
  4466.  
  4467. # # Read uWSGI metrics.
  4468. # [[inputs.uwsgi]]
  4469. #   ## List with urls of uWSGI Stats servers. URL must match pattern:
  4470. #   ## scheme://address[:port]
  4471. #   ##
  4472. #   ## For example:
  4473. #   ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"]
  4474. #   servers = ["tcp://127.0.0.1:1717"]
  4475. #
  4476. #   ## General connection timeout
  4477. #   # timeout = "5s"
  4478.  
  4479.  
  4480. # # A plugin to collect stats from Varnish HTTP Cache
  4481. # [[inputs.varnish]]
  4482. #   ## If running as a restricted user you can prepend sudo for additional access:
  4483. #   #use_sudo = false
  4484. #
  4485. #   ## The default location of the varnishstat binary can be overridden with:
  4486. #   binary = "/usr/bin/varnishstat"
  4487. #
  4488. #   ## By default, telegraf gathers stats for 3 metric points.
  4489. #   ## Setting stats will override the defaults shown below.
  4490. #   ## Glob matching can be used, ie, stats = ["MAIN.*"]
  4491. #   ## stats may also be set to ["*"], which will collect all stats
  4492. #   stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
  4493. #
  4494. #   ## Optional name for the varnish instance (or working directory) to query
  4495. #   ## Usually appended after -n in varnish cli
  4496. #   # instance_name = instanceName
  4497. #
  4498. #   ## Timeout for varnishstat command
  4499. #   # timeout = "1s"
  4500.  
  4501.  
  4502. # # Monitor wifi signal strength and quality
  4503. # [[inputs.wireless]]
  4504. #   ## Sets 'proc' directory path
  4505. #   ## If not specified, then default is /proc
  4506. #   # host_proc = "/proc"
  4507.  
  4508.  
  4509. # # Reads metrics from a SSL certificate
  4510. # [[inputs.x509_cert]]
  4511. #   ## List certificate sources
  4512. #   sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"]
  4513. #
  4514. #   ## Timeout for SSL connection
  4515. #   # timeout = "5s"
  4516. #
  4517. #   ## Optional TLS Config
  4518. #   # tls_ca = "/etc/telegraf/ca.pem"
  4519. #   # tls_cert = "/etc/telegraf/cert.pem"
  4520. #   # tls_key = "/etc/telegraf/key.pem"
  4521.  
  4522.  
  4523. # # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
  4524. # [[inputs.zfs]]
  4525. #   ## ZFS kstat path. Ignored on FreeBSD
  4526. #   ## If not specified, then default is:
  4527. #   # kstatPath = "/proc/spl/kstat/zfs"
  4528. #
  4529. #   ## By default, telegraf gathers all zfs stats
  4530. #   ## If not specified, then default is:
  4531. #   # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
  4532. #   ## For Linux, the default is:
  4533. #   # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats",
  4534. #   #   "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"]
  4535. #   ## By default, don't gather zpool stats
  4536. #   # poolMetrics = false
  4537.  
  4538.  
  4539. # # Reads 'mntr' stats from one or many zookeeper servers
  4540. # [[inputs.zookeeper]]
  4541. #   ## An array of address to gather stats about. Specify an ip or hostname
  4542. #   ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
  4543. #
  4544. #   ## If no servers are specified, then localhost is used as the host.
  4545. #   ## If no port is specified, 2181 is used
  4546. #   servers = [":2181"]
  4547. #
  4548. #   ## Timeout for metric collections from all servers.  Minimum timeout is "1s".
  4549. #   # timeout = "5s"
  4550. #
  4551. #   ## Optional TLS Config
  4552. #   # enable_tls = true
  4553. #   # tls_ca = "/etc/telegraf/ca.pem"
  4554. #   # tls_cert = "/etc/telegraf/cert.pem"
  4555. #   # tls_key = "/etc/telegraf/key.pem"
  4556. #   ## If true, skip chain & host verification
  4557. #   # insecure_skip_verify = true
  4558.  
  4559.  
  4560. ###############################################################################
  4561. #                            SERVICE INPUT PLUGINS                            #
  4562. ###############################################################################
  4563.  
  4564.  
  4565. # # AMQP consumer plugin
  4566. # [[inputs.amqp_consumer]]
  4567. #   ## Broker to consume from.
  4568. #   ##   deprecated in 1.7; use the brokers option
  4569. #   # url = "amqp://localhost:5672/influxdb"
  4570. #
  4571. #   ## Brokers to consume from.  If multiple brokers are specified a random broker
  4572. #   ## will be selected anytime a connection is established.  This can be
  4573. #   ## helpful for load balancing when not using a dedicated load balancer.
  4574. #   brokers = ["amqp://localhost:5672/influxdb"]
  4575. #
  4576. #   ## Authentication credentials for the PLAIN auth_method.
  4577. #   # username = ""
  4578. #   # password = ""
  4579. #
  4580. #   ## Name of the exchange to declare.  If unset, no exchange will be declared.
  4581. #   exchange = "telegraf"
  4582. #
  4583. #   ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
  4584. #   # exchange_type = "topic"
  4585. #
  4586. #   ## If true, exchange will be passively declared.
  4587. #   # exchange_passive = false
  4588. #
  4589. #   ## Exchange durability can be either "transient" or "durable".
  4590. #   # exchange_durability = "durable"
  4591. #
  4592. #   ## Additional exchange arguments.
  4593. #   # exchange_arguments = { }
  4594. #   # exchange_arguments = {"hash_property" = "timestamp"}
  4595. #
  4596. #   ## AMQP queue name.
  4597. #   queue = "telegraf"
  4598. #
  4599. #   ## AMQP queue durability can be "transient" or "durable".
  4600. #   queue_durability = "durable"
  4601. #
  4602. #   ## If true, queue will be passively declared.
  4603. #   # queue_passive = false
  4604. #
  4605. #   ## A binding between the exchange and queue using this binding key is
  4606. #   ## created.  If unset, no binding is created.
  4607. #   binding_key = "#"
  4608. #
  4609. #   ## Maximum number of messages server should give to the worker.
  4610. #   # prefetch_count = 50
  4611. #
  4612. #   ## Maximum messages to read from the broker that have not been written by an
  4613. #   ## output.  For best throughput set based on the number of metrics within
  4614. #   ## each message and the size of the output's metric_batch_size.
  4615. #   ##
  4616. #   ## For example, if each message from the queue contains 10 metrics and the
  4617. #   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
  4618. #   ## full batch is collected and the write is triggered immediately without
  4619. #   ## waiting until the next flush_interval.
  4620. #   # max_undelivered_messages = 1000
  4621. #
  4622. #   ## Auth method. PLAIN and EXTERNAL are supported
  4623. #   ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
  4624. #   ## described here: https://www.rabbitmq.com/plugins.html
  4625. #   # auth_method = "PLAIN"
  4626. #
  4627. #   ## Optional TLS Config
  4628. #   # tls_ca = "/etc/telegraf/ca.pem"
  4629. #   # tls_cert = "/etc/telegraf/cert.pem"
  4630. #   # tls_key = "/etc/telegraf/key.pem"
  4631. #   ## Use TLS but skip chain & host verification
  4632. #   # insecure_skip_verify = false
  4633. #
  4634. #   ## Content encoding for message payloads, can be set to "gzip" or
  4635. #   ## "identity" to apply no encoding.
  4636. #   # content_encoding = "identity"
  4637. #
  4638. #   ## Data format to consume.
  4639. #   ## Each data format has its own unique set of configuration options, read
  4640. #   ## more about them here:
  4641. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  4642. #   data_format = "influx"
  4643.  
  4644.  
  4645. # # Read Cassandra metrics through Jolokia
  4646. # [[inputs.cassandra]]
  4647. #   ## DEPRECATED: The cassandra plugin has been deprecated.  Please use the
  4648. #   ## jolokia2 plugin instead.
  4649. #   ##
  4650. #   ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
  4651. #
  4652. #   context = "/jolokia/read"
  4653. #   ## List of cassandra servers exposing jolokia read service
  4654. #   servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
  4655. #   ## List of metrics collected on above servers
  4656. #   ## Each metric consists of a jmx path.
  4657. #   ## This will collect all heap memory usage metrics from the jvm and
  4658. #   ## ReadLatency metrics for all keyspaces and tables.
  4659. #   ## "type=Table" in the query works with Cassandra3.0. Older versions might
  4660. #   ## need to use "type=ColumnFamily"
  4661. #   metrics  = [
  4662. #     "/java.lang:type=Memory/HeapMemoryUsage",
  4663. #     "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
  4664. #   ]
  4665.  
  4666.  
  4667. # # Cisco GNMI telemetry input plugin based on GNMI telemetry data produced in IOS XR
  4668. # [[inputs.cisco_telemetry_gnmi]]
  4669. #  ## Address and port of the GNMI GRPC server
  4670. #  addresses = ["10.49.234.114:57777"]
  4671. #
  4672. #  ## define credentials
  4673. #  username = "cisco"
  4674. #  password = "cisco"
  4675. #
  4676. #  ## GNMI encoding requested (one of: "proto", "json", "json_ietf")
  4677. #  # encoding = "proto"
  4678. #
  4679. #  ## redial in case of failures after
  4680. #  redial = "10s"
  4681. #
  4682. #  ## enable client-side TLS and define CA to authenticate the device
  4683. #  # enable_tls = true
  4684. #  # tls_ca = "/etc/telegraf/ca.pem"
  4685. #  # insecure_skip_verify = true
  4686. #
  4687. #  ## define client-side TLS certificate & key to authenticate to the device
  4688. #  # tls_cert = "/etc/telegraf/cert.pem"
  4689. #  # tls_key = "/etc/telegraf/key.pem"
  4690. #
  4691. #  ## GNMI subscription prefix (optional, can usually be left empty)
  4692. #  ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
  4693. #  # origin = ""
  4694. #  # prefix = ""
  4695. #  # target = ""
  4696. #
  4697. #  ## Define additional aliases to map telemetry encoding paths to simple measurement names
  4698. #  #[inputs.cisco_telemetry_gnmi.aliases]
  4699. #  #  ifcounters = "openconfig:/interfaces/interface/state/counters"
  4700. #
  4701. #  [[inputs.cisco_telemetry_gnmi.subscription]]
  4702. #   ## Name of the measurement that will be emitted
  4703. #   name = "ifcounters"
  4704. #
  4705. #   ## Origin and path of the subscription
  4706. #   ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
  4707. #   ##
  4708. #   ## origin usually refers to a (YANG) data model implemented by the device
  4709. #   ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath)
  4710. #   ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr
  4711. #   origin = "openconfig-interfaces"
  4712. #   path = "/interfaces/interface/state/counters"
  4713. #
  4714. #   # Subscription mode (one of: "target_defined", "sample", "on_change") and interval
  4715. #   subscription_mode = "sample"
  4716. #   sample_interval = "10s"
  4717. #
  4718. #   ## Suppress redundant transmissions when measured values are unchanged
  4719. #   # suppress_redundant = false
  4720. #
  4721. #   ## If suppression is enabled, send updates at least every X seconds anyway
  4722. #   # heartbeat_interval = "60s"
  4723.  
  4724.  
  4725. # # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms
  4726. # [[inputs.cisco_telemetry_mdt]]
  4727. #  ## Telemetry transport can be "tcp" or "grpc".  TLS is only supported when
  4728. #  ## using the grpc transport.
  4729. #  transport = "grpc"
  4730. #
  4731. #  ## Address and port to host telemetry listener
  4732. #  service_address = ":57000"
  4733. #
  4734. #  ## Enable TLS; grpc transport only.
  4735. #  # tls_cert = "/etc/telegraf/cert.pem"
  4736. #  # tls_key = "/etc/telegraf/key.pem"
  4737. #
  4738. #  ## Enable TLS client authentication and define allowed CA certificates; grpc
  4739. #  ##  transport only.
  4740. #  # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
  4741. #
  4742. #  ## Define aliases to map telemetry encoding paths to simple measurement names
  4743. #  [inputs.cisco_telemetry_mdt.aliases]
  4744. #    ifstats = "ietf-interfaces:interfaces-state/interface/statistics"
  4745.  
  4746.  
  4747. # # Read metrics from Google PubSub
  4748. # [[inputs.cloud_pubsub]]
  4749. #   ## Required. Name of Google Cloud Platform (GCP) Project that owns
  4750. #   ## the given PubSub subscription.
  4751. #   project = "my-project"
  4752. #
  4753. #   ## Required. Name of PubSub subscription to ingest metrics from.
  4754. #   subscription = "my-subscription"
  4755. #
  4756. #   ## Required. Data format to consume.
  4757. #   ## Each data format has its own unique set of configuration options.
  4758. #   ## Read more about them here:
  4759. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  4760. #   data_format = "influx"
  4761. #
  4762. #   ## Optional. Filepath for GCP credentials JSON file to authorize calls to
  4763. #   ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
  4764. #   ## Application Default Credentials, which is preferred.
  4765. #   # credentials_file = "path/to/my/creds.json"
  4766. #
  4767. #   ## Optional. Number of seconds to wait before attempting to restart the
  4768. #   ## PubSub subscription receiver after an unexpected error.
  4769. #   ## If the streaming pull for a PubSub Subscription fails (receiver),
  4770. #   ## the agent attempts to restart receiving messages after this many seconds.
  4771. #   # retry_delay_seconds = 5
  4772. #
  4773. #   ## Optional. Maximum byte length of a message to consume.
  4774. #   ## Larger messages are dropped with an error. If less than 0 or unspecified,
  4775. #   ## treated as no limit.
  4776. #   # max_message_len = 1000000
  4777. #
  4778. #   ## Optional. Maximum messages to read from PubSub that have not been written
  4779. #   ## to an output. Defaults to 1000.
  4780. #   ## For best throughput set based on the number of metrics within
  4781. #   ## each message and the size of the output's metric_batch_size.
  4782. #   ##
  4783. #   ## For example, if each message contains 10 metrics and the output
  4784. #   ## metric_batch_size is 1000, setting this to 100 will ensure that a
  4785. #   ## full batch is collected and the write is triggered immediately without
  4786. #   ## waiting until the next flush_interval.
  4787. #   # max_undelivered_messages = 1000
  4788. #
  4789. #   ## The following are optional Subscription ReceiveSettings in PubSub.
  4790. #   ## Read more about these values:
  4791. #   ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings
  4792. #
  4793. #   ## Optional. Maximum number of seconds for which a PubSub subscription
  4794. #   ## should auto-extend the PubSub ACK deadline for each message. If less than
  4795. #   ## 0, auto-extension is disabled.
  4796. #   # max_extension = 0
  4797. #
  4798. #   ## Optional. Maximum number of unprocessed messages in PubSub
  4799. #   ## (unacknowledged but not yet expired in PubSub).
  4800. #   ## A value of 0 is treated as the default PubSub value.
  4801. #   ## Negative values will be treated as unlimited.
  4802. #   # max_outstanding_messages = 0
  4803. #
  4804. #   ## Optional. Maximum size in bytes of unprocessed messages in PubSub
  4805. #   ## (unacknowledged but not yet expired in PubSub).
  4806. #   ## A value of 0 is treated as the default PubSub value.
  4807. #   ## Negative values will be treated as unlimited.
  4808. #   # max_outstanding_bytes = 0
  4809. #
  4810. #   ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn
  4811. #   ## to pull messages from PubSub concurrently. This limit applies to each
  4812. #   ## subscription separately and is treated as the PubSub default if less than
  4813. #   ## 1. Note this setting does not limit the number of messages that can be
  4814. #   ## processed concurrently (use "max_outstanding_messages" instead).
  4815. #   # max_receiver_go_routines = 0
  4816. #
  4817. #   ## Optional. If true, Telegraf will attempt to base64 decode the
  4818. #   ## PubSub message data before parsing
  4819. #   # base64_data = false
  4820.  
  4821.  
  4822. # # Google Cloud Pub/Sub Push HTTP listener
  4823. # [[inputs.cloud_pubsub_push]]
  4824. #   ## Address and port to host HTTP listener on
  4825. #   service_address = ":8080"
  4826. #
  4827. #   ## Application secret to verify messages originate from Cloud Pub/Sub
  4828. #   # token = ""
  4829. #
  4830. #   ## Path to listen to.
  4831. #   # path = "/"
  4832. #
  4833. #   ## Maximum duration before timing out read of the request
  4834. #   # read_timeout = "10s"
  4835. #   ## Maximum duration before timing out write of the response. This should be set to a value
  4836. #   ## large enough that you can send at least 'metric_batch_size' number of messages within the
  4837. #   ## duration.
  4838. #   # write_timeout = "10s"
  4839. #
  4840. #   ## Maximum allowed http request body size in bytes.
  4841. #   ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
  4842. #   # max_body_size = "500MB"
  4843. #
  4844. #   ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag.
  4845. #   # add_meta = false
  4846. #
  4847. #   ## Optional. Maximum messages to read from PubSub that have not been written
  4848. #   ## to an output. Defaults to 1000.
  4849. #   ## For best throughput set based on the number of metrics within
  4850. #   ## each message and the size of the output's metric_batch_size.
  4851. #   ##
  4852. #   ## For example, if each message contains 10 metrics and the output
  4853. #   ## metric_batch_size is 1000, setting this to 100 will ensure that a
  4854. #   ## full batch is collected and the write is triggered immediately without
  4855. #   ## waiting until the next flush_interval.
  4856. #   # max_undelivered_messages = 1000
  4857. #
  4858. #   ## Set one or more allowed client CA certificate file names to
  4859. #   ## enable mutually authenticated TLS connections
  4860. #   # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
  4861. #
  4862. #   ## Add service certificate and key
  4863. #   # tls_cert = "/etc/telegraf/cert.pem"
  4864. #   # tls_key = "/etc/telegraf/key.pem"
  4865. #
  4866. #   ## Data format to consume.
  4867. #   ## Each data format has its own unique set of configuration options, read
  4868. #   ## more about them here:
  4869. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  4870. #   data_format = "influx"
  4871.  
  4872.  
  4873. # # Read logging output from the Docker engine
  4874. # [[inputs.docker_log]]
  4875. #   ## Docker Endpoint
  4876. #   ##   To use TCP, set endpoint = "tcp://[ip]:[port]"
  4877. #   ##   To use environment variables (ie, docker-machine), set endpoint = "ENV"
  4878. #   # endpoint = "unix:///var/run/docker.sock"
  4879. #
  4880. #   ## When true, container logs are read from the beginning; otherwise
  4881. #   ## reading begins at the end of the log.
  4882. #   # from_beginning = false
  4883. #
  4884. #   ## Timeout for Docker API calls.
  4885. #   # timeout = "5s"
  4886. #
  4887. #   ## Containers to include and exclude. Globs accepted.
  4888. #   ## Note that an empty array for both will include all containers
  4889. #   # container_name_include = []
  4890. #   # container_name_exclude = []
  4891. #
  4892. #   ## Container states to include and exclude. Globs accepted.
  4893. #   ## When empty only containers in the "running" state will be captured.
  4894. #   # container_state_include = []
  4895. #   # container_state_exclude = []
  4896. #
  4897. #   ## docker labels to include and exclude as tags.  Globs accepted.
  4898. #   ## Note that an empty array for both will include all labels as tags
  4899. #   # docker_label_include = []
  4900. #   # docker_label_exclude = []
  4901. #
  4902. #   ## Optional TLS Config
  4903. #   # tls_ca = "/etc/telegraf/ca.pem"
  4904. #   # tls_cert = "/etc/telegraf/cert.pem"
  4905. #   # tls_key = "/etc/telegraf/key.pem"
  4906. #   ## Use TLS but skip chain & host verification
  4907. #   # insecure_skip_verify = false
  4908.  
  4909.  
  4910. # # Influx HTTP write listener
  4911. # [[inputs.http_listener]]
  4912. #   ## Address and port to host HTTP listener on
  4913. #   service_address = ":8186"
  4914. #
  4915. #   ## maximum duration before timing out read of the request
  4916. #   read_timeout = "10s"
  4917. #   ## maximum duration before timing out write of the response
  4918. #   write_timeout = "10s"
  4919. #
  4920. #   ## Maximum allowed http request body size in bytes.
  4921. #   ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
  4922. #   max_body_size = "500MiB"
  4923. #
  4924. #   ## Maximum line size allowed to be sent in bytes.
  4925. #   ## 0 means to use the default of 65536 bytes (64 kibibytes)
  4926. #   max_line_size = "64KiB"
  4927. #
  4928. #
  4929. #   ## Optional tag name used to store the database.
  4930. #   ## If the write has a database in the query string then it will be kept in this tag name.
  4931. #   ## This tag can be used in downstream outputs.
  4932. #   ## The default value of nothing means it will be off and the database will not be recorded.
  4933. #   # database_tag = ""
  4934. #
  4935. #   ## Set one or more allowed client CA certificate file names to
  4936. #   ## enable mutually authenticated TLS connections
  4937. #   tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
  4938. #
  4939. #   ## Add service certificate and key
  4940. #   tls_cert = "/etc/telegraf/cert.pem"
  4941. #   tls_key = "/etc/telegraf/key.pem"
  4942. #
  4943. #   ## Optional username and password to accept for HTTP basic authentication.
  4944. #   ## You probably want to make sure you have TLS configured above for this.
  4945. #   # basic_username = "foobar"
  4946. #   # basic_password = "barfoo"
  4947.  
  4948.  
  4949. # # Generic HTTP write listener
  4950. # [[inputs.http_listener_v2]]
  4951. #   ## Address and port to host HTTP listener on
  4952. #   service_address = ":8080"
  4953. #
  4954. #   ## Path to listen to.
  4955. #   # path = "/telegraf"
  4956. #
  4957. #   ## HTTP methods to accept.
  4958. #   # methods = ["POST", "PUT"]
  4959. #
  4960. #   ## maximum duration before timing out read of the request
  4961. #   # read_timeout = "10s"
  4962. #   ## maximum duration before timing out write of the response
  4963. #   # write_timeout = "10s"
  4964. #
  4965. #   ## Maximum allowed http request body size in bytes.
  4966. #   ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
  4967. #   # max_body_size = "500MB"
  4968. #
  4969. #   ## Part of the request to consume.  Available options are "body" and
  4970. #   ## "query".
  4971. #   # data_source = "body"
  4972. #
  4973. #   ## Set one or more allowed client CA certificate file names to
  4974. #   ## enable mutually authenticated TLS connections
  4975. #   # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
  4976. #
  4977. #   ## Add service certificate and key
  4978. #   # tls_cert = "/etc/telegraf/cert.pem"
  4979. #   # tls_key = "/etc/telegraf/key.pem"
  4980. #
  4981. #   ## Optional username and password to accept for HTTP basic authentication.
  4982. #   ## You probably want to make sure you have TLS configured above for this.
  4983. #   # basic_username = "foobar"
  4984. #   # basic_password = "barfoo"
  4985. #
  4986. #   ## Data format to consume.
  4987. #   ## Each data format has its own unique set of configuration options, read
  4988. #   ## more about them here:
  4989. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  4990. #   data_format = "influx"
  4991.  
  4992.  
  4993. # # Influx HTTP write listener
  4994. # [[inputs.influxdb_listener]]
  4995. #   ## Address and port to host HTTP listener on
  4996. #   service_address = ":8186"
  4997. #
  4998. #   ## maximum duration before timing out read of the request
  4999. #   read_timeout = "10s"
  5000. #   ## maximum duration before timing out write of the response
  5001. #   write_timeout = "10s"
  5002. #
  5003. #   ## Maximum allowed http request body size in bytes.
  5004. #   ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
  5005. #   max_body_size = "500MiB"
  5006. #
  5007. #   ## Maximum line size allowed to be sent in bytes.
  5008. #   ## 0 means to use the default of 65536 bytes (64 kibibytes)
  5009. #   max_line_size = "64KiB"
  5010. #
  5011. #
  5012. #   ## Optional tag name used to store the database.
  5013. #   ## If the write has a database in the query string then it will be kept in this tag name.
  5014. #   ## This tag can be used in downstream outputs.
  5015. #   ## The default value of nothing means it will be off and the database will not be recorded.
  5016. #   # database_tag = ""
  5017. #
  5018. #   ## Set one or more allowed client CA certificate file names to
  5019. #   ## enable mutually authenticated TLS connections
  5020. #   tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
  5021. #
  5022. #   ## Add service certificate and key
  5023. #   tls_cert = "/etc/telegraf/cert.pem"
  5024. #   tls_key = "/etc/telegraf/key.pem"
  5025. #
  5026. #   ## Optional username and password to accept for HTTP basic authentication.
  5027. #   ## You probably want to make sure you have TLS configured above for this.
  5028. #   # basic_username = "foobar"
  5029. #   # basic_password = "barfoo"
  5030.  
  5031.  
  5032. # # Read JTI OpenConfig Telemetry from listed sensors
  5033. # [[inputs.jti_openconfig_telemetry]]
  5034. #   ## List of device addresses to collect telemetry from
  5035. #   servers = ["localhost:1883"]
  5036. #
  5037. #   ## Authentication details. Username and password are must if device expects
  5038. #   ## authentication. Client ID must be unique when connecting from multiple instances
  5039. #   ## of telegraf to the same device
  5040. #   username = "user"
  5041. #   password = "pass"
  5042. #   client_id = "telegraf"
  5043. #
  5044. #   ## Frequency to get data
  5045. #   sample_frequency = "1000ms"
  5046. #
  5047. #   ## Sensors to subscribe for
  5048. #   ## An identifier for each sensor can be provided in path by separating with space
  5049. #   ## Else sensor path will be used as identifier
  5050. #   ## When identifier is used, we can provide a list of space separated sensors.
  5051. #   ## A single subscription will be created with all these sensors and data will
  5052. #   ## be saved to measurement with this identifier name
  5053. #   sensors = [
  5054. #    "/interfaces/",
  5055. #    "collection /components/ /lldp",
  5056. #   ]
  5057. #
  5058. #   ## We allow specifying sensor group level reporting rate. To do this, specify the
  5059. #   ## reporting rate in Duration at the beginning of sensor paths / collection
  5060. #   ## name. For entries without reporting rate, we use configured sample frequency
  5061. #   sensors = [
  5062. #    "1000ms customReporting /interfaces /lldp",
  5063. #    "2000ms collection /components",
  5064. #    "/interfaces",
  5065. #   ]
  5066. #
  5067. #   ## Optional TLS Config
  5068. #   # enable_tls = true
  5069. #   # tls_ca = "/etc/telegraf/ca.pem"
  5070. #   # tls_cert = "/etc/telegraf/cert.pem"
  5071. #   # tls_key = "/etc/telegraf/key.pem"
  5072. #   ## Use TLS but skip chain & host verification
  5073. #   # insecure_skip_verify = false
  5074. #
  5075. #   ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
  5076. #   ## Failed streams/calls will not be retried if 0 is provided
  5077. #   retry_delay = "1000ms"
  5078. #
  5079. #   ## To treat all string values as tags, set this to true
  5080. #   str_as_tags = false
  5081.  
  5082.  
  5083. # # Read metrics from Kafka topics
  5084. # [[inputs.kafka_consumer]]
  5085. #   ## Kafka brokers.
  5086. #   brokers = ["localhost:9092"]
  5087. #
  5088. #   ## Topics to consume.
  5089. #   topics = ["telegraf"]
  5090. #
  5091. #   ## When set this tag will be added to all metrics with the topic as the value.
  5092. #   # topic_tag = ""
  5093. #
  5094. #   ## Optional Client id
  5095. #   # client_id = "Telegraf"
  5096. #
  5097. #   ## Set the minimal supported Kafka version.  Setting this enables the use of new
  5098. #   ## Kafka features and APIs.  Must be 0.10.2.0 or greater.
  5099. #   ##   ex: version = "1.1.0"
  5100. #   # version = ""
  5101. #
  5102. #   ## Optional TLS Config
  5103. #   # tls_ca = "/etc/telegraf/ca.pem"
  5104. #   # tls_cert = "/etc/telegraf/cert.pem"
  5105. #   # tls_key = "/etc/telegraf/key.pem"
  5106. #   ## Use TLS but skip chain & host verification
  5107. #   # insecure_skip_verify = false
  5108. #
  5109. #   ## Optional SASL Config
  5110. #   # sasl_username = "kafka"
  5111. #   # sasl_password = "secret"
  5112. #
  5113. #   ## Name of the consumer group.
  5114. #   # consumer_group = "telegraf_metrics_consumers"
  5115. #
  5116. #   ## Initial offset position; one of "oldest" or "newest".
  5117. #   # offset = "oldest"
  5118. #
  5119. #   ## Maximum length of a message to consume, in bytes (default 0/unlimited);
  5120. #   ## larger messages are dropped
  5121. #   max_message_len = 1000000
  5122. #
  5123. #   ## Maximum messages to read from the broker that have not been written by an
  5124. #   ## output.  For best throughput set based on the number of metrics within
  5125. #   ## each message and the size of the output's metric_batch_size.
  5126. #   ##
  5127. #   ## For example, if each message from the queue contains 10 metrics and the
  5128. #   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
  5129. #   ## full batch is collected and the write is triggered immediately without
  5130. #   ## waiting until the next flush_interval.
  5131. #   # max_undelivered_messages = 1000
  5132. #
  5133. #   ## Data format to consume.
  5134. #   ## Each data format has its own unique set of configuration options, read
  5135. #   ## more about them here:
  5136. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  5137. #   data_format = "influx"
  5138.  
  5139.  
  5140. # # Read metrics from Kafka topic(s)
  5141. # [[inputs.kafka_consumer_legacy]]
  5142. #   ## topic(s) to consume
  5143. #   topics = ["telegraf"]
  5144. #   ## an array of Zookeeper connection strings
  5145. #   zookeeper_peers = ["localhost:2181"]
  5146. #   ## Zookeeper Chroot
  5147. #   zookeeper_chroot = ""
  5148. #   ## the name of the consumer group
  5149. #   consumer_group = "telegraf_metrics_consumers"
  5150. #   ## Offset (must be either "oldest" or "newest")
  5151. #   offset = "oldest"
  5152. #
  5153. #   ## Data format to consume.
  5154. #   ## Each data format has its own unique set of configuration options, read
  5155. #   ## more about them here:
  5156. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  5157. #   data_format = "influx"
  5158. #
  5159. #   ## Maximum length of a message to consume, in bytes (default 0/unlimited);
  5160. #   ## larger messages are dropped
  5161. #   max_message_len = 65536
  5162.  
  5163.  
  5164. # # Configuration for the AWS Kinesis input.
  5165. # [[inputs.kinesis_consumer]]
  5166. #   ## Amazon REGION of kinesis endpoint.
  5167. #   region = "ap-southeast-2"
  5168. #
  5169. #   ## Amazon Credentials
  5170. #   ## Credentials are loaded in the following order
  5171. #   ## 1) Assumed credentials via STS if role_arn is specified
  5172. #   ## 2) explicit credentials from 'access_key' and 'secret_key'
  5173. #   ## 3) shared profile from 'profile'
  5174. #   ## 4) environment variables
  5175. #   ## 5) shared credentials file
  5176. #   ## 6) EC2 Instance Profile
  5177. #   # access_key = ""
  5178. #   # secret_key = ""
  5179. #   # token = ""
  5180. #   # role_arn = ""
  5181. #   # profile = ""
  5182. #   # shared_credential_file = ""
  5183. #
  5184. #   ## Endpoint to make request against, the correct endpoint is automatically
  5185. #   ## determined and this option should only be set if you wish to override the
  5186. #   ## default.
  5187. #   ##   ex: endpoint_url = "http://localhost:8000"
  5188. #   # endpoint_url = ""
  5189. #
  5190. #   ## Kinesis StreamName must exist prior to starting telegraf.
  5191. #   streamname = "StreamName"
  5192. #
  5193. #   ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported)
  5194. #   # shard_iterator_type = "TRIM_HORIZON"
  5195. #
  5196. #   ## Maximum messages to read from the broker that have not been written by an
  5197. #   ## output.  For best throughput set based on the number of metrics within
  5198. #   ## each message and the size of the output's metric_batch_size.
  5199. #   ##
  5200. #   ## For example, if each message from the queue contains 10 metrics and the
  5201. #   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
  5202. #   ## full batch is collected and the write is triggered immediately without
  5203. #   ## waiting until the next flush_interval.
  5204. #   # max_undelivered_messages = 1000
  5205. #
  5206. #   ## Data format to consume.
  5207. #   ## Each data format has its own unique set of configuration options, read
  5208. #   ## more about them here:
  5209. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  5210. #   data_format = "influx"
  5211. #
  5212. #   ## Optional
  5213. #   ## Configuration for a dynamodb checkpoint
  5214. #   [inputs.kinesis_consumer.checkpoint_dynamodb]
  5215. #   ## unique name for this consumer
  5216. #   app_name = "default"
  5217. #   table_name = "default"
  5218.  
  5219.  
  5220. # # Stream and parse log file(s).
  5221. # [[inputs.logparser]]
  5222. #   ## Log files to parse.
  5223. #   ## These accept standard unix glob matching rules, but with the addition of
  5224. #   ## ** as a "super asterisk". ie:
  5225. #   ##   /var/log/**.log     -> recursively find all .log files in /var/log
  5226. #   ##   /var/log/*/*.log    -> find all .log files with a parent dir in /var/log
  5227. #   ##   /var/log/apache.log -> only tail the apache log file
  5228. #   files = ["/var/log/apache/access.log"]
  5229. #
  5230. #   ## Read files that currently exist from the beginning. Files that are created
  5231. #   ## while telegraf is running (and that match the "files" globs) will always
  5232. #   ## be read from the beginning.
  5233. #   from_beginning = false
  5234. #
  5235. #   ## Method used to watch for file updates.  Can be either "inotify" or "poll".
  5236. #   # watch_method = "inotify"
  5237. #
  5238. #   ## Parse logstash-style "grok" patterns:
  5239. #   [inputs.logparser.grok]
  5240. #     ## This is a list of patterns to check the given log file(s) for.
  5241. #     ## Note that adding patterns here increases processing time. The most
  5242. #     ## efficient configuration is to have one pattern per logparser.
  5243. #     ## Other common built-in patterns are:
  5244. #     ##   %{COMMON_LOG_FORMAT}   (plain apache & nginx access logs)
  5245. #     ##   %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
  5246. #     patterns = ["%{COMBINED_LOG_FORMAT}"]
  5247. #
  5248. #     ## Name of the outputted measurement name.
  5249. #     measurement = "apache_access_log"
  5250. #
  5251. #     ## Full path(s) to custom pattern files.
  5252. #     custom_pattern_files = []
  5253. #
  5254. #     ## Custom patterns can also be defined here. Put one pattern per line.
  5255. #     custom_patterns = '''
  5256. #     '''
  5257. #
  5258. #     ## Timezone allows you to provide an override for timestamps that
  5259. #     ## don't already include an offset
  5260. #     ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
  5261. #     ##
  5262. #     ## Default: "" which renders UTC
  5263. #     ## Options are as follows:
  5264. #     ##   1. Local             -- interpret based on machine localtime
  5265. #     ##   2. "Canada/Eastern"  -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
  5266. #     ##   3. UTC               -- or blank/unspecified, will return timestamp in UTC
  5267. #     # timezone = "Canada/Eastern"
  5268. #
  5269. #   ## When set to "disable", timestamp will not be incremented if there is a
  5270. #   ## duplicate.
  5271. #     # unique_timestamp = "auto"
  5272.  
  5273.  
  5274. # # Read metrics from MQTT topic(s)
  5275. # [[inputs.mqtt_consumer]]
  5276. #   ## MQTT broker URLs to be used. The format should be scheme://host:port,
  5277. #   ## scheme can be tcp, ssl, or ws.
  5278. #   servers = ["tcp://127.0.0.1:1883"]
  5279. #
  5280. #   ## Topics that will be subscribed to.
  5281. #   topics = [
  5282. #     "telegraf/host01/cpu",
  5283. #     "telegraf/+/mem",
  5284. #     "sensors/#",
  5285. #   ]
  5286. #
  5287. #   ## The message topic will be stored in a tag specified by this value.  If set
  5288. #   ## to the empty string no topic tag will be created.
  5289. #   # topic_tag = "topic"
  5290. #
  5291. #   ## QoS policy for messages
  5292. #   ##   0 = at most once
  5293. #   ##   1 = at least once
  5294. #   ##   2 = exactly once
  5295. #   ##
  5296. #   ## When using a QoS of 1 or 2, you should enable persistent_session to allow
  5297. #   ## resuming unacknowledged messages.
  5298. #   # qos = 0
  5299. #
  5300. #   ## Connection timeout for initial connection in seconds
  5301. #   # connection_timeout = "30s"
  5302. #
  5303. #   ## Maximum messages to read from the broker that have not been written by an
  5304. #   ## output.  For best throughput set based on the number of metrics within
  5305. #   ## each message and the size of the output's metric_batch_size.
  5306. #   ##
  5307. #   ## For example, if each message from the queue contains 10 metrics and the
  5308. #   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
  5309. #   ## full batch is collected and the write is triggered immediately without
  5310. #   ## waiting until the next flush_interval.
  5311. #   # max_undelivered_messages = 1000
  5312. #
  5313. #   ## Persistent session disables clearing of the client session on connection.
  5314. #   ## In order for this option to work you must also set client_id to identify
  5315. #   ## the client.  To receive messages that arrived while the client is offline,
  5316. #   ## also set the qos option to 1 or 2 and don't forget to also set the QoS when
  5317. #   ## publishing.
  5318. #   # persistent_session = false
  5319. #
  5320. #   ## If unset, a random client ID will be generated.
  5321. #   # client_id = ""
  5322. #
  5323. #   ## Username and password to connect to the MQTT server.
  5324. #   # username = "telegraf"
  5325. #   # password = "metricsmetricsmetricsmetrics"
  5326. #
  5327. #   ## Optional TLS Config
  5328. #   # tls_ca = "/etc/telegraf/ca.pem"
  5329. #   # tls_cert = "/etc/telegraf/cert.pem"
  5330. #   # tls_key = "/etc/telegraf/key.pem"
  5331. #   ## Use TLS but skip chain & host verification
  5332. #   # insecure_skip_verify = false
  5333. #
  5334. #   ## Data format to consume.
  5335. #   ## Each data format has its own unique set of configuration options, read
  5336. #   ## more about them here:
  5337. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  5338. #   data_format = "influx"
  5339.  
  5340.  
  5341. # # Read metrics from NATS subject(s)
  5342. # [[inputs.nats_consumer]]
  5343. #   ## urls of NATS servers
  5344. #   servers = ["nats://localhost:4222"]
  5345. #
  5346. #   ## subject(s) to consume
  5347. #   subjects = ["telegraf"]
  5348. #   ## name a queue group
  5349. #   queue_group = "telegraf_consumers"
  5350. #
  5351. #   ## Optional credentials
  5352. #   # username = ""
  5353. #   # password = ""
  5354. #
  5355. #   ## Use Transport Layer Security
  5356. #   # secure = false
  5357. #
  5358. #   ## Optional TLS Config
  5359. #   # tls_ca = "/etc/telegraf/ca.pem"
  5360. #   # tls_cert = "/etc/telegraf/cert.pem"
  5361. #   # tls_key = "/etc/telegraf/key.pem"
  5362. #   ## Use TLS but skip chain & host verification
  5363. #   # insecure_skip_verify = false
  5364. #
  5365. #   ## Sets the limits for pending msgs and bytes for each subscription
  5366. #   ## These shouldn't need to be adjusted except in very high throughput scenarios
  5367. #   # pending_message_limit = 65536
  5368. #   # pending_bytes_limit = 67108864
  5369. #
  5370. #   ## Maximum messages to read from the broker that have not been written by an
  5371. #   ## output.  For best throughput set based on the number of metrics within
  5372. #   ## each message and the size of the output's metric_batch_size.
  5373. #   ##
  5374. #   ## For example, if each message from the queue contains 10 metrics and the
  5375. #   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
  5376. #   ## full batch is collected and the write is triggered immediately without
  5377. #   ## waiting until the next flush_interval.
  5378. #   # max_undelivered_messages = 1000
  5379. #
  5380. #   ## Data format to consume.
  5381. #   ## Each data format has its own unique set of configuration options, read
  5382. #   ## more about them here:
  5383. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  5384. #   data_format = "influx"
  5385.  
  5386.  
  5387. # # Read NSQ topic for metrics.
  5388. # [[inputs.nsq_consumer]]
  5389. #   ## Server option still works but is deprecated, we just prepend it to the nsqd array.
  5390. #   # server = "localhost:4150"
  5391. #   ## An array representing the NSQD TCP HTTP Endpoints
  5392. #   nsqd = ["localhost:4150"]
  5393. #   ## An array representing the NSQLookupd HTTP Endpoints
  5394. #   nsqlookupd = ["localhost:4161"]
  5395. #   topic = "telegraf"
  5396. #   channel = "consumer"
  5397. #   max_in_flight = 100
  5398. #
  5399. #   ## Maximum messages to read from the broker that have not been written by an
  5400. #   ## output.  For best throughput set based on the number of metrics within
  5401. #   ## each message and the size of the output's metric_batch_size.
  5402. #   ##
  5403. #   ## For example, if each message from the queue contains 10 metrics and the
  5404. #   ## output metric_batch_size is 1000, setting this to 100 will ensure that a
  5405. #   ## full batch is collected and the write is triggered immediately without
  5406. #   ## waiting until the next flush_interval.
  5407. #   # max_undelivered_messages = 1000
  5408. #
  5409. #   ## Data format to consume.
  5410. #   ## Each data format has its own unique set of configuration options, read
  5411. #   ## more about them here:
  5412. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  5413. #   data_format = "influx"
  5414.  
  5415.  
  5416. # # Read metrics from one or many pgbouncer servers
  5417. # [[inputs.pgbouncer]]
  5418. #   ## specify address via a url matching:
  5419. #   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
  5420. #   ##       ?sslmode=[disable|verify-ca|verify-full]
  5421. #   ## or a simple string:
  5422. #   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
  5423. #   ##
  5424. #   ## All connection parameters are optional.
  5425. #   ##
  5426. #   address = "host=localhost user=pgbouncer sslmode=disable"
  5427.  
  5428.  
  5429. # # Read metrics from one or many postgresql servers
  5430. # [[inputs.postgresql]]
  5431. #   ## specify address via a url matching:
  5432. #   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
  5433. #   ##       ?sslmode=[disable|verify-ca|verify-full]
  5434. #   ## or a simple string:
  5435. #   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
  5436. #   ##
  5437. #   ## All connection parameters are optional.
  5438. #   ##
  5439. #   ## Without the dbname parameter, the driver will default to a database
  5440. #   ## with the same name as the user. This dbname is just for instantiating a
  5441. #   ## connection with the server and doesn't restrict the databases we are trying
  5442. #   ## to grab metrics for.
  5443. #   ##
  5444. #   address = "host=localhost user=postgres sslmode=disable"
  5445. #   ## A custom name for the database that will be used as the "server" tag in the
  5446. #   ## measurement output. If not specified, a default one generated from
  5447. #   ## the connection address is used.
  5448. #   # outputaddress = "db01"
  5449. #
  5450. #   ## connection configuration.
  5451. #   ## maxlifetime - specify the maximum lifetime of a connection.
  5452. #   ## default is forever (0s)
  5453. #   max_lifetime = "0s"
  5454. #
  5455. #   ## A  list of databases to explicitly ignore.  If not specified, metrics for all
  5456. #   ## databases are gathered.  Do NOT use with the 'databases' option.
  5457. #   # ignored_databases = ["postgres", "template0", "template1"]
  5458. #
  5459. #   ## A list of databases to pull metrics about. If not specified, metrics for all
  5460. #   ## databases are gathered.  Do NOT use with the 'ignored_databases' option.
  5461. #   # databases = ["app_production", "testing"]
  5462.  
  5463.  
  5464. # # Read metrics from one or many postgresql servers
  5465. # [[inputs.postgresql_extensible]]
  5466. #   ## specify address via a url matching:
  5467. #   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
  5468. #   ##       ?sslmode=[disable|verify-ca|verify-full]
  5469. #   ## or a simple string:
  5470. #   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
  5471. #   #
  5472. #   ## All connection parameters are optional.  #
  5473. #   ## Without the dbname parameter, the driver will default to a database
  5474. #   ## with the same name as the user. This dbname is just for instantiating a
  5475. #   ## connection with the server and doesn't restrict the databases we are trying
  5476. #   ## to grab metrics for.
  5477. #   #
  5478. #   address = "host=localhost user=postgres sslmode=disable"
  5479. #
  5480. #   ## connection configuration.
  5481. #   ## maxlifetime - specify the maximum lifetime of a connection.
  5482. #   ## default is forever (0s)
  5483. #   max_lifetime = "0s"
  5484. #
  5485. #   ## A list of databases to pull metrics about. If not specified, metrics for all
  5486. #   ## databases are gathered.
  5487. #   ## databases = ["app_production", "testing"]
  5488. #   #
  5489. #   ## A custom name for the database that will be used as the "server" tag in the
  5490. #   ## measurement output. If not specified, a default one generated from
  5491. #   ## the connection address is used.
  5492. #   # outputaddress = "db01"
  5493. #   #
  5494. #   ## Define the toml config where the sql queries are stored
  5495. #   ## New queries can be added, if the withdbname is set to true and there is no
  5496. #   ## databases defined in the 'databases field', the sql query is ended by a
  5497. #   ## 'is not null' in order to make the query succeed.
  5498. #   ## Example :
  5499. #   ## The sqlquery : "SELECT * FROM pg_stat_database where datname" becomes
  5500. #   ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
  5501. #   ## because the databases variable was set to ['postgres', 'pgbench' ] and the
  5502. #   ## withdbname was true. Be careful that if the withdbname is set to false you
  5503. #   ## don't have to define the where clause (aka with the dbname) the tagvalue
  5504. #   ## field is used to define custom tags (separated by commas)
  5505. #   ## The optional "measurement" value can be used to override the default
  5506. #   ## output measurement name ("postgresql").
  5507. #   #
  5508. #   ## Structure :
  5509. #   ## [[inputs.postgresql_extensible.query]]
  5510. #   ##   sqlquery string
  5511. #   ##   version string
  5512. #   ##   withdbname boolean
  5513. #   ##   tagvalue string (comma separated)
  5514. #   ##   measurement string
  5515. #   [[inputs.postgresql_extensible.query]]
  5516. #     sqlquery="SELECT * FROM pg_stat_database"
  5517. #     version=901
  5518. #     withdbname=false
  5519. #     tagvalue=""
  5520. #     measurement=""
  5521. #   [[inputs.postgresql_extensible.query]]
  5522. #     sqlquery="SELECT * FROM pg_stat_bgwriter"
  5523. #     version=901
  5524. #     withdbname=false
  5525. #     tagvalue="postgresql.stats"
  5526.  
  5527.  
  5528. # # Read metrics from one or many prometheus clients
  5529. # [[inputs.prometheus]]
  5530. #   ## An array of urls to scrape metrics from.
  5531. #   urls = ["http://localhost:9100/metrics"]
  5532. #
  5533. #   ## An array of Kubernetes services to scrape metrics from.
  5534. #   # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
  5535. #
  5536. #   ## Kubernetes config file to create client from.
  5537. #   # kube_config = "/path/to/kubernetes.config"
  5538. #
  5539. #   ## Scrape Kubernetes pods for the following prometheus annotations:
  5540. #   ## - prometheus.io/scrape: Enable scraping for this pod
  5541. #   ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
  5542. #   ##     set this to 'https' & most likely set the tls config.
  5543. #   ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
  5544. #   ## - prometheus.io/port: If port is not 9102 use this annotation
  5545. #   # monitor_kubernetes_pods = true
  5546. #   ## Restricts Kubernetes monitoring to a single namespace
  5547. #   ##   ex: monitor_kubernetes_pods_namespace = "default"
  5548. #   # monitor_kubernetes_pods_namespace = ""
  5549. #
  5550. #   ## Use bearer token for authorization. ('bearer_token' takes priority)
  5551. #   # bearer_token = "/path/to/bearer/token"
  5552. #   ## OR
  5553. #   # bearer_token_string = "abc_123"
  5554. #
  5555. #   ## HTTP Basic Authentication username and password. ('bearer_token' and
  5556. #   ## 'bearer_token_string' take priority)
  5557. #   # username = ""
  5558. #   # password = ""
  5559. #
  5560. #   ## Specify timeout duration for slower prometheus clients (default is 3s)
  5561. #   # response_timeout = "3s"
  5562. #
  5563. #   ## Optional TLS Config
  5564. #   # tls_ca = /path/to/cafile
  5565. #   # tls_cert = /path/to/certfile
  5566. #   # tls_key = /path/to/keyfile
  5567. #   ## Use TLS but skip chain & host verification
  5568. #   # insecure_skip_verify = false
  5569.  
  5570.  
  5571. # # Generic socket listener capable of handling multiple socket types.
  5572. # [[inputs.socket_listener]]
  5573. #   ## URL to listen on
  5574. #   # service_address = "tcp://:8094"
  5575. #   # service_address = "tcp://127.0.0.1:http"
  5576. #   # service_address = "tcp4://:8094"
  5577. #   # service_address = "tcp6://:8094"
  5578. #   # service_address = "tcp6://[2001:db8::1]:8094"
  5579. #   # service_address = "udp://:8094"
  5580. #   # service_address = "udp4://:8094"
  5581. #   # service_address = "udp6://:8094"
  5582. #   # service_address = "unix:///tmp/telegraf.sock"
  5583. #   # service_address = "unixgram:///tmp/telegraf.sock"
  5584. #
  5585. #   ## Change the file mode bits on unix sockets.  These permissions may not be
  5586. #   ## respected by some platforms, to safely restrict write permissions it is best
  5587. #   ## to place the socket into a directory that has previously been created
  5588. #   ## with the desired permissions.
  5589. #   ##   ex: socket_mode = "777"
  5590. #   # socket_mode = ""
  5591. #
  5592. #   ## Maximum number of concurrent connections.
  5593. #   ## Only applies to stream sockets (e.g. TCP).
  5594. #   ## 0 (default) is unlimited.
  5595. #   # max_connections = 1024
  5596. #
  5597. #   ## Read timeout.
  5598. #   ## Only applies to stream sockets (e.g. TCP).
  5599. #   ## 0 (default) is unlimited.
  5600. #   # read_timeout = "30s"
  5601. #
  5602. #   ## Optional TLS configuration.
  5603. #   ## Only applies to stream sockets (e.g. TCP).
  5604. #   # tls_cert = "/etc/telegraf/cert.pem"
  5605. #   # tls_key  = "/etc/telegraf/key.pem"
  5606. #   ## Enables client authentication if set.
  5607. #   # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
  5608. #
  5609. #   ## Maximum socket buffer size (in bytes when no unit specified).
  5610. #   ## For stream sockets, once the buffer fills up, the sender will start backing up.
  5611. #   ## For datagram sockets, once the buffer fills up, metrics will start dropping.
  5612. #   ## Defaults to the OS default.
  5613. #   # read_buffer_size = "64KiB"
  5614. #
  5615. #   ## Period between keep alive probes.
  5616. #   ## Only applies to TCP sockets.
  5617. #   ## 0 disables keep alive probes.
  5618. #   ## Defaults to the OS configuration.
  5619. #   # keep_alive_period = "5m"
  5620. #
  5621. #   ## Data format to consume.
  5622. #   ## Each data format has its own unique set of configuration options, read
  5623. #   ## more about them here:
  5624. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  5625. #   # data_format = "influx"
  5626.  
  5627.  
  5628. # # Statsd UDP/TCP Server
  5629. # [[inputs.statsd]]
  5630. #   ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
  5631. #   protocol = "udp"
  5632. #
  5633. #   ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
  5634. #   max_tcp_connections = 250
  5635. #
  5636. #   ## Enable TCP keep alive probes (default=false)
  5637. #   tcp_keep_alive = false
  5638. #
  5639. #   ## Specifies the keep-alive period for an active network connection.
  5640. #   ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
  5641. #   ## Defaults to the OS configuration.
  5642. #   # tcp_keep_alive_period = "2h"
  5643. #
  5644. #   ## Address and port to host UDP listener on
  5645. #   service_address = ":8125"
  5646. #
  5647. #   ## The following configuration options control when telegraf clears its cache
  5648. #   ## of previous values. If set to false, then telegraf will only clear its
  5649. #   ## cache when the daemon is restarted.
  5650. #   ## Reset gauges every interval (default=true)
  5651. #   delete_gauges = true
  5652. #   ## Reset counters every interval (default=true)
  5653. #   delete_counters = true
  5654. #   ## Reset sets every interval (default=true)
  5655. #   delete_sets = true
  5656. #   ## Reset timings & histograms every interval (default=true)
  5657. #   delete_timings = true
  5658. #
  5659. #   ## Percentiles to calculate for timing & histogram stats
  5660. #   percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0]
  5661. #
  5662. #   ## separator to use between elements of a statsd metric
  5663. #   metric_separator = "_"
  5664. #
  5665. #   ## Parses tags in the datadog statsd format
  5666. #   ## http://docs.datadoghq.com/guides/dogstatsd/
  5667. #   parse_data_dog_tags = false
  5668. #
  5669. #   ## Parses datadog extensions to the statsd format
  5670. #   datadog_extensions = false
  5671. #
  5672. #   ## Statsd data translation templates, more info can be read here:
  5673. #   ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
  5674. #   # templates = [
  5675. #   #     "cpu.* measurement*"
  5676. #   # ]
  5677. #
  5678. #   ## Number of UDP messages allowed to queue up, once filled,
  5679. #   ## the statsd server will start dropping packets
  5680. #   allowed_pending_messages = 10000
  5681. #
  5682. #   ## Number of timing/histogram values to track per-measurement in the
  5683. #   ## calculation of percentiles. Raising this limit increases the accuracy
  5684. #   ## of percentiles but also increases the memory usage and cpu time.
  5685. #   percentile_limit = 1000
  5686.  
  5687.  
  5688. # # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587
  5689. # [[inputs.syslog]]
  5690. #   ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514
  5691. #   ## Protocol, address and port to host the syslog receiver.
  5692. #   ## If no host is specified, then localhost is used.
  5693. #   ## If no port is specified, 6514 is used (RFC5425#section-4.1).
  5694. #   server = "tcp://:6514"
  5695. #
  5696. #   ## TLS Config
  5697. #   # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
  5698. #   # tls_cert = "/etc/telegraf/cert.pem"
  5699. #   # tls_key = "/etc/telegraf/key.pem"
  5700. #
  5701. #   ## Period between keep alive probes.
  5702. #   ## 0 disables keep alive probes.
  5703. #   ## Defaults to the OS configuration.
  5704. #   ## Only applies to stream sockets (e.g. TCP).
  5705. #   # keep_alive_period = "5m"
  5706. #
  5707. #   ## Maximum number of concurrent connections (default = 0).
  5708. #   ## 0 means unlimited.
  5709. #   ## Only applies to stream sockets (e.g. TCP).
  5710. #   # max_connections = 1024
  5711. #
  5712. #   ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
  5713. #   ## 0 means unlimited.
  5714. #   # read_timeout = "5s"
  5715. #
  5716. #   ## The framing technique with which it is expected that messages are transported (default = "octet-counting").
  5717. #   ## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
  5718. #   ## or the non-transparent framing technique (RFC6587#section-3.4.2).
  5719. #   ## Must be one of "octet-counting", "non-transparent".
  5720. #   # framing = "octet-counting"
  5721. #
  5722. #   ## The trailer to be expected in case of non-transparent framing (default = "LF").
  5723. #   ## Must be one of "LF", or "NUL".
  5724. #   # trailer = "LF"
  5725. #
  5726. #   ## Whether to parse in best effort mode or not (default = false).
  5727. #   ## By default best effort parsing is off.
  5728. #   # best_effort = false
  5729. #
  5730. #   ## Character to prepend to SD-PARAMs (default = "_").
  5731. #   ## A syslog message can contain multiple parameters and multiple identifiers within structured data section.
  5732. #   ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
  5733. #   ## For each combination a field is created.
  5734. #   ## Its name is created concatenating identifier, sdparam_separator, and parameter name.
  5735. #   # sdparam_separator = "_"
  5736.  
  5737.  
  5738. # # Stream a log file, like the tail -f command
  5739. # [[inputs.tail]]
  5740. #   ## files to tail.
  5741. #   ## These accept standard unix glob matching rules, but with the addition of
  5742. #   ## ** as a "super asterisk". ie:
  5743. #   ##   "/var/log/**.log"  -> recursively find all .log files in /var/log
  5744. #   ##   "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
  5745. #   ##   "/var/log/apache.log" -> just tail the apache log file
  5746. #   ##
  5747. #   ## See https://github.com/gobwas/glob for more examples
  5748. #   ##
  5749. #   files = ["/var/mymetrics.out"]
  5750. #   ## Read file from beginning.
  5751. #   from_beginning = false
  5752. #   ## Whether file is a named pipe
  5753. #   pipe = false
  5754. #
  5755. #   ## Method used to watch for file updates.  Can be either "inotify" or "poll".
  5756. #   # watch_method = "inotify"
  5757. #
  5758. #   ## Data format to consume.
  5759. #   ## Each data format has its own unique set of configuration options, read
  5760. #   ## more about them here:
  5761. #   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  5762. #   data_format = "influx"
  5763.  
  5764.  
  5765. # # Generic TCP listener
  5766. # [[inputs.tcp_listener]]
  5767. #   # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
  5768. #   # socket_listener plugin
  5769. #   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
  5770.  
  5771.  
  5772. # # Generic UDP listener
  5773. # [[inputs.udp_listener]]
  5774. #   # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
  5775. #   # socket_listener plugin
  5776. #   # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
  5777.  
  5778.  
  5779. # # Read metrics from VMware vCenter
  5780. # [[inputs.vsphere]]
  5781. #   ## List of vCenter URLs to be monitored. These three lines must be uncommented
  5782. #   ## and edited for the plugin to work.
  5783. #   vcenters = [ "https://vcenter.local/sdk" ]
  5784. #   username = "user@corp.local"
  5785. #   password = "secret"
  5786. #
  5787. #   ## VMs
  5788. #   ## Typical VM metrics (if omitted or empty, all metrics are collected)
  5789. #   vm_metric_include = [
  5790. #     "cpu.demand.average",
  5791. #     "cpu.idle.summation",
  5792. #     "cpu.latency.average",
  5793. #     "cpu.readiness.average",
  5794. #     "cpu.ready.summation",
  5795. #     "cpu.run.summation",
  5796. #     "cpu.usagemhz.average",
  5797. #     "cpu.used.summation",
  5798. #     "cpu.wait.summation",
  5799. #     "mem.active.average",
  5800. #     "mem.granted.average",
  5801. #     "mem.latency.average",
  5802. #     "mem.swapin.average",
  5803. #     "mem.swapinRate.average",
  5804. #     "mem.swapout.average",
  5805. #     "mem.swapoutRate.average",
  5806. #     "mem.usage.average",
  5807. #     "mem.vmmemctl.average",
  5808. #     "net.bytesRx.average",
  5809. #     "net.bytesTx.average",
  5810. #     "net.droppedRx.summation",
  5811. #     "net.droppedTx.summation",
  5812. #     "net.usage.average",
  5813. #     "power.power.average",
  5814. #     "virtualDisk.numberReadAveraged.average",
  5815. #     "virtualDisk.numberWriteAveraged.average",
  5816. #     "virtualDisk.read.average",
  5817. #     "virtualDisk.readOIO.latest",
  5818. #     "virtualDisk.throughput.usage.average",
  5819. #     "virtualDisk.totalReadLatency.average",
  5820. #     "virtualDisk.totalWriteLatency.average",
  5821. #     "virtualDisk.write.average",
  5822. #     "virtualDisk.writeOIO.latest",
  5823. #     "sys.uptime.latest",
  5824. #   ]
  5825. #   # vm_metric_exclude = [] ## Nothing is excluded by default
  5826. #   # vm_instances = true ## true by default
  5827. #
  5828. #   ## Hosts
  5829. #   ## Typical host metrics (if omitted or empty, all metrics are collected)
  5830. #   host_metric_include = [
  5831. #     "cpu.coreUtilization.average",
  5832. #     "cpu.costop.summation",
  5833. #     "cpu.demand.average",
  5834. #     "cpu.idle.summation",
  5835. #     "cpu.latency.average",
  5836. #     "cpu.readiness.average",
  5837. #     "cpu.ready.summation",
  5838. #     "cpu.swapwait.summation",
  5839. #     "cpu.usage.average",
  5840. #     "cpu.usagemhz.average",
  5841. #     "cpu.used.summation",
  5842. #     "cpu.utilization.average",
  5843. #     "cpu.wait.summation",
  5844. #     "disk.deviceReadLatency.average",
  5845. #     "disk.deviceWriteLatency.average",
  5846. #     "disk.kernelReadLatency.average",
  5847. #     "disk.kernelWriteLatency.average",
  5848. #     "disk.numberReadAveraged.average",
  5849. #     "disk.numberWriteAveraged.average",
  5850. #     "disk.read.average",
  5851. #     "disk.totalReadLatency.average",
  5852. #     "disk.totalWriteLatency.average",
  5853. #     "disk.write.average",
  5854. #     "mem.active.average",
  5855. #     "mem.latency.average",
  5856. #     "mem.state.latest",
  5857. #     "mem.swapin.average",
  5858. #     "mem.swapinRate.average",
  5859. #     "mem.swapout.average",
  5860. #     "mem.swapoutRate.average",
  5861. #     "mem.totalCapacity.average",
  5862. #     "mem.usage.average",
  5863. #     "mem.vmmemctl.average",
  5864. #     "net.bytesRx.average",
  5865. #     "net.bytesTx.average",
  5866. #     "net.droppedRx.summation",
  5867. #     "net.droppedTx.summation",
  5868. #     "net.errorsRx.summation",
  5869. #     "net.errorsTx.summation",
  5870. #     "net.usage.average",
  5871. #     "power.power.average",
  5872. #     "storageAdapter.numberReadAveraged.average",
  5873. #     "storageAdapter.numberWriteAveraged.average",
  5874. #     "storageAdapter.read.average",
  5875. #     "storageAdapter.write.average",
  5876. #     "sys.uptime.latest",
  5877. #   ]
  5878. #   ## Collect IP addresses? Valid values are "ipv4" and "ipv6"
  5879. #   # ip_addresses = ["ipv6", "ipv4" ]
  5880. #   # host_metric_exclude = [] ## Nothing excluded by default
  5881. #   # host_instances = true ## true by default
  5882. #
  5883. #   ## Clusters
  5884. #   # cluster_metric_include = [] ## if omitted or empty, all metrics are collected
  5885. #   # cluster_metric_exclude = [] ## Nothing excluded by default
  5886. #   # cluster_instances = false ## false by default
  5887. #
  5888. #   ## Datastores
  5889. #   # datastore_metric_include = [] ## if omitted or empty, all metrics are collected
  5890. #   # datastore_metric_exclude = [] ## Nothing excluded by default
  5891. #   # datastore_instances = false ## false by default for Datastores only
  5892. #
  5893. #   ## Datacenters
  5894. #   datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
  5895. #   datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
  5896. #   # datacenter_instances = false ## false by default for Datastores only
  5897. #
  5898. #   ## Plugin Settings
  5899. #   ## separator character to use for measurement and field names (default: "_")
  5900. #   # separator = "_"
  5901. #
  5902. #   ## number of objects to retrieve per query for realtime resources (vms and hosts)
  5903. #   ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
  5904. #   # max_query_objects = 256
  5905. #
  5906. #   ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores)
  5907. #   ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
  5908. #   # max_query_metrics = 256
  5909. #
  5910. #   ## number of go routines to use for collection and discovery of objects and metrics
  5911. #   # collect_concurrency = 1
  5912. #   # discover_concurrency = 1
  5913. #
  5914. #   ## whether or not to force discovery of new objects on initial gather call before collecting metrics
  5915. #   ## when true for large environments this may cause errors for time elapsed while collecting metrics
  5916. #   ## when false (default) the first collection cycle may result in no or limited metrics while objects are discovered
  5917. #   # force_discover_on_init = false
  5918. #
  5919. #   ## the interval before (re)discovering objects subject to metrics collection (default: 300s)
  5920. #   # object_discovery_interval = "300s"
  5921. #
  5922. #   ## timeout applies to any of the api request made to vcenter
  5923. #   # timeout = "60s"
  5924. #
  5925. #   ## When set to true, all samples are sent as integers. This makes the output
  5926. #   ## data types backwards compatible with Telegraf 1.9 or lower. Normally all
  5927. #   ## samples from vCenter, with the exception of percentages, are integer
  5928. #   ## values, but under some conditions, some averaging takes place internally in
  5929. #   ## the plugin. Setting this flag to "false" will send values as floats to
  5930. #   ## preserve the full precision when averaging takes place.
  5931. #   # use_int_samples = true
  5932. #
  5933. #   ## Custom attributes from vCenter can be very useful for queries in order to slice the
  5934. #   ## metrics along different dimensions and for forming ad-hoc relationships. They are disabled
  5935. #   ## by default, since they can add a considerable amount of tags to the resulting metrics. To
  5936. #   ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
  5937. #   ## to select the attributes you want to include.
  5938. #   # custom_attribute_include = []
  5939. #   # custom_attribute_exclude = ["*"]
  5940. #
  5941. #   ## Optional SSL Config
  5942. #   # ssl_ca = "/path/to/cafile"
  5943. #   # ssl_cert = "/path/to/certfile"
  5944. #   # ssl_key = "/path/to/keyfile"
  5945. #   ## Use SSL but skip chain & host verification
  5946. #   # insecure_skip_verify = false
  5947.  
  5948.  
  5949. # # A Webhooks Event collector
  5950. # [[inputs.webhooks]]
  5951. #   ## Address and port to host Webhook listener on
  5952. #   service_address = ":1619"
  5953. #
  5954. #   [inputs.webhooks.filestack]
  5955. #     path = "/filestack"
  5956. #
  5957. #   [inputs.webhooks.github]
  5958. #     path = "/github"
  5959. #     # secret = ""
  5960. #
  5961. #   [inputs.webhooks.mandrill]
  5962. #     path = "/mandrill"
  5963. #
  5964. #   [inputs.webhooks.rollbar]
  5965. #     path = "/rollbar"
  5966. #
  5967. #   [inputs.webhooks.papertrail]
  5968. #     path = "/papertrail"
  5969. #
  5970. #   [inputs.webhooks.particle]
  5971. #     path = "/particle"
  5972.  
  5973.  
  5974. # # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
  5975. # [[inputs.zipkin]]
  5976. #   # path = "/api/v1/spans" # URL path for span data
  5977. #   # port = 9411            # Port on which Telegraf listens
RAW Paste Data
We use cookies for various purposes including analytics. By continuing to use Pastebin, you agree to our use of cookies as described in the Cookies Policy. OK, I Understand
Not a member of Pastebin yet?
Sign Up, it unlocks many cool features!
 
Top