- # Telegraf Configuration
- #
- # Telegraf is entirely plugin driven. All metrics are gathered from the
- # declared inputs, and sent to the declared outputs.
- #
- # Plugins must be declared in here to be active.
- # To deactivate a plugin, comment out the name and any variables.
- #
- # Use 'telegraf -config telegraf.conf -test' to see what metrics a config
- # file would generate.
- #
- # Environment variables can be used anywhere in this config file, simply prepend
- # them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
- # for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
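- #
- # For example (a sketch; MY_HOST and MY_BATCH_SIZE are hypothetical variables
- # assumed to be exported in telegraf's environment):
- #   hostname = "$MY_HOST"               # string: variable stays inside quotes
- #   metric_batch_size = $MY_BATCH_SIZE  # number: variable stays plain
- # A config that uses variables can still be validated with the -test run above.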
- # Global tags can be specified here in key="value" format.
- [global_tags]
- # dc = "us-east-1" # will tag all metrics with dc=us-east-1
- # rack = "1a"
- ## Environment variables can be used as tags, and throughout the config file
- # user = "$USER"
- # Configuration for telegraf agent
- [agent]
- ## Default data collection interval for all inputs
- interval = "10s"
- ## Rounds collection interval to 'interval'
- ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
- round_interval = false
- ## Telegraf will send metrics to outputs in batches of at
- ## most metric_batch_size metrics.
- metric_batch_size = 1000
- ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
- ## output, and will flush this buffer on a successful write. Oldest metrics
- ## are dropped first when this buffer fills.
- metric_buffer_limit = 10000
- ## Collection jitter is used to jitter the collection by a random amount.
- ## Each plugin will sleep for a random time within jitter before collecting.
- ## This can be used to avoid many plugins querying things like sysfs at the
- ## same time, which can have a measurable effect on the system.
- collection_jitter = "0s"
- ## Default flushing interval for all outputs. You shouldn't set this below
- ## interval. Maximum flush_interval will be flush_interval + flush_jitter
- flush_interval = "30s"
- ## Jitter the flush interval by a random amount. This is primarily to avoid
- ## large write spikes for users running a large number of telegraf instances.
- ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
- flush_jitter = "0s"
- ## By default, precision will be set to the same timestamp order as the
- ## collection interval, with the maximum being 1s.
- ## Precision will NOT be used for service inputs, such as logparser and statsd.
- ## Valid values are "Nns", "Nus" (or "Nµs"), "Nms", "Ns".
- precision = ""
- ## Run telegraf in debug mode
- debug = false
- ## Run telegraf in quiet mode
- quiet = false
- ## Override default hostname, if empty use os.Hostname()
- ## hostname = ""
- ## If set to true, do not set the "host" tag in the telegraf agent.
- omit_hostname = false
- ###############################################################################
- # OUTPUT PLUGINS #
- ###############################################################################
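- # NOTE: this paste declares no output plugin, so telegraf would have nowhere to
- # send the collected metrics. A minimal sketch of an InfluxDB output (assuming
- # a local InfluxDB on its default port; left commented out here):
- # [[outputs.influxdb]]
- #   urls = ["http://localhost:8086"]
- #   database = "telegraf"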
- ###############################################################################
- # INPUT PLUGINS #
- ###############################################################################
- # Statsd Server
- #[[inputs.statsd]]
- ## Address and port to host UDP listener on
- #service_address = ":8125"
- ## Delete gauges every interval (default=false)
- #delete_gauges = false
- ## Delete counters every interval (default=false)
- #delete_counters = false
- ## Delete sets every interval (default=false)
- #delete_sets = false
- ## Delete timings & histograms every interval (default=true)
- #delete_timings = true
- ## Percentiles to calculate for timing & histogram stats
- #percentiles = [90]
- ## separator to use between elements of a statsd metric
- #metric_separator = "_"
- ## Parses tags in the datadog statsd format
- ## http://docs.datadoghq.com/guides/dogstatsd/
- #parse_data_dog_tags = false
- ## Statsd data translation templates, more info can be read here:
- ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
- # templates = [
- # "cpu.* measurement*"
- # ]
- ## Number of UDP messages allowed to queue up, once filled,
- ## the statsd server will start dropping packets
- #allowed_pending_messages = 10000
- ## Number of timing/histogram values to track per-measurement in the
- ## calculation of percentiles. Raising this limit increases the accuracy
- ## of percentiles but also increases the memory usage and cpu time.
- #percentile_limit = 1000
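- ## As an illustration only (hypothetical metric, not part of this config):
- ## with parse_data_dog_tags = true, a packet such as
- ##   users.online:1|c|#region:us,env:prod
- ## would produce a "users_online" counter tagged region=us and env=prod,
- ## given the metric_separator = "_" above.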
- # Read metrics about cpu usage
- [[inputs.cpu]]
- ## Whether to report per-cpu stats or not
- percpu = true
- ## Whether to report total system cpu stats or not
- totalcpu = true
- ## Comment this line if you want the raw CPU time metrics
- fielddrop = ["time_*"]
- # Read metrics about disk usage by mount point
- [[inputs.disk]]
- ## By default, telegraf gathers stats for all mountpoints.
- ## Setting mountpoints will restrict the stats to the specified mountpoints.
- # mount_points = ["/"]
- ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
- ## present on /run, /var/run, /dev/shm or /dev).
- ignore_fs = ["tmpfs", "devtmpfs", "squashfs"]
- # Read metrics about disk IO by device
- # [[inputs.diskio]]
- ## By default, telegraf will gather stats for all devices including
- ## disk partitions.
- ## Setting devices will restrict the stats to the specified devices.
- # devices = ["sda", "sdb"]
- ## Uncomment the following line if you need disk serial numbers.
- # skip_serial_number = false
- # Get kernel statistics from /proc/stat
- [[inputs.kernel]]
- # no configuration
- # Read metrics about memory usage
- [[inputs.mem]]
- # no configuration
- # Get the number of processes and group them by status
- [[inputs.processes]]
- # no configuration
- # Read metrics about swap memory usage
- [[inputs.swap]]
- # no configuration
- # Read metrics about system load & uptime
- [[inputs.system]]
- fielddrop = ["uptime_format"]
- # # Read stats from aerospike server(s)
- # [[inputs.aerospike]]
- # ## Aerospike servers to connect to (with port)
- # ## This plugin will query all namespaces the aerospike
- # ## server has configured and get stats for them.
- # servers = ["localhost:3000"]
- # # Read Apache status information (mod_status)
- # [[inputs.apache]]
- # ## An array of Apache status URIs to gather stats.
- # ## Default is "http://localhost/server-status?auto".
- # urls = ["http://localhost/server-status?auto"]
- # # Read metrics of bcache from stats_total and dirty_data
- # [[inputs.bcache]]
- # ## Bcache sets path
- # ## If not specified, then default is:
- # bcachePath = "/sys/fs/bcache"
- #
- # ## By default, telegraf gathers stats for all bcache devices
- # ## Setting devices will restrict the stats to the specified
- # ## bcache devices.
- # bcacheDevs = ["bcache0"]
- # # Read Cassandra metrics through Jolokia
- # [[inputs.cassandra]]
- # # This is the context root used to compose the jolokia url
- # context = "/jolokia/read"
- # ## List of cassandra servers exposing jolokia read service
- # servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
- # ## List of metrics collected on above servers
- # ## Each metric consists of a jmx path.
- # ## This will collect all heap memory usage metrics from the jvm and
- # ## ReadLatency metrics for all keyspaces and tables.
- # ## "type=Table" in the query works with Cassandra3.0. Older versions might
- # ## need to use "type=ColumnFamily"
- # metrics = [
- # "/java.lang:type=Memory/HeapMemoryUsage",
- # "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
- # ]
- # # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
- # [[inputs.ceph]]
- # ## All configuration values are optional, defaults are shown below
- #
- # ## location of ceph binary
- # ceph_binary = "/usr/bin/ceph"
- #
- # ## directory in which to look for socket files
- # socket_dir = "/var/run/ceph"
- #
- # ## prefix of MON and OSD socket files, used to determine socket type
- # mon_prefix = "ceph-mon"
- # osd_prefix = "ceph-osd"
- #
- # ## suffix used to identify socket files
- # socket_suffix = "asok"
- # # Read specific statistics per cgroup
- # [[inputs.cgroup]]
- # ## Directories in which to look for files, globs are supported.
- # # paths = [
- # # "/cgroup/memory",
- # # "/cgroup/memory/child1",
- # # "/cgroup/memory/child2/*",
- # # ]
- # ## cgroup stat fields, as file names, globs are supported.
- # ## these file names are appended to each path from above.
- # # files = ["memory.*usage*", "memory.limit_in_bytes"]
- # # Pull Metric Statistics from Amazon CloudWatch
- # [[inputs.cloudwatch]]
- # ## Amazon Region
- # region = 'us-east-1'
- #
- # ## Amazon Credentials
- # ## Credentials are loaded in the following order
- # ## 1) Assumed credentials via STS if role_arn is specified
- # ## 2) explicit credentials from 'access_key' and 'secret_key'
- # ## 3) shared profile from 'profile'
- # ## 4) environment variables
- # ## 5) shared credentials file
- # ## 6) EC2 Instance Profile
- # #access_key = ""
- # #secret_key = ""
- # #token = ""
- # #role_arn = ""
- # #profile = ""
- # #shared_credential_file = ""
- #
- # ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
- # period = '1m'
- #
- # ## Collection Delay (required - must account for metrics availability via CloudWatch API)
- # delay = '1m'
- #
- # ## Recommended: use a metric 'interval' that is a multiple of 'period' to avoid
- # ## gaps or overlap in pulled data
- # interval = '1m'
- #
- # ## Configure the TTL for the internal cache of metrics.
- # ## Defaults to 1 hr if not specified
- # #cache_ttl = '10m'
- #
- # ## Metric Statistic Namespace (required)
- # namespace = 'AWS/ELB'
- #
- # ## Metrics to Pull (optional)
- # ## Defaults to all Metrics in Namespace if nothing is provided
- # ## Refreshes Namespace available metrics every 1h
- # #[[inputs.cloudwatch.metrics]]
- # # names = ['Latency', 'RequestCount']
- # #
- # # ## Dimension filters for Metric (optional)
- # # [[inputs.cloudwatch.metrics.dimensions]]
- # # name = 'LoadBalancerName'
- # # value = 'p-example'
- # # Gather health check statuses from services registered in Consul
- # [[inputs.consul]]
- # ## Most of these values default to those configured at the Consul agent level.
- # ## Optional Consul server address (default: "localhost")
- # # address = "localhost"
- # ## Optional URI scheme for the Consul server (default: "http")
- # # scheme = "http"
- # ## Optional ACL token used in every request (default: "")
- # # token = ""
- # ## Optional username used for request HTTP Basic Authentication (default: "")
- # # username = ""
- # ## Optional password used for HTTP Basic Authentication (default: "")
- # # password = ""
- # ## Optional data centre to query the health checks from (default: "")
- # # datacentre = ""
- # # Read metrics from one or many couchbase clusters
- # [[inputs.couchbase]]
- # ## specify servers via a url matching:
- # ## [protocol://][:password]@address[:port]
- # ## e.g.
- # ## http://couchbase-0.example.com/
- # ## http://admin:secret@couchbase-0.example.com:8091/
- # ##
- # ## If no servers are specified, then localhost is used as the host.
- # ## If no protocol is specified, HTTP is used.
- # ## If no port is specified, 8091 is used.
- # servers = ["http://localhost:8091"]
- # # Read CouchDB Stats from one or more servers
- # [[inputs.couchdb]]
- # ## Works with CouchDB stats endpoints out of the box
- # ## Multiple HOSTs from which to read CouchDB stats:
- # hosts = ["http://localhost:8086/_stats"]
- # # Read metrics from one or many disque servers
- # [[inputs.disque]]
- # ## An array of URIs to gather stats about. Specify an ip or hostname
- # ## with optional port and password.
- # ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
- # ## If no servers are specified, then localhost is used as the host.
- # servers = ["localhost"]
- # # Query given DNS server and gives statistics
- # [[inputs.dns_query]]
- # ## servers to query
- # servers = ["8.8.8.8"] # required
- #
- # ## Domains or subdomains to query. "."(root) is default
- # domains = ["."] # optional
- #
- # ## Query record type. Default is "A"
- # ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
- # record_type = "A" # optional
- #
- # ## Dns server port. 53 is default
- # port = 53 # optional
- #
- # ## Query timeout in seconds. Default is 2 seconds
- # timeout = 2 # optional
- # # Read metrics about docker containers
- # [[inputs.docker]]
- # ## Docker Endpoint
- # ## To use TCP, set endpoint = "tcp://[ip]:[port]"
- # ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
- # endpoint = "unix:///var/run/docker.sock"
- # ## Only collect metrics for these containers, collect all if empty
- # container_names = []
- # ## Timeout for docker list, info, and stats commands
- # timeout = "5s"
- #
- # ## Whether to report for each container per-device blkio (8:0, 8:1...) and
- # ## network (eth0, eth1, ...) stats or not
- # perdevice = true
- # ## Whether to report for each container total blkio and network stats or not
- # total = false
- #
- # # Read statistics from one or many dovecot servers
- # [[inputs.dovecot]]
- # ## specify dovecot servers via an address:port list
- # ## e.g.
- # ## localhost:24242
- # ##
- # ## If no servers are specified, then localhost is used as the host.
- # servers = ["localhost:24242"]
- # ## Type is one of "user", "domain", "ip", or "global"
- # type = "global"
- # ## Wildcard matches like "*.com". An empty string "" is same as "*"
- # ## If type = "ip" filters should be <IP/network>
- # filters = [""]
- # # Read stats from one or more Elasticsearch servers or clusters
- # [[inputs.elasticsearch]]
- # ## specify a list of one or more Elasticsearch servers
- # servers = ["http://localhost:9200"]
- #
- # ## set local to false when you want to read the indices stats from all nodes
- # ## within the cluster
- # local = true
- #
- # ## set cluster_health to true when you want to also obtain cluster level stats
- # cluster_health = false
- #
- # ## Optional SSL Config
- # # ssl_ca = "/etc/telegraf/ca.pem"
- # # ssl_cert = "/etc/telegraf/cert.pem"
- # # ssl_key = "/etc/telegraf/key.pem"
- # ## Use SSL but skip chain & host verification
- # # insecure_skip_verify = false
- # # Read metrics from one or more commands that can output to stdout
- # [[inputs.exec]]
- # ## Commands array
- # commands = [
- # "/tmp/test.sh",
- # "/usr/bin/mycollector --foo=bar",
- # "/tmp/collect_*.sh"
- # ]
- #
- # ## Timeout for each command to complete.
- # timeout = "5s"
- #
- # ## measurement name suffix (for separating different commands)
- # name_suffix = "_mycollector"
- #
- # ## Data format to consume.
- # ## Each data format has its own unique set of configuration options, read
- # ## more about them here:
- # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
- # data_format = "influx"
- # # Read stats about given file(s)
- # [[inputs.filestat]]
- # ## Files to gather stats about.
- # ## These accept standard unix glob matching rules, but with the addition of
- # ## ** as a "super asterisk". ie:
- # ## "/var/log/**.log" -> recursively find all .log files in /var/log
- # ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
- # ## "/var/log/apache.log" -> just tail the apache log file
- # ##
- # ## See https://github.com/gobwas/glob for more examples
- # ##
- # files = ["/var/log/**.log"]
- # ## If true, read the entire file and calculate an md5 checksum.
- # md5 = false
- # # Read flattened metrics from one or more GrayLog HTTP endpoints
- # [[inputs.graylog]]
- # ## API endpoint, currently supported API:
- # ##
- # ## - multiple (Ex http://<host>:12900/system/metrics/multiple)
- # ## - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace})
- # ##
- # ## For namespace endpoint, the metrics array will be ignored for that call.
- # ## Endpoint can contain namespace and multiple type calls.
- # ##
- # ## Please check http://[graylog-server-ip]:12900/api-browser for full list
- # ## of endpoints
- # servers = [
- # "http://[graylog-server-ip]:12900/system/metrics/multiple",
- # ]
- #
- # ## Metrics list
- # ## The list of metrics can be found in the Graylog web service documentation,
- # ## or by hitting the web service API at:
- # ## http://[graylog-host]:12900/system/metrics
- # metrics = [
- # "jvm.cl.loaded",
- # "jvm.memory.pools.Metaspace.committed"
- # ]
- #
- # ## Username and password
- # username = ""
- # password = ""
- #
- # ## Optional SSL Config
- # # ssl_ca = "/etc/telegraf/ca.pem"
- # # ssl_cert = "/etc/telegraf/cert.pem"
- # # ssl_key = "/etc/telegraf/key.pem"
- # ## Use SSL but skip chain & host verification
- # # insecure_skip_verify = false
- # # Read metrics of haproxy, via socket or csv stats page
- # [[inputs.haproxy]]
- # ## An array of addresses to gather stats about. Specify an ip or hostname
- # ## with optional port. ie localhost, 10.10.3.33:1936, etc.
- # ## Make sure you specify the complete path to the stats endpoint
- # ## ie 10.10.3.33:1936/haproxy?stats
- # #
- # ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
- # servers = ["http://myhaproxy.com:1936/haproxy?stats"]
- # ## Or you can also use local socket
- # ## servers = ["socket:/run/haproxy/admin.sock"]
- # # HTTP/HTTPS request given an address a method and a timeout
- # [[inputs.http_response]]
- # ## Server address (default http://localhost)
- # address = "http://github.com"
- # ## Set response_timeout (default 5 seconds)
- # response_timeout = "5s"
- # ## HTTP Request Method
- # method = "GET"
- # ## Whether to follow redirects from the server (defaults to false)
- # follow_redirects = true
- # ## HTTP Request Headers (all values must be strings)
- # # [inputs.http_response.headers]
- # # Host = "github.com"
- # ## Optional HTTP Request Body
- # # body = '''
- # # {'fake':'data'}
- # # '''
- #
- # ## Optional SSL Config
- # # ssl_ca = "/etc/telegraf/ca.pem"
- # # ssl_cert = "/etc/telegraf/cert.pem"
- # # ssl_key = "/etc/telegraf/key.pem"
- # ## Use SSL but skip chain & host verification
- # # insecure_skip_verify = false
- # # Read flattened metrics from one or more JSON HTTP endpoints
- # [[inputs.httpjson]]
- # ## NOTE: This plugin only reads numerical measurements; strings and booleans
- # ## will be ignored.
- #
- # ## a name for the service being polled
- # name = "webserver_stats"
- #
- # ## URL of each server in the service's cluster
- # servers = [
- # "http://localhost:9999/stats/",
- # "http://localhost:9998/stats/",
- # ]
- #
- # ## HTTP method to use: GET or POST (case-sensitive)
- # method = "GET"
- #
- # ## List of tag names to extract from top-level of JSON server response
- # # tag_keys = [
- # # "my_tag_1",
- # # "my_tag_2"
- # # ]
- #
- # ## HTTP parameters (all values must be strings)
- # [inputs.httpjson.parameters]
- # event_type = "cpu_spike"
- # threshold = "0.75"
- #
- # ## HTTP Header parameters (all values must be strings)
- # # [inputs.httpjson.headers]
- # # X-Auth-Token = "my-xauth-token"
- # # apiVersion = "v1"
- #
- # ## Optional SSL Config
- # # ssl_ca = "/etc/telegraf/ca.pem"
- # # ssl_cert = "/etc/telegraf/cert.pem"
- # # ssl_key = "/etc/telegraf/key.pem"
- # ## Use SSL but skip chain & host verification
- # # insecure_skip_verify = false
- # # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
- # [[inputs.influxdb]]
- # ## Works with InfluxDB debug endpoints out of the box,
- # ## but other services can use this format too.
- # ## See the influxdb plugin's README for more details.
- #
- # ## Multiple URLs from which to read InfluxDB-formatted JSON
- # ## Default is "http://localhost:8086/debug/vars".
- # urls = [
- # "http://localhost:8086/debug/vars"
- # ]
- # # Read metrics from one or many bare metal servers
- # [[inputs.ipmi_sensor]]
- # ## specify servers via a url matching:
- # ## [username[:password]@][protocol[(address)]]
- # ## e.g.
- # ## root:passwd@lan(127.0.0.1)
- # ##
- # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
- # # Read JMX metrics through Jolokia
- # [[inputs.jolokia]]
- # ## This is the context root used to compose the jolokia url
- # context = "/jolokia"
- #
- # ## This specifies the mode used
- # # mode = "proxy"
- # #
- # ## When in proxy mode this section is used to specify further
- # ## proxy address configurations.
- # ## Remember to change host address to fit your environment.
- # # [inputs.jolokia.proxy]
- # # host = "127.0.0.1"
- # # port = "8080"
- #
- #
- # ## List of servers exposing jolokia read service
- # [[inputs.jolokia.servers]]
- # name = "as-server-01"
- # host = "127.0.0.1"
- # port = "8080"
- # # username = "myuser"
- # # password = "mypassword"
- #
- # ## List of metrics collected on above servers
- # ## Each metric consists of a name, a jmx path, and either
- # ## a pass or drop slice attribute.
- # ## This collects all heap memory usage metrics.
- # [[inputs.jolokia.metrics]]
- # name = "heap_memory_usage"
- # mbean = "java.lang:type=Memory"
- # attribute = "HeapMemoryUsage"
- #
- # ## This collects thread count metrics.
- # [[inputs.jolokia.metrics]]
- # name = "thread_count"
- # mbean = "java.lang:type=Threading"
- # attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
- #
- # ## This collects loaded/unloaded class count metrics.
- # [[inputs.jolokia.metrics]]
- # name = "class_count"
- # mbean = "java.lang:type=ClassLoading"
- # attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
- # # Read metrics from a LeoFS Server via SNMP
- # [[inputs.leofs]]
- # ## An array of URIs to gather stats about LeoFS.
- # ## Specify an ip or hostname with port. ie 127.0.0.1:4020
- # servers = ["127.0.0.1:4021"]
- # # Read metrics from local Lustre service on OST, MDS
- # [[inputs.lustre2]]
- # ## An array of /proc globs to search for Lustre stats
- # ## If not specified, the default will work on Lustre 2.5.x
- # ##
- # # ost_procfiles = [
- # # "/proc/fs/lustre/obdfilter/*/stats",
- # # "/proc/fs/lustre/osd-ldiskfs/*/stats",
- # # "/proc/fs/lustre/obdfilter/*/job_stats",
- # # ]
- # # mds_procfiles = [
- # # "/proc/fs/lustre/mdt/*/md_stats",
- # # "/proc/fs/lustre/mdt/*/job_stats",
- # # ]
- # # Gathers metrics from the /3.0/reports MailChimp API
- # [[inputs.mailchimp]]
- # ## MailChimp API key
- # ## get from https://admin.mailchimp.com/account/api/
- # api_key = "" # required
- # ## Reports for campaigns sent more than days_old ago will not be collected.
- # ## 0 means collect all.
- # days_old = 0
- # ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old
- # # campaign_id = ""
- # # Read metrics from one or many memcached servers
- # [[inputs.memcached]]
- # ## An array of addresses to gather stats about. Specify an ip or hostname
- # ## with optional port. ie localhost, 10.0.0.1:11211, etc.
- # servers = ["localhost:11211"]
- # # unix_sockets = ["/var/run/memcached.sock"]
- # # Telegraf plugin for gathering metrics from N Mesos masters
- # [[inputs.mesos]]
- # ## Timeout, in ms.
- # timeout = 100
- # ## A list of Mesos masters.
- # masters = ["localhost:5050"]
- # ## Master metrics groups to be collected, by default, all enabled.
- # master_collections = [
- # "resources",
- # "master",
- # "system",
- # "agents",
- # "frameworks",
- # "tasks",
- # "messages",
- # "evqueue",
- # "registrar",
- # ]
- # ## A list of Mesos slaves, default is []
- # # slaves = []
- # ## Slave metrics groups to be collected, by default, all enabled.
- # # slave_collections = [
- # # "resources",
- # # "agent",
- # # "system",
- # # "executors",
- # # "tasks",
- # # "messages",
- # # ]
- # ## Include mesos tasks statistics, default is false
- # # slave_tasks = true
- # # Read metrics from one or many MongoDB servers
- # [[inputs.mongodb]]
- # ## An array of URIs to gather stats about. Specify an ip or hostname
- # ## with optional port and password. ie,
- # ## mongodb://user:auth_key@10.10.3.30:27017,
- # ## mongodb://10.10.3.33:18832,
- # ## 10.0.0.1:10000, etc.
- # servers = ["127.0.0.1:27017"]
- # gather_perdb_stats = false
- # # Read metrics from one or many mysql servers
- # [[inputs.mysql]]
- # ## specify servers via a url matching:
- # ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
- # ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
- # ## e.g.
- # ## db_user:passwd@tcp(127.0.0.1:3306)/?tls=false
- # ## db_user@tcp(127.0.0.1:3306)/?tls=false
- # #
- # ## If no servers are specified, then localhost is used as the host.
- # servers = ["tcp(127.0.0.1:3306)/"]
- # ## the limits for metrics from perf_events_statements
- # perf_events_statements_digest_text_limit = 120
- # perf_events_statements_limit = 250
- # perf_events_statements_time_limit = 86400
- # #
- # ## if the list is empty, then metrics are gathered from all database tables
- # table_schema_databases = []
- # #
- # ## gather metrics from INFORMATION_SCHEMA.TABLES for the databases listed above
- # gather_table_schema = false
- # #
- # ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
- # gather_process_list = true
- # #
- # ## gather auto_increment columns and max values from information schema
- # gather_info_schema_auto_inc = true
- # #
- # ## gather metrics from SHOW SLAVE STATUS command output
- # gather_slave_status = true
- # #
- # ## gather metrics from SHOW BINARY LOGS command output
- # gather_binary_logs = false
- # #
- # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
- # gather_table_io_waits = false
- # #
- # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
- # gather_table_lock_waits = false
- # #
- # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
- # gather_index_io_waits = false
- # #
- # ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
- # gather_event_waits = false
- # #
- # ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
- # gather_file_events_stats = false
- # #
- # ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
- # gather_perf_events_statements = false
- # #
- # ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
- # interval_slow = "30m"
- # Read metrics about network interface usage
- [[inputs.net]]
- # ## By default, telegraf gathers stats from any up interface (excluding loopback)
- # ## Setting interfaces will tell it to gather these explicit interfaces,
- # ## regardless of status.
- # ##
- # # interfaces = ["eth0"]
- # # TCP or UDP 'ping' given url and collect response time in seconds
- # [[inputs.net_response]]
- # ## Protocol, must be "tcp" or "udp"
- # protocol = "tcp"
- # ## Server address (default localhost)
- # address = "github.com:80"
- # ## Set timeout
- # timeout = "1s"
- #
- # ## Optional string sent to the server
- # # send = "ssh"
- # ## Optional expected string in answer
- # # expect = "ssh"
- # ## Set read timeout (only used if expecting a response)
- # read_timeout = "1s"
- # # Read TCP metrics such as established, time wait and sockets counts.
- # [[inputs.netstat]]
- # # no configuration
- # # Read Nginx's basic status information (ngx_http_stub_status_module)
- # [[inputs.nginx]]
- # ## An array of Nginx stub_status URIs to gather stats.
- # urls = ["http://localhost/status"]
- # # Read NSQ topic and channel statistics.
- # [[inputs.nsq]]
- # ## An array of NSQD HTTP API endpoints
- # endpoints = ["http://localhost:4151"]
- # # Collect kernel snmp counters and network interface statistics
- # [[inputs.nstat]]
- # ## file paths for proc files. If empty default paths will be used:
- # ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
- # ## These can also be overridden with env variables, see README.
- # proc_net_netstat = "/proc/net/netstat"
- # proc_net_snmp = "/proc/net/snmp"
- # proc_net_snmp6 = "/proc/net/snmp6"
- # ## dump metrics with 0 values too
- # dump_zeros = true
- # # Get standard NTP query metrics, requires ntpq executable.
- # [[inputs.ntpq]]
- # ## If false, set the -n ntpq flag. Can reduce metric gather time.
- # dns_lookup = true
- # # Read metrics of passenger using passenger-status
- # [[inputs.passenger]]
- # ## Path of passenger-status.
- # ##
- # ## The plugin gathers metrics by parsing the XML output of passenger-status
- # ## More information about the tool:
- # ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
- # ##
- # ## If no path is specified, then the plugin simply executes passenger-status,
- # ## which must then be findable in your PATH
- # command = "passenger-status -v --show=xml"
- # # Read metrics of phpfpm, via HTTP status page or socket
- # [[inputs.phpfpm]]
- # ## An array of addresses to gather stats about. Specify an ip or hostname
- # ## with optional port and path
- # ##
- # ## The plugin can be configured in three modes (any one of them can be used):
- # ## - http: the URL must start with http:// or https://, ie:
- # ## "http://localhost/status"
- # ## "http://192.168.130.1/status?full"
- # ##
- # ## - unixsocket: path to fpm socket, ie:
- # ## "/var/run/php5-fpm.sock"
- # ## or using a custom fpm status path:
- # ## "/var/run/php5-fpm.sock:fpm-custom-status-path"
- # ##
- # ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
- # ## "fcgi://10.0.0.12:9000/status"
- # ## "cgi://10.0.10.12:9001/status"
- # ##
- # ## Example of gathering from multiple sources: a local socket and a remote host
- # ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
- # urls = ["http://localhost/status"]
- # # Ping given url(s) and return statistics
- # [[inputs.ping]]
- # ## NOTE: this plugin forks the ping command. You may need to set capabilities
- # ## via setcap cap_net_raw+p /bin/ping
- # #
- # ## urls to ping
- # urls = ["www.google.com"] # required
- # ## number of pings to send per collection (ping -c <COUNT>)
- # count = 1 # required
- # ## interval, in s, at which to ping. 0 == default (ping -i <PING_INTERVAL>)
- # ping_interval = 0.0
- # ## per-ping timeout, in s. 0 == no timeout (ping -W <TIMEOUT>)
- # timeout = 1.0
- # ## interface to send ping from (ping -I <INTERFACE>)
- # interface = ""
- # # Read metrics from one or many postgresql servers
- # [[inputs.postgresql]]
- # ## specify address via a url matching:
- # ## postgres://[pqgotest[:password]]@localhost[/dbname]\
- # ## ?sslmode=[disable|verify-ca|verify-full]
- # ## or a simple string:
- # ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
- # ##
- # ## All connection parameters are optional.
- # ##
- # ## Without the dbname parameter, the driver will default to a database
- # ## with the same name as the user. This dbname is just for instantiating a
- # ## connection with the server and doesn't restrict the databases we are trying
- # ## to grab metrics for.
- # ##
- # address = "host=localhost user=postgres sslmode=disable"
- #
- # ## A list of databases to pull metrics about. If not specified, metrics for all
- # ## databases are gathered.
- # # databases = ["app_production", "testing"]
- # # Read metrics from one or many postgresql servers
- # [[inputs.postgresql_extensible]]
- # ## specify address via a url matching:
- # ## postgres://[pqgotest[:password]]@localhost[/dbname]\
- # ## ?sslmode=[disable|verify-ca|verify-full]
- # ## or a simple string:
- # ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
- # #
- # ## All connection parameters are optional.
- # ## Without the dbname parameter, the driver will default to a database
- # ## with the same name as the user. This dbname is just for instantiating a
- # ## connection with the server and doesn't restrict the databases we are trying
- # ## to grab metrics for.
- # #
- # address = "host=localhost user=postgres sslmode=disable"
- # ## A list of databases to pull metrics about. If not specified, metrics for all
- # ## databases are gathered.
- # ## databases = ["app_production", "testing"]
- # #
- # ## A custom name for the database that will be used as the "server" tag in the
- # ## measurement output. If not specified, a default one generated from
- # ## the connection address is used.
- # # outputaddress = "db01"
- # #
- # ## Define the toml config where the sql queries are stored.
- # ## New queries can be added. If withdbname is set to true and no databases
- # ## are defined in the 'databases' field, the sql query is terminated by
- # ## 'is not null' in order to make the query succeed.
- # ## Example:
- # ## The sqlquery "SELECT * FROM pg_stat_database where datname" becomes
- # ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
- # ## because the databases variable was set to ['postgres', 'pgbench'] and
- # ## withdbname was true. Be careful: if withdbname is set to false you
- # ## must not define the where clause (ie, with the dbname). The tagvalue
- # ## field is used to define custom tags (separated by commas).
- # ## The optional "measurement" value can be used to override the default
- # ## output measurement name ("postgresql").
- # #
- # ## Structure :
- # ## [[inputs.postgresql_extensible.query]]
- # ## sqlquery string
- # ## version string
- # ## withdbname boolean
- # ## tagvalue string (comma separated)
- # ## measurement string
- # [[inputs.postgresql_extensible.query]]
- # sqlquery="SELECT * FROM pg_stat_database"
- # version=901
- # withdbname=false
- # tagvalue=""
- # measurement=""
- # [[inputs.postgresql_extensible.query]]
- # sqlquery="SELECT * FROM pg_stat_bgwriter"
- # version=901
- # withdbname=false
- # tagvalue="postgresql.stats"
- # # Read metrics from one or many PowerDNS servers
- # [[inputs.powerdns]]
- # ## An array of sockets to gather stats about.
- # ## Specify a path to unix socket.
- # unix_sockets = ["/var/run/pdns.controlsocket"]
- # # Monitor process cpu and memory usage
- # [[inputs.procstat]]
- # ## Must specify one of: pid_file, exe, or pattern
- # ## PID file to monitor process
- # pid_file = "/var/run/nginx.pid"
- # ## executable name (ie, pgrep <exe>)
- # # exe = "nginx"
- # ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
- # # pattern = "nginx"
- # ## user as argument for pgrep (ie, pgrep -u <user>)
- # # user = "nginx"
- #
- # ## override for process_name
- # ## This is optional; default is sourced from /proc/<pid>/status
- # # process_name = "bar"
- # ## Field name prefix
- # prefix = ""
- # ## comment this out if you want raw cpu_time stats
- # fielddrop = ["cpu_time_*"]
- # # Read metrics from one or many prometheus clients
- # [[inputs.prometheus]]
- # ## An array of urls to scrape metrics from.
- # urls = ["http://localhost:9100/metrics"]
- #
- # ## Use bearer token for authorization
- # # bearer_token = /path/to/bearer/token
- #
- # ## Optional SSL Config
- # # ssl_ca = /path/to/cafile
- # # ssl_cert = /path/to/certfile
- # # ssl_key = /path/to/keyfile
- # ## Use SSL but skip chain & host verification
- # # insecure_skip_verify = false
- # # Reads last_run_summary.yaml file and converts to measurements
- # [[inputs.puppetagent]]
- # ## Location of puppet last run summary file
- # location = "/var/lib/puppet/state/last_run_summary.yaml"
- # # Read metrics from one or many RabbitMQ servers via the management API
- # [[inputs.rabbitmq]]
- # # url = "http://localhost:15672"
- # # name = "rmq-server-1" # optional tag
- # # username = "guest"
- # # password = "guest"
- #
- # ## Optional SSL Config
- # # ssl_ca = "/etc/telegraf/ca.pem"
- # # ssl_cert = "/etc/telegraf/cert.pem"
- # # ssl_key = "/etc/telegraf/key.pem"
- # ## Use SSL but skip chain & host verification
- # # insecure_skip_verify = false
- #
- # ## A list of nodes to pull metrics about. If not specified, metrics for
- # ## all nodes are gathered.
- # # nodes = ["rabbit@node1", "rabbit@node2"]
- # # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
- # [[inputs.raindrops]]
- # ## An array of raindrops middleware URIs to gather stats.
- # urls = ["http://localhost:8080/_raindrops"]
- # # Read metrics from one or many redis servers
- # [[inputs.redis]]
- # ## specify servers via a url matching:
- # ## [protocol://][:password]@address[:port]
- # ## e.g.
- # ## tcp://localhost:6379
- # ## tcp://:password@192.168.99.100
- # ## unix:///var/run/redis.sock
- # ##
- # ## If no servers are specified, then localhost is used as the host.
- # ## If no port is specified, 6379 is used
- # servers = ["tcp://localhost:6379"]
- # # Read metrics from one or many RethinkDB servers
- # [[inputs.rethinkdb]]
- # ## An array of URIs to gather stats about. Specify an ip or hostname
- # ## with optional port and password. ie,
- # ## rethinkdb://user:auth_key@10.10.3.30:28105,
- # ## rethinkdb://10.10.3.33:18832,
- # ## 10.0.0.1:10000, etc.
- # servers = ["127.0.0.1:28015"]
- # # Read metrics from one or many Riak servers
- # [[inputs.riak]]
- # # Specify a list of one or more riak http servers
- # servers = ["http://localhost:8098"]
- # # Reads OID values from one or many snmp agents
- # [[inputs.snmp]]
- # ## Use 'oids.txt' file to translate oids to names
- # ## To generate 'oids.txt' you need to run:
- # ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
- # ## Or if you have another MIB folder with custom MIBs
- # ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
- # snmptranslate_file = "/tmp/oids.txt"
- # [[inputs.snmp.host]]
- # address = "192.168.2.2:161"
- # # SNMP community
- # community = "public" # default public
- # # SNMP version (1, 2 or 3)
- # # Version 3 not supported yet
- # version = 2 # default 2
- # # SNMP response timeout
- # timeout = 2.0 # default 2.0
- # # SNMP request retries
- # retries = 2 # default 2
- # # Which get/bulk do you want to collect for this host
- # collect = ["mybulk", "sysservices", "sysdescr"]
- # # Simple list of OIDs to get, in addition to "collect"
- # get_oids = []
- #
- # [[inputs.snmp.host]]
- # address = "192.168.2.3:161"
- # community = "public"
- # version = 2
- # timeout = 2.0
- # retries = 2
- # collect = ["mybulk"]
- # get_oids = [
- # "ifNumber",
- # ".1.3.6.1.2.1.1.3.0",
- # ]
- #
- # [[inputs.snmp.get]]
- # name = "ifnumber"
- # oid = "ifNumber"
- #
- # [[inputs.snmp.get]]
- # name = "interface_speed"
- # oid = "ifSpeed"
- # instance = "0"
- #
- # [[inputs.snmp.get]]
- # name = "sysuptime"
- # oid = ".1.3.6.1.2.1.1.3.0"
- # unit = "second"
- #
- # [[inputs.snmp.bulk]]
- # name = "mybulk"
- # max_repetition = 127
- # oid = ".1.3.6.1.2.1.1"
- #
- # [[inputs.snmp.bulk]]
- # name = "ifoutoctets"
- # max_repetition = 127
- # oid = "ifOutOctets"
- #
- # [[inputs.snmp.host]]
- # address = "192.168.2.13:161"
- # #address = "127.0.0.1:161"
- # community = "public"
- # version = 2
- # timeout = 2.0
- # retries = 2
- # #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
- # collect = ["sysuptime" ]
- # [[inputs.snmp.host.table]]
- # name = "iftable3"
- # include_instances = ["enp5s0", "eth1"]
- #
- # # SNMP TABLEs
- # # table with neither mapping nor subtables
- # [[inputs.snmp.table]]
- # name = "iftable1"
- # oid = ".1.3.6.1.2.1.31.1.1.1"
- #
- # # table without mapping but with subtables
- # [[inputs.snmp.table]]
- # name = "iftable2"
- # oid = ".1.3.6.1.2.1.31.1.1.1"
- # sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
- #
- # # table with mapping but without subtables
- # [[inputs.snmp.table]]
- # name = "iftable3"
- # oid = ".1.3.6.1.2.1.31.1.1.1"
- # # if empty, get all instances
- # mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
- # # if empty, get all subtables
- #
- # # table with both mapping and subtables
- # [[inputs.snmp.table]]
- # name = "iftable4"
- # oid = ".1.3.6.1.2.1.31.1.1.1"
- # # if empty, get all instances
- # mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
- # # if empty, get all subtables
- # # sub_tables need not be "real" subtables
- # sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
- # # Read metrics from Microsoft SQL Server
- # [[inputs.sqlserver]]
- # ## Specify instances to monitor with a list of connection strings.
- # ## All connection parameters are optional.
- # ## By default, the host is localhost, listening on default port, TCP 1433.
- # ## For Windows, the user is the currently running AD user (SSO).
- # ## See https://github.com/denisenkom/go-mssqldb for detailed connection
- # ## parameters.
- # # servers = [
- # # "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
- # # ]
- # # Inserts sine and cosine waves for demonstration purposes
- # [[inputs.trig]]
- # ## Set the amplitude
- # amplitude = 10.0
- # # Read Twemproxy stats data
- # [[inputs.twemproxy]]
- # ## Twemproxy stats address and port (no scheme)
- # addr = "localhost:22222"
- # ## Monitor pool name
- # pools = ["redis_pool", "mc_pool"]
- # # A plugin to collect stats from Varnish HTTP Cache
- # [[inputs.varnish]]
- # ## The default location of the varnishstat binary can be overridden with:
- # binary = "/usr/bin/varnishstat"
- #
- # ## By default, telegraf gathers stats for 3 metric points.
- # ## Setting stats will override the defaults shown below.
- # ## Glob matching can be used, ie, stats = ["MAIN.*"]
- # ## stats may also be set to ["*"], which will collect all stats
- # stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
- # # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
- # [[inputs.zfs]]
- # ## ZFS kstat path. Ignored on FreeBSD
- # ## If not specified, then default is:
- # # kstatPath = "/proc/spl/kstat/zfs"
- #
- # ## By default, telegraf gathers all zfs stats
- # ## If not specified, then default is:
- # # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
- #
- # ## By default, don't gather zpool stats
- # # poolMetrics = false
- # # Reads 'mntr' stats from one or many zookeeper servers
- # [[inputs.zookeeper]]
- # ## An array of addresses to gather stats about. Specify an ip or hostname
- # ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
- #
- # ## If no servers are specified, then localhost is used as the host.
- # ## If no port is specified, 2181 is used
- # servers = [":2181"]
- ###############################################################################
- # SERVICE INPUT PLUGINS #
- ###############################################################################
- # # Read metrics from Kafka topic(s)
- # [[inputs.kafka_consumer]]
- # ## topic(s) to consume
- # topics = ["telegraf"]
- # ## an array of Zookeeper connection strings
- # zookeeper_peers = ["localhost:2181"]
- # ## Zookeeper Chroot
- # zookeeper_chroot = ""
- # ## the name of the consumer group
- # consumer_group = "telegraf_metrics_consumers"
- # ## Offset (must be either "oldest" or "newest")
- # offset = "oldest"
- #
- # ## Data format to consume.
- # ## Each data format has its own unique set of configuration options, read
- # ## more about them here:
- # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
- # data_format = "influx"
- # # Stream and parse log file(s).
- # [[inputs.logparser]]
- # ## Log files to parse.
- # ## These accept standard unix glob matching rules, but with the addition of
- # ## ** as a "super asterisk". ie:
- # ## /var/log/**.log -> recursively find all .log files in /var/log
- # ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
- # ## /var/log/apache.log -> only tail the apache log file
- # files = ["/var/log/influxdb/influxdb.log"]
- # ## Read file from beginning.
- # from_beginning = false
- #
- # ## Parse logstash-style "grok" patterns:
- # ## Telegraf built-in parsing patterns: https://goo.gl/dkay10
- # [inputs.logparser.grok]
- # ## This is a list of patterns to check the given log file(s) for.
- # ## Note that adding patterns here increases processing time. The most
- # ## efficient configuration is to have one pattern per logparser.
- # ## Other common built-in patterns are:
- # ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
- # ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
- # patterns = ["%{INFLUXDB_HTTPD_LOG}"]
- # ## Name of the output measurement.
- # measurement = "influxdb_log"
- # ## Full path(s) to custom pattern files.
- # custom_pattern_files = []
- # ## Custom patterns can also be defined here. Put one pattern per line.
- # custom_patterns = '''
- # '''
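- # ## A custom pattern goes between the quotes above, one definition per line.
- # ## A hypothetical example (name, then regex with grok field modifiers):
- # ##   RESPONSE_TIME %{NUMBER:response_ms:float}
- # ## which could then be referenced as %{RESPONSE_TIME} in "patterns".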
- # # Read metrics from MQTT topic(s)
- # [[inputs.mqtt_consumer]]
- # servers = ["localhost:1883"]
- # ## MQTT QoS, must be 0, 1, or 2
- # qos = 0
- #
- # ## Topics to subscribe to
- # topics = [
- # "telegraf/host01/cpu",
- # "telegraf/+/mem",
- # "sensors/#",
- # ]
- #
- # # if true, messages that can't be delivered while the subscriber is offline
- # # will be delivered when it comes back (such as on service restart).
- # # NOTE: if true, client_id MUST be set
- # persistent_session = false
- # # If empty, a random client ID will be generated.
- # client_id = ""
- #
- # ## username and password to connect to the MQTT server.
- # # username = "telegraf"
- # # password = "metricsmetricsmetricsmetrics"
- #
- # ## Optional SSL Config
- # # ssl_ca = "/etc/telegraf/ca.pem"
- # # ssl_cert = "/etc/telegraf/cert.pem"
- # # ssl_key = "/etc/telegraf/key.pem"
- # ## Use SSL but skip chain & host verification
- # # insecure_skip_verify = false
- #
- # ## Data format to consume.
- # ## Each data format has its own unique set of configuration options, read
- # ## more about them here:
- # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
- # data_format = "influx"
- # # Read metrics from NATS subject(s)
- # [[inputs.nats_consumer]]
- # ## urls of NATS servers
- # servers = ["nats://localhost:4222"]
- # ## Use Transport Layer Security
- # secure = false
- # ## subject(s) to consume
- # subjects = ["telegraf"]
- # ## name a queue group
- # queue_group = "telegraf_consumers"
- #
- # ## Data format to consume.
- # ## Each data format has its own unique set of configuration options, read
- # ## more about them here:
- # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
- # data_format = "influx"
- # # Read NSQ topic for metrics.
- # [[inputs.nsq_consumer]]
- # ## A string representing the NSQD TCP endpoint
- # server = "localhost:4150"
- # topic = "telegraf"
- # channel = "consumer"
- # max_in_flight = 100
- #
- # ## Data format to consume.
- # ## Each data format has its own unique set of configuration options, read
- # ## more about them here:
- # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
- # data_format = "influx"
- # # Statsd Server
- # [[inputs.statsd]]
- # ## Address and port to host UDP listener on
- # service_address = ":8125"
- # ## Delete gauges every interval (default=false)
- # delete_gauges = false
- # ## Delete counters every interval (default=false)
- # delete_counters = false
- # ## Delete sets every interval (default=false)
- # delete_sets = false
- # ## Delete timings & histograms every interval (default=true)
- # delete_timings = true
- # ## Percentiles to calculate for timing & histogram stats
- # percentiles = [90]
- #
- # ## separator to use between elements of a statsd metric
- # metric_separator = "_"
- #
- # ## Parses tags in the datadog statsd format
- # ## http://docs.datadoghq.com/guides/dogstatsd/
- # parse_data_dog_tags = false
- #
- # ## Statsd data translation templates, more info can be read here:
- # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
- # # templates = [
- # # "cpu.* measurement*"
- # # ]
- #
- # ## Number of UDP messages allowed to queue up, once filled,
- # ## the statsd server will start dropping packets
- # allowed_pending_messages = 10000
- #
- # ## Number of timing/histogram values to track per-measurement in the
- # ## calculation of percentiles. Raising this limit increases the accuracy
- # ## of percentiles but also increases the memory usage and cpu time.
- # percentile_limit = 1000
- # # Stream a log file, like the tail -f command
- # [[inputs.tail]]
- # ## files to tail.
- # ## These accept standard unix glob matching rules, but with the addition of
- # ## ** as a "super asterisk". ie:
- # ## "/var/log/**.log" -> recursively find all .log files in /var/log
- # ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
- # ## "/var/log/apache.log" -> just tail the apache log file
- # ##
- # ## See https://github.com/gobwas/glob for more examples
- # ##
- # files = ["/var/mymetrics.out"]
- # ## Read file from beginning.
- # from_beginning = false
- #
- # ## Data format to consume.
- # ## Each data format has its own unique set of configuration options, read
- # ## more about them here:
- # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
- # data_format = "influx"
- # # Generic TCP listener
- # [[inputs.tcp_listener]]
- # ## Address and port to host TCP listener on
- # service_address = ":8094"
- #
- # ## Number of TCP messages allowed to queue up. Once filled, the
- # ## TCP listener will start dropping packets.
- # allowed_pending_messages = 10000
- #
- # ## Maximum number of concurrent TCP connections to allow
- # max_tcp_connections = 250
- #
- # ## Data format to consume.
- # ## Each data format has its own unique set of configuration options, read
- # ## more about them here:
- # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
- # data_format = "influx"
- # # Generic UDP listener
- # [[inputs.udp_listener]]
- # ## Address and port to host UDP listener on
- # service_address = ":8092"
- #
- # ## Number of UDP messages allowed to queue up. Once filled, the
- # ## UDP listener will start dropping packets.
- # allowed_pending_messages = 10000
- #
- # ## Data format to consume.
- # ## Each data format has its own unique set of configuration options, read
- # ## more about them here:
- # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
- # data_format = "influx"
- # # A Webhooks Event collector
- # [[inputs.webhooks]]
- # ## Address and port to host Webhook listener on
- # service_address = ":1619"
- #
- # [inputs.webhooks.github]
- # path = "/github"
- #
- # [inputs.webhooks.mandrill]
- # path = "/mandrill"
- #
- # [inputs.webhooks.rollbar]
- # path = "/rollbar"