Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply prepend
# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## For failed writes, telegraf will cache metric_buffer_limit metrics for each
## output, and will flush this buffer on a successful write. Oldest metrics
## are dropped first when this buffer fills.
## This buffer only fills when writes fail to output plugin(s).
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Logging configuration:
## Run telegraf with debug log messages.
debug = false
## Run telegraf in quiet mode (error log messages only).
quiet = false
## Specify the log file name. The empty string means to log to stderr.
logfile = ""
## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = false
###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################
# Configuration for sending metrics to InfluxDB
[[outputs.influxdb]]
urls = ["http://127.0.0.1:8086"]
# NOTE: these values must use straight ASCII quotes; typographic quotes are a
# TOML syntax error and telegraf will refuse to parse the file.
database = "telegraf"
username = "telegraf"
password = "yourpassword"
###############################################################################
#                            INPUT PLUGINS                                    #
###############################################################################
# Read metrics about cpu usage
[[inputs.cpu]]
## Whether to report per-cpu stats or not
percpu = true
## Whether to report total system cpu stats or not
totalcpu = true
## If true, collect raw CPU time metrics.
collect_cpu_time = false
## If true, compute and report the sum of all non-idle CPU states.
report_active = false
# Read metrics about disk usage by mount point
[[inputs.disk]]
## By default stats will be gathered for all mount points.
## Set mount_points will restrict the stats to only the specified mount points.
# mount_points = ["/"]
## Ignore mount points by filesystem type.
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
# Read metrics about disk IO by device
[[inputs.diskio]]
## By default, telegraf will gather stats for all devices including
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
# devices = ["sda", "sdb", "vd*"]
## Uncomment the following line if you need disk serial numbers.
# skip_serial_number = false
#
## On systems which support it, device metadata can be added in the form of
## tags.
## Currently only Linux is supported via udev properties. You can view
## available properties for a device by running:
## 'udevadm info -q property -n /dev/sda'
# device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
#
## Using the same metadata source as device_tags, you can also customize the
## name of the device via templates.
## The 'name_templates' parameter is a list of templates to try and apply to
## the device. The template may contain variables in the form of '$PROPERTY' or
## '${PROPERTY}'. The first template which does not contain any variables not
## present for the device is used as the device name tag.
## The typical use case is for LVM volumes, to get the VG/LV name instead of
## the near-meaningless DM-0 name.
# name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
# Get kernel statistics from /proc/stat
[[inputs.kernel]]
# no configuration

# Read metrics about memory usage
[[inputs.mem]]
# no configuration

# Get the number of processes and group them by status
[[inputs.processes]]
# no configuration

# Read metrics about swap memory usage
[[inputs.swap]]
# no configuration

# Read metrics about system load & uptime
[[inputs.system]]
# no configuration
# # Retrieves SNMP values from remote agents
[[inputs.snmp]]
agents = [ "192.168.100.251:161" ]
# ## Timeout for each SNMP query.
# timeout = "5s"
# ## Number of retries to attempt within timeout.
# retries = 3
# ## SNMP version, values can be 1, 2, or 3
# version = 2
#
# ## SNMP community string.
community = "public"
#
# ## The GETBULK max-repetitions parameter
# max_repetitions = 10
#
# ## SNMPv3 auth parameters
# #sec_name = "myuser"
# #auth_protocol = "md5" # Values: "MD5", "SHA", ""
# #auth_password = "pass"
# #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
# #context_name = ""
# #priv_protocol = "" # Values: "DES", "AES", ""
# #priv_password = ""
#
# Scalar SNMP fields gathered on each poll of the agent above.
[[inputs.snmp.field]]
name = "hostname"
oid = "RFC1213-MIB::sysName.0"
is_tag = true

[[inputs.snmp.field]]
name = "uptime"
oid = "DISMAN-EXPRESSION-MIB::sysUpTimeInstance"

[[inputs.snmp.field]]
name = "upsBasicBatteryTimeOnBattery"
oid = "SNMPv2-SMI::mib-2.33.1.2.3.0"

[[inputs.snmp.field]]
name = "upsAdvBatteryRunTimeRemaining"
oid = "PowerNet-MIB::upsAdvBatteryRunTimeRemaining.0"

[[inputs.snmp.field]]
name = "upsAdvBatteryReplaceIndicator"
oid = "PowerNet-MIB::upsAdvBatteryReplaceIndicator.0"

[[inputs.snmp.field]]
name = "upsHighPrecBatteryCapacity"
oid = "PowerNet-MIB::upsHighPrecBatteryCapacity.0"
conversion = "float(1)"

[[inputs.snmp.field]]
name = "upsHighPrecBatteryTemperature"
oid = "PowerNet-MIB::upsHighPrecBatteryTemperature.0"
conversion = "float(1)"

[[inputs.snmp.field]]
name = "upsBasicOutputStatus"
oid = "PowerNet-MIB::upsBasicOutputStatus.0"

[[inputs.snmp.field]]
name = "upsHighPrecOutputLoad"
oid = "PowerNet-MIB::upsHighPrecOutputLoad.0"
conversion = "float(1)"

[[inputs.snmp.field]]
name = "upsHighPrecOutputEfficiency"
oid = "PowerNet-MIB::upsHighPrecOutputEfficiency.0"
conversion = "float(1)"

[[inputs.snmp.field]]
name = "upsHighPrecOutputVoltage"
oid = "PowerNet-MIB::upsHighPrecOutputVoltage.0"
conversion = "float(1)"

[[inputs.snmp.field]]
name = "upsHighPrecInputLineVoltage"
oid = "PowerNet-MIB::upsHighPrecInputLineVoltage.0"
conversion = "float(1)"

[[inputs.snmp.field]]
name = "upsHighPrecOutputCurrent"
oid = "PowerNet-MIB::upsHighPrecOutputCurrent.0"
conversion = "float(1)"

[[inputs.snmp.field]]
name = "upsHighPrecOutputEnergyUsage"
oid = "PowerNet-MIB::upsHighPrecOutputEnergyUsage.0"
conversion = "float(1)"
# IF-MIB::ifTable contains counters on input and output traffic as well as errors and discards.
[[inputs.snmp.table]]
name = "interface"
inherit_tags = [ "hostname" ]
oid = "IF-MIB::ifTable"

# Interface tag - used to identify interface in metrics database
[[inputs.snmp.table.field]]
name = "ifDescr"
oid = "IF-MIB::ifDescr"
is_tag = true

# IF-MIB::ifXTable contains newer High Capacity (HC) counters that do not overflow as fast for a few of the ifTable counters
[[inputs.snmp.table]]
name = "interface"
inherit_tags = [ "hostname" ]
oid = "IF-MIB::ifXTable"

# Interface tag - used to identify interface in metrics database
[[inputs.snmp.table.field]]
name = "ifDescr"
oid = "IF-MIB::ifDescr"
is_tag = true

# EtherLike-MIB::dot3StatsTable contains detailed ethernet-level information about what kind of errors have been logged on an interface (such as FCS error, frame too long, etc)
[[inputs.snmp.table]]
name = "interface"
inherit_tags = [ "hostname" ]
oid = "EtherLike-MIB::dot3StatsTable"

# Interface tag - used to identify interface in metrics database
[[inputs.snmp.table.field]]
name = "ifDescr"
oid = "IF-MIB::ifDescr"
is_tag = true
- # ## measurement name
- # name = "system"
- # [[inputs.snmp.field]]
- # name = "hostname"
- # oid = ".1.0.0.1.1"
- # [[inputs.snmp.field]]
- # name = "uptime"
- # oid = ".1.0.0.1.2"
- # [[inputs.snmp.field]]
- # name = "load"
- # oid = ".1.0.0.1.3"
- # [[inputs.snmp.field]]
- # oid = "HOST-RESOURCES-MIB::hrMemorySize"
- #
- # [[inputs.snmp.table]]
- # ## measurement name
- # name = "remote_servers"
- # inherit_tags = [ "hostname" ]
- # [[inputs.snmp.table.field]]
- # name = "server"
- # oid = ".1.0.0.0.1.0"
- # is_tag = true
- # [[inputs.snmp.table.field]]
- # name = "connections"
- # oid = ".1.0.0.0.1.1"
- # [[inputs.snmp.table.field]]
- # name = "latency"
- # oid = ".1.0.0.0.1.2"
- #
- # [[inputs.snmp.table]]
- # ## auto populate table's fields using the MIB
- # oid = "HOST-RESOURCES-MIB::hrNetworkTable"
- # # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
- # [[inputs.snmp_legacy]]
- # ## Use 'oids.txt' file to translate oids to names
- # ## To generate 'oids.txt' you need to run:
- # ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
- # ## Or if you have an other MIB folder with custom MIBs
- # ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
- # snmptranslate_file = "/tmp/oids.txt"
- # [[inputs.snmp.host]]
- # address = "192.168.2.2:161"
- # # SNMP community
- # community = "public" # default public
- # # SNMP version (1, 2 or 3)
- # # Version 3 not supported yet
- # version = 2 # default 2
- # # SNMP response timeout
- # timeout = 2.0 # default 2.0
- # # SNMP request retries
- # retries = 2 # default 2
- # # Which get/bulk do you want to collect for this host
- # collect = ["mybulk", "sysservices", "sysdescr"]
- # # Simple list of OIDs to get, in addition to "collect"
- # get_oids = []
- #
- # [[inputs.snmp.host]]
- # address = "192.168.2.3:161"
- # community = "public"
- # version = 2
- # timeout = 2.0
- # retries = 2
- # collect = ["mybulk"]
- # get_oids = [
- # "ifNumber",
- # ".1.3.6.1.2.1.1.3.0",
- # ]
- #
- # [[inputs.snmp.get]]
- # name = "ifnumber"
- # oid = "ifNumber"
- #
- # [[inputs.snmp.get]]
- # name = "interface_speed"
- # oid = "ifSpeed"
- # instance = "0"
- #
- # [[inputs.snmp.get]]
- # name = "sysuptime"
- # oid = ".1.3.6.1.2.1.1.3.0"
- # unit = "second"
- #
- # [[inputs.snmp.bulk]]
- # name = "mybulk"
- # max_repetition = 127
- # oid = ".1.3.6.1.2.1.1"
- #
- # [[inputs.snmp.bulk]]
- # name = "ifoutoctets"
- # max_repetition = 127
- # oid = "ifOutOctets"
- #
- # [[inputs.snmp.host]]
- # address = "192.168.2.13:161"
- # #address = "127.0.0.1:161"
- # community = "public"
- # version = 2
- # timeout = 2.0
- # retries = 2
- # #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
- # collect = ["sysuptime" ]
- # [[inputs.snmp.host.table]]
- # name = "iftable3"
- # include_instances = ["enp5s0", "eth1"]
- #
- # # SNMP TABLEs
- # # table without mapping neither subtables
- # [[inputs.snmp.table]]
- # name = "iftable1"
- # oid = ".1.3.6.1.2.1.31.1.1.1"
- #
- # # table without mapping but with subtables
- # [[inputs.snmp.table]]
- # name = "iftable2"
- # oid = ".1.3.6.1.2.1.31.1.1.1"
- # sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
- #
- # # table with mapping but without subtables
- # [[inputs.snmp.table]]
- # name = "iftable3"
- # oid = ".1.3.6.1.2.1.31.1.1.1"
- # # if empty. get all instances
- # mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
- # # if empty, get all subtables
- #
- # # table with both mapping and subtables
- # [[inputs.snmp.table]]
- # name = "iftable4"
- # oid = ".1.3.6.1.2.1.31.1.1.1"
- # # if empty get all instances
- # mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
- # # if empty get all subtables
- # # sub_tables could be not "real subtables"
- # sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
- # # Read stats from one or more Solr servers or cores
- # [[inputs.solr]]
- # ## specify a list of one or more Solr servers
- # servers = ["http://localhost:8983"]
- #
- # ## specify a list of one or more Solr cores (default - all)
- # # cores = ["main"]
- # # Read metrics from Microsoft SQL Server
- # [[inputs.sqlserver]]
- # ## Specify instances to monitor with a list of connection strings.
- # ## All connection parameters are optional.
- # ## By default, the host is localhost, listening on default port, TCP 1433.
- # ## for Windows, the user is the currently running AD user (SSO).
- # ## See https://github.com/denisenkom/go-mssqldb for detailed connection
- # ## parameters.
- # # servers = [
- # # "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
- # # ]
- #
- # ## Optional parameter, setting this to 2 will use a new version
- # ## of the collection queries that break compatibility with the original
- # ## dashboards.
- # query_version = 2
- #
- # ## If you are using AzureDB, setting this to true will gather resource utilization metrics
- # # azuredb = false
- #
- # ## If you would like to exclude some of the metrics queries, list them here
- # ## Possible choices:
- # ## - PerformanceCounters
- # ## - WaitStatsCategorized
- # ## - DatabaseIO
- # ## - DatabaseProperties
- # ## - CPUHistory
- # ## - DatabaseSize
- # ## - DatabaseStats
- # ## - MemoryClerk
- # ## - VolumeSpace
- # ## - PerformanceMetrics
- # # exclude_query = [ 'DatabaseIO' ]
- # # Sysstat metrics collector
- # [[inputs.sysstat]]
- # ## Path to the sadc command.
- # #
- # ## Common Defaults:
- # ## Debian/Ubuntu: /usr/lib/sysstat/sadc
- # ## Arch: /usr/lib/sa/sadc
- # ## RHEL/CentOS: /usr/lib64/sa/sadc
- # sadc_path = "/usr/lib/sa/sadc" # required
- # #
- # #
- # ## Path to the sadf command, if it is not in PATH
- # # sadf_path = "/usr/bin/sadf"
- # #
- # #
- # ## Activities is a list of activities, that are passed as argument to the
- # ## sadc collector utility (e.g: DISK, SNMP etc...)
- # ## The more activities that are added, the more data is collected.
- # # activities = ["DISK"]
- # #
- # #
- # ## Group metrics to measurements.
- # ##
- # ## If group is false each metric will be prefixed with a description
- # ## and represents itself a measurement.
- # ##
- # ## If Group is true, corresponding metrics are grouped to a single measurement.
- # # group = true
- # #
- # #
- # ## Options for the sadf command. The values on the left represent the sadf
- # ## options and the values on the right their description (which are used for
- # ## grouping and prefixing metrics).
- # ##
- # ## Run 'sar -h' or 'man sar' to find out the supported options for your
- # ## sysstat version.
- # [inputs.sysstat.options]
- # -C = "cpu"
- # -B = "paging"
- # -b = "io"
- # -d = "disk" # requires DISK activity
- # "-n ALL" = "network"
- # "-P ALL" = "per_cpu"
- # -q = "queue"
- # -R = "mem"
- # -r = "mem_util"
- # -S = "swap_util"
- # -u = "cpu_util"
- # -v = "inode"
- # -W = "swap"
- # -w = "task"
- # # -H = "hugepages" # only available for newer linux distributions
- # # "-I ALL" = "interrupts" # requires INT activity
- # #
- # #
- # ## Device tags can be used to add additional tags for devices.
- # ## For example the configuration below adds a tag vg with value rootvg for
- # ## all metrics with sda devices.
- # # [[inputs.sysstat.device_tags.sda]]
- # # vg = "rootvg"
- # # Reads metrics from a Teamspeak 3 Server via ServerQuery
- # [[inputs.teamspeak]]
- # ## Server address for Teamspeak 3 ServerQuery
- # # server = "127.0.0.1:10011"
- # ## Username for ServerQuery
- # username = "serverqueryuser"
- # ## Password for ServerQuery
- # password = "secret"
- # ## Array of virtual servers
- # # virtual_servers = [1]
- # # Read metrics about temperature
- # [[inputs.temp]]
- # # no configuration
- # # Read Tengine's basic status information (ngx_http_reqstat_module)
- # [[inputs.tengine]]
- # # An array of Tengine reqstat module URI to gather stats.
- # urls = ["http://127.0.0.1/us"]
- #
- # # HTTP response timeout (default: 5s)
- # # response_timeout = "5s"
- #
- # ## Optional TLS Config
- # # tls_ca = "/etc/telegraf/ca.pem"
- # # tls_cert = "/etc/telegraf/cert.cer"
- # # tls_key = "/etc/telegraf/key.key"
- # ## Use TLS but skip chain & host verification
- # # insecure_skip_verify = false
- # # Gather metrics from the Tomcat server status page.
- # [[inputs.tomcat]]
- # ## URL of the Tomcat server status
- # # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
- #
- # ## HTTP Basic Auth Credentials
- # # username = "tomcat"
- # # password = "s3cret"
- #
- # ## Request timeout
- # # timeout = "5s"
- #
- # ## Optional TLS Config
- # # tls_ca = "/etc/telegraf/ca.pem"
- # # tls_cert = "/etc/telegraf/cert.pem"
- # # tls_key = "/etc/telegraf/key.pem"
- # ## Use TLS but skip chain & host verification
- # # insecure_skip_verify = false
- # # Inserts sine and cosine waves for demonstration purposes
- # [[inputs.trig]]
- # ## Set the amplitude
- # amplitude = 10.0
- # # Read Twemproxy stats data
- # [[inputs.twemproxy]]
- # ## Twemproxy stats address and port (no scheme)
- # addr = "localhost:22222"
- # ## Monitor pool name
- # pools = ["redis_pool", "mc_pool"]
- # # A plugin to collect stats from the Unbound DNS resolver
- # [[inputs.unbound]]
- # ## Address of server to connect to, read from unbound conf default, optionally ':port'
- # ## Will lookup IP if given a hostname
- # server = "127.0.0.1:8953"
- #
- # ## If running as a restricted user you can prepend sudo for additional access:
- # # use_sudo = false
- #
- # ## The default location of the unbound-control binary can be overridden with:
- # # binary = "/usr/sbin/unbound-control"
- #
- # ## The default timeout of 1s can be overridden with:
- # # timeout = "1s"
- #
- # ## When set to true, thread metrics are tagged with the thread id.
- # ##
- # ## The default is false for backwards compatibility, and will be change to
- # ## true in a future version. It is recommended to set to true on new
- # ## deployments.
- # thread_as_tag = false
- # # A plugin to collect stats from Varnish HTTP Cache
- # [[inputs.varnish]]
- # ## If running as a restricted user you can prepend sudo for additional access:
- # #use_sudo = false
- #
- # ## The default location of the varnishstat binary can be overridden with:
- # binary = "/usr/bin/varnishstat"
- #
- # ## By default, telegraf gather stats for 3 metric points.
- # ## Setting stats will override the defaults shown below.
- # ## Glob matching can be used, ie, stats = ["MAIN.*"]
- # ## stats may also be set to ["*"], which will collect all stats
- # stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
- #
- # ## Optional name for the varnish instance (or working directory) to query
- # ## Usually appended after -n in varnish cli
- # # instance_name = instanceName
- # # Monitor wifi signal strength and quality
- # [[inputs.wireless]]
- # ## Sets 'proc' directory path
- # ## If not specified, then default is /proc
- # # host_proc = "/proc"
- # # Reads metrics from a SSL certificate
- # [[inputs.x509_cert]]
- # ## List certificate sources
- # sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"]
- #
- # ## Timeout for SSL connection
- # # timeout = "5s"
- #
- # ## Optional TLS Config
- # # tls_ca = "/etc/telegraf/ca.pem"
- # # tls_cert = "/etc/telegraf/cert.pem"
- # # tls_key = "/etc/telegraf/key.pem"
- #
- # ## Use TLS but skip chain & host verification
- # # insecure_skip_verify = false
- # # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
- # [[inputs.zfs]]
- # ## ZFS kstat path. Ignored on FreeBSD
- # ## If not specified, then default is:
- # # kstatPath = "/proc/spl/kstat/zfs"
- #
- # ## By default, telegraf gather all zfs stats
- # ## If not specified, then default is:
- # # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
- # ## For Linux, the default is:
- # # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats",
- # # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"]
- # ## By default, don't gather zpool stats
- # # poolMetrics = false
- # # Reads 'mntr' stats from one or many zookeeper servers
- # [[inputs.zookeeper]]
- # ## An array of address to gather stats about. Specify an ip or hostname
- # ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
- #
- # ## If no servers are specified, then localhost is used as the host.
- # ## If no port is specified, 2181 is used
- # servers = [":2181"]
- #
- # ## Timeout for metric collections from all servers. Minimum timeout is "1s".
- # # timeout = "5s"
- #
- # ## Optional TLS Config
- # # enable_tls = true
- # # tls_ca = "/etc/telegraf/ca.pem"
- # # tls_cert = "/etc/telegraf/cert.pem"
- # # tls_key = "/etc/telegraf/key.pem"
- # ## If false, skip chain & host verification
- # # insecure_skip_verify = true
- ###############################################################################
- # SERVICE INPUT PLUGINS #
- ###############################################################################
- # # AMQP consumer plugin
- # [[inputs.amqp_consumer]]
- # ## Broker to consume from.
- # ## deprecated in 1.7; use the brokers option
- # # url = "amqp://localhost:5672/influxdb"
- #
- # ## Brokers to consume from. If multiple brokers are specified a random broker
- # ## will be selected anytime a connection is established. This can be
- # ## helpful for load balancing when not using a dedicated load balancer.
- # brokers = ["amqp://localhost:5672/influxdb"]
- #
- # ## Authentication credentials for the PLAIN auth_method.
- # # username = ""
- # # password = ""
- #
- # ## Exchange to declare and consume from.
- # exchange = "telegraf"
- #
- # ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
- # # exchange_type = "topic"
- #
- # ## If true, exchange will be passively declared.
- # # exchange_passive = false
- #
- # ## Exchange durability can be either "transient" or "durable".
- # # exchange_durability = "durable"
- #
- # ## Additional exchange arguments.
- # # exchange_arguments = { }
- # # exchange_arguments = {"hash_propery" = "timestamp"}
- #
- # ## AMQP queue name.
- # queue = "telegraf"
- #
- # ## AMQP queue durability can be "transient" or "durable".
- # queue_durability = "durable"
- #
- # ## Binding Key.
- # binding_key = "#"
- #
- # ## Maximum number of messages server should give to the worker.
- # # prefetch_count = 50
- #
- # ## Maximum messages to read from the broker that have not been written by an
- # ## output. For best throughput set based on the number of metrics within
- # ## each message and the size of the output's metric_batch_size.
- # ##
- # ## For example, if each message from the queue contains 10 metrics and the
- # ## output metric_batch_size is 1000, setting this to 100 will ensure that a
- # ## full batch is collected and the write is triggered immediately without
- # ## waiting until the next flush_interval.
- # # max_undelivered_messages = 1000
- #
- # ## Auth method. PLAIN and EXTERNAL are supported
- # ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
- # ## described here: https://www.rabbitmq.com/plugins.html
- # # auth_method = "PLAIN"
- #
- # ## Optional TLS Config
- # # tls_ca = "/etc/telegraf/ca.pem"
- # # tls_cert = "/etc/telegraf/cert.pem"
- # # tls_key = "/etc/telegraf/key.pem"
- # ## Use TLS but skip chain & host verification
- # # insecure_skip_verify = false
- #
- # ## Data format to consume.
- # ## Each data format has its own unique set of configuration options, read
- # ## more about them here:
- # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
- # data_format = "influx"
- # # Read Cassandra metrics through Jolokia
- # [[inputs.cassandra]]
- # ## DEPRECATED: The cassandra plugin has been deprecated. Please use the
- # ## jolokia2 plugin instead.
- # ##
- # ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
- #
- # context = "/jolokia/read"
- # ## List of cassandra servers exposing jolokia read service
- # servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
- # ## List of metrics collected on above servers
- # ## Each metric consists of a jmx path.
- # ## This will collect all heap memory usage metrics from the jvm and
- # ## ReadLatency metrics for all keyspaces and tables.
- # ## "type=Table" in the query works with Cassandra3.0. Older versions might
- # ## need to use "type=ColumnFamily"
- # metrics = [
- # "/java.lang:type=Memory/HeapMemoryUsage",
- # "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
- # ]
- # # Influx HTTP write listener
- # [[inputs.http_listener]]
- # ## Address and port to host HTTP listener on
- # service_address = ":8186"
- #
- # ## maximum duration before timing out read of the request
- # read_timeout = "10s"
- # ## maximum duration before timing out write of the response
- # write_timeout = "10s"
- #
- # ## Maximum allowed http request body size in bytes.
- # ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
- # max_body_size = "500MiB"
- #
- # ## Maximum line size allowed to be sent in bytes.
- # ## 0 means to use the default of 65536 bytes (64 kibibytes)
- # max_line_size = "64KiB"
- #
- # ## Set one or more allowed client CA certificate file names to
- # ## enable mutually authenticated TLS connections
- # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
- #
- # ## Add service certificate and key
- # tls_cert = "/etc/telegraf/cert.pem"
- # tls_key = "/etc/telegraf/key.pem"
- #
- # ## Optional username and password to accept for HTTP basic authentication.
- # ## You probably want to make sure you have TLS configured above for this.
- # # basic_username = "foobar"
- # # basic_password = "barfoo"
- # # Generic HTTP write listener
- # [[inputs.http_listener_v2]]
- # ## Address and port to host HTTP listener on
- # service_address = ":8080"
- #
- # ## Path to listen to.
- # # path = "/telegraf"
- #
- # ## HTTP methods to accept.
- # # methods = ["POST", "PUT"]
- #
- # ## maximum duration before timing out read of the request
- # # read_timeout = "10s"
- # ## maximum duration before timing out write of the response
- # # write_timeout = "10s"
- #
- # ## Maximum allowed http request body size in bytes.
- # ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
- # # max_body_size = "500MB"
- #
- # ## Set one or more allowed client CA certificate file names to
- # ## enable mutually authenticated TLS connections
- # # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
- #
- # ## Add service certificate and key
- # # tls_cert = "/etc/telegraf/cert.pem"
- # # tls_key = "/etc/telegraf/key.pem"
- #
- # ## Optional username and password to accept for HTTP basic authentication.
- # ## You probably want to make sure you have TLS configured above for this.
- # # basic_username = "foobar"
- # # basic_password = "barfoo"
- #
- # ## Data format to consume.
- # ## Each data format has its own unique set of configuration options, read
- # ## more about them here:
- # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
- # data_format = "influx"
- # # Influx HTTP write listener
- # [[inputs.influxdb_listener]]
- # ## Address and port to host HTTP listener on
- # service_address = ":8186"
- #
- # ## maximum duration before timing out read of the request
- # read_timeout = "10s"
- # ## maximum duration before timing out write of the response
- # write_timeout = "10s"
- #
- # ## Maximum allowed http request body size in bytes.
- # ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
- # max_body_size = "500MiB"
- #
- # ## Maximum line size allowed to be sent in bytes.
- # ## 0 means to use the default of 65536 bytes (64 kibibytes)
- # max_line_size = "64KiB"
- #
- # ## Set one or more allowed client CA certificate file names to
- # ## enable mutually authenticated TLS connections
- # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
- #
- # ## Add service certificate and key
- # tls_cert = "/etc/telegraf/cert.pem"
- # tls_key = "/etc/telegraf/key.pem"
- #
- # ## Optional username and password to accept for HTTP basic authentication.
- # ## You probably want to make sure you have TLS configured above for this.
- # # basic_username = "foobar"
- # # basic_password = "barfoo"
- # # Read JTI OpenConfig Telemetry from listed sensors
- # [[inputs.jti_openconfig_telemetry]]
- # ## List of device addresses to collect telemetry from
- # servers = ["localhost:1883"]
- #
- # ## Authentication details. Username and password are must if device expects
- # ## authentication. Client ID must be unique when connecting from multiple instances
- # ## of telegraf to the same device
- # username = "user"
- # password = "pass"
- # client_id = "telegraf"
- #
- # ## Frequency to get data
- # sample_frequency = "1000ms"
- #
- # ## Sensors to subscribe for
- # ## A identifier for each sensor can be provided in path by separating with space
- # ## Else sensor path will be used as identifier
- # ## When identifier is used, we can provide a list of space separated sensors.
- # ## A single subscription will be created with all these sensors and data will
- # ## be saved to measurement with this identifier name
- # sensors = [
- # "/interfaces/",
- # "collection /components/ /lldp",
- # ]
- #
- # ## We allow specifying sensor group level reporting rate. To do this, specify the
- # ## reporting rate in Duration at the beginning of sensor paths / collection
- # ## name. For entries without reporting rate, we use configured sample frequency
- # sensors = [
- # "1000ms customReporting /interfaces /lldp",
- # "2000ms collection /components",
- # "/interfaces",
- # ]
- #
- # ## x509 Certificate to use with TLS connection. If it is not provided, an insecure
- # ## channel will be opened with server
- # ssl_cert = "/etc/telegraf/cert.pem"
- #
- # ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
- # ## Failed streams/calls will not be retried if 0 is provided
- # retry_delay = "1000ms"
- #
- # ## To treat all string values as tags, set this to true
- # str_as_tags = false
- # # Read metrics from Kafka topic(s)
- # [[inputs.kafka_consumer]]
- # ## kafka servers
- # brokers = ["localhost:9092"]
- # ## topic(s) to consume
- # topics = ["telegraf"]
- #
- # ## Optional Client id
- # # client_id = "Telegraf"
- #
- # ## Set the minimal supported Kafka version. Setting this enables the use of new
- # ## Kafka features and APIs. Of particular interest, lz4 compression
- # ## requires at least version 0.10.0.0.
- # ## ex: version = "1.1.0"
- # # version = ""
- #
- # ## Optional TLS Config
- # # tls_ca = "/etc/telegraf/ca.pem"
- # # tls_cert = "/etc/telegraf/cert.pem"
- # # tls_key = "/etc/telegraf/key.pem"
- # ## Use TLS but skip chain & host verification
- # # insecure_skip_verify = false
- #
- # ## Optional SASL Config
- # # sasl_username = "kafka"
- # # sasl_password = "secret"
- #
- # ## the name of the consumer group
- # consumer_group = "telegraf_metrics_consumers"
- # ## Offset (must be either "oldest" or "newest")
- # offset = "oldest"
- # ## Maximum length of a message to consume, in bytes (default 0/unlimited);
- # ## larger messages are dropped
- # max_message_len = 1000000
- #
- # ## Maximum messages to read from the broker that have not been written by an
- # ## output. For best throughput set based on the number of metrics within
- # ## each message and the size of the output's metric_batch_size.
- # ##
- # ## For example, if each message from the queue contains 10 metrics and the
- # ## output metric_batch_size is 1000, setting this to 100 will ensure that a
- # ## full batch is collected and the write is triggered immediately without
- # ## waiting until the next flush_interval.
- # # max_undelivered_messages = 1000
- #
- # ## Data format to consume.
- # ## Each data format has its own unique set of configuration options, read
- # ## more about them here:
- # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
- # data_format = "influx"
- # # Read metrics from Kafka topic(s)
- # [[inputs.kafka_consumer_legacy]]
- # ## topic(s) to consume
- # topics = ["telegraf"]
- # ## an array of Zookeeper connection strings
- # zookeeper_peers = ["localhost:2181"]
- # ## Zookeeper Chroot
- # zookeeper_chroot = ""
- # ## the name of the consumer group
- # consumer_group = "telegraf_metrics_consumers"
- # ## Offset (must be either "oldest" or "newest")
- # offset = "oldest"
- #
- # ## Data format to consume.
- # ## Each data format has its own unique set of configuration options, read
- # ## more about them here:
- # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
- # data_format = "influx"
- #
- # ## Maximum length of a message to consume, in bytes (default 0/unlimited);
- # ## larger messages are dropped
- # max_message_len = 65536
- # # Stream and parse log file(s).
- # [[inputs.logparser]]
- # ## Log files to parse.
- # ## These accept standard unix glob matching rules, but with the addition of
- # ## ** as a "super asterisk". ie:
- # ## /var/log/**.log -> recursively find all .log files in /var/log
- # ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
- # ## /var/log/apache.log -> only tail the apache log file
- # files = ["/var/log/apache/access.log"]
- #
- # ## Read files that currently exist from the beginning. Files that are created
- # ## while telegraf is running (and that match the "files" globs) will always
- # ## be read from the beginning.
- # from_beginning = false
- #
- # ## Method used to watch for file updates. Can be either "inotify" or "poll".
- # # watch_method = "inotify"
- #
- # ## Parse logstash-style "grok" patterns:
- # [inputs.logparser.grok]
- # ## This is a list of patterns to check the given log file(s) for.
- # ## Note that adding patterns here increases processing time. The most
- # ## efficient configuration is to have one pattern per logparser.
- # ## Other common built-in patterns are:
- # ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
- # ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
- # patterns = ["%{COMBINED_LOG_FORMAT}"]
- #
- # ## Name of the outputted measurement.
- # measurement = "apache_access_log"
- #
- # ## Full path(s) to custom pattern files.
- # custom_pattern_files = []
- #
- # ## Custom patterns can also be defined here. Put one pattern per line.
- # custom_patterns = '''
- # '''
- #
- # ## Timezone allows you to provide an override for timestamps that
- # ## don't already include an offset
- # ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
- # ##
- # ## Default: "" which renders UTC
- # ## Options are as follows:
- # ## 1. Local -- interpret based on machine localtime
- # ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
- # ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
- # # timezone = "Canada/Eastern"
- # # Read metrics from MQTT topic(s)
- # [[inputs.mqtt_consumer]]
- # ## MQTT broker URLs to be used. The format should be scheme://host:port,
- # ## scheme can be tcp, ssl, or ws.
- # servers = ["tcp://localhost:1883"]
- #
- # ## QoS policy for messages
- # ## 0 = at most once
- # ## 1 = at least once
- # ## 2 = exactly once
- # ##
- # ## When using a QoS of 1 or 2, you should enable persistent_session to allow
- # ## resuming unacknowledged messages.
- # qos = 0
- #
- # ## Connection timeout for initial connection in seconds
- # connection_timeout = "30s"
- #
- # ## Maximum messages to read from the broker that have not been written by an
- # ## output. For best throughput set based on the number of metrics within
- # ## each message and the size of the output's metric_batch_size.
- # ##
- # ## For example, if each message from the queue contains 10 metrics and the
- # ## output metric_batch_size is 1000, setting this to 100 will ensure that a
- # ## full batch is collected and the write is triggered immediately without
- # ## waiting until the next flush_interval.
- # # max_undelivered_messages = 1000
- #
- # ## Topics to subscribe to
- # topics = [
- # "telegraf/host01/cpu",
- # "telegraf/+/mem",
- # "sensors/#",
- # ]
- #
- # # if true, messages that can't be delivered while the subscriber is offline
- # # will be delivered when it comes back (such as on service restart).
- # # NOTE: if true, client_id MUST be set
- # persistent_session = false
- # # If empty, a random client ID will be generated.
- # client_id = ""
- #
- # ## username and password to connect MQTT server.
- # # username = "telegraf"
- # # password = "metricsmetricsmetricsmetrics"
- #
- # ## Optional TLS Config
- # # tls_ca = "/etc/telegraf/ca.pem"
- # # tls_cert = "/etc/telegraf/cert.pem"
- # # tls_key = "/etc/telegraf/key.pem"
- # ## Use TLS but skip chain & host verification
- # # insecure_skip_verify = false
- #
- # ## Data format to consume.
- # ## Each data format has its own unique set of configuration options, read
- # ## more about them here:
- # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
- # data_format = "influx"
- # # Read metrics from NATS subject(s)
- # [[inputs.nats_consumer]]
- # ## urls of NATS servers
- # servers = ["nats://localhost:4222"]
- # ## Use Transport Layer Security
- # secure = false
- # ## subject(s) to consume
- # subjects = ["telegraf"]
- # ## name a queue group
- # queue_group = "telegraf_consumers"
- #
- # ## Sets the limits for pending msgs and bytes for each subscription
- # ## These shouldn't need to be adjusted except in very high throughput scenarios
- # # pending_message_limit = 65536
- # # pending_bytes_limit = 67108864
- #
- # ## Maximum messages to read from the broker that have not been written by an
- # ## output. For best throughput set based on the number of metrics within
- # ## each message and the size of the output's metric_batch_size.
- # ##
- # ## For example, if each message from the queue contains 10 metrics and the
- # ## output metric_batch_size is 1000, setting this to 100 will ensure that a
- # ## full batch is collected and the write is triggered immediately without
- # ## waiting until the next flush_interval.
- # # max_undelivered_messages = 1000
- #
- # ## Data format to consume.
- # ## Each data format has its own unique set of configuration options, read
- # ## more about them here:
- # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
- # data_format = "influx"
- # # Read NSQ topic for metrics.
- # [[inputs.nsq_consumer]]
- # ## Server option still works but is deprecated, we just prepend it to the nsqd array.
- # # server = "localhost:4150"
- # ## An array representing the NSQD TCP Endpoints
- # nsqd = ["localhost:4150"]
- # ## An array representing the NSQLookupd HTTP Endpoints
- # nsqlookupd = ["localhost:4161"]
- # topic = "telegraf"
- # channel = "consumer"
- # max_in_flight = 100
- #
- # ## Maximum messages to read from the broker that have not been written by an
- # ## output. For best throughput set based on the number of metrics within
- # ## each message and the size of the output's metric_batch_size.
- # ##
- # ## For example, if each message from the queue contains 10 metrics and the
- # ## output metric_batch_size is 1000, setting this to 100 will ensure that a
- # ## full batch is collected and the write is triggered immediately without
- # ## waiting until the next flush_interval.
- # # max_undelivered_messages = 1000
- #
- # ## Data format to consume.
- # ## Each data format has its own unique set of configuration options, read
- # ## more about them here:
- # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
- # data_format = "influx"
- # # Read metrics from one or many pgbouncer servers
- # [[inputs.pgbouncer]]
- # ## specify address via a url matching:
- # ## postgres://[pqgotest[:password]]@localhost[/dbname]\
- # ## ?sslmode=[disable|verify-ca|verify-full]
- # ## or a simple string:
- # ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
- # ##
- # ## All connection parameters are optional.
- # ##
- # address = "host=localhost user=pgbouncer sslmode=disable"
- # # Read metrics from one or many postgresql servers
- # [[inputs.postgresql]]
- # ## specify address via a url matching:
- # ## postgres://[pqgotest[:password]]@localhost[/dbname]\
- # ## ?sslmode=[disable|verify-ca|verify-full]
- # ## or a simple string:
- # ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
- # ##
- # ## All connection parameters are optional.
- # ##
- # ## Without the dbname parameter, the driver will default to a database
- # ## with the same name as the user. This dbname is just for instantiating a
- # ## connection with the server and doesn't restrict the databases we are trying
- # ## to grab metrics for.
- # ##
- # address = "host=localhost user=postgres sslmode=disable"
- # ## A custom name for the database that will be used as the "server" tag in the
- # ## measurement output. If not specified, a default one generated from
- # ## the connection address is used.
- # # outputaddress = "db01"
- #
- # ## connection configuration.
- # ## maxlifetime - specify the maximum lifetime of a connection.
- # ## default is forever (0s)
- # max_lifetime = "0s"
- #
- # ## A list of databases to explicitly ignore. If not specified, metrics for all
- # ## databases are gathered. Do NOT use with the 'databases' option.
- # # ignored_databases = ["postgres", "template0", "template1"]
- #
- # ## A list of databases to pull metrics about. If not specified, metrics for all
- # ## databases are gathered. Do NOT use with the 'ignored_databases' option.
- # # databases = ["app_production", "testing"]
- # # Read metrics from one or many postgresql servers
- # [[inputs.postgresql_extensible]]
- # ## specify address via a url matching:
- # ## postgres://[pqgotest[:password]]@localhost[/dbname]\
- # ## ?sslmode=[disable|verify-ca|verify-full]
- # ## or a simple string:
- # ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
- # #
- # ## All connection parameters are optional. #
- # ## Without the dbname parameter, the driver will default to a database
- # ## with the same name as the user. This dbname is just for instantiating a
- # ## connection with the server and doesn't restrict the databases we are trying
- # ## to grab metrics for.
- # #
- # address = "host=localhost user=postgres sslmode=disable"
- #
- # ## connection configuration.
- # ## maxlifetime - specify the maximum lifetime of a connection.
- # ## default is forever (0s)
- # max_lifetime = "0s"
- #
- # ## A list of databases to pull metrics about. If not specified, metrics for all
- # ## databases are gathered.
- # ## databases = ["app_production", "testing"]
- # #
- # ## A custom name for the database that will be used as the "server" tag in the
- # ## measurement output. If not specified, a default one generated from
- # ## the connection address is used.
- # # outputaddress = "db01"
- # #
- # ## Define the toml config where the sql queries are stored
- # ## New queries can be added, if the withdbname is set to true and there is no
- # ## databases defined in the 'databases field', the sql query is ended by a
- # ## 'is not null' in order to make the query succeed.
- # ## Example :
- # ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become
- # ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
- # ## because the databases variable was set to ['postgres', 'pgbench' ] and the
- # ## withdbname was true. Be careful that if the withdbname is set to false you
- # ## don't have to define the where clause (aka with the dbname) the tagvalue
- # ## field is used to define custom tags (separated by commas)
- # ## The optional "measurement" value can be used to override the default
- # ## output measurement name ("postgresql").
- # #
- # ## Structure :
- # ## [[inputs.postgresql_extensible.query]]
- # ## sqlquery string
- # ## version string
- # ## withdbname boolean
- # ## tagvalue string (comma separated)
- # ## measurement string
- # [[inputs.postgresql_extensible.query]]
- # sqlquery="SELECT * FROM pg_stat_database"
- # version=901
- # withdbname=false
- # tagvalue=""
- # measurement=""
- # [[inputs.postgresql_extensible.query]]
- # sqlquery="SELECT * FROM pg_stat_bgwriter"
- # version=901
- # withdbname=false
- # tagvalue="postgresql.stats"
- # # Read metrics from one or many prometheus clients
- # [[inputs.prometheus]]
- # ## An array of urls to scrape metrics from.
- # urls = ["http://localhost:9100/metrics"]
- #
- # ## An array of Kubernetes services to scrape metrics from.
- # # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
- #
- # ## Kubernetes config file to create client from.
- # # kube_config = "/path/to/kubernetes.config"
- #
- # ## Scrape Kubernetes pods for the following prometheus annotations:
- # ## - prometheus.io/scrape: Enable scraping for this pod
- # ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
- # ## set this to 'https' & most likely set the tls config.
- # ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
- # ## - prometheus.io/port: If port is not 9102 use this annotation
- # # monitor_kubernetes_pods = true
- #
- # ## Use bearer token for authorization
- # # bearer_token = /path/to/bearer/token
- #
- # ## Specify timeout duration for slower prometheus clients (default is 3s)
- # # response_timeout = "3s"
- #
- # ## Optional TLS Config
- # # tls_ca = /path/to/cafile
- # # tls_cert = /path/to/certfile
- # # tls_key = /path/to/keyfile
- # ## Use TLS but skip chain & host verification
- # # insecure_skip_verify = false
- # # Generic socket listener capable of handling multiple socket types.
- # [[inputs.socket_listener]]
- # ## URL to listen on
- # # service_address = "tcp://:8094"
- # # service_address = "tcp://127.0.0.1:http"
- # # service_address = "tcp4://:8094"
- # # service_address = "tcp6://:8094"
- # # service_address = "tcp6://[2001:db8::1]:8094"
- # # service_address = "udp://:8094"
- # # service_address = "udp4://:8094"
- # # service_address = "udp6://:8094"
- # # service_address = "unix:///tmp/telegraf.sock"
- # # service_address = "unixgram:///tmp/telegraf.sock"
- #
- # ## Maximum number of concurrent connections.
- # ## Only applies to stream sockets (e.g. TCP).
- # ## 0 (default) is unlimited.
- # # max_connections = 1024
- #
- # ## Read timeout.
- # ## Only applies to stream sockets (e.g. TCP).
- # ## 0 (default) is unlimited.
- # # read_timeout = "30s"
- #
- # ## Optional TLS configuration.
- # ## Only applies to stream sockets (e.g. TCP).
- # # tls_cert = "/etc/telegraf/cert.pem"
- # # tls_key = "/etc/telegraf/key.pem"
- # ## Enables client authentication if set.
- # # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
- #
- # ## Maximum socket buffer size (in bytes when no unit specified).
- # ## For stream sockets, once the buffer fills up, the sender will start backing up.
- # ## For datagram sockets, once the buffer fills up, metrics will start dropping.
- # ## Defaults to the OS default.
- # # read_buffer_size = "64KiB"
- #
- # ## Period between keep alive probes.
- # ## Only applies to TCP sockets.
- # ## 0 disables keep alive probes.
- # ## Defaults to the OS configuration.
- # # keep_alive_period = "5m"
- #
- # ## Data format to consume.
- # ## Each data format has its own unique set of configuration options, read
- # ## more about them here:
- # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
- # # data_format = "influx"
- # # Statsd UDP/TCP Server
- # [[inputs.statsd]]
- # ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
- # protocol = "udp"
- #
- # ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
- # max_tcp_connections = 250
- #
- # ## Enable TCP keep alive probes (default=false)
- # tcp_keep_alive = false
- #
- # ## Specifies the keep-alive period for an active network connection.
- # ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
- # ## Defaults to the OS configuration.
- # # tcp_keep_alive_period = "2h"
- #
- # ## Address and port to host UDP listener on
- # service_address = ":8125"
- #
- # ## The following configuration options control when telegraf clears its cache
- # ## of previous values. If set to false, then telegraf will only clear its
- # ## cache when the daemon is restarted.
- # ## Reset gauges every interval (default=true)
- # delete_gauges = true
- # ## Reset counters every interval (default=true)
- # delete_counters = true
- # ## Reset sets every interval (default=true)
- # delete_sets = true
- # ## Reset timings & histograms every interval (default=true)
- # delete_timings = true
- #
- # ## Percentiles to calculate for timing & histogram stats
- # percentiles = [90]
- #
- # ## separator to use between elements of a statsd metric
- # metric_separator = "_"
- #
- # ## Parses tags in the datadog statsd format
- # ## http://docs.datadoghq.com/guides/dogstatsd/
- # parse_data_dog_tags = false
- #
- # ## Statsd data translation templates, more info can be read here:
- # ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
- # # templates = [
- # # "cpu.* measurement*"
- # # ]
- #
- # ## Number of UDP messages allowed to queue up, once filled,
- # ## the statsd server will start dropping packets
- # allowed_pending_messages = 10000
- #
- # ## Number of timing/histogram values to track per-measurement in the
- # ## calculation of percentiles. Raising this limit increases the accuracy
- # ## of percentiles but also increases the memory usage and cpu time.
- # percentile_limit = 1000
- # # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587
- # [[inputs.syslog]]
- # ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514
- # ## Protocol, address and port to host the syslog receiver.
- # ## If no host is specified, then localhost is used.
- # ## If no port is specified, 6514 is used (RFC5425#section-4.1).
- # server = "tcp://:6514"
- #
- # ## TLS Config
- # # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
- # # tls_cert = "/etc/telegraf/cert.pem"
- # # tls_key = "/etc/telegraf/key.pem"
- #
- # ## Period between keep alive probes.
- # ## 0 disables keep alive probes.
- # ## Defaults to the OS configuration.
- # ## Only applies to stream sockets (e.g. TCP).
- # # keep_alive_period = "5m"
- #
- # ## Maximum number of concurrent connections (default = 0).
- # ## 0 means unlimited.
- # ## Only applies to stream sockets (e.g. TCP).
- # # max_connections = 1024
- #
- # ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
- # ## 0 means unlimited.
- # # read_timeout = "5s"
- #
- # ## The framing technique with which it is expected that messages are transported (default = "octet-counting").
- # ## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
- # ## or the non-transparent framing technique (RFC6587#section-3.4.2).
- # ## Must be one of "octect-counting", "non-transparent".
- # # framing = "octet-counting"
- #
- # ## The trailer to be expected in case of non-transparent framing (default = "LF").
- # ## Must be one of "LF", or "NUL".
- # # trailer = "LF"
- #
- # ## Whether to parse in best effort mode or not (default = false).
- # ## By default best effort parsing is off.
- # # best_effort = false
- #
- # ## Character to prepend to SD-PARAMs (default = "_").
- # ## A syslog message can contain multiple parameters and multiple identifiers within structured data section.
- # ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
- # ## For each combination a field is created.
- # ## Its name is created concatenating identifier, sdparam_separator, and parameter name.
- # # sdparam_separator = "_"
- # # Stream a log file, like the tail -f command
- # [[inputs.tail]]
- # ## files to tail.
- # ## These accept standard unix glob matching rules, but with the addition of
- # ## ** as a "super asterisk". ie:
- # ## "/var/log/**.log" -> recursively find all .log files in /var/log
- # ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
- # ## "/var/log/apache.log" -> just tail the apache log file
- # ##
- # ## See https://github.com/gobwas/glob for more examples
- # ##
- # files = ["/var/mymetrics.out"]
- # ## Read file from beginning.
- # from_beginning = false
- # ## Whether file is a named pipe
- # pipe = false
- #
- # ## Method used to watch for file updates. Can be either "inotify" or "poll".
- # # watch_method = "inotify"
- #
- # ## Data format to consume.
- # ## Each data format has its own unique set of configuration options, read
- # ## more about them here:
- # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
- # data_format = "influx"
- # # Generic TCP listener
- # [[inputs.tcp_listener]]
- # # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
- # # socket_listener plugin
- # # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
- # # Generic UDP listener
- # [[inputs.udp_listener]]
- # # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
- # # socket_listener plugin
- # # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
- # # Read metrics from VMware vCenter
- # [[inputs.vsphere]]
- # ## List of vCenter URLs to be monitored. These three lines must be uncommented
- # ## and edited for the plugin to work.
- # vcenters = [ "https://vcenter.local/sdk" ]
- # username = "user@corp.local"
- # password = "secret"
- #
- # ## VMs
- # ## Typical VM metrics (if omitted or empty, all metrics are collected)
- # vm_metric_include = [
- # "cpu.demand.average",
- # "cpu.idle.summation",
- # "cpu.latency.average",
- # "cpu.readiness.average",
- # "cpu.ready.summation",
- # "cpu.run.summation",
- # "cpu.usagemhz.average",
- # "cpu.used.summation",
- # "cpu.wait.summation",
- # "mem.active.average",
- # "mem.granted.average",
- # "mem.latency.average",
- # "mem.swapin.average",
- # "mem.swapinRate.average",
- # "mem.swapout.average",
- # "mem.swapoutRate.average",
- # "mem.usage.average",
- # "mem.vmmemctl.average",
- # "net.bytesRx.average",
- # "net.bytesTx.average",
- # "net.droppedRx.summation",
- # "net.droppedTx.summation",
- # "net.usage.average",
- # "power.power.average",
- # "virtualDisk.numberReadAveraged.average",
- # "virtualDisk.numberWriteAveraged.average",
- # "virtualDisk.read.average",
- # "virtualDisk.readOIO.latest",
- # "virtualDisk.throughput.usage.average",
- # "virtualDisk.totalReadLatency.average",
- # "virtualDisk.totalWriteLatency.average",
- # "virtualDisk.write.average",
- # "virtualDisk.writeOIO.latest",
- # "sys.uptime.latest",
- # ]
- # # vm_metric_exclude = [] ## Nothing is excluded by default
- # # vm_instances = true ## true by default
- #
- # ## Hosts
- # ## Typical host metrics (if omitted or empty, all metrics are collected)
- # host_metric_include = [
- # "cpu.coreUtilization.average",
- # "cpu.costop.summation",
- # "cpu.demand.average",
- # "cpu.idle.summation",
- # "cpu.latency.average",
- # "cpu.readiness.average",
- # "cpu.ready.summation",
- # "cpu.swapwait.summation",
- # "cpu.usage.average",
- # "cpu.usagemhz.average",
- # "cpu.used.summation",
- # "cpu.utilization.average",
- # "cpu.wait.summation",
- # "disk.deviceReadLatency.average",
- # "disk.deviceWriteLatency.average",
- # "disk.kernelReadLatency.average",
- # "disk.kernelWriteLatency.average",
- # "disk.numberReadAveraged.average",
- # "disk.numberWriteAveraged.average",
- # "disk.read.average",
- # "disk.totalReadLatency.average",
- # "disk.totalWriteLatency.average",
- # "disk.write.average",
- # "mem.active.average",
- # "mem.latency.average",
- # "mem.state.latest",
- # "mem.swapin.average",
- # "mem.swapinRate.average",
- # "mem.swapout.average",
- # "mem.swapoutRate.average",
- # "mem.totalCapacity.average",
- # "mem.usage.average",
- # "mem.vmmemctl.average",
- # "net.bytesRx.average",
- # "net.bytesTx.average",
- # "net.droppedRx.summation",
- # "net.droppedTx.summation",
- # "net.errorsRx.summation",
- # "net.errorsTx.summation",
- # "net.usage.average",
- # "power.power.average",
- # "storageAdapter.numberReadAveraged.average",
- # "storageAdapter.numberWriteAveraged.average",
- # "storageAdapter.read.average",
- # "storageAdapter.write.average",
- # "sys.uptime.latest",
- # ]
- # # host_metric_exclude = [] ## Nothing excluded by default
- # # host_instances = true ## true by default
- #
- # ## Clusters
- # # cluster_metric_include = [] ## if omitted or empty, all metrics are collected
- # # cluster_metric_exclude = [] ## Nothing excluded by default
- # # cluster_instances = true ## true by default
- #
- # ## Datastores
- # # datastore_metric_include = [] ## if omitted or empty, all metrics are collected
- # # datastore_metric_exclude = [] ## Nothing excluded by default
- # # datastore_instances = false ## false by default for Datastores only
- #
- # ## Datacenters
- # datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
- # datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
- # # datacenter_instances = false ## false by default for Datacenters
- #
- # ## Plugin Settings
- # ## separator character to use for measurement and field names (default: "_")
- # # separator = "_"
- #
- # ## number of objects to retrieve per query for realtime resources (vms and hosts)
- # ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
- # # max_query_objects = 256
- #
- # ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores)
- # ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
- # # max_query_metrics = 256
- #
- # ## number of go routines to use for collection and discovery of objects and metrics
- # # collect_concurrency = 1
- # # discover_concurrency = 1
- #
- # ## whether or not to force discovery of new objects on initial gather call before collecting metrics
- # ## when true for large environments this may cause errors for time elapsed while collecting metrics
- # ## when false (default) the first collection cycle may result in no or limited metrics while objects are discovered
- # # force_discover_on_init = false
- #
- # ## the interval before (re)discovering objects subject to metrics collection (default: 300s)
- # # object_discovery_interval = "300s"
- #
- # ## timeout applies to any of the api request made to vcenter
- # # timeout = "60s"
- #
- # ## Optional SSL Config
- # # ssl_ca = "/path/to/cafile"
- # # ssl_cert = "/path/to/certfile"
- # # ssl_key = "/path/to/keyfile"
- # ## Use SSL but skip chain & host verification
- # # insecure_skip_verify = false
- # # A Webhooks Event collector
- # [[inputs.webhooks]]
- # ## Address and port to host Webhook listener on
- # service_address = ":1619"
- #
- # [inputs.webhooks.filestack]
- # path = "/filestack"
- #
- # [inputs.webhooks.github]
- # path = "/github"
- # # secret = ""
- #
- # [inputs.webhooks.mandrill]
- # path = "/mandrill"
- #
- # [inputs.webhooks.rollbar]
- # path = "/rollbar"
- #
- # [inputs.webhooks.papertrail]
- # path = "/papertrail"
- #
- # [inputs.webhooks.particle]
- # path = "/particle"
- # # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
- # [[inputs.zipkin]]
- # # path = "/api/v1/spans" # URL path for span data
- # # port = 9411 # Port on which Telegraf listens
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement