Advertisement
Guest User

Untitled

a guest
Feb 28th, 2019
329
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 62.53 KB | None | 0 0
  1. # Telegraf Configuration
  2. #
  3. # Telegraf is entirely plugin driven. All metrics are gathered from the
  4. # declared inputs, and sent to the declared outputs.
  5. #
  6. # Plugins must be declared in here to be active.
  7. # To deactivate a plugin, comment out the name and any variables.
  8. #
  9. # Use 'telegraf -config telegraf.conf -test' to see what metrics a config
  10. # file would generate.
  11. #
  12. # Environment variables can be used anywhere in this config file, simply prepend
  13. # them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
  14. # for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)
  15.  
  16.  
  17. # Global tags can be specified here in key="value" format.
  18. [global_tags]
  19. # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  20. # rack = "1a"
  21. ## Environment variables can be used as tags, and throughout the config file
  22. # user = "$USER"
  23.  
  24.  
  25. # Configuration for telegraf agent
  26. [agent]
  27. ## Default data collection interval for all inputs
  28. interval = "10s"
  29. ## Rounds collection interval to 'interval'
  30. ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  31. round_interval = true
  32.  
  33. ## Telegraf will send metrics to outputs in batches of at most
  34. ## metric_batch_size metrics.
  35. ## This controls the size of writes that Telegraf sends to output plugins.
  36. metric_batch_size = 1000
  37.  
  38. ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
  39. ## output, and will flush this buffer on a successful write. Oldest metrics
  40. ## are dropped first when this buffer fills.
  41. ## This buffer only fills when writes fail to output plugin(s).
  42. metric_buffer_limit = 10000
  43.  
  44. ## Collection jitter is used to jitter the collection by a random amount.
  45. ## Each plugin will sleep for a random time within jitter before collecting.
  46. ## This can be used to avoid many plugins querying things like sysfs at the
  47. ## same time, which can have a measurable effect on the system.
  48. collection_jitter = "0s"
  49.  
  50. ## Default flushing interval for all outputs. Maximum flush_interval will be
  51. ## flush_interval + flush_jitter
  52. flush_interval = "10s"
  53. ## Jitter the flush interval by a random amount. This is primarily to avoid
  54. ## large write spikes for users running a large number of telegraf instances.
  55. ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  56. flush_jitter = "0s"
  57.  
  58. ## By default or when set to "0s", precision will be set to the same
  59. ## timestamp order as the collection interval, with the maximum being 1s.
  60. ## ie, when interval = "10s", precision will be "1s"
  61. ## when interval = "250ms", precision will be "1ms"
  62. ## Precision will NOT be used for service inputs. It is up to each individual
  63. ## service input to set the timestamp at the appropriate precision.
  64. ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
  65. precision = ""
  66.  
  67. ## Logging configuration:
  68. ## Run telegraf with debug log messages.
  69. debug = false
  70. ## Run telegraf in quiet mode (error log messages only).
  71. quiet = false
  72. ## Specify the log file name. The empty string means to log to stderr.
  73. logfile = ""
  74.  
  75. ## Override default hostname, if empty use os.Hostname()
  76. hostname = ""
  77. ## If set to true, do not set the "host" tag in the telegraf agent.
  78. omit_hostname = false
  79.  
  80.  
  81. ###############################################################################
  82. # OUTPUT PLUGINS #
  83. ###############################################################################
  84.  
  85. # Configuration for sending metrics to InfluxDB
  86. [[outputs.influxdb]]
  87. urls = ["http://127.0.0.1:8086"]
  88. database = "telegraf"
  89. username = "telegraf"
  90. password = "yourpassword"
  91.  
  92.  
  93.  
  94. ###############################################################################
  95. # INPUT PLUGINS #
  96. ###############################################################################
  97.  
  98. # Read metrics about cpu usage
  99. [[inputs.cpu]]
  100. ## Whether to report per-cpu stats or not
  101. percpu = true
  102. ## Whether to report total system cpu stats or not
  103. totalcpu = true
  104. ## If true, collect raw CPU time metrics.
  105. collect_cpu_time = false
  106. ## If true, compute and report the sum of all non-idle CPU states.
  107. report_active = false
  108.  
  109.  
  110. # Read metrics about disk usage by mount point
  111. [[inputs.disk]]
  112. ## By default stats will be gathered for all mount points.
  113. ## Set mount_points will restrict the stats to only the specified mount points.
  114. # mount_points = ["/"]
  115.  
  116. ## Ignore mount points by filesystem type.
  117. ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
  118.  
  119.  
  120. # Read metrics about disk IO by device
  121. [[inputs.diskio]]
  122. ## By default, telegraf will gather stats for all devices including
  123. ## disk partitions.
  124. ## Setting devices will restrict the stats to the specified devices.
  125. # devices = ["sda", "sdb", "vd*"]
  126. ## Uncomment the following line if you need disk serial numbers.
  127. # skip_serial_number = false
  128. #
  129. ## On systems which support it, device metadata can be added in the form of
  130. ## tags.
  131. ## Currently only Linux is supported via udev properties. You can view
  132. ## available properties for a device by running:
  133. ## 'udevadm info -q property -n /dev/sda'
  134. # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
  135. #
  136. ## Using the same metadata source as device_tags, you can also customize the
  137. ## name of the device via templates.
  138. ## The 'name_templates' parameter is a list of templates to try and apply to
  139. ## the device. The template may contain variables in the form of '$PROPERTY' or
  140. ## '${PROPERTY}'. The first template which does not contain any variables not
  141. ## present for the device is used as the device name tag.
  142. ## The typical use case is for LVM volumes, to get the VG/LV name instead of
  143. ## the near-meaningless DM-0 name.
  144. # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
  145.  
  146.  
  147. # Get kernel statistics from /proc/stat
  148. [[inputs.kernel]]
  149. # no configuration
  150.  
  151.  
  152. # Read metrics about memory usage
  153. [[inputs.mem]]
  154. # no configuration
  155.  
  156.  
  157. # Get the number of processes and group them by status
  158. [[inputs.processes]]
  159. # no configuration
  160.  
  161.  
  162. # Read metrics about swap memory usage
  163. [[inputs.swap]]
  164. # no configuration
  165.  
  166.  
  167. # Read metrics about system load & uptime
  168. [[inputs.system]]
  169. # no configuration
  170.  
  171.  
  172.  
  173.  
  174.  
  175.  
  176.  
  177. # # Retrieves SNMP values from remote agents
  178. [[inputs.snmp]]
  179. agents = [ "192.168.100.251:161" ]
  180. # ## Timeout for each SNMP query.
  181. # timeout = "5s"
  182. # ## Number of retries to attempt within timeout.
  183. # retries = 3
  184. # ## SNMP version, values can be 1, 2, or 3
  185. # version = 2
  186. #
  187. # ## SNMP community string.
  188. community = "public"
  189. #
  190. # ## The GETBULK max-repetitions parameter
  191. # max_repetitions = 10
  192. #
  193. # ## SNMPv3 auth parameters
  194. # #sec_name = "myuser"
  195. # #auth_protocol = "md5" # Values: "MD5", "SHA", ""
  196. # #auth_password = "pass"
  197. # #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv"
  198. # #context_name = ""
  199. # #priv_protocol = "" # Values: "DES", "AES", ""
  200. # #priv_password = ""
  201. #
  202.  
  203. [[inputs.snmp.field]]
  204. name = "hostname"
  205. oid = "RFC1213-MIB::sysName.0"
  206. is_tag = true
  207.  
  208. [[inputs.snmp.field]]
  209. name = "uptime"
  210. oid = "DISMAN-EXPRESSION-MIB::sysUpTimeInstance"
  211.  
  212.  
  213.  
  214. [[inputs.snmp.field]]
  215. name = "upsBasicBatteryTimeOnBattery"
  216. oid = "SNMPv2-SMI::mib-2.33.1.2.3.0"
  217.  
  218. [[inputs.snmp.field]]
  219. name = "upsAdvBatteryRunTimeRemaining"
  220. oid = "PowerNet-MIB::upsAdvBatteryRunTimeRemaining.0"
  221.  
  222. [[inputs.snmp.field]]
  223. name = "upsAdvBatteryReplaceIndicator"
  224. oid = "PowerNet-MIB::upsAdvBatteryReplaceIndicator.0"
  225.  
  226. [[inputs.snmp.field]]
  227. name = "upsHighPrecBatteryCapacity"
  228. oid = "PowerNet-MIB::upsHighPrecBatteryCapacity.0"
  229. conversion = "float(1)"
  230.  
  231. [[inputs.snmp.field]]
  232. name = "upsHighPrecBatteryTemperature"
  233. oid = "PowerNet-MIB::upsHighPrecBatteryTemperature.0"
  234. conversion = "float(1)"
  235.  
  236. [[inputs.snmp.field]]
  237. name = "upsBasicOutputStatus"
  238. oid = "PowerNet-MIB::upsBasicOutputStatus.0"
  239.  
  240. [[inputs.snmp.field]]
  241. name = "upsHighPrecOutputLoad"
  242. oid = "PowerNet-MIB::upsHighPrecOutputLoad.0"
  243. conversion = "float(1)"
  244.  
  245. [[inputs.snmp.field]]
  246. name = "upsHighPrecOutputEfficiency"
  247. oid = "PowerNet-MIB::upsHighPrecOutputEfficiency.0"
  248. conversion = "float(1)"
  249.  
  250. [[inputs.snmp.field]]
  251. name = "upsHighPrecOutputVoltage"
  252. oid = "PowerNet-MIB::upsHighPrecOutputVoltage.0"
  253. conversion = "float(1)"
  254.  
  255. [[inputs.snmp.field]]
  256. name = "upsHighPrecInputLineVoltage"
  257. oid = "PowerNet-MIB::upsHighPrecInputLineVoltage.0"
  258. conversion = "float(1)"
  259.  
  260. [[inputs.snmp.field]]
  261. name = "upsHighPrecOutputCurrent"
  262. oid = "PowerNet-MIB::upsHighPrecOutputCurrent.0"
  263. conversion = "float(1)"
  264.  
  265. [[inputs.snmp.field]]
  266. name = "upsHighPrecOutputEnergyUsage"
  267. oid = "PowerNet-MIB::upsHighPrecOutputEnergyUsage.0"
  268. conversion = "float(1)"
  269.  
  270.  
  271. # IF-MIB::ifTable contains counters on input and output traffic as well as errors and discards.
  272. [[inputs.snmp.table]]
  273. name = "interface"
  274. inherit_tags = [ "hostname" ]
  275. oid = "IF-MIB::ifTable"
  276.  
  277. # Interface tag - used to identify interface in metrics database
  278. [[inputs.snmp.table.field]]
  279. name = "ifDescr"
  280. oid = "IF-MIB::ifDescr"
  281. is_tag = true
  282.  
  283. # IF-MIB::ifXTable contains newer High Capacity (HC) counters that do not overflow as fast for a few of the ifTable counters
  284. [[inputs.snmp.table]]
  285. name = "interface"
  286. inherit_tags = [ "hostname" ]
  287. oid = "IF-MIB::ifXTable"
  288.  
  289. # Interface tag - used to identify interface in metrics database
  290. [[inputs.snmp.table.field]]
  291. name = "ifDescr"
  292. oid = "IF-MIB::ifDescr"
  293. is_tag = true
  294.  
  295. # EtherLike-MIB::dot3StatsTable contains detailed ethernet-level information about what kind of errors have been logged on an interface (such as FCS error, frame too long, etc)
  296. [[inputs.snmp.table]]
  297. name = "interface"
  298. inherit_tags = [ "hostname" ]
  299. oid = "EtherLike-MIB::dot3StatsTable"
  300.  
  301. # Interface tag - used to identify interface in metrics database
  302. [[inputs.snmp.table.field]]
  303. name = "ifDescr"
  304. oid = "IF-MIB::ifDescr"
  305. is_tag = true
  306.  
  307.  
  308.  
  309. # ## measurement name
  310. # name = "system"
  311. # [[inputs.snmp.field]]
  312. # name = "hostname"
  313. # oid = ".1.0.0.1.1"
  314. # [[inputs.snmp.field]]
  315. # name = "uptime"
  316. # oid = ".1.0.0.1.2"
  317. # [[inputs.snmp.field]]
  318. # name = "load"
  319. # oid = ".1.0.0.1.3"
  320. # [[inputs.snmp.field]]
  321. # oid = "HOST-RESOURCES-MIB::hrMemorySize"
  322. #
  323. # [[inputs.snmp.table]]
  324. # ## measurement name
  325. # name = "remote_servers"
  326. # inherit_tags = [ "hostname" ]
  327. # [[inputs.snmp.table.field]]
  328. # name = "server"
  329. # oid = ".1.0.0.0.1.0"
  330. # is_tag = true
  331. # [[inputs.snmp.table.field]]
  332. # name = "connections"
  333. # oid = ".1.0.0.0.1.1"
  334. # [[inputs.snmp.table.field]]
  335. # name = "latency"
  336. # oid = ".1.0.0.0.1.2"
  337. #
  338. # [[inputs.snmp.table]]
  339. # ## auto populate table's fields using the MIB
  340. # oid = "HOST-RESOURCES-MIB::hrNetworkTable"
  341.  
  342.  
  343. # # DEPRECATED! PLEASE USE inputs.snmp INSTEAD.
  344. # [[inputs.snmp_legacy]]
  345. # ## Use 'oids.txt' file to translate oids to names
  346. # ## To generate 'oids.txt' you need to run:
  347. # ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
  348. # ## Or if you have an other MIB folder with custom MIBs
  349. # ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
  350. # snmptranslate_file = "/tmp/oids.txt"
  351. # [[inputs.snmp.host]]
  352. # address = "192.168.2.2:161"
  353. # # SNMP community
  354. # community = "public" # default public
  355. # # SNMP version (1, 2 or 3)
  356. # # Version 3 not supported yet
  357. # version = 2 # default 2
  358. # # SNMP response timeout
  359. # timeout = 2.0 # default 2.0
  360. # # SNMP request retries
  361. # retries = 2 # default 2
  362. # # Which get/bulk do you want to collect for this host
  363. # collect = ["mybulk", "sysservices", "sysdescr"]
  364. # # Simple list of OIDs to get, in addition to "collect"
  365. # get_oids = []
  366. #
  367. # [[inputs.snmp.host]]
  368. # address = "192.168.2.3:161"
  369. # community = "public"
  370. # version = 2
  371. # timeout = 2.0
  372. # retries = 2
  373. # collect = ["mybulk"]
  374. # get_oids = [
  375. # "ifNumber",
  376. # ".1.3.6.1.2.1.1.3.0",
  377. # ]
  378. #
  379. # [[inputs.snmp.get]]
  380. # name = "ifnumber"
  381. # oid = "ifNumber"
  382. #
  383. # [[inputs.snmp.get]]
  384. # name = "interface_speed"
  385. # oid = "ifSpeed"
  386. # instance = "0"
  387. #
  388. # [[inputs.snmp.get]]
  389. # name = "sysuptime"
  390. # oid = ".1.3.6.1.2.1.1.3.0"
  391. # unit = "second"
  392. #
  393. # [[inputs.snmp.bulk]]
  394. # name = "mybulk"
  395. # max_repetition = 127
  396. # oid = ".1.3.6.1.2.1.1"
  397. #
  398. # [[inputs.snmp.bulk]]
  399. # name = "ifoutoctets"
  400. # max_repetition = 127
  401. # oid = "ifOutOctets"
  402. #
  403. # [[inputs.snmp.host]]
  404. # address = "192.168.2.13:161"
  405. # #address = "127.0.0.1:161"
  406. # community = "public"
  407. # version = 2
  408. # timeout = 2.0
  409. # retries = 2
  410. # #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
  411. # collect = ["sysuptime" ]
  412. # [[inputs.snmp.host.table]]
  413. # name = "iftable3"
  414. # include_instances = ["enp5s0", "eth1"]
  415. #
  416. # # SNMP TABLEs
  417. # # table without mapping neither subtables
  418. # [[inputs.snmp.table]]
  419. # name = "iftable1"
  420. # oid = ".1.3.6.1.2.1.31.1.1.1"
  421. #
  422. # # table without mapping but with subtables
  423. # [[inputs.snmp.table]]
  424. # name = "iftable2"
  425. # oid = ".1.3.6.1.2.1.31.1.1.1"
  426. # sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
  427. #
  428. # # table with mapping but without subtables
  429. # [[inputs.snmp.table]]
  430. # name = "iftable3"
  431. # oid = ".1.3.6.1.2.1.31.1.1.1"
  432. # # if empty. get all instances
  433. # mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
  434. # # if empty, get all subtables
  435. #
  436. # # table with both mapping and subtables
  437. # [[inputs.snmp.table]]
  438. # name = "iftable4"
  439. # oid = ".1.3.6.1.2.1.31.1.1.1"
  440. # # if empty get all instances
  441. # mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
  442. # # if empty get all subtables
  443. # # sub_tables could be not "real subtables"
  444. # sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
  445.  
  446.  
  447. # # Read stats from one or more Solr servers or cores
  448. # [[inputs.solr]]
  449. # ## specify a list of one or more Solr servers
  450. # servers = ["http://localhost:8983"]
  451. #
  452. # ## specify a list of one or more Solr cores (default - all)
  453. # # cores = ["main"]
  454.  
  455.  
  456. # # Read metrics from Microsoft SQL Server
  457. # [[inputs.sqlserver]]
  458. # ## Specify instances to monitor with a list of connection strings.
  459. # ## All connection parameters are optional.
  460. # ## By default, the host is localhost, listening on default port, TCP 1433.
  461. # ## for Windows, the user is the currently running AD user (SSO).
  462. # ## See https://github.com/denisenkom/go-mssqldb for detailed connection
  463. # ## parameters.
  464. # # servers = [
  465. # # "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;",
  466. # # ]
  467. #
  468. # ## Optional parameter, setting this to 2 will use a new version
  469. # ## of the collection queries that break compatibility with the original
  470. # ## dashboards.
  471. # query_version = 2
  472. #
  473. # ## If you are using AzureDB, setting this to true will gather resource utilization metrics
  474. # # azuredb = false
  475. #
  476. # ## If you would like to exclude some of the metrics queries, list them here
  477. # ## Possible choices:
  478. # ## - PerformanceCounters
  479. # ## - WaitStatsCategorized
  480. # ## - DatabaseIO
  481. # ## - DatabaseProperties
  482. # ## - CPUHistory
  483. # ## - DatabaseSize
  484. # ## - DatabaseStats
  485. # ## - MemoryClerk
  486. # ## - VolumeSpace
  487. # ## - PerformanceMetrics
  488. # # exclude_query = [ 'DatabaseIO' ]
  489.  
  490.  
  491. # # Sysstat metrics collector
  492. # [[inputs.sysstat]]
  493. # ## Path to the sadc command.
  494. # #
  495. # ## Common Defaults:
  496. # ## Debian/Ubuntu: /usr/lib/sysstat/sadc
  497. # ## Arch: /usr/lib/sa/sadc
  498. # ## RHEL/CentOS: /usr/lib64/sa/sadc
  499. # sadc_path = "/usr/lib/sa/sadc" # required
  500. # #
  501. # #
  502. # ## Path to the sadf command, if it is not in PATH
  503. # # sadf_path = "/usr/bin/sadf"
  504. # #
  505. # #
  506. # ## Activities is a list of activities, that are passed as argument to the
  507. # ## sadc collector utility (e.g: DISK, SNMP etc...)
  508. # ## The more activities that are added, the more data is collected.
  509. # # activities = ["DISK"]
  510. # #
  511. # #
  512. # ## Group metrics to measurements.
  513. # ##
  514. # ## If group is false each metric will be prefixed with a description
  515. # ## and represents itself a measurement.
  516. # ##
  517. # ## If Group is true, corresponding metrics are grouped to a single measurement.
  518. # # group = true
  519. # #
  520. # #
  521. # ## Options for the sadf command. The values on the left represent the sadf
  522. # ## options and the values on the right their description (which are used for
  523. # ## grouping and prefixing metrics).
  524. # ##
  525. # ## Run 'sar -h' or 'man sar' to find out the supported options for your
  526. # ## sysstat version.
  527. # [inputs.sysstat.options]
  528. # -C = "cpu"
  529. # -B = "paging"
  530. # -b = "io"
  531. # -d = "disk" # requires DISK activity
  532. # "-n ALL" = "network"
  533. # "-P ALL" = "per_cpu"
  534. # -q = "queue"
  535. # -R = "mem"
  536. # -r = "mem_util"
  537. # -S = "swap_util"
  538. # -u = "cpu_util"
  539. # -v = "inode"
  540. # -W = "swap"
  541. # -w = "task"
  542. # # -H = "hugepages" # only available for newer linux distributions
  543. # # "-I ALL" = "interrupts" # requires INT activity
  544. # #
  545. # #
  546. # ## Device tags can be used to add additional tags for devices.
  547. # ## For example the configuration below adds a tag vg with value rootvg for
  548. # ## all metrics with sda devices.
  549. # # [[inputs.sysstat.device_tags.sda]]
  550. # # vg = "rootvg"
  551.  
  552.  
  553. # # Reads metrics from a Teamspeak 3 Server via ServerQuery
  554. # [[inputs.teamspeak]]
  555. # ## Server address for Teamspeak 3 ServerQuery
  556. # # server = "127.0.0.1:10011"
  557. # ## Username for ServerQuery
  558. # username = "serverqueryuser"
  559. # ## Password for ServerQuery
  560. # password = "secret"
  561. # ## Array of virtual servers
  562. # # virtual_servers = [1]
  563.  
  564.  
  565. # # Read metrics about temperature
  566. # [[inputs.temp]]
  567. # # no configuration
  568.  
  569.  
  570. # # Read Tengine's basic status information (ngx_http_reqstat_module)
  571. # [[inputs.tengine]]
  572. # # An array of Tengine reqstat module URI to gather stats.
  573. # urls = ["http://127.0.0.1/us"]
  574. #
  575. # # HTTP response timeout (default: 5s)
  576. # # response_timeout = "5s"
  577. #
  578. # ## Optional TLS Config
  579. # # tls_ca = "/etc/telegraf/ca.pem"
  580. # # tls_cert = "/etc/telegraf/cert.cer"
  581. # # tls_key = "/etc/telegraf/key.key"
  582. # ## Use TLS but skip chain & host verification
  583. # # insecure_skip_verify = false
  584.  
  585.  
  586. # # Gather metrics from the Tomcat server status page.
  587. # [[inputs.tomcat]]
  588. # ## URL of the Tomcat server status
  589. # # url = "http://127.0.0.1:8080/manager/status/all?XML=true"
  590. #
  591. # ## HTTP Basic Auth Credentials
  592. # # username = "tomcat"
  593. # # password = "s3cret"
  594. #
  595. # ## Request timeout
  596. # # timeout = "5s"
  597. #
  598. # ## Optional TLS Config
  599. # # tls_ca = "/etc/telegraf/ca.pem"
  600. # # tls_cert = "/etc/telegraf/cert.pem"
  601. # # tls_key = "/etc/telegraf/key.pem"
  602. # ## Use TLS but skip chain & host verification
  603. # # insecure_skip_verify = false
  604.  
  605.  
  606. # # Inserts sine and cosine waves for demonstration purposes
  607. # [[inputs.trig]]
  608. # ## Set the amplitude
  609. # amplitude = 10.0
  610.  
  611.  
  612. # # Read Twemproxy stats data
  613. # [[inputs.twemproxy]]
  614. # ## Twemproxy stats address and port (no scheme)
  615. # addr = "localhost:22222"
  616. # ## Monitor pool name
  617. # pools = ["redis_pool", "mc_pool"]
  618.  
  619.  
  620. # # A plugin to collect stats from the Unbound DNS resolver
  621. # [[inputs.unbound]]
  622. # ## Address of server to connect to, read from unbound conf default, optionally ':port'
  623. # ## Will lookup IP if given a hostname
  624. # server = "127.0.0.1:8953"
  625. #
  626. # ## If running as a restricted user you can prepend sudo for additional access:
  627. # # use_sudo = false
  628. #
  629. # ## The default location of the unbound-control binary can be overridden with:
  630. # # binary = "/usr/sbin/unbound-control"
  631. #
  632. # ## The default timeout of 1s can be overridden with:
  633. # # timeout = "1s"
  634. #
  635. # ## When set to true, thread metrics are tagged with the thread id.
  636. # ##
  637. # ## The default is false for backwards compatibility, and will be change to
  638. # ## true in a future version. It is recommended to set to true on new
  639. # ## deployments.
  640. # thread_as_tag = false
  641.  
  642.  
  643. # # A plugin to collect stats from Varnish HTTP Cache
  644. # [[inputs.varnish]]
  645. # ## If running as a restricted user you can prepend sudo for additional access:
  646. # #use_sudo = false
  647. #
  648. # ## The default location of the varnishstat binary can be overridden with:
  649. # binary = "/usr/bin/varnishstat"
  650. #
  651. # ## By default, telegraf gather stats for 3 metric points.
  652. # ## Setting stats will override the defaults shown below.
  653. # ## Glob matching can be used, ie, stats = ["MAIN.*"]
  654. # ## stats may also be set to ["*"], which will collect all stats
  655. # stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]
  656. #
  657. # ## Optional name for the varnish instance (or working directory) to query
  658. # ## Usually appended after -n in varnish cli
  659. # # instance_name = "instanceName"
  660.  
  661.  
  662. # # Monitor wifi signal strength and quality
  663. # [[inputs.wireless]]
  664. # ## Sets 'proc' directory path
  665. # ## If not specified, then default is /proc
  666. # # host_proc = "/proc"
  667.  
  668.  
  669. # # Reads metrics from a SSL certificate
  670. # [[inputs.x509_cert]]
  671. # ## List certificate sources
  672. # sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"]
  673. #
  674. # ## Timeout for SSL connection
  675. # # timeout = "5s"
  676. #
  677. # ## Optional TLS Config
  678. # # tls_ca = "/etc/telegraf/ca.pem"
  679. # # tls_cert = "/etc/telegraf/cert.pem"
  680. # # tls_key = "/etc/telegraf/key.pem"
  681. #
  682. # ## Use TLS but skip chain & host verification
  683. # # insecure_skip_verify = false
  684.  
  685.  
  686. # # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools
  687. # [[inputs.zfs]]
  688. # ## ZFS kstat path. Ignored on FreeBSD
  689. # ## If not specified, then default is:
  690. # # kstatPath = "/proc/spl/kstat/zfs"
  691. #
  692. # ## By default, telegraf gather all zfs stats
  693. # ## If not specified, then default is:
  694. # # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
  695. # ## For Linux, the default is:
  696. # # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats",
  697. # # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"]
  698. # ## By default, don't gather zpool stats
  699. # # poolMetrics = false
  700.  
  701.  
  702. # # Reads 'mntr' stats from one or many zookeeper servers
  703. # [[inputs.zookeeper]]
  704. # ## An array of address to gather stats about. Specify an ip or hostname
  705. # ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
  706. #
  707. # ## If no servers are specified, then localhost is used as the host.
  708. # ## If no port is specified, 2181 is used
  709. # servers = [":2181"]
  710. #
  711. # ## Timeout for metric collections from all servers. Minimum timeout is "1s".
  712. # # timeout = "5s"
  713. #
  714. # ## Optional TLS Config
  715. # # enable_tls = true
  716. # # tls_ca = "/etc/telegraf/ca.pem"
  717. # # tls_cert = "/etc/telegraf/cert.pem"
  718. # # tls_key = "/etc/telegraf/key.pem"
  719. # ## If false, skip chain & host verification
  720. # # insecure_skip_verify = true
  721.  
  722.  
  723.  
  724. ###############################################################################
  725. # SERVICE INPUT PLUGINS #
  726. ###############################################################################
  727.  
  728. # # AMQP consumer plugin
  729. # [[inputs.amqp_consumer]]
  730. # ## Broker to consume from.
  731. # ## deprecated in 1.7; use the brokers option
  732. # # url = "amqp://localhost:5672/influxdb"
  733. #
  734. # ## Brokers to consume from. If multiple brokers are specified a random broker
  735. # ## will be selected anytime a connection is established. This can be
  736. # ## helpful for load balancing when not using a dedicated load balancer.
  737. # brokers = ["amqp://localhost:5672/influxdb"]
  738. #
  739. # ## Authentication credentials for the PLAIN auth_method.
  740. # # username = ""
  741. # # password = ""
  742. #
  743. # ## Exchange to declare and consume from.
  744. # exchange = "telegraf"
  745. #
  746. # ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
  747. # # exchange_type = "topic"
  748. #
  749. # ## If true, exchange will be passively declared.
  750. # # exchange_passive = false
  751. #
  752. # ## Exchange durability can be either "transient" or "durable".
  753. # # exchange_durability = "durable"
  754. #
  755. # ## Additional exchange arguments.
  756. # # exchange_arguments = { }
  757. # # exchange_arguments = {"hash_property" = "timestamp"}
  758. #
  759. # ## AMQP queue name.
  760. # queue = "telegraf"
  761. #
  762. # ## AMQP queue durability can be "transient" or "durable".
  763. # queue_durability = "durable"
  764. #
  765. # ## Binding Key.
  766. # binding_key = "#"
  767. #
  768. # ## Maximum number of messages server should give to the worker.
  769. # # prefetch_count = 50
  770. #
  771. # ## Maximum messages to read from the broker that have not been written by an
  772. # ## output. For best throughput set based on the number of metrics within
  773. # ## each message and the size of the output's metric_batch_size.
  774. # ##
  775. # ## For example, if each message from the queue contains 10 metrics and the
  776. # ## output metric_batch_size is 1000, setting this to 100 will ensure that a
  777. # ## full batch is collected and the write is triggered immediately without
  778. # ## waiting until the next flush_interval.
  779. # # max_undelivered_messages = 1000
  780. #
  781. # ## Auth method. PLAIN and EXTERNAL are supported
  782. # ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
  783. # ## described here: https://www.rabbitmq.com/plugins.html
  784. # # auth_method = "PLAIN"
  785. #
  786. # ## Optional TLS Config
  787. # # tls_ca = "/etc/telegraf/ca.pem"
  788. # # tls_cert = "/etc/telegraf/cert.pem"
  789. # # tls_key = "/etc/telegraf/key.pem"
  790. # ## Use TLS but skip chain & host verification
  791. # # insecure_skip_verify = false
  792. #
  793. # ## Data format to consume.
  794. # ## Each data format has its own unique set of configuration options, read
  795. # ## more about them here:
  796. # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  797. # data_format = "influx"
  798.  
  799.  
  800. # # Read Cassandra metrics through Jolokia
  801. # [[inputs.cassandra]]
  802. # ## DEPRECATED: The cassandra plugin has been deprecated. Please use the
  803. # ## jolokia2 plugin instead.
  804. # ##
  805. # ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
  806. #
  807. # context = "/jolokia/read"
  808. # ## List of cassandra servers exposing jolokia read service
  809. # servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
  810. # ## List of metrics collected on above servers
  811. # ## Each metric consists of a jmx path.
  812. # ## This will collect all heap memory usage metrics from the jvm and
  813. # ## ReadLatency metrics for all keyspaces and tables.
  814. # ## "type=Table" in the query works with Cassandra3.0. Older versions might
  815. # ## need to use "type=ColumnFamily"
  816. # metrics = [
  817. # "/java.lang:type=Memory/HeapMemoryUsage",
  818. # "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
  819. # ]
  820.  
  821.  
  822. # # Influx HTTP write listener
  823. # [[inputs.http_listener]]
  824. # ## Address and port to host HTTP listener on
  825. # service_address = ":8186"
  826. #
  827. # ## maximum duration before timing out read of the request
  828. # read_timeout = "10s"
  829. # ## maximum duration before timing out write of the response
  830. # write_timeout = "10s"
  831. #
  832. # ## Maximum allowed http request body size in bytes.
  833. # ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
  834. # max_body_size = "500MiB"
  835. #
  836. # ## Maximum line size allowed to be sent in bytes.
  837. # ## 0 means to use the default of 65536 bytes (64 kibibytes)
  838. # max_line_size = "64KiB"
  839. #
  840. # ## Set one or more allowed client CA certificate file names to
  841. # ## enable mutually authenticated TLS connections
  842. # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
  843. #
  844. # ## Add service certificate and key
  845. # tls_cert = "/etc/telegraf/cert.pem"
  846. # tls_key = "/etc/telegraf/key.pem"
  847. #
  848. # ## Optional username and password to accept for HTTP basic authentication.
  849. # ## You probably want to make sure you have TLS configured above for this.
  850. # # basic_username = "foobar"
  851. # # basic_password = "barfoo"
  852.  
  853.  
  854. # # Generic HTTP write listener
  855. # [[inputs.http_listener_v2]]
  856. # ## Address and port to host HTTP listener on
  857. # service_address = ":8080"
  858. #
  859. # ## Path to listen to.
  860. # # path = "/telegraf"
  861. #
  862. # ## HTTP methods to accept.
  863. # # methods = ["POST", "PUT"]
  864. #
  865. # ## maximum duration before timing out read of the request
  866. # # read_timeout = "10s"
  867. # ## maximum duration before timing out write of the response
  868. # # write_timeout = "10s"
  869. #
  870. # ## Maximum allowed http request body size in bytes.
  871. # ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
  872. # # max_body_size = "500MB"
  873. #
  874. # ## Set one or more allowed client CA certificate file names to
  875. # ## enable mutually authenticated TLS connections
  876. # # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
  877. #
  878. # ## Add service certificate and key
  879. # # tls_cert = "/etc/telegraf/cert.pem"
  880. # # tls_key = "/etc/telegraf/key.pem"
  881. #
  882. # ## Optional username and password to accept for HTTP basic authentication.
  883. # ## You probably want to make sure you have TLS configured above for this.
  884. # # basic_username = "foobar"
  885. # # basic_password = "barfoo"
  886. #
  887. # ## Data format to consume.
  888. # ## Each data format has its own unique set of configuration options, read
  889. # ## more about them here:
  890. # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  891. # data_format = "influx"
  892.  
  893.  
  894. # # Influx HTTP write listener
  895. # [[inputs.influxdb_listener]]
  896. # ## Address and port to host HTTP listener on
  897. # service_address = ":8186"
  898. #
  899. # ## maximum duration before timing out read of the request
  900. # read_timeout = "10s"
  901. # ## maximum duration before timing out write of the response
  902. # write_timeout = "10s"
  903. #
  904. # ## Maximum allowed http request body size in bytes.
  905. # ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
  906. # max_body_size = "500MiB"
  907. #
  908. # ## Maximum line size allowed to be sent in bytes.
  909. # ## 0 means to use the default of 65536 bytes (64 kibibytes)
  910. # max_line_size = "64KiB"
  911. #
  912. # ## Set one or more allowed client CA certificate file names to
  913. # ## enable mutually authenticated TLS connections
  914. # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
  915. #
  916. # ## Add service certificate and key
  917. # tls_cert = "/etc/telegraf/cert.pem"
  918. # tls_key = "/etc/telegraf/key.pem"
  919. #
  920. # ## Optional username and password to accept for HTTP basic authentication.
  921. # ## You probably want to make sure you have TLS configured above for this.
  922. # # basic_username = "foobar"
  923. # # basic_password = "barfoo"
  924.  
  925.  
  926. # # Read JTI OpenConfig Telemetry from listed sensors
  927. # [[inputs.jti_openconfig_telemetry]]
  928. # ## List of device addresses to collect telemetry from
  929. # servers = ["localhost:1883"]
  930. #
  931. # ## Authentication details. Username and password are must if device expects
  932. # ## authentication. Client ID must be unique when connecting from multiple instances
  933. # ## of telegraf to the same device
  934. # username = "user"
  935. # password = "pass"
  936. # client_id = "telegraf"
  937. #
  938. # ## Frequency to get data
  939. # sample_frequency = "1000ms"
  940. #
  941. # ## Sensors to subscribe for
  942. # ## An identifier for each sensor can be provided in path by separating with space
  943. # ## Else sensor path will be used as identifier
  944. # ## When identifier is used, we can provide a list of space separated sensors.
  945. # ## A single subscription will be created with all these sensors and data will
  946. # ## be saved to measurement with this identifier name
  947. # sensors = [
  948. # "/interfaces/",
  949. # "collection /components/ /lldp",
  950. # ]
  951. #
  952. # ## We allow specifying sensor group level reporting rate. To do this, specify the
  953. # ## reporting rate in Duration at the beginning of sensor paths / collection
  954. # ## name. For entries without reporting rate, we use configured sample frequency
  955. # sensors = [
  956. # "1000ms customReporting /interfaces /lldp",
  957. # "2000ms collection /components",
  958. # "/interfaces",
  959. # ]
  960. #
  961. # ## x509 Certificate to use with TLS connection. If it is not provided, an insecure
  962. # ## channel will be opened with server
  963. # ssl_cert = "/etc/telegraf/cert.pem"
  964. #
  965. # ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
  966. # ## Failed streams/calls will not be retried if 0 is provided
  967. # retry_delay = "1000ms"
  968. #
  969. # ## To treat all string values as tags, set this to true
  970. # str_as_tags = false
  971.  
  972.  
  973. # # Read metrics from Kafka topic(s)
  974. # [[inputs.kafka_consumer]]
  975. # ## kafka servers
  976. # brokers = ["localhost:9092"]
  977. # ## topic(s) to consume
  978. # topics = ["telegraf"]
  979. #
  980. # ## Optional Client id
  981. # # client_id = "Telegraf"
  982. #
  983. # ## Set the minimal supported Kafka version. Setting this enables the use of new
  984. # ## Kafka features and APIs. Of particular interest, lz4 compression
  985. # ## requires at least version 0.10.0.0.
  986. # ## ex: version = "1.1.0"
  987. # # version = ""
  988. #
  989. # ## Optional TLS Config
  990. # # tls_ca = "/etc/telegraf/ca.pem"
  991. # # tls_cert = "/etc/telegraf/cert.pem"
  992. # # tls_key = "/etc/telegraf/key.pem"
  993. # ## Use TLS but skip chain & host verification
  994. # # insecure_skip_verify = false
  995. #
  996. # ## Optional SASL Config
  997. # # sasl_username = "kafka"
  998. # # sasl_password = "secret"
  999. #
  1000. # ## the name of the consumer group
  1001. # consumer_group = "telegraf_metrics_consumers"
  1002. # ## Offset (must be either "oldest" or "newest")
  1003. # offset = "oldest"
  1004. # ## Maximum length of a message to consume, in bytes (default 0/unlimited);
  1005. # ## larger messages are dropped
  1006. # max_message_len = 1000000
  1007. #
  1008. # ## Maximum messages to read from the broker that have not been written by an
  1009. # ## output. For best throughput set based on the number of metrics within
  1010. # ## each message and the size of the output's metric_batch_size.
  1011. # ##
  1012. # ## For example, if each message from the queue contains 10 metrics and the
  1013. # ## output metric_batch_size is 1000, setting this to 100 will ensure that a
  1014. # ## full batch is collected and the write is triggered immediately without
  1015. # ## waiting until the next flush_interval.
  1016. # # max_undelivered_messages = 1000
  1017. #
  1018. # ## Data format to consume.
  1019. # ## Each data format has its own unique set of configuration options, read
  1020. # ## more about them here:
  1021. # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  1022. # data_format = "influx"
  1023.  
  1024.  
  1025. # # Read metrics from Kafka topic(s)
  1026. # [[inputs.kafka_consumer_legacy]]
  1027. # ## topic(s) to consume
  1028. # topics = ["telegraf"]
  1029. # ## an array of Zookeeper connection strings
  1030. # zookeeper_peers = ["localhost:2181"]
  1031. # ## Zookeeper Chroot
  1032. # zookeeper_chroot = ""
  1033. # ## the name of the consumer group
  1034. # consumer_group = "telegraf_metrics_consumers"
  1035. # ## Offset (must be either "oldest" or "newest")
  1036. # offset = "oldest"
  1037. #
  1038. # ## Data format to consume.
  1039. # ## Each data format has its own unique set of configuration options, read
  1040. # ## more about them here:
  1041. # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  1042. # data_format = "influx"
  1043. #
  1044. # ## Maximum length of a message to consume, in bytes (default 0/unlimited);
  1045. # ## larger messages are dropped
  1046. # max_message_len = 65536
  1047.  
  1048.  
  1049. # # Stream and parse log file(s).
  1050. # [[inputs.logparser]]
  1051. # ## Log files to parse.
  1052. # ## These accept standard unix glob matching rules, but with the addition of
  1053. # ## ** as a "super asterisk". ie:
  1054. # ## /var/log/**.log -> recursively find all .log files in /var/log
  1055. # ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
  1056. # ## /var/log/apache.log -> only tail the apache log file
  1057. # files = ["/var/log/apache/access.log"]
  1058. #
  1059. # ## Read files that currently exist from the beginning. Files that are created
  1060. # ## while telegraf is running (and that match the "files" globs) will always
  1061. # ## be read from the beginning.
  1062. # from_beginning = false
  1063. #
  1064. # ## Method used to watch for file updates. Can be either "inotify" or "poll".
  1065. # # watch_method = "inotify"
  1066. #
  1067. # ## Parse logstash-style "grok" patterns:
  1068. # [inputs.logparser.grok]
  1069. # ## This is a list of patterns to check the given log file(s) for.
  1070. # ## Note that adding patterns here increases processing time. The most
  1071. # ## efficient configuration is to have one pattern per logparser.
  1072. # ## Other common built-in patterns are:
  1073. # ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
  1074. # ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
  1075. # patterns = ["%{COMBINED_LOG_FORMAT}"]
  1076. #
  1077. # ## Name of the outputted measurement name.
  1078. # measurement = "apache_access_log"
  1079. #
  1080. # ## Full path(s) to custom pattern files.
  1081. # custom_pattern_files = []
  1082. #
  1083. # ## Custom patterns can also be defined here. Put one pattern per line.
  1084. # custom_patterns = '''
  1085. # '''
  1086. #
  1087. # ## Timezone allows you to provide an override for timestamps that
  1088. # ## don't already include an offset
  1089. # ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
  1090. # ##
  1091. # ## Default: "" which renders UTC
  1092. # ## Options are as follows:
  1093. # ## 1. Local -- interpret based on machine localtime
  1094. # ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
  1095. # ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
  1096. # # timezone = "Canada/Eastern"
  1097.  
  1098.  
  1099. # # Read metrics from MQTT topic(s)
  1100. # [[inputs.mqtt_consumer]]
  1101. # ## MQTT broker URLs to be used. The format should be scheme://host:port,
  1102. # ## schema can be tcp, ssl, or ws.
  1103. # servers = ["tcp://localhost:1883"]
  1104. #
  1105. # ## QoS policy for messages
  1106. # ## 0 = at most once
  1107. # ## 1 = at least once
  1108. # ## 2 = exactly once
  1109. # ##
  1110. # ## When using a QoS of 1 or 2, you should enable persistent_session to allow
  1111. # ## resuming unacknowledged messages.
  1112. # qos = 0
  1113. #
  1114. # ## Connection timeout for initial connection in seconds
  1115. # connection_timeout = "30s"
  1116. #
  1117. # ## Maximum messages to read from the broker that have not been written by an
  1118. # ## output. For best throughput set based on the number of metrics within
  1119. # ## each message and the size of the output's metric_batch_size.
  1120. # ##
  1121. # ## For example, if each message from the queue contains 10 metrics and the
  1122. # ## output metric_batch_size is 1000, setting this to 100 will ensure that a
  1123. # ## full batch is collected and the write is triggered immediately without
  1124. # ## waiting until the next flush_interval.
  1125. # # max_undelivered_messages = 1000
  1126. #
  1127. # ## Topics to subscribe to
  1128. # topics = [
  1129. # "telegraf/host01/cpu",
  1130. # "telegraf/+/mem",
  1131. # "sensors/#",
  1132. # ]
  1133. #
  1134. # # if true, messages that can't be delivered while the subscriber is offline
  1135. # # will be delivered when it comes back (such as on service restart).
  1136. # # NOTE: if true, client_id MUST be set
  1137. # persistent_session = false
  1138. # # If empty, a random client ID will be generated.
  1139. # client_id = ""
  1140. #
  1141. # ## username and password to connect MQTT server.
  1142. # # username = "telegraf"
  1143. # # password = "metricsmetricsmetricsmetrics"
  1144. #
  1145. # ## Optional TLS Config
  1146. # # tls_ca = "/etc/telegraf/ca.pem"
  1147. # # tls_cert = "/etc/telegraf/cert.pem"
  1148. # # tls_key = "/etc/telegraf/key.pem"
  1149. # ## Use TLS but skip chain & host verification
  1150. # # insecure_skip_verify = false
  1151. #
  1152. # ## Data format to consume.
  1153. # ## Each data format has its own unique set of configuration options, read
  1154. # ## more about them here:
  1155. # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  1156. # data_format = "influx"
  1157.  
  1158.  
  1159. # # Read metrics from NATS subject(s)
  1160. # [[inputs.nats_consumer]]
  1161. # ## urls of NATS servers
  1162. # servers = ["nats://localhost:4222"]
  1163. # ## Use Transport Layer Security
  1164. # secure = false
  1165. # ## subject(s) to consume
  1166. # subjects = ["telegraf"]
  1167. # ## name a queue group
  1168. # queue_group = "telegraf_consumers"
  1169. #
  1170. # ## Sets the limits for pending msgs and bytes for each subscription
  1171. # ## These shouldn't need to be adjusted except in very high throughput scenarios
  1172. # # pending_message_limit = 65536
  1173. # # pending_bytes_limit = 67108864
  1174. #
  1175. # ## Maximum messages to read from the broker that have not been written by an
  1176. # ## output. For best throughput set based on the number of metrics within
  1177. # ## each message and the size of the output's metric_batch_size.
  1178. # ##
  1179. # ## For example, if each message from the queue contains 10 metrics and the
  1180. # ## output metric_batch_size is 1000, setting this to 100 will ensure that a
  1181. # ## full batch is collected and the write is triggered immediately without
  1182. # ## waiting until the next flush_interval.
  1183. # # max_undelivered_messages = 1000
  1184. #
  1185. # ## Data format to consume.
  1186. # ## Each data format has its own unique set of configuration options, read
  1187. # ## more about them here:
  1188. # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  1189. # data_format = "influx"
  1190.  
  1191.  
  1192. # # Read NSQ topic for metrics.
  1193. # [[inputs.nsq_consumer]]
  1194. # ## Server option still works but is deprecated, we just prepend it to the nsqd array.
  1195. # # server = "localhost:4150"
  1196. # ## An array representing the NSQD TCP Endpoints
  1197. # nsqd = ["localhost:4150"]
  1198. # ## An array representing the NSQLookupd HTTP Endpoints
  1199. # nsqlookupd = ["localhost:4161"]
  1200. # topic = "telegraf"
  1201. # channel = "consumer"
  1202. # max_in_flight = 100
  1203. #
  1204. # ## Maximum messages to read from the broker that have not been written by an
  1205. # ## output. For best throughput set based on the number of metrics within
  1206. # ## each message and the size of the output's metric_batch_size.
  1207. # ##
  1208. # ## For example, if each message from the queue contains 10 metrics and the
  1209. # ## output metric_batch_size is 1000, setting this to 100 will ensure that a
  1210. # ## full batch is collected and the write is triggered immediately without
  1211. # ## waiting until the next flush_interval.
  1212. # # max_undelivered_messages = 1000
  1213. #
  1214. # ## Data format to consume.
  1215. # ## Each data format has its own unique set of configuration options, read
  1216. # ## more about them here:
  1217. # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  1218. # data_format = "influx"
  1219.  
  1220.  
  1221. # # Read metrics from one or many pgbouncer servers
  1222. # [[inputs.pgbouncer]]
  1223. # ## specify address via a url matching:
  1224. # ## postgres://[pqgotest[:password]]@localhost[/dbname]\
  1225. # ## ?sslmode=[disable|verify-ca|verify-full]
  1226. # ## or a simple string:
  1227. # ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
  1228. # ##
  1229. # ## All connection parameters are optional.
  1230. # ##
  1231. # address = "host=localhost user=pgbouncer sslmode=disable"
  1232.  
  1233.  
  1234. # # Read metrics from one or many postgresql servers
  1235. # [[inputs.postgresql]]
  1236. # ## specify address via a url matching:
  1237. # ## postgres://[pqgotest[:password]]@localhost[/dbname]\
  1238. # ## ?sslmode=[disable|verify-ca|verify-full]
  1239. # ## or a simple string:
  1240. # ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
  1241. # ##
  1242. # ## All connection parameters are optional.
  1243. # ##
  1244. # ## Without the dbname parameter, the driver will default to a database
  1245. # ## with the same name as the user. This dbname is just for instantiating a
  1246. # ## connection with the server and doesn't restrict the databases we are trying
  1247. # ## to grab metrics for.
  1248. # ##
  1249. # address = "host=localhost user=postgres sslmode=disable"
  1250. # ## A custom name for the database that will be used as the "server" tag in the
  1251. # ## measurement output. If not specified, a default one generated from
  1252. # ## the connection address is used.
  1253. # # outputaddress = "db01"
  1254. #
  1255. # ## connection configuration.
  1256. # ## maxlifetime - specify the maximum lifetime of a connection.
  1257. # ## default is forever (0s)
  1258. # max_lifetime = "0s"
  1259. #
  1260. # ## A list of databases to explicitly ignore. If not specified, metrics for all
  1261. # ## databases are gathered. Do NOT use with the 'databases' option.
  1262. # # ignored_databases = ["postgres", "template0", "template1"]
  1263. #
  1264. # ## A list of databases to pull metrics about. If not specified, metrics for all
  1265. # ## databases are gathered. Do NOT use with the 'ignored_databases' option.
  1266. # # databases = ["app_production", "testing"]
  1267.  
  1268.  
  1269. # # Read metrics from one or many postgresql servers
  1270. # [[inputs.postgresql_extensible]]
  1271. # ## specify address via a url matching:
  1272. # ## postgres://[pqgotest[:password]]@localhost[/dbname]\
  1273. # ## ?sslmode=[disable|verify-ca|verify-full]
  1274. # ## or a simple string:
  1275. # ## host=localhost user=pqotest password=... sslmode=... dbname=app_production
  1276. # #
  1277. # ## All connection parameters are optional. #
  1278. # ## Without the dbname parameter, the driver will default to a database
  1279. # ## with the same name as the user. This dbname is just for instantiating a
  1280. # ## connection with the server and doesn't restrict the databases we are trying
  1281. # ## to grab metrics for.
  1282. # #
  1283. # address = "host=localhost user=postgres sslmode=disable"
  1284. #
  1285. # ## connection configuration.
  1286. # ## maxlifetime - specify the maximum lifetime of a connection.
  1287. # ## default is forever (0s)
  1288. # max_lifetime = "0s"
  1289. #
  1290. # ## A list of databases to pull metrics about. If not specified, metrics for all
  1291. # ## databases are gathered.
  1292. # ## databases = ["app_production", "testing"]
  1293. # #
  1294. # ## A custom name for the database that will be used as the "server" tag in the
  1295. # ## measurement output. If not specified, a default one generated from
  1296. # ## the connection address is used.
  1297. # # outputaddress = "db01"
  1298. # #
  1299. # ## Define the toml config where the sql queries are stored
  1300. # ## New queries can be added, if the withdbname is set to true and there is no
  1301. # ## databases defined in the 'databases field', the sql query is ended by a
  1302. # ## 'is not null' in order to make the query succeed.
  1303. # ## Example :
  1304. # ## The sqlquery : "SELECT * FROM pg_stat_database where datname" becomes
  1305. # ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
  1306. # ## because the databases variable was set to ['postgres', 'pgbench' ] and the
  1307. # ## withdbname was true. Be careful that if the withdbname is set to false you
  1308. # ## don't have to define the where clause (aka with the dbname) the tagvalue
  1309. # ## field is used to define custom tags (separated by commas)
  1310. # ## The optional "measurement" value can be used to override the default
  1311. # ## output measurement name ("postgresql").
  1312. # #
  1313. # ## Structure :
  1314. # ## [[inputs.postgresql_extensible.query]]
  1315. # ## sqlquery string
  1316. # ## version string
  1317. # ## withdbname boolean
  1318. # ## tagvalue string (comma separated)
  1319. # ## measurement string
  1320. # [[inputs.postgresql_extensible.query]]
  1321. # sqlquery="SELECT * FROM pg_stat_database"
  1322. # version=901
  1323. # withdbname=false
  1324. # tagvalue=""
  1325. # measurement=""
  1326. # [[inputs.postgresql_extensible.query]]
  1327. # sqlquery="SELECT * FROM pg_stat_bgwriter"
  1328. # version=901
  1329. # withdbname=false
  1330. # tagvalue="postgresql.stats"
  1331.  
  1332.  
  1333. # # Read metrics from one or many prometheus clients
  1334. # [[inputs.prometheus]]
  1335. # ## An array of urls to scrape metrics from.
  1336. # urls = ["http://localhost:9100/metrics"]
  1337. #
  1338. # ## An array of Kubernetes services to scrape metrics from.
  1339. # # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
  1340. #
  1341. # ## Kubernetes config file to create client from.
  1342. # # kube_config = "/path/to/kubernetes.config"
  1343. #
  1344. # ## Scrape Kubernetes pods for the following prometheus annotations:
  1345. # ## - prometheus.io/scrape: Enable scraping for this pod
  1346. # ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
  1347. # ## set this to 'https' & most likely set the tls config.
  1348. # ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
  1349. # ## - prometheus.io/port: If port is not 9102 use this annotation
  1350. # # monitor_kubernetes_pods = true
  1351. #
  1352. # ## Use bearer token for authorization
  1353. # # bearer_token = /path/to/bearer/token
  1354. #
  1355. # ## Specify timeout duration for slower prometheus clients (default is 3s)
  1356. # # response_timeout = "3s"
  1357. #
  1358. # ## Optional TLS Config
  1359. # # tls_ca = /path/to/cafile
  1360. # # tls_cert = /path/to/certfile
  1361. # # tls_key = /path/to/keyfile
  1362. # ## Use TLS but skip chain & host verification
  1363. # # insecure_skip_verify = false
  1364.  
  1365.  
  1366. # # Generic socket listener capable of handling multiple socket types.
  1367. # [[inputs.socket_listener]]
  1368. # ## URL to listen on
  1369. # # service_address = "tcp://:8094"
  1370. # # service_address = "tcp://127.0.0.1:http"
  1371. # # service_address = "tcp4://:8094"
  1372. # # service_address = "tcp6://:8094"
  1373. # # service_address = "tcp6://[2001:db8::1]:8094"
  1374. # # service_address = "udp://:8094"
  1375. # # service_address = "udp4://:8094"
  1376. # # service_address = "udp6://:8094"
  1377. # # service_address = "unix:///tmp/telegraf.sock"
  1378. # # service_address = "unixgram:///tmp/telegraf.sock"
  1379. #
  1380. # ## Maximum number of concurrent connections.
  1381. # ## Only applies to stream sockets (e.g. TCP).
  1382. # ## 0 (default) is unlimited.
  1383. # # max_connections = 1024
  1384. #
  1385. # ## Read timeout.
  1386. # ## Only applies to stream sockets (e.g. TCP).
  1387. # ## 0 (default) is unlimited.
  1388. # # read_timeout = "30s"
  1389. #
  1390. # ## Optional TLS configuration.
  1391. # ## Only applies to stream sockets (e.g. TCP).
  1392. # # tls_cert = "/etc/telegraf/cert.pem"
  1393. # # tls_key = "/etc/telegraf/key.pem"
  1394. # ## Enables client authentication if set.
  1395. # # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
  1396. #
  1397. # ## Maximum socket buffer size (in bytes when no unit specified).
  1398. # ## For stream sockets, once the buffer fills up, the sender will start backing up.
  1399. # ## For datagram sockets, once the buffer fills up, metrics will start dropping.
  1400. # ## Defaults to the OS default.
  1401. # # read_buffer_size = "64KiB"
  1402. #
  1403. # ## Period between keep alive probes.
  1404. # ## Only applies to TCP sockets.
  1405. # ## 0 disables keep alive probes.
  1406. # ## Defaults to the OS configuration.
  1407. # # keep_alive_period = "5m"
  1408. #
  1409. # ## Data format to consume.
  1410. # ## Each data format has its own unique set of configuration options, read
  1411. # ## more about them here:
  1412. # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  1413. # # data_format = "influx"
  1414.  
  1415.  
  1416. # # Statsd UDP/TCP Server
  1417. # [[inputs.statsd]]
  1418. # ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
  1419. # protocol = "udp"
  1420. #
  1421. # ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
  1422. # max_tcp_connections = 250
  1423. #
  1424. # ## Enable TCP keep alive probes (default=false)
  1425. # tcp_keep_alive = false
  1426. #
  1427. # ## Specifies the keep-alive period for an active network connection.
  1428. # ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
  1429. # ## Defaults to the OS configuration.
  1430. # # tcp_keep_alive_period = "2h"
  1431. #
  1432. # ## Address and port to host UDP listener on
  1433. # service_address = ":8125"
  1434. #
  1435. # ## The following configuration options control when telegraf clears its cache
  1436. # ## of previous values. If set to false, then telegraf will only clear its
  1437. # ## cache when the daemon is restarted.
  1438. # ## Reset gauges every interval (default=true)
  1439. # delete_gauges = true
  1440. # ## Reset counters every interval (default=true)
  1441. # delete_counters = true
  1442. # ## Reset sets every interval (default=true)
  1443. # delete_sets = true
  1444. # ## Reset timings & histograms every interval (default=true)
  1445. # delete_timings = true
  1446. #
  1447. # ## Percentiles to calculate for timing & histogram stats
  1448. # percentiles = [90]
  1449. #
  1450. # ## separator to use between elements of a statsd metric
  1451. # metric_separator = "_"
  1452. #
  1453. # ## Parses tags in the datadog statsd format
  1454. # ## http://docs.datadoghq.com/guides/dogstatsd/
  1455. # parse_data_dog_tags = false
  1456. #
  1457. # ## Statsd data translation templates, more info can be read here:
  1458. # ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
  1459. # # templates = [
  1460. # # "cpu.* measurement*"
  1461. # # ]
  1462. #
  1463. # ## Number of UDP messages allowed to queue up, once filled,
  1464. # ## the statsd server will start dropping packets
  1465. # allowed_pending_messages = 10000
  1466. #
  1467. # ## Number of timing/histogram values to track per-measurement in the
  1468. # ## calculation of percentiles. Raising this limit increases the accuracy
  1469. # ## of percentiles but also increases the memory usage and cpu time.
  1470. # percentile_limit = 1000
  1471.  
  1472.  
  1473. # # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587
  1474. # [[inputs.syslog]]
  1475. # ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514
  1476. # ## Protocol, address and port to host the syslog receiver.
  1477. # ## If no host is specified, then localhost is used.
  1478. # ## If no port is specified, 6514 is used (RFC5425#section-4.1).
  1479. # server = "tcp://:6514"
  1480. #
  1481. # ## TLS Config
  1482. # # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
  1483. # # tls_cert = "/etc/telegraf/cert.pem"
  1484. # # tls_key = "/etc/telegraf/key.pem"
  1485. #
  1486. # ## Period between keep alive probes.
  1487. # ## 0 disables keep alive probes.
  1488. # ## Defaults to the OS configuration.
  1489. # ## Only applies to stream sockets (e.g. TCP).
  1490. # # keep_alive_period = "5m"
  1491. #
  1492. # ## Maximum number of concurrent connections (default = 0).
  1493. # ## 0 means unlimited.
  1494. # ## Only applies to stream sockets (e.g. TCP).
  1495. # # max_connections = 1024
  1496. #
  1497. # ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
  1498. # ## 0 means unlimited.
  1499. # # read_timeout = "5s"
  1500. #
  1501. # ## The framing technique with which it is expected that messages are transported (default = "octet-counting").
  1502. # ## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
  1503. # ## or the non-transparent framing technique (RFC6587#section-3.4.2).
  1504. # ## Must be one of "octet-counting", "non-transparent".
  1505. # # framing = "octet-counting"
  1506. #
  1507. # ## The trailer to be expected in case of non-transparent framing (default = "LF").
  1508. # ## Must be one of "LF", or "NUL".
  1509. # # trailer = "LF"
  1510. #
  1511. # ## Whether to parse in best effort mode or not (default = false).
  1512. # ## By default best effort parsing is off.
  1513. # # best_effort = false
  1514. #
  1515. # ## Character to prepend to SD-PARAMs (default = "_").
  1516. # ## A syslog message can contain multiple parameters and multiple identifiers within structured data section.
  1517. # ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
  1518. # ## For each combination a field is created.
  1519. # ## Its name is created concatenating identifier, sdparam_separator, and parameter name.
  1520. # # sdparam_separator = "_"
  1521.  
  1522.  
  1523. # # Stream a log file, like the tail -f command
  1524. # [[inputs.tail]]
  1525. # ## files to tail.
  1526. # ## These accept standard unix glob matching rules, but with the addition of
  1527. # ## ** as a "super asterisk". ie:
  1528. # ## "/var/log/**.log" -> recursively find all .log files in /var/log
  1529. # ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
  1530. # ## "/var/log/apache.log" -> just tail the apache log file
  1531. # ##
  1532. # ## See https://github.com/gobwas/glob for more examples
  1533. # ##
  1534. # files = ["/var/mymetrics.out"]
  1535. # ## Read file from beginning.
  1536. # from_beginning = false
  1537. # ## Whether file is a named pipe
  1538. # pipe = false
  1539. #
  1540. # ## Method used to watch for file updates. Can be either "inotify" or "poll".
  1541. # # watch_method = "inotify"
  1542. #
  1543. # ## Data format to consume.
  1544. # ## Each data format has its own unique set of configuration options, read
  1545. # ## more about them here:
  1546. # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  1547. # data_format = "influx"
  1548.  
  1549.  
  1550. # # Generic TCP listener
  1551. # [[inputs.tcp_listener]]
  1552. # # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
  1553. # # socket_listener plugin
  1554. # # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
  1555.  
  1556.  
  1557. # # Generic UDP listener
  1558. # [[inputs.udp_listener]]
  1559. # # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
  1560. # # socket_listener plugin
  1561. # # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
  1562.  
  1563.  
  1564. # # Read metrics from VMware vCenter
  1565. # [[inputs.vsphere]]
  1566. # ## List of vCenter URLs to be monitored. These three lines must be uncommented
  1567. # ## and edited for the plugin to work.
  1568. # vcenters = [ "https://vcenter.local/sdk" ]
  1569. # username = "user@corp.local"
  1570. # password = "secret"
  1571. #
  1572. # ## VMs
  1573. # ## Typical VM metrics (if omitted or empty, all metrics are collected)
  1574. # vm_metric_include = [
  1575. # "cpu.demand.average",
  1576. # "cpu.idle.summation",
  1577. # "cpu.latency.average",
  1578. # "cpu.readiness.average",
  1579. # "cpu.ready.summation",
  1580. # "cpu.run.summation",
  1581. # "cpu.usagemhz.average",
  1582. # "cpu.used.summation",
  1583. # "cpu.wait.summation",
  1584. # "mem.active.average",
  1585. # "mem.granted.average",
  1586. # "mem.latency.average",
  1587. # "mem.swapin.average",
  1588. # "mem.swapinRate.average",
  1589. # "mem.swapout.average",
  1590. # "mem.swapoutRate.average",
  1591. # "mem.usage.average",
  1592. # "mem.vmmemctl.average",
  1593. # "net.bytesRx.average",
  1594. # "net.bytesTx.average",
  1595. # "net.droppedRx.summation",
  1596. # "net.droppedTx.summation",
  1597. # "net.usage.average",
  1598. # "power.power.average",
  1599. # "virtualDisk.numberReadAveraged.average",
  1600. # "virtualDisk.numberWriteAveraged.average",
  1601. # "virtualDisk.read.average",
  1602. # "virtualDisk.readOIO.latest",
  1603. # "virtualDisk.throughput.usage.average",
  1604. # "virtualDisk.totalReadLatency.average",
  1605. # "virtualDisk.totalWriteLatency.average",
  1606. # "virtualDisk.write.average",
  1607. # "virtualDisk.writeOIO.latest",
  1608. # "sys.uptime.latest",
  1609. # ]
  1610. # # vm_metric_exclude = [] ## Nothing is excluded by default
  1611. # # vm_instances = true ## true by default
  1612. #
  1613. # ## Hosts
  1614. # ## Typical host metrics (if omitted or empty, all metrics are collected)
  1615. # host_metric_include = [
  1616. # "cpu.coreUtilization.average",
  1617. # "cpu.costop.summation",
  1618. # "cpu.demand.average",
  1619. # "cpu.idle.summation",
  1620. # "cpu.latency.average",
  1621. # "cpu.readiness.average",
  1622. # "cpu.ready.summation",
  1623. # "cpu.swapwait.summation",
  1624. # "cpu.usage.average",
  1625. # "cpu.usagemhz.average",
  1626. # "cpu.used.summation",
  1627. # "cpu.utilization.average",
  1628. # "cpu.wait.summation",
  1629. # "disk.deviceReadLatency.average",
  1630. # "disk.deviceWriteLatency.average",
  1631. # "disk.kernelReadLatency.average",
  1632. # "disk.kernelWriteLatency.average",
  1633. # "disk.numberReadAveraged.average",
  1634. # "disk.numberWriteAveraged.average",
  1635. # "disk.read.average",
  1636. # "disk.totalReadLatency.average",
  1637. # "disk.totalWriteLatency.average",
  1638. # "disk.write.average",
  1639. # "mem.active.average",
  1640. # "mem.latency.average",
  1641. # "mem.state.latest",
  1642. # "mem.swapin.average",
  1643. # "mem.swapinRate.average",
  1644. # "mem.swapout.average",
  1645. # "mem.swapoutRate.average",
  1646. # "mem.totalCapacity.average",
  1647. # "mem.usage.average",
  1648. # "mem.vmmemctl.average",
  1649. # "net.bytesRx.average",
  1650. # "net.bytesTx.average",
  1651. # "net.droppedRx.summation",
  1652. # "net.droppedTx.summation",
  1653. # "net.errorsRx.summation",
  1654. # "net.errorsTx.summation",
  1655. # "net.usage.average",
  1656. # "power.power.average",
  1657. # "storageAdapter.numberReadAveraged.average",
  1658. # "storageAdapter.numberWriteAveraged.average",
  1659. # "storageAdapter.read.average",
  1660. # "storageAdapter.write.average",
  1661. # "sys.uptime.latest",
  1662. # ]
  1663. # # host_metric_exclude = [] ## Nothing excluded by default
  1664. # # host_instances = true ## true by default
  1665. #
  1666. # ## Clusters
  1667. # # cluster_metric_include = [] ## if omitted or empty, all metrics are collected
  1668. # # cluster_metric_exclude = [] ## Nothing excluded by default
  1669. # # cluster_instances = true ## true by default
  1670. #
  1671. # ## Datastores
  1672. # # datastore_metric_include = [] ## if omitted or empty, all metrics are collected
  1673. # # datastore_metric_exclude = [] ## Nothing excluded by default
  1674. # # datastore_instances = false ## false by default for Datastores only
  1675. #
  1676. # ## Datacenters
  1677. # datacenter_metric_include = [] ## if omitted or empty, all metrics are collected
  1678. # datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default.
  1679. # # datacenter_instances = false ## false by default for Datacenters only
  1680. #
  1681. # ## Plugin Settings
  1682. # ## separator character to use for measurement and field names (default: "_")
  1683. # # separator = "_"
  1684. #
  1685. # ## number of objects to retrieve per query for realtime resources (vms and hosts)
  1686. # ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
  1687. # # max_query_objects = 256
  1688. #
  1689. # ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores)
  1690. # ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
  1691. # # max_query_metrics = 256
  1692. #
  1693. # ## number of go routines to use for collection and discovery of objects and metrics
  1694. # # collect_concurrency = 1
  1695. # # discover_concurrency = 1
  1696. #
  1697. # ## whether or not to force discovery of new objects on initial gather call before collecting metrics
  1698. # ## when true for large environments this may cause errors for time elapsed while collecting metrics
  1699. # ## when false (default) the first collection cycle may result in no or limited metrics while objects are discovered
  1700. # # force_discover_on_init = false
  1701. #
  1702. # ## the interval before (re)discovering objects subject to metrics collection (default: 300s)
  1703. # # object_discovery_interval = "300s"
  1704. #
  1705. # ## timeout applies to any of the API requests made to vCenter
  1706. # # timeout = "60s"
  1707. #
  1708. # ## Optional SSL Config
  1709. # # ssl_ca = "/path/to/cafile"
  1710. # # ssl_cert = "/path/to/certfile"
  1711. # # ssl_key = "/path/to/keyfile"
  1712. # ## Use SSL but skip chain & host verification
  1713. # # insecure_skip_verify = false
  1714.  
  1715.  
  1716. # # A Webhooks Event collector
  1717. # [[inputs.webhooks]]
  1718. # ## Address and port to host Webhook listener on
  1719. # service_address = ":1619"
  1720. #
  1721. # [inputs.webhooks.filestack]
  1722. # path = "/filestack"
  1723. #
  1724. # [inputs.webhooks.github]
  1725. # path = "/github"
  1726. # # secret = ""
  1727. #
  1728. # [inputs.webhooks.mandrill]
  1729. # path = "/mandrill"
  1730. #
  1731. # [inputs.webhooks.rollbar]
  1732. # path = "/rollbar"
  1733. #
  1734. # [inputs.webhooks.papertrail]
  1735. # path = "/papertrail"
  1736. #
  1737. # [inputs.webhooks.particle]
  1738. # path = "/particle"
  1739.  
  1740.  
  1741. # # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
  1742. # [[inputs.zipkin]]
  1743. # # path = "/api/v1/spans" # URL path for span data
  1744. # # port = 9411 # Port on which Telegraf listens