/* This configuration is as close to 2.5.x default behavior as possible.
   The values closely match ./gmond/metric.h definitions in 2.5.x. */
globals {
  setuid = yes
  user = nobody
  cleanup_threshold = 300 /* secs */
  host_dmax = 3600 /* secs */
}

/* If a cluster attribute is specified, then all gmond hosts are wrapped inside
 * of a <CLUSTER> tag. If you do not specify a cluster tag, then all <HOSTS> will
 * NOT be wrapped inside of a <CLUSTER> tag. */
cluster {
  name = "web"
}
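
/* Illustrative sketch (not part of the original paste): the cluster block also
   accepts optional descriptive attributes such as owner, latlong and url.
   The values below are placeholders, not this cluster's real settings. */
# cluster {
#   name    = "web"
#   owner   = "ops-team"
#   latlong = "N37.37 W122.03"
#   url     = "http://example.com/ganglia"
# }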

/* Feel free to specify as many udp_send_channels as you like. Gmond
   used to only support having a single channel. */
#udp_send_channel {
#  host = mother010.ksjc.sh.colo
#  port = 8650
#}
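
/* Illustrative sketch (not part of the original paste): a second, multicast-based
   send channel could look like the block below. The multicast group, port and
   ttl are placeholder values taken from common gmond examples, not from this
   cluster's setup. */
#udp_send_channel {
#  mcast_join = 239.2.11.71
#  port       = 8649
#  ttl        = 1
#}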

/* You can specify as many udp_recv_channels as you like as well. */
udp_recv_channel {
  # mcast_join = 239.2.11.71
  port = 8658
  # bind = 239.2.11.71
}

/* You can specify as many tcp_accept_channels as you like to share
   an xml description of the state of the cluster. */
tcp_accept_channel {
  port = 8658
}
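
/* Usage note (not part of the original paste): the XML served on this channel
   can be read with any plain TCP client, e.g. "telnet <host> 8658" or
   "nc <host> 8658"; gmond dumps the cluster state and closes the connection. */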

/* The old internal 2.5.x metric array has been replaced by the following
   collection_group directives. What follows is the default behavior for
   collecting and sending metrics that is as close to 2.5.x behavior as
   possible. */

/* This collection group will cause a heartbeat (or beacon) to be sent every
   20 seconds. In the heartbeat is the GMOND_STARTED data which expresses
   the age of the running gmond. */
collection_group {
  collect_once = yes
  time_threshold = 20
  metric {
    name = "heartbeat"
  }
}

/* This collection group will send general info about this host every 1200 secs.
   This information doesn't change between reboots and is only collected once. */
collection_group {
  collect_once = yes
  time_threshold = 1200
  metric {
    name = "cpu_num"
  }
  metric {
    name = "cpu_speed"
  }
  metric {
    name = "mem_total"
  }
  /* Should this be here? Swap can be added/removed between reboots. */
  metric {
    name = "swap_total"
  }
  metric {
    name = "boottime"
  }
  metric {
    name = "machine_type"
  }
  metric {
    name = "os_name"
  }
  metric {
    name = "os_release"
  }
  metric {
    name = "location"
  }
}

/* This collection group will send the status of gexecd for this host every 300 secs. */
/* Unlike 2.5.x, the default behavior is to report gexecd OFF. */
collection_group {
  collect_once = yes
  time_threshold = 300
  metric {
    name = "gexec"
  }
}

/* This collection group will collect the CPU status info every 20 secs.
   The time threshold is set to 90 seconds. In truth, this time_threshold could be
   set significantly higher to reduce unnecessary network chatter. */
collection_group {
  collect_every = 20
  time_threshold = 90
  /* CPU status */
  metric {
    name = "cpu_user"
    value_threshold = "1.0"
  }
  metric {
    name = "cpu_system"
    value_threshold = "1.0"
  }
  metric {
    name = "cpu_idle"
    value_threshold = "5.0"
  }
  metric {
    name = "cpu_nice"
    value_threshold = "1.0"
  }
  metric {
    name = "cpu_aidle"
    value_threshold = "5.0"
  }
  metric {
    name = "cpu_wio"
    value_threshold = "1.0"
  }
  /* The next two metrics are optional if you want more detail...
     ... since they are accounted for in cpu_system.
  metric {
    name = "cpu_intr"
    value_threshold = "1.0"
  }
  metric {
    name = "cpu_sintr"
    value_threshold = "1.0"
  }
  */
}
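
/* Summary aside (not part of the original paste; wording ours, based on the
   gmond 3.x documentation): collect_every is how often the metric is sampled
   locally, value_threshold suppresses a send unless the sampled value has
   changed by at least that amount since the last send, and time_threshold
   forces a send after that many seconds even if the value has not changed. */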

collection_group {
  collect_every = 20
  time_threshold = 90
  /* Load Averages */
  metric {
    name = "load_one"
    value_threshold = "1.0"
  }
  metric {
    name = "load_five"
    value_threshold = "1.0"
  }
  metric {
    name = "load_fifteen"
    value_threshold = "1.0"
  }
}

/* This group collects the number of running and total processes */
collection_group {
  collect_every = 80
  time_threshold = 950
  metric {
    name = "proc_run"
    value_threshold = "1.0"
  }
  metric {
    name = "proc_total"
    value_threshold = "1.0"
  }
}

/* This collection group grabs the volatile memory metrics every 40 secs and
   sends them at least every 180 secs. This time_threshold can be increased
   significantly to reduce unneeded network traffic. */
collection_group {
  collect_every = 40
  time_threshold = 180
  metric {
    name = "mem_free"
    value_threshold = "1024.0"
  }
  metric {
    name = "mem_shared"
    value_threshold = "1024.0"
  }
  metric {
    name = "mem_buffers"
    value_threshold = "1024.0"
  }
  metric {
    name = "mem_cached"
    value_threshold = "1024.0"
  }
  metric {
    name = "swap_free"
    value_threshold = "1024.0"
  }
}

collection_group {
  collect_every = 40
  time_threshold = 300
  metric {
    name = "bytes_out"
    value_threshold = 4096
  }
  metric {
    name = "bytes_in"
    value_threshold = 4096
  }
  metric {
    name = "pkts_in"
    value_threshold = 256
  }
  metric {
    name = "pkts_out"
    value_threshold = 256
  }
}

/* Different from the 2.5.x default since the old config made no sense */
collection_group {
  collect_every = 1800
  time_threshold = 3600
  metric {
    name = "disk_total"
    value_threshold = 1.0
  }
}

collection_group {
  collect_every = 40
  time_threshold = 180
  metric {
    name = "disk_free"
    value_threshold = 1.0
  }
  metric {
    name = "part_max_used"
    value_threshold = 1.0
  }
}