/* This configuration is as close to 2.5.x default behavior as possible.
   The values closely match ./gmond/metric.h definitions in 2.5.x */
globals {
  setuid = yes
  user = nobody
  cleanup_threshold = 300 /* secs */
}
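
/* For reference only: the stock 3.x gmond.conf exposes further globals beyond
   the three used above, e.g. daemonize, mute, deaf, debug_level and host_dmax.
   The values below are illustrative assumptions, so the stanza stays commented.
globals {
  daemonize = yes
  debug_level = 0
  mute = no          # yes would stop this gmond from sending metrics
  deaf = no          # yes would stop this gmond from receiving metrics
  host_dmax = 0      # secs; 0 means hosts are never timed out of the XML
}
*/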

/* If a cluster attribute is specified, then all gmond hosts are wrapped inside
 * of a <CLUSTER> tag. If you do not specify a cluster tag, then all <HOSTS> will
 * NOT be wrapped inside of a <CLUSTER> tag. */
cluster {
  name = "web"
}
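
/* For reference only: the cluster block also accepts owner, latlong and url
   attributes (shown here with the stock "unspecified" placeholders). These
   values are assumptions, so the stanza is left commented out.
cluster {
  name = "web"
  owner = "unspecified"
  latlong = "unspecified"
  url = "unspecified"
}
*/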

/* Feel free to specify as many udp_send_channels as you like. Gmond
   used to only support having a single channel. */
/*
udp_send_channel {
  host = mother010.ksjc.sh.colo
  port = 8648
}
*/
udp_send_channel {
  host = ganglia010.ksjc.sh.colo
  port = 8658
}
udp_send_channel {
  host = monitor.ksjc.sh.colo
  port = 8658
}
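
/* Illustrative only: a multicast send channel in the style of the stock gmond
   defaults. The 239.2.11.71 group matches the commented mcast_join further
   down, but the port and ttl are assumptions for this sketch, so it stays
   commented out rather than joining this site's unicast setup.
udp_send_channel {
  mcast_join = 239.2.11.71
  port = 8649
  ttl = 1
}
*/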

/* You can specify as many udp_recv_channels as you like as well. */
udp_recv_channel {
  # mcast_join = 239.2.11.71
  port = 8648
  # bind = 239.2.11.71
}

/* You can specify as many tcp_accept_channels as you like to share
   an xml description of the state of the cluster. */
tcp_accept_channel {
  port = 8648
}
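
/* Illustrative only: gmond 3.x can attach an acl to a tcp_accept_channel to
   limit which hosts may poll the XML state. The addresses below are
   assumptions for this sketch, so the stanza is left commented out.
tcp_accept_channel {
  port = 8648
  acl {
    default = "deny"
    access {
      ip = 127.0.0.1
      mask = 32
      action = "allow"
    }
  }
}
*/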

/* The old internal 2.5.x metric array has been replaced by the following
   collection_group directives. What follows is the default behavior for
   collecting and sending metrics that is as close to 2.5.x behavior as
   possible. */

/* This collection group will cause a heartbeat (or beacon) to be sent every
   20 seconds. In the heartbeat is the GMOND_STARTED data which expresses
   the age of the running gmond. */
collection_group {
  collect_once = yes
  time_threshold = 20
  metric {
    name = "heartbeat"
  }
}

/* This collection group will send general info about this host every 1200 secs.
   This information doesn't change between reboots and is only collected once. */
collection_group {
  collect_once = yes
  time_threshold = 1200
  metric {
    name = "cpu_num"
  }
  metric {
    name = "cpu_speed"
  }
  metric {
    name = "mem_total"
  }
  /* Should this be here? Swap can be added/removed between reboots. */
  metric {
    name = "swap_total"
  }
  metric {
    name = "boottime"
  }
  metric {
    name = "machine_type"
  }
  metric {
    name = "os_name"
  }
  metric {
    name = "os_release"
  }
  metric {
    name = "location"
  }
}

/* This collection group will send the status of gexecd for this host every 300 secs. */
/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
collection_group {
  collect_once = yes
  time_threshold = 300
  metric {
    name = "gexec"
  }
}

/* This collection group will collect the CPU status info every 20 secs.
   The time threshold is set to 90 seconds. In all honesty, this time_threshold
   could be set significantly higher to reduce unnecessary network chatter. */
collection_group {
  collect_every = 20
  time_threshold = 90
  /* CPU status */
  metric {
    name = "cpu_user"
    value_threshold = "1.0"
  }
  metric {
    name = "cpu_system"
    value_threshold = "1.0"
  }
  metric {
    name = "cpu_idle"
    value_threshold = "5.0"
  }
  metric {
    name = "cpu_nice"
    value_threshold = "1.0"
  }
  metric {
    name = "cpu_aidle"
    value_threshold = "5.0"
  }
  metric {
    name = "cpu_wio"
    value_threshold = "1.0"
  }
  /* The next two metrics are optional if you want more detail...
     ... since they are accounted for in cpu_system.
  metric {
    name = "cpu_intr"
    value_threshold = "1.0"
  }
  metric {
    name = "cpu_sintr"
    value_threshold = "1.0"
  }
  */
}

collection_group {
  collect_every = 20
  time_threshold = 90
  /* Load Averages */
  metric {
    name = "load_one"
    value_threshold = "1.0"
  }
  metric {
    name = "load_five"
    value_threshold = "1.0"
  }
  metric {
    name = "load_fifteen"
    value_threshold = "1.0"
  }
}

/* This group collects the number of running and total processes */
collection_group {
  collect_every = 80
  time_threshold = 950
  metric {
    name = "proc_run"
    value_threshold = "1.0"
  }
  metric {
    name = "proc_total"
    value_threshold = "1.0"
  }
}

/* This collection group grabs the volatile memory metrics every 40 secs and
   sends them at least every 180 secs. This time_threshold can be increased
   significantly to reduce unneeded network traffic. */
collection_group {
  collect_every = 40
  time_threshold = 180
  metric {
    name = "mem_free"
    value_threshold = "1024.0"
  }
  metric {
    name = "mem_shared"
    value_threshold = "1024.0"
  }
  metric {
    name = "mem_buffers"
    value_threshold = "1024.0"
  }
  metric {
    name = "mem_cached"
    value_threshold = "1024.0"
  }
  metric {
    name = "swap_free"
    value_threshold = "1024.0"
  }
}

collection_group {
  collect_every = 40
  time_threshold = 300
  metric {
    name = "bytes_out"
    value_threshold = 4096
  }
  metric {
    name = "bytes_in"
    value_threshold = 4096
  }
  metric {
    name = "pkts_in"
    value_threshold = 256
  }
  metric {
    name = "pkts_out"
    value_threshold = 256
  }
}

/* Different from the 2.5.x default, since the old config made no sense */
collection_group {
  collect_every = 1800
  time_threshold = 3600
  metric {
    name = "disk_total"
    value_threshold = 1.0
  }
}

collection_group {
  collect_every = 40
  time_threshold = 180
  metric {
    name = "disk_free"
    value_threshold = 1.0
  }
  metric {
    name = "part_max_used"
    value_threshold = 1.0
  }
}
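
/* Illustrative only: gmond 3.1+ configurations commonly end with an include
   directive so that extra metric modules can be dropped into a conf.d
   directory. The path is an assumption, so the line is left commented out.
include ('/etc/ganglia/conf.d/*.conf')
*/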