/* This configuration is as close to 2.5.x default behavior as possible.
   The values closely match ./gmond/metric.h definitions in 2.5.x. */
globals {
  daemonize = yes
  setuid = yes
  user = nobody
  debug_level = 0
  max_udp_msg_len = 1472
  mute = no
  deaf = no
  allow_extra_data = yes
  host_dmax = 0 /* secs */
  host_tmax = 20 /* secs */
  cleanup_threshold = 300 /* secs */
  gexec = no
  send_metadata_interval = 30 /* secs */
}

/*
 * The cluster attributes specified will be used as part of the <CLUSTER>
 * tag that will wrap all hosts collected by this instance.
 */
cluster {
  name = "unspecified"
  owner = "unspecified"
  latlong = "unspecified"
  url = "unspecified"
}

/* The host section describes attributes of the host, like the location. */
host {
  location = "unspecified"
}

/* Feel free to specify as many udp_send_channels as you like. Gmond
   used to support only a single channel. */
udp_send_channel {
  #bind_hostname = yes # Highly recommended, soon to be default.
                       # This option tells gmond to use a source address
                       # that resolves to the machine's hostname. Without
                       # this, the metrics may appear to come from any
                       # interface, and the DNS names associated with
                       # those IPs will be used to create the RRDs.
  #mcast_join = 239.2.11.71
  host = 161.109.36.254
  port = 8649
  ttl = 1
}
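
/* More than one send channel may be listed; for example, a second
   (commented-out) channel like the sketch below could mirror metrics to a
   standby collector. The hostname here is a hypothetical placeholder, not
   part of this setup: */
#udp_send_channel {
#  host = backup-collector.example.com  # hypothetical standby aggregator
#  port = 8649
#  ttl = 1
#}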

/* You can specify as many udp_recv_channels as you like as well. */
/*udp_recv_channel {
  mcast_join = 239.2.11.71
  port = 8649
  bind = 239.2.11.71
}*/
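
/* If this host is itself the unicast aggregator that other nodes send to
   (as with the host line above), a minimal receive channel with just a port
   is enough. A sketch, assuming this node collects on the same port 8649: */
#udp_recv_channel {
#  port = 8649
#}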

/* You can specify as many tcp_accept_channels as you like to share
   an xml description of the state of the cluster */
/*tcp_accept_channel {
  port = 8649
}*/
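
/* A tcp_accept_channel can also carry an access control list, so that only
   a known poller (e.g. the gmetad host) can read the XML dump. A sketch,
   assuming gmetad polls from 127.0.0.1; adjust ip/mask to your collector: */
#tcp_accept_channel {
#  port = 8649
#  acl {
#    default = "deny"
#    access {
#      ip = 127.0.0.1
#      mask = 32
#      action = "allow"
#    }
#  }
#}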

/* Channel to receive sFlow datagrams */
#udp_recv_channel {
#  port = 6343
#}

/* Optional sFlow settings */
#sflow {
#  udp_port = 6343
#  accept_vm_metrics = no
#}

/* Each metrics module that is referenced by gmond must be specified and
   loaded. If the module has been statically linked with gmond, it does
   not require a load path. However all dynamically loadable modules must
   include a load path. */
modules {
  module {
    name = "core_metrics"
  }
  module {
    name = "cpu_module"
    path = "modcpu.so"
  }
  module {
    name = "disk_module"
    path = "moddisk.so"
  }
  module {
    name = "load_module"
    path = "modload.so"
  }
  module {
    name = "mem_module"
    path = "modmem.so"
  }
  module {
    name = "net_module"
    path = "modnet.so"
  }
  module {
    name = "proc_module"
    path = "modproc.so"
  }
  module {
    name = "sys_module"
    path = "modsys.so"
  }
}
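
/* Dynamically loaded modules can also take parameters. For instance, the
   optional Ganglia Python gateway is typically enabled with a stanza like
   the sketch below (often shipped as a conf.d drop-in); the params path is
   an assumption that varies by distribution: */
#modules {
#  module {
#    name = "python_module"
#    path = "modpython.so"
#    params = "/usr/lib/ganglia/python_modules"
#  }
#}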

/* The old internal 2.5.x metric array has been replaced by the following
   collection_group directives. What follows is the default behavior for
   collecting and sending metrics that is as close to 2.5.x behavior as
   possible. */

/* This collection group will cause a heartbeat (or beacon) to be sent every
   20 seconds. The heartbeat carries the GMOND_STARTED data, which expresses
   the age of the running gmond. */
collection_group {
  collect_once = yes
  time_threshold = 20
  metric {
    name = "heartbeat"
  }
}

/* This collection group will send general info about this host every
   1200 secs. This information doesn't change between reboots and is only
   collected once. */
collection_group {
  collect_once = yes
  time_threshold = 1200
  metric {
    name = "cpu_num"
    title = "CPU Count"
  }
  metric {
    name = "cpu_speed"
    title = "CPU Speed"
  }
  metric {
    name = "mem_total"
    title = "Memory Total"
  }
  /* Should this be here? Swap can be added/removed between reboots. */
  metric {
    name = "swap_total"
    title = "Swap Space Total"
  }
  metric {
    name = "boottime"
    title = "Last Boot Time"
  }
  metric {
    name = "machine_type"
    title = "Machine Type"
  }
  metric {
    name = "os_name"
    title = "Operating System"
  }
  metric {
    name = "os_release"
    title = "Operating System Release"
  }
  metric {
    name = "location"
    title = "Location"
  }
}

/* This collection group will send the status of gexecd for this host
   every 300 secs. Unlike 2.5.x, the default behavior is to report gexecd
   OFF. */
collection_group {
  collect_once = yes
  time_threshold = 300
  metric {
    name = "gexec"
    title = "Gexec Status"
  }
}

/* This collection group will collect the CPU status info every 20 secs.
   The time threshold is set to 90 seconds. In honesty, this
   time_threshold could be set significantly higher to reduce
   unnecessary network chatter. */
collection_group {
  collect_every = 20
  time_threshold = 90
  /* CPU status */
  metric {
    name = "cpu_user"
    value_threshold = "1.0"
    title = "CPU User"
  }
  metric {
    name = "cpu_system"
    value_threshold = "1.0"
    title = "CPU System"
  }
  metric {
    name = "cpu_idle"
    value_threshold = "5.0"
    title = "CPU Idle"
  }
  metric {
    name = "cpu_nice"
    value_threshold = "1.0"
    title = "CPU Nice"
  }
  metric {
    name = "cpu_aidle"
    value_threshold = "5.0"
    title = "CPU aidle"
  }
  metric {
    name = "cpu_wio"
    value_threshold = "1.0"
    title = "CPU wio"
  }
  /* The next two metrics are optional if you want more detail,
     since they are accounted for in cpu_system.
  metric {
    name = "cpu_intr"
    value_threshold = "1.0"
    title = "CPU intr"
  }
  metric {
    name = "cpu_sintr"
    value_threshold = "1.0"
    title = "CPU sintr"
  }
  */
}

collection_group {
  collect_every = 20
  time_threshold = 90
  /* Load Averages */
  metric {
    name = "load_one"
    value_threshold = "1.0"
    title = "One Minute Load Average"
  }
  metric {
    name = "load_five"
    value_threshold = "1.0"
    title = "Five Minute Load Average"
  }
  metric {
    name = "load_fifteen"
    value_threshold = "1.0"
    title = "Fifteen Minute Load Average"
  }
}

/* This group collects the number of running and total processes */
collection_group {
  collect_every = 80
  time_threshold = 950
  metric {
    name = "proc_run"
    value_threshold = "1.0"
    title = "Total Running Processes"
  }
  metric {
    name = "proc_total"
    value_threshold = "1.0"
    title = "Total Processes"
  }
}

/* This collection group grabs the volatile memory metrics every 40 secs and
   sends them at least every 180 secs. This time_threshold can be increased
   significantly to reduce unneeded network traffic. */
collection_group {
  collect_every = 40
  time_threshold = 180
  metric {
    name = "mem_free"
    value_threshold = "1024.0"
    title = "Free Memory"
  }
  metric {
    name = "mem_shared"
    value_threshold = "1024.0"
    title = "Shared Memory"
  }
  metric {
    name = "mem_buffers"
    value_threshold = "1024.0"
    title = "Memory Buffers"
  }
  metric {
    name = "mem_cached"
    value_threshold = "1024.0"
    title = "Cached Memory"
  }
  metric {
    name = "swap_free"
    value_threshold = "1024.0"
    title = "Free Swap Space"
  }
}

collection_group {
  collect_every = 40
  time_threshold = 300
  metric {
    name = "bytes_out"
    value_threshold = 4096
    title = "Bytes Sent"
  }
  metric {
    name = "bytes_in"
    value_threshold = 4096
    title = "Bytes Received"
  }
  metric {
    name = "pkts_in"
    value_threshold = 256
    title = "Packets Received"
  }
  metric {
    name = "pkts_out"
    value_threshold = 256
    title = "Packets Sent"
  }
}

/* Different than 2.5.x default since the old config made no sense */
collection_group {
  collect_every = 1800
  time_threshold = 3600
  metric {
    name = "disk_total"
    value_threshold = 1.0
    title = "Total Disk Space"
  }
}

collection_group {
  collect_every = 40
  time_threshold = 180
  metric {
    name = "disk_free"
    value_threshold = 1.0
    title = "Disk Space Available"
  }
  metric {
    name = "part_max_used"
    value_threshold = 1.0
    title = "Maximum Disk Space Used"
  }
}

include ("/etc/ganglia/conf.d/*.conf")
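
/* The include above pulls in any site-local drop-ins, so customizations can
   live outside this file. For example, a hypothetical
   /etc/ganglia/conf.d/cpu-intr.conf could enable the interrupt metrics that
   are commented out above:

   collection_group {
     collect_every = 20
     time_threshold = 90
     metric {
       name = "cpu_intr"
       value_threshold = "1.0"
       title = "CPU intr"
     }
   }
*/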