/* This configuration is as close to 2.5.x default behavior as possible
   The values closely match ./gmond/metric.h definitions in 2.5.x */
globals {
  daemonize = yes
  setuid = yes
  user = nobody
  debug_level = 0
  max_udp_msg_len = 1472
  mute = no
  deaf = no
  allow_extra_data = yes
  host_dmax = 0 /* secs */
  cleanup_threshold = 300 /* secs */
  gexec = no
  send_metadata_interval = 15 /* secs */
}

/*
 * The cluster attributes specified will be used as part of the <CLUSTER>
 * tag that will wrap all hosts collected by this instance.
 */
cluster {
  name = "Surge Nodes"
  owner = "unspecified"
  latlong = "unspecified"
  url = "unspecified"
}

/* The host section describes attributes of the host, like the location */
host {
  location = "unspecified"
}

/* Feel free to specify as many udp_send_channels as you like. Gmond
   used to only support having a single channel */
udp_send_channel {
  #bind_hostname = yes # Highly recommended, soon to be default.
                       # This option tells gmond to use a source address
                       # that resolves to the machine's hostname. Without
                       # this, the metrics may appear to come from any
                       # interface and the DNS names associated with
                       # those IPs will be used to create the RRDs.
  host = 10.5.17.101
  port = 8649
  # ttl = 1
}
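
/* Example sketch (not part of the original file): since multiple send channels
   are allowed, a second, multicast channel could sit alongside the unicast one
   above. 239.2.11.71:8649 is the conventional Ganglia multicast group/port;
   adjust it to your environment before uncommenting.
udp_send_channel {
  mcast_join = 239.2.11.71
  port = 8649
  ttl = 1
}
*/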

/* You can specify as many udp_recv_channels as you like as well. */
udp_recv_channel {
  # mcast_join = 239.2.11.71
  port = 8649
  family = inet4
  # bind = 239.2.11.71
}
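
/* Example sketch (not part of the original file): an additional receive
   channel restricted with an ACL so that only hosts on 10.5.17.0/24 (an
   assumed subnet, matching the unicast target above) may submit metrics.
   Port 8650 is a placeholder.
udp_recv_channel {
  port = 8650
  family = inet4
  acl {
    default = "deny"
    access {
      ip = 10.5.17.0
      mask = 24
      action = "allow"
    }
  }
}
*/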

/* You can specify as many tcp_accept_channels as you like to share
   an xml description of the state of the cluster */
tcp_accept_channel {
  port = 8649
}
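
/* Example sketch (not part of the original file): a tcp_accept_channel with an
   ACL that limits XML polling to the local host and a gmetad collector
   (10.5.17.1 is a placeholder address). Port 8650 is also a placeholder.
tcp_accept_channel {
  port = 8650
  acl {
    default = "deny"
    access {
      ip = 127.0.0.1
      mask = 32
      action = "allow"
    }
    access {
      ip = 10.5.17.1
      mask = 32
      action = "allow"
    }
  }
}
*/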

/* Each metrics module that is referenced by gmond must be specified and
   loaded. If the module has been statically linked with gmond, it does
   not require a load path. However all dynamically loadable modules must
   include a load path. */
modules {
  module {
    name = "core_metrics"
  }
  module {
    name = "cpu_module"
    path = "modcpu.so"
  }
  module {
    name = "disk_module"
    path = "moddisk.so"
  }
  module {
    name = "load_module"
    path = "modload.so"
  }
  module {
    name = "mem_module"
    path = "modmem.so"
  }
  module {
    name = "net_module"
    path = "modnet.so"
  }
  module {
    name = "proc_module"
    path = "modproc.so"
  }
  module {
    name = "sys_module"
    path = "modsys.so"
  }
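
  /* Example sketch (not part of the original file): dynamically loaded modules
     can also take parameters via the "params" directive. With the Ganglia
     Python bindings installed, the Python bridge module is commonly loaded
     like this; the path and params values below are assumptions and vary by
     distribution.
  module {
    name = "python_module"
    path = "modpython.so"
    params = "/usr/lib/ganglia/python_modules"
  }
  */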
}

include ('/etc/ganglia/conf.d/*.conf')
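
/* Example sketch (not part of the original file): files matched by the include
   above use the same syntax as this file. A hypothetical
   /etc/ganglia/conf.d/custom.conf could, for instance, contain:
   collection_group {
     collect_every = 60
     time_threshold = 300
     metric {
       name = "proc_run"
       title = "Running Processes"
     }
   }
*/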

/* The old internal 2.5.x metric array has been replaced by the following
   collection_group directives. What follows is the default behavior for
   collecting and sending metrics that is as close to 2.5.x behavior as
   possible. */

/* This collection group will cause a heartbeat (or beacon) to be sent every
   20 seconds. In the heartbeat is the GMOND_STARTED data which expresses
   the age of the running gmond. */
collection_group {
  collect_once = yes
  time_threshold = 20
  metric {
    name = "heartbeat"
  }
}

/* This collection group will send general info about this host every
   1200 secs.
   This information doesn't change between reboots and is only collected
   once. */
collection_group {
  collect_once = yes
  time_threshold = 1200
  metric {
    name = "cpu_num"
    title = "CPU Count"
  }
  metric {
    name = "cpu_speed"
    title = "CPU Speed"
  }
  metric {
    name = "mem_total"
    title = "Memory Total"
  }
  /* Should this be here? Swap can be added/removed between reboots. */
  metric {
    name = "swap_total"
    title = "Swap Space Total"
  }
  metric {
    name = "boottime"
    title = "Last Boot Time"
  }
  metric {
    name = "machine_type"
    title = "Machine Type"
  }
  metric {
    name = "os_name"
    title = "Operating System"
  }
  metric {
    name = "os_release"
    title = "Operating System Release"
  }
  metric {
    name = "location"
    title = "Location"
  }
}

/* This collection group will send the status of gexecd for this host
   every 300 secs. */
/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
collection_group {
  collect_once = yes
  time_threshold = 300
  metric {
    name = "gexec"
    title = "Gexec Status"
  }
}

/* This collection group will collect the CPU status info every 20 secs.
   The time threshold is set to 90 seconds. In practice, this
   time_threshold could be set significantly higher to reduce
   unnecessary network chatter. */
collection_group {
  collect_every = 20
  time_threshold = 90
  /* CPU status */
  metric {
    name = "cpu_user"
    value_threshold = "1.0"
    title = "CPU User"
  }
  metric {
    name = "cpu_system"
    value_threshold = "1.0"
    title = "CPU System"
  }
  metric {
    name = "cpu_idle"
    value_threshold = "5.0"
    title = "CPU Idle"
  }
  metric {
    name = "cpu_nice"
    value_threshold = "1.0"
    title = "CPU Nice"
  }
  metric {
    name = "cpu_aidle"
    value_threshold = "5.0"
    title = "CPU aidle"
  }
  metric {
    name = "cpu_wio"
    value_threshold = "1.0"
    title = "CPU wio"
  }
  /* The next two metrics are optional if you want more detail...
     ... since they are accounted for in cpu_system.
  metric {
    name = "cpu_intr"
    value_threshold = "1.0"
    title = "CPU intr"
  }
  metric {
    name = "cpu_sintr"
    value_threshold = "1.0"
    title = "CPU sintr"
  }
  */
}

collection_group {
  collect_every = 20
  time_threshold = 90
  /* Load Averages */
  metric {
    name = "load_one"
    value_threshold = "1.0"
    title = "One Minute Load Average"
  }
  metric {
    name = "load_five"
    value_threshold = "1.0"
    title = "Five Minute Load Average"
  }
  metric {
    name = "load_fifteen"
    value_threshold = "1.0"
    title = "Fifteen Minute Load Average"
  }
}

/* This group collects the number of running and total processes */
collection_group {
  collect_every = 80
  time_threshold = 950
  metric {
    name = "proc_run"
    value_threshold = "1.0"
    title = "Total Running Processes"
  }
  metric {
    name = "proc_total"
    value_threshold = "1.0"
    title = "Total Processes"
  }
}

/* This collection group grabs the volatile memory metrics every 40 secs and
   sends them at least every 180 secs. This time_threshold can be increased
   significantly to reduce unneeded network traffic. */
collection_group {
  collect_every = 40
  time_threshold = 180
  metric {
    name = "mem_free"
    value_threshold = "1024.0"
    title = "Free Memory"
  }
  metric {
    name = "mem_shared"
    value_threshold = "1024.0"
    title = "Shared Memory"
  }
  metric {
    name = "mem_buffers"
    value_threshold = "1024.0"
    title = "Memory Buffers"
  }
  metric {
    name = "mem_cached"
    value_threshold = "1024.0"
    title = "Cached Memory"
  }
  metric {
    name = "swap_free"
    value_threshold = "1024.0"
    title = "Free Swap Space"
  }
}

collection_group {
  collect_every = 40
  time_threshold = 300
  metric {
    name = "bytes_out"
    value_threshold = 4096
    title = "Bytes Sent"
  }
  metric {
    name = "bytes_in"
    value_threshold = 4096
    title = "Bytes Received"
  }
  metric {
    name = "pkts_in"
    value_threshold = 256
    title = "Packets Received"
  }
  metric {
    name = "pkts_out"
    value_threshold = 256
    title = "Packets Sent"
  }
}

/* Different than 2.5.x default since the old config made no sense */
collection_group {
  collect_every = 1800
  time_threshold = 3600
  metric {
    name = "disk_total"
    value_threshold = 1.0
    title = "Total Disk Space"
  }
}

collection_group {
  collect_every = 40
  time_threshold = 180
  metric {
    name = "disk_free"
    value_threshold = 1.0
    title = "Disk Space Available"
  }
  metric {
    name = "part_max_used"
    value_threshold = 1.0
    title = "Maximum Disk Space Used"
  }
}