/* This configuration is as close to 2.5.x default behavior as possible.
   The values closely match ./gmond/metric.h definitions in 2.5.x. */
globals {
  daemonize = yes
  setuid = yes
  user = ganglia
  debug_level = 0
  max_udp_msg_len = 1472
  mute = no
  deaf = no
  allow_extra_data = yes
  host_dmax = 86400 /* secs. Expires (removes from web interface) hosts in 1 day */
  host_tmax = 20 /* secs */
  cleanup_threshold = 300 /* secs */
  gexec = no
  # By default gmond will use reverse DNS resolution when displaying your hostname.
  # Uncommenting the following value will override that behavior.
  # override_hostname = "mywebserver.domain.com"
  # If you are not using multicast, this value should be set to something other
  # than 0. Otherwise, if you restart the aggregator gmond, you will get empty
  # graphs. 60 seconds is reasonable.
  send_metadata_interval = 0 /* secs */
}

/*
 * The cluster attributes specified will be used as part of the <CLUSTER>
 * tag that will wrap all hosts collected by this instance.
 */
cluster {
  name = "Ganglia Test Setup"
  owner = "Ops"
  latlong = "unspecified"
  url = "unspecified"
}

/* The host section describes attributes of the host, like the location */
host {
  location = "unspecified"
}
/* Feel free to specify as many udp_send_channels as you like. Gmond
   used to support only a single channel. */
udp_send_channel {
  #bind_hostname = yes # Highly recommended, soon to be default.
                       # This option tells gmond to use a source address
                       # that resolves to the machine's hostname. Without
                       # this, the metrics may appear to come from any
                       # interface and the DNS names associated with
                       # those IPs will be used to create the RRDs.
  #host = test4
  mcast_join = 239.2.11.71
  port = 8649
  ttl = 1
}
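
/* For illustration only: a second, unicast send channel could point at a
   dedicated aggregator host. The hostname below is assumed for this sketch,
   not part of the stock configuration. */
#udp_send_channel {
#  host = aggregator.example.com
#  port = 8649
#}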

/* You can specify as many udp_recv_channels as you like as well. */
udp_recv_channel {
  mcast_join = 239.2.11.71
  port = 8649
  bind = 239.2.11.71
  retry_bind = true
  # Size of the UDP buffer. If you are handling lots of metrics you really
  # should bump it up to e.g. 10MB or even higher.
  # buffer = 10485760
}
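
/* For illustration only: the matching unicast receive channel as it might
   appear on the aggregator side (no mcast_join, listen on all interfaces). */
#udp_recv_channel {
#  port = 8649
#  retry_bind = true
#}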

/* You can specify as many tcp_accept_channels as you like to share
   an xml description of the state of the cluster */
tcp_accept_channel {
  port = 8649
  # If you want to gzip XML output
  # gzip_output = no
}
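
/* For illustration only: an acl block can restrict who may poll the XML
   port, e.g. to the host running gmetad. The IP below is assumed. */
#tcp_accept_channel {
#  port = 8649
#  acl {
#    default = "deny"
#    access {
#      ip = 192.168.1.10
#      mask = 32
#      action = "allow"
#    }
#  }
#}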

/* Channel to receive sFlow datagrams */
#udp_recv_channel {
#  port = 6343
#}

/* Optional sFlow settings */
#sflow {
#  udp_port = 6343
#  accept_vm_metrics = yes
#  accept_jvm_metrics = yes
#  multiple_jvm_instances = no
#  accept_http_metrics = yes
#  multiple_http_instances = no
#  accept_memcache_metrics = yes
#  multiple_memcache_instances = no
#}

/* Each metrics module that is referenced by gmond must be specified and
   loaded. If the module has been statically linked with gmond, it does
   not require a load path. However all dynamically loadable modules must
   include a load path. */
modules {
  module {
    name = "core_metrics"
  }
  module {
    name = "cpu_module"
    path = "modcpu.so"
  }
  module {
    name = "disk_module"
    path = "moddisk.so"
  }
  module {
    name = "load_module"
    path = "modload.so"
  }
  module {
    name = "mem_module"
    path = "modmem.so"
  }
  module {
    name = "net_module"
    path = "modnet.so"
  }
  module {
    name = "proc_module"
    path = "modproc.so"
  }
  module {
    name = "sys_module"
    path = "modsys.so"
  }
}
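
/* For illustration only: a dynamically loaded module can also take
   parameters via params and param blocks. The module name and values
   below are hypothetical. */
#modules {
#  module {
#    name = "example_module"
#    path = "modexample.so"
#    params = "An extra raw parameter"
#    param RandomMax {
#      value = 75
#    }
#  }
#}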

/* The old internal 2.5.x metric array has been replaced by the following
   collection_group directives. What follows is the default behavior for
   collecting and sending metrics that is as close to 2.5.x behavior as
   possible. */

/* This collection group will cause a heartbeat (or beacon) to be sent every
   20 seconds. The heartbeat carries the GMOND_STARTED data, which expresses
   the age of the running gmond. */
collection_group {
  collect_once = yes
  time_threshold = 20
  metric {
    name = "heartbeat"
  }
}

/* This collection group will send general info about this host every
   1200 secs.
   This information doesn't change between reboots and is only collected
   once. */
collection_group {
  collect_once = yes
  time_threshold = 1200
  metric {
    name = "cpu_num"
    title = "CPU Count"
  }
  metric {
    name = "cpu_speed"
    title = "CPU Speed"
  }
  metric {
    name = "mem_total"
    title = "Memory Total"
  }
  /* Should this be here? Swap can be added/removed between reboots. */
  metric {
    name = "swap_total"
    title = "Swap Space Total"
  }
  metric {
    name = "boottime"
    title = "Last Boot Time"
  }
  metric {
    name = "machine_type"
    title = "Machine Type"
  }
  metric {
    name = "os_name"
    title = "Operating System"
  }
  metric {
    name = "os_release"
    title = "Operating System Release"
  }
  metric {
    name = "location"
    title = "Location"
  }
}

/* This collection group will send the status of gexecd for this host
   every 300 secs. */
/* Unlike 2.5.x, the default behavior is to report gexecd OFF. */
collection_group {
  collect_once = yes
  time_threshold = 300
  metric {
    name = "gexec"
    title = "Gexec Status"
  }
}

/* This collection group will collect the CPU status info every 20 secs.
   The time threshold is set to 90 seconds. In truth, this
   time_threshold could be set significantly higher to reduce
   unnecessary network chatter. */
collection_group {
  collect_every = 20
  time_threshold = 90
  /* CPU status */
  metric {
    name = "cpu_user"
    value_threshold = "1.0"
    title = "CPU User"
  }
  metric {
    name = "cpu_system"
    value_threshold = "1.0"
    title = "CPU System"
  }
  metric {
    name = "cpu_idle"
    value_threshold = "5.0"
    title = "CPU Idle"
  }
  metric {
    name = "cpu_nice"
    value_threshold = "1.0"
    title = "CPU Nice"
  }
  metric {
    name = "cpu_aidle"
    value_threshold = "5.0"
    title = "CPU aidle"
  }
  metric {
    name = "cpu_wio"
    value_threshold = "1.0"
    title = "CPU wio"
  }
  metric {
    name = "cpu_steal"
    value_threshold = "1.0"
    title = "CPU steal"
  }
  /* The next two metrics are optional; uncomment them if you want more
     detail, since they are already accounted for in cpu_system.
  metric {
    name = "cpu_intr"
    value_threshold = "1.0"
    title = "CPU intr"
  }
  metric {
    name = "cpu_sintr"
    value_threshold = "1.0"
    title = "CPU sintr"
  }
  */
}

collection_group {
  collect_every = 20
  time_threshold = 90
  /* Load Averages */
  metric {
    name = "load_one"
    value_threshold = "1.0"
    title = "One Minute Load Average"
  }
  metric {
    name = "load_five"
    value_threshold = "1.0"
    title = "Five Minute Load Average"
  }
  metric {
    name = "load_fifteen"
    value_threshold = "1.0"
    title = "Fifteen Minute Load Average"
  }
}

/* This group collects the number of running and total processes */
collection_group {
  collect_every = 80
  time_threshold = 950
  metric {
    name = "proc_run"
    value_threshold = "1.0"
    title = "Total Running Processes"
  }
  metric {
    name = "proc_total"
    value_threshold = "1.0"
    title = "Total Processes"
  }
}

/* This collection group grabs the volatile memory metrics every 40 secs and
   sends them at least every 180 secs. This time_threshold can be increased
   significantly to reduce unneeded network traffic. */
collection_group {
  collect_every = 40
  time_threshold = 180
  metric {
    name = "mem_free"
    value_threshold = "1024.0"
    title = "Free Memory"
  }
  metric {
    name = "mem_shared"
    value_threshold = "1024.0"
    title = "Shared Memory"
  }
  metric {
    name = "mem_buffers"
    value_threshold = "1024.0"
    title = "Memory Buffers"
  }
  metric {
    name = "mem_cached"
    value_threshold = "1024.0"
    title = "Cached Memory"
  }
  metric {
    name = "swap_free"
    value_threshold = "1024.0"
    title = "Free Swap Space"
  }
}

collection_group {
  collect_every = 40
  time_threshold = 300
  metric {
    name = "bytes_out"
    value_threshold = 4096
    title = "Bytes Sent"
  }
  metric {
    name = "bytes_in"
    value_threshold = 4096
    title = "Bytes Received"
  }
  metric {
    name = "pkts_in"
    value_threshold = 256
    title = "Packets Received"
  }
  metric {
    name = "pkts_out"
    value_threshold = 256
    title = "Packets Sent"
  }
}

/* Different from the 2.5.x default, since the old config made no sense */
collection_group {
  collect_every = 1800
  time_threshold = 3600
  metric {
    name = "disk_total"
    value_threshold = 1.0
    title = "Total Disk Space"
  }
}

collection_group {
  collect_every = 40
  time_threshold = 180
  metric {
    name = "disk_free"
    value_threshold = 1.0
    title = "Disk Space Available"
  }
  metric {
    name = "part_max_used"
    value_threshold = 1.0
    title = "Maximum Disk Space Used"
  }
}

include ("/usr/local/etc/conf.d/*.conf")
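
/* For illustration only: a drop-in fragment such as
   /usr/local/etc/conf.d/local.conf (filename assumed) can extend this
   configuration without editing this file, for example:

   udp_send_channel {
     host = backup-collector.example.com
     port = 8649
   }
*/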