[Main]

# The host of the Datadog intake server to send Agent data to
dd_url: https://app.datadoghq.com

# If you need a proxy to connect to the Internet, provide the settings here (default: disabled)
# proxy_host: my-proxy.com
# proxy_port: 3128
# proxy_user: user
# proxy_password: password
# To be used with some proxies that return a 302, which makes curl switch from POST to GET
# See http://stackoverflow.com/questions/8156073/curl-violate-rfc-2616-10-3-2-and-switch-from-post-to-get
# proxy_forbid_method_switch: no

# If you run the agent behind haproxy, you might want to enable this
# skip_ssl_validation: no

# The Datadog api key to associate your Agent's data with your organization.
# Can be found here:
# https://app.datadoghq.com/account/settings
# This can be a comma-separated list of api keys.
# (default: None, the agent doesn't start without it)
api_key:

# Force the hostname to whatever you want. (default: auto-detected)
# hostname: mymachine.mydomain

# Set the host's tags (optional)
# tags: mytag, env:prod, role:database

# Set the timeout in seconds for outgoing requests to Datadog. (default: 20)
# When a request times out, it will be retried after some time.
# It will only be dropped if the forwarder queue becomes too big. (30 MB by default)
# forwarder_timeout: 20

# Set timeout in seconds for integrations that use HTTP to fetch metrics, since
# unbounded timeouts can potentially block the collector indefinitely and cause
# problems!
# default_integration_http_timeout: 9

# Add one "dd_check:checkname" tag per running check. It makes it possible to slice
# and dice per monitored app (= running Agent Check) on Datadog's backend.
# create_dd_check_tags: no

# Collect AWS EC2 custom tags as agent tags (requires an IAM role associated with the instance)
# collect_ec2_tags: no
# Incorporate security-groups into tags collected from AWS EC2
# collect_security_groups: no

# Enable Agent Developer Mode
# Agent Developer Mode collects and sends more fine-grained metrics about agent and check performance
# developer_mode: no
# In developer mode, the number of runs to be included in a single collector profile
# collector_profile_interval: 20

# use unique hostname for GCE hosts, see http://dtdg.co/1eAynZk
# when not specified, default: no
gce_updated_hostname: yes

# Set the threshold for accepting points to allow anything
# within recent_point_threshold seconds (default: 30)
# recent_point_threshold: 30

# Use mount points instead of volumes to track disk and fs metrics
# DEPRECATED: use conf.d/disk.yaml instead to configure it
# use_mount: no

# Forwarder listening port
# listen_port: 17123

# Graphite listener port
# graphite_listen_port: 17124

# Additional directory to look for Datadog checks (optional)
# additional_checksd: /etc/dd-agent/checks.d/
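#
# Checks dropped in such a directory use the standard AgentCheck interface;
# a minimal sketch (the file path and check name below are illustrative, not
# part of this file):
#
#   # e.g. /etc/dd-agent/checks.d/mycheck.py
#   from checks import AgentCheck
#
#   class MyCheck(AgentCheck):
#       def check(self, instance):
#           # report one gauge per collection cycle
#           self.gauge('mycheck.up', 1)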

# Allow non-local traffic to this Agent
# This is required when using this Agent as a proxy for other Agents
# that might not have an internet connection
# For more information, please see
# https://github.com/DataDog/dd-agent/wiki/Network-Traffic-and-Proxy-Configuration
# non_local_traffic: no

# Select the Tornado HTTP Client to be used in the Forwarder,
# between curl client and simple http client (default: simple http client)
# use_curl_http_client: no

# The loopback address the Forwarder and Dogstatsd will bind to.
# Optional, it is mainly used when running the agent on Openshift
# bind_host: localhost

# If enabled, the collector will capture a metric for check run times.
# check_timings: no

# If you want to remove the 'ww' flag from the ps command used to catch
# process arguments, for instance for security reasons
# exclude_process_args: no

# Configure the aggregates and percentiles computed for histogram metrics
# (the values below are the defaults).
# histogram_aggregates: max, median, avg, count
# histogram_percentiles: 0.95

# ========================================================================== #
# Service Discovery #
# See https://github.com/DataDog/dd-agent/wiki/Service-Discovery for details #
# ========================================================================== #
#
# Service discovery allows the agent to look for running services
# and load a configuration object for the one it recognizes.
# This feature is disabled by default.
# Uncomment this line to enable it (works for docker containers only for now).
# service_discovery_backend: docker
#
# Define which key/value store must be used to look for configuration templates.
# Default is etcd. Consul is also supported.
# sd_config_backend: etcd
#
# Settings for connecting to the service discovery backend.
# sd_backend_host: 127.0.0.1
# sd_backend_port: 4001
#
# By default, the agent will look for the configuration templates under the
# `/datadog/check_configs` key in the back-end. If you wish otherwise, uncomment this option
# and modify its value.
# sd_template_dir: /datadog/check_configs
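#
# As a sketch of the expected layout (the etcd backend and the nginx image
# name are illustrative assumptions, per the wiki page above), a template is
# three JSON lists stored under that key:
#
#   /datadog/check_configs/nginx/check_names:  ["nginx"]
#   /datadog/check_configs/nginx/init_configs: [{}]
#   /datadog/check_configs/nginx/instances:    [{"nginx_status_url": "http://%%host%%/nginx_status/"}]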
#
# ========================================================================== #
# Other #
# ========================================================================== #
#
# In some environments we may have the procfs file system mounted in a
# miscellaneous location. The procfs_path configuration parameter allows
# us to override the standard default location '/proc'
# procfs_path: /proc

# ========================================================================== #
# DogStatsd configuration #
# DogStatsd is a small server that aggregates your custom app metrics. For #
# usage information, check out http://docs.datadoghq.com/guides/dogstatsd/ #
# ========================================================================== #

# If you don't want to enable the DogStatsd server, set this option to no
# use_dogstatsd: yes

# Make sure your client is sending to the same port.
# dogstatsd_port: 8125
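#
# For instance, with the official datadogpy library, a client pointed at this
# port looks like the following sketch (the metric name is illustrative):
#
#   from datadog import initialize, statsd
#
#   initialize(statsd_host='localhost', statsd_port=8125)
#   statsd.increment('page.views')  # aggregated and flushed by this agent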

# By default dogstatsd will post aggregate metrics to the Agent (which handles
# errors/timeouts/retries/etc). To send directly to the datadog api, set this
# to https://app.datadoghq.com.
# dogstatsd_target: http://localhost:17123

# If you want to forward every packet received by the dogstatsd server
# to another statsd server, uncomment these lines.
# WARNING: Make sure that forwarded packets are regular statsd packets and not "dogstatsd" packets,
# as your other statsd server might not be able to handle them.
# statsd_forward_host: address_of_own_statsd_server
# statsd_forward_port: 8125

# you may want all statsd metrics coming from this host to be namespaced
# in some way; if so, configure your namespace here. a metric that looks
# like `metric.name` will instead become `namespace.metric.name`
# statsd_metric_namespace:

# By default, dogstatsd supports only plain ASCII packets. However, most
# (dog)statsd clients support UTF8 by encoding packets before sending them;
# this option enables UTF8 decoding in case you need it.
# However, it comes with a performance overhead of ~10% in the dogstatsd
# server. This will be taken care of properly in the new gen agent core.
# utf8_decoding: false

# ========================================================================== #
# Service-specific configuration #
# ========================================================================== #

# -------------------------------------------------------------------------- #
# Ganglia #
# -------------------------------------------------------------------------- #

# Ganglia host where gmetad is running
# ganglia_host: localhost

# Ganglia port where gmetad is running
# ganglia_port: 8651

# -------------------------------------------------------------------------- #
# Dogstream (log file parser) #
# -------------------------------------------------------------------------- #

# Comma-separated list of logs to parse and optionally custom parsers to use.
# The form should look like this:
#
# dogstreams: /path/to/log1:parsers_module:custom_parser, /path/to/log2, /path/to/log3, ...
#
# Or this:
#
# dogstreams: /path/to/log1:/path/to/my/parsers_module.py:custom_parser, /path/to/log2, /path/to/log3, ...
#
# Each entry is a path to a log file and optionally a Python module/function pair
# separated by colons.
#
# Custom parsers should take 2 parameters: a logger object and
# a string containing the current line to parse. They should return a tuple of
# the form:
# (metric (str), timestamp (unix timestamp), value (float), attributes (dict))
# where attributes should at least contain the key 'metric_type', specifying
# whether the given metric is a 'counter' or 'gauge'.
#
# Unless parsers are specified with an absolute path, the modules must exist in
# the Agent's PYTHONPATH. You can set this as an environment variable when
# starting the Agent. If the name of the custom parser function is not passed,
# 'parser' is assumed.
#
# If this value isn't specified, the default parser assumes this log format:
# metric timestamp value key0=val0 key1=val1 ...
# e.g.: users.online 1489848000 42 metric_type=gauge
#
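# A minimal parser sketch matching the signature described above (the file
# path is reused from the example; the line format is illustrative):
#
#   # /path/to/my/parsers_module.py
#   import time
#
#   def custom_parser(logger, line):
#       # expects lines like "users.online 42"
#       metric, value = line.split()
#       return (metric, int(time.time()), float(value), {'metric_type': 'gauge'})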

# ========================================================================== #
# Custom Emitters #
# ========================================================================== #

# Comma-separated list of emitters to be used in addition to the standard one
#
# Expected to be passed as a comma-separated list of colon-delimited
# name/object pairs.
#
# custom_emitters: /usr/local/my-code/emitters/rabbitmq.py:RabbitMQEmitter
#
# If the name of the emitter function is not specified, 'emitter' is assumed.
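#
# A minimal emitter sketch (path and name taken from the example above; the
# exact call signature can vary by agent version -- this assumes the
# (message, log, agentConfig) form):
#
#   # /usr/local/my-code/emitters/rabbitmq.py
#   def RabbitMQEmitter(message, log, agentConfig):
#       # message is the collector payload (a dict); forward it as needed
#       log.info("RabbitMQEmitter saw %d payload keys", len(message))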


# ========================================================================== #
# Logging
# ========================================================================== #

# log_level: INFO

# collector_log_file: /var/log/datadog/collector.log
# forwarder_log_file: /var/log/datadog/forwarder.log
# dogstatsd_log_file: /var/log/datadog/dogstatsd.log

# if syslog is enabled but a host and port are not set, a local domain socket
# connection will be attempted
#
# log_to_syslog: yes
# syslog_host:
# syslog_port: