Guest User

Airflow Helm deployment with Persistent Volume - Attempt

a guest
Oct 9th, 2019
608
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
YAML 20.22 KB | None | 0 0
  1. # Duplicate this file and put your customization here
  2.  
  3. ##
  4. ## common settings and setting for the webserver
  5. airflow:
  6.   extraConfigmapMounts: []
  7.   # - name: extra-metadata
  8.   #   mountPath: /opt/metadata
  9.   #   configMap: airflow-metadata
  10.   #   readOnly: true
  11.   #
  12.   # Example of configmap mount with subPath
  13.   # - name: extra-metadata
  14.   #   mountPath: /opt/metadata/file.yaml
  15.   #   configMap: airflow-metadata
  16.   #   readOnly: true
  17.   #   subPath: file.yaml
  18.  
  19.  
  20.   ##
  21.   ## Extra environment variables to mount in the web, scheduler, and worker pods:
  22.   extraEnv:
  23.  #  - name: AIRFLOW__CORE__FERNET_KEY
  24.   #    valueFrom:
  25.   #      secretKeyRef:
  26.   #        name: airflow
  27.   #        key: fernet_key
  28.   #  - name: AIRFLOW__LDAP__BIND_PASSWORD
  29.   #    valueFrom:
  30.   #      secretKeyRef:
  31.   #        name: ldap
  32.   #        key: password
  33.  
  34.  
  35.   ##
  36.   ## You will need to define your fernet key:
  37.   ## Generate fernetKey with:
  38.   ##    python -c "from cryptography.fernet import Fernet; FERNET_KEY = Fernet.generate_key().decode(); print(FERNET_KEY)"
  39.   ## fernetKey: ABCDABCDABCDABCDABCDABCDABCDABCDABCDABCD
  40.   fernetKey: ""
  41.   service:
  42.     annotations: {}
  43.     type: ClusterIP
  44.     externalPort: 8080
  45.     nodePort:
  46.       http:
  47.  ##
  48.   ## The executor to use.
  49.   ##
  50.   executor: Celery
  51.   ##
  52.   ## set the max number of retries during container initialization
  53.   initRetryLoop:
  54.  ##
  55.   ## base image for webserver/scheduler/workers
  56.   ## Note: If you want to use airflow HEAD (2.0dev), use the following image:
  57.   # image
  58.   #   repository: stibbons31/docker-airflow-dev
  59.   #   tag: 2.0dev
  60.   ## Airflow 2.0 allows changing the value ingress.web.path and ingress.flower.path (see below).
  61.   ## In version < 2.0, changing these paths won't have any effect.
  62.   image:
  63.    ##
  64.     ## docker-airflow image
  65.     repository: puckel/docker-airflow
  66.     ##
  67.     ## image tag
  68.     tag: 1.10.4
  69.     ##
  70.     ## Image pull policy
  71.     ## values: Always or IfNotPresent
  72.     pullPolicy: IfNotPresent
  73.     ##
  74.     ## image pull secret for private images
  75.     pullSecret:
  76.  ##
  77.   ## Set schedulerNumRuns to control how the scheduler behaves:
  78.   ##   -1 will let it loop indefinitely, but it will never update the DAG
  79.   ##   1 will have the scheduler quit after each refresh, but kubernetes will restart it.
  80.   ##
  81.   ## A long running scheduler process, at least with the CeleryExecutor, ends up not scheduling
  82.   ## some tasks. We still don’t know the exact cause, unfortunately. Airflow has a built-in
  83.   ## workaround in the form of the `num_runs` flag.
  84.   ## Airflow runs with num_runs set to 5.
  85.   ##
  86.   ## If set to a value != -1, you will see your scheduler regularly restart. This is its normal
  87.   ## behavior under these conditions.
  88.   schedulerNumRuns: "-1"
  89.   ##
  90.   ## Set schedulerDoPickle to toggle whether to have the scheduler
  91.   ## attempt to pickle the DAG object to send over to the workers,
  92.   ## instead of letting workers run their version of the code.
  93.   ## See the Airflow documentation for the --do_pickle argument: https://airflow.apache.org/cli.html#scheduler
  94.   schedulerDoPickle: true
  95.   ##
  96.   ## Number of replicas for web server.
  97.   webReplicas: 1
  98.   ##
  99.   ## Custom airflow configuration environment variables
  100.   ## Use this to override any airflow setting by defining environment variables in the
  101.   ## following form: AIRFLOW__<section>__<key>.
  102.   ## See the Airflow documentation: https://airflow.readthedocs.io/en/stable/howto/set-config.html?highlight=setting-configuration
  103.   ## Example:
  104.   ##   config:
  105.   ##     AIRFLOW__CORE__EXPOSE_CONFIG: "True"
  106.   ##     HTTP_PROXY: "http://proxy.mycompany.com:123"
  107.   config: {}
  108.   ##
  109.   ## Configure pod disruption budget for the scheduler
  110.   podDisruptionBudget:
  111.     maxUnavailable: 1
  112.   ## Add custom connections
  113.   ## Use this to add Airflow connections for operators you use
  114.   ## For each connection - the id and type have to be defined.
  115.   ## All the other parameters are optional
  116.   ## Connections will be created with a script that is stored
  117.   ## in a K8s secret and mounted into the scheduler container
  118.   ## Example:
  119.   ##   connections:
  120.   ##   - id: my_aws
  121.   ##     type: aws
  122.   ##     extra: '{"aws_access_key_id": "**********", "aws_secret_access_key": "***", "region_name":"eu-central-1"}'
  123.   connections: []
  124.  
  125.   ## Add airflow variables
  126.   ## This should be a json string with your variables in it
  127.   ## Examples:
  128.   ##   variables: '{ "environment": "dev" }'
  129.   variables: {}
  130.  
  131.   ## Add airflow pools
  132.   ## This should be a json string with your pools in it
  133.   ## Examples:
  134.   ##   pools: '{ "example": { "description": "This is an example of a pool", "slots": 2 } }'
  135.   pools: {}
  136.  
  137.   ##
  138.   ## Annotations for the Scheduler, Worker and Web pods
  139.   podAnnotations: {}
  140.     ## Example:
  141.     ## iam.amazonaws.com/role: airflow-Role
  142.  
  143.   extraInitContainers: []
  144.   ## Additional init containers to run before the Scheduler pods.
  145.   ## This could, for example, be used to run a sidecar that chowns the logs storage.
  146.   # - name: volume-mount-hack
  147.   #   image: busybox
  148.   #   command: ["sh", "-c", "chown -R 1000:1000 logs"]
  149.   #   volumeMounts:
  150.   #     - mountPath: /usr/local/airflow/logs
  151.   #       name: logs-data
  152.  
  153.   extraContainers: []
  154.   ## Additional containers to run alongside the Scheduler, Worker and Web pods
  155.   ## This could, for example, be used to run a sidecar that syncs DAGs from object storage.
  156.   # - name: s3-sync
  157.   #   image: my-user/s3sync:latest
  158.   #   volumeMounts:
  159.   #   - name: synchronised-dags
  160.   #     mountPath: /dags
  161.   ## Additional volumeMounts to the main containers in the Scheduler, Worker and Web pods.
  162.   ## NOTE(review): this must be a list of volumeMount objects — a bare path string is not valid here.
  163.   extraVolumeMounts:
  164.     - name: dags
  165.       mountPath: /usr/local/airflow/dags
  166.   ## Additional volumes for the Scheduler, Worker and Web pods.
  167.   ## NOTE(review): presumably the intent was to mount a host directory into the pods — confirm the hostPath.
  168.   extraVolumes:
  169.     - name: dags
  170.       hostPath:
  171.         path: /home/*user*/github/airflowDAGs
  169.  
  170.   ##
  171.   ## Run initdb when the scheduler starts.
  172.   initdb: true
  173.  
  174.  
  175. scheduler:
  176.   resources: {}
  177.     # limits:
  178.     #   cpu: "1000m"
  179.     #   memory: "1Gi"
  180.     # requests:
  181.     #   cpu: "500m"
  182.     #   memory: "512Mi"
  183.  
  184.   ## Support Node, affinity and tolerations for scheduler pod assignment
  185.   ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
  186.   ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  187.   ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
  188.   nodeSelector: {}
  189.   affinity: {}
  190.   tolerations: []
  191.  
  192. flower:
  193.   resources: {}
  194.     # limits:
  195.     #   cpu: "100m"
  196.     #   memory: "128Mi"
  197.     # requests:
  198.     #   cpu: "100m"
  199.     #   memory: "128Mi"
  200.   service:
  201.     annotations: {}
  202.     type: ClusterIP
  203.     externalPort: 5555
  204.  
  205.   ## Support Node, affinity and tolerations for flower pod assignment
  206.   ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
  207.   ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  208.   ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
  209.   nodeSelector: {}
  210.   affinity: {}
  211.   tolerations: []
  212.  
  213. web:
  214.   resources: {}
  215.     # limits:
  216.     #   cpu: "300m"
  217.     #   memory: "1Gi"
  218.     # requests:
  219.     #   cpu: "100m"
  220.     #   memory: "512Mi"
  221.   initialStartupDelay: "60"
  222.   initialDelaySeconds: "360"
  223.  
  224.   ## Support Node, affinity and tolerations for web pod assignment
  225.   ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
  226.   ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  227.   ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
  228.   nodeSelector: {}
  229.   affinity: {}
  230.   tolerations: []
  231.   ##
  232.   ## Directory in which to mount secrets on webserver nodes.
  233.   secretsDir: /var/airflow/secrets
  234.   ##
  235.   ## Secrets which will be mounted as a file at `secretsDir/<secret name>`.
  236.   secrets: []
  237.  
  238. ##
  239. ## Workers configuration
  240. workers:
  241.   enabled: true
  242.   ##
  243.   ## Number of workers pod to launch
  244.   replicas: 1
  245.   ##
  246.   ## Custom resource configuration
  247.   resources: {}
  248.     # limits:
  249.     #   cpu: "1"
  250.     #   memory: "2G"
  251.     # requests:
  252.     #   cpu: "0.5"
  253.     #   memory: "512Mi"
  254.   ##
  255.   ## Annotations for the Worker pods
  256.   podAnnotations: {}
  257.     ## Example:
  258.     ## iam.amazonaws.com/role: airflow-Role
  259.   ##
  260.   ## Celery worker configuration
  261.   celery:
  262.    ##
  263.     ## number of parallel celery tasks per worker
  264.     instances: 1
  265.   ##
  266.   ## Directory in which to mount secrets on worker nodes.
  267.   secretsDir: /var/airflow/secrets
  268.   ##
  269.   ## Secrets which will be mounted as a file at `secretsDir/<secret name>`.
  270.   secrets: []
  271.  
  272.   ## Support Node, affinity and tolerations for worker pod assignment
  273.   ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
  274.   ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  275.   ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
  276.   nodeSelector: {}
  277.   affinity: {}
  278.   tolerations: []
  279.  
  280. ##
  281. ## Ingress configuration
  282. ingress:
  283.  ##
  284.   ## enable ingress
  285.   ## Note: If you want to change url prefix for web ui or flower even if you do not use ingress,
  286.   ## you can still change ingress.web.path and ingress.flower.path
  287.   enabled: false
  288.   ##
  289.   ## Configure the webserver endpoint
  290.   web:
  291.    ## NOTE: This requires an airflow version > 1.9.x
  292.     ## For the moment (March 2018) this is **not** available on official package, you will have
  293.     ## to use an image where airflow has been updated to its current HEAD.
  294.     ## You can use the following one:
  295.     ##  stibbons31/docker-airflow-dev:2.0dev
  296.     ##
  297.     ## if path is '/airflow':
  298.     ##  - UI will be accessible at 'http://mycompany.com/airflow/admin'
  299.     ##  - Healthcheck is at 'http://mycompany.com/airflow/health'
  300.     ##  - api is at 'http://mycompany.com/airflow/api'
  301.     ## NOTE: do NOT keep a trailing slash. For root configuration, set an empty string
  302.     path: ""
  303.     ##
  304.     ## hostname for the webserver
  305.     host: ""
  306.     ##
  307.     ## Annotations for the webserver
  308.     ## Airflow webserver handles relative path completely, just let your load balancer give the HTTP
  309.     ## header like the requested URL (no special configuration neeed)
  310.     annotations: {}
  311.       ##
  312.       ## Example for Traefik:
  313.       # traefik.frontend.rule.type: PathPrefix
  314.       # kubernetes.io/ingress.class: traefik
  315.     ##
  316.     ## Configure the web liveness path.
  317.     ## Defaults to the templated value `{{ ingress.web.path }}/health`
  318.     livenessPath:
  319.     tls:
  320.      ## Set to "true" to enable TLS termination at the ingress
  321.       enabled: false
  322.       ## If enabled, set "secretName" to the secret containing the TLS private key and certificate
  323.       ## Example:
  324.       ## secretName: example-com-crt
  325.   ##
  326.   ## Configure the flower endpoint
  327.   flower:
  328.    ##
  329.     ## If flower is '/airflow/flower':
  330.     ##  - Flower UI is at 'http://mycompany.com/airflow/flower'
  331.     ## NOTE: you need to have a reverse proxy/load balancer able to do URL rewrite in order to have
  332.     ## flower mounted on other path than root. Flower only does half the job in url prefixing: it
  333.     ## only generates the right URL/relative paths in the **returned HTML files**, but expects the
  334.     ## request to have been made at the root.
  335.     ## That's why we need a reverse proxy/load balancer that is able to strip the path
  336.     ## NOTE: do NOT keep a trailing slash. For root configuration, set an empty string
  337.     path: ""
  338.     ##
  339.     ## Configure the liveness path. Keep to "/" for Flower >= jan 2018.
  340.     ## For previous version, enter the same path than in the 'path' key
  341.     ## NOTE: keep the trailing slash.
  342.     livenessPath: /
  343.     ##
  344.     ## hostname for flower
  345.     host: ""
  346.     ##
  347.     ## Annotation for the Flower endpoint
  348.     ##
  349.     ## ==== SKIP THE FOLLOWING BLOCK IF YOU HAVE FLOWER > JANUARY 2018 =============================
  350.     ## Please note there is a small difference between the way the Airflow web server and Flower handle
  351.     ## URL prefixes in HTTP requests:
  352.     ## Flower wants the HTTP headers to behave like there was no URL prefix, but still generates
  353.     ## the right URL in html pages thanks to its `--url-prefix` parameter
  354.     ##
  355.     ##    Extracted from the Flower documentation:
  356.     ##    (https://github.com/mher/flower/blob/master/docs/config.rst#url_prefix)
  357.     ##
  358.     ##        To access Flower on http://example.com/flower run it with:
  359.     ##            flower --url-prefix=/flower
  360.     ##
  361.     ##        Use the following nginx configuration:
  362.     ##            server {
  363.     ##              listen 80;
  364.     ##              server_name example.com;
  365.     ##
  366.     ##              location /flower/ {
  367.     ##                rewrite ^/flower/(.*)$ /$1 break;
  368.     ##                proxy_pass http://example.com:5555;
  369.     ##                proxy_set_header Host $host;
  370.     ##              }
  371.     ##            }
  372.     ## ==== IF YOU HAVE FLOWER > JANUARY 2018, NO MORE NEED TO STRIP THE PREFIX ====================
  373.     annotations: {}
  374.       ##
  375.       ## NOTE: it is important here to have your reverse proxy strip the path/rewrite the URL
  376.       ## Example for Traefik:
  377.       # traefik.frontend.rule.type: PathPrefix       ## Flower >= Jan 2018
  378.       # traefik.frontend.rule.type: PathPrefixStrip  ## Flower < Jan 2018
  379.       # kubernetes.io/ingress.class: traefik
  380.     tls:
  381.      ## Set to "true" to enable TLS termination at the ingress
  382.       enabled: false
  383.       ## If enabled, set "secretName" to the secret containing the TLS private key and certificate
  384.       ## Example:
  385.       ## secretName: example-com-crt
  386.  
  387.  
  388. ##
  389. ## Storage configuration for DAGs
  390. persistence:
  391.  ##
  392.   ## enable persistence storage
  393.   enabled: false
  394.   ##
  395.   ## Existing claim to use
  396.   # existingClaim: nil
  397.   ## Existing claim's subPath to use, e.g. "dags" (optional)
  398.   # subPath: ""
  399.   ##
  400.   ## Persistent Volume Storage Class
  401.   ## If defined, storageClassName: <storageClass>
  402.   ## If set to "-", storageClassName: "", which disables dynamic provisioning
  403.   ## If undefined (the default) or set to null, no storageClassName spec is
  404.   ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
  405.   ##   GKE, AWS & OpenStack)
  406.   # storageClass: default
  407.   accessMode: ReadWriteOnce
  408.   ##
  409.   ## Persistent storage size request
  410.   size: 1Gi
  411.  
  412. ##
  413. ## Storage configuration for logs
  414. logsPersistence:
  415.  ##
  416.   ## enable persistence storage
  417.   enabled: false
  418.   ##
  419.   ## Existing claim to use
  420.   # existingClaim: nil
  421.   ## Existing claim's subPath to use, e.g. "logs" (optional)
  422.   # subPath: ""
  423.   ##
  424.   ## Persistent Volume Storage Class
  425.   ## If defined, storageClassName: <storageClass>
  426.   ## If set to "-", storageClassName: "", which disables dynamic provisioning
  427.   ## If undefined (the default) or set to null, no storageClassName spec is
  428.   ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
  429.   ##   GKE, AWS & OpenStack)
  430.   ##
  431.   ## A configuration for shared log storage requires a `storageClass` that
  432.   ## supports the `ReadWriteMany` accessMode, such as NFS or AWS EFS.
  433.   # storageClass: default
  434.   accessMode: ReadWriteOnce
  435.   ##
  436.   ## Persistent storage size request
  437.   size: 1Gi
  438.  
  439. ##
  440. ## Configure DAGs deployment and update
  441. dags:
  442.  ##
  443.   ## mount path for persistent volume.
  444.   ## Note that this location is referred to in airflow.cfg, so if you change it, you must update airflow.cfg accordingly.
  445.   path: /home/*user*/github/airflowDAGs/dags
  446.   ##
  447.   ## Set to True to prevent pickling DAGs from scheduler to workers
  448.   doNotPickle: false
  449.   ##
  450.   ## Configure Git repository to fetch DAGs
  451.   git:
  452.    ##
  453.     ## url to clone the git repository
  454.     url:
  455.    ##
  456.     ## branch name, tag or sha1 to reset to
  457.     ref: master
  458.     ## pre-created secret with key, key.pub and known_hosts file for private repos
  459.     secret: ""
  460.   initContainer:
  461.    ## Fetch the source code when the pods starts
  462.     enabled: false
  463.     ## Image for the init container (any image with git will do)
  464.     image:
  465.      ## docker-airflow image
  466.       repository: alpine/git
  467.       ## image tag
  468.       tag: 1.0.7
  469.       ## Image pull policy
  470.       ## values: Always or IfNotPresent
  471.       pullPolicy: IfNotPresent
  472.     ## install requirements.txt dependencies automatically
  473.     installRequirements: true
  474.  
  475. ##
  476. ## Configure logs
  477. logs:
  478.   path: /home/*user*/github/airflowDAGs/logs
  479.  
  480. ##
  481. ##  Enable RBAC
  482. rbac:
  483.  ##
  484.   ## Specifies whether RBAC resources should be created
  485.   create: true
  486.  
  487. ##
  488. ## Create or use ServiceAccount
  489. serviceAccount:
  490.  ##
  491.   ## Specifies whether a ServiceAccount should be created
  492.   create: true
  493.   ## The name of the ServiceAccount to use.
  494.   ## If not set and create is true, a name is generated using the fullname template
  495.   name:
  496.  
  497. ##
  498. ## Configuration values for the postgresql dependency.
  499. ## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md
  500. postgresql:
  501.  ##
  502.   ## Use the PostgreSQL chart dependency.
  503.   ## Set to false if bringing your own PostgreSQL.
  504.   enabled: true
  505.  
  506.   ##
  507.   ## The name of an existing secret that contains the postgres password.
  508.   existingSecret:
  509.   ## Name of the key containing the secret.
  510.   existingSecretKey: postgres-password
  511.  
  512.   ##
  513.   ## If you are bringing your own PostgreSQL, you should set postgresHost and
  514.   ## also probably service.port, postgresUser, postgresPassword, and postgresDatabase
  515.   ## postgresHost:
  516.   ##
  517.   ## PostgreSQL port
  518.   service:
  519.     port: 5432
  520.   ## PostgreSQL User to create.
  521.   postgresUser: postgres
  522.   ##
  523.   ## PostgreSQL Password for the new user.
  524.   ## If not set, a random 10 characters password will be used.
  525.   postgresPassword: airflow
  526.   ##
  527.   ## PostgreSQL Database to create.
  528.   postgresDatabase: airflow
  529.   ##
  530.   ## Persistent Volume Storage configuration.
  531.   ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes
  532.   persistence:
  533.    ##
  534.     ## Enable PostgreSQL persistence using Persistent Volume Claims.
  535.     enabled: true
  536.     ##
  537.     ## Persistant class
  538.     # storageClass: classname
  539.     ##
  540.     ## Access mode:
  541.     accessMode: ReadWriteOnce
  542.  
  543.  
  544. ## Configuration values for the Redis dependency.
  545. ## ref: https://github.com/kubernetes/charts/blob/master/stable/redis/README.md
  546. redis:
  547.  ##
  548.   ## Use the redis chart dependency.
  549.   ## Set to false if bringing your own redis.
  550.   enabled: true
  551.  
  552.   ##
  553.   ## The name of an existing secret that contains the redis password.
  554.   existingSecret:
  555.   ## Name of the key containing the secret.
  556.   existingSecretKey: redis-password
  557.  
  558.   ##
  559.   ## If you are bringing your own redis, you can set the host in redisHost.
  560.   ## redisHost:
  561.   ##
  562.   ## Redis password
  563.   ##
  564.   password: airflow
  565.   ##
  566.   ## Master configuration
  567.   master:
  568.    ##
  569.     ## Image configuration
  570.     # image:
  571.       ##
  572.       ## docker registry secret names (list)
  573.       # pullSecrets: nil
  574.     ##
  575.     ## Configure persistance
  576.     persistence:
  577.      ##
  578.       ## Use a PVC to persist data.
  579.       enabled: false
  580.       ##
  581.       ## Persistant class
  582.       # storageClass: classname
  583.       ##
  584.       ## Access mode:
  585.       accessMode: ReadWriteOnce
  586.   ##
  587.   ## Disable cluster management by default.
  588.   cluster:
  589.     enabled: false
  590.  
  591. # Enable this if you're using https://github.com/coreos/prometheus-operator
  592. # Don't forget you need to install something like https://github.com/epoch8/airflow-exporter in your airflow docker container
  593. serviceMonitor:
  594.   enabled: false
  595.   interval: "30s"
  596.   path: /admin/metrics
  597.   ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters)
  598.   selector:
  599.     prometheus: kube-prometheus
  600.  
  601. # Enable this if you're using https://github.com/coreos/prometheus-operator
  602. prometheusRule:
  603.   enabled: false
  604.   ## Namespace in which the prometheus rule is created
  605.   # namespace: monitoring
  606.   ## Define individual alerting rules as required
  607.   ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#rulegroup
  608.   ##      https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/
  609.   groups: {}
  610.  
  611.   ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Prometheus Rules to work with
  612.   ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
  613.   additionalLabels: {}
Add Comment
Please, Sign In to add comment