- sudo -E pip-3.6 install apache-airflow[celery,devel,postgres]
- sudo -E pip-3.6 install apache-airflow[all]
- [ec2-user@ip-1-2-3-4 ~]$ airflow version
- [2018-08-29 16:09:59,088] {{__init__.py:51}} INFO - Using executor SequentialExecutor
- (Airflow ASCII-art startup banner)
- v1.10.0
- [2018-08-29 16:14:01,114] {{sequential_executor.py:52}} ERROR - Failed to execute task Command 'airflow run ScheduleTest successful 2018-08-29T19:00:00+00:00 --local -sd /home/ec2-user/{AIRFLOW_HOME}/dags/SchedulerTest.py' returned non-zero exit status 1..
- [2018-08-29 16:14:01,115] {{jobs.py:1443}} INFO - Executor reports ScheduleTest.successful execution_date=2018-08-29 19:00:00+00:00 as failed
- [2018-08-29 16:14:01,119] {{jobs.py:1459}} ERROR - Executor reports task instance <TaskInstance: ScheduleTest.successful 2018-08-29 19:00:00+00:00 [queued]> finished (failed) although the task says its queued. Was the task killed externally?
- [2018-08-29 16:14:01,120] {{models.py:258}} INFO - Filling up the DagBag from /home/ec2-user/{AIRFLOW_HOME}/dags/SchedulerTest.py
- [2018-08-29 16:14:01,540] {{models.py:1736}} ERROR - Executor reports task instance <TaskInstance: ScheduleTest.successful 2018-08-29 19:00:00+00:00 [queued]> finished (failed) although the task says its queued. Was the task killed externally?
- NoneType: None
- [2018-08-29 16:14:01,541] {{models.py:1764}} INFO - Marking task as FAILED.
- [2018-08-29 16:14:01,740] {{models.py:1778}} ERROR - Failed at executing callback
- [2018-08-29 16:14:01,740] {{models.py:1779}} ERROR - Bash command failed
- Traceback (most recent call last):
- File "/usr/local/lib/python3.6/site-packages/airflow/models.py", line 1776, in handle_failure
- task.on_failure_callback(context)
- File "/home/ec2-user/{AIRFLOW_HOME}/dags/SchedulerTest.py", line 35, in on_failure_callback
- return failure_task_notification.execute(context=context)
- File "/usr/local/lib/python3.6/site-packages/airflow/operators/bash_operator.py", line 118, in execute
- raise AirflowException("Bash command failed")
- airflow.exceptions.AirflowException: Bash command failed
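
The traceback above shows a secondary failure: the on_failure_callback in SchedulerTest.py re-runs a BashOperator (failure_task_notification), and when that command exits non-zero the callback itself dies with AirflowException("Bash command failed"). Below is a rough sketch of what such a DAG file could look like; the dag_id, task_id and callback shape are taken from the log, while the bash commands and schedule are assumptions, not the contents of the real SchedulerTest.py.

from datetime import datetime, timedelta

from airflow import DAG
from airflow.operators.bash_operator import BashOperator


def on_failure_callback(context):
    # Running a BashOperator directly inside the failure callback: if this
    # command exits non-zero, execute() raises AirflowException("Bash command
    # failed"), which the scheduler logs as "Failed at executing callback".
    failure_task_notification = BashOperator(
        task_id='failure_task_notification',
        bash_command='echo "ScheduleTest failed"',  # assumed notification command
    )
    return failure_task_notification.execute(context=context)


dag = DAG(
    dag_id='ScheduleTest',
    start_date=datetime(2018, 8, 29),
    schedule_interval=timedelta(hours=1),           # assumed schedule
    default_args={'on_failure_callback': on_failure_callback},
)

successful = BashOperator(
    task_id='successful',
    bash_command='date',  # assumed; the real command is not shown in the log
    dag=dag,
)

The primary error is the one on the first ERROR line: the SequentialExecutor's 'airflow run ScheduleTest successful ...' subprocess exited with status 1 before the task instance left the queued state, which is why the scheduler then reports it as failed "although the task says its queued". Airflow's default configuration template (from which airflow.cfg is generated) follows.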
- # -*- coding: utf-8 -*-
- #
- # Licensed to the Apache Software Foundation (ASF) under one
- # or more contributor license agreements. See the NOTICE file
- # distributed with this work for additional information
- # regarding copyright ownership. The ASF licenses this file
- # to you under the Apache License, Version 2.0 (the
- # "License"); you may not use this file except in compliance
- # with the License. You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing,
- # software distributed under the License is distributed on an
- # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- # KIND, either express or implied. See the License for the
- # specific language governing permissions and limitations
- # under the License.
- # This is the template for Airflow's default configuration. When Airflow is
- # imported, it looks for a configuration file at $AIRFLOW_HOME/airflow.cfg. If
- # it doesn't exist, Airflow uses this template to generate it by replacing
- # variables in curly braces with their global values from configuration.py.
- # Users should not modify this file; they should customize the generated
- # airflow.cfg instead.
- # ----------------------- TEMPLATE BEGINS HERE -----------------------
- [core]
- # The home folder for airflow, default is ~/airflow
- airflow_home = {AIRFLOW_HOME}
- # The folder where your airflow pipelines live, most likely a
- # subfolder in a code repository
- # This path must be absolute
- dags_folder = {AIRFLOW_HOME}/dags
- # The folder where airflow should store its log files
- # This path must be absolute
- base_log_folder = {AIRFLOW_HOME}/logs
- # Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elastic Search.
- # Users must supply an Airflow connection id that provides access to the storage
- # location. If remote_logging is set to true, see UPDATING.md for additional
- # configuration requirements.
- remote_logging = False
- remote_log_conn_id =
- remote_base_log_folder =
- encrypt_s3_logs = False
- # Logging level
- logging_level = INFO
- fab_logging_level = WARN
- # Logging class
- # Specify the class that will specify the logging configuration
- # This class has to be on the python classpath
- # logging_config_class = my.path.default_local_settings.LOGGING_CONFIG
- logging_config_class = airflow_local_settings.DEFAULT_LOGGING_CONFIG
- # Log format
- # we need to escape the curly braces by adding an additional curly brace
- log_format = [%%(asctime)s] {{%%(filename)s:%%(lineno)d}} %%(levelname)s - %%(message)s
- simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s
- # Log filename format
- # we need to escape the curly braces by adding an additional curly brace
- log_filename_template = {{{{ ti.dag_id }}}}/{{{{ ti.task_id }}}}/{{{{ ts }}}}/{{{{ try_number }}}}.log
- log_processor_filename_template = {{{{ filename }}}}.log
- # Hostname by providing a path to a callable, which will resolve the hostname
- hostname_callable = socket:getfqdn
- # Default timezone in case supplied date times are naive
- # can be utc (default), system, or any IANA timezone string (e.g. Europe/Amsterdam)
- default_timezone = utc
- # The executor class that airflow should use. Choices include
- # SequentialExecutor, LocalExecutor, CeleryExecutor, DaskExecutor
- executor = SequentialExecutor
- # The SqlAlchemy connection string to the metadata database.
- # SqlAlchemy supports many different database engines; more information on
- # their website
- sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/airflow.db
- # If SqlAlchemy should pool database connections.
- sql_alchemy_pool_enabled = True
- # The SqlAlchemy pool size is the maximum number of database connections
- # in the pool. 0 indicates no limit.
- sql_alchemy_pool_size = 5
- # The SqlAlchemy pool recycle is the number of seconds a connection
- # can be idle in the pool before it is invalidated. This config does
- # not apply to sqlite. If the number of DB connections is ever exceeded,
- # a lower config value will allow the system to recover faster.
- sql_alchemy_pool_recycle = 1800
- # How many seconds to retry re-establishing a DB connection after
- # disconnects. Setting this to 0 disables retries.
- sql_alchemy_reconnect_timeout = 300
- # The amount of parallelism as a setting to the executor. This defines
- # the max number of task instances that should run simultaneously
- # on this airflow installation
- parallelism = 32
- # The number of task instances allowed to run concurrently by the scheduler
- dag_concurrency = 16
- # Are DAGs paused by default at creation
- dags_are_paused_at_creation = True
- # When not using pools, tasks are run in the "default pool",
- # whose size is guided by this config element
- non_pooled_task_slot_count = 128
- # The maximum number of active DAG runs per DAG
- max_active_runs_per_dag = 16
- # Whether to load the examples that ship with Airflow. It's good to
- # get started, but you probably want to set this to False in a production
- # environment
- load_examples = True
- # Where your Airflow plugins are stored
- plugins_folder = {AIRFLOW_HOME}/plugins
- # Secret key to save connection passwords in the db
- fernet_key=ZFz1t3rs5fHD_vdxiBISbr23mhnigDB7YeN_Zek=
- # Whether to disable pickling dags
- donot_pickle = False
- # How long before timing out a python file import while filling the DagBag
- dagbag_import_timeout = 30
- # The class to use for running task instances in a subprocess
- task_runner = StandardTaskRunner
- # If set, tasks without a `run_as_user` argument will be run with this user
- # Can be used to de-elevate a sudo user running Airflow when executing tasks
- default_impersonation =
- # What security module to use (for example kerberos):
- security =
- # If set to False, this enables some insecure features like Charts and Ad Hoc Queries.
- # In 2.0 will default to True.
- secure_mode = False
- # Turn unit test mode on (overwrites many configuration options with test
- # values at runtime)
- unit_test_mode = False
- # Name of handler to read task instance logs.
- # Default to use task handler.
- task_log_reader = task
- # Whether to enable pickling for xcom (note that this is insecure and allows for
- # RCE exploits). This will be deprecated in Airflow 2.0 (it will be forced to False).
- enable_xcom_pickling = True
- # When a task is killed forcefully, this is the amount of time in seconds that
- # it has to cleanup after it is sent a SIGTERM, before it is SIGKILLED
- killed_task_cleanup_time = 60
- # Whether to override params with dag_run.conf. If you pass some key-value pairs through `airflow backfill -c` or
- # `airflow trigger_dag -c`, the key-value pairs will override the existing ones in params.
- dag_run_conf_overrides_params = False
- # Worker initialisation check to validate Metadata Database connection
- worker_precheck = False
- [cli]
- # In what way should the cli access the API. The LocalClient will use the
- # database directly, while the json_client will use the api running on the
- # webserver
- api_client = airflow.api.client.local_client
- # If you set web_server_url_prefix, do NOT forget to append it here, ex:
- # endpoint_url = http://localhost:8080/myroot
- # So api will look like: http://localhost:8080/myroot/api/experimental/...
- endpoint_url = http://localhost:8080
- [api]
- # How to authenticate users of the API
- auth_backend = airflow.api.auth.backend.default
- [lineage]
- # what lineage backend to use
- backend =
- [atlas]
- sasl_enabled = False
- host =
- port = 21000
- username =
- password =
- [operators]
- # The default owner assigned to each new operator, unless
- # provided explicitly or passed via `default_args`
- default_owner = Airflow
- default_cpus = 1
- default_ram = 512
- default_disk = 512
- default_gpus = 0
- [hive]
- # Default mapreduce queue for HiveOperator tasks
- default_hive_mapred_queue =
- # Template for mapred_job_name in HiveOperator, supports the following named parameters:
- # hostname, dag_id, task_id, execution_date
- mapred_job_name_template = Airflow HiveOperator task for {{hostname}}.{{dag_id}}.{{task_id}}.{{execution_date}}
- [webserver]
- # The base url of your website as airflow cannot guess what domain or
- # cname you are using. This is used in automated emails that
- # airflow sends to point links to the right web server
- base_url = http://localhost:8080
- # The ip specified when starting the web server
- web_server_host = 0.0.0.0
- # The port on which to run the web server
- web_server_port = 8080
- # Paths to the SSL certificate and key for the web server. When both are
- # provided SSL will be enabled. This does not change the web server port.
- web_server_ssl_cert =
- web_server_ssl_key =
- # Number of seconds the webserver waits before killing gunicorn master that doesn't respond
- web_server_master_timeout = 120
- # Number of seconds the gunicorn webserver waits before timing out on a worker
- web_server_worker_timeout = 120
- # Number of workers to refresh at a time. When set to 0, worker refresh is
- # disabled. When nonzero, airflow periodically refreshes webserver workers by
- # bringing up new ones and killing old ones.
- worker_refresh_batch_size = 1
- # Number of seconds to wait before refreshing a batch of workers.
- worker_refresh_interval = 30
- # Secret key used to run your flask app
- # It should be as random as possible
- secret_key = {SECRET_KEY}
- # Number of workers to run the Gunicorn web server
- workers = 4
- # The worker class gunicorn should use. Choices include
- # sync (default), eventlet, gevent
- worker_class = sync
- # Log files for the gunicorn webserver. '-' means log to stderr.
- access_logfile = -
- error_logfile = -
- # Expose the configuration file in the web server
- expose_config = False
- # Set to true to turn on authentication:
- # https://airflow.incubator.apache.org/security.html#web-authentication
- authenticate = False
- # Filter the list of dags by owner name (requires authentication to be enabled)
- filter_by_owner = False
- # Filtering mode. Choices include user (default) and ldapgroup.
- # Ldap group filtering requires using the ldap backend
- #
- # Note that the ldap server needs the "memberOf" overlay to be set up
- # in order to use the ldapgroup mode.
- owner_mode = user
- # Default DAG view. Valid values are:
- # tree, graph, duration, gantt, landing_times
- dag_default_view = tree
- # Default DAG orientation. Valid values are:
- # LR (Left->Right), TB (Top->Bottom), RL (Right->Left), BT (Bottom->Top)
- dag_orientation = LR
- # Puts the webserver in demonstration mode; blurs the names of Operators for
- # privacy.
- demo_mode = False
- # The amount of time (in secs) webserver will wait for initial handshake
- # while fetching logs from other worker machine
- log_fetch_timeout_sec = 5
- # By default, the webserver shows paused DAGs. Flip this to hide paused
- # DAGs by default
- hide_paused_dags_by_default = False
- # Consistent page size across all listing views in the UI
- page_size = 100
- # Use FAB-based webserver with RBAC feature
- rbac = False
- # Define the color of navigation bar
- navbar_color = #007A87
- # Default dagrun to show in UI
- default_dag_run_display_number = 25
- [email]
- email_backend = airflow.utils.email.send_email_smtp
- [smtp]
- # If you want airflow to send emails on retries and failures, and you want to use
- # the airflow.utils.email.send_email_smtp function, you have to configure an
- # smtp server here
- smtp_host = localhost
- smtp_starttls = True
- smtp_ssl = False
- # Uncomment and set the user/pass settings if you want to use SMTP AUTH
- # smtp_user = airflow
- # smtp_password = airflow
- smtp_port = 25
- smtp_mail_from = airflow@example.com
- [celery]
- # This section only applies if you are using the CeleryExecutor in
- # [core] section above
- # The app name that will be used by celery
- celery_app_name = airflow.executors.celery_executor
- # The concurrency that will be used when starting workers with the
- # "airflow worker" command. This defines the number of task instances that
- # a worker will take, so size up your workers based on the resources on
- # your worker box and the nature of your tasks
- worker_concurrency = 16
- # When you start an airflow worker, airflow starts a tiny web server
- # subprocess to serve the workers local log files to the airflow main
- # web server, which then builds pages and sends them to users. This defines
- # the port on which the logs are served. It needs to be unused, and open and
- # visible from the main web server in order to connect to the workers.
- worker_log_server_port = 8793
- # The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
- # a sqlalchemy database. Refer to the Celery documentation for more
- # information.
- # http://docs.celeryproject.org/en/latest/userguide/configuration.html#broker-settings
- broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow
- # The Celery result_backend. When a job finishes, it needs to update the
- # metadata of the job. Therefore it will post a message on a message bus,
- # or insert it into a database (depending on the backend)
- # This status is used by the scheduler to update the state of the task
- # The use of a database is highly recommended
- # http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-result-backend-settings
- result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
- # Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
- # it `airflow flower`. This defines the IP that Celery Flower runs on
- flower_host = 0.0.0.0
- # The root URL for Flower
- # Ex: flower_url_prefix = /flower
- flower_url_prefix =
- # This defines the port that Celery Flower runs on
- flower_port = 5555
- # Default queue that tasks get assigned to and that workers listen on.
- default_queue = default
- # Import path for celery configuration options
- celery_config_options = airflow.config_templates.default_celery.DEFAULT_CELERY_CONFIG
- # In case of using SSL
- ssl_active = False
- ssl_key =
- ssl_cert =
- ssl_cacert =
- [celery_broker_transport_options]
- # This section is for specifying options which can be passed to the
- # underlying celery broker transport. See:
- # http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_transport_options
- # The visibility timeout defines the number of seconds to wait for the worker
- # to acknowledge the task before the message is redelivered to another worker.
- # Make sure to increase the visibility timeout to match the time of the longest
- # ETA you're planning to use.
- #
- # visibility_timeout is only supported for Redis and SQS celery brokers.
- # See:
- # http://docs.celeryproject.org/en/master/userguide/configuration.html#std:setting-broker_transport_options
- #
- #visibility_timeout = 21600
- [dask]
- # This section only applies if you are using the DaskExecutor in
- # [core] section above
- # The IP address and port of the Dask cluster's scheduler.
- cluster_address = 127.0.0.1:8786
- # TLS/ SSL settings to access a secured Dask scheduler.
- tls_ca =
- tls_cert =
- tls_key =
- [scheduler]
- # Task instances listen for external kill signal (when you clear tasks
- # from the CLI or the UI), this defines the frequency at which they should
- # listen (in seconds).
- job_heartbeat_sec = 5
- # The scheduler constantly tries to trigger new tasks (look at the
- # scheduler section in the docs for more information). This defines
- # how often the scheduler should run (in seconds).
- scheduler_heartbeat_sec = 5
- # after how much time should the scheduler terminate in seconds
- # -1 indicates to run continuously (see also num_runs)
- run_duration = -1
- # after how much time (seconds) new DAGs should be picked up from the filesystem
- min_file_process_interval = 0
- # How often (in seconds) to scan the DAGs directory for new files. Default to 5 minutes.
- dag_dir_list_interval = 300
- # How often should stats be printed to the logs
- print_stats_interval = 30
- child_process_log_directory = {AIRFLOW_HOME}/logs/scheduler
- # Local task jobs periodically heartbeat to the DB. If the job has
- # not heartbeat in this many seconds, the scheduler will mark the
- # associated task instance as failed and will re-schedule the task.
- scheduler_zombie_task_threshold = 300
- # Turn off scheduler catchup by setting this to False.
- # Default behavior is unchanged and
- # Command Line Backfills still work, but the scheduler
- # will not do scheduler catchup if this is False,
- # however it can be set on a per DAG basis in the
- # DAG definition (catchup)
- catchup_by_default = True
- # This changes the batch size of queries in the scheduling main loop.
- # If this is too high, SQL query performance may be impacted by one
- # or more of the following:
- # - reversion to full table scan
- # - complexity of query predicate
- # - excessive locking
- #
- # Additionally, you may hit the maximum allowable query length for your db.
- #
- # Set this to 0 for no limit (not advised)
- max_tis_per_query = 512
- # Statsd (https://github.com/etsy/statsd) integration settings
- statsd_on = False
- statsd_host = localhost
- statsd_port = 8125
- statsd_prefix = airflow
- # The scheduler can run multiple threads in parallel to schedule dags.
- # This defines how many threads will run.
- max_threads = 2
- authenticate = False
- [ldap]
- # set this to ldaps://<your.ldap.server>:<port>
- uri =
- user_filter = objectClass=*
- user_name_attr = uid
- group_member_attr = memberOf
- superuser_filter =
- data_profiler_filter =
- bind_user = cn=Manager,dc=example,dc=com
- bind_password = insecure
- basedn = dc=example,dc=com
- cacert = /etc/ca/ldap_ca.crt
- search_scope = LEVEL
- [mesos]
- # Mesos master address which MesosExecutor will connect to.
- master = localhost:5050
- # The framework name which Airflow scheduler will register itself as on mesos
- framework_name = Airflow
- # Number of cpu cores required for running one task instance using
- # 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
- # command on a mesos slave
- task_cpu = 1
- # Memory in MB required for running one task instance using
- # 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
- # command on a mesos slave
- task_memory = 256
- # Enable framework checkpointing for mesos
- # See http://mesos.apache.org/documentation/latest/slave-recovery/
- checkpoint = False
- # Failover timeout in milliseconds.
- # When checkpointing is enabled and this option is set, Mesos waits
- # until the configured timeout for
- # the MesosExecutor framework to re-register after a failover. Mesos
- # shuts down running tasks if the
- # MesosExecutor framework fails to re-register within this timeframe.
- # failover_timeout = 604800
- # Enable framework authentication for mesos
- # See http://mesos.apache.org/documentation/latest/configuration/
- authenticate = False
- # Mesos credentials, if authentication is enabled
- # default_principal = admin
- # default_secret = admin
- # Optional Docker Image to run on slave before running the command
- # This image should be accessible from the mesos slave, i.e. the mesos slave
- # should be able to pull this docker image before executing the command.
- # docker_image_slave = puckel/docker-airflow
- [kerberos]
- ccache = /tmp/airflow_krb5_ccache
- # gets augmented with fqdn
- principal = airflow
- reinit_frequency = 3600
- kinit_path = kinit
- keytab = airflow.keytab
- [github_enterprise]
- api_rev = v3
- [admin]
- # UI to hide sensitive variable fields when set to True
- hide_sensitive_variable_fields = True
- [elasticsearch]
- elasticsearch_host =
- # we need to escape the curly braces by adding an additional curly brace
- elasticsearch_log_id_template = {{dag_id}}-{{task_id}}-{{execution_date}}-{{try_number}}
- elasticsearch_end_of_log_mark = end_of_log
- [kubernetes]
- # The repository, tag and imagePullPolicy of the Kubernetes Image for the Worker to Run
- worker_container_repository =
- worker_container_tag =
- worker_container_image_pull_policy = IfNotPresent
- worker_dags_folder =
- # If True (default), worker pods will be deleted upon termination
- delete_worker_pods = True
- # The Kubernetes namespace where airflow workers should be created. Defaults to `default`
- namespace = default
- # The name of the Kubernetes ConfigMap Containing the Airflow Configuration (this file)
- airflow_configmap =
- # For either git sync or volume mounted DAGs, the worker will look in this subpath for DAGs
- dags_volume_subpath =
- # For DAGs mounted via a volume claim (mutually exclusive with git-sync)
- dags_volume_claim =
- # For volume mounted logs, the worker will look in this subpath for logs
- logs_volume_subpath =
- # A shared volume claim for the logs
- logs_volume_claim =
- # Git credentials and repository for DAGs mounted via Git (mutually exclusive with volume claim)
- git_repo =
- git_branch =
- git_user =
- git_password =
- git_subpath =
- # For cloning DAGs from git repositories into volumes: https://github.com/kubernetes/git-sync
- git_sync_container_repository = gcr.io/google-containers/git-sync-amd64
- git_sync_container_tag = v2.0.5
- git_sync_init_container_name = git-sync-clone
- # The name of the Kubernetes service account to be associated with airflow workers, if any.
- # Service accounts are required for workers that require access to secrets or cluster resources.
- # See the Kubernetes RBAC documentation for more:
- # https://kubernetes.io/docs/admin/authorization/rbac/
- worker_service_account_name =
- # Any image pull secrets to be given to worker pods. If more than one secret is
- # required, provide a comma separated list: secret_a,secret_b
- image_pull_secrets =
- # GCP Service Account Keys to be provided to tasks run on Kubernetes Executors
- # Should be supplied in the format: key-name-1:key-path-1,key-name-2:key-path-2
- gcp_service_account_keys =
- # Use the service account kubernetes gives to pods to connect to kubernetes cluster.
- # It's intended for clients that expect to be running inside a pod running on kubernetes.
- # It will raise an exception if called from a process not running in a kubernetes environment.
- in_cluster = True
- [kubernetes_node_selectors]
- # The Key-value pairs to be given to worker pods.
- # The worker pods will be scheduled to the nodes of the specified key-value pairs.
- # Should be supplied in the format: key = value
- [kubernetes_secrets]
- # The scheduler mounts the following secrets into your workers as they are launched by the
- # scheduler. You may define as many secrets as needed and the kubernetes launcher will parse the
- # defined secrets and mount them as secret environment variables in the launched workers.
- # Secrets in this section are defined as follows
- # <environment_variable_mount> = <kubernetes_secret_object>:<kubernetes_secret_key>
- #
- # For example if you wanted to mount a kubernetes secret key named `postgres_password` from the
- # kubernetes secret object `airflow-secret` as the environment variable `POSTGRES_PASSWORD` into
- # your workers you would follow the following format:
- # POSTGRES_PASSWORD = airflow-secret:postgres_password
- #
- # Additionally you may override worker airflow settings with the AIRFLOW__<SECTION>__<KEY>
- # formatting as supported by airflow normally.
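
A note on that last point: any option in this template can be overridden at runtime through an environment variable named AIRFLOW__<SECTION>__<KEY>, which is also how the Kubernetes worker overrides mentioned above are applied. A minimal sketch of the mechanism (the connection string is just a placeholder, not this installation's value):

import os

# Environment variables of the form AIRFLOW__<SECTION>__<KEY> take precedence
# over values in airflow.cfg; here we override [core] sql_alchemy_conn.
os.environ['AIRFLOW__CORE__SQL_ALCHEMY_CONN'] = (
    'postgresql+psycopg2://airflow:airflow@localhost:5432/airflow'  # placeholder
)

from airflow.configuration import conf

# conf.get() checks the environment first, so this prints the override above
# rather than the sqlite URL from the [core] section of this file.
print(conf.get('core', 'sql_alchemy_conn'))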