Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- dev_us-east-1_ubuntu@dev_common_1_omd:~$ cat /etc/crontab
- # /etc/crontab: system-wide crontab
- # Unlike any other crontab you don't have to run the `crontab'
- # command to install the new version when you edit this file
- # and files in /etc/cron.d. These files also have username fields,
- # that none of the other crontabs do.
- SHELL=/bin/sh
- PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
- # m h dom mon dow user command
- 17 * * * * root cd / && run-parts --report /etc/cron.hourly
- 25 6 * * * root test -x /usr/sbin/anacron || ( cd / && run-parts --report /etc/cron.daily )
- 47 6 * * 7 root test -x /usr/sbin/anacron || ( cd / && run-parts --report /etc/cron.weekly )
- 52 6 1 * * root test -x /usr/sbin/anacron || ( cd / && run-parts --report /etc/cron.monthly )
- #
- # Pull and install the latest nagios checks daily at 00:14.
- 14 0 * * * ubuntu (cd /opt/talemetry_nagios_checks; sudo git pull; make update;)
- # NOTE(review): an exact byte-for-byte duplicate of the entry above was removed
- # here — it made the same `git pull; make update` run twice concurrently at 00:14.
- ### SQS QUEUE CHECKS ###
- # Every 10 minutes, check each production SQS queue.
- # -w 2000 / -c 5000 are presumably warn/crit depth thresholds — confirm in check_match_queue_v2.
- # No redirect target follows 2>&1, so stderr is merged into stdout (handled by cron's default output handling).
- */10 * * * * monitor /vol/cronjobs/sqs_queue/check_match_queue_v2 -w 2000 -c 5000 -n vpc_production_dupcheck_queue 2>&1
- */10 * * * * monitor /vol/cronjobs/sqs_queue/check_match_queue_v2 -w 2000 -c 5000 -n vpc_production_index_queue 2>&1
- */10 * * * * monitor /vol/cronjobs/sqs_queue/check_match_queue_v2 -w 2000 -c 5000 -n vpc_production_profile_queue 2>&1
- */10 * * * * monitor /vol/cronjobs/sqs_queue/check_match_queue_v2 -w 2000 -c 5000 -n vpc_production_scrape_queue 2>&1
- # Truncate the shared queue-check log weekly (Saturday 00:14).
- 14 0 * * 6 monitor (: > /vol/cronjobs/sqs_queue/sqs_queue_check.log)
- ### KAFKA REPORT ###
- # Daily 11:00 Kafka reports; each appends its output to its own log file.
- 0 11 * * * monitor /vol/cronjobs/kafka/mail_kafka_activity_report >> /vol/cronjobs/kafka/mail_kafka_activity_report.log 2>&1
- 0 11 * * * monitor /vol/cronjobs/kafka/mail_kafka_consumer_exceptions_by_class >> /vol/cronjobs/kafka/mail_kafka_consumer_exceptions_by_class.log 2>&1
- ### DB aggregate check ###
- # Verify DWH aggregate tables on prod-talemetry-bi. Each run writes the verify
- # command's exit status to a .status file (via tee) and appends a timed
- # transcript to a matching .log. The '\%' is required because a bare '%' is
- # special in a crontab command field (end-of-command marker) — see crontab(5).
- # daily aggregates: every 30 minutes
- 0,30 * * * * monitor (date && /usr/bin/time -f "Run time:\%E" /vol/cronjobs/psql/verify_dwh_aggregates --status --lag --host=prod-talemetry-bi.talemetry.internal --database=talemetry_bi --aggregate=daily; status=$? && echo "Status:$status" | tee /vol/cronjobs/psql/aggregate_tables_checks/daily.status && echo Done) >> /vol/cronjobs/psql/aggregate_tables_checks/daily.log 2>&1
- # monthly aggregates: every 6 hours at hh:15
- 15 */6 * * * monitor (date && /usr/bin/time -f "Run time:\%E" /vol/cronjobs/psql/verify_dwh_aggregates --status --lag --host=prod-talemetry-bi.talemetry.internal --database=talemetry_bi --aggregate=monthly; status=$? && echo "Status:$status" | tee /vol/cronjobs/psql/aggregate_tables_checks/monthly.status && echo Done) >> /vol/cronjobs/psql/aggregate_tables_checks/monthly.log 2>&1
- # annual aggregates: daily at 22:45
- 45 22 * * * monitor (date && /usr/bin/time -f "Run time:\%E" /vol/cronjobs/psql/verify_dwh_aggregates --status --lag --host=prod-talemetry-bi.talemetry.internal --database=talemetry_bi --aggregate=annual; status=$? && echo "Status:$status" | tee /vol/cronjobs/psql/aggregate_tables_checks/annual.status && echo Done) >> /vol/cronjobs/psql/aggregate_tables_checks/annual.log 2>&1
- ### System Metric Facts check ###
- # Daily 16:00; check output is tee-appended to the .status file and the whole
- # timed transcript appended to the .log.
- 0 16 * * * monitor (date && /usr/bin/time -f "Elapsed Time: \%E" /vol/cronjobs/psql/check_system_metric_facts -h prod-talemetry-bi.talemetry.internal -d talemetry_bi | tee -a /vol/cronjobs/psql/metric_facts_daily.status && echo Done) >> /vol/cronjobs/psql/metric_facts_daily.log 2>&1
- ### Delayed jobs check ###
- # Every 15 min; stdout/stderr discarded — the check presumably writes the log
- # truncated below itself (confirm in check_delayed_jobs).
- */15 * * * * monitor /vol/cronjobs/mysql/check_delayed_jobs >/dev/null 2>&1
- # Truncate the delayed-jobs log daily at 00:14.
- 14 0 * * * monitor (: > /vol/cronjobs/mysql/delayed_jobs.log)
- ### DWH health check ###
- # Every 15 min; log truncated daily at 00:14.
- */15 * * * * monitor /vol/cronjobs/dwh/check_dwh_health >/dev/null 2>&1
- 14 0 * * * monitor (: > /vol/cronjobs/dwh/dwh.log)
- ### solr health check (check_solr_health) ###
- */15 * * * * monitor /vol/cronjobs/solr/check_solr_health >/dev/null 2>&1
- 14 0 * * * monitor (: > /vol/cronjobs/solr/check_solr_health.log)
- ### solr cluster health check ###
- # Every 15 min per collection, against the common-1-zookeeper ensemble.
- */15 * * * * monitor /vol/cronjobs/solr/check_solr_cluster --zkhosts common-1-zookeeper --collection production_jobs >/dev/null 2>&1
- */15 * * * * monitor /vol/cronjobs/solr/check_solr_cluster --zkhosts common-1-zookeeper --collection production_candidates >/dev/null 2>&1
- */15 * * * * monitor /vol/cronjobs/solr/check_solr_cluster --zkhosts common-1-zookeeper --collection production_talemetry_jobs >/dev/null 2>&1
- 14 0 * * * monitor (: > /vol/cronjobs/solr/check_solr_cluster.log)
- ### Blank candidate check ###
- # Hourly, on the hour.
- 0 * * * * monitor /vol/cronjobs/solr/check_blank_candidate >/dev/null 2>&1
- 14 0 * * * monitor (: > /vol/cronjobs/solr/check_blank_candidate.log)
- ### job list memberships check ###
- */15 * * * * monitor /vol/cronjobs/mysql/check_job_list_memberships >/dev/null 2>&1
- 14 0 * * * monitor (: > /vol/cronjobs/mysql/jobs_list_memberships.log)
- ### failed candidates CHECKS ###
- # Twice daily at 08:00 and 15:00.
- 00 8,15 * * * monitor /vol/cronjobs/mysql/check_failed_candidates 2>&1
- 14 0 * * * monitor (: > /vol/cronjobs/mysql/check_failed_candidates.log)
- ### sendgrid api error CHECKS ###
- # Twice daily at 08:00 and 15:00. NOTE(review): unlike the sibling checks,
- # this one has no daily log-truncation entry — confirm whether one is needed.
- 00 8,15 * * * monitor /vol/cronjobs/sendgrid/check_sendgrid_api_error 2>&1
- ### candidates status report ###
- 00 8,15 * * * monitor /vol/cronjobs/mysql/check_candidates_status 2>&1
- 14 0 * * * monitor (: > /vol/cronjobs/mysql/candidates_status.log)
- ### async status report ###
- 00 8,15 * * * monitor /vol/cronjobs/mysql/check_async_reports 2>&1
- 14 0 * * * monitor (: > /vol/cronjobs/mysql/check_async_reports.log)
- ### check careersites ###
- # Runs a ruby script directly; requires `ruby` on the monitor user's PATH
- # (PATH is set at the top of this crontab).
- 00 8,15 * * * monitor (ruby /vol/cronjobs/zenedge/check_careersites.rb 2>&1)
- ### Maintenance schedule for pingdom ###
- # Each window pauses pingdom monitoring at the start (argument `true`) and
- # resumes it at the end (argument `false`).
- #
- ### Wednesday 8pm to 11pm
- 0 20 * * 3 monitor /vol/cronjobs/pingdom_monitor/pingdom_pause_monitor true 2>&1
- 0 23 * * 3 monitor /vol/cronjobs/pingdom_monitor/pingdom_pause_monitor false 2>&1
- ### Friday 8pm to 11pm
- 0 20 * * 5 monitor /vol/cronjobs/pingdom_monitor/pingdom_pause_monitor true 2>&1
- 0 23 * * 5 monitor /vol/cronjobs/pingdom_monitor/pingdom_pause_monitor false 2>&1
- ### Saturday 8pm to Sunday 1am
- # dow 6 = Saturday (pause), dow 7 = Sunday (resume at 01:00).
- 0 20 * * 6 monitor /vol/cronjobs/pingdom_monitor/pingdom_pause_monitor true 2>&1
- 0 1 * * 7 monitor /vol/cronjobs/pingdom_monitor/pingdom_pause_monitor false 2>&1
- ### check kafka cluster in zookeeper
- # Fan-out checks run via the ssh_mgr `awser` wrapper against a server list;
- # --enablealert presumably raises an alert on failure — confirm in awser.
- 0 * * * * monitor /vol/cronjobs/ssh_mgr/awser --module check_kafka_status_in_zookeeper --serverlist production_zookeeper.list --vpc prod --prompt no --enablealert >/dev/null 2>&1
- ### consumer process status check within bluepill
- */15 * * * * monitor /vol/cronjobs/ssh_mgr/awser --module check_bluepill_processes_status --serverlist production_consumers.list --vpc prod --prompt no >/dev/null 2>&1
- ### runner process status check within bluepill
- */15 * * * * monitor /vol/cronjobs/ssh_mgr/awser --module check_runner_bluepill_process --serverlist production_runners.list --vpc prod --prompt no >/dev/null 2>&1
- ### check all job workers
- */15 * * * * monitor /vol/cronjobs/ssh_mgr/awser --module check_all_jobs_worker --serverlist production_talemetrybackgrounds.list --vpc prod --prompt no >/dev/null 2>&1
- ## restart puma for talemetry_core
- # NOTE(review): this is the only restart_* entry still enabled; the other
- # restart_puma entries below are commented out in favor of check_puma alerts.
- */30 * * * * monitor /vol/cronjobs/ssh_mgr/awser --module restart_core_puma --serverlist production_talemetryweb_core.list --vpc prod --prompt no >/dev/null 2>&1
- ## restart puma for talemetryweb
- #0 */2 * * * monitor /vol/cronjobs/ssh_mgr/awser --module restart_puma --serverlist production_talemetryweb.list --vpc prod --prompt no >/dev/null 2>&1
- ## check puma for talemetryweb
- */12 * * * * monitor /vol/cronjobs/ssh_mgr/awser --module check_puma --serverlist production_talemetryweb.list --vpc prod --prompt no --enablealert >/dev/null 2>&1
- ## restart puma for web
- #0 */2 * * * monitor /vol/cronjobs/ssh_mgr/awser --module restart_puma --serverlist production_web.list --vpc prod --prompt no >/dev/null 2>&1
- ## check puma for web
- */12 * * * * monitor /vol/cronjobs/ssh_mgr/awser --module check_puma --serverlist production_web.list --vpc prod --prompt no --enablealert >/dev/null 2>&1
- ## restart puma for biweb
- #0 */2 * * * monitor /vol/cronjobs/ssh_mgr/awser --module restart_puma --serverlist production_biweb.list --vpc prod --prompt no >/dev/null 2>&1
- ## check puma for biweb
- */12 * * * * monitor /vol/cronjobs/ssh_mgr/awser --module check_puma --serverlist production_biweb.list --vpc prod --prompt no --enablealert >/dev/null 2>&1
- ## restart puma for api
- #0 */2 * * * monitor /vol/cronjobs/ssh_mgr/awser --module restart_puma --serverlist production_api.list --vpc prod --prompt no >/dev/null 2>&1
- ## check puma for api
- */12 * * * * monitor /vol/cronjobs/ssh_mgr/awser --module check_puma --serverlist production_api.list --vpc prod --prompt no --enablealert >/dev/null 2>&1
- ## restart puma for monitor server
- #0 */2 * * * monitor /vol/cronjobs/ssh_mgr/awser --module restart_puma --serverlist production_monitor.list --vpc prod --prompt no >/dev/null 2>&1
- ## check puma for monitor server
- */12 * * * * monitor /vol/cronjobs/ssh_mgr/awser --module check_puma --serverlist production_monitor.list --vpc prod --prompt no --enablealert >/dev/null 2>&1
- ## check logviewer
- */10 * * * * monitor /vol/cronjobs/ssh_mgr/awser --module check_logviewer --serverlist production_logviewer.list --vpc prod --prompt no --enablealert >/dev/null 2>&1
- # Replication data quality check
- # Four passes (args 1-4) at 20:00-23:00, then a report cleanup (arg 1) at 01:00.
- # NOTE(review): all five entries truncate-redirect (>) into the SAME .out file,
- # so each run clobbers the previous run's output and only the 01:00 cleanup's
- # output survives the night. If every pass's output should be kept, use >> or
- # per-pass log files — confirm intent. Also confirm whether the 01:00 delete
- # script should log to its own file rather than mysql-repl-data-check.out.
- 0 20 * * * sysmonitor ( /opt/talemetry/dbmonitor/bin/mysql-repl-data-check.sh 1 ) > /opt/talemetry/dbmonitor/logs/mysql-repl-data-check.out 2>&1
- 0 21 * * * sysmonitor ( /opt/talemetry/dbmonitor/bin/mysql-repl-data-check.sh 2 ) > /opt/talemetry/dbmonitor/logs/mysql-repl-data-check.out 2>&1
- 0 22 * * * sysmonitor ( /opt/talemetry/dbmonitor/bin/mysql-repl-data-check.sh 3 ) > /opt/talemetry/dbmonitor/logs/mysql-repl-data-check.out 2>&1
- 0 23 * * * sysmonitor ( /opt/talemetry/dbmonitor/bin/mysql-repl-data-check.sh 4 ) > /opt/talemetry/dbmonitor/logs/mysql-repl-data-check.out 2>&1
- 0 1 * * * sysmonitor ( /opt/talemetry/dbmonitor/bin/delete-repl-data-check-reports.sh 1 ) > /opt/talemetry/dbmonitor/logs/mysql-repl-data-check.out 2>&1
- # Route53 backup
- 0 20 * * * ubuntu /vol/cronjobs/route53/route53_backup.sh
- # QA environment schedule: stop weekdays at 16:50 (sourcing ubuntu's .bashrc
- # first, presumably for env vars the stop script needs — confirm), start 10:00.
- 50 16 * * 1-5 ubuntu /bin/bash -c ". /home/ubuntu/.bashrc; /home/ubuntu/stop_qa.sh"
- 0 10 * * 1-5 ubuntu /home/ubuntu/start_qa.sh
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement