From 9481c2b7249a3d04b165f86ffb50c14c6de92f65 Mon Sep 17 00:00:00 2001
From: Sjoerd Schipper
Date: Fri, 26 Jul 2024 16:29:41 +0200
Subject: [PATCH 1/2] :stethoscope: add celery healthcheck

---
 Dockerfile            |  2 +-
 bin/celery_worker.sh  |  6 ++--
 docker-compose.yml    | 25 +++++++++++++++--
 src/objects/celery.py | 65 ++++++++++++++++++++++++++++++++++++++++++-
 4 files changed, 92 insertions(+), 6 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index f82901c9..ba141e4b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -57,7 +57,7 @@ COPY ./bin/celery_worker.sh /celery_worker.sh
 COPY ./bin/celery_flower.sh /celery_flower.sh
 COPY ./bin/check_celery_worker_liveness.py ./bin/
 COPY ./bin/setup_configuration.sh /setup_configuration.sh
-RUN mkdir /app/log /app/config
+RUN mkdir /app/log /app/config /app/tmp

 # copy frontend build statics
 COPY --from=frontend-build /app/src/objects/static /app/src/objects/static
diff --git a/bin/celery_worker.sh b/bin/celery_worker.sh
index 33fdf84a..031a2ec1 100755
--- a/bin/celery_worker.sh
+++ b/bin/celery_worker.sh
@@ -15,9 +15,11 @@ if [[ "$ENABLE_COVERAGE" ]]; then
 fi

 echo "Starting celery worker $WORKER_NAME with queue $QUEUE"
-exec $_binary --workdir src --app objects.celery worker \
+exec $_binary --workdir src --app "objects.celery" worker \
     -Q $QUEUE \
     -n $WORKER_NAME \
     -l $LOGLEVEL \
     -O fair \
-    -c $CONCURRENCY
+    -c $CONCURRENCY \
+    -E \
+    --max-tasks-per-child=50
\ No newline at end of file
diff --git a/docker-compose.yml b/docker-compose.yml
index 7ff54493..c57e124a 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -36,6 +36,15 @@ services:
       - DEMO_TOKEN=demo-random-string
       - DEMO_PERSON=Demo
       - DEMO_EMAIL=demo@demo.local
+    healthcheck:
+      test: ["CMD", "python", "-c", "import requests; exit(requests.head('http://localhost:8000/admin/').status_code not in [200, 302])"]
+      interval: 30s
+      timeout: 5s
+      retries: 3
+      # This should allow for enough time for migrations to run before the max
+      # retries have passed. This healthcheck in turn allows other containers
+      # to wait for the database migrations.
+      start_period: 30s
     ports:
       - 8000:8000
     depends_on:
@@ -59,9 +68,21 @@
     build: *web_build
     environment: *web_env
     command: /celery_worker.sh
+    healthcheck:
+      test: ["CMD", "python", "/app/bin/check_celery_worker_liveness.py"]
+      interval: 30s
+      timeout: 5s
+      retries: 3
+      start_period: 10s
     depends_on:
-      - db
-      - redis
+      web:
+        # This health check condition is needed because Celery Beat will
+        # try to convert the CELERY_BEAT_SCHEDULE into database entries. For
+        # this, migrations need to be finished. If Celery tasks were still
+        # pending, the database also needs to be ready for Celery itself. We
+        # therefore have the health check here, and make Celery beat and
+        # monitor containers depend on the celery container.
+        condition: service_healthy
     volumes: *web_volumes

   celery-flower:
diff --git a/src/objects/celery.py b/src/objects/celery.py
index f5c5e0e0..5103a886 100644
--- a/src/objects/celery.py
+++ b/src/objects/celery.py
@@ -1,4 +1,9 @@
-from celery import Celery
+from pathlib import Path
+
+from django.conf import settings
+
+from celery import Celery, bootsteps
+from celery.signals import setup_logging, worker_ready, worker_shutdown

 from .setup import setup_env

@@ -6,4 +11,62 @@
 app = Celery("objects")

 app.config_from_object("django.conf:settings", namespace="CELERY")
+app.conf.ONCE = {
+    "backend": "celery_once.backends.Redis",
+    "settings": {
+        "url": settings.CELERY_BROKER_URL,
+        "default_timeout": 60 * 60,  # one hour
+    },
+}
+
 app.autodiscover_tasks()
+
+
+# Use django's logging settings as these are reset by Celery by default
+@setup_logging.connect()
+def config_loggers(*args, **kwargs):
+    from logging.config import dictConfig
+
+    dictConfig(settings.LOGGING)
+
+
+HEARTBEAT_FILE = Path(settings.BASE_DIR) / "tmp" / "celery_worker_heartbeat"
+READINESS_FILE = Path(settings.BASE_DIR) / "tmp" / "celery_worker_ready"
+
+
+#
+# Utilities for checking the health of celery workers
+#
+class LivenessProbe(bootsteps.StartStopStep):
+    requires = {"celery.worker.components:Timer"}
+
+    def __init__(self, worker, **kwargs):
+        self.requests = []
+        self.tref = None
+
+    def start(self, worker):
+        self.tref = worker.timer.call_repeatedly(
+            10.0,
+            self.update_heartbeat_file,
+            (worker,),
+            priority=10,
+        )
+
+    def stop(self, worker):
+        HEARTBEAT_FILE.unlink(missing_ok=True)
+
+    def update_heartbeat_file(self, worker):
+        HEARTBEAT_FILE.touch()
+
+
+@worker_ready.connect
+def worker_ready(**_):
+    READINESS_FILE.touch()
+
+
+@worker_shutdown.connect
+def worker_shutdown(**_):
+    READINESS_FILE.unlink(missing_ok=True)
+
+
+app.steps["worker"].add(LivenessProbe)

From e1a003695fcec3fc30c4e27c2f9dedff2c41b9fa Mon Sep 17 00:00:00 2001
From: Sjoerd Schipper
Date: Mon, 29 Jul 2024 11:11:34 +0200
Subject: [PATCH 2/2] Remove celery health check comment

---
 docker-compose.yml | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/docker-compose.yml b/docker-compose.yml
index c57e124a..cf49c46c 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -76,12 +76,6 @@
       start_period: 10s
     depends_on:
       web:
-        # This health check condition is needed because Celery Beat will
-        # try to convert the CELERY_BEAT_SCHEDULE into database entries. For
-        # this, migrations need to be finished. If Celery tasks were still
-        # pending, the database also needs to be ready for Celery itself. We
-        # therefore have the health check here, and make Celery beat and
-        # monitor containers depend on the celery container.
         condition: service_healthy
     volumes: *web_volumes

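Note: the celery container's Docker health check runs /app/bin/check_celery_worker_liveness.py, which the patches reference (Dockerfile COPY, docker-compose healthcheck) but do not include. As a rough, hypothetical sketch only -- not the repository's actual script -- such a check could consume the heartbeat and readiness files written by src/objects/celery.py roughly as follows; the /app base path mirrors the Dockerfile, while the 60-second staleness threshold is an assumption:

#!/usr/bin/env python
# Hypothetical liveness check for the Celery worker container -- a sketch only,
# not the bin/check_celery_worker_liveness.py shipped with the project.
import sys
import time
from pathlib import Path

# Paths mirror HEARTBEAT_FILE / READINESS_FILE in src/objects/celery.py,
# assuming BASE_DIR resolves to /app inside the container.
HEARTBEAT_FILE = Path("/app/tmp/celery_worker_heartbeat")
READINESS_FILE = Path("/app/tmp/celery_worker_ready")
MAX_HEARTBEAT_AGE = 60  # seconds; assumed threshold, not taken from the patch

# The worker_ready signal touches the readiness file; without it the worker
# never finished booting (or has already shut down), so report unhealthy.
if not READINESS_FILE.is_file():
    print("Celery worker readiness file not found")
    sys.exit(1)

# The LivenessProbe bootstep touches the heartbeat file every 10 seconds;
# a missing or stale heartbeat means the worker's timer stopped firing.
if not HEARTBEAT_FILE.is_file():
    print("Celery worker heartbeat file not found")
    sys.exit(1)

age = time.time() - HEARTBEAT_FILE.stat().st_mtime
if age > MAX_HEARTBEAT_AGE:
    print(f"Celery worker heartbeat is {age:.0f}s old")
    sys.exit(1)

sys.exit(0)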