Skip to content

Commit

Permalink
✨ Add celery for notifications
Browse files Browse the repository at this point in the history
Since Celery is now required by notifications-api-common to send notifications.
  • Loading branch information
stevenbal committed Feb 1, 2024
1 parent 9ad58e1 commit eb341a2
Show file tree
Hide file tree
Showing 6 changed files with 100 additions and 1 deletion.
4 changes: 4 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -47,10 +47,14 @@ RUN apt-get update && apt-get install -y --no-install-recommends \

# Copy the built Python environment and server binaries from the build stage.
COPY --from=backend-build /usr/local/lib/python3.10 /usr/local/lib/python3.10
COPY --from=backend-build /usr/local/bin/uwsgi /usr/local/bin/uwsgi
# The celery CLI is invoked by the worker/flower entrypoint scripts copied below.
COPY --from=backend-build /usr/local/bin/celery /usr/local/bin/celery

# Stage 3.2 - Copy source code
WORKDIR /app
# Container entrypoint scripts: web start, DB wait, celery worker and flower.
COPY ./bin/docker_start.sh /start.sh
COPY ./bin/wait_for_db.sh /wait_for_db.sh
COPY ./bin/celery_worker.sh /celery_worker.sh
COPY ./bin/celery_flower.sh /celery_flower.sh
# Writable directories for runtime logs and mounted configuration.
RUN mkdir /app/log /app/config

# copy frontend build statics
Expand Down
5 changes: 5 additions & 0 deletions bin/celery_flower.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
#!/bin/bash

# Start the Celery Flower monitoring dashboard for the "objects" app.
#
# Strictness matches bin/celery_worker.sh: abort on errors, unset
# variables and failures anywhere in a pipeline. (This script uses no
# variables or pipes, so the stricter flags cannot change behavior.)
set -euo pipefail

# exec so flower replaces the shell as PID 1 and receives container
# stop signals (SIGTERM) directly.
exec celery --app objects --workdir src flower
34 changes: 34 additions & 0 deletions bin/celery_worker.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
#!/bin/bash

# Start a Celery worker for the "objects" app.
#
# Usage: celery_worker.sh [queue] [worker-name]
#
# Environment:
#   CELERY_LOGLEVEL            log level (default: INFO)
#   CELERY_WORKER_QUEUE        queue name fallback (default: celery)
#   CELERY_WORKER_NAME         worker name fallback (default: <queue>@%n)
#   CELERY_WORKER_CONCURRENCY  optional number of worker processes
set -euo pipefail

LOGLEVEL=${CELERY_LOGLEVEL:-INFO}

# Queue: first CLI arg, else $CELERY_WORKER_QUEUE, else "celery".
# Note: ":=" also assigns the default back into CELERY_WORKER_QUEUE.
QUEUE=${1:-${CELERY_WORKER_QUEUE:=celery}}
# Worker name: second CLI arg, else $CELERY_WORKER_NAME, else "<queue>@%n"
# (%n is expanded by celery to the node hostname).
WORKER_NAME=${2:-${CELERY_WORKER_NAME:="${QUEUE}"@%n}}

# Figure out abspath of this script so sibling scripts are found
# regardless of the caller's working directory.
SCRIPT=$(readlink -f "$0")
SCRIPTPATH=$(dirname "$SCRIPT")

# Wait for required services; quoted so a path containing spaces works.
"${SCRIPTPATH}/wait_for_db.sh"

# build up worker options array
worker_options=(
    "-Q$QUEUE"
    "-n$WORKER_NAME"
    "-l$LOGLEVEL"
    "-Ofair"  # fair scheduling: hand tasks only to idle child processes
)

if [[ -v CELERY_WORKER_CONCURRENCY ]]; then
    echo "Using concurrency ${CELERY_WORKER_CONCURRENCY}"
    worker_options+=( "-c${CELERY_WORKER_CONCURRENCY}" )
fi

echo "Starting celery worker $WORKER_NAME with queue $QUEUE"
# exec so the worker replaces the shell as PID 1 and receives container
# stop signals directly.
exec celery \
    --app objects \
    --workdir src \
    worker "${worker_options[@]}"
15 changes: 15 additions & 0 deletions bin/wait_for_db.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
#!/bin/sh

# Block until the PostgreSQL database container accepts connections.
# See: https://docs.docker.com/compose/startup-order/
set -e

# pg_isready reads the target host/port from these standard libpq variables.
export PGHOST=${DB_HOST:-db}
export PGPORT=${DB_PORT:-5432}

# Poll once per second; progress messages go to stderr so stdout stays clean.
while ! pg_isready; do
    echo "Waiting for database connection..." >&2
    sleep 1
done

echo "Database is up." >&2
31 changes: 30 additions & 1 deletion docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,16 +9,45 @@ services:
- POSTGRES_USER=${DB_USER:-objects}
- POSTGRES_PASSWORD=${DB_PASSWORD:-objects}

redis:
image: redis

web:
build: .
environment:
environment: &app-env
- DJANGO_SETTINGS_MODULE=objects.conf.docker
- SECRET_KEY=${SECRET_KEY:-1(@f(-6s_u(5fd&1sg^uvu2s(c-9sapw)1era8q&)g)h@cwxxg}
- OBJECTS_SUPERUSER_USERNAME=admin
- OBJECTS_SUPERUSER_PASSWORD=admin
- OBJECTS_SUPERUSER_EMAIL=admin@localhost
- ALLOWED_HOSTS=*
- CACHE_DEFAULT=redis:6379/0
- CACHE_AXES=redis:6379/0
- CELERY_BROKER_URL=redis://redis:6379/1
- CELERY_RESULT_BACKEND=redis://redis:6379/1
- CELERY_LOGLEVEL=DEBUG
- CELERY_WORKER_CONCURRENCY=${CELERY_WORKER_CONCURRENCY:-4}
ports:
- 8000:8000
depends_on:
- db

celery:
build: .
image: maykinmedia/objects-api
environment: *app-env
command: /celery_worker.sh
depends_on:
- db
- redis

celery-flower:
build: .
image: maykinmedia/objects-api
environment: *app-env
command: /celery_flower.sh
ports:
- 5555:5555
depends_on:
- redis
- celery
12 changes: 12 additions & 0 deletions src/objects/celery.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# SPDX-License-Identifier: EUPL-1.2
# Copyright (C) 2022 Dimpact
from celery import Celery

from objects.setup import setup_env

# Prepare the environment before the Celery app is created, since
# config_from_object below reads django.conf settings.
# NOTE(review): presumably sets DJANGO_SETTINGS_MODULE/env defaults — confirm
# against objects/setup.py.
setup_env()

# Celery application for the "objects" project; this is what the
# `celery --app objects` invocations in bin/celery_worker.sh and
# bin/celery_flower.sh resolve to.
app = Celery("objects")

# Read all CELERY_*-prefixed settings (e.g. CELERY_BROKER_URL,
# CELERY_RESULT_BACKEND from docker-compose.yml) from Django settings.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Auto-discover task modules from the installed Django apps.
app.autodiscover_tasks()

0 comments on commit eb341a2

Please sign in to comment.