From 8c6296027e062fa6e86c8100c80294208bdf4332 Mon Sep 17 00:00:00 2001
From: Richard Kojedzinszky
Date: Tue, 27 Aug 2024 09:26:28 +0200
Subject: [PATCH] feat(ml): make http keep-alive configurable

Closes #12064
---
 docs/docs/install/environment-variables.md | 3 +++
 machine-learning/start.sh                  | 2 ++
 2 files changed, 5 insertions(+)

diff --git a/docs/docs/install/environment-variables.md b/docs/docs/install/environment-variables.md
index 78cd16cf1b7c2e..e65add187f7a23 100644
--- a/docs/docs/install/environment-variables.md
+++ b/docs/docs/install/environment-variables.md
@@ -168,6 +168,7 @@ Redis (Sentinel) URL example JSON before encoding:
 | `MACHINE_LEARNING_MODEL_INTER_OP_THREADS` | Number of parallel model operations | `1` | machine learning |
 | `MACHINE_LEARNING_MODEL_INTRA_OP_THREADS` | Number of threads for each model operation | `2` | machine learning |
 | `MACHINE_LEARNING_WORKERS`\*2 | Number of worker processes to spawn | `1` | machine learning |
+| `MACHINE_LEARNING_HTTP_KEEPALIVE_TIMEOUT_S`\*3 | HTTP Keep-alive time in seconds | `2` | machine learning |
 | `MACHINE_LEARNING_WORKER_TIMEOUT` | Maximum time (s) of unresponsiveness before a worker is killed | `120` (`300` if using OpenVINO image) | machine learning |
 | `MACHINE_LEARNING_PRELOAD__CLIP` | Name of a CLIP model to be preloaded and kept in cache | | machine learning |
 | `MACHINE_LEARNING_PRELOAD__FACIAL_RECOGNITION` | Name of a facial recognition model to be preloaded and kept in cache | | machine learning |
@@ -179,6 +180,8 @@ Redis (Sentinel) URL example JSON before encoding:
 
 \*2: Since each process duplicates models in memory, changing this is not recommended unless you have abundant memory to go around.
 
+\*3: For scenarios like HPA in K8S. https://github.com/immich-app/immich/discussions/12064
+
 :::info
 
 Other machine learning parameters can be tuned from the admin UI.
diff --git a/machine-learning/start.sh b/machine-learning/start.sh
index 6b8e55a23657d1..c3fda523df8329 100755
--- a/machine-learning/start.sh
+++ b/machine-learning/start.sh
@@ -13,6 +13,7 @@ fi
 : "${IMMICH_HOST:=[::]}"
 : "${IMMICH_PORT:=3003}"
 : "${MACHINE_LEARNING_WORKERS:=1}"
+: "${MACHINE_LEARNING_HTTP_KEEPALIVE_TIMEOUT_S:=2}"
 
 gunicorn app.main:app \
 	-k app.config.CustomUvicornWorker \
@@ -20,4 +21,5 @@ gunicorn app.main:app \
 	-w "$MACHINE_LEARNING_WORKERS" \
 	-t "$MACHINE_LEARNING_WORKER_TIMEOUT" \
 	--log-config-json log_conf.json \
+	--keep-alive "$MACHINE_LEARNING_HTTP_KEEPALIVE_TIMEOUT_S" \
 	--graceful-timeout 0
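
As a minimal usage sketch of the new variable (assuming a Docker-based deployment of the ghcr.io/immich-app/immich-machine-learning:release image and the default port 3003; the 30-second value is only illustrative):

    # Raise the HTTP keep-alive timeout so a load balancer (e.g. in front of an
    # HPA-scaled deployment) can reuse idle connections longer than the 2 s default.
    docker run --rm \
      -e MACHINE_LEARNING_HTTP_KEEPALIVE_TIMEOUT_S=30 \
      -p 3003:3003 \
      ghcr.io/immich-app/immich-machine-learning:release

start.sh simply forwards the value to gunicorn's --keep-alive option, so it only controls how long idle Keep-Alive connections are held open and does not change request handling.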