From 1f2f27bcd42e8e09ef39010461b363128cb62621 Mon Sep 17 00:00:00 2001
From: YuviPanda
Date: Fri, 16 Sep 2022 12:14:32 -0700
Subject: [PATCH] Keep a placeholder node around for UToronto Hub

Users were reporting slow starts and timeouts in the morning, as many
users come on at the same time. This change keeps a placeholder *node*
around, with a placeholder pod that will get displaced whenever a user
needs that node. This should increase the odds of a new node being up
by the time more than one node's worth of users pop in.

Ref https://2i2c.freshdesk.com/a/tickets/201
---
 config/clusters/utoronto/prod.values.yaml | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/config/clusters/utoronto/prod.values.yaml b/config/clusters/utoronto/prod.values.yaml
index 910928ad2e..4d51b905bc 100644
--- a/config/clusters/utoronto/prod.values.yaml
+++ b/config/clusters/utoronto/prod.values.yaml
@@ -1,4 +1,15 @@
 jupyterhub:
+  scheduling:
+    userPlaceholder:
+      # Keep at least one spare node around
+      replicas: 1
+      resources:
+        requests:
+          # Each node on the UToronto cluster has 59350076Ki of RAM
+          # You can find this out by looking at the output of `kubectl get node -o yaml`
+          # Look under `allocatable`, not `capacity`
+          # So even though this is under `userPlaceholder`, it really is operating as a `nodePlaceholder`
+          memory: 57350076Ki
   hub:
     db:
       pvc:
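
Background on the mechanism: Zero to JupyterHub runs user-placeholder pods
at a lower pod priority than real user pods, so the scheduler preempts a
placeholder whenever a user needs its capacity; the displaced placeholder
goes pending, which triggers the autoscaler to bring up a fresh node. The
allocatable-memory figure used for the request above can be confirmed
directly against the cluster (the node name here is a placeholder):

    kubectl get node <node-name> -o jsonpath='{.status.allocatable.memory}'

The request is set slightly below the node's allocatable memory, presumably
so the placeholder pod still fits on a node alongside the daemonset pods
that also run there.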