diff --git a/config/clusters/utoronto/prod.values.yaml b/config/clusters/utoronto/prod.values.yaml
index 910928ad2e..85305ec892 100644
--- a/config/clusters/utoronto/prod.values.yaml
+++ b/config/clusters/utoronto/prod.values.yaml
@@ -1,4 +1,17 @@
 jupyterhub:
+  scheduling:
+    userPlaceholder:
+      # Keep at least one spare node around
+      replicas: 1
+      resources:
+        requests:
+          # Each node on the UToronto cluster has 59350076Ki of RAM
+          # You can find this out by looking at the output of `kubectl get node -o yaml`
+          # Look under `allocatable`, not `capacity`. Unfortunately then you have to fiddle with it to
+          # find the right number that's big enough that no user pods will schedule here, but small enough
+          # that pods in `kube-system` will still schedule.
+          # So even though this is under `userPlaceholder`, it really is operating as a `nodePlaceholder`
+          memory: 57350076Ki
   hub:
     db:
       pvc:
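
As a rough aid (not part of the patch itself), here is a minimal Python sketch of the allocatable-vs-capacity check the comments describe, assuming the `kubernetes` client library is installed and a kubeconfig for the cluster is available; it just prints each node's two memory figures so a suitable placeholder request can be picked by hand:

# Sketch only: list each node's allocatable vs. capacity memory.
# Assumes `pip install kubernetes` and an existing kubeconfig context
# pointing at the target cluster.
from kubernetes import client, config

config.load_kube_config()  # use config.load_incluster_config() if running inside the cluster
v1 = client.CoreV1Api()

for node in v1.list_node().items:
    alloc = node.status.allocatable["memory"]  # what the scheduler can actually hand out
    cap = node.status.capacity["memory"]       # raw machine memory; larger than allocatable
    print(f"{node.metadata.name}: allocatable={alloc} capacity={cap}")

The placeholder's memory request is then chosen a bit below the allocatable figure, so the placeholder pod fills the node (keeping user pods off it) while still leaving room for `kube-system` pods.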