Skip to content

Commit

Permalink
Merge pull request #3817 from GeorgianaElena/awi-ciroh-profile-list
Browse files Browse the repository at this point in the history
[Support: awi-ciroh staging] Add image choice to each profile
GeorgianaElena authored Mar 26, 2024
2 parents 4bf822a + bc13bff commit 3054b8f
Showing 3 changed files with 145 additions and 68 deletions.
70 changes: 2 additions & 68 deletions config/clusters/awi-ciroh/common.values.yaml
Original file line number Diff line number Diff line change
@@ -44,74 +44,8 @@ basehub:
admin_users:
- jameshalgren
- arpita0911patel
- karnesh
singleuser:
image:
# Image build repo: https://github.com/2i2c-org/awi-ciroh-image
#
# NOTE: The configurator is used in this cluster, so this is stale
# configuration.
#
name: "quay.io/2i2c/awi-ciroh-image"
tag: "584293e50d4c"
profileList:
# The mem-guarantees are here so k8s doesn't schedule other pods
# on these nodes. They need to be just under total allocatable
# RAM on a node, not total node capacity. Values calculated using
# https://learnk8s.io/kubernetes-instance-calculator
#
# FIXME: These are changed to a temporary node sharing setup based on
# the legacy choices to help us pre-warm capacity independent on
# the choices users end up making and avoiding running into
# persistent disk quotas.
#
# Change PR: https://github.com/2i2c-org/infrastructure/pull/2539
# Related event: https://github.com/2i2c-org/infrastructure/issues/2520
#
# This is an interim setup, trying to balance the experience of
# the previous 1:1 user:node setup with a node sharing setup. It
# is not meant to be retained long term!
#
# -[ ] Make this cluster have a node sharing setup like in the
# basehub/daskhub template.
#
- display_name: "Small"
description: 5GB RAM, 2 CPUs
default: true
kubespawner_override:
mem_limit: 7G
mem_guarantee: 5G
cpu_limit: 2
cpu_guarantee: 0.938
node_selector:
node.kubernetes.io/instance-type: n2-highmem-16
- display_name: Medium
description: 11GB RAM, 4 CPUs
kubespawner_override:
mem_limit: 15G
mem_guarantee: 11G
cpu_limit: 4
cpu_guarantee: 1.875
node_selector:
node.kubernetes.io/instance-type: n2-highmem-16
- display_name: Large
description: 24GB RAM, 8 CPUs
kubespawner_override:
mem_limit: 30G
mem_guarantee: 24G
cpu_limit: 8
cpu_guarantee: 3.75
node_selector:
node.kubernetes.io/instance-type: n2-highmem-16
- display_name: Huge
description: 52GB RAM, 16 CPUs
kubespawner_override:
mem_limit: 60G
mem_guarantee: 52G
cpu_limit: 16
cpu_guarantee: 7.5
node_selector:
node.kubernetes.io/instance-type: n2-highmem-16
- sepehrkrz
- benlee0423
dask-gateway:
gateway:
backend:
66 changes: 66 additions & 0 deletions config/clusters/awi-ciroh/prod.values.yaml
Original file line number Diff line number Diff line change
@@ -9,6 +9,72 @@ basehub:
- hosts: [ciroh.awi.2i2c.cloud]
secretName: https-auto-tls
singleuser:
image:
# Image build repo: https://github.com/2i2c-org/awi-ciroh-image
#
# NOTE: The configurator is used in this cluster, so this is stale
# configuration.
#
name: "quay.io/2i2c/awi-ciroh-image"
tag: "584293e50d4c"
profileList:
# The mem-guarantees are here so k8s doesn't schedule other pods
# on these nodes. They need to be just under total allocatable
# RAM on a node, not total node capacity. Values calculated using
# https://learnk8s.io/kubernetes-instance-calculator
#
# FIXME: These are changed to a temporary node sharing setup based on
# the legacy choices to help us pre-warm capacity independent on
# the choices users end up making and avoiding running into
# persistent disk quotas.
#
# Change PR: https://github.com/2i2c-org/infrastructure/pull/2539
# Related event: https://github.com/2i2c-org/infrastructure/issues/2520
#
# This is an interim setup, trying to balance the experience of
# the previous 1:1 user:node setup with a node sharing setup. It
# is not meant to be retained long term!
#
# -[ ] Make this cluster have a node sharing setup like in the
# basehub/daskhub template.
#
- display_name: "Small"
description: 5GB RAM, 2 CPUs
default: true
kubespawner_override:
mem_limit: 7G
mem_guarantee: 5G
cpu_limit: 2
cpu_guarantee: 0.938
node_selector:
node.kubernetes.io/instance-type: n2-highmem-16
- display_name: Medium
description: 11GB RAM, 4 CPUs
kubespawner_override:
mem_limit: 15G
mem_guarantee: 11G
cpu_limit: 4
cpu_guarantee: 1.875
node_selector:
node.kubernetes.io/instance-type: n2-highmem-16
- display_name: Large
description: 24GB RAM, 8 CPUs
kubespawner_override:
mem_limit: 30G
mem_guarantee: 24G
cpu_limit: 8
cpu_guarantee: 3.75
node_selector:
node.kubernetes.io/instance-type: n2-highmem-16
- display_name: Huge
description: 52GB RAM, 16 CPUs
kubespawner_override:
mem_limit: 60G
mem_guarantee: 52G
cpu_limit: 16
cpu_guarantee: 7.5
node_selector:
node.kubernetes.io/instance-type: n2-highmem-16
extraEnv:
SCRATCH_BUCKET: gs://awi-ciroh-scratch/$(JUPYTERHUB_USER)
PANGEO_SCRATCH: gs://awi-ciroh-scratch/$(JUPYTERHUB_USER)
77 changes: 77 additions & 0 deletions config/clusters/awi-ciroh/staging.values.yaml
Original file line number Diff line number Diff line change
@@ -9,6 +9,83 @@ basehub:
- hosts: [staging.ciroh.awi.2i2c.cloud]
secretName: https-auto-tls
singleuser:
profileList:
# The mem-guarantees are here so k8s doesn't schedule other pods
# on these nodes. They need to be just under total allocatable
# RAM on a node, not total node capacity. Values calculated using
# https://learnk8s.io/kubernetes-instance-calculator
#
# FIXME: These are changed to a temporary node sharing setup based on
# the legacy choices to help us pre-warm capacity independent on
# the choices users end up making and avoiding running into
# persistent disk quotas.
#
# Change PR: https://github.com/2i2c-org/infrastructure/pull/2539
# Related event: https://github.com/2i2c-org/infrastructure/issues/2520
#
# This is an interim setup, trying to balance the experience of
# the previous 1:1 user:node setup with a node sharing setup. It
# is not meant to be retained long term!
#
# -[ ] Make this cluster have a node sharing setup like in the
# basehub/daskhub template.
#
- display_name: "Small"
description: 5GB RAM, 2 CPUs
default: true
profile_options: &profile_options
image:
display_name: Image
# Requested in https://2i2c.freshdesk.com/a/tickets/1387
choices:
old:
display_name: Original Pangeo Notebook base image 2022.07.13
slug: "old"
kubespawner_override:
image: "quay.io/2i2c/awi-ciroh-image:88ea9a74a66e"
new:
display_name: New Pangeo Notebook base image 2023.09.11
default: true
slug: "new"
kubespawner_override:
image: "quay.io/2i2c/awi-ciroh-image:f7222fce8b16"
kubespawner_override:
mem_limit: 7G
mem_guarantee: 5G
cpu_limit: 2
cpu_guarantee: 0.938
node_selector:
node.kubernetes.io/instance-type: n2-highmem-16
- display_name: Medium
description: 11GB RAM, 4 CPUs
profile_options: *profile_options
kubespawner_override:
mem_limit: 15G
mem_guarantee: 11G
cpu_limit: 4
cpu_guarantee: 1.875
node_selector:
node.kubernetes.io/instance-type: n2-highmem-16
- display_name: Large
description: 24GB RAM, 8 CPUs
profile_options: *profile_options
kubespawner_override:
mem_limit: 30G
mem_guarantee: 24G
cpu_limit: 8
cpu_guarantee: 3.75
node_selector:
node.kubernetes.io/instance-type: n2-highmem-16
- display_name: Huge
description: 52GB RAM, 16 CPUs
profile_options: *profile_options
kubespawner_override:
mem_limit: 60G
mem_guarantee: 52G
cpu_limit: 16
cpu_guarantee: 7.5
node_selector:
node.kubernetes.io/instance-type: n2-highmem-16
extraEnv:
SCRATCH_BUCKET: gs://awi-ciroh-scratch-staging/$(JUPYTERHUB_USER)
PANGEO_SCRATCH: gs://awi-ciroh-scratch-staging/$(JUPYTERHUB_USER)

0 comments on commit 3054b8f

Please sign in to comment.