Added support for Google TPU #2143

Open · wants to merge 6 commits into master
2 changes: 2 additions & 0 deletions metaflow/metaflow_config.py
@@ -359,6 +359,8 @@
KUBERNETES_ANNOTATIONS = from_conf("KUBERNETES_ANNOTATIONS", "")
# Default GPU vendor to use by K8S jobs created by Metaflow (supports nvidia, amd)
KUBERNETES_GPU_VENDOR = from_conf("KUBERNETES_GPU_VENDOR", "nvidia")
# Default TPU vendor to use by K8S jobs created by Metaflow (supports google)
KUBERNETES_TPU_VENDOR = from_conf("KUBERNETES_TPU_VENDOR", "google")
# Default container image for K8S
KUBERNETES_CONTAINER_IMAGE = from_conf(
"KUBERNETES_CONTAINER_IMAGE", DEFAULT_CONTAINER_IMAGE
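The new default can be overridden like any other Kubernetes setting. A minimal sketch, assuming the usual from_conf convention of reading METAFLOW_<NAME> from the environment or the Metaflow config file:

# Hedged sketch: override the default TPU vendor before importing Metaflow.
# METAFLOW_KUBERNETES_TPU_VENDOR is the assumed variable name, following the
# METAFLOW_<CONFIG_NAME> convention used by from_conf.
import os

os.environ["METAFLOW_KUBERNETES_TPU_VENDOR"] = "google"

from metaflow.metaflow_config import KUBERNETES_TPU_VENDOR

print(KUBERNETES_TPU_VENDOR)  # "google" (also the default)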
7 changes: 7 additions & 0 deletions metaflow/plugins/airflow/airflow.py
@@ -446,6 +446,13 @@ def _to_job(self, node):
# Don't set GPU limits if gpu isn't specified.
if k8s_deco.attributes["gpu"] is not None
},
**{
"%s.com/tpu".lower()
% k8s_deco.attributes["tpu_vendor"]: str(k8s_deco.attributes["tpu"])
for k in [0]
# Don't set TPU limits if tpu isn't specified.
if k8s_deco.attributes["tpu"] is not None
},
},
)

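The single-element comprehension mirrors the existing GPU block: it adds the <vendor>.com/tpu key to the limits only when a TPU count was actually requested. A standalone sketch of the same idiom, with hypothetical values outside of Airflow:

# Illustration of the conditional-key idiom used above.
tpu = 8                  # hypothetical; None means no TPU requested
tpu_vendor = "google"

limits = {
    "cpu": "1",
    **{
        "%s.com/tpu" % tpu_vendor: str(tpu)
        for _ in [0]
        if tpu is not None   # key is omitted entirely when tpu is None
    },
}
print(limits)  # {'cpu': '1', 'google.com/tpu': '8'}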
10 changes: 10 additions & 0 deletions metaflow/plugins/argo/argo_workflows.py
@@ -1940,6 +1940,8 @@ def _container_templates(self):
disk=str(resources["disk"]),
gpu=resources["gpu"],
gpu_vendor=str(resources["gpu_vendor"]),
tpu=resources["tpu"],
tpu_vendor=str(resources["tpu_vendor"]),
tolerations=resources["tolerations"],
use_tmpfs=use_tmpfs,
tmpfs_tempdir=tmpfs_tempdir,
@@ -2159,6 +2161,14 @@ def _container_templates(self):
for k in [0]
if resources["gpu"] is not None
},
**{
"%s.com/tpu".lower()
% resources["tpu_vendor"]: str(
resources["tpu"]
)
for k in [0]
if resources["tpu"] is not None
},
},
),
# Configure secrets
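For orientation, a step that declares tpu=8 with the default vendor ends up with a vendor-prefixed extended resource in the container template's limits, roughly as in this hedged illustration (the real template built here carries many more fields):

# Hedged illustration only; values are examples, not defaults.
container_resources = {
    "requests": {"cpu": "1", "memory": "4096M"},
    "limits": {"google.com/tpu": "8"},  # added only when resources["tpu"] is not None
}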
8 changes: 8 additions & 0 deletions metaflow/plugins/kubernetes/kubernetes.py
@@ -178,6 +178,8 @@ def create_jobset(
cpu=None,
gpu=None,
gpu_vendor=None,
tpu=None,
tpu_vendor=None,
disk=None,
memory=None,
use_tmpfs=None,
@@ -210,6 +212,8 @@
disk=disk,
gpu=gpu,
gpu_vendor=gpu_vendor,
tpu=tpu,
tpu_vendor=tpu_vendor,
timeout_in_seconds=run_time_limit,
# Retries are handled by Metaflow runtime
retries=0,
@@ -472,6 +476,8 @@ def create_job_object(
cpu=None,
gpu=None,
gpu_vendor=None,
tpu=None,
tpu_vendor=None,
disk=None,
memory=None,
use_tmpfs=None,
@@ -515,6 +521,8 @@
disk=disk,
gpu=gpu,
gpu_vendor=gpu_vendor,
tpu=tpu,
tpu_vendor=tpu_vendor,
timeout_in_seconds=run_time_limit,
# Retries are handled by Metaflow runtime
retries=0,
6 changes: 6 additions & 0 deletions metaflow/plugins/kubernetes/kubernetes_cli.py
@@ -82,6 +82,8 @@ def kubernetes():
@click.option("--memory", help="Memory requirement for Kubernetes pod.")
@click.option("--gpu", help="GPU requirement for Kubernetes pod.")
@click.option("--gpu-vendor", help="GPU vendor requirement for Kubernetes pod.")
@click.option("--tpu", help="TPU requirement for Kubernetes pod.")
@click.option("--tpu-vendor", help="TPU vendor requirement for Kubernetes pod.")
@click.option("--run-id", help="Passed to the top-level 'step'.")
@click.option("--task-id", help="Passed to the top-level 'step'.")
@click.option("--input-paths", help="Passed to the top-level 'step'.")
@@ -163,6 +165,8 @@ def step(
memory=None,
gpu=None,
gpu_vendor=None,
tpu=None,
tpu_vendor=None,
use_tmpfs=None,
tmpfs_tempdir=None,
tmpfs_size=None,
@@ -305,6 +309,8 @@ def _sync_metadata():
memory=memory,
gpu=gpu,
gpu_vendor=gpu_vendor,
tpu=tpu,
tpu_vendor=tpu_vendor,
use_tmpfs=use_tmpfs,
tmpfs_tempdir=tmpfs_tempdir,
tmpfs_size=tmpfs_size,
45 changes: 34 additions & 11 deletions metaflow/plugins/kubernetes/kubernetes_decorator.py
@@ -17,6 +17,7 @@
KUBERNETES_DISK,
KUBERNETES_FETCH_EC2_METADATA,
KUBERNETES_GPU_VENDOR,
KUBERNETES_TPU_VENDOR,
KUBERNETES_IMAGE_PULL_POLICY,
KUBERNETES_MEMORY,
KUBERNETES_LABELS,
@@ -89,6 +90,11 @@ class KubernetesDecorator(StepDecorator):
the scheduled node should not have GPUs.
gpu_vendor : str, default KUBERNETES_GPU_VENDOR
The vendor of the GPUs to be used for this step.
tpu : int, optional, default None
Number of TPUs required for this step. A value of zero implies that
the scheduled node should not have TPUs.
tpu_vendor : str, default KUBERNETES_TPU_VENDOR
The vendor of the TPUs to be used for this step.
tolerations : List[str], default []
The default is extracted from METAFLOW_KUBERNETES_TOLERATIONS.
Kubernetes tolerations to use when launching pod in Kubernetes.
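To make the new tpu and tpu_vendor attributes concrete, a hypothetical flow using them could look like this (flow and step names are made up for illustration; only the two TPU attributes come from this change):

# Hypothetical usage sketch of the new decorator attributes.
from metaflow import FlowSpec, kubernetes, step


class TPUFlow(FlowSpec):

    @kubernetes(tpu=8, tpu_vendor="google", cpu=4, memory=16000)
    @step
    def start(self):
        # The pod for this step is scheduled with a "google.com/tpu: 8" limit.
        self.next(self.end)

    @step
    def end(self):
        pass


if __name__ == "__main__":
    TPUFlow()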
@@ -136,6 +142,8 @@ class KubernetesDecorator(StepDecorator):
"namespace": None,
"gpu": None, # value of 0 implies that the scheduled node should not have GPUs
"gpu_vendor": None,
"tpu": None, # value of 0 implies that the scheduled node should not have TPUs
"tpu_vendor": None,
"tolerations": None, # e.g., [{"key": "arch", "operator": "Equal", "value": "amd"},
# {"key": "foo", "operator": "Equal", "value": "bar"}]
"labels": None, # e.g. {"test-label": "value", "another-label":"value2"}
@@ -169,6 +177,8 @@ def init(self):
self.attributes["service_account"] = KUBERNETES_SERVICE_ACCOUNT
if not self.attributes["gpu_vendor"]:
self.attributes["gpu_vendor"] = KUBERNETES_GPU_VENDOR
if not self.attributes["tpu_vendor"]:
self.attributes["tpu_vendor"] = KUBERNETES_TPU_VENDOR
if not self.attributes["node_selector"] and KUBERNETES_NODE_SELECTOR:
self.attributes["node_selector"] = KUBERNETES_NODE_SELECTOR
if not self.attributes["tolerations"] and KUBERNETES_TOLERATIONS:
@@ -342,9 +352,9 @@ def step_init(self, flow, graph, step, decos, environment, flow_datastore, logge
for deco in decos:
if isinstance(deco, ResourcesDecorator):
for k, v in deco.attributes.items():
# If GPU count is specified, explicitly set it in self.attributes.
if k == "gpu" and v != None:
self.attributes["gpu"] = v
# If GPU/TPU count is specified, explicitly set it in self.attributes.
if k in ("gpu", "tpu") and v != None:
self.attributes[k] = v

if k in self.attributes:
if self.defaults[k] is None:
@@ -366,6 +376,14 @@ def step_init(self, flow, graph, step, decos, environment, flow_datastore, logge
)
)

# Check TPU vendor.
if self.attributes["tpu_vendor"].lower() not in ("google"):
raise KubernetesException(
"TPU vendor *{}* for step *{step}* is not currently supported.".format(
self.attributes["tpu_vendor"], step=step
)
)

# CPU, Disk, and Memory values should be greater than 0.
for attr in ["cpu", "disk", "memory"]:
if not (
@@ -378,15 +396,20 @@ def step_init(self, flow, graph, step, decos, environment, flow_datastore, logge
)
)

if self.attributes["gpu"] is not None and not (
isinstance(self.attributes["gpu"], (int, unicode, basestring))
and float(self.attributes["gpu"]).is_integer()
):
raise KubernetesException(
"Invalid GPU value *{}* for step *{step}*; it should be an integer".format(
self.attributes["gpu"], step=step
for accelerator_type in ("gpu", "tpu"):
if self.attributes[accelerator_type] is not None and not (
isinstance(
self.attributes[accelerator_type], (int, unicode, basestring)
)
and float(self.attributes[accelerator_type]).is_integer()
):
raise KubernetesException(
"Invalid {accelerator_type} value *{number}* for step *{step}*; it should be an integer".format(
accelerator_type=accelerator_type.upper(),
number=self.attributes[accelerator_type],
step=step,
)
)
)

if self.attributes["tmpfs_size"]:
if not (
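Taken together, the two checks added above behave like this standalone sketch (a plain ValueError stands in for KubernetesException, and (int, str) stands in for the Python 2/3 compatible type check):

# Standalone sketch of the TPU validation performed in step_init.
def validate_tpu(tpu, tpu_vendor):
    if tpu_vendor.lower() not in ("google",):
        raise ValueError("TPU vendor *%s* is not currently supported." % tpu_vendor)
    if tpu is not None and not (
        isinstance(tpu, (int, str)) and float(tpu).is_integer()
    ):
        raise ValueError("Invalid TPU value *%s*; it should be an integer" % tpu)


validate_tpu(8, "google")      # ok
validate_tpu("4", "google")    # ok: integral string counts are accepted
# validate_tpu(2.5, "google")  # raises: not an integer
# validate_tpu(8, "acme")      # raises: unsupported vendor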
9 changes: 9 additions & 0 deletions metaflow/plugins/kubernetes/kubernetes_job.py
@@ -172,6 +172,15 @@ def create_job_spec(self):
# Don't set GPU limits if gpu isn't specified.
if self._kwargs["gpu"] is not None
},
**{
"%s.com/tpu".lower()
% self._kwargs["tpu_vendor"]: str(
self._kwargs["tpu"]
)
for k in [0]
# Don't set TPU limits if tpu isn't specified.
if self._kwargs["tpu"] is not None
},
},
),
volume_mounts=(
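For reference, the limit lands in the job's container spec roughly as in this hedged sketch using the official kubernetes Python client (the spec built above carries many more fields):

# Hedged sketch of the container resources produced for a step with tpu=8.
from kubernetes import client

resources = client.V1ResourceRequirements(
    requests={"cpu": "1", "memory": "4096M"},
    limits={"google.com/tpu": "8"},  # present only when "tpu" is specified
)
container = client.V1Container(name="main", image="python:3.11", resources=resources)
print(container.resources.limits)  # {'google.com/tpu': '8'}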
9 changes: 9 additions & 0 deletions metaflow/plugins/kubernetes/kubernetes_jobsets.py
@@ -670,6 +670,15 @@ def dump(self):
# Don't set GPU limits if gpu isn't specified.
if self._kwargs["gpu"] is not None
},
**{
"%s.com/tpu".lower()
% self._kwargs["tpu_vendor"]: str(
self._kwargs["tpu"]
)
for k in [0]
# Don't set TPU limits if tpu isn't specified.
if self._kwargs["tpu"] is not None
},
},
),
volume_mounts=(
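One practical note: requesting google.com/tpu alone does not select a TPU slice shape. On GKE, TPU node pools are usually targeted with additional node selectors, so a step would likely pair the new attribute with node_selector roughly as below. The label keys and values are assumptions based on GKE's TPU documentation and will vary by cluster and TPU generation; treat them as placeholders.

# Hedged sketch: combining tpu with node selectors for a GKE TPU node pool.
from metaflow import FlowSpec, kubernetes, step


class TPUNodePoolFlow(FlowSpec):

    @kubernetes(
        tpu=4,
        node_selector={
            "cloud.google.com/gke-tpu-accelerator": "tpu-v5-lite-podslice",  # assumed label
            "cloud.google.com/gke-tpu-topology": "2x2",                      # assumed label
        },
    )
    @step
    def start(self):
        self.next(self.end)

    @step
    def end(self):
        pass


if __name__ == "__main__":
    TPUNodePoolFlow()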