Update SDK docs for KFP extensions to follow Sphinx guidelines
alexlatchford committed Jul 16, 2020
1 parent 0c861f2 commit b7b132f
Showing 7 changed files with 24 additions and 23 deletions.
1 change: 0 additions & 1 deletion docs/conf.py
@@ -106,7 +106,6 @@
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
8 changes: 4 additions & 4 deletions docs/index.rst
@@ -3,20 +3,20 @@
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
- Welcome to Kubeflow Pipelines SDK API reference
- ================================================
+ Kubeflow Pipelines SDK API
+ ==========================

Main documentation: https://www.kubeflow.org/docs/pipelines/

Source code: https://github.com/kubeflow/pipelines/

.. toctree::
:maxdepth: 3
- :caption: Contents:
+ :caption: Contents

self
source/kfp


.. * :ref:`modindex`
.. * :ref:`kfp-ref`
.. * :ref:`search`
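The docstring changes in the Python files below all follow the same reStructuredText convention: a line ending in `::` introduces an indented literal block, which Sphinx renders as preformatted text. A minimal, hypothetical docstring sketch of the pattern (the function and secret names are illustrative, not part of this commit)::

    def use_example_secret(secret_name='example-secret'):
        """An operator that configures the container to use a credential secret.

        The indented block after the ``::`` marker is rendered by Sphinx as a
        literal block.
        ::

            apiVersion: v1
            kind: Secret
            metadata:
              name: example-secret
        """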
8 changes: 5 additions & 3 deletions sdk/python/kfp/aws.py
@@ -15,9 +15,11 @@
def use_aws_secret(secret_name='aws-secret', aws_access_key_id_name='AWS_ACCESS_KEY_ID', aws_secret_access_key_name='AWS_SECRET_ACCESS_KEY'):
"""An operator that configures the container to use AWS credentials.
- AWS doesn't create secret along with kubeflow deployment and it requires users
- to manually create credential secret with proper permissions.
- ---
+ AWS doesn't create secret along with kubeflow deployment and it requires users
+ to manually create credential secret with proper permissions.
+ ::
apiVersion: v1
kind: Secret
metadata:
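For orientation, a minimal usage sketch of `use_aws_secret` applied to a pipeline step, assuming the KFP v1 `dsl.ContainerOp` API; the pipeline name, image, and command are illustrative::

    import kfp.dsl as dsl
    from kfp import aws

    @dsl.pipeline(name='aws-secret-example', description='Illustrative only.')
    def my_pipeline():
        train = dsl.ContainerOp(
            name='train',
            image='my-registry/train:latest',   # placeholder image
            command=['python', 'train.py'],
        )
        # Inject AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY into the step's
        # environment from the manually created 'aws-secret' Kubernetes secret.
        train.apply(aws.use_aws_secret(secret_name='aws-secret'))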
7 changes: 3 additions & 4 deletions sdk/python/kfp/azure.py
@@ -15,11 +15,10 @@
def use_azure_secret(secret_name='azcreds'):
"""An operator that configures the container to use Azure user credentials.
- The azcreds secret is created as part of the kubeflow deployment that
- stores the client ID and secrets for the kubeflow azure service principal.
+ The azcreds secret is created as part of the kubeflow deployment that
+ stores the client ID and secrets for the kubeflow azure service principal.
- With this service principal, the container has a range of Azure APIs to
- access to.
+ With this service principal, the container has a range of Azure APIs to access to.
"""

def _use_azure_secret(task):
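Likewise, a hedged usage sketch for `use_azure_secret`, again assuming the KFP v1 `dsl.ContainerOp` API (image and command are placeholders)::

    import kfp.dsl as dsl
    from kfp import azure

    @dsl.pipeline(name='azure-secret-example', description='Illustrative only.')
    def my_pipeline():
        step = dsl.ContainerOp(
            name='list-containers',
            image='mcr.microsoft.com/azure-cli',   # placeholder image
            command=['az', 'storage', 'container', 'list'],
        )
        # Expose the service principal credentials stored in the 'azcreds'
        # Kubernetes secret to the container as environment variables.
        step.apply(azure.use_azure_secret(secret_name='azcreds'))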
2 changes: 0 additions & 2 deletions sdk/python/kfp/dsl/_container_op.py
@@ -569,8 +569,6 @@ class UserContainer(Container):
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
Example:
::
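A hedged sketch of how a `UserContainer` is typically attached to a step as an init container in the KFP v1 dsl, separate from the truncated example in the hunk above (names, image, and command are illustrative)::

    import kfp.dsl as dsl

    @dsl.pipeline(name='init-container-example', description='Illustrative only.')
    def my_pipeline():
        # Runs to completion before the main container of the step starts.
        init = dsl.UserContainer(
            name='wait-for-data',
            image='busybox',
            command=['sh', '-c', 'echo waiting && sleep 5'],
        )
        dsl.ContainerOp(
            name='main',
            image='busybox',
            command=['sh', '-c', 'echo main step'],
            init_containers=[init],
        )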
11 changes: 6 additions & 5 deletions sdk/python/kfp/gcp.py
@@ -17,9 +17,9 @@

def use_gcp_secret(secret_name='user-gcp-sa', secret_file_path_in_volume=None, volume_name=None, secret_volume_mount_path='/secret/gcp-credentials'):
"""An operator that configures the container to use GCP service account by service account key
- stored in a Kubernetes secret.
+ stored in a Kubernetes secret.
- For cluster setup and alternatives to using service account key, check https://www.kubeflow.org/docs/gke/authentication-pipelines/.
+ For cluster setup and alternatives to using service account key, check https://www.kubeflow.org/docs/gke/authentication-pipelines/.
"""

# permitted values for secret_name = ['admin-gcp-sa', 'user-gcp-sa']
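A minimal usage sketch for `use_gcp_secret`, assuming the KFP v1 `dsl.ContainerOp` API; the bucket path and image are illustrative::

    import kfp.dsl as dsl
    from kfp import gcp

    @dsl.pipeline(name='gcp-secret-example', description='Illustrative only.')
    def my_pipeline():
        step = dsl.ContainerOp(
            name='copy-data',
            image='google/cloud-sdk:slim',
            command=['gsutil', 'cp', 'gs://example-bucket/data.csv', '/tmp/data.csv'],
        )
        # Mounts the service account key from the 'user-gcp-sa' secret and
        # points GOOGLE_APPLICATION_CREDENTIALS at the mounted file.
        step.apply(gcp.use_gcp_secret(secret_name='user-gcp-sa'))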
@@ -95,9 +95,10 @@ def use_preemptible_nodepool(toleration: V1Toleration = V1Toleration(effect='NoS
value='true'),
hard_constraint: bool = False):
"""An operator that configures the GKE preemptible in a container op.
Args:
- toleration (V1Toleration): toleration to pods, default is the preemptible label.
- hard_constraint (bool): the constraint of scheduling the pods on preemptible
+ toleration: toleration to pods, default is the preemptible label.
+ hard_constraint: the constraint of scheduling the pods on preemptible
nodepools is hard. (Default: False)
"""

@@ -127,7 +128,7 @@ def add_gpu_toleration(toleration: V1Toleration = V1Toleration(
"""An operator that configures the GKE GPU nodes in a container op.
Args:
- toleration {V1Toleration} -- toleration to pods, default is the nvidia.com/gpu label.
+ toleration: toleration to pods, default is the nvidia.com/gpu label.
"""

def _set_toleration(task):
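And for `add_gpu_toleration`, a hedged sketch combining it with a GPU resource limit, again assuming the KFP v1 `dsl.ContainerOp` API (image and command are placeholders)::

    import kfp.dsl as dsl
    from kfp import gcp

    @dsl.pipeline(name='gpu-example', description='Illustrative only.')
    def my_pipeline():
        train = dsl.ContainerOp(
            name='train-gpu',
            image='my-registry/train-gpu:latest',
            command=['python', 'train.py'],
        )
        # Request one GPU and tolerate the default nvidia.com/gpu taint so the
        # step can be scheduled onto a GPU node pool.
        train.set_gpu_limit(1)
        train.apply(gcp.add_gpu_toleration())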
10 changes: 6 additions & 4 deletions sdk/python/kfp/onprem.py
@@ -1,9 +1,11 @@

def mount_pvc(pvc_name='pipeline-claim', volume_name='pipeline', volume_mount_path='/mnt/pipeline'):
"""
Modifier function to apply to a Container Op to simplify volume, volume mount addition and
enable better reuse of volumes, volume claims across container ops.
Usage:
"""Modifier function to apply to a Container Op to simplify volume, volume mount addition and
enable better reuse of volumes, volume claims across container ops.
Example:
::
train = train_op(...)
train.apply(mount_pvc('claim-name', 'pipeline', '/mnt/pipeline'))
"""
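To round out the docstring's example, a hedged sketch of reusing one PersistentVolumeClaim across two steps, assuming the KFP v1 `dsl.ContainerOp` API and a pre-created claim named 'pipeline-claim' (names and images are illustrative)::

    import kfp.dsl as dsl
    from kfp import onprem

    @dsl.pipeline(name='pvc-example', description='Illustrative only.')
    def my_pipeline():
        produce = dsl.ContainerOp(
            name='produce',
            image='busybox',
            command=['sh', '-c', 'echo hello > /mnt/pipeline/greeting.txt'],
        )
        consume = dsl.ContainerOp(
            name='consume',
            image='busybox',
            command=['sh', '-c', 'cat /mnt/pipeline/greeting.txt'],
        )
        # Both steps mount the same claim, so data written by 'produce' is
        # visible to 'consume'.
        for step in (produce, consume):
            step.apply(onprem.mount_pvc('pipeline-claim', 'pipeline', '/mnt/pipeline'))
        consume.after(produce)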
