From fdacaf2aa723678ccbe13fe7056f5cc4a8ce089c Mon Sep 17 00:00:00 2001
From: Christie Wilson
Date: Thu, 31 Oct 2019 16:48:52 -0400
Subject: [PATCH] WIP Adding "volume" aka "workspace" declaration to Tasks

So far just docs and examples for declaring volumes in a Task via the
"workspace" field and binding to them at runtime.

This is midway toward @skaegi's proposal because it allows the user to bring
their own PVC, but it isn't the full version because it does not include
volume devices.

Will eventually fix #1438
---
 docs/pipelineruns.md                  |  27 +++++
 docs/pipelines.md                     |  92 +++++++++++++---
 docs/taskruns.md                      |  31 +++++-
 docs/tasks.md                         |  58 +++++++++-
 examples/README.md                    |   9 ++
 examples/pipelineruns/workspaces.yaml | 149 ++++++++++++++++++++++++++
 examples/storageclass.yaml            |  20 ++++
 test/e2e-common.sh                    |   5 +
 8 files changed, 373 insertions(+), 18 deletions(-)
 create mode 100644 examples/pipelineruns/workspaces.yaml
 create mode 100644 examples/storageclass.yaml

diff --git a/docs/pipelineruns.md b/docs/pipelineruns.md
index 573e461d09f..6119125f9b6 100644
--- a/docs/pipelineruns.md
+++ b/docs/pipelineruns.md
@@ -16,6 +16,7 @@ Creation of a `PipelineRun` will trigger the creation of
   - [Service account](#service-account)
   - [Service accounts](#service-accounts)
   - [Pod Template](#pod-template)
+  - [Workspaces](#workspaces)
 - [Cancelling a PipelineRun](#cancelling-a-pipelinerun)
 - [Examples](https://github.com/tektoncd/pipeline/tree/master/examples/pipelineruns)
 - [Logs](logs.md)
@@ -265,6 +266,32 @@ spec:
         claimName: my-volume-claim
 ```
 
+## Workspaces
+
+If you are using a [`Pipeline` that declares `workspaces`](pipelines.md#declared-workspaces),
+at runtime you need to map those `workspaces` to actual physical volumes via the
+`workspaces` field of the `PipelineRun`. Values in `workspaces` are
+[`Volumes`](https://kubernetes.io/docs/tasks/configure-pod-container/configure-volume-storage/)
+(see https://kubernetes.io/docs/concepts/storage/volumes for the possible values).
+
+For example, to provide an existing PVC called `mypvc` for a `workspace` called
+`myworkspace` declared by the `Pipeline`:
+
+```yaml
+workspaces:
+- name: myworkspace
+  persistentVolumeClaim:
+    claimName: mypvc
+```
+
+Or to use [`emptyDir`](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) for the same `workspace`:
+
+```yaml
+workspaces:
+- name: myworkspace
+  emptyDir: {}
+```
+
 ## Cancelling a PipelineRun
 
 In order to cancel a running pipeline (`PipelineRun`), you need to update its
diff --git a/docs/pipelines.md b/docs/pipelines.md
index 62e2f9694aa..323fd59d2f0 100644
--- a/docs/pipelines.md
+++ b/docs/pipelines.md
@@ -6,6 +6,7 @@ This document defines `Pipelines` and their capabilities.
 
 - [Syntax](#syntax)
   - [Declared resources](#declared-resources)
+  - [Workspaces](#declared-workspaces)
   - [Parameters](#parameters)
   - [Pipeline Tasks](#pipeline-tasks)
     - [From](#from)
@@ -39,27 +40,28 @@ following fields:
     - [`from`](#from) - Used when the content of the
       [`PipelineResource`](resources.md) should come from the
       [output](tasks.md#outputs) of a previous [Pipeline Task](#pipeline-tasks)
-  - [`runAfter`](#runAfter) - Used when the [Pipeline Task](#pipeline-tasks)
-    should be executed after another Pipeline Task, but there is no
-    [output linking](#from) required
-  - [`retries`](#retries) - Used when the task is wanted to be executed if
-    it fails. Could be a network error or a missing dependency. It does not
-    apply to cancellations.
-  - [`conditions`](#conditions) - Used when a task is to be executed only if the specified
-    conditions are evaluated to be true.
+  - [`runAfter`](#runAfter) - Used when the [Pipeline Task](#pipeline-tasks)
+    should be executed after another Pipeline Task, but there is no
+    [output linking](#from) required
+  - [`retries`](#retries) - Used when the task should be retried if it fails,
+    for example because of a network error or a missing dependency. It does not
+    apply to cancellations.
+  - [`conditions`](#conditions) - Used when a task is to be executed only if the specified
+    conditions are evaluated to be true.
+  - [`workspaces`](#pipeline-tasks) - Specify which of the [declared workspaces](#declared-workspaces)
+    to use for the `Task`
 
 [kubernetes-overview]:
   https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/#required-fields
 
 ### Declared resources
 
-In order for a `Pipeline` to interact with the outside world, it will probably
-need [`PipelineResources`](resources.md) which will be given to
+Your `Pipeline` may need [`PipelineResources`](resources.md) which will be given to
 `Tasks` as inputs and outputs.
 
 Your `Pipeline` must declare the `PipelineResources` it needs in a `resources`
 section in the `spec`, giving each a name which will be used to refer to these
-`PipelineResources` in the `Tasks`.
+`PipelineResources` in the [Pipeline's `Tasks`](#pipeline-tasks).
 
 For example:
 
 ```yaml
 spec:
   resources:
     - name: my-repo
       type: git
     - name: my-image
       type: image
 ```
 
+### Declared Workspaces
+
+Similar to [declaring resources](#declared-resources), if your `Pipeline`
+uses [`Tasks` that declare `workspaces`](tasks.md#workspaces), you must
+declare the `workspaces` your `Pipeline` expects so that you can
+[pass them to the `Tasks`](#pipeline-tasks).
+
+For example:
+
+```yaml
+spec:
+  workspaces:
+    - name: someVolume
+    - name: someOtherVolume
+```
+
 ### Parameters
 
 `Pipeline`s can declare input parameters that must be supplied to the `Pipeline`
@@ -157,6 +201,11 @@ the `Pipeline` as inputs and outputs, for example:
 
 ```yaml
 spec:
+  resources:
+    - name: my-repo
+      type: git
+    - name: my-image
+      type: image
   tasks:
     - name: build-the-image
       taskRef:
@@ -170,6 +219,25 @@ spec:
       resource: my-image
 ```
 
+[Declared workspaces](#declared-workspaces) can be provided to the `Task` as well,
+for example:
+
+```yaml
+spec:
+  workspaces:
+    - name: someVolume
+    - name: someOtherVolume
+  tasks:
+    - name: first-write-some-files
+      taskRef:
+        name: create-files
+      workspaces:
+        - name: volume1
+          workspace: someVolume
+        - name: volume2
+          workspace: someOtherVolume
+```
+
 [Parameters](tasks.md#parameters) can also be provided:
 
 ```yaml
diff --git a/docs/taskruns.md b/docs/taskruns.md
index 30f77eeb4dc..49b079dd130 100644
--- a/docs/taskruns.md
+++ b/docs/taskruns.md
@@ -15,9 +15,9 @@ A `TaskRun` runs until all `steps` have completed or until a failure occurs.
 - [Specifying a `Task`](#specifying-a-task)
   - [Input parameters](#input-parameters)
   - [Providing resources](#providing-resources)
-  - [Overriding where resources are copied from](#overriding-where-resources-are-copied-from)
   - [Service Account](#service-account)
   - [Pod Template](#pod-template)
+  - [Workspaces](#workspaces)
 - [Status](#status)
   - [Steps](#steps)
 - [Cancelling a TaskRun](#cancelling-a-taskrun)
@@ -58,6 +58,8 @@ following fields:
   - [`podTemplate`](#pod-template) - Specifies a subset of
     [`PodSpec`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/#pod-v1-core)
     configuration that will be used as the basis for the `Task` pod.
+  - [`workspaces`](#workspaces) - Specify the actual volumes to use for the
+    [workspaces](tasks.md#workspaces) declared by a `Task`
 
 [kubernetes-overview]:
   https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/#required-fields
 
@@ -227,7 +229,34 @@ spec:
         claimName: my-volume-claim
 ```
 
+## Workspaces
+
+For a `TaskRun` to execute [a `Task` that declares `workspaces`](tasks.md#workspaces),
+at runtime you need to map the `workspaces` to actual physical volumes via the
+`workspaces` field of the `TaskRun`. Values in `workspaces` are
+[`Volumes`](https://kubernetes.io/docs/tasks/configure-pod-container/configure-volume-storage/)
+(see https://kubernetes.io/docs/concepts/storage/volumes for the possible values).
+
+If the declared `workspaces` are not provided at runtime, the `TaskRun` will fail
+with an error.
+
+For example, to provide an existing PVC called `mypvc` for a `workspace` called
+`myworkspace` declared by the `Task`:
+
+```yaml
+workspaces:
+- name: myworkspace
+  persistentVolumeClaim:
+    claimName: mypvc
+```
+
+Or to use [`emptyDir`](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) for the same `workspace`:
+
+```yaml
+workspaces:
+- name: myworkspace
+  emptyDir: {}
+```
 
 ## Status
 
diff --git a/docs/tasks.md b/docs/tasks.md
index bf1bf5254da..077ac935e82 100644
--- a/docs/tasks.md
+++ b/docs/tasks.md
@@ -9,6 +9,7 @@ A `Task` declares:
 - [Inputs](#inputs)
 - [Outputs](#outputs)
 - [Steps](#steps)
+- [Workspaces](#workspaces)
 
 A `Task` is available within a namespace, and `ClusterTask` is available across
 entire Kubernetes cluster.
@@ -21,9 +22,8 @@ entire Kubernetes cluster.
   - [Step script](#step-script)
 - [Inputs](#inputs)
 - [Outputs](#outputs)
-  - [Controlling where resources are mounted](#controlling-where-resources-are-mounted)
 - [Volumes](#volumes)
-- [Container Template **deprecated**](#step-template)
+- [Workspaces](#workspaces)
 - [Step Template](#step-template)
 - [Variable Substitution](#variable-substitution)
 - [Examples](#examples)
@@ -77,6 +77,8 @@ following fields:
     created by your `Task`
   - [`volumes`](#volumes) - Specifies one or more volumes that you want to make
     available to your `Task`'s steps.
+  - [`workspaces`](#workspaces) - Specifies paths at which you expect volumes to
+    be mounted and available
   - [`stepTemplate`](#step-template) - Specifies a `Container` step definition to
     use as the basis for all steps within your `Task`.
   - [`sidecars`](#sidecars) - Specifies sidecar containers to run alongside
@@ -347,6 +349,37 @@ steps:
     args: ['-c', 'cd /workspace/tar-scratch-space/ && tar -cvf /workspace/customworkspace/rules_docker-master.tar rules_docker-master']
 ```
 
+### Workspaces
+
+`workspaces` are a way of declaring paths that you expect to be made available to your
+executing `Task`. They are similar to [`volumes`](#volumes) but allow you to enforce
+at runtime that the volumes have been attached.
+
+The path at which each workspace is made available is always under the directory
+`/tekton/workspaces/` and can be accessed via
+[variable substitution](#variable-substitution) with
+`$(workspaces.myworkspace.path)`.
+
+The actual volumes must be provided at runtime
+[in the `TaskRun`](taskruns.md#workspaces)
+or [in the `PipelineRun`](pipelineruns.md#workspaces).
+
+When a `Pipeline` uses `Tasks` that declare `workspaces`, it must
+also [declare the `workspaces` it expects and map them to its `Tasks`](pipelines.md#declared-workspaces).
+
+For example:
+
+```yaml
+spec:
+  steps:
+    - name: write-message
+      image: ubuntu
+      command: ['bash']
+      args: ['-c', 'echo hello! > $(workspaces.messages.path)/message']
+  workspaces:
+    - name: messages
+      description: The folder where the message will be written
+```
 
 ### Volumes
 
@@ -459,9 +492,16 @@ has been created to track this bug.
 
 ### Variable Substitution
 
-`Tasks` support string replacement using values from all [`inputs`](#inputs) and
-[`outputs`](#outputs).
+`Tasks` support string replacement using values from:
+
+* [Inputs and Outputs](#input-and-output-substitution)
+* [`workspaces`](#workspaces)
+* [`volumes`](#variable-substitution-with-volumes)
+
+#### Input and Output substitution
+
+[`inputs`](#inputs) and [`outputs`](#outputs) attributes can be used in replacements,
+including [`params`](#params) and [resources](./resources.md#variable-substitution).
 
 Input parameters can be referenced in the `Task` spec using the variable substitution
 syntax below, where `<name>` is the name of the parameter:
 
@@ -472,7 +512,7 @@ $(inputs.params.<name>)
 
 Param values from resources can also be accessed using
 [variable substitution](./resources.md#variable-substitution)
 
-#### Variable Substitution with Parameters of Type `Array`
+##### Variable Substitution with Parameters of Type `Array`
 
 Referenced parameters of type `array` will expand to insert the array elements in the reference string's spot.
@@ -515,6 +555,14 @@ A valid reference to the `build-args` parameter is isolated and in an eligible f
    args: ["build", "$(inputs.params.build-args)", "additonalArg"]
 ```
 
+#### Workspace Substitution
+
+Paths to a `Task`'s declared [workspaces](#workspaces) can be substituted with:
+
+```
+$(workspaces.myworkspace.path)
+```
+
 #### Variable Substitution within Volumes
 
 Task volume names and different
diff --git a/examples/README.md b/examples/README.md
index fc79ac5d3e2..967a0d3447e 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -51,3 +51,12 @@ output-pipeline-run   Succeeded   True   2019-02-11T21:35:43Z
 
 You can also use `kubectl get tr` or `kubectl get pr` to query all `taskruns` or
 `pipelineruns` respectively.
+
+
+## Storage class
+
+[storageclass.yaml](storageclass.yaml) contains the configuration needed to
+[configure Tekton](https://github.com/tektoncd/pipeline/blob/master/docs/install.md#how-are-resources-shared-between-tasks)
+to use a storage class for the automatically created PVC. This makes the examples
+that create and use additional PVCs work on
+[GKE regional clusters](https://cloud.google.com/kubernetes-engine/docs/concepts/regional-clusters#pd),
+where the PVCs must be regional so that they are not provisioned in different zones.
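+
+For example, a minimal way to try the storage class together with the workspaces
+example (a sketch, assuming Tekton Pipelines is already installed and `kubectl` is
+pointed at a GKE regional cluster) is:
+
+```bash
+# Install the regional storage class and point Tekton's PVC configuration at it
+kubectl apply -f examples/storageclass.yaml
+
+# Create the PVCs, Tasks, Pipeline and PipelineRun from the workspaces example
+kubectl apply -f examples/pipelineruns/workspaces.yaml
+
+# Check on the resulting PipelineRun
+kubectl get pipelinerun volume-output-pipeline-run
+```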
diff --git a/examples/pipelineruns/workspaces.yaml b/examples/pipelineruns/workspaces.yaml
new file mode 100644
index 00000000000..6c29b7f02bf
--- /dev/null
+++ b/examples/pipelineruns/workspaces.yaml
@@ -0,0 +1,149 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: pvc1
+spec:
+  # Uses the storageClass defined in examples/storageclass.yaml
+  storageClassName: regional-disk
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 5Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: pvc2
+spec:
+  # Uses the storageClass defined in examples/storageclass.yaml
+  storageClassName: regional-disk
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 5Gi
+---
+# Task writes data to a predefined path
+apiVersion: tekton.dev/v1alpha1
+kind: Task
+metadata:
+  name: create-files
+spec:
+  steps:
+    - name: write-new-stuff-1
+      image: ubuntu
+      command: ['bash']
+      args: ['-c', 'echo stuff1 > $(workspaces.volume1.path)/stuff1']
+    - name: write-new-stuff-2
+      image: ubuntu
+      command: ['bash']
+      args: ['-c', 'echo stuff2 > $(workspaces.volume2.path)/stuff2']
+  workspaces:
+    - name: volume1
+      description: The first volume we will write stuff to
+    - name: volume2
+      description: The second volume we will write stuff to
+---
+# Reads files from a predefined path and writes as well
+apiVersion: tekton.dev/v1alpha1
+kind: Task
+metadata:
+  name: files-exist-and-add-new
+spec:
+  steps:
+    - name: read1
+      image: ubuntu
+      command: ["/bin/bash"]
+      args:
+        - '-c'
+        - '[[ stuff1 == $(cat $(workspaces.volume1.path)/stuff1) ]]'
+    - name: read2
+      image: ubuntu
+      command: ["/bin/bash"]
+      args:
+        - '-c'
+        - '[[ stuff2 == $(cat $(workspaces.volume2.path)/stuff2) ]]'
+    - name: write-new-stuff-3
+      image: ubuntu
+      command: ['bash']
+      args: ['-c', 'echo stuff3 > $(workspaces.volume1.path)/stuff3']
+  workspaces:
+    - name: volume1
+      description: The first volume which we will read from and write to
+    - name: volume2
+      description: The second volume which we will read from
+---
+# Reads files from a predefined path
+apiVersion: tekton.dev/v1alpha1
+kind: Task
+metadata:
+  name: files-exist
+spec:
+  steps:
+    - name: read1
+      image: ubuntu
+      command: ["/bin/bash"]
+      args:
+        - '-c'
+        - '[[ stuff1 == $(cat $(workspaces.volume.path)/stuff1) ]]'
+    - name: read3
+      image: ubuntu
+      command: ["/bin/bash"]
+      args:
+        - '-c'
+        - '[[ stuff3 == $(cat $(workspaces.volume.path)/stuff3) ]]'
+  workspaces:
+    - name: volume
+      description: The volume we will read from
+---
+# The first Task writes files to two volumes. The next Task ensures these files exist,
+# then writes a third file to the first volume. The last Task ensures both expected
+# files exist on this volume.
+apiVersion: tekton.dev/v1alpha1
+kind: Pipeline
+metadata:
+  name: volume-output-pipeline
+spec:
+  workspaces:
+    - name: someVolume
+    - name: someOtherVolume
+  tasks:
+    - name: first-create-files
+      taskRef:
+        name: create-files
+      workspaces:
+        - name: volume1
+          workspace: someVolume
+        - name: volume2
+          workspace: someOtherVolume
+    - name: then-check-and-write
+      taskRef:
+        name: files-exist-and-add-new
+      workspaces:
+        - name: volume1
+          workspace: someVolume
+        - name: volume2
+          workspace: someOtherVolume
+    - name: then-check
+      taskRef:
+        name: files-exist
+      workspaces:
+        - name: volume
+          workspace: someVolume
+---
+apiVersion: tekton.dev/v1alpha1
+kind: PipelineRun
+metadata:
+  name: volume-output-pipeline-run
+spec:
+  pipelineRef:
+    name: volume-output-pipeline
+  serviceAccount: 'default'
+  workspaces:
+    - name: someVolume
+      persistentVolumeClaim:
+        claimName: pvc1
+    - name: someOtherVolume
+      persistentVolumeClaim:
+        claimName: pvc2
\ No newline at end of file
diff --git a/examples/storageclass.yaml b/examples/storageclass.yaml
new file mode 100644
index 00000000000..94e9dea678b
--- /dev/null
+++ b/examples/storageclass.yaml
@@ -0,0 +1,20 @@
+# Some examples use multiple PVCs and will be run against a regional GKE cluster.
+# This means we have to make sure that the PVCs aren't created in different zones,
+# and the only way to do this is to create regional PVCs.
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: regional-disk
+provisioner: kubernetes.io/gce-pd
+parameters:
+  type: pd-ssd
+  replication-type: regional-pd
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: config-artifact-pvc
+  namespace: tekton-pipelines
+data:
+  # Storage class used for the PVCs Tekton creates
+  storageClassName: regional-disk
\ No newline at end of file
diff --git a/test/e2e-common.sh b/test/e2e-common.sh
index 81392685335..10291e8aa26 100755
--- a/test/e2e-common.sh
+++ b/test/e2e-common.sh
@@ -129,6 +129,11 @@ function run_yaml_tests() {
 function install_pipeline_crd() {
   echo ">> Deploying Tekton Pipelines"
   ko apply -f config/ || fail_test "Build pipeline installation failed"
+
+  echo ">> Applying storage class configuration"
+  kubectl delete configmap config-artifact-pvc --namespace tekton-pipelines
+  kubectl apply -f examples/storageclass.yaml
+
   verify_pipeline_installation
 }