From 8e8fc96944ed4633e2007fabecca55f2d9c2a2bd Mon Sep 17 00:00:00 2001 From: Yongxuan Zhang Date: Fri, 28 Oct 2022 20:06:10 +0000 Subject: [PATCH] add trusted resources e2e tests This commit adds trusted resources e2e tests to test tasks and pipelines verification. --- .../pipelinerun/resources/pipelineref.go | 16 +- pkg/reconciler/taskrun/resources/taskref.go | 16 +- pkg/trustedresources/verify.go | 80 +++- pkg/trustedresources/verify_test.go | 71 +++- test/trusted_resources_test.go | 375 ++++++++++++++++++ test/trustedresources-keys/cosign.key | 11 + test/trustedresources-keys/cosign.pub | 4 + test/trustedresources.go | 18 + 8 files changed, 561 insertions(+), 30 deletions(-) create mode 100644 test/trusted_resources_test.go create mode 100644 test/trustedresources-keys/cosign.key create mode 100644 test/trustedresources-keys/cosign.pub diff --git a/pkg/reconciler/pipelinerun/resources/pipelineref.go b/pkg/reconciler/pipelinerun/resources/pipelineref.go index 5316581bb0d..be2f1cb409c 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelineref.go +++ b/pkg/reconciler/pipelinerun/resources/pipelineref.go @@ -70,7 +70,7 @@ func GetPipelineFunc(ctx context.Context, k8s kubernetes.Interface, tekton clien return nil, fmt.Errorf("failed to get keychain: %w", err) } resolver := oci.NewResolver(pr.Bundle, kc) - return resolvePipeline(ctx, resolver, name) + return resolvePipeline(ctx, resolver, name, k8s) }, nil case pr != nil && pr.Resolver != "" && requester != nil: return func(ctx context.Context, name string) (v1beta1.PipelineObject, error) { @@ -80,13 +80,14 @@ func GetPipelineFunc(ctx context.Context, k8s kubernetes.Interface, tekton clien } replacedParams := replaceParamValues(pr.Params, stringReplacements, arrayReplacements, objectReplacements) resolver := resolution.NewResolver(requester, pipelineRun, string(pr.Resolver), "", "", replacedParams) - return resolvePipeline(ctx, resolver, name) + return resolvePipeline(ctx, resolver, name, k8s) }, nil default: // 
Even if there is no task ref, we should try to return a local resolver. local := &LocalPipelineRefResolver{ Namespace: namespace, Tektonclient: tekton, + K8sclient: k8s, } return local.GetPipeline, nil } @@ -96,6 +97,7 @@ func GetPipelineFunc(ctx context.Context, k8s kubernetes.Interface, tekton clien type LocalPipelineRefResolver struct { Namespace string Tektonclient clientset.Interface + K8sclient kubernetes.Interface } // GetPipeline will resolve a Pipeline from the local cluster using a versioned Tekton client. It will @@ -110,7 +112,7 @@ func (l *LocalPipelineRefResolver) GetPipeline(ctx context.Context, name string) if err != nil { return nil, err } - if err := verifyResolvedPipeline(ctx, pipeline); err != nil { + if err := verifyResolvedPipeline(ctx, pipeline, l.K8sclient); err != nil { return nil, err } return pipeline, nil @@ -120,7 +122,7 @@ func (l *LocalPipelineRefResolver) GetPipeline(ctx context.Context, name string) // fetch a pipeline with given name. An error is returned if the // resolution doesn't work or the returned data isn't a valid // v1beta1.PipelineObject. 
-func resolvePipeline(ctx context.Context, resolver remote.Resolver, name string) (v1beta1.PipelineObject, error) { +func resolvePipeline(ctx context.Context, resolver remote.Resolver, name string, k8s kubernetes.Interface) (v1beta1.PipelineObject, error) { obj, err := resolver.Get(ctx, "pipeline", name) if err != nil { return nil, err @@ -130,7 +132,7 @@ func resolvePipeline(ctx context.Context, resolver remote.Resolver, name string) return nil, fmt.Errorf("failed to convert obj %s into Pipeline", obj.GetObjectKind().GroupVersionKind().String()) } // TODO(#5527): Consider move this function call to GetPipelineData - if err := verifyResolvedPipeline(ctx, pipelineObj); err != nil { + if err := verifyResolvedPipeline(ctx, pipelineObj, k8s); err != nil { return nil, err } return pipelineObj, nil @@ -150,10 +152,10 @@ func readRuntimeObjectAsPipeline(ctx context.Context, obj runtime.Object) (v1bet } // verifyResolvedPipeline verifies the resolved pipeline -func verifyResolvedPipeline(ctx context.Context, pipeline v1beta1.PipelineObject) error { +func verifyResolvedPipeline(ctx context.Context, pipeline v1beta1.PipelineObject, k8s kubernetes.Interface) error { cfg := config.FromContextOrDefaults(ctx) if cfg.FeatureFlags.ResourceVerificationMode == config.EnforceResourceVerificationMode || cfg.FeatureFlags.ResourceVerificationMode == config.WarnResourceVerificationMode { - if err := trustedresources.VerifyPipeline(ctx, pipeline); err != nil { + if err := trustedresources.VerifyPipeline(ctx, pipeline, k8s); err != nil { if cfg.FeatureFlags.ResourceVerificationMode == config.EnforceResourceVerificationMode { return trustedresources.ErrorResourceVerificationFailed } diff --git a/pkg/reconciler/taskrun/resources/taskref.go b/pkg/reconciler/taskrun/resources/taskref.go index 5df5ab0429d..742971a58d0 100644 --- a/pkg/reconciler/taskrun/resources/taskref.go +++ b/pkg/reconciler/taskrun/resources/taskref.go @@ -100,7 +100,7 @@ func GetTaskFunc(ctx context.Context, k8s 
kubernetes.Interface, tekton clientset } resolver := oci.NewResolver(tr.Bundle, kc) - return resolveTask(ctx, resolver, name, kind) + return resolveTask(ctx, resolver, name, kind, k8s) }, nil case tr != nil && tr.Resolver != "" && requester != nil: // Return an inline function that implements GetTask by calling Resolver.Get with the specified task type and @@ -120,7 +120,7 @@ func GetTaskFunc(ctx context.Context, k8s kubernetes.Interface, tekton clientset replacedParams = append(replacedParams, tr.Params...) } resolver := resolution.NewResolver(requester, owner, string(tr.Resolver), trName, namespace, replacedParams) - return resolveTask(ctx, resolver, name, kind) + return resolveTask(ctx, resolver, name, kind, k8s) }, nil default: @@ -129,6 +129,7 @@ func GetTaskFunc(ctx context.Context, k8s kubernetes.Interface, tekton clientset Namespace: namespace, Kind: kind, Tektonclient: tekton, + K8sclient: k8s, } return local.GetTask, nil } @@ -138,7 +139,7 @@ func GetTaskFunc(ctx context.Context, k8s kubernetes.Interface, tekton clientset // fetch a task with given name. An error is returned if the // remoteresource doesn't work or the returned data isn't a valid // v1beta1.TaskObject. -func resolveTask(ctx context.Context, resolver remote.Resolver, name string, kind v1beta1.TaskKind) (v1beta1.TaskObject, error) { +func resolveTask(ctx context.Context, resolver remote.Resolver, name string, kind v1beta1.TaskKind, k8s kubernetes.Interface) (v1beta1.TaskObject, error) { // Because the resolver will only return references with the same kind (eg ClusterTask), this will ensure we // don't accidentally return a Task with the same name but different kind. 
obj, err := resolver.Get(ctx, strings.TrimSuffix(strings.ToLower(string(kind)), "s"), name) @@ -150,7 +151,7 @@ func resolveTask(ctx context.Context, resolver remote.Resolver, name string, kin return nil, fmt.Errorf("failed to convert obj %s into Task", obj.GetObjectKind().GroupVersionKind().String()) } // TODO(#5527): Consider move this function call to GetTaskData - if err := verifyResolvedTask(ctx, taskObj); err != nil { + if err := verifyResolvedTask(ctx, taskObj, k8s); err != nil { return nil, err } return taskObj, nil @@ -173,6 +174,7 @@ type LocalTaskRefResolver struct { Namespace string Kind v1beta1.TaskKind Tektonclient clientset.Interface + K8sclient kubernetes.Interface } // GetTask will resolve either a Task or ClusterTask from the local cluster using a versioned Tekton client. It will @@ -194,7 +196,7 @@ func (l *LocalTaskRefResolver) GetTask(ctx context.Context, name string) (v1beta if err != nil { return nil, err } - if err := verifyResolvedTask(ctx, task); err != nil { + if err := verifyResolvedTask(ctx, task, l.K8sclient); err != nil { return nil, err } return task, nil @@ -206,10 +208,10 @@ func IsGetTaskErrTransient(err error) bool { } // verifyResolvedTask verifies the resolved task -func verifyResolvedTask(ctx context.Context, task v1beta1.TaskObject) error { +func verifyResolvedTask(ctx context.Context, task v1beta1.TaskObject, k8s kubernetes.Interface) error { cfg := config.FromContextOrDefaults(ctx) if cfg.FeatureFlags.ResourceVerificationMode == config.EnforceResourceVerificationMode || cfg.FeatureFlags.ResourceVerificationMode == config.WarnResourceVerificationMode { - if err := trustedresources.VerifyTask(ctx, task); err != nil { + if err := trustedresources.VerifyTask(ctx, task, k8s); err != nil { if cfg.FeatureFlags.ResourceVerificationMode == config.EnforceResourceVerificationMode { return trustedresources.ErrorResourceVerificationFailed } diff --git a/pkg/trustedresources/verify.go b/pkg/trustedresources/verify.go index 
a4a25f01add..d438dbef0a3 100644 --- a/pkg/trustedresources/verify.go +++ b/pkg/trustedresources/verify.go @@ -26,17 +26,23 @@ import ( "fmt" "os" "path/filepath" + "strings" + "github.com/pkg/errors" "github.com/sigstore/sigstore/pkg/cryptoutils" "github.com/sigstore/sigstore/pkg/signature" "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" ) const ( // SignatureAnnotation is the key of signature in annotation map SignatureAnnotation = "tekton.dev/signature" + // keyReference is the prefix of secret reference + keyReference = "k8s://" ) // VerifyInterface get the checksum of json marshalled object and verify it. @@ -57,7 +63,7 @@ func VerifyInterface(obj interface{}, verifier signature.Verifier, signature []b } // VerifyTask verifies the signature and public key against task -func VerifyTask(ctx context.Context, taskObj v1beta1.TaskObject) error { +func VerifyTask(ctx context.Context, taskObj v1beta1.TaskObject, k8s kubernetes.Interface) error { tm, signature, err := prepareObjectMeta(taskObj.TaskMetadata()) if err != nil { return err @@ -69,7 +75,7 @@ func VerifyTask(ctx context.Context, taskObj v1beta1.TaskObject) error { ObjectMeta: tm, Spec: taskObj.TaskSpec(), } - verifiers, err := getVerifiers(ctx) + verifiers, err := getVerifiers(ctx, k8s) if err != nil { return err } @@ -82,7 +88,7 @@ func VerifyTask(ctx context.Context, taskObj v1beta1.TaskObject) error { } // VerifyPipeline verifies the signature and public key against pipeline -func VerifyPipeline(ctx context.Context, pipelineObj v1beta1.PipelineObject) error { +func VerifyPipeline(ctx context.Context, pipelineObj v1beta1.PipelineObject, k8s kubernetes.Interface) error { pm, signature, err := prepareObjectMeta(pipelineObj.PipelineMetadata()) if err != nil { return err @@ -94,7 +100,7 @@ func VerifyPipeline(ctx 
context.Context, pipelineObj v1beta1.PipelineObject) err ObjectMeta: pm, Spec: pipelineObj.PipelineSpec(), } - verifiers, err := getVerifiers(ctx) + verifiers, err := getVerifiers(ctx, k8s) if err != nil { return err } @@ -152,18 +158,16 @@ func prepareObjectMeta(in metav1.ObjectMeta) (metav1.ObjectMeta, []byte, error) } // getVerifiers get all verifiers from configmap -func getVerifiers(ctx context.Context) ([]signature.Verifier, error) { +func getVerifiers(ctx context.Context, k8s kubernetes.Interface) ([]signature.Verifier, error) { cfg := config.FromContextOrDefaults(ctx) verifiers := []signature.Verifier{} - // TODO(#5527): consider using k8s://namespace/name instead of mounting files. for key := range cfg.TrustedResources.Keys { - v, err := verifierForKeyRef(ctx, key, crypto.SHA256) + v, err := verifierForKeyRef(ctx, key, crypto.SHA256, k8s) if err == nil { - verifiers = append(verifiers, v) + verifiers = append(verifiers, v...) } } - if len(verifiers) == 0 { return verifiers, fmt.Errorf("no public keys are founded for verification") } @@ -174,10 +178,33 @@ func getVerifiers(ctx context.Context) ([]signature.Verifier, error) { // verifierForKeyRef parses the given keyRef, loads the key and returns an appropriate // verifier using the provided hash algorithm // TODO(#5527): consider wrap verifiers to resolver so the same verifiers are used for the same reconcile event -func verifierForKeyRef(ctx context.Context, keyRef string, hashAlgorithm crypto.Hash) (verifier signature.Verifier, err error) { - raw, err := os.ReadFile(filepath.Clean(keyRef)) - if err != nil { - return nil, err +func verifierForKeyRef(ctx context.Context, keyRef string, hashAlgorithm crypto.Hash, k8s kubernetes.Interface) (verifiers []signature.Verifier, err error) { + var raw []byte + verifiers = []signature.Verifier{} + // if the ref is secret then we fetch the keys from the secrets + if strings.HasPrefix(keyRef, keyReference) { + s, err := getKeyPairSecret(ctx, keyRef, k8s) + if err != 
nil { + return nil, err + } + for _, raw := range s.Data { + pubKey, err := cryptoutils.UnmarshalPEMToPublicKey(raw) + if err != nil { + return nil, fmt.Errorf("pem to public key: %w", err) + } + v, _ := signature.LoadVerifier(pubKey, hashAlgorithm) + verifiers = append(verifiers, v) + } + if len(verifiers) == 0 { + return verifiers, fmt.Errorf("no public keys are founded for verification") + } + return verifiers, nil + } else { + // read the key from mounted file + raw, err = os.ReadFile(filepath.Clean(keyRef)) + if err != nil { + return nil, err + } } // PEM encoded file. @@ -185,6 +212,31 @@ func verifierForKeyRef(ctx context.Context, keyRef string, hashAlgorithm crypto. if err != nil { return nil, fmt.Errorf("pem to public key: %w", err) } + v, _ := signature.LoadVerifier(pubKey, hashAlgorithm) + verifiers = append(verifiers, v) + + return verifiers, nil +} + +func getKeyPairSecret(ctx context.Context, k8sRef string, k8s kubernetes.Interface) (*v1.Secret, error) { + namespace, name, err := parseRef(k8sRef) + if err != nil { + return nil, err + } - return signature.LoadVerifier(pubKey, hashAlgorithm) + var s *v1.Secret + if s, err = k8s.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{}); err != nil { + return nil, errors.Wrap(err, "checking if secret exists") + } + + return s, nil +} + +// the reference should be formatted as <namespace>/<name> +func parseRef(k8sRef string) (string, string, error) { + s := strings.Split(strings.TrimPrefix(k8sRef, keyReference), "/") + if len(s) != 2 { + return "", "", errors.New("kubernetes specification should be in the format k8s://<namespace>/<name>") + } + return s[0], s[1], nil } diff --git a/pkg/trustedresources/verify_test.go b/pkg/trustedresources/verify_test.go index fa76256c793..21472e3046e 100644 --- a/pkg/trustedresources/verify_test.go +++ b/pkg/trustedresources/verify_test.go @@ -20,6 +20,8 @@ import ( "context" "encoding/base64" "fmt" + "io/ioutil" + "path/filepath" "testing" "github.com/google/go-cmp/cmp" @@ -29,7 +31,9 @@ 
import ( test "github.com/tektoncd/pipeline/test" "github.com/tektoncd/pipeline/test/diff" "go.uber.org/zap/zaptest" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakek8s "k8s.io/client-go/kubernetes/fake" "knative.dev/pkg/logging" ) @@ -141,7 +145,7 @@ func TestVerifyTask(t *testing.T) { for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - err := VerifyTask(ctx, tc.task) + err := VerifyTask(ctx, tc.task, nil) if (err != nil) != tc.wantErr { t.Fatalf("verifyTaskRun() get err %v, wantErr %t", err, tc.wantErr) } @@ -191,7 +195,7 @@ func TestVerifyPipeline(t *testing.T) { for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - err := VerifyPipeline(ctx, tc.pipeline) + err := VerifyPipeline(ctx, tc.pipeline, nil) if (err != nil) != tc.wantErr { t.Fatalf("VerifyPipeline() get err %v, wantErr %t", err, tc.wantErr) } @@ -200,6 +204,69 @@ func TestVerifyPipeline(t *testing.T) { } +func TestVerifyTask_SecretRef(t *testing.T) { + ctx := logging.WithLogger(context.Background(), zaptest.NewLogger(t).Sugar()) + + signer, keypath, err := test.GetSignerFromFile(ctx, t) + if err != nil { + t.Fatal(err) + } + fileBytes, err := ioutil.ReadFile(filepath.Clean(keypath)) + if err != nil { + t.Fatal(err) + } + + secret := &v1.Secret{ + Data: map[string][]byte{"cosign.pub": fileBytes}, + ObjectMeta: metav1.ObjectMeta{ + Name: "verification-secrets", + Namespace: "default"}} + kubeclient := fakek8s.NewSimpleClientset(secret) + + secretref := fmt.Sprintf("%sdefault/verification-secrets", keyReference) + + ctx = test.SetupTrustedResourceConfig(ctx, secretref, config.EnforceResourceVerificationMode) + + unsignedTask := test.GetUnsignedTask("test-task") + + signedTask, err := test.GetSignedTask(unsignedTask, signer, "signed") + if err != nil { + t.Fatal("fail to sign task", err) + } + + tamperedTask := signedTask.DeepCopy() + tamperedTask.Annotations["random"] = "attack" + + tcs := []struct { + name string + task v1beta1.TaskObject 
+ wantErr bool + }{{ + name: "Signed Task Passes Verification", + task: signedTask, + wantErr: false, + }, { + name: "Tampered Task Fails Verification with tampered content", + task: tamperedTask, + wantErr: true, + }, { + name: "Unsigned Task Fails Verification without signature", + task: unsignedTask, + wantErr: true, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + err := VerifyTask(ctx, tc.task, kubeclient) + if (err != nil) != tc.wantErr { + t.Fatalf("verifyTaskRun() get err %v, wantErr %t", err, tc.wantErr) + } + }) + } + +} + func TestPrepareObjectMeta(t *testing.T) { unsigned := test.GetUnsignedTask("test-task").ObjectMeta diff --git a/test/trusted_resources_test.go b/test/trusted_resources_test.go new file mode 100644 index 00000000000..c1ce4390037 --- /dev/null +++ b/test/trusted_resources_test.go @@ -0,0 +1,375 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "context" + "crypto" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/sigstore/sigstore/pkg/signature" + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/pod" + "github.com/tektoncd/pipeline/test/parse" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + "knative.dev/pkg/system" + knativetest "knative.dev/pkg/test" + "knative.dev/pkg/test/helpers" +) + +var ( + neededFeatureFlags = map[string]string{ + "resource-verification-mode": "enforce", + // Make sure this is running under alpha integration tests + "enable-api-fields": "alpha", + } + privKey = "trustedresources-keys/cosign.key" + pubKey = "trustedresources-keys/cosign.pub" + password = "1234" +) + +func init() { + os.Setenv("PRIVATE_PASSWORD", password) +} + +func TestTrustedResourcesVerifyTask(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + c, namespace := setup(ctx, t, requireAnyGate(neededFeatureFlags)) + configMapData := map[string]string{ + "resource-verification-mode": config.EnforceResourceVerificationMode, + } + if err := updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), configMapData); err != nil { + t.Fatal(err) + } + + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) + + // Config signer and public key + signer, err := signature.LoadSignerFromPEMFile(privKey, crypto.SHA256, getPass) + if err != nil { + t.Errorf("error getting signer from key file: %v", err) + } + + fileBytes, err := ioutil.ReadFile(filepath.Clean(pubKey)) + if err != nil { + t.Fatal(err) + } + + secret := &v1.Secret{Data: map[string][]byte{"cosign.pub": fileBytes}, ObjectMeta: metav1.ObjectMeta{Name: "verification-secrets", 
Namespace: system.Namespace()}} + c.KubeClient.CoreV1().Secrets(system.Namespace()).Create(ctx, secret, metav1.CreateOptions{}) + // Check if secret created + _, err = c.KubeClient.CoreV1().Secrets(system.Namespace()).Get(ctx, secret.Name, metav1.GetOptions{}) + if err != nil { + t.Fatal(err) + } + configMapData = map[string]string{ + config.PublicKeys: fmt.Sprintf("k8s://%s/verification-secrets", system.Namespace()), + } + if err := updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetTrustedResourcesConfigName(), configMapData); err != nil { + t.Fatal(err) + } + + // create tasks + fqImageName := getTestImage(busyboxImage) + task := parse.MustParseTask(t, fmt.Sprintf(` +metadata: + name: %s + namespace: %s +spec: + steps: + - image: %s + command: ['/bin/sh'] + args: ['-c', 'echo hello'] +`, helpers.ObjectNameForTest(t), namespace, fqImageName)) + + signed, err := GetSignedTask(task, signer, "signedtask") + if err != nil { + t.Errorf("error getting signed task: %v", err) + } + + tamperedTask := signed.DeepCopy() + tamperedTask.Name = "tampered" + tamperedTask.Annotations["foo"] = "bar" + + tcs := []struct { + name string + task v1beta1.TaskObject + wantErr bool + }{{ + name: "Signed Task Passes Verification", + task: signed, + wantErr: false, + }, { + name: "Tampered Task Fails Verification with tampered content", + task: tamperedTask, + wantErr: true, + }, { + name: "Unsigned Task Fails Verification without signature", + task: task, + wantErr: true, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + + if _, err := c.TaskClient.Create(ctx, tc.task.(*v1beta1.Task), metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create Task: %s", err) + } + + tr := parse.MustParseTaskRun(t, fmt.Sprintf(` +metadata: + name: %s + namespace: %s +spec: + taskRef: + name: %s +`, helpers.ObjectNameForTest(t), namespace, tc.task.TaskMetadata().Name)) + + t.Logf("Creating TaskRun %s", tr.Name) + if _, err := c.TaskRunClient.Create(ctx, tr, 
metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create TaskRun `%s`: %s", tr.Name, err) + } + + if tc.wantErr { + t.Logf("Waiting for TaskRun in namespace %s to fail", namespace) + if err := WaitForTaskRunState(ctx, c, tr.Name, TaskRunFailed(tr.Name), "TaskRunFailed"); err != nil { + t.Errorf("Error waiting for TaskRun to finish: %s", err) + } + } else { + t.Logf("Waiting for TaskRun in namespace %s to succeed", namespace) + if err := WaitForTaskRunState(ctx, c, tr.Name, TaskRunSucceed(tr.Name), "TaskRunSucceed"); err != nil { + t.Errorf("Error waiting for TaskRun to finish: %s", err) + } + } + + tr, err := c.TaskRunClient.Get(ctx, tr.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected TaskRun %s: %s", tr.Name, err) + } + + if tc.wantErr { + if tr.Status.GetCondition(apis.ConditionSucceeded).IsTrue() { + t.Errorf("Expected TaskRun to fail %v but found condition: %s", tc.wantErr, tr.Status.GetCondition(apis.ConditionSucceeded)) + } + if tr.Status.Conditions[0].Reason != pod.ReasonResourceVerificationFailed { + t.Errorf("Expected TaskRun fail condition is: %s but got: %s", pod.ReasonResourceVerificationFailed, tr.Status.Conditions[0].Reason) + } + return + } + if tr.Status.GetCondition(apis.ConditionSucceeded).IsFalse() { + t.Errorf("Expected TaskRun to succeed but instead found condition: %s", tr.Status.GetCondition(apis.ConditionSucceeded)) + } + }) + } + + // clear config and secrets + configMapData = map[string]string{ + "resource-verification-mode": config.SkipResourceVerificationMode, + } + if err := updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), configMapData); err != nil { + t.Fatal(err) + } + + err = c.KubeClient.CoreV1().Secrets(system.Namespace()).Delete(ctx, secret.Name, metav1.DeleteOptions{}) + if err != nil { + t.Fatal(err) + } + +} + +func TestTrustedResourcesVerifyPipeline(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer 
cancel() + c, namespace := setup(ctx, t, requireAnyGate(neededFeatureFlags)) + configMapData := map[string]string{ + "resource-verification-mode": config.EnforceResourceVerificationMode, + } + if err := updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), configMapData); err != nil { + t.Fatal(err) + } + + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) + + // Config signer and public key + signer, err := signature.LoadSignerFromPEMFile(privKey, crypto.SHA256, getPass) + if err != nil { + t.Errorf("error getting signer from key file: %v", err) + } + + fileBytes, err := ioutil.ReadFile(filepath.Clean(pubKey)) + if err != nil { + t.Fatal(err) + } + + secret := &v1.Secret{Data: map[string][]byte{"cosign.pub": fileBytes}, ObjectMeta: metav1.ObjectMeta{Name: "verification-secrets", Namespace: system.Namespace()}} + c.KubeClient.CoreV1().Secrets(system.Namespace()).Create(ctx, secret, metav1.CreateOptions{}) + // Check if secret created + _, err = c.KubeClient.CoreV1().Secrets(system.Namespace()).Get(ctx, secret.Name, metav1.GetOptions{}) + if err != nil { + t.Fatal(err) + } + configMapData = map[string]string{ + config.PublicKeys: fmt.Sprintf("k8s://%s/verification-secrets", system.Namespace()), + } + if err := updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetTrustedResourcesConfigName(), configMapData); err != nil { + t.Fatal(err) + } + + // create pipelines + fqImageName := getTestImage(busyboxImage) + task := parse.MustParseTask(t, fmt.Sprintf(` +metadata: + name: %s + namespace: %s +spec: + steps: + - image: %s + command: ['/bin/sh'] + args: ['-c', 'echo hello'] +`, helpers.ObjectNameForTest(t), namespace, fqImageName)) + + signedTask, err := GetSignedTask(task, signer, "signedtask") + if err != nil { + t.Errorf("error getting signed task: %v", err) + } + if _, err := c.TaskClient.Create(ctx, signedTask, metav1.CreateOptions{}); err != nil { 
+ t.Fatalf("Failed to create Task: %s", err) + } + + pipeline := parse.MustParsePipeline(t, fmt.Sprintf(` +metadata: + name: %s + namespace: %s +spec: + tasks: + - name: task + taskRef: + name: %s + kind: Task +`, helpers.ObjectNameForTest(t), namespace, signedTask.Name)) + + signedPipeline, err := GetSignedPipeline(pipeline, signer, "signedpipeline") + if err != nil { + t.Errorf("error getting signed pipeline: %v", err) + } + + tamperedPipeline := signedPipeline.DeepCopy() + tamperedPipeline.Name = "tampered" + tamperedPipeline.Annotations["foo"] = "bar" + + tcs := []struct { + name string + pipeline v1beta1.PipelineObject + wantErr bool + }{{ + name: "Signed Pipeline Passes Verification", + pipeline: signedPipeline, + wantErr: false, + }, { + name: "Tampered Pipeline Fails Verification with tampered content", + pipeline: tamperedPipeline, + wantErr: true, + }, { + name: "Unsigned Pipeline Fails Verification without signature", + pipeline: pipeline, + wantErr: true, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + + if _, err := c.PipelineClient.Create(ctx, tc.pipeline.(*v1beta1.Pipeline), metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create Pipeline: %s", err) + } + + pr := parse.MustParsePipelineRun(t, fmt.Sprintf(` +metadata: + name: %s + namespace: %s +spec: + pipelineRef: + name: %s +`, helpers.ObjectNameForTest(t), namespace, tc.pipeline.PipelineMetadata().Name)) + + t.Logf("Creating PipelineRun %s", pr.Name) + if _, err := c.PipelineRunClient.Create(ctx, pr, metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create PipelineRun `%s`: %s", pr.Name, err) + } + + if tc.wantErr { + t.Logf("Waiting for PipelineRun in namespace %s to fail", namespace) + if err := WaitForPipelineRunState(ctx, c, pr.Name, timeout, PipelineRunFailed(pr.Name), "PipelineRunFailed"); err != nil { + t.Errorf("Error waiting for PipelineRun to finish: %s", err) + } + } else { + t.Logf("Waiting for PipelineRun in namespace %s to succeed", 
namespace) + if err := WaitForPipelineRunState(ctx, c, pr.Name, timeout, PipelineRunSucceed(pr.Name), "PipelineRunSucceed"); err != nil { + t.Errorf("Error waiting for PipelineRun to finish: %s", err) + } + } + + pr, err := c.PipelineRunClient.Get(ctx, pr.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected PipelineRun %s: %s", pr.Name, err) + } + + if tc.wantErr { + if pr.Status.GetCondition(apis.ConditionSucceeded).IsTrue() { + t.Errorf("Expected PipelineRun to fail %v but found condition: %s", tc.wantErr, pr.Status.GetCondition(apis.ConditionSucceeded)) + } + if pr.Status.Conditions[0].Reason != pod.ReasonResourceVerificationFailed { + t.Errorf("Expected PipelineRun fail condition is: %s but got: %s", pod.ReasonResourceVerificationFailed, pr.Status.Conditions[0].Reason) + } + return + } + if pr.Status.GetCondition(apis.ConditionSucceeded).IsFalse() { + t.Errorf("Expected PipelineRun to succeed but instead found condition: %s", pr.Status.GetCondition(apis.ConditionSucceeded)) + } + }) + } + + // clear config and secrets + configMapData = map[string]string{ + "resource-verification-mode": config.SkipResourceVerificationMode, + } + if err := updateConfigMap(ctx, c.KubeClient, system.Namespace(), config.GetFeatureFlagsConfigName(), configMapData); err != nil { + t.Fatal(err) + } + + err = c.KubeClient.CoreV1().Secrets(system.Namespace()).Delete(ctx, secret.Name, metav1.DeleteOptions{}) + if err != nil { + t.Fatal(err) + } + +} diff --git a/test/trustedresources-keys/cosign.key b/test/trustedresources-keys/cosign.key new file mode 100644 index 00000000000..b058a6d610a --- /dev/null +++ b/test/trustedresources-keys/cosign.key @@ -0,0 +1,11 @@ +-----BEGIN ENCRYPTED COSIGN PRIVATE KEY----- +eyJrZGYiOnsibmFtZSI6InNjcnlwdCIsInBhcmFtcyI6eyJOIjozMjc2OCwiciI6 +OCwicCI6MX0sInNhbHQiOiI2eXpNS3RwMlEweGdVazhHeXZCTll5bWdtVXdQc0NK +eGhyNlZJeWcrdzNVPSJ9LCJjaXBoZXIiOnsibmFtZSI6Im5hY2wvc2VjcmV0Ym94 
+Iiwibm9uY2UiOiJiVGp4ei9VVUNFN1dwaldvanczbkJKSzRtNXg3dE1XdSJ9LCJj +aXBoZXJ0ZXh0IjoienVPQlZ1dysvTzFhZ1lTaDR5VTQ1bGxQOXVzY251VXJMMDlh +cVc2a0hJck53aUI4YkpPTnhVRHdrT2dXcWVRenpjQXdqNnF5WThEMHJPaTNLQk8x +TStoVUc1TFlwdEF4TmtpTXRpT0t3V29JSmd5R0tCZkY0L1g4ejhkR3VmcmdkR0FX +anNydkVHMWQ5UUdvQWRPOWhrU3dzNTloYks2dE5OVUtFYVhHMWdVNDY0Y1dmaUdn +Rk9ZTGZVN1ZFVjRIZ21NR0tyWW1XbzRES1E9PSJ9 +-----END ENCRYPTED COSIGN PRIVATE KEY----- diff --git a/test/trustedresources-keys/cosign.pub b/test/trustedresources-keys/cosign.pub new file mode 100644 index 00000000000..88c4a5646d2 --- /dev/null +++ b/test/trustedresources-keys/cosign.pub @@ -0,0 +1,4 @@ +-----BEGIN PUBLIC KEY----- +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEgi1y14AALL3oM9jFTWq5Y4Wqtped +2CkBGLryV7RHrGUzPI91k0v9T4dwK3mMUBD/w31hUYJ6nC9qdwz9OwUITQ== +-----END PUBLIC KEY----- diff --git a/test/trustedresources.go b/test/trustedresources.go index 5c12970716a..6a3afaeec83 100644 --- a/test/trustedresources.go +++ b/test/trustedresources.go @@ -55,6 +55,7 @@ var ( Name: "echo", }}, } + read = readPasswordFn ) // GetUnsignedTask returns unsigned task with given name @@ -214,3 +215,20 @@ func GetSignedTask(unsigned *v1beta1.Task, signer signature.Signer, name string) signedTask.Annotations[signatureAnnotation] = base64.StdEncoding.EncodeToString(signature) return signedTask, nil } + +func getPass(confirm bool) ([]byte, error) { + read := read(confirm) + return read() +} + +func readPasswordFn(confirm bool) func() ([]byte, error) { + pw, ok := os.LookupEnv("PRIVATE_PASSWORD") + if ok { + return func() ([]byte, error) { + return []byte(pw), nil + } + } + return func() ([]byte, error) { + return nil, fmt.Errorf("fail to get password") + } +}