From bd0d91e7d88793f062845b06bd1309296ca4134e Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Fri, 3 Nov 2023 10:52:46 +0200 Subject: [PATCH 01/19] Add clusters to Runtime API Signed-off-by: Stefan Prodan --- api/v1alpha1/runtime.go | 25 +++++++++ cmd/timoni/log.go | 10 ++-- cmd/timoni/runtime_build.go | 67 ++++++++++++++++++------- internal/engine/runtime_builder.go | 33 +++++++++++- internal/engine/runtime_builder_test.go | 47 +++++++++++++++++ 5 files changed, 158 insertions(+), 24 deletions(-) diff --git a/api/v1alpha1/runtime.go b/api/v1alpha1/runtime.go index 2d377c16..3c4d6228 100644 --- a/api/v1alpha1/runtime.go +++ b/api/v1alpha1/runtime.go @@ -36,6 +36,9 @@ const ( // RuntimeName is the CUE path for the Timoni's bundle name. RuntimeName Selector = "runtime.name" + // RuntimeClustersSelector is the CUE path for the Timoni's runtime clusters. + RuntimeClustersSelector Selector = "runtime.clusters" + // RuntimeValuesSelector is the CUE path for the Timoni's runtime values. RuntimeValuesSelector Selector = "runtime.values" ) @@ -53,6 +56,12 @@ import "strings" #Runtime: { apiVersion: string & =~"^v1alpha1$" name: string & =~"^(([A-Za-z0-9][-A-Za-z0-9_]*)?[A-Za-z0-9])?$" & strings.MaxRunes(63) & strings.MinRunes(1) + + clusters?: [string & =~"^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$" & strings.MaxRunes(63) & strings.MinRunes(1)]: { + group!: string + kubeContext!: string + } + values: [...#RuntimeValue] } ` @@ -99,10 +108,26 @@ type Runtime struct { // Name of the runtime. Name string `json:"name"` + // Clusters is the list of Kubernetes + // clusters belonging to this runtime. + Clusters []RuntimeCluster `json:"clusters"` + // Refs is the list of in-cluster resource references. Refs []RuntimeResourceRef `json:"refs"` } +// RuntimeCluster holds the reference to a Kubernetes cluster. +type RuntimeCluster struct { + // Name of the cluster. + Name string `json:"name"` + + // Group name of the cluster. 
+ Group string `json:"group"` + + // KubeContext is the name of kubeconfig context for this cluster. + KubeContext string `json:"kubeContext"` +} + // RuntimeResourceRef holds the data needed to query the fields // of a Kubernetes resource using CUE expressions. type RuntimeResourceRef struct { diff --git a/cmd/timoni/log.go b/cmd/timoni/log.go index 1653a41f..b59b57d3 100644 --- a/cmd/timoni/log.go +++ b/cmd/timoni/log.go @@ -190,6 +190,10 @@ func colorizeRuntime(runtime string) string { return colorCallerPrefix.Sprint("r:") + colorInstance.Sprint(runtime) } +func colorizeCluster(cluster string) string { + return colorCallerPrefix.Sprint("c:") + colorInstance.Sprint(cluster) +} + func LoggerBundle(ctx context.Context, bundle string) logr.Logger { if !rootArgs.prettyLog { return LoggerFrom(ctx, "bundle", bundle) @@ -211,11 +215,11 @@ func LoggerBundleInstance(ctx context.Context, bundle, instance string) logr.Log return LoggerFrom(ctx, "caller", fmt.Sprintf("%s %s %s", colorizeBundle(bundle), color.CyanString(">"), colorizeInstance(instance))) } -func LoggerRuntime(ctx context.Context, runtime string) logr.Logger { +func LoggerRuntime(ctx context.Context, runtime, cluster string) logr.Logger { if !rootArgs.prettyLog { - return LoggerFrom(ctx, "runtime", runtime) + return LoggerFrom(ctx, "runtime", runtime, "cluster", cluster) } - return LoggerFrom(ctx, "caller", colorizeRuntime(runtime)) + return LoggerFrom(ctx, "caller", fmt.Sprintf("%s %s %s", colorizeRuntime(runtime), color.CyanString(">"), colorizeCluster(cluster))) } // LoggerFrom returns a logr.Logger with predefined values from a context.Context. 
diff --git a/cmd/timoni/runtime_build.go b/cmd/timoni/runtime_build.go index 32aee668..fa31d353 100644 --- a/cmd/timoni/runtime_build.go +++ b/cmd/timoni/runtime_build.go @@ -77,38 +77,59 @@ func runRuntimeBuildCmd(cmd *cobra.Command, args []string) error { return err } - log := LoggerRuntime(cmd.Context(), rt.Name) - ctx, cancel := context.WithTimeout(cmd.Context(), rootArgs.timeout) defer cancel() - rm, err := runtime.NewResourceManager(kubeconfigArgs) - if err != nil { - return err - } + for _, cluster := range rt.Clusters { + log := LoggerRuntime(cmd.Context(), rt.Name, cluster.Name) - reader := runtime.NewResourceReader(rm) + kubeconfigArgs.Context = &cluster.KubeContext + rm, err := runtime.NewResourceManager(kubeconfigArgs) + if err != nil { + return err + } - values, err := reader.Read(ctx, rt.Refs) - if err != nil { - return err - } + reader := runtime.NewResourceReader(rm) - keys := make([]string, 0, len(values)) + values, err := reader.Read(ctx, rt.Refs) + if err != nil { + return err + } - for k := range values { - keys = append(keys, k) - } - sort.Strings(keys) + keys := make([]string, 0, len(values)) - for _, k := range keys { - log.Info(fmt.Sprintf("%s: %s", colorizeSubject(k), values[k])) + for k := range values { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + log.Info(fmt.Sprintf("%s: %s", colorizeSubject(k), values[k])) + } + + if len(values) == 0 { + log.Info("no values defined") + } } return nil } func buildRuntime(files []string) (*apiv1.Runtime, error) { + defaultCluster := apiv1.RuntimeCluster{ + Name: "default", + Group: "default", + KubeContext: *kubeconfigArgs.Context, + } + if len(files) == 0 { + defaultRuntime := apiv1.Runtime{ + Name: "default", + Clusters: []apiv1.RuntimeCluster{defaultCluster}, + Refs: []apiv1.RuntimeResourceRef{}, + } + return &defaultRuntime, nil + } + tmpDir, err := os.MkdirTemp("", apiv1.FieldManager) if err != nil { return nil, err @@ -127,5 +148,13 @@ func buildRuntime(files 
[]string) (*apiv1.Runtime, error) { return nil, describeErr(tmpDir, "failed to parse runtime", err) } - return rb.GetRuntime(v) + rt, err := rb.GetRuntime(v) + if err != nil { + return nil, err + } + + if len(rt.Clusters) == 0 { + rt.Clusters = []apiv1.RuntimeCluster{defaultCluster} + } + return rt, nil } diff --git a/internal/engine/runtime_builder.go b/internal/engine/runtime_builder.go index bd6bac83..5947b1bb 100644 --- a/internal/engine/runtime_builder.go +++ b/internal/engine/runtime_builder.go @@ -168,8 +168,37 @@ func (b *RuntimeBuilder) GetRuntime(v cue.Value) (*apiv1.Runtime, error) { refs = append(refs, *ref) } + clusters := []apiv1.RuntimeCluster{} + + clustersCue := v.LookupPath(cue.ParsePath(apiv1.RuntimeClustersSelector.String())) + if clustersCue.Err() == nil { + + iter, err := clustersCue.Fields(cue.Concrete(true)) + if err != nil { + return nil, err + } + + for iter.Next() { + name := iter.Selector().Unquoted() + expr := iter.Value() + + vGroup := expr.LookupPath(cue.ParsePath("group")) + group, _ := vGroup.String() + + vkc := expr.LookupPath(cue.ParsePath("kubeContext")) + kc, _ := vkc.String() + + clusters = append(clusters, apiv1.RuntimeCluster{ + Name: name, + Group: group, + KubeContext: kc, + }) + } + } + return &apiv1.Runtime{ - Name: runtimeName, - Refs: refs, + Name: runtimeName, + Clusters: clusters, + Refs: refs, }, nil } diff --git a/internal/engine/runtime_builder_test.go b/internal/engine/runtime_builder_test.go index 37125765..cbf21b9f 100644 --- a/internal/engine/runtime_builder_test.go +++ b/internal/engine/runtime_builder_test.go @@ -81,3 +81,50 @@ runtime: { })) g.Expect(b.Refs[2].Namespace).To(BeEmpty()) } + +func TestGetRuntimeClusters(t *testing.T) { + g := NewWithT(t) + ctx := cuecontext.New() + + rt := ` +runtime: { + apiVersion: "v1alpha1" + name: "fleet" + clusters: { + "staging-eu": { + group: "staging" + kubeContext: "eu-central-1:staging" + } + "staging-us": { + group: "staging" + kubeContext: "us-west-1:staging" + } + 
"production-eu": { + group: "production" + kubeContext: "eu-central-1:production" + } + "production-us": { + group: "production" + kubeContext: "us-west-1:production" + } + } + values: [] +} +` + v := ctx.CompileString(rt) + builder := NewRuntimeBuilder(ctx, []string{}) + b, err := builder.GetRuntime(v) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(b.Name).To(BeEquivalentTo("fleet")) + g.Expect(len(b.Clusters)).To(BeEquivalentTo(4)) + g.Expect(b.Clusters[0]).To(BeEquivalentTo(apiv1.RuntimeCluster{ + Name: "staging-eu", + Group: "staging", + KubeContext: "eu-central-1:staging", + })) + g.Expect(b.Clusters[3]).To(BeEquivalentTo(apiv1.RuntimeCluster{ + Name: "production-us", + Group: "production", + KubeContext: "us-west-1:production", + })) +} From 06790462c6fceb3d3fdcede698b29f080d82f596 Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Sun, 5 Nov 2023 10:59:46 +0200 Subject: [PATCH 02/19] Setup local cluster fleet Signed-off-by: Stefan Prodan --- hack/Makefile | 13 ++++++++++++- hack/local/kind-down.sh | 4 ++-- hack/local/kind-up.sh | 4 ++++ hack/local/mod-push.sh | 14 +++----------- hack/local/podinfo.bundle.cue | 2 ++ 5 files changed, 23 insertions(+), 14 deletions(-) diff --git a/hack/Makefile b/hack/Makefile index 86d8a3aa..79590533 100644 --- a/hack/Makefile +++ b/hack/Makefile @@ -14,13 +14,24 @@ tools: # Install required tools with Homebrew brew bundle .PHONY: up -up: # Start a local Kind clusters and a container registry on port 5555 +up: # Start a local Kind cluster and a container registry on port 5555 $(REPOSITORY_ROOT)/hack/local/kind-up.sh .PHONY: down down: # Teardown the Kind cluster and registry $(REPOSITORY_ROOT)/hack/local/kind-down.sh +.PHONY: fleet-up +fleet-up: # Start local Kind clusters (staging and production) and a container registry on port 5555 + CLUSTER_NAME=timoni-staging $(REPOSITORY_ROOT)/hack/local/kind-up.sh + CLUSTER_NAME=timoni-production $(REPOSITORY_ROOT)/hack/local/kind-up.sh + +.PHONY: fleet-down +fleet-down: # Teardown 
the Kind clusters and registry + kind delete cluster --name timoni-staging + kind delete cluster --name timoni-production + docker rm -f timoni-registry + .PHONY: push push: # Push the example modules to the local registry $(REPOSITORY_ROOT)/hack/local/mod-push.sh diff --git a/hack/local/kind-down.sh b/hack/local/kind-down.sh index 1c01c994..f0c5a600 100755 --- a/hack/local/kind-down.sh +++ b/hack/local/kind-down.sh @@ -5,9 +5,9 @@ set -o errexit -cluster_name="timoni" +CLUSTER_NAME="${CLUSTER_NAME:=timoni}" reg_name='timoni-registry' -kind delete cluster --name ${cluster_name} +kind delete cluster --name ${CLUSTER_NAME} docker rm -f ${reg_name} diff --git a/hack/local/kind-up.sh b/hack/local/kind-up.sh index 40e3fc37..4ff6e7c6 100755 --- a/hack/local/kind-up.sh +++ b/hack/local/kind-up.sh @@ -6,6 +6,7 @@ set -o errexit CLUSTER_NAME="${CLUSTER_NAME:=timoni}" +cluster_version="v1.28.0" reg_name='timoni-registry' reg_localhost_port='5555' reg_cluster_port='5000' @@ -20,12 +21,15 @@ containerdConfigPatches: endpoint = ["http://${reg_name}:${reg_cluster_port}"] nodes: - role: control-plane + image: kindest/node:${cluster_version} kubeadmConfigPatches: - | kind: InitConfiguration nodeRegistration: kubeletExtraArgs: node-labels: "ingress-ready=true" + - role: worker + image: kindest/node:${cluster_version} EOF } diff --git a/hack/local/mod-push.sh b/hack/local/mod-push.sh index 53e50727..0e50f70e 100755 --- a/hack/local/mod-push.sh +++ b/hack/local/mod-push.sh @@ -8,14 +8,6 @@ set -o errexit reg_localhost_port='5555' repo_root=$(git rev-parse --show-toplevel) -PODINFO_VER=$(cat $repo_root/examples/podinfo/templates/config.cue | awk '/tag:/ {print $2}' | tr -d '*"') -timoni mod push $repo_root/examples/podinfo oci://localhost:${reg_localhost_port}/modules/podinfo -v ${PODINFO_VER} --latest \ - --source https://github.com/stefanprodan/podinfo \ - -a 'org.opencontainers.image.description=A timoni.sh module for deploying Podinfo.' 
\ - -a 'org.opencontainers.image.documentation=https://github.com/stefanprodan/timoni/blob/main/examples/podinfo/README.md' - -REDIS_VER=$(cat $repo_root/examples/redis/templates/config.cue | awk '/tag:/ {print $2}' | tr -d '*"') -timoni mod push $repo_root/examples/redis oci://localhost:${reg_localhost_port}/modules/redis -v ${REDIS_VER} --latest \ - --source https://github.com/stefanprodan/timoni/tree/main/examples/redis \ - -a 'org.opencontainers.image.description=A timoni.sh module for deploying Redis master-replica clusters.' \ - -a 'org.opencontainers.image.documentation=https://github.com/stefanprodan/timoni/blob/main/examples/redis/README.md' +crane copy ghcr.io/stefanprodan/modules/podinfo localhost:${reg_localhost_port}/modules/podinfo -a +crane copy ghcr.io/stefanprodan/modules/redis localhost:${reg_localhost_port}/modules/redis -a +crane copy ghcr.io/stefanprodan/timoni/minimal localhost:${reg_localhost_port}/modules/nginx -a diff --git a/hack/local/podinfo.bundle.cue b/hack/local/podinfo.bundle.cue index d63a6a78..3463c3af 100644 --- a/hack/local/podinfo.bundle.cue +++ b/hack/local/podinfo.bundle.cue @@ -23,6 +23,7 @@ bundle: { enabled: true redisURL: "tcp://cache:6379" } + values: test: enabled: true } frontend: { module: { @@ -47,6 +48,7 @@ bundle: { capabilities: drop: ["ALL"] seccompProfile: type: "RuntimeDefault" } + test: enabled: true } } } From 2fee79f0a17dce191e9992b04e89a6bc365b1fbc Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Sat, 18 Nov 2023 01:01:32 +0200 Subject: [PATCH 03/19] Add runtime clusters to API Signed-off-by: Stefan Prodan --- api/v1alpha1/zz_generated.deepcopy.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 35a26eca..7543cc9b 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -133,6 +133,11 @@ func (in *ResourceRef) DeepCopy() *ResourceRef { 
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Runtime) DeepCopyInto(out *Runtime) { *out = *in + if in.Clusters != nil { + in, out := &in.Clusters, &out.Clusters + *out = make([]RuntimeCluster, len(*in)) + copy(*out, *in) + } if in.Refs != nil { in, out := &in.Refs, &out.Refs *out = make([]RuntimeResourceRef, len(*in)) @@ -167,6 +172,21 @@ func (in *RuntimeAttribute) DeepCopy() *RuntimeAttribute { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeCluster) DeepCopyInto(out *RuntimeCluster) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeCluster. +func (in *RuntimeCluster) DeepCopy() *RuntimeCluster { + if in == nil { + return nil + } + out := new(RuntimeCluster) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RuntimeResourceRef) DeepCopyInto(out *RuntimeResourceRef) { *out = *in From 20d6887c98609826c02b4bcc23dd56790adc3498 Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Sat, 18 Nov 2023 01:02:01 +0200 Subject: [PATCH 04/19] Add test for runtime build with clusters Signed-off-by: Stefan Prodan --- cmd/timoni/main_test.go | 1 + cmd/timoni/runtime_build_test.go | 68 ++++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+) diff --git a/cmd/timoni/main_test.go b/cmd/timoni/main_test.go index 9289ea23..e81c719e 100644 --- a/cmd/timoni/main_test.go +++ b/cmd/timoni/main_test.go @@ -136,6 +136,7 @@ func resetCmdArgs() { vendorK8sArgs = vendorK8sFlags{} pushArtifactArgs = pushArtifactFlags{} pullArtifactArgs = pullArtifactFlags{} + runtimeBuildArgs = runtimeBuildFlags{} } func rnd(prefix string, n int) string { diff --git a/cmd/timoni/runtime_build_test.go b/cmd/timoni/runtime_build_test.go index ba615edd..b50310bf 100644 --- a/cmd/timoni/runtime_build_test.go +++ b/cmd/timoni/runtime_build_test.go @@ -17,10 +17,12 @@ limitations under the License. package main import ( + "bufio" "context" "fmt" "os" "path/filepath" + "strings" "testing" . 
"github.com/onsi/gomega" @@ -123,3 +125,69 @@ runtime: { g.Expect(output).To(ContainSubstring("sc.local")) }) } + +func Test_RuntimeBuild_Clusters(t *testing.T) { + g := NewWithT(t) + + runtimeData := ` +runtime: { + apiVersion: "v1alpha1" + name: "fleet" + clusters: { + "staging": { + group: "staging" + kubeContext: "envtest" + } + "production": { + group: "production" + kubeContext: "envtest" + } + } + values: [ + { + query: "k8s:v1:Namespace:kube-system" + for: { + "CLUSTER_UID": "obj.metadata.uid" + } + }, + ] +} +` + + runtimePath := filepath.Join(t.TempDir(), "runtime.cue") + err := os.WriteFile(runtimePath, []byte(runtimeData), 0644) + g.Expect(err).ToNot(HaveOccurred()) + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kube-system", + }, + } + + err = envTestClient.Get(context.Background(), client.ObjectKeyFromObject(ns), ns) + g.Expect(err).ToNot(HaveOccurred()) + + t.Run("builds runtime for clusters with UID", func(t *testing.T) { + g := NewWithT(t) + + output, err := executeCommandWithIn("runtime build -f-", strings.NewReader(runtimeData)) + g.Expect(err).ToNot(HaveOccurred()) + t.Log("\n", output) + + scanner := bufio.NewScanner(strings.NewReader(output)) + var i int + for scanner.Scan() { + i++ + txt := scanner.Text() + g.Expect(txt).To(ContainSubstring(string(ns.UID))) + if i == 1 { + g.Expect(txt).To(MatchRegexp("staging.*CLUSTER_UID")) + } + if i == 2 { + g.Expect(txt).To(MatchRegexp("production.*CLUSTER_UID")) + } + } + g.Expect(scanner.Err()).ToNot(HaveOccurred()) + g.Expect(i).To(BeEquivalentTo(2)) + }) +} From b69bfab68cf8a3d6b1a3a7a81ade58d33b1d4be8 Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Sat, 18 Nov 2023 13:38:11 +0200 Subject: [PATCH 05/19] Add runtime cluster selector to API Signed-off-by: Stefan Prodan --- api/v1alpha1/runtime.go | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/api/v1alpha1/runtime.go b/api/v1alpha1/runtime.go index 3c4d6228..472e225a 100644 --- 
a/api/v1alpha1/runtime.go +++ b/api/v1alpha1/runtime.go @@ -27,6 +27,9 @@ const ( // RuntimeKind is the name of the Timoni runtime CUE attributes. RuntimeKind string = "runtime" + // RuntimeDefaultName is the name of the default Timoni runtime. + RuntimeDefaultName string = "_default" + // RuntimeDelimiter is the delimiter used in Timoni runtime CUE attributes. RuntimeDelimiter string = ":" @@ -116,6 +119,22 @@ type Runtime struct { Refs []RuntimeResourceRef `json:"refs"` } +// DefaultRuntime returns a Runtime with a single +// cluster set to specified context. +func DefaultRuntime(kubeContext string) *Runtime { + defaultCluster := RuntimeCluster{ + Name: RuntimeDefaultName, + Group: RuntimeDefaultName, + KubeContext: kubeContext, + } + + return &Runtime{ + Name: RuntimeDefaultName, + Clusters: []RuntimeCluster{defaultCluster}, + Refs: []RuntimeResourceRef{}, + } +} + // RuntimeCluster holds the reference to a Kubernetes cluster. type RuntimeCluster struct { // Name of the cluster. @@ -128,6 +147,22 @@ type RuntimeCluster struct { KubeContext string `json:"kubeContext"` } +// SelectClusters returns the clusters matching the specified name and group. +// Both the name and group support the '*' wildcard. +func (r *Runtime) SelectClusters(name, group string) []RuntimeCluster { + var result []RuntimeCluster + for _, cluster := range r.Clusters { + if name != "" && name != "*" && !strings.EqualFold(cluster.Name, name) { + continue + } + if group != "" && group != "*" && !strings.EqualFold(cluster.Group, group) { + continue + } + result = append(result, cluster) + } + return result +} + // RuntimeResourceRef holds the data needed to query the fields // of a Kubernetes resource using CUE expressions. 
type RuntimeResourceRef struct { From b740d24a9d20de4e19bf9452c7b42824f93207df Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Sat, 18 Nov 2023 13:38:37 +0200 Subject: [PATCH 06/19] Add cluster selector to runtime cmd Signed-off-by: Stefan Prodan --- cmd/timoni/log.go | 18 +++++++++++++--- cmd/timoni/runtime_build.go | 30 +++++++++++++------------- cmd/timoni/runtime_build_test.go | 36 +++++++++++++++++++++++++++++++- 3 files changed, 66 insertions(+), 18 deletions(-) diff --git a/cmd/timoni/log.go b/cmd/timoni/log.go index b59b57d3..14990b7f 100644 --- a/cmd/timoni/log.go +++ b/cmd/timoni/log.go @@ -34,6 +34,8 @@ import ( "github.com/rs/zerolog" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" runtimeLog "sigs.k8s.io/controller-runtime/pkg/log" + + apiv1 "github.com/stefanprodan/timoni/api/v1alpha1" ) // NewConsoleLogger returns a human-friendly Logger. @@ -216,10 +218,20 @@ func LoggerBundleInstance(ctx context.Context, bundle, instance string) logr.Log } func LoggerRuntime(ctx context.Context, runtime, cluster string) logr.Logger { - if !rootArgs.prettyLog { - return LoggerFrom(ctx, "runtime", runtime, "cluster", cluster) + switch cluster { + case apiv1.RuntimeDefaultName: + if !rootArgs.prettyLog { + return LoggerFrom(ctx, "runtime", runtime) + } + return LoggerFrom(ctx, "caller", colorizeRuntime(runtime)) + default: + if !rootArgs.prettyLog { + return LoggerFrom(ctx, "runtime", runtime, "cluster", cluster) + } + return LoggerFrom(ctx, "caller", + fmt.Sprintf("%s %s %s", colorizeRuntime(runtime), + color.CyanString(">"), colorizeCluster(cluster))) } - return LoggerFrom(ctx, "caller", fmt.Sprintf("%s %s %s", colorizeRuntime(runtime), color.CyanString(">"), colorizeCluster(cluster))) } // LoggerFrom returns a logr.Logger with predefined values from a context.Context. 
diff --git a/cmd/timoni/runtime_build.go b/cmd/timoni/runtime_build.go index fa31d353..3ab38feb 100644 --- a/cmd/timoni/runtime_build.go +++ b/cmd/timoni/runtime_build.go @@ -41,7 +41,9 @@ var runtimeBuildCmd = &cobra.Command{ } type runtimeBuildFlags struct { - files []string + files []string + clusterSelector string + clusterGroupSelector string } var runtimeBuildArgs runtimeBuildFlags @@ -49,6 +51,10 @@ var runtimeBuildArgs runtimeBuildFlags func init() { runtimeBuildCmd.Flags().StringSliceVarP(&runtimeBuildArgs.files, "file", "f", nil, "The local path to runtime.cue files.") + runtimeBuildCmd.Flags().StringVar(&runtimeBuildArgs.clusterSelector, "cluster", "*", + "Select cluster by name.") + runtimeBuildCmd.Flags().StringVar(&runtimeBuildArgs.clusterGroupSelector, "cluster-group", "*", + "Select clusters by group name.") runtimeCmd.AddCommand(runtimeBuildCmd) } @@ -80,7 +86,12 @@ func runRuntimeBuildCmd(cmd *cobra.Command, args []string) error { ctx, cancel := context.WithTimeout(cmd.Context(), rootArgs.timeout) defer cancel() - for _, cluster := range rt.Clusters { + clusters := rt.SelectClusters(runtimeBuildArgs.clusterSelector, runtimeBuildArgs.clusterGroupSelector) + if len(clusters) == 0 { + return fmt.Errorf("no cluster found") + } + + for _, cluster := range clusters { log := LoggerRuntime(cmd.Context(), rt.Name, cluster.Name) kubeconfigArgs.Context = &cluster.KubeContext @@ -116,18 +127,9 @@ func runRuntimeBuildCmd(cmd *cobra.Command, args []string) error { } func buildRuntime(files []string) (*apiv1.Runtime, error) { - defaultCluster := apiv1.RuntimeCluster{ - Name: "default", - Group: "default", - KubeContext: *kubeconfigArgs.Context, - } + defaultRuntime := apiv1.DefaultRuntime(*kubeconfigArgs.Context) if len(files) == 0 { - defaultRuntime := apiv1.Runtime{ - Name: "default", - Clusters: []apiv1.RuntimeCluster{defaultCluster}, - Refs: []apiv1.RuntimeResourceRef{}, - } - return &defaultRuntime, nil + return defaultRuntime, nil } tmpDir, err := 
os.MkdirTemp("", apiv1.FieldManager) @@ -154,7 +156,7 @@ func buildRuntime(files []string) (*apiv1.Runtime, error) { } if len(rt.Clusters) == 0 { - rt.Clusters = []apiv1.RuntimeCluster{defaultCluster} + rt.Clusters = defaultRuntime.Clusters } return rt, nil } diff --git a/cmd/timoni/runtime_build_test.go b/cmd/timoni/runtime_build_test.go index b50310bf..a353e39d 100644 --- a/cmd/timoni/runtime_build_test.go +++ b/cmd/timoni/runtime_build_test.go @@ -167,7 +167,7 @@ runtime: { err = envTestClient.Get(context.Background(), client.ObjectKeyFromObject(ns), ns) g.Expect(err).ToNot(HaveOccurred()) - t.Run("builds runtime for clusters with UID", func(t *testing.T) { + t.Run("builds runtime for all clusters", func(t *testing.T) { g := NewWithT(t) output, err := executeCommandWithIn("runtime build -f-", strings.NewReader(runtimeData)) @@ -190,4 +190,38 @@ runtime: { g.Expect(scanner.Err()).ToNot(HaveOccurred()) g.Expect(i).To(BeEquivalentTo(2)) }) + + t.Run("builds runtime for selected cluster", func(t *testing.T) { + g := NewWithT(t) + + output, err := executeCommandWithIn("runtime build --cluster=staging -f-", strings.NewReader(runtimeData)) + g.Expect(err).ToNot(HaveOccurred()) + t.Log("\n", output) + + scanner := bufio.NewScanner(strings.NewReader(output)) + var i int + for scanner.Scan() { + i++ + g.Expect(scanner.Text()).To(MatchRegexp("staging.*CLUSTER_UID.*%s", string(ns.UID))) + } + g.Expect(scanner.Err()).ToNot(HaveOccurred()) + g.Expect(i).To(BeEquivalentTo(1)) + }) + + t.Run("builds runtime for selected group", func(t *testing.T) { + g := NewWithT(t) + + output, err := executeCommandWithIn("runtime build --cluster-group=production -f-", strings.NewReader(runtimeData)) + g.Expect(err).ToNot(HaveOccurred()) + t.Log("\n", output) + + scanner := bufio.NewScanner(strings.NewReader(output)) + var i int + for scanner.Scan() { + i++ + g.Expect(scanner.Text()).To(MatchRegexp("production.*CLUSTER_UID.*%s", string(ns.UID))) + } + 
g.Expect(scanner.Err()).ToNot(HaveOccurred()) + g.Expect(i).To(BeEquivalentTo(1)) + }) } From 50922c3289ed55a84e17571e7c6cbb3e8f72582b Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Sat, 18 Nov 2023 14:43:06 +0200 Subject: [PATCH 07/19] Add cluster aware logger Signed-off-by: Stefan Prodan --- cmd/timoni/bundle_apply.go | 4 ++-- cmd/timoni/bundle_delete.go | 4 ++-- cmd/timoni/bundle_status.go | 2 +- cmd/timoni/bundle_vet.go | 4 ++-- cmd/timoni/log.go | 47 ++++++++++++++++++++++++++++++------- 5 files changed, 46 insertions(+), 15 deletions(-) diff --git a/cmd/timoni/bundle_apply.go b/cmd/timoni/bundle_apply.go index ff0fd6a1..2d487c48 100644 --- a/cmd/timoni/bundle_apply.go +++ b/cmd/timoni/bundle_apply.go @@ -173,7 +173,7 @@ func runBundleApplyCmd(cmd *cobra.Command, _ []string) error { return err } - log := LoggerBundle(cmd.Context(), bundle.Name) + log := LoggerBundle(cmd.Context(), bundle.Name, apiv1.RuntimeDefaultName) if !bundleApplyArgs.overwriteOwnership { err = bundleInstancesOwnershipConflicts(bundle.Instances) @@ -258,7 +258,7 @@ func fetchBundleInstanceModule(ctx context.Context, instance *engine.BundleInsta } func applyBundleInstance(ctx context.Context, cuectx *cue.Context, instance *engine.BundleInstance, kubeVersion string, rootDir string) error { - log := LoggerBundleInstance(ctx, instance.Bundle, instance.Name) + log := LoggerBundleInstance(ctx, instance.Bundle, apiv1.RuntimeDefaultName, instance.Name) modDir := path.Join(rootDir, instance.Name, "module") builder := engine.NewModuleBuilder( diff --git a/cmd/timoni/bundle_delete.go b/cmd/timoni/bundle_delete.go index 63111e86..96e47ecb 100644 --- a/cmd/timoni/bundle_delete.go +++ b/cmd/timoni/bundle_delete.go @@ -100,7 +100,7 @@ func runBundleDelCmd(cmd *cobra.Command, args []string) error { return err } - log := LoggerBundle(ctx, bundleDelArgs.name) + log := LoggerBundle(ctx, bundleDelArgs.name, apiv1.RuntimeDefaultName) iStorage := runtime.NewStorageManager(sm) instances, err := 
iStorage.List(ctx, "", bundleDelArgs.name) @@ -129,7 +129,7 @@ func runBundleDelCmd(cmd *cobra.Command, args []string) error { } func deleteBundleInstance(ctx context.Context, instance *engine.BundleInstance, wait bool, dryrun bool) error { - log := LoggerBundle(ctx, instance.Bundle) + log := LoggerBundle(ctx, instance.Bundle, apiv1.RuntimeDefaultName) sm, err := runtime.NewResourceManager(kubeconfigArgs) if err != nil { diff --git a/cmd/timoni/bundle_status.go b/cmd/timoni/bundle_status.go index ebed7d03..6916239c 100644 --- a/cmd/timoni/bundle_status.go +++ b/cmd/timoni/bundle_status.go @@ -94,7 +94,7 @@ func runBundleStatusCmd(cmd *cobra.Command, args []string) error { } for _, instance := range instances { - log := LoggerBundleInstance(ctx, bundleStatusArgs.name, instance.Name) + log := LoggerBundleInstance(ctx, bundleStatusArgs.name, apiv1.RuntimeDefaultName, instance.Name) log.Info(fmt.Sprintf("last applied %s", colorizeSubject(instance.LastTransitionTime))) diff --git a/cmd/timoni/bundle_vet.go b/cmd/timoni/bundle_vet.go index 849b262b..4d5fa35c 100644 --- a/cmd/timoni/bundle_vet.go +++ b/cmd/timoni/bundle_vet.go @@ -154,7 +154,7 @@ func runBundleVetCmd(cmd *cobra.Command, args []string) error { if err != nil { return err } - log = LoggerBundle(logr.NewContext(cmd.Context(), log), bundle.Name) + log = LoggerBundle(logr.NewContext(cmd.Context(), log), bundle.Name, apiv1.RuntimeDefaultName) if len(bundle.Instances) == 0 { return fmt.Errorf("no instances found in bundle") @@ -173,7 +173,7 @@ func runBundleVetCmd(cmd *cobra.Command, args []string) error { if i.Namespace == "" { return fmt.Errorf("instance %s does not have a namespace", i.Name) } - log := LoggerBundleInstance(logr.NewContext(cmd.Context(), log), bundle.Name, i.Name) + log := LoggerBundleInstance(logr.NewContext(cmd.Context(), log), bundle.Name, apiv1.RuntimeDefaultName, i.Name) log.Info("instance is valid") } diff --git a/cmd/timoni/log.go b/cmd/timoni/log.go index 14990b7f..6a9482de 100644 --- 
a/cmd/timoni/log.go +++ b/cmd/timoni/log.go @@ -196,11 +196,23 @@ func colorizeCluster(cluster string) string { return colorCallerPrefix.Sprint("c:") + colorInstance.Sprint(cluster) } -func LoggerBundle(ctx context.Context, bundle string) logr.Logger { - if !rootArgs.prettyLog { - return LoggerFrom(ctx, "bundle", bundle) +func LoggerBundle(ctx context.Context, bundle, cluster string) logr.Logger { + switch cluster { + case apiv1.RuntimeDefaultName: + if !rootArgs.prettyLog { + return LoggerFrom(ctx, "bundle", bundle) + } + return LoggerFrom(ctx, "caller", colorizeBundle(bundle)) + default: + if !rootArgs.prettyLog { + return LoggerFrom(ctx, "bundle", bundle, "cluster", cluster) + } + return LoggerFrom(ctx, "caller", + fmt.Sprintf("%s %s %s", + colorizeBundle(bundle), + color.CyanString(">"), + colorizeCluster(cluster))) } - return LoggerFrom(ctx, "caller", colorizeBundle(bundle)) } func LoggerInstance(ctx context.Context, instance string) logr.Logger { @@ -210,11 +222,30 @@ func LoggerInstance(ctx context.Context, instance string) logr.Logger { return LoggerFrom(ctx, "caller", colorizeInstance(instance)) } -func LoggerBundleInstance(ctx context.Context, bundle, instance string) logr.Logger { - if !rootArgs.prettyLog { - return LoggerFrom(ctx, "bundle", bundle, "instance", instance) +func LoggerBundleInstance(ctx context.Context, bundle, cluster, instance string) logr.Logger { + switch cluster { + case apiv1.RuntimeDefaultName: + if !rootArgs.prettyLog { + return LoggerFrom(ctx, "bundle", bundle, "instance", instance) + } + return LoggerFrom(ctx, "caller", + fmt.Sprintf("%s %s %s", + colorizeBundle(bundle), + color.CyanString(">"), + colorizeInstance(instance))) + default: + if !rootArgs.prettyLog { + return LoggerFrom(ctx, "bundle", bundle, "cluster", cluster, "instance", instance) + } + return LoggerFrom(ctx, "caller", + fmt.Sprintf("%s %s %s %s %s", + colorizeBundle(bundle), + color.CyanString(">"), + colorizeCluster(cluster), + color.CyanString(">"), + 
colorizeInstance(instance)))
+	}
-	return LoggerFrom(ctx, "caller", fmt.Sprintf("%s %s %s", colorizeBundle(bundle), color.CyanString(">"), colorizeInstance(instance)))
 }
 
 func LoggerRuntime(ctx context.Context, runtime, cluster string) logr.Logger {

From d482f2335b0d911ec91a70524c995e0fd0be238f Mon Sep 17 00:00:00 2001
From: Stefan Prodan
Date: Sat, 18 Nov 2023 15:01:28 +0200
Subject: [PATCH 08/19] Add bundle global runtime args

Signed-off-by: Stefan Prodan
---
 cmd/timoni/bundle.go       | 17 +++++++++++++++
 cmd/timoni/bundle_apply.go | 12 +++---------
 cmd/timoni/bundle_build.go | 18 ++++++------------
 cmd/timoni/bundle_vet.go   | 18 ++++++------------
 cmd/timoni/main_test.go    |  1 +
 5 files changed, 33 insertions(+), 33 deletions(-)

diff --git a/cmd/timoni/bundle.go b/cmd/timoni/bundle.go
index fdf3bc84..006f65fd 100644
--- a/cmd/timoni/bundle.go
+++ b/cmd/timoni/bundle.go
@@ -20,11 +20,28 @@ import (
 	"github.com/spf13/cobra"
 )
 
+type bundleFlags struct {
+	runtimeFromEnv      bool
+	runtimeFiles        []string
+	runtimeCluster      string
+	runtimeClusterGroup string
+}
+
+var bundleArgs bundleFlags
+
 var bundleCmd = &cobra.Command{
 	Use:   "bundle",
 	Short: "Commands for managing bundles",
 }
 
 func init() {
+	bundleCmd.PersistentFlags().BoolVar(&bundleArgs.runtimeFromEnv, "runtime-from-env", false,
+		"Inject runtime values from the environment.")
+	bundleCmd.PersistentFlags().StringSliceVarP(&bundleArgs.runtimeFiles, "runtime", "r", nil,
+		"The local path to runtime.cue files.")
+	bundleCmd.PersistentFlags().StringVar(&bundleArgs.runtimeCluster, "runtime-cluster", "*",
+		"Filter runtime cluster by name.")
+	bundleCmd.PersistentFlags().StringVar(&bundleArgs.runtimeClusterGroup, "runtime-group", "*",
+		"Filter runtime clusters by group.")
 	rootCmd.AddCommand(bundleCmd)
 }
 
diff --git a/cmd/timoni/bundle_apply.go b/cmd/timoni/bundle_apply.go
index 2d487c48..60805f88 100644
--- a/cmd/timoni/bundle_apply.go
+++ b/cmd/timoni/bundle_apply.go
@@ -71,8 +71,6 @@ type bundleApplyFlags struct {
 	wait bool
force bool overwriteOwnership bool - runtimeFromEnv bool - runtimeFiles []string creds flags.Credentials } @@ -92,10 +90,6 @@ func init() { "Perform a server-side apply dry run and prints the diff.") bundleApplyCmd.Flags().BoolVar(&bundleApplyArgs.wait, "wait", true, "Wait for the applied Kubernetes objects to become ready.") - bundleApplyCmd.Flags().StringSliceVarP(&bundleApplyArgs.runtimeFiles, "runtime", "r", nil, - "The local path to runtime.cue files.") - bundleApplyCmd.Flags().BoolVar(&bundleApplyArgs.runtimeFromEnv, "runtime-from-env", false, - "Inject runtime values from the environment.") bundleApplyCmd.Flags().Var(&bundleApplyArgs.creds, bundleApplyArgs.creds.Type(), bundleApplyArgs.creds.Description()) bundleCmd.AddCommand(bundleApplyCmd) } @@ -135,12 +129,12 @@ func runBundleApplyCmd(cmd *cobra.Command, _ []string) error { runtimeValues := make(map[string]string) - if bundleApplyArgs.runtimeFromEnv { + if bundleArgs.runtimeFromEnv { maps.Copy(runtimeValues, engine.GetEnv()) } - if len(bundleApplyArgs.runtimeFiles) > 0 { - rt, err := buildRuntime(bundleApplyArgs.runtimeFiles) + if len(bundleArgs.runtimeFiles) > 0 { + rt, err := buildRuntime(bundleArgs.runtimeFiles) if err != nil { return err } diff --git a/cmd/timoni/bundle_build.go b/cmd/timoni/bundle_build.go index b0621b64..5efa5110 100644 --- a/cmd/timoni/bundle_build.go +++ b/cmd/timoni/bundle_build.go @@ -55,11 +55,9 @@ var bundleBuildCmd = &cobra.Command{ } type bundleBuildFlags struct { - pkg flags.Package - files []string - creds flags.Credentials - runtimeFromEnv bool - runtimeFiles []string + pkg flags.Package + files []string + creds flags.Credentials } var bundleBuildArgs bundleBuildFlags @@ -68,10 +66,6 @@ func init() { bundleBuildCmd.Flags().VarP(&bundleBuildArgs.pkg, bundleBuildArgs.pkg.Type(), bundleBuildArgs.pkg.Shorthand(), bundleBuildArgs.pkg.Description()) bundleBuildCmd.Flags().StringSliceVarP(&bundleBuildArgs.files, "file", "f", nil, "The local path to bundle.cue files.") - 
bundleBuildCmd.Flags().BoolVar(&bundleBuildArgs.runtimeFromEnv, "runtime-from-env", false, - "Inject runtime values from the environment.") - bundleBuildCmd.Flags().StringSliceVarP(&bundleBuildArgs.runtimeFiles, "runtime", "r", nil, - "The local path to runtime.cue files.") bundleBuildCmd.Flags().Var(&bundleBuildArgs.creds, bundleBuildArgs.creds.Type(), bundleBuildArgs.creds.Description()) bundleCmd.AddCommand(bundleBuildCmd) } @@ -107,15 +101,15 @@ func runBundleBuildCmd(cmd *cobra.Command, _ []string) error { runtimeValues := make(map[string]string) - if bundleBuildArgs.runtimeFromEnv { + if bundleArgs.runtimeFromEnv { maps.Copy(runtimeValues, engine.GetEnv()) } - if len(bundleBuildArgs.runtimeFiles) > 0 { + if len(bundleArgs.runtimeFiles) > 0 { kctx, cancel := context.WithTimeout(cmd.Context(), rootArgs.timeout) defer cancel() - rt, err := buildRuntime(bundleBuildArgs.runtimeFiles) + rt, err := buildRuntime(bundleArgs.runtimeFiles) if err != nil { return err } diff --git a/cmd/timoni/bundle_vet.go b/cmd/timoni/bundle_vet.go index 4d5fa35c..7c91f514 100644 --- a/cmd/timoni/bundle_vet.go +++ b/cmd/timoni/bundle_vet.go @@ -60,11 +60,9 @@ with Timoni's schema and optionally prints the computed value. 
} type bundleVetFlags struct { - pkg flags.Package - files []string - runtimeFromEnv bool - runtimeFiles []string - printValue bool + pkg flags.Package + files []string + printValue bool } var bundleVetArgs bundleVetFlags @@ -73,10 +71,6 @@ func init() { bundleVetCmd.Flags().VarP(&bundleVetArgs.pkg, bundleVetArgs.pkg.Type(), bundleVetArgs.pkg.Shorthand(), bundleVetArgs.pkg.Description()) bundleVetCmd.Flags().StringSliceVarP(&bundleVetArgs.files, "file", "f", nil, "The local path to bundle.cue files.") - bundleVetCmd.Flags().BoolVar(&bundleVetArgs.runtimeFromEnv, "runtime-from-env", false, - "Inject runtime values from the environment.") - bundleVetCmd.Flags().StringSliceVarP(&bundleVetArgs.runtimeFiles, "runtime", "r", nil, - "The local path to runtime.cue files.") bundleVetCmd.Flags().BoolVar(&bundleVetArgs.printValue, "print-value", false, "Print the computed value of the bundle.") bundleCmd.AddCommand(bundleVetCmd) @@ -114,15 +108,15 @@ func runBundleVetCmd(cmd *cobra.Command, args []string) error { runtimeValues := make(map[string]string) - if bundleVetArgs.runtimeFromEnv { + if bundleArgs.runtimeFromEnv { maps.Copy(runtimeValues, engine.GetEnv()) } - if len(bundleVetArgs.runtimeFiles) > 0 { + if len(bundleArgs.runtimeFiles) > 0 { kctx, cancel := context.WithTimeout(cmd.Context(), rootArgs.timeout) defer cancel() - rt, err := buildRuntime(bundleVetArgs.runtimeFiles) + rt, err := buildRuntime(bundleArgs.runtimeFiles) if err != nil { return err } diff --git a/cmd/timoni/main_test.go b/cmd/timoni/main_test.go index e81c719e..301e5e04 100644 --- a/cmd/timoni/main_test.go +++ b/cmd/timoni/main_test.go @@ -128,6 +128,7 @@ func resetCmdArgs() { listArgs = listFlags{} pullModArgs = pullModFlags{} pushModArgs = pushModFlags{} + bundleArgs = bundleFlags{} bundleApplyArgs = bundleApplyFlags{} bundleVetArgs = bundleVetFlags{} bundleDelArgs = bundleDelFlags{} From 3b88f3dc6484513cc7cef7cd208841130aa5b10c Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Sat, 18 Nov 2023 
16:53:02 +0200 Subject: [PATCH 09/19] Add cluster info runtime values to API Signed-off-by: Stefan Prodan --- api/v1alpha1/runtime.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/api/v1alpha1/runtime.go b/api/v1alpha1/runtime.go index 472e225a..7a8e7e34 100644 --- a/api/v1alpha1/runtime.go +++ b/api/v1alpha1/runtime.go @@ -147,6 +147,16 @@ type RuntimeCluster struct { KubeContext string `json:"kubeContext"` } +// NameGroupValues returns the Timoni runtime values for this cluster. +func (rt *RuntimeCluster) NameGroupValues() map[string]string { + result := make(map[string]string) + if rt.Name != RuntimeDefaultName { + result["TIMONI_CLUSTER_NAME"] = rt.Name + result["TIMONI_CLUSTER_GROUP"] = rt.Group + } + return result +} + // SelectClusters returns the clusters matching the specified name and group. // Both the name and group support the '*' wildcard. func (r *Runtime) SelectClusters(name, group string) []RuntimeCluster { From 1245ad0c49c44685f94e65c872467b00ca6f3a3f Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Sat, 18 Nov 2023 16:54:22 +0200 Subject: [PATCH 10/19] Add multi-cluster support to `bundle vet` cmd Signed-off-by: Stefan Prodan --- cmd/timoni/bundle_vet.go | 106 +++++++++++++++++++------------ cmd/timoni/bundle_vet_test.go | 114 ++++++++++++++++++++++++++++++++++ 2 files changed, 182 insertions(+), 38 deletions(-) diff --git a/cmd/timoni/bundle_vet.go b/cmd/timoni/bundle_vet.go index 7c91f514..ac017107 100644 --- a/cmd/timoni/bundle_vet.go +++ b/cmd/timoni/bundle_vet.go @@ -21,6 +21,7 @@ import ( "fmt" "maps" "os" + "path" "cuelang.org/go/cue" "cuelang.org/go/cue/cuecontext" @@ -112,65 +113,94 @@ func runBundleVetCmd(cmd *cobra.Command, args []string) error { maps.Copy(runtimeValues, engine.GetEnv()) } - if len(bundleArgs.runtimeFiles) > 0 { - kctx, cancel := context.WithTimeout(cmd.Context(), rootArgs.timeout) - defer cancel() + rt, err := buildRuntime(bundleArgs.runtimeFiles) + if err != nil { + return err + } - rt, err := 
buildRuntime(bundleArgs.runtimeFiles) - if err != nil { - return err - } + clusters := rt.SelectClusters(bundleArgs.runtimeCluster, bundleArgs.runtimeClusterGroup) + if len(clusters) == 0 { + return fmt.Errorf("no cluster found") + } + + kctx, cancel := context.WithTimeout(cmd.Context(), rootArgs.timeout) + defer cancel() + + for _, cluster := range clusters { + kubeconfigArgs.Context = &cluster.KubeContext + + clusterValues := make(map[string]string) + // add values from env + maps.Copy(clusterValues, runtimeValues) + + // add values from cluster rm, err := runtime.NewResourceManager(kubeconfigArgs) if err != nil { return err } - reader := runtime.NewResourceReader(rm) rv, err := reader.Read(kctx, rt.Refs) if err != nil { return err } + maps.Copy(clusterValues, rv) - maps.Copy(runtimeValues, rv) - } - - if err := bm.InitWorkspace(tmpDir, runtimeValues); err != nil { - return describeErr(tmpDir, "failed to parse bundle", err) - } + // add cluster info + maps.Copy(clusterValues, cluster.NameGroupValues()) - v, err := bm.Build() - if err != nil { - return describeErr(tmpDir, "failed to build bundle", err) - } + // create cluster workspace + workspace := path.Join(tmpDir, cluster.Name) + if err := os.MkdirAll(workspace, os.ModePerm); err != nil { + return err + } - bundle, err := bm.GetBundle(v) - if err != nil { - return err - } - log = LoggerBundle(logr.NewContext(cmd.Context(), log), bundle.Name, apiv1.RuntimeDefaultName) + if err := bm.InitWorkspace(workspace, clusterValues); err != nil { + return describeErr(workspace, "failed to parse bundle", err) + } - if len(bundle.Instances) == 0 { - return fmt.Errorf("no instances found in bundle") - } + v, err := bm.Build() + if err != nil { + return describeErr(workspace, "failed to build bundle", err) + } - if bundleVetArgs.printValue { - val := v.LookupPath(cue.ParsePath("bundle")) - if val.Err() != nil { + bundle, err := bm.GetBundle(v) + if err != nil { return err } - _, err := 
rootCmd.OutOrStdout().Write([]byte(fmt.Sprintf("bundle: %v\n", val))) - return err - } - for _, i := range bundle.Instances { - if i.Namespace == "" { - return fmt.Errorf("instance %s does not have a namespace", i.Name) + log = LoggerBundle(logr.NewContext(cmd.Context(), log), bundle.Name, apiv1.RuntimeDefaultName) + + if len(bundle.Instances) == 0 { + return fmt.Errorf("no instances found in bundle") + } + + if bundleVetArgs.printValue { + val := v.LookupPath(cue.ParsePath("bundle")) + if val.Err() != nil { + return err + } + bundleCue := fmt.Sprintf("bundle: %v\n", val) + if cluster.Name != apiv1.RuntimeDefaultName { + bundleCue = fmt.Sprintf("\"%s\": bundle: %v\n", cluster.Name, val) + } + _, err := rootCmd.OutOrStdout().Write([]byte(bundleCue)) + if err != nil { + return err + } + } else { + for _, i := range bundle.Instances { + if i.Namespace == "" { + return fmt.Errorf("instance %s does not have a namespace", i.Name) + } + log := LoggerBundleInstance(logr.NewContext(cmd.Context(), log), bundle.Name, cluster.Name, i.Name) + log.Info("instance is valid") + } } - log := LoggerBundleInstance(logr.NewContext(cmd.Context(), log), bundle.Name, apiv1.RuntimeDefaultName, i.Name) - log.Info("instance is valid") } - log.Info("bundle is valid") + if !bundleVetArgs.printValue { + log.Info("bundle is valid") + } return nil } diff --git a/cmd/timoni/bundle_vet_test.go b/cmd/timoni/bundle_vet_test.go index b9120482..9b14bc42 100644 --- a/cmd/timoni/bundle_vet_test.go +++ b/cmd/timoni/bundle_vet_test.go @@ -292,3 +292,117 @@ bundle: g.Expect(err).ToNot(HaveOccurred()) g.Expect(output).To(BeEquivalentTo(bundleComputed)) } + +func Test_BundleVet_Clusters(t *testing.T) { + g := NewWithT(t) + + bundleCue := ` +bundle: { + _cluster: "dev" @timoni(runtime:string:TIMONI_CLUSTER_NAME) + _env: "dev" @timoni(runtime:string:TIMONI_CLUSTER_GROUP) + + apiVersion: "v1alpha1" + name: "fleet-test" + instances: { + "frontend": { + module: { + url: "oci://ghcr.io/stefanprodan/timoni/minimal" 
+ version: "latest" + } + namespace: "fleet-test" + values: { + message: "Hello from cluster \(_cluster)" + test: enabled: true + + if _env == "staging" { + replicas: 2 + } + + if _env == "production" { + replicas: 3 + } + } + } + } +} +` + runtimeCue := ` +runtime: { + apiVersion: "v1alpha1" + name: "fleet-test" + clusters: { + "staging": { + group: "staging" + kubeContext: "envtest" + } + "production": { + group: "production" + kubeContext: "envtest" + } + } + values: [ + { + query: "k8s:v1:Namespace:kube-system" + for: { + "CLUSTER_UID": "obj.metadata.uid" + } + }, + ] +} +` + + bundleComputed := `"staging": bundle: { + apiVersion: "v1alpha1" + name: "fleet-test" + instances: { + frontend: { + module: { + url: "oci://ghcr.io/stefanprodan/timoni/minimal" + version: "latest" + } + namespace: "fleet-test" + values: { + message: "Hello from cluster staging" + replicas: 2 + test: { + enabled: true + } + } + } + } +} +"production": bundle: { + apiVersion: "v1alpha1" + name: "fleet-test" + instances: { + frontend: { + module: { + url: "oci://ghcr.io/stefanprodan/timoni/minimal" + version: "latest" + } + namespace: "fleet-test" + values: { + message: "Hello from cluster production" + replicas: 3 + test: { + enabled: true + } + } + } + } +} +` + wd := t.TempDir() + bundlePath := filepath.Join(wd, "bundle.cue") + g.Expect(os.WriteFile(bundlePath, []byte(bundleCue), 0644)).ToNot(HaveOccurred()) + + runtimePath := filepath.Join(wd, "runtime.cue") + g.Expect(os.WriteFile(runtimePath, []byte(runtimeCue), 0644)).ToNot(HaveOccurred()) + + output, err := executeCommand(fmt.Sprintf( + "bundle vet -f %s -r %s -p main --print-value", + bundlePath, runtimePath, + )) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(output).To(BeEquivalentTo(bundleComputed)) +} From 3291315a2a33d3cf8b0d996302fb7b4514c9d827 Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Sat, 18 Nov 2023 18:39:47 +0200 Subject: [PATCH 11/19] Add default cluster to API Signed-off-by: Stefan Prodan --- 
api/v1alpha1/runtime.go | 16 ++++++++++++---- cmd/timoni/bundle_vet.go | 2 +- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/api/v1alpha1/runtime.go b/api/v1alpha1/runtime.go index 7a8e7e34..f1965ec2 100644 --- a/api/v1alpha1/runtime.go +++ b/api/v1alpha1/runtime.go @@ -119,8 +119,8 @@ type Runtime struct { Refs []RuntimeResourceRef `json:"refs"` } -// DefaultRuntime returns a Runtime with a single -// cluster set to specified context. +// DefaultRuntime returns an empty Runtime with an unnamed +// cluster set to the specified context. func DefaultRuntime(kubeContext string) *Runtime { defaultCluster := RuntimeCluster{ Name: RuntimeDefaultName, @@ -147,10 +147,18 @@ type RuntimeCluster struct { KubeContext string `json:"kubeContext"` } -// NameGroupValues returns the Timoni runtime values for this cluster. +// IsDefault returns true if the given cluster +// was initialised by a Runtime with no target clusters. +func (rt *RuntimeCluster) IsDefault() bool { + return rt.Name == RuntimeDefaultName +} + +// NameGroupValues returns the cluster name and group variables +// as specified in the Runtime definition. If the given cluster +// was initialised by an empty Runtime, the returned map is empty. 
func (rt *RuntimeCluster) NameGroupValues() map[string]string { result := make(map[string]string) - if rt.Name != RuntimeDefaultName { + if !rt.IsDefault() { result["TIMONI_CLUSTER_NAME"] = rt.Name result["TIMONI_CLUSTER_GROUP"] = rt.Group } diff --git a/cmd/timoni/bundle_vet.go b/cmd/timoni/bundle_vet.go index ac017107..0ef5760e 100644 --- a/cmd/timoni/bundle_vet.go +++ b/cmd/timoni/bundle_vet.go @@ -181,7 +181,7 @@ func runBundleVetCmd(cmd *cobra.Command, args []string) error { return err } bundleCue := fmt.Sprintf("bundle: %v\n", val) - if cluster.Name != apiv1.RuntimeDefaultName { + if !cluster.IsDefault() { bundleCue = fmt.Sprintf("\"%s\": bundle: %v\n", cluster.Name, val) } _, err := rootCmd.OutOrStdout().Write([]byte(bundleCue)) From 817d00ec5ecee1f82a67a05d85a98d1b7bc2da10 Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Sat, 18 Nov 2023 19:58:08 +0200 Subject: [PATCH 12/19] Add multi-cluster support to `bundle apply` cmd Signed-off-by: Stefan Prodan --- cmd/timoni/bundle_apply.go | 132 +++++++++++++++++------------- cmd/timoni/bundle_apply_test.go | 34 ++++++++ internal/engine/bundle_builder.go | 1 + 3 files changed, 111 insertions(+), 56 deletions(-) diff --git a/cmd/timoni/bundle_apply.go b/cmd/timoni/bundle_apply.go index 60805f88..781c2135 100644 --- a/cmd/timoni/bundle_apply.go +++ b/cmd/timoni/bundle_apply.go @@ -133,88 +133,108 @@ func runBundleApplyCmd(cmd *cobra.Command, _ []string) error { maps.Copy(runtimeValues, engine.GetEnv()) } - if len(bundleArgs.runtimeFiles) > 0 { - rt, err := buildRuntime(bundleArgs.runtimeFiles) - if err != nil { - return err - } + rt, err := buildRuntime(bundleArgs.runtimeFiles) + if err != nil { + return err + } + + clusters := rt.SelectClusters(bundleArgs.runtimeCluster, bundleArgs.runtimeClusterGroup) + if len(clusters) == 0 { + return fmt.Errorf("no cluster found") + } + ctxPull, cancel := context.WithTimeout(ctx, rootArgs.timeout) + defer cancel() + + for _, cluster := range clusters { + kubeconfigArgs.Context = 
&cluster.KubeContext + + clusterValues := make(map[string]string) + + // add values from env + maps.Copy(clusterValues, runtimeValues) + + // add values from cluster rm, err := runtime.NewResourceManager(kubeconfigArgs) if err != nil { return err } - reader := runtime.NewResourceReader(rm) rv, err := reader.Read(ctx, rt.Refs) if err != nil { return err } + maps.Copy(clusterValues, rv) - maps.Copy(runtimeValues, rv) - } - - if err := bm.InitWorkspace(tmpDir, runtimeValues); err != nil { - return err - } + // add cluster info + maps.Copy(clusterValues, cluster.NameGroupValues()) - v, err := bm.Build() - if err != nil { - return describeErr(tmpDir, "failed to build bundle", err) - } + // create cluster workspace + workspace := path.Join(tmpDir, cluster.Name) + if err := os.MkdirAll(workspace, os.ModePerm); err != nil { + return err + } - bundle, err := bm.GetBundle(v) - if err != nil { - return err - } + if err := bm.InitWorkspace(workspace, clusterValues); err != nil { + return describeErr(workspace, "failed to parse bundle", err) + } - log := LoggerBundle(cmd.Context(), bundle.Name, apiv1.RuntimeDefaultName) + v, err := bm.Build() + if err != nil { + return describeErr(tmpDir, "failed to build bundle", err) + } - if !bundleApplyArgs.overwriteOwnership { - err = bundleInstancesOwnershipConflicts(bundle.Instances) + bundle, err := bm.GetBundle(v) if err != nil { return err } - } - ctxPull, cancel := context.WithTimeout(ctx, rootArgs.timeout) - defer cancel() + log := LoggerBundle(cmd.Context(), bundle.Name, cluster.Name) - for _, instance := range bundle.Instances { - spin := StartSpinner(fmt.Sprintf("pulling %s", instance.Module.Repository)) - pullErr := fetchBundleInstanceModule(ctxPull, instance, tmpDir) - spin.Stop() - if pullErr != nil { - return pullErr + if !bundleApplyArgs.overwriteOwnership { + err = bundleInstancesOwnershipConflicts(bundle.Instances) + if err != nil { + return err + } } - } - kubeVersion, err := runtime.ServerVersion(kubeconfigArgs) - if err 
!= nil { - return err - } - - if bundleApplyArgs.dryrun || bundleApplyArgs.diff { - log.Info(fmt.Sprintf("applying %v instance(s) %s", - len(bundle.Instances), colorizeDryRun("(server dry run)"))) - } else { - log.Info(fmt.Sprintf("applying %v instance(s)", - len(bundle.Instances))) - } + for _, instance := range bundle.Instances { + spin := StartSpinner(fmt.Sprintf("pulling %s", instance.Module.Repository)) + pullErr := fetchBundleInstanceModule(ctxPull, instance, tmpDir) + spin.Stop() + if pullErr != nil { + return pullErr + } + } - for _, instance := range bundle.Instances { - if err := applyBundleInstance(logr.NewContext(ctx, log), cuectx, instance, kubeVersion, tmpDir); err != nil { + kubeVersion, err := runtime.ServerVersion(kubeconfigArgs) + if err != nil { return err } - } - elapsed := time.Since(start) - if bundleApplyArgs.dryrun || bundleApplyArgs.diff { - log.Info(fmt.Sprintf("applied successfully %s", - colorizeDryRun("(server dry run)"))) - } else { - log.Info(fmt.Sprintf("applied successfully in %s", elapsed.Round(time.Second))) - } + if bundleApplyArgs.dryrun || bundleApplyArgs.diff { + log.Info(fmt.Sprintf("applying %v instance(s) %s", + len(bundle.Instances), colorizeDryRun("(server dry run)"))) + } else { + log.Info(fmt.Sprintf("applying %v instance(s)", + len(bundle.Instances))) + } + for _, instance := range bundle.Instances { + instance.Cluster = cluster.Name + if err := applyBundleInstance(logr.NewContext(ctx, log), cuectx, instance, kubeVersion, tmpDir); err != nil { + return err + } + } + + elapsed := time.Since(start) + if bundleApplyArgs.dryrun || bundleApplyArgs.diff { + log.Info(fmt.Sprintf("applied successfully %s", + colorizeDryRun("(server dry run)"))) + } else { + log.Info(fmt.Sprintf("applied successfully in %s", elapsed.Round(time.Second))) + } + } return nil } @@ -252,7 +272,7 @@ func fetchBundleInstanceModule(ctx context.Context, instance *engine.BundleInsta } func applyBundleInstance(ctx context.Context, cuectx *cue.Context, 
instance *engine.BundleInstance, kubeVersion string, rootDir string) error { - log := LoggerBundleInstance(ctx, instance.Bundle, apiv1.RuntimeDefaultName, instance.Name) + log := LoggerBundleInstance(ctx, instance.Bundle, instance.Cluster, instance.Name) modDir := path.Join(rootDir, instance.Name, "module") builder := engine.NewModuleBuilder( diff --git a/cmd/timoni/bundle_apply_test.go b/cmd/timoni/bundle_apply_test.go index da8169b4..272ea702 100644 --- a/cmd/timoni/bundle_apply_test.go +++ b/cmd/timoni/bundle_apply_test.go @@ -443,6 +443,8 @@ bundle: { values: client: enabled: true @timoni(runtime:bool:CLIENT) values: server: enabled: false @timoni(runtime:bool:ENABLED) values: domain: string @timoni(runtime:string:DOMAIN) + values: metadata: labels: "cluster": string @timoni(runtime:string:TIMONI_CLUSTER_NAME) + values: metadata: labels: "env": string @timoni(runtime:string:TIMONI_CLUSTER_GROUP) } } } @@ -452,6 +454,12 @@ bundle: { runtime: { apiVersion: "v1alpha1" name: "test" + clusters: { + "test": { + group: "testing" + kubeContext: "envtest" + } + } values: [ { query: "k8s:v1:Secret:%[1]s:%[2]s" @@ -518,6 +526,8 @@ runtime: { err = envTestClient.Get(context.Background(), client.ObjectKeyFromObject(scm), scm) g.Expect(err).ToNot(HaveOccurred()) g.Expect(scm.Data["hostname"]).To(BeEquivalentTo("test.local")) + g.Expect(scm.GetLabels()).To(HaveKeyWithValue("cluster", "test")) + g.Expect(scm.GetLabels()).To(HaveKeyWithValue("env", "testing")) }) t.Run("overrides env vars", func(t *testing.T) { @@ -557,4 +567,28 @@ runtime: { g.Expect(err).To(HaveOccurred()) g.Expect(apierrors.IsNotFound(err)).To(BeTrue()) }) + + t.Run("fails for wrong cluster name selector", func(t *testing.T) { + g := NewWithT(t) + + cmd := fmt.Sprintf("bundle apply -p main --wait -f- -r=%s --runtime-cluster=prod", + runtimePath, + ) + + _, err := executeCommandWithIn(cmd, strings.NewReader(bundleData)) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("no cluster 
found")) + }) + + t.Run("fails for wrong cluster group selector", func(t *testing.T) { + g := NewWithT(t) + + cmd := fmt.Sprintf("bundle apply -p main --wait -f- -r=%s --runtime-group=prod", + runtimePath, + ) + + _, err := executeCommandWithIn(cmd, strings.NewReader(bundleData)) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("no cluster found")) + }) } diff --git a/internal/engine/bundle_builder.go b/internal/engine/bundle_builder.go index 0ac161f2..d467eedf 100644 --- a/internal/engine/bundle_builder.go +++ b/internal/engine/bundle_builder.go @@ -46,6 +46,7 @@ type Bundle struct { type BundleInstance struct { Bundle string + Cluster string Name string Namespace string Module apiv1.ModuleReference From b963ca663a55b4327d779c3b04038ae585549399 Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Sun, 19 Nov 2023 01:11:13 +0200 Subject: [PATCH 13/19] Add multi-cluster support to `bundle status` cmd Signed-off-by: Stefan Prodan --- cmd/timoni/bundle_status.go | 106 ++++++++++++++++++++----------- cmd/timoni/bundle_status_test.go | 76 +++++++++++++++++++++- 2 files changed, 144 insertions(+), 38 deletions(-) diff --git a/cmd/timoni/bundle_status.go b/cmd/timoni/bundle_status.go index 6916239c..c9933b67 100644 --- a/cmd/timoni/bundle_status.go +++ b/cmd/timoni/bundle_status.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "maps" "cuelang.org/go/cue/cuecontext" "github.com/fluxcd/cli-utils/pkg/kstatus/status" @@ -75,65 +76,96 @@ func runBundleStatusCmd(cmd *cobra.Command, args []string) error { bundleStatusArgs.name = args[0] } - rm, err := runtime.NewResourceManager(kubeconfigArgs) - if err != nil { - return err - } + runtimeValues := make(map[string]string) - ctx, cancel := context.WithTimeout(context.Background(), rootArgs.timeout) - defer cancel() + if bundleArgs.runtimeFromEnv { + maps.Copy(runtimeValues, engine.GetEnv()) + } - sm := runtime.NewStorageManager(rm) - instances, err := sm.List(ctx, "", 
bundleStatusArgs.name) + rt, err := buildRuntime(bundleArgs.runtimeFiles) if err != nil { return err } - if len(instances) == 0 { - return fmt.Errorf("no instances found in bundle") + clusters := rt.SelectClusters(bundleArgs.runtimeCluster, bundleArgs.runtimeClusterGroup) + if len(clusters) == 0 { + return fmt.Errorf("no cluster found") } - for _, instance := range instances { - log := LoggerBundleInstance(ctx, bundleStatusArgs.name, apiv1.RuntimeDefaultName, instance.Name) - - log.Info(fmt.Sprintf("last applied %s", - colorizeSubject(instance.LastTransitionTime))) - log.Info(fmt.Sprintf("module %s", - colorizeSubject(instance.Module.Repository+":"+instance.Module.Version))) - log.Info(fmt.Sprintf("digest %s", - colorizeSubject(instance.Module.Digest))) + failed := false + for _, cluster := range clusters { + kubeconfigArgs.Context = &cluster.KubeContext - for _, image := range instance.Images { - log.Info(fmt.Sprintf("container image %s", - colorizeSubject(image))) + rm, err := runtime.NewResourceManager(kubeconfigArgs) + if err != nil { + return err } - im := runtime.InstanceManager{Instance: apiv1.Instance{Inventory: instance.Inventory}} + ctx, cancel := context.WithTimeout(context.Background(), rootArgs.timeout) + defer cancel() - objects, err := im.ListObjects() + sm := runtime.NewStorageManager(rm) + instances, err := sm.List(ctx, "", bundleStatusArgs.name) if err != nil { return err } - for _, obj := range objects { - err = rm.Client().Get(ctx, client.ObjectKeyFromObject(obj), obj) + log := LoggerBundle(ctx, bundleStatusArgs.name, cluster.Name) + + if len(instances) == 0 { + log.Error(nil, "no instances found in bundle") + failed = true + continue + } + + for _, instance := range instances { + log := LoggerBundleInstance(ctx, bundleStatusArgs.name, cluster.Name, instance.Name) + + log.Info(fmt.Sprintf("last applied %s", + colorizeSubject(instance.LastTransitionTime))) + log.Info(fmt.Sprintf("module %s", + 
colorizeSubject(instance.Module.Repository+":"+instance.Module.Version))) + log.Info(fmt.Sprintf("digest %s", + colorizeSubject(instance.Module.Digest))) + + for _, image := range instance.Images { + log.Info(fmt.Sprintf("container image %s", + colorizeSubject(image))) + } + + im := runtime.InstanceManager{Instance: apiv1.Instance{Inventory: instance.Inventory}} + + objects, err := im.ListObjects() if err != nil { - if apierrors.IsNotFound(err) { - log.Error(err, colorizeJoin(obj, errors.New("NotFound"))) + return err + } + + for _, obj := range objects { + err = rm.Client().Get(ctx, client.ObjectKeyFromObject(obj), obj) + if err != nil { + if apierrors.IsNotFound(err) { + log.Error(err, colorizeJoin(obj, errors.New("NotFound"))) + failed = true + + continue + } + log.Error(err, colorizeJoin(obj, errors.New("Unknown"))) + failed = true continue } - log.Error(err, colorizeJoin(obj, errors.New("Unknown"))) - continue - } - res, err := status.Compute(obj) - if err != nil { - log.Error(err, colorizeJoin(obj, errors.New("Failed"))) - continue + res, err := status.Compute(obj) + if err != nil { + log.Error(err, colorizeJoin(obj, errors.New("Failed"))) + failed = true + continue + } + log.Info(colorizeJoin(obj, res.Status, "-", res.Message)) } - log.Info(colorizeJoin(obj, res.Status, "-", res.Message)) } } - + if failed { + return fmt.Errorf("completed with errors") + } return nil } diff --git a/cmd/timoni/bundle_status_test.go b/cmd/timoni/bundle_status_test.go index be3c8be8..64a1b16c 100644 --- a/cmd/timoni/bundle_status_test.go +++ b/cmd/timoni/bundle_status_test.go @@ -19,6 +19,8 @@ package main import ( "context" "fmt" + "os" + "path/filepath" "strings" "testing" @@ -108,7 +110,7 @@ bundle: { g.Expect(err).ToNot(HaveOccurred()) output, err := executeCommand(fmt.Sprintf("bundle status %s", bundleName)) - g.Expect(err).ToNot(HaveOccurred()) + g.Expect(err).To(HaveOccurred()) g.Expect(output).To(ContainSubstring(fmt.Sprintf("ConfigMap/%s/frontend-client Current", 
namespace))) g.Expect(output).To(ContainSubstring(fmt.Sprintf("ConfigMap/%s/backend-server NotFound", namespace))) }) @@ -171,3 +173,75 @@ bundle: { g.Expect(output).ToNot(ContainSubstring("timoni:latest-dev@sha")) }) } + +func Test_BundleStatus_Runtime(t *testing.T) { + g := NewWithT(t) + + bundleName := rnd("my-bundle", 5) + modPath := "testdata/module" + namespace := rnd("my-namespace", 5) + modName := rnd("my-mod", 5) + modURL := fmt.Sprintf("%s/%s", dockerRegistry, modName) + modVer := "1.0.0" + + _, err := executeCommand(fmt.Sprintf( + "mod push %s oci://%s -v %s", + modPath, + modURL, + modVer, + )) + g.Expect(err).ToNot(HaveOccurred()) + + bundleData := fmt.Sprintf(` +bundle: { + _cluster: string @timoni(runtime:string:TIMONI_CLUSTER_NAME) + + apiVersion: "v1alpha1" + name: "%[1]s" + instances: { + "\(_cluster)-app": { + module: { + url: "oci://%[2]s" + version: "%[3]s" + } + namespace: "%[4]s" + } + } +} +`, bundleName, modURL, modVer, namespace) + + runtimeCue := ` +runtime: { + apiVersion: "v1alpha1" + name: "fleet-test" + clusters: { + "staging": { + group: "staging" + kubeContext: "envtest" + } + "production": { + group: "production" + kubeContext: "envtest" + } + } + values: [] +} +` + + runtimePath := filepath.Join(t.TempDir(), "runtime.cue") + g.Expect(os.WriteFile(runtimePath, []byte(runtimeCue), 0644)).ToNot(HaveOccurred()) + + _, err = executeCommandWithIn( + fmt.Sprintf("bundle apply -f- -r %s -p main --wait", runtimePath), + strings.NewReader(bundleData)) + g.Expect(err).ToNot(HaveOccurred()) + + t.Run("lists instances across clusters", func(t *testing.T) { + g := NewWithT(t) + + output, err := executeCommand(fmt.Sprintf("bundle status %s -r %s", bundleName, runtimePath)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(output).To(ContainSubstring("staging-app")) + g.Expect(output).To(ContainSubstring("production-app")) + }) +} From d6f2175eb5bbd87b81c0221a51b623d71e80425a Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Sun, 19 Nov 2023 
10:17:40 +0200 Subject: [PATCH 14/19] Add multi-cluster support to `bundle delete` cmd Signed-off-by: Stefan Prodan --- cmd/timoni/bundle_delete.go | 64 ++++++++++++-------- cmd/timoni/bundle_delete_test.go | 100 +++++++++++++++++++++++++++++++ cmd/timoni/bundle_status.go | 13 +--- 3 files changed, 143 insertions(+), 34 deletions(-) diff --git a/cmd/timoni/bundle_delete.go b/cmd/timoni/bundle_delete.go index 96e47ecb..2b5c8a4e 100644 --- a/cmd/timoni/bundle_delete.go +++ b/cmd/timoni/bundle_delete.go @@ -92,44 +92,60 @@ func runBundleDelCmd(cmd *cobra.Command, args []string) error { bundleDelArgs.name = args[0] } - ctx, cancel := context.WithTimeout(cmd.Context(), rootArgs.timeout) - defer cancel() - - sm, err := runtime.NewResourceManager(kubeconfigArgs) + rt, err := buildRuntime(bundleArgs.runtimeFiles) if err != nil { return err } - log := LoggerBundle(ctx, bundleDelArgs.name, apiv1.RuntimeDefaultName) - iStorage := runtime.NewStorageManager(sm) - - instances, err := iStorage.List(ctx, "", bundleDelArgs.name) - if err != nil { - return err + clusters := rt.SelectClusters(bundleArgs.runtimeCluster, bundleArgs.runtimeClusterGroup) + if len(clusters) == 0 { + return fmt.Errorf("no cluster found") } - if len(instances) == 0 { - return fmt.Errorf("no instances found in bundle") - } + ctx, cancel := context.WithTimeout(context.Background(), rootArgs.timeout) + defer cancel() + + for _, cluster := range clusters { + kubeconfigArgs.Context = &cluster.KubeContext - // delete in revers order (last installed, first to uninstall) - for index := len(instances) - 1; index >= 0; index-- { - instance := instances[index] - log.Info(fmt.Sprintf("deleting instance %s in namespace %s", - colorizeSubject(instance.Name), colorizeSubject(instance.Namespace))) - if err := deleteBundleInstance(ctx, &engine.BundleInstance{ - Bundle: bundleDelArgs.name, - Name: instance.Name, - Namespace: instance.Namespace, - }, bundleDelArgs.wait, bundleDelArgs.dryrun); err != nil { + rm, err := 
runtime.NewResourceManager(kubeconfigArgs) + if err != nil { + return err + } + + sm := runtime.NewStorageManager(rm) + instances, err := sm.List(ctx, "", bundleDelArgs.name) + if err != nil { return err } + + log := LoggerBundle(ctx, bundleDelArgs.name, cluster.Name) + + if len(instances) == 0 { + log.Error(nil, "no instances found in bundle") + continue + } + + // delete in revers order (last installed, first to uninstall) + for index := len(instances) - 1; index >= 0; index-- { + instance := instances[index] + log.Info(fmt.Sprintf("deleting instance %s in namespace %s", + colorizeSubject(instance.Name), colorizeSubject(instance.Namespace))) + if err := deleteBundleInstance(ctx, &engine.BundleInstance{ + Bundle: bundleDelArgs.name, + Cluster: cluster.Name, + Name: instance.Name, + Namespace: instance.Namespace, + }, bundleDelArgs.wait, bundleDelArgs.dryrun); err != nil { + return err + } + } } return nil } func deleteBundleInstance(ctx context.Context, instance *engine.BundleInstance, wait bool, dryrun bool) error { - log := LoggerBundle(ctx, instance.Bundle, apiv1.RuntimeDefaultName) + log := LoggerBundle(ctx, instance.Bundle, instance.Cluster) sm, err := runtime.NewResourceManager(kubeconfigArgs) if err != nil { diff --git a/cmd/timoni/bundle_delete_test.go b/cmd/timoni/bundle_delete_test.go index f930f6f6..47839399 100644 --- a/cmd/timoni/bundle_delete_test.go +++ b/cmd/timoni/bundle_delete_test.go @@ -19,6 +19,8 @@ package main import ( "context" "fmt" + "os" + "path/filepath" "strings" "testing" @@ -180,3 +182,101 @@ bundle: { g.Expect(errors.IsNotFound(err)).To(BeTrue()) }) } + +func Test_BundleDelete_Runtime(t *testing.T) { + g := NewWithT(t) + + bundleName := rnd("my-bundle", 5) + modPath := "testdata/module" + namespace := rnd("my-namespace", 5) + modName := rnd("my-mod", 5) + modURL := fmt.Sprintf("%s/%s", dockerRegistry, modName) + modVer := "1.0.0" + + _, err := executeCommand(fmt.Sprintf( + "mod push %s oci://%s -v %s", + modPath, + modURL, + modVer, 
+ )) + g.Expect(err).ToNot(HaveOccurred()) + + bundleData := fmt.Sprintf(` +bundle: { + _cluster: string @timoni(runtime:string:TIMONI_CLUSTER_NAME) + + apiVersion: "v1alpha1" + name: "%[1]s" + instances: { + "\(_cluster)-app": { + module: { + url: "oci://%[2]s" + version: "%[3]s" + } + namespace: "%[4]s" + } + } +} +`, bundleName, modURL, modVer, namespace) + + runtimeCue := ` +runtime: { + apiVersion: "v1alpha1" + name: "fleet-test" + clusters: { + "staging": { + group: "staging" + kubeContext: "envtest" + } + "production": { + group: "production" + kubeContext: "envtest" + } + } + values: [] +} +` + + runtimePath := filepath.Join(t.TempDir(), "runtime.cue") + g.Expect(os.WriteFile(runtimePath, []byte(runtimeCue), 0644)).ToNot(HaveOccurred()) + + _, err = executeCommandWithIn( + fmt.Sprintf("bundle apply -f- -r %s -p main --wait", runtimePath), + strings.NewReader(bundleData)) + g.Expect(err).ToNot(HaveOccurred()) + + stagingCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "staging-app-server", + Namespace: namespace, + }, + } + + productionCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "production-app-server", + Namespace: namespace, + }, + } + + err = envTestClient.Get(context.Background(), client.ObjectKeyFromObject(stagingCM), stagingCM) + g.Expect(err).ToNot(HaveOccurred()) + + err = envTestClient.Get(context.Background(), client.ObjectKeyFromObject(productionCM), productionCM) + g.Expect(err).ToNot(HaveOccurred()) + + t.Run("deletes instances across clusters", func(t *testing.T) { + g := NewWithT(t) + + output, err := executeCommand(fmt.Sprintf("bundle delete %s -r %s --wait", bundleName, runtimePath)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(output).To(ContainSubstring("staging-app")) + g.Expect(output).To(ContainSubstring("production-app")) + + err = envTestClient.Get(context.Background(), client.ObjectKeyFromObject(stagingCM), stagingCM) + g.Expect(errors.IsNotFound(err)).To(BeTrue()) + + err = 
envTestClient.Get(context.Background(), client.ObjectKeyFromObject(productionCM), productionCM) + g.Expect(errors.IsNotFound(err)).To(BeTrue()) + }) +} diff --git a/cmd/timoni/bundle_status.go b/cmd/timoni/bundle_status.go index c9933b67..13e101b3 100644 --- a/cmd/timoni/bundle_status.go +++ b/cmd/timoni/bundle_status.go @@ -20,7 +20,6 @@ import ( "context" "errors" "fmt" - "maps" "cuelang.org/go/cue/cuecontext" "github.com/fluxcd/cli-utils/pkg/kstatus/status" @@ -76,12 +75,6 @@ func runBundleStatusCmd(cmd *cobra.Command, args []string) error { bundleStatusArgs.name = args[0] } - runtimeValues := make(map[string]string) - - if bundleArgs.runtimeFromEnv { - maps.Copy(runtimeValues, engine.GetEnv()) - } - rt, err := buildRuntime(bundleArgs.runtimeFiles) if err != nil { return err @@ -92,6 +85,9 @@ func runBundleStatusCmd(cmd *cobra.Command, args []string) error { return fmt.Errorf("no cluster found") } + ctx, cancel := context.WithTimeout(context.Background(), rootArgs.timeout) + defer cancel() + failed := false for _, cluster := range clusters { kubeconfigArgs.Context = &cluster.KubeContext @@ -101,9 +97,6 @@ func runBundleStatusCmd(cmd *cobra.Command, args []string) error { return err } - ctx, cancel := context.WithTimeout(context.Background(), rootArgs.timeout) - defer cancel() - sm := runtime.NewStorageManager(rm) instances, err := sm.List(ctx, "", bundleStatusArgs.name) if err != nil { From 8e208ac060745030b4405c6038de1a33865fd19b Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Sun, 19 Nov 2023 15:15:19 +0200 Subject: [PATCH 15/19] Enforce cluster selection for `bundle build` cmd Signed-off-by: Stefan Prodan --- cmd/timoni/bundle.go | 2 +- cmd/timoni/bundle_build.go | 12 +++++ cmd/timoni/bundle_build_test.go | 79 ++++++++++++++++++++++++++++++++- 3 files changed, 91 insertions(+), 2 deletions(-) diff --git a/cmd/timoni/bundle.go b/cmd/timoni/bundle.go index 006f65fd..f92c1dd0 100644 --- a/cmd/timoni/bundle.go +++ b/cmd/timoni/bundle.go @@ -41,7 +41,7 
@@ func init() { "The local path to runtime.cue files.") bundleCmd.PersistentFlags().StringVar(&bundleArgs.runtimeCluster, "runtime-cluster", "*", "Filter runtime cluster by name.") - bundleCmd.PersistentFlags().StringVar(&bundleArgs.runtimeCluster, "runtime-group", "*", + bundleCmd.PersistentFlags().StringVar(&bundleArgs.runtimeClusterGroup, "runtime-group", "*", "Filter runtime clusters by group.") rootCmd.AddCommand(bundleCmd) } diff --git a/cmd/timoni/bundle_build.go b/cmd/timoni/bundle_build.go index 5efa5110..3276959c 100644 --- a/cmd/timoni/bundle_build.go +++ b/cmd/timoni/bundle_build.go @@ -114,6 +114,17 @@ func runBundleBuildCmd(cmd *cobra.Command, _ []string) error { return err } + clusters := rt.SelectClusters(bundleArgs.runtimeCluster, bundleArgs.runtimeClusterGroup) + if len(clusters) > 1 { + return fmt.Errorf("you must select a cluster with --runtime-cluster") + } + if len(clusters) == 0 { + return fmt.Errorf("no cluster found") + } + + cluster := clusters[0] + kubeconfigArgs.Context = &cluster.KubeContext + rm, err := runtime.NewResourceManager(kubeconfigArgs) if err != nil { return err @@ -126,6 +137,7 @@ func runBundleBuildCmd(cmd *cobra.Command, _ []string) error { } maps.Copy(runtimeValues, rv) + maps.Copy(runtimeValues, cluster.NameGroupValues()) } if err := bm.InitWorkspace(tmpDir, runtimeValues); err != nil { diff --git a/cmd/timoni/bundle_build_test.go b/cmd/timoni/bundle_build_test.go index 87586a76..aca6ddf8 100644 --- a/cmd/timoni/bundle_build_test.go +++ b/cmd/timoni/bundle_build_test.go @@ -135,12 +135,89 @@ bundle: g.Expect(err).ToNot(HaveOccurred()) g.Expect(found).To(BeTrue()) g.Expect(host).To(ContainSubstring("example.internal")) - }) } }) } +func Test_BundleBuild_Runtime(t *testing.T) { + g := NewWithT(t) + + bundleName := rnd("my-bundle", 5) + modPath := "testdata/module" + namespace := rnd("my-namespace", 5) + modName := rnd("my-mod", 5) + modURL := fmt.Sprintf("%s/%s", dockerRegistry, modName) + modVer := "1.0.0" + + _, err := 
executeCommand(fmt.Sprintf( + "mod push %s oci://%s -v %s", + modPath, + modURL, + modVer, + )) + g.Expect(err).ToNot(HaveOccurred()) + + bundleData := fmt.Sprintf(` +bundle: { + _cluster: string @timoni(runtime:string:TIMONI_CLUSTER_NAME) + + apiVersion: "v1alpha1" + name: "%[1]s" + instances: { + "\(_cluster)-app": { + module: { + url: "oci://%[2]s" + version: "%[3]s" + } + namespace: "%[4]s" + } + } +} +`, bundleName, modURL, modVer, namespace) + + runtimeCue := ` +runtime: { + apiVersion: "v1alpha1" + name: "fleet-test" + clusters: { + "staging": { + group: "staging" + kubeContext: "envtest" + } + "production": { + group: "production" + kubeContext: "envtest" + } + } + values: [] +} +` + + runtimePath := filepath.Join(t.TempDir(), "runtime.cue") + g.Expect(os.WriteFile(runtimePath, []byte(runtimeCue), 0644)).ToNot(HaveOccurred()) + + t.Run("fails for multiple clusters", func(t *testing.T) { + g := NewWithT(t) + _, err = executeCommandWithIn( + fmt.Sprintf("bundle build -f- -r %s -p main", runtimePath), + strings.NewReader(bundleData)) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("select a cluster")) + }) + + t.Run("builds for a single cluster", func(t *testing.T) { + g := NewWithT(t) + + output, err := executeCommandWithIn( + fmt.Sprintf("bundle build -f- -r %s -p main --runtime-group=production", runtimePath), + strings.NewReader(bundleData)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(output).ToNot(ContainSubstring("staging-app")) + g.Expect(output).To(ContainSubstring("production-app")) + }) +} + func getObjectByName(objs []*unstructured.Unstructured, name string) (*unstructured.Unstructured, error) { for _, obj := range objs { if obj.GetName() == name { From cb76adec2d0fc8dc152b420efa5212685a674a65 Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Sun, 19 Nov 2023 18:00:26 +0200 Subject: [PATCH 16/19] Add cluster group to instance logs Signed-off-by: Stefan Prodan --- cmd/timoni/bundle_apply.go | 17 +++++++++-------- 
cmd/timoni/log.go | 5 +++++ 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/cmd/timoni/bundle_apply.go b/cmd/timoni/bundle_apply.go index 781c2135..4a667430 100644 --- a/cmd/timoni/bundle_apply.go +++ b/cmd/timoni/bundle_apply.go @@ -212,12 +212,15 @@ func runBundleApplyCmd(cmd *cobra.Command, _ []string) error { return err } + startMsg := fmt.Sprintf("applying %v instance(s)", len(bundle.Instances)) + if !cluster.IsDefault() { + startMsg = fmt.Sprintf("%s on %s", startMsg, colorizeSubject(cluster.Group)) + } + if bundleApplyArgs.dryrun || bundleApplyArgs.diff { - log.Info(fmt.Sprintf("applying %v instance(s) %s", - len(bundle.Instances), colorizeDryRun("(server dry run)"))) + log.Info(fmt.Sprintf("%s %s", startMsg, colorizeDryRun("(server dry run)"))) } else { - log.Info(fmt.Sprintf("applying %v instance(s)", - len(bundle.Instances))) + log.Info(startMsg) } for _, instance := range bundle.Instances { @@ -422,7 +425,7 @@ func applyBundleInstance(ctx context.Context, cuectx *cue.Context, instance *eng if err != nil { return err } - log.Info("resources are ready") + log.Info(fmt.Sprintf("%s resources %s", set.Name, colorizeReady("ready"))) } } @@ -453,10 +456,8 @@ func applyBundleInstance(ctx context.Context, cuectx *cue.Context, instance *eng err = rm.WaitForTermination(deletedObjects, waitOptions) spin.Stop() if err != nil { - return fmt.Errorf("wating for termination failed: %w", err) + return fmt.Errorf("waiting for termination failed: %w", err) } - - log.Info("all resources are ready") } } diff --git a/cmd/timoni/log.go b/cmd/timoni/log.go index 6a9482de..7d7d607e 100644 --- a/cmd/timoni/log.go +++ b/cmd/timoni/log.go @@ -68,6 +68,7 @@ func NewConsoleLogger() logr.Logger { var ( colorDryRun = color.New(color.FgHiBlack, color.Italic) colorError = color.New(color.FgHiRed) + colorReady = color.New(color.FgHiGreen) colorCallerPrefix = color.New(color.FgHiBlack) colorBundle = color.New(color.FgHiMagenta) colorInstance = color.New(color.FgHiMagenta) @@ 
-134,6 +135,10 @@ func colorizeSubject(subject string) string { return color.CyanString(subject) } +func colorizeReady(subject string) string { + return colorReady.Sprint(subject) +} + func colorizeInfo(subject string) string { return color.GreenString(subject) } From 170f2fe4920543b60721d46b21b989d4fb544f5c Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Sun, 19 Nov 2023 23:08:34 +0200 Subject: [PATCH 17/19] Make runtime values optional Signed-off-by: Stefan Prodan --- api/v1alpha1/runtime.go | 2 +- cmd/timoni/bundle_delete_test.go | 1 - cmd/timoni/bundle_status_test.go | 1 - internal/engine/runtime_builder.go | 44 +++++++++++-------------- internal/engine/runtime_builder_test.go | 22 +++++++++++-- 5 files changed, 39 insertions(+), 31 deletions(-) diff --git a/api/v1alpha1/runtime.go b/api/v1alpha1/runtime.go index f1965ec2..e5bb1a28 100644 --- a/api/v1alpha1/runtime.go +++ b/api/v1alpha1/runtime.go @@ -65,7 +65,7 @@ import "strings" kubeContext!: string } - values: [...#RuntimeValue] + values?: [...#RuntimeValue] } ` diff --git a/cmd/timoni/bundle_delete_test.go b/cmd/timoni/bundle_delete_test.go index 47839399..9053ced3 100644 --- a/cmd/timoni/bundle_delete_test.go +++ b/cmd/timoni/bundle_delete_test.go @@ -233,7 +233,6 @@ runtime: { kubeContext: "envtest" } } - values: [] } ` diff --git a/cmd/timoni/bundle_status_test.go b/cmd/timoni/bundle_status_test.go index 64a1b16c..5833a951 100644 --- a/cmd/timoni/bundle_status_test.go +++ b/cmd/timoni/bundle_status_test.go @@ -224,7 +224,6 @@ runtime: { kubeContext: "envtest" } } - values: [] } ` diff --git a/internal/engine/runtime_builder.go b/internal/engine/runtime_builder.go index 5947b1bb..93f01622 100644 --- a/internal/engine/runtime_builder.go +++ b/internal/engine/runtime_builder.go @@ -145,34 +145,9 @@ func (b *RuntimeBuilder) GetRuntime(v cue.Value) (*apiv1.Runtime, error) { return nil, fmt.Errorf("lookup %s failed: %w", apiv1.RuntimeName.String(), runtimeNameValue.Err()) } - runtimeValuesCue := 
v.LookupPath(cue.ParsePath(apiv1.RuntimeValuesSelector.String())) - if runtimeValuesCue.Err() != nil { - return nil, fmt.Errorf("lookup %s failed: %w", apiv1.RuntimeValuesSelector.String(), runtimeValuesCue.Err()) - } - - runtimeValues := []apiv1.RuntimeValue{} - - err = runtimeValuesCue.Decode(&runtimeValues) - if err != nil { - return nil, fmt.Errorf("values decoding failed: %w", err) - } - - var refs []apiv1.RuntimeResourceRef - - for _, rv := range runtimeValues { - ref, err := rv.ToResourceRef() - if err != nil { - return nil, fmt.Errorf("value decoding failed: %w", err) - } - - refs = append(refs, *ref) - } - clusters := []apiv1.RuntimeCluster{} - clustersCue := v.LookupPath(cue.ParsePath(apiv1.RuntimeClustersSelector.String())) if clustersCue.Err() == nil { - iter, err := clustersCue.Fields(cue.Concrete(true)) if err != nil { return nil, err @@ -196,6 +171,25 @@ func (b *RuntimeBuilder) GetRuntime(v cue.Value) (*apiv1.Runtime, error) { } } + var refs []apiv1.RuntimeResourceRef + runtimeValuesCue := v.LookupPath(cue.ParsePath(apiv1.RuntimeValuesSelector.String())) + if runtimeValuesCue.Err() == nil { + runtimeValues := []apiv1.RuntimeValue{} + err = runtimeValuesCue.Decode(&runtimeValues) + if err != nil { + return nil, fmt.Errorf("values decoding failed: %w", err) + } + + for _, rv := range runtimeValues { + ref, err := rv.ToResourceRef() + if err != nil { + return nil, fmt.Errorf("value decoding failed: %w", err) + } + + refs = append(refs, *ref) + } + } + return &apiv1.Runtime{ Name: runtimeName, Clusters: clusters, diff --git a/internal/engine/runtime_builder_test.go b/internal/engine/runtime_builder_test.go index cbf21b9f..27740528 100644 --- a/internal/engine/runtime_builder_test.go +++ b/internal/engine/runtime_builder_test.go @@ -26,7 +26,24 @@ import ( apiv1 "github.com/stefanprodan/timoni/api/v1alpha1" ) -func TestGetRuntime(t *testing.T) { +func TestRuntimeBuilder_Minimal(t *testing.T) { + g := NewWithT(t) + ctx := cuecontext.New() + + rt 
:= ` +runtime: { + apiVersion: "v1alpha1" + name: "minimal" +} +` + v := ctx.CompileString(rt) + builder := NewRuntimeBuilder(ctx, []string{}) + b, err := builder.GetRuntime(v) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(b.Name).To(BeEquivalentTo("minimal")) +} + +func TestRuntimeBuilder_Values(t *testing.T) { g := NewWithT(t) ctx := cuecontext.New() @@ -82,7 +99,7 @@ runtime: { g.Expect(b.Refs[2].Namespace).To(BeEmpty()) } -func TestGetRuntimeClusters(t *testing.T) { +func TestRuntimeBuilder_Clusters(t *testing.T) { g := NewWithT(t) ctx := cuecontext.New() @@ -108,7 +125,6 @@ runtime: { kubeContext: "us-west-1:production" } } - values: [] } ` v := ctx.CompileString(rt) From ec50aef8eb829c3962f58208d454f393a15db119 Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Wed, 22 Nov 2023 21:43:12 +0200 Subject: [PATCH 18/19] Add `clusters` spec to Runtime API docs Signed-off-by: Stefan Prodan --- docs/bundle-runtime.md | 100 ++++++++++++++++++++++++++++++++++++----- 1 file changed, 89 insertions(+), 11 deletions(-) diff --git a/docs/bundle-runtime.md b/docs/bundle-runtime.md index 5db740bf..6afe3f2a 100644 --- a/docs/bundle-runtime.md +++ b/docs/bundle-runtime.md @@ -2,7 +2,7 @@ While Timoni [Bundles](bundle.md) offer a way to specify the config values in declarative manner, not all the configuration values of an application are known ahead of time. -Some values may be available at runtime, in the Kubernetes cluster where the Bundle is applied. +Some values may be available at runtime, in the Kubernetes clusters where the Bundle is applied. For example, the API token for some backend service that your app consumes is stored in a Kubernetes Secret in-cluster. 
When installing the application with Timoni, @@ -19,6 +19,9 @@ The following is an example of a Runtime definition that extracts values from the runtime: { apiVersion: "v1alpha1" name: "production" + clusters: { + // using the cluster set in kubeconfig current context + } values: [ { query: "k8s:v1:ConfigMap:infra:aws-info" @@ -130,7 +133,13 @@ A Runtime file must contain a definition that matches the following schema: #Runtime: { apiVersion: string name: string - values: [...#RuntimeValue] + + clusters?: [string]: { + group!: string + kubeContext!: string + } + + values?: [...#RuntimeValue] } #RuntimeValue: { @@ -150,11 +159,86 @@ Currently, the only supported value is `v1alpha1`. The `name` is a required field used to identify the Runtime. +### Clusters + +The `clusters` field is for defining the target clusters and +environments (group of clusters) where a Bundle is applied. + +A cluster entry must specify the `group` and `kubeContext` fields. +The `kubeContext` value must match a context name from the `.kube/config` file. + +!!! tip "Default cluster" + + When no clusters are defined in the Runtime, Timoni will use the + current context from the kubeconfig, unless the context is specified + using the `--kube-context` flag. + +Example: + +```cue +runtime: { + apiVersion: "v1alpha1" + name: "fleet" + clusters: { + "preview-us-1": { + group: "staging" + kubeContext: "eks-us-west-2" + } + "prod-us-1": { + group: "production" + kubeContext: "eks-us-west-1" + } + "prod-eu-1": { + group: "production" + kubeContext: "eks-eu-west-1" + } + } +} +``` + +The cluster name and group can be mapped to fields in a Bundle using `@timoni()` attributes.
+ +```cue +bundle: { + _cluster: string @timoni(runtime:string:TIMONI_CLUSTER_NAME) + _env: string @timoni(runtime:string:TIMONI_CLUSTER_GROUP) + + apiVersion: "v1alpha1" + name: "apps" + instances: { + app: { + module: url: "oci://ghcr.io/stefanprodan/modules/podinfo" + namespace: "apps" + values: { + ui: message: "Hosted by \(_cluster)" + if _env == "staging" { + replicas: 1 + } + if _env == "production" { + replicas: 2 + } + } + } + } +} +``` + +When applying the above Bundle, Timoni will deploy the app instances to all the +clusters, in the order defined in the Runtime. If the apply fails on a staging cluster, +Timoni will stop the execution and not continue with production. + ### Values -The `values` array is a required field that specifies the list of Kubernetes resources and the fields to be extracted. +The `values` array is for specifying +the list of Kubernetes resources and the fields to be extracted. -A Runtime must contain at least one value with the following required fields: +#### Query + +The `values.query` is a required field that specifies the Kubernetes resource. + +The `query` field must be in the format `k8s:<apiVersion>:<kind>:<namespace>:<name>`. + +Example: ```cue runtime: { @@ -171,13 +255,7 @@ runtime: { } ``` -#### Query - -The `values.query` is a required field that specifies the Kubernetes resource. - -The `query` field must be in the format `k8s:<apiVersion>:<kind>:<namespace>:<name>`. - -If the Kubernetes resource is global, the format is `k8s:<apiVersion>:<kind>:<name>`. +If the Kubernetes resource is global, the `query` format is `k8s:<apiVersion>:<kind>:<name>`.
Example: From ef5bc720283f32b565bfb772ca106f1c0515f6d0 Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Sat, 25 Nov 2023 15:31:47 +0200 Subject: [PATCH 19/19] Add multi-cluster deployments guide to docs Signed-off-by: Stefan Prodan --- docs/bundle-multi-cluster.md | 366 +++++++++++++++++++++++++++++++++++ docs/bundle-runtime.md | 2 + mkdocs.yml | 1 + 3 files changed, 369 insertions(+) create mode 100644 docs/bundle-multi-cluster.md diff --git a/docs/bundle-multi-cluster.md b/docs/bundle-multi-cluster.md new file mode 100644 index 00000000..79fde5f6 --- /dev/null +++ b/docs/bundle-multi-cluster.md @@ -0,0 +1,366 @@ +# Multi-cluster Deployments + +Timoni offers a declarative way of managing the app delivery across environments. +The Timoni [Runtime](bundle-runtime.md) allows defining groups of clusters where apps are being deployed. +The Timoni [Bundle](bundle.md) supports customising the app configuration based on the target +environment (group of clusters) and even for a specific cluster in a group. + +```mermaid +flowchart LR + +A((User)) --> B +B(Bundle + Runtime) --> C(((Timoni))) +C --> D(Staging) +D --> E[1. Region-A] +D --> F[2. Region-B] +C--> G(Production) +G --> H[3. Region-A] +G --> I[4. Region-B] +``` + +When applying a Bundle to multiple clusters, Timoni iterates over the clusters +in the order defined in the Runtime definition. It connects to each cluster, +deploys the app changes, runs health checks and e2e tests before moving to the next cluster. 
+ +## Multi-clusters definitions + +### Runtime definition + +The following is an example of a Runtime definition containing a list of clusters: + +```cue +runtime: { + apiVersion: "v1alpha1" + name: "fleet" + clusters: { + "preview-eu-1": { + group: "staging" + kubeContext: "eks-eu-west-2" + } + "preview-us-1": { + group: "staging" + kubeContext: "eks-us-west-2" + } + "prod-eu-1": { + group: "production" + kubeContext: "eks-eu-west-1" + } + "prod-us-1": { + group: "production" + kubeContext: "eks-us-west-1" + } + } + values: [ + { + query: "k8s:v1:Namespace:default" + for: { + "CLUSTER_UID": "obj.metadata.uid" + } + }, + ] +} +``` + +A cluster entry is composed of: + +- The name of the cluster, used to uniquely identify a Kubernetes cluster. +- The group, used to denote the environment a cluster belongs to. +- The kubeContext, used to select a context from the kubeconfig file. + +!!! tip "kubeconfig" + + Note that all clusters defined in the Runtime file must have a + corresponding context in the kubeconfig file. + By default, Timoni looks for a file named `config` in the `$HOME/.kube` directory. + You can specify another kubeconfig file by setting the `KUBECONFIG` environment + variable or by setting the `--kubeconfig` flag. + +The `values` list can be used to query each cluster to extract values needed to +configure Ingress, TLS, auth, etc., during the app deployment. For more information +on how to query a cluster, please see the [runtime values doc](bundle-runtime.md#values).
+ +### Bundle definition + +The following is an example of a Bundle definition that uses the cluster attributes +to set the number of replicas to different values for staging and production: + +```cue +bundle: { + _cluster: { + name: string @timoni(runtime:string:TIMONI_CLUSTER_NAME) + group: string @timoni(runtime:string:TIMONI_CLUSTER_GROUP) + uid: string @timoni(runtime:string:CLUSTER_UID) + } + + apiVersion: "v1alpha1" + name: "apps" + instances: { + podinfo: { + module: url: "oci://ghcr.io/stefanprodan/modules/podinfo" + namespace: "apps" + values: { + ui: message: "Hosted by \(_cluster.name) id \(_cluster.uid)" + if _cluster.group == "staging" { + replicas: 1 + } + if _cluster.group == "production" { + replicas: 2 + } + } + } + } +} + +``` + +The cluster name and group, are mapped to fields in a Bundle using the following attributes: + +- `@timoni(runtime:string:TIMONI_CLUSTER_NAME)` +- `@timoni(runtime:string:TIMONI_CLUSTER_GROUP)` + +## Multi-cluster operations + +### Validation + +Build the Runtime definition to verify the connectivity to each cluster: + +=== "command" + + ```shell + timoni runtime build -f runtime.cue + ``` + +=== "output" + + ```text + r:fleet > c:preview-eu-1 > CLUSTER_UID: bc83fc97-3cb9-42ca-ae38-cc09501e01e3 + r:fleet > c:prod-eu-1 > CLUSTER_UID: 61fad037-bc8a-420e-a7b2-1d72fdc17e61 + ``` + +Print the Bundle variants to verify the final values used for each cluster: + +=== "command" + + ```shell + timoni bundle vet --print-value -f bundle.cue -r runtime.cue + ``` + +=== "output" + + ```cue + "preview-eu-1": bundle: { + apiVersion: "v1alpha1" + name: "apps" + instances: { + podinfo: { + module: { + url: "oci://ghcr.io/stefanprodan/modules/podinfo" + version: *"latest" | string + } + namespace: "apps" + values: { + test: { + enabled: true + } + replicas: 1 + ui: { + message: "Hosted by preview-eu-1 id bc83fc97-3cb9-42ca-ae38-cc09501e01e3" + } + } + } + } + } + "prod-eu-1": bundle: { + apiVersion: "v1alpha1" + name: "apps" + instances: { 
+ podinfo: { + module: { + url: "oci://ghcr.io/stefanprodan/modules/podinfo" + version: *"latest" | string + } + namespace: "apps" + values: { + test: { + enabled: true + } + replicas: 2 + ui: { + message: "Hosted by prod-eu-1 id 61fad037-bc8a-420e-a7b2-1d72fdc17e61" + } + } + } + } + } + ``` + +Perform a dry-run apply of the Bundle to review the changes across clusters: + +=== "command" + + ```shell + timoni bundle apply --dry-run -f bundle.cue -r runtime.cue + ``` + +=== "output" + + ```text + b:apps > c:preview-eu-1 > applying 1 instance(s) on staging (server dry run) + b:apps > c:preview-eu-1 > i:podinfo > applying module timoni.sh/podinfo version 6.5.3 + b:apps > c:preview-eu-1 > i:podinfo > Namespace/apps created (server dry run) + b:apps > c:preview-eu-1 > i:podinfo > ServiceAccount/apps/podinfo created (server dry run) + b:apps > c:preview-eu-1 > i:podinfo > Service/apps/podinfo created (server dry run) + b:apps > c:preview-eu-1 > i:podinfo > Deployment/apps/podinfo created (server dry run) + b:apps > c:preview-eu-1 > i:podinfo > Job/apps/podinfo-test created (server dry run) + b:apps > c:preview-eu-1 > i:podinfo > applied successfully (server dry run) + b:apps > c:preview-eu-1 > applied successfully (server dry run) + b:apps > c:prod-eu-1 > applying 1 instance(s) on production (server dry run) + b:apps > c:prod-eu-1 > i:podinfo > applying module timoni.sh/podinfo version 6.5.3 + b:apps > c:prod-eu-1 > i:podinfo > Namespace/apps created (server dry run) + b:apps > c:prod-eu-1 > i:podinfo > ServiceAccount/apps/podinfo created (server dry run) + b:apps > c:prod-eu-1 > i:podinfo > Service/apps/podinfo created (server dry run) + b:apps > c:prod-eu-1 > i:podinfo > Deployment/apps/podinfo created (server dry run) + b:apps > c:prod-eu-1 > i:podinfo > Job/apps/podinfo-test created (server dry run) + b:apps > c:prod-eu-1 > i:podinfo > applied successfully (server dry run) + b:apps > c:prod-eu-1 > applied successfully (server dry run) + ``` + +### Install and Upgrade 
+ +To install or upgrade the instances defined in the Bundle to all clusters: + +=== "command" + + ```shell + timoni bundle apply -f bundle.cue -r runtime.cue + ``` + +=== "output" + + ```text + b:apps > c:preview-eu-1 > applying 1 instance(s) on staging + b:apps > c:preview-eu-1 > i:podinfo > applying module timoni.sh/podinfo version 6.5.3 + b:apps > c:preview-eu-1 > i:podinfo > installing podinfo in namespace apps + b:apps > c:preview-eu-1 > i:podinfo > Namespace/apps created + b:apps > c:preview-eu-1 > i:podinfo > applying app + b:apps > c:preview-eu-1 > i:podinfo > ServiceAccount/apps/podinfo created + b:apps > c:preview-eu-1 > i:podinfo > Service/apps/podinfo created + b:apps > c:preview-eu-1 > i:podinfo > Deployment/apps/podinfo created + b:apps > c:preview-eu-1 > i:podinfo > app resources ready + b:apps > c:preview-eu-1 > i:podinfo > applying test + b:apps > c:preview-eu-1 > i:podinfo > Job/apps/podinfo-test created + b:apps > c:preview-eu-1 > i:podinfo > test resources ready + b:apps > c:preview-eu-1 > applied successfully in 22s + b:apps > c:prod-eu-1 > applying 1 instance(s) on production + b:apps > c:prod-eu-1 > i:podinfo > applying module timoni.sh/podinfo version 6.5.3 + b:apps > c:prod-eu-1 > i:podinfo > installing podinfo in namespace apps + b:apps > c:prod-eu-1 > i:podinfo > Namespace/apps created + b:apps > c:prod-eu-1 > i:podinfo > applying app + b:apps > c:prod-eu-1 > i:podinfo > ServiceAccount/apps/podinfo created + b:apps > c:prod-eu-1 > i:podinfo > Service/apps/podinfo created + b:apps > c:prod-eu-1 > i:podinfo > Deployment/apps/podinfo created + b:apps > c:prod-eu-1 > i:podinfo > app resources ready + b:apps > c:prod-eu-1 > i:podinfo > applying test + b:apps > c:prod-eu-1 > i:podinfo > Job/apps/podinfo-test created + b:apps > c:prod-eu-1 > i:podinfo > test resources ready + b:apps > c:prod-eu-1 > applied successfully in 44s + ``` + +Note that Timoni deploys the app instances to all the clusters, in the order +defined in the Runtime. 
If the apply fails on a staging cluster, +Timoni will stop the execution and not continue with production. + +After editing a bundle file, to review the changes that will +be made on all clusters: + +```shell +timoni bundle apply -f bundle.cue -r runtime.cue --dry-run --diff +``` + +### Status + +To list the current status of the deployed apps on all clusters: + +=== "command" + + ```shell + timoni bundle status -f bundle.cue -r runtime.cue + ``` + +=== "output" + + ```text + b:apps > c:preview-eu-1 > i:podinfo > last applied 2023-11-25T12:50:02Z + b:apps > c:preview-eu-1 > i:podinfo > module oci://ghcr.io/stefanprodan/modules/podinfo:6.5.3 + b:apps > c:preview-eu-1 > i:podinfo > digest sha256:54d38b407012ccfb42badf0974ba70f9ae229ecd38f17e8a1f4e7189283b924f + b:apps > c:preview-eu-1 > i:podinfo > container image ghcr.io/curl/curl-container/curl-multi:master + b:apps > c:preview-eu-1 > i:podinfo > container image ghcr.io/stefanprodan/podinfo:6.5.3 + b:apps > c:preview-eu-1 > i:podinfo > ServiceAccount/apps/podinfo Current - Resource is current + b:apps > c:preview-eu-1 > i:podinfo > Service/apps/podinfo Current - Service is ready + b:apps > c:preview-eu-1 > i:podinfo > Deployment/apps/podinfo Current - Deployment is available. Replicas: 1 + b:apps > c:preview-eu-1 > i:podinfo > Job/apps/podinfo-test Current - Job Completed. 
succeeded: 1/1 + b:apps > c:prod-eu-1 > i:podinfo > last applied 2023-11-25T12:50:24Z + b:apps > c:prod-eu-1 > i:podinfo > module oci://ghcr.io/stefanprodan/modules/podinfo:6.5.3 + b:apps > c:prod-eu-1 > i:podinfo > digest sha256:54d38b407012ccfb42badf0974ba70f9ae229ecd38f17e8a1f4e7189283b924f + b:apps > c:prod-eu-1 > i:podinfo > container image ghcr.io/curl/curl-container/curl-multi:master + b:apps > c:prod-eu-1 > i:podinfo > container image ghcr.io/stefanprodan/podinfo:6.5.3 + b:apps > c:prod-eu-1 > i:podinfo > ServiceAccount/apps/podinfo Current - Resource is current + b:apps > c:prod-eu-1 > i:podinfo > Service/apps/podinfo Current - Service is ready + b:apps > c:prod-eu-1 > i:podinfo > Deployment/apps/podinfo Current - Deployment is available. Replicas: 2 + b:apps > c:prod-eu-1 > i:podinfo > Job/apps/podinfo-test Current - Job Completed. succeeded: 1/1 + ``` + +Or using the bundle name: + +```shell +timoni bundle status my-bundle -r runtime.cue +``` + +### Uninstall + +To delete all deployed apps on all clusters: + +=== "command" + + ```shell + timoni bundle delete -f bundle.cue -r runtime.cue + ``` + +=== "output" + + ```text + b:apps > c:preview-eu-1 > deleting instance podinfo in namespace apps + b:apps > c:preview-eu-1 > Job/apps/podinfo-test deleted + b:apps > c:preview-eu-1 > Deployment/apps/podinfo deleted + b:apps > c:preview-eu-1 > Service/apps/podinfo deleted + b:apps > c:preview-eu-1 > ServiceAccount/apps/podinfo deleted + b:apps > c:preview-eu-1 > all resources have been deleted + b:apps > c:prod-eu-1 > deleting instance podinfo in namespace apps + b:apps > c:prod-eu-1 > Job/apps/podinfo-test deleted + b:apps > c:prod-eu-1 > Deployment/apps/podinfo deleted + b:apps > c:prod-eu-1 > Service/apps/podinfo deleted + b:apps > c:prod-eu-1 > ServiceAccount/apps/podinfo deleted + b:apps > c:prod-eu-1 > all resources have been deleted + ``` + +Or using the bundle name: + +```shell +timoni bundle delete my-bundle -r runtime.cue +``` + +### Cluster filtering + 
+To perform an apply only on a group of clusters: + +```shell +timoni bundle apply -f bundle.cue -r runtime.cue --runtime-group staging +``` + +To perform an apply only on a cluster: + +```shell +timoni bundle apply -f bundle.cue -r runtime.cue --runtime-cluster prod-eu-1 +``` + +Note that all `timoni bundle` commands support filtering by cluster name and group. diff --git a/docs/bundle-runtime.md b/docs/bundle-runtime.md index 6afe3f2a..fbb7d166 100644 --- a/docs/bundle-runtime.md +++ b/docs/bundle-runtime.md @@ -227,6 +227,8 @@ When applying the above Bundle, Timoni will deploy the app instances to all the clusters, in the order defined in the Runtime. If the apply fails on a staging cluster, Timoni will stop the execution and not continue with production. +For more details please see the [multi-cluster deployments guide](bundle-multi-cluster.md). + ### Values The `values` array is for specifying diff --git a/mkdocs.yml b/mkdocs.yml index 789e58f9..6c0506f7 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -95,6 +95,7 @@ nav: - Bundle runtime: bundle-runtime.md - Bundle distribution: bundle-distribution.md - Bundle secrets injection: bundle-secrets.md + - Multi-cluster deployments: bundle-multi-cluster.md - Module Development: - Module structure: module.md - Module distribution: module-distribution.md