diff --git a/api/v1alpha1/runtime.go b/api/v1alpha1/runtime.go index 2d377c16..e5bb1a28 100644 --- a/api/v1alpha1/runtime.go +++ b/api/v1alpha1/runtime.go @@ -27,6 +27,9 @@ const ( // RuntimeKind is the name of the Timoni runtime CUE attributes. RuntimeKind string = "runtime" + // RuntimeDefaultName is the name of the default Timoni runtime. + RuntimeDefaultName string = "_default" + // RuntimeDelimiter is the delimiter used in Timoni runtime CUE attributes. RuntimeDelimiter string = ":" @@ -36,6 +39,9 @@ const ( // RuntimeName is the CUE path for the Timoni's bundle name. RuntimeName Selector = "runtime.name" + // RuntimeClustersSelector is the CUE path for the Timoni's runtime clusters. + RuntimeClustersSelector Selector = "runtime.clusters" + // RuntimeValuesSelector is the CUE path for the Timoni's runtime values. RuntimeValuesSelector Selector = "runtime.values" ) @@ -53,7 +59,13 @@ import "strings" #Runtime: { apiVersion: string & =~"^v1alpha1$" name: string & =~"^(([A-Za-z0-9][-A-Za-z0-9_]*)?[A-Za-z0-9])?$" & strings.MaxRunes(63) & strings.MinRunes(1) - values: [...#RuntimeValue] + + clusters?: [string & =~"^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$" & strings.MaxRunes(63) & strings.MinRunes(1)]: { + group!: string + kubeContext!: string + } + + values?: [...#RuntimeValue] } ` @@ -99,10 +111,76 @@ type Runtime struct { // Name of the runtime. Name string `json:"name"` + // Clusters is the list of Kubernetes + // clusters belonging to this runtime. + Clusters []RuntimeCluster `json:"clusters"` + // Refs is the list of in-cluster resource references. Refs []RuntimeResourceRef `json:"refs"` } +// DefaultRuntime returns an empty Runtime with an unnamed +// cluster set to the specified context. +func DefaultRuntime(kubeContext string) *Runtime { + defaultCluster := RuntimeCluster{ + Name: RuntimeDefaultName, + Group: RuntimeDefaultName, + KubeContext: kubeContext, + } + + return &Runtime{ + Name: RuntimeDefaultName, + Clusters: []RuntimeCluster{defaultCluster}, + Refs: []RuntimeResourceRef{}, + } +} + +// RuntimeCluster holds the reference to a Kubernetes cluster. +type RuntimeCluster struct { + // Name of the cluster. + Name string `json:"name"` + + // Group name of the cluster. + Group string `json:"group"` + + // KubeContext is the name of kubeconfig context for this cluster. + KubeContext string `json:"kubeContext"` +} + +// IsDefault returns true if the given cluster +// was initialised by a Runtime with no target clusters. +func (rt *RuntimeCluster) IsDefault() bool { + return rt.Name == RuntimeDefaultName +} + +// NameGroupValues returns the cluster name and group variables +// as specified in the Runtime definition. If the given cluster +// was initialised by an empty Runtime, the returned map is empty. +func (rt *RuntimeCluster) NameGroupValues() map[string]string { + result := make(map[string]string) + if !rt.IsDefault() { + result["TIMONI_CLUSTER_NAME"] = rt.Name + result["TIMONI_CLUSTER_GROUP"] = rt.Group + } + return result +} + +// SelectClusters returns the clusters matching the specified name and group. +// Both the name and group support the '*' wildcard. 
+func (r *Runtime) SelectClusters(name, group string) []RuntimeCluster { + var result []RuntimeCluster + for _, cluster := range r.Clusters { + if name != "" && name != "*" && !strings.EqualFold(cluster.Name, name) { + continue + } + if group != "" && group != "*" && !strings.EqualFold(cluster.Group, group) { + continue + } + result = append(result, cluster) + } + return result +} + // RuntimeResourceRef holds the data needed to query the fields // of a Kubernetes resource using CUE expressions. type RuntimeResourceRef struct { diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 35a26eca..7543cc9b 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -133,6 +133,11 @@ func (in *ResourceRef) DeepCopy() *ResourceRef { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Runtime) DeepCopyInto(out *Runtime) { *out = *in + if in.Clusters != nil { + in, out := &in.Clusters, &out.Clusters + *out = make([]RuntimeCluster, len(*in)) + copy(*out, *in) + } if in.Refs != nil { in, out := &in.Refs, &out.Refs *out = make([]RuntimeResourceRef, len(*in)) @@ -167,6 +172,21 @@ func (in *RuntimeAttribute) DeepCopy() *RuntimeAttribute { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeCluster) DeepCopyInto(out *RuntimeCluster) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeCluster. +func (in *RuntimeCluster) DeepCopy() *RuntimeCluster { + if in == nil { + return nil + } + out := new(RuntimeCluster) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RuntimeResourceRef) DeepCopyInto(out *RuntimeResourceRef) { *out = *in diff --git a/cmd/timoni/bundle.go b/cmd/timoni/bundle.go index fdf3bc84..f92c1dd0 100644 --- a/cmd/timoni/bundle.go +++ b/cmd/timoni/bundle.go @@ -20,11 +20,28 @@ import ( "github.com/spf13/cobra" ) +type bundleFlags struct { + runtimeFromEnv bool + runtimeFiles []string + runtimeCluster string + runtimeClusterGroup string +} + +var bundleArgs bundleFlags + var bundleCmd = &cobra.Command{ Use: "bundle", Short: "Commands for managing bundles", } func init() { + bundleCmd.PersistentFlags().BoolVar(&bundleArgs.runtimeFromEnv, "runtime-from-env", false, + "Inject runtime values from the environment.") + bundleCmd.PersistentFlags().StringSliceVarP(&bundleArgs.runtimeFiles, "runtime", "r", nil, + "The local path to runtime.cue files.") + bundleCmd.PersistentFlags().StringVar(&bundleArgs.runtimeCluster, "runtime-cluster", "*", + "Filter runtime cluster by name.") + bundleCmd.PersistentFlags().StringVar(&bundleArgs.runtimeClusterGroup, "runtime-group", "*", + "Filter runtime clusters by group.") rootCmd.AddCommand(bundleCmd) } diff --git a/cmd/timoni/bundle_apply.go b/cmd/timoni/bundle_apply.go index ff0fd6a1..4a667430 100644 --- a/cmd/timoni/bundle_apply.go +++ b/cmd/timoni/bundle_apply.go @@ -71,8 +71,6 @@ type bundleApplyFlags struct { wait bool force bool overwriteOwnership bool - runtimeFromEnv bool - runtimeFiles []string creds flags.Credentials } @@ -92,10 +90,6 @@ func init() { "Perform a server-side apply dry run and prints the diff.") bundleApplyCmd.Flags().BoolVar(&bundleApplyArgs.wait, "wait", true, "Wait for the applied Kubernetes objects to become ready.") - bundleApplyCmd.Flags().StringSliceVarP(&bundleApplyArgs.runtimeFiles, "runtime", "r", nil, - "The local path to runtime.cue files.") - bundleApplyCmd.Flags().BoolVar(&bundleApplyArgs.runtimeFromEnv, "runtime-from-env", false, - "Inject runtime values from the environment.") bundleApplyCmd.Flags().Var(&bundleApplyArgs.creds, bundleApplyArgs.creds.Type(), bundleApplyArgs.creds.Description()) bundleCmd.AddCommand(bundleApplyCmd) } @@ -135,92 +129,115 @@ func runBundleApplyCmd(cmd *cobra.Command, _ []string) error { runtimeValues := make(map[string]string) - if bundleApplyArgs.runtimeFromEnv { + if bundleArgs.runtimeFromEnv { maps.Copy(runtimeValues, engine.GetEnv()) } - if len(bundleApplyArgs.runtimeFiles) > 0 { - rt, err := buildRuntime(bundleApplyArgs.runtimeFiles) - if err != nil { - return err - } + rt, err := buildRuntime(bundleArgs.runtimeFiles) + if err != nil { + return err + } + + clusters := rt.SelectClusters(bundleArgs.runtimeCluster, bundleArgs.runtimeClusterGroup) + if len(clusters) == 0 { + return fmt.Errorf("no cluster found") + } + ctxPull, cancel := context.WithTimeout(ctx, rootArgs.timeout) + defer cancel() + + for _, cluster := range clusters { + kubeconfigArgs.Context = &cluster.KubeContext + + clusterValues := make(map[string]string) + + // add values from env + maps.Copy(clusterValues, runtimeValues) + + // add values from cluster rm, err := runtime.NewResourceManager(kubeconfigArgs) if err != nil { return err } - reader := runtime.NewResourceReader(rm) rv, err := reader.Read(ctx, rt.Refs) if err != nil { return err } + maps.Copy(clusterValues, rv) - maps.Copy(runtimeValues, rv) - } + // add cluster info + maps.Copy(clusterValues, cluster.NameGroupValues()) - if err := bm.InitWorkspace(tmpDir, runtimeValues); err != nil { - return err - } - - v, err := bm.Build() - if err != nil { - return describeErr(tmpDir, "failed to 
build bundle", err) - } + // create cluster workspace + workspace := path.Join(tmpDir, cluster.Name) + if err := os.MkdirAll(workspace, os.ModePerm); err != nil { + return err + } - bundle, err := bm.GetBundle(v) - if err != nil { - return err - } + if err := bm.InitWorkspace(workspace, clusterValues); err != nil { + return describeErr(workspace, "failed to parse bundle", err) + } - log := LoggerBundle(cmd.Context(), bundle.Name) + v, err := bm.Build() + if err != nil { + return describeErr(tmpDir, "failed to build bundle", err) + } - if !bundleApplyArgs.overwriteOwnership { - err = bundleInstancesOwnershipConflicts(bundle.Instances) + bundle, err := bm.GetBundle(v) if err != nil { return err } - } - ctxPull, cancel := context.WithTimeout(ctx, rootArgs.timeout) - defer cancel() + log := LoggerBundle(cmd.Context(), bundle.Name, cluster.Name) - for _, instance := range bundle.Instances { - spin := StartSpinner(fmt.Sprintf("pulling %s", instance.Module.Repository)) - pullErr := fetchBundleInstanceModule(ctxPull, instance, tmpDir) - spin.Stop() - if pullErr != nil { - return pullErr + if !bundleApplyArgs.overwriteOwnership { + err = bundleInstancesOwnershipConflicts(bundle.Instances) + if err != nil { + return err + } } - } - kubeVersion, err := runtime.ServerVersion(kubeconfigArgs) - if err != nil { - return err - } - - if bundleApplyArgs.dryrun || bundleApplyArgs.diff { - log.Info(fmt.Sprintf("applying %v instance(s) %s", - len(bundle.Instances), colorizeDryRun("(server dry run)"))) - } else { - log.Info(fmt.Sprintf("applying %v instance(s)", - len(bundle.Instances))) - } + for _, instance := range bundle.Instances { + spin := StartSpinner(fmt.Sprintf("pulling %s", instance.Module.Repository)) + pullErr := fetchBundleInstanceModule(ctxPull, instance, tmpDir) + spin.Stop() + if pullErr != nil { + return pullErr + } + } - for _, instance := range bundle.Instances { - if err := applyBundleInstance(logr.NewContext(ctx, log), cuectx, instance, kubeVersion, tmpDir); err != nil { + kubeVersion, err := runtime.ServerVersion(kubeconfigArgs) + if err != nil { return err } - } - elapsed := time.Since(start) - if bundleApplyArgs.dryrun || bundleApplyArgs.diff { - log.Info(fmt.Sprintf("applied successfully %s", - colorizeDryRun("(server dry run)"))) - } else { - log.Info(fmt.Sprintf("applied successfully in %s", elapsed.Round(time.Second))) - } + startMsg := fmt.Sprintf("applying %v instance(s)", len(bundle.Instances)) + if !cluster.IsDefault() { + startMsg = fmt.Sprintf("%s on %s", startMsg, colorizeSubject(cluster.Group)) + } + + if bundleApplyArgs.dryrun || bundleApplyArgs.diff { + log.Info(fmt.Sprintf("%s %s", startMsg, colorizeDryRun("(server dry run)"))) + } else { + log.Info(startMsg) + } + for _, instance := range bundle.Instances { + instance.Cluster = cluster.Name + if err := applyBundleInstance(logr.NewContext(ctx, log), cuectx, instance, kubeVersion, tmpDir); err != nil { + return err + } + } + + elapsed := time.Since(start) + if bundleApplyArgs.dryrun || bundleApplyArgs.diff { + log.Info(fmt.Sprintf("applied successfully %s", + colorizeDryRun("(server dry run)"))) + } else { + log.Info(fmt.Sprintf("applied successfully in %s", elapsed.Round(time.Second))) + } + } return nil } @@ -258,7 +275,7 @@ func fetchBundleInstanceModule(ctx context.Context, instance *engine.BundleInsta } func applyBundleInstance(ctx context.Context, cuectx *cue.Context, instance *engine.BundleInstance, kubeVersion string, rootDir string) error { - log := LoggerBundleInstance(ctx, instance.Bundle, instance.Name) + log 
:= LoggerBundleInstance(ctx, instance.Bundle, instance.Cluster, instance.Name) modDir := path.Join(rootDir, instance.Name, "module") builder := engine.NewModuleBuilder( @@ -408,7 +425,7 @@ func applyBundleInstance(ctx context.Context, cuectx *cue.Context, instance *eng if err != nil { return err } - log.Info("resources are ready") + log.Info(fmt.Sprintf("%s resources %s", set.Name, colorizeReady("ready"))) } } @@ -439,10 +456,8 @@ func applyBundleInstance(ctx context.Context, cuectx *cue.Context, instance *eng err = rm.WaitForTermination(deletedObjects, waitOptions) spin.Stop() if err != nil { - return fmt.Errorf("wating for termination failed: %w", err) + return fmt.Errorf("waiting for termination failed: %w", err) } - - log.Info("all resources are ready") } } diff --git a/cmd/timoni/bundle_apply_test.go b/cmd/timoni/bundle_apply_test.go index da8169b4..272ea702 100644 --- a/cmd/timoni/bundle_apply_test.go +++ b/cmd/timoni/bundle_apply_test.go @@ -443,6 +443,8 @@ bundle: { values: client: enabled: true @timoni(runtime:bool:CLIENT) values: server: enabled: false @timoni(runtime:bool:ENABLED) values: domain: string @timoni(runtime:string:DOMAIN) + values: metadata: labels: "cluster": string @timoni(runtime:string:TIMONI_CLUSTER_NAME) + values: metadata: labels: "env": string @timoni(runtime:string:TIMONI_CLUSTER_GROUP) } } } @@ -452,6 +454,12 @@ bundle: { runtime: { apiVersion: "v1alpha1" name: "test" + clusters: { + "test": { + group: "testing" + kubeContext: "envtest" + } + } values: [ { query: "k8s:v1:Secret:%[1]s:%[2]s" @@ -518,6 +526,8 @@ runtime: { err = envTestClient.Get(context.Background(), client.ObjectKeyFromObject(scm), scm) g.Expect(err).ToNot(HaveOccurred()) g.Expect(scm.Data["hostname"]).To(BeEquivalentTo("test.local")) + g.Expect(scm.GetLabels()).To(HaveKeyWithValue("cluster", "test")) + g.Expect(scm.GetLabels()).To(HaveKeyWithValue("env", "testing")) }) t.Run("overrides env vars", func(t *testing.T) { @@ -557,4 +567,28 @@ runtime: { g.Expect(err).To(HaveOccurred()) g.Expect(apierrors.IsNotFound(err)).To(BeTrue()) }) + + t.Run("fails for wrong cluster name selector", func(t *testing.T) { + g := NewWithT(t) + + cmd := fmt.Sprintf("bundle apply -p main --wait -f- -r=%s --runtime-cluster=prod", + runtimePath, + ) + + _, err := executeCommandWithIn(cmd, strings.NewReader(bundleData)) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("no cluster found")) + }) + + t.Run("fails for wrong cluster group selector", func(t *testing.T) { + g := NewWithT(t) + + cmd := fmt.Sprintf("bundle apply -p main --wait -f- -r=%s --runtime-group=prod", + runtimePath, + ) + + _, err := executeCommandWithIn(cmd, strings.NewReader(bundleData)) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("no cluster found")) + }) } diff --git a/cmd/timoni/bundle_build.go b/cmd/timoni/bundle_build.go index b0621b64..3276959c 100644 --- a/cmd/timoni/bundle_build.go +++ b/cmd/timoni/bundle_build.go @@ -55,11 +55,9 @@ var bundleBuildCmd = &cobra.Command{ } type bundleBuildFlags struct { - pkg flags.Package - files []string - creds flags.Credentials - runtimeFromEnv bool - runtimeFiles []string + pkg flags.Package + files []string + creds flags.Credentials } var bundleBuildArgs bundleBuildFlags @@ -68,10 +66,6 @@ func init() { bundleBuildCmd.Flags().VarP(&bundleBuildArgs.pkg, bundleBuildArgs.pkg.Type(), bundleBuildArgs.pkg.Shorthand(), bundleBuildArgs.pkg.Description()) bundleBuildCmd.Flags().StringSliceVarP(&bundleBuildArgs.files, "file", "f", nil, "The local 
path to bundle.cue files.") - bundleBuildCmd.Flags().BoolVar(&bundleBuildArgs.runtimeFromEnv, "runtime-from-env", false, - "Inject runtime values from the environment.") - bundleBuildCmd.Flags().StringSliceVarP(&bundleBuildArgs.runtimeFiles, "runtime", "r", nil, - "The local path to runtime.cue files.") bundleBuildCmd.Flags().Var(&bundleBuildArgs.creds, bundleBuildArgs.creds.Type(), bundleBuildArgs.creds.Description()) bundleCmd.AddCommand(bundleBuildCmd) } @@ -107,19 +101,30 @@ func runBundleBuildCmd(cmd *cobra.Command, _ []string) error { runtimeValues := make(map[string]string) - if bundleBuildArgs.runtimeFromEnv { + if bundleArgs.runtimeFromEnv { maps.Copy(runtimeValues, engine.GetEnv()) } - if len(bundleBuildArgs.runtimeFiles) > 0 { + if len(bundleArgs.runtimeFiles) > 0 { kctx, cancel := context.WithTimeout(cmd.Context(), rootArgs.timeout) defer cancel() - rt, err := buildRuntime(bundleBuildArgs.runtimeFiles) + rt, err := buildRuntime(bundleArgs.runtimeFiles) if err != nil { return err } + clusters := rt.SelectClusters(bundleArgs.runtimeCluster, bundleArgs.runtimeClusterGroup) + if len(clusters) > 1 { + return fmt.Errorf("you must select a cluster with --runtime-cluster") + } + if len(clusters) == 0 { + return fmt.Errorf("no cluster found") + } + + cluster := clusters[0] + kubeconfigArgs.Context = &cluster.KubeContext + rm, err := runtime.NewResourceManager(kubeconfigArgs) if err != nil { return err @@ -132,6 +137,7 @@ func runBundleBuildCmd(cmd *cobra.Command, _ []string) error { } maps.Copy(runtimeValues, rv) + maps.Copy(runtimeValues, cluster.NameGroupValues()) } if err := bm.InitWorkspace(tmpDir, runtimeValues); err != nil { diff --git a/cmd/timoni/bundle_build_test.go b/cmd/timoni/bundle_build_test.go index 87586a76..aca6ddf8 100644 --- a/cmd/timoni/bundle_build_test.go +++ b/cmd/timoni/bundle_build_test.go @@ -135,12 +135,89 @@ bundle: g.Expect(err).ToNot(HaveOccurred()) g.Expect(found).To(BeTrue()) g.Expect(host).To(ContainSubstring("example.internal")) - }) } }) } +func Test_BundleBuild_Runtime(t *testing.T) { + g := NewWithT(t) + + bundleName := rnd("my-bundle", 5) + modPath := "testdata/module" + namespace := rnd("my-namespace", 5) + modName := rnd("my-mod", 5) + modURL := fmt.Sprintf("%s/%s", dockerRegistry, modName) + modVer := "1.0.0" + + _, err := executeCommand(fmt.Sprintf( + "mod push %s oci://%s -v %s", + modPath, + modURL, + modVer, + )) + g.Expect(err).ToNot(HaveOccurred()) + + bundleData := fmt.Sprintf(` +bundle: { + _cluster: string @timoni(runtime:string:TIMONI_CLUSTER_NAME) + + apiVersion: "v1alpha1" + name: "%[1]s" + instances: { + "\(_cluster)-app": { + module: { + url: "oci://%[2]s" + version: "%[3]s" + } + namespace: "%[4]s" + } + } +} +`, bundleName, modURL, modVer, namespace) + + runtimeCue := ` +runtime: { + apiVersion: "v1alpha1" + name: "fleet-test" + clusters: { + "staging": { + group: "staging" + kubeContext: "envtest" + } + "production": { + group: "production" + kubeContext: "envtest" + } + } + values: [] +} +` + + runtimePath := filepath.Join(t.TempDir(), "runtime.cue") + g.Expect(os.WriteFile(runtimePath, []byte(runtimeCue), 0644)).ToNot(HaveOccurred()) + + t.Run("fails for multiple clusters", func(t *testing.T) { + g := NewWithT(t) + _, err = executeCommandWithIn( + fmt.Sprintf("bundle build -f- -r %s -p main", runtimePath), + strings.NewReader(bundleData)) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("select a cluster")) + }) + + t.Run("builds for a single cluster", func(t *testing.T) { + g := NewWithT(t) + + 
output, err := executeCommandWithIn(
+			fmt.Sprintf("bundle build -f- -r %s -p main --runtime-group=production", runtimePath),
+			strings.NewReader(bundleData))
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(output).ToNot(ContainSubstring("staging-app"))
+		g.Expect(output).To(ContainSubstring("production-app"))
+	})
+}
+
 func getObjectByName(objs []*unstructured.Unstructured, name string) (*unstructured.Unstructured, error) {
 	for _, obj := range objs {
 		if obj.GetName() == name {
diff --git a/cmd/timoni/bundle_delete.go b/cmd/timoni/bundle_delete.go
index 63111e86..2b5c8a4e 100644
--- a/cmd/timoni/bundle_delete.go
+++ b/cmd/timoni/bundle_delete.go
@@ -92,44 +92,60 @@ func runBundleDelCmd(cmd *cobra.Command, args []string) error {
 		bundleDelArgs.name = args[0]
 	}
 
-	ctx, cancel := context.WithTimeout(cmd.Context(), rootArgs.timeout)
-	defer cancel()
-
-	sm, err := runtime.NewResourceManager(kubeconfigArgs)
+	rt, err := buildRuntime(bundleArgs.runtimeFiles)
 	if err != nil {
 		return err
 	}
 
-	log := LoggerBundle(ctx, bundleDelArgs.name)
-	iStorage := runtime.NewStorageManager(sm)
-
-	instances, err := iStorage.List(ctx, "", bundleDelArgs.name)
-	if err != nil {
-		return err
+	clusters := rt.SelectClusters(bundleArgs.runtimeCluster, bundleArgs.runtimeClusterGroup)
+	if len(clusters) == 0 {
+		return fmt.Errorf("no cluster found")
 	}
 
-	if len(instances) == 0 {
-		return fmt.Errorf("no instances found in bundle")
-	}
+	ctx, cancel := context.WithTimeout(context.Background(), rootArgs.timeout)
+	defer cancel()
+
+	for _, cluster := range clusters {
+		kubeconfigArgs.Context = &cluster.KubeContext
 
-	// delete in revers order (last installed, first to uninstall)
-	for index := len(instances) - 1; index >= 0; index-- {
-		instance := instances[index]
-		log.Info(fmt.Sprintf("deleting instance %s in namespace %s",
-			colorizeSubject(instance.Name), colorizeSubject(instance.Namespace)))
-		if err := deleteBundleInstance(ctx, &engine.BundleInstance{
-			Bundle:    bundleDelArgs.name,
-			Name:      instance.Name,
-			Namespace: instance.Namespace,
-		}, bundleDelArgs.wait, bundleDelArgs.dryrun); err != nil {
+		rm, err := runtime.NewResourceManager(kubeconfigArgs)
+		if err != nil {
+			return err
+		}
+
+		sm := runtime.NewStorageManager(rm)
+		instances, err := sm.List(ctx, "", bundleDelArgs.name)
+		if err != nil {
 			return err
 		}
+
+		log := LoggerBundle(ctx, bundleDelArgs.name, cluster.Name)
+
+		if len(instances) == 0 {
+			log.Error(nil, "no instances found in bundle")
+			continue
+		}
+
+		// delete in reverse order (last installed, first to uninstall)
+		for index := len(instances) - 1; index >= 0; index-- {
+			instance := instances[index]
+			log.Info(fmt.Sprintf("deleting instance %s in namespace %s",
+				colorizeSubject(instance.Name), colorizeSubject(instance.Namespace)))
+			if err := deleteBundleInstance(ctx, &engine.BundleInstance{
+				Bundle:    bundleDelArgs.name,
+				Cluster:   cluster.Name,
+				Name:      instance.Name,
+				Namespace: instance.Namespace,
+			}, bundleDelArgs.wait, bundleDelArgs.dryrun); err != nil {
+				return err
+			}
+		}
 	}
 
 	return nil
 }
 
 func deleteBundleInstance(ctx context.Context, instance *engine.BundleInstance, wait bool, dryrun bool) error {
-	log := LoggerBundle(ctx, instance.Bundle)
+	log := LoggerBundle(ctx, instance.Bundle, instance.Cluster)
 
 	sm, err := runtime.NewResourceManager(kubeconfigArgs)
 	if err != nil {
diff --git a/cmd/timoni/bundle_delete_test.go b/cmd/timoni/bundle_delete_test.go
index f930f6f6..9053ced3 100644
--- a/cmd/timoni/bundle_delete_test.go
+++ b/cmd/timoni/bundle_delete_test.go
@@ -19,6 +19,8 @@ package main
 import
( "context" "fmt" + "os" + "path/filepath" "strings" "testing" @@ -180,3 +182,100 @@ bundle: { g.Expect(errors.IsNotFound(err)).To(BeTrue()) }) } + +func Test_BundleDelete_Runtime(t *testing.T) { + g := NewWithT(t) + + bundleName := rnd("my-bundle", 5) + modPath := "testdata/module" + namespace := rnd("my-namespace", 5) + modName := rnd("my-mod", 5) + modURL := fmt.Sprintf("%s/%s", dockerRegistry, modName) + modVer := "1.0.0" + + _, err := executeCommand(fmt.Sprintf( + "mod push %s oci://%s -v %s", + modPath, + modURL, + modVer, + )) + g.Expect(err).ToNot(HaveOccurred()) + + bundleData := fmt.Sprintf(` +bundle: { + _cluster: string @timoni(runtime:string:TIMONI_CLUSTER_NAME) + + apiVersion: "v1alpha1" + name: "%[1]s" + instances: { + "\(_cluster)-app": { + module: { + url: "oci://%[2]s" + version: "%[3]s" + } + namespace: "%[4]s" + } + } +} +`, bundleName, modURL, modVer, namespace) + + runtimeCue := ` +runtime: { + apiVersion: "v1alpha1" + name: "fleet-test" + clusters: { + "staging": { + group: "staging" + kubeContext: "envtest" + } + "production": { + group: "production" + kubeContext: "envtest" + } + } +} +` + + runtimePath := filepath.Join(t.TempDir(), "runtime.cue") + g.Expect(os.WriteFile(runtimePath, []byte(runtimeCue), 0644)).ToNot(HaveOccurred()) + + _, err = executeCommandWithIn( + fmt.Sprintf("bundle apply -f- -r %s -p main --wait", runtimePath), + strings.NewReader(bundleData)) + g.Expect(err).ToNot(HaveOccurred()) + + stagingCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "staging-app-server", + Namespace: namespace, + }, + } + + productionCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "production-app-server", + Namespace: namespace, + }, + } + + err = envTestClient.Get(context.Background(), client.ObjectKeyFromObject(stagingCM), stagingCM) + g.Expect(err).ToNot(HaveOccurred()) + + err = envTestClient.Get(context.Background(), client.ObjectKeyFromObject(productionCM), productionCM) + g.Expect(err).ToNot(HaveOccurred()) + + t.Run("deletes instances across clusters", func(t *testing.T) { + g := NewWithT(t) + + output, err := executeCommand(fmt.Sprintf("bundle delete %s -r %s --wait", bundleName, runtimePath)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(output).To(ContainSubstring("staging-app")) + g.Expect(output).To(ContainSubstring("production-app")) + + err = envTestClient.Get(context.Background(), client.ObjectKeyFromObject(stagingCM), stagingCM) + g.Expect(errors.IsNotFound(err)).To(BeTrue()) + + err = envTestClient.Get(context.Background(), client.ObjectKeyFromObject(productionCM), productionCM) + g.Expect(errors.IsNotFound(err)).To(BeTrue()) + }) +} diff --git a/cmd/timoni/bundle_status.go b/cmd/timoni/bundle_status.go index ebed7d03..13e101b3 100644 --- a/cmd/timoni/bundle_status.go +++ b/cmd/timoni/bundle_status.go @@ -75,65 +75,90 @@ func runBundleStatusCmd(cmd *cobra.Command, args []string) error { bundleStatusArgs.name = args[0] } - rm, err := runtime.NewResourceManager(kubeconfigArgs) + rt, err := buildRuntime(bundleArgs.runtimeFiles) if err != nil { return err } + clusters := rt.SelectClusters(bundleArgs.runtimeCluster, bundleArgs.runtimeClusterGroup) + if len(clusters) == 0 { + return fmt.Errorf("no cluster found") + } + ctx, cancel := context.WithTimeout(context.Background(), rootArgs.timeout) defer cancel() - sm := runtime.NewStorageManager(rm) - instances, err := sm.List(ctx, "", bundleStatusArgs.name) - if err != nil { - return err - } + failed := false + for _, cluster := range clusters { + kubeconfigArgs.Context = 
&cluster.KubeContext - if len(instances) == 0 { - return fmt.Errorf("no instances found in bundle") - } + rm, err := runtime.NewResourceManager(kubeconfigArgs) + if err != nil { + return err + } - for _, instance := range instances { - log := LoggerBundleInstance(ctx, bundleStatusArgs.name, instance.Name) + sm := runtime.NewStorageManager(rm) + instances, err := sm.List(ctx, "", bundleStatusArgs.name) + if err != nil { + return err + } - log.Info(fmt.Sprintf("last applied %s", - colorizeSubject(instance.LastTransitionTime))) - log.Info(fmt.Sprintf("module %s", - colorizeSubject(instance.Module.Repository+":"+instance.Module.Version))) - log.Info(fmt.Sprintf("digest %s", - colorizeSubject(instance.Module.Digest))) + log := LoggerBundle(ctx, bundleStatusArgs.name, cluster.Name) - for _, image := range instance.Images { - log.Info(fmt.Sprintf("container image %s", - colorizeSubject(image))) + if len(instances) == 0 { + log.Error(nil, "no instances found in bundle") + failed = true + continue } - im := runtime.InstanceManager{Instance: apiv1.Instance{Inventory: instance.Inventory}} + for _, instance := range instances { + log := LoggerBundleInstance(ctx, bundleStatusArgs.name, cluster.Name, instance.Name) - objects, err := im.ListObjects() - if err != nil { - return err - } + log.Info(fmt.Sprintf("last applied %s", + colorizeSubject(instance.LastTransitionTime))) + log.Info(fmt.Sprintf("module %s", + colorizeSubject(instance.Module.Repository+":"+instance.Module.Version))) + log.Info(fmt.Sprintf("digest %s", + colorizeSubject(instance.Module.Digest))) + + for _, image := range instance.Images { + log.Info(fmt.Sprintf("container image %s", + colorizeSubject(image))) + } - for _, obj := range objects { - err = rm.Client().Get(ctx, client.ObjectKeyFromObject(obj), obj) + im := runtime.InstanceManager{Instance: apiv1.Instance{Inventory: instance.Inventory}} + + objects, err := im.ListObjects() if err != nil { - if apierrors.IsNotFound(err) { - log.Error(err, colorizeJoin(obj, errors.New("NotFound"))) + return err + } + + for _, obj := range objects { + err = rm.Client().Get(ctx, client.ObjectKeyFromObject(obj), obj) + if err != nil { + if apierrors.IsNotFound(err) { + log.Error(err, colorizeJoin(obj, errors.New("NotFound"))) + failed = true + + continue + } + log.Error(err, colorizeJoin(obj, errors.New("Unknown"))) + failed = true continue } - log.Error(err, colorizeJoin(obj, errors.New("Unknown"))) - continue - } - res, err := status.Compute(obj) - if err != nil { - log.Error(err, colorizeJoin(obj, errors.New("Failed"))) - continue + res, err := status.Compute(obj) + if err != nil { + log.Error(err, colorizeJoin(obj, errors.New("Failed"))) + failed = true + continue + } + log.Info(colorizeJoin(obj, res.Status, "-", res.Message)) } - log.Info(colorizeJoin(obj, res.Status, "-", res.Message)) } } - + if failed { + return fmt.Errorf("completed with errors") + } return nil } diff --git a/cmd/timoni/bundle_status_test.go b/cmd/timoni/bundle_status_test.go index be3c8be8..5833a951 100644 --- a/cmd/timoni/bundle_status_test.go +++ b/cmd/timoni/bundle_status_test.go @@ -19,6 +19,8 @@ package main import ( "context" "fmt" + "os" + "path/filepath" "strings" "testing" @@ -108,7 +110,7 @@ bundle: { g.Expect(err).ToNot(HaveOccurred()) output, err := executeCommand(fmt.Sprintf("bundle status %s", bundleName)) - g.Expect(err).ToNot(HaveOccurred()) + g.Expect(err).To(HaveOccurred()) g.Expect(output).To(ContainSubstring(fmt.Sprintf("ConfigMap/%s/frontend-client Current", namespace))) 
g.Expect(output).To(ContainSubstring(fmt.Sprintf("ConfigMap/%s/backend-server NotFound", namespace))) }) @@ -171,3 +173,74 @@ bundle: { g.Expect(output).ToNot(ContainSubstring("timoni:latest-dev@sha")) }) } + +func Test_BundleStatus_Runtime(t *testing.T) { + g := NewWithT(t) + + bundleName := rnd("my-bundle", 5) + modPath := "testdata/module" + namespace := rnd("my-namespace", 5) + modName := rnd("my-mod", 5) + modURL := fmt.Sprintf("%s/%s", dockerRegistry, modName) + modVer := "1.0.0" + + _, err := executeCommand(fmt.Sprintf( + "mod push %s oci://%s -v %s", + modPath, + modURL, + modVer, + )) + g.Expect(err).ToNot(HaveOccurred()) + + bundleData := fmt.Sprintf(` +bundle: { + _cluster: string @timoni(runtime:string:TIMONI_CLUSTER_NAME) + + apiVersion: "v1alpha1" + name: "%[1]s" + instances: { + "\(_cluster)-app": { + module: { + url: "oci://%[2]s" + version: "%[3]s" + } + namespace: "%[4]s" + } + } +} +`, bundleName, modURL, modVer, namespace) + + runtimeCue := ` +runtime: { + apiVersion: "v1alpha1" + name: "fleet-test" + clusters: { + "staging": { + group: "staging" + kubeContext: "envtest" + } + "production": { + group: "production" + kubeContext: "envtest" + } + } +} +` + + runtimePath := filepath.Join(t.TempDir(), "runtime.cue") + g.Expect(os.WriteFile(runtimePath, []byte(runtimeCue), 0644)).ToNot(HaveOccurred()) + + _, err = executeCommandWithIn( + fmt.Sprintf("bundle apply -f- -r %s -p main --wait", runtimePath), + strings.NewReader(bundleData)) + g.Expect(err).ToNot(HaveOccurred()) + + t.Run("lists instances across clusters", func(t *testing.T) { + g := NewWithT(t) + + output, err := executeCommand(fmt.Sprintf("bundle status %s -r %s", bundleName, runtimePath)) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(output).To(ContainSubstring("staging-app")) + g.Expect(output).To(ContainSubstring("production-app")) + }) +} diff --git a/cmd/timoni/bundle_vet.go b/cmd/timoni/bundle_vet.go index 849b262b..0ef5760e 100644 --- a/cmd/timoni/bundle_vet.go +++ b/cmd/timoni/bundle_vet.go @@ -21,6 +21,7 @@ import ( "fmt" "maps" "os" + "path" "cuelang.org/go/cue" "cuelang.org/go/cue/cuecontext" @@ -60,11 +61,9 @@ with Timoni's schema and optionally prints the computed value. 
} type bundleVetFlags struct { - pkg flags.Package - files []string - runtimeFromEnv bool - runtimeFiles []string - printValue bool + pkg flags.Package + files []string + printValue bool } var bundleVetArgs bundleVetFlags @@ -73,10 +72,6 @@ func init() { bundleVetCmd.Flags().VarP(&bundleVetArgs.pkg, bundleVetArgs.pkg.Type(), bundleVetArgs.pkg.Shorthand(), bundleVetArgs.pkg.Description()) bundleVetCmd.Flags().StringSliceVarP(&bundleVetArgs.files, "file", "f", nil, "The local path to bundle.cue files.") - bundleVetCmd.Flags().BoolVar(&bundleVetArgs.runtimeFromEnv, "runtime-from-env", false, - "Inject runtime values from the environment.") - bundleVetCmd.Flags().StringSliceVarP(&bundleVetArgs.runtimeFiles, "runtime", "r", nil, - "The local path to runtime.cue files.") bundleVetCmd.Flags().BoolVar(&bundleVetArgs.printValue, "print-value", false, "Print the computed value of the bundle.") bundleCmd.AddCommand(bundleVetCmd) @@ -114,69 +109,98 @@ func runBundleVetCmd(cmd *cobra.Command, args []string) error { runtimeValues := make(map[string]string) - if bundleVetArgs.runtimeFromEnv { + if bundleArgs.runtimeFromEnv { maps.Copy(runtimeValues, engine.GetEnv()) } - if len(bundleVetArgs.runtimeFiles) > 0 { - kctx, cancel := context.WithTimeout(cmd.Context(), rootArgs.timeout) - defer cancel() + rt, err := buildRuntime(bundleArgs.runtimeFiles) + if err != nil { + return err + } - rt, err := buildRuntime(bundleVetArgs.runtimeFiles) - if err != nil { - return err - } + clusters := rt.SelectClusters(bundleArgs.runtimeCluster, bundleArgs.runtimeClusterGroup) + if len(clusters) == 0 { + return fmt.Errorf("no cluster found") + } + + kctx, cancel := context.WithTimeout(cmd.Context(), rootArgs.timeout) + defer cancel() + + for _, cluster := range clusters { + kubeconfigArgs.Context = &cluster.KubeContext + + clusterValues := make(map[string]string) + // add values from env + maps.Copy(clusterValues, runtimeValues) + + // add values from cluster rm, err := runtime.NewResourceManager(kubeconfigArgs) if err != nil { return err } - reader := runtime.NewResourceReader(rm) rv, err := reader.Read(kctx, rt.Refs) if err != nil { return err } + maps.Copy(clusterValues, rv) - maps.Copy(runtimeValues, rv) - } - - if err := bm.InitWorkspace(tmpDir, runtimeValues); err != nil { - return describeErr(tmpDir, "failed to parse bundle", err) - } + // add cluster info + maps.Copy(clusterValues, cluster.NameGroupValues()) - v, err := bm.Build() - if err != nil { - return describeErr(tmpDir, "failed to build bundle", err) - } + // create cluster workspace + workspace := path.Join(tmpDir, cluster.Name) + if err := os.MkdirAll(workspace, os.ModePerm); err != nil { + return err + } - bundle, err := bm.GetBundle(v) - if err != nil { - return err - } - log = LoggerBundle(logr.NewContext(cmd.Context(), log), bundle.Name) + if err := bm.InitWorkspace(workspace, clusterValues); err != nil { + return describeErr(workspace, "failed to parse bundle", err) + } - if len(bundle.Instances) == 0 { - return fmt.Errorf("no instances found in bundle") - } + v, err := bm.Build() + if err != nil { + return describeErr(workspace, "failed to build bundle", err) + } - if bundleVetArgs.printValue { - val := v.LookupPath(cue.ParsePath("bundle")) - if val.Err() != nil { + bundle, err := bm.GetBundle(v) + if err != nil { return err } - _, err := rootCmd.OutOrStdout().Write([]byte(fmt.Sprintf("bundle: %v\n", val))) - return err - } - for _, i := range bundle.Instances { - if i.Namespace == "" { - return fmt.Errorf("instance %s does not have a namespace", 
i.Name) + log = LoggerBundle(logr.NewContext(cmd.Context(), log), bundle.Name, apiv1.RuntimeDefaultName) + + if len(bundle.Instances) == 0 { + return fmt.Errorf("no instances found in bundle") + } + + if bundleVetArgs.printValue { + val := v.LookupPath(cue.ParsePath("bundle")) + if val.Err() != nil { + return err + } + bundleCue := fmt.Sprintf("bundle: %v\n", val) + if !cluster.IsDefault() { + bundleCue = fmt.Sprintf("\"%s\": bundle: %v\n", cluster.Name, val) + } + _, err := rootCmd.OutOrStdout().Write([]byte(bundleCue)) + if err != nil { + return err + } + } else { + for _, i := range bundle.Instances { + if i.Namespace == "" { + return fmt.Errorf("instance %s does not have a namespace", i.Name) + } + log := LoggerBundleInstance(logr.NewContext(cmd.Context(), log), bundle.Name, cluster.Name, i.Name) + log.Info("instance is valid") + } } - log := LoggerBundleInstance(logr.NewContext(cmd.Context(), log), bundle.Name, i.Name) - log.Info("instance is valid") } - log.Info("bundle is valid") + if !bundleVetArgs.printValue { + log.Info("bundle is valid") + } return nil } diff --git a/cmd/timoni/bundle_vet_test.go b/cmd/timoni/bundle_vet_test.go index b9120482..9b14bc42 100644 --- a/cmd/timoni/bundle_vet_test.go +++ b/cmd/timoni/bundle_vet_test.go @@ -292,3 +292,117 @@ bundle: g.Expect(err).ToNot(HaveOccurred()) g.Expect(output).To(BeEquivalentTo(bundleComputed)) } + +func Test_BundleVet_Clusters(t *testing.T) { + g := NewWithT(t) + + bundleCue := ` +bundle: { + _cluster: "dev" @timoni(runtime:string:TIMONI_CLUSTER_NAME) + _env: "dev" @timoni(runtime:string:TIMONI_CLUSTER_GROUP) + + apiVersion: "v1alpha1" + name: "fleet-test" + instances: { + "frontend": { + module: { + url: "oci://ghcr.io/stefanprodan/timoni/minimal" + version: "latest" + } + namespace: "fleet-test" + values: { + message: "Hello from cluster \(_cluster)" + test: enabled: true + + if _env == "staging" { + replicas: 2 + } + + if _env == "production" { + replicas: 3 + } + } + } + } +} +` + runtimeCue := ` +runtime: { + apiVersion: "v1alpha1" + name: "fleet-test" + clusters: { + "staging": { + group: "staging" + kubeContext: "envtest" + } + "production": { + group: "production" + kubeContext: "envtest" + } + } + values: [ + { + query: "k8s:v1:Namespace:kube-system" + for: { + "CLUSTER_UID": "obj.metadata.uid" + } + }, + ] +} +` + + bundleComputed := `"staging": bundle: { + apiVersion: "v1alpha1" + name: "fleet-test" + instances: { + frontend: { + module: { + url: "oci://ghcr.io/stefanprodan/timoni/minimal" + version: "latest" + } + namespace: "fleet-test" + values: { + message: "Hello from cluster staging" + replicas: 2 + test: { + enabled: true + } + } + } + } +} +"production": bundle: { + apiVersion: "v1alpha1" + name: "fleet-test" + instances: { + frontend: { + module: { + url: "oci://ghcr.io/stefanprodan/timoni/minimal" + version: "latest" + } + namespace: "fleet-test" + values: { + message: "Hello from cluster production" + replicas: 3 + test: { + enabled: true + } + } + } + } +} +` + wd := t.TempDir() + bundlePath := filepath.Join(wd, "bundle.cue") + g.Expect(os.WriteFile(bundlePath, []byte(bundleCue), 0644)).ToNot(HaveOccurred()) + + runtimePath := filepath.Join(wd, "runtime.cue") + g.Expect(os.WriteFile(runtimePath, []byte(runtimeCue), 0644)).ToNot(HaveOccurred()) + + output, err := executeCommand(fmt.Sprintf( + "bundle vet -f %s -r %s -p main --print-value", + bundlePath, runtimePath, + )) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(output).To(BeEquivalentTo(bundleComputed)) +} diff --git a/cmd/timoni/log.go 
b/cmd/timoni/log.go index 1653a41f..7d7d607e 100644 --- a/cmd/timoni/log.go +++ b/cmd/timoni/log.go @@ -34,6 +34,8 @@ import ( "github.com/rs/zerolog" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" runtimeLog "sigs.k8s.io/controller-runtime/pkg/log" + + apiv1 "github.com/stefanprodan/timoni/api/v1alpha1" ) // NewConsoleLogger returns a human-friendly Logger. @@ -66,6 +68,7 @@ func NewConsoleLogger() logr.Logger { var ( colorDryRun = color.New(color.FgHiBlack, color.Italic) colorError = color.New(color.FgHiRed) + colorReady = color.New(color.FgHiGreen) colorCallerPrefix = color.New(color.FgHiBlack) colorBundle = color.New(color.FgHiMagenta) colorInstance = color.New(color.FgHiMagenta) @@ -132,6 +135,10 @@ func colorizeSubject(subject string) string { return color.CyanString(subject) } +func colorizeReady(subject string) string { + return colorReady.Sprint(subject) +} + func colorizeInfo(subject string) string { return color.GreenString(subject) } @@ -190,11 +197,27 @@ func colorizeRuntime(runtime string) string { return colorCallerPrefix.Sprint("r:") + colorInstance.Sprint(runtime) } -func LoggerBundle(ctx context.Context, bundle string) logr.Logger { - if !rootArgs.prettyLog { - return LoggerFrom(ctx, "bundle", bundle) +func colorizeCluster(cluster string) string { + return colorCallerPrefix.Sprint("c:") + colorInstance.Sprint(cluster) +} + +func LoggerBundle(ctx context.Context, bundle, cluster string) logr.Logger { + switch cluster { + case apiv1.RuntimeDefaultName: + if !rootArgs.prettyLog { + return LoggerFrom(ctx, "bundle", bundle) + } + return LoggerFrom(ctx, "caller", colorizeBundle(bundle)) + default: + if !rootArgs.prettyLog { + return LoggerFrom(ctx, "bundle", bundle, "cluster", cluster) + } + return LoggerFrom(ctx, "caller", + fmt.Sprintf("%s %s %s", + colorizeBundle(bundle), + color.CyanString(">"), + colorizeCluster(cluster))) } - return LoggerFrom(ctx, "caller", colorizeBundle(bundle)) } func LoggerInstance(ctx context.Context, instance string) logr.Logger { @@ -204,18 +227,47 @@ func LoggerInstance(ctx context.Context, instance string) logr.Logger { return LoggerFrom(ctx, "caller", colorizeInstance(instance)) } -func LoggerBundleInstance(ctx context.Context, bundle, instance string) logr.Logger { - if !rootArgs.prettyLog { - return LoggerFrom(ctx, "bundle", bundle, "instance", instance) +func LoggerBundleInstance(ctx context.Context, bundle, cluster, instance string) logr.Logger { + switch cluster { + case apiv1.RuntimeDefaultName: + if !rootArgs.prettyLog { + return LoggerFrom(ctx, "bundle", bundle, "instance", instance) + } + return LoggerFrom(ctx, "caller", + fmt.Sprintf("%s %s %s", + colorizeBundle(bundle), + color.CyanString(">"), + colorizeInstance(instance))) + default: + if !rootArgs.prettyLog { + return LoggerFrom(ctx, "bundle", bundle, "cluster", cluster, "instance", instance) + } + return LoggerFrom(ctx, "caller", + fmt.Sprintf("%s %s %s %s %s", + colorizeBundle(bundle), + color.CyanString(">"), + colorizeCluster(cluster), + color.CyanString(">"), + colorizeInstance(instance))) + } - return LoggerFrom(ctx, "caller", fmt.Sprintf("%s %s %s", colorizeBundle(bundle), color.CyanString(">"), colorizeInstance(instance))) } -func LoggerRuntime(ctx context.Context, runtime string) logr.Logger { - if !rootArgs.prettyLog { - return LoggerFrom(ctx, "runtime", runtime) +func LoggerRuntime(ctx context.Context, runtime, cluster string) logr.Logger { + switch cluster { + case apiv1.RuntimeDefaultName: + if !rootArgs.prettyLog { + return LoggerFrom(ctx, 
"runtime", runtime) + } + return LoggerFrom(ctx, "caller", colorizeRuntime(runtime)) + default: + if !rootArgs.prettyLog { + return LoggerFrom(ctx, "runtime", runtime, "cluster", cluster) + } + return LoggerFrom(ctx, "caller", + fmt.Sprintf("%s %s %s", colorizeRuntime(runtime), + color.CyanString(">"), colorizeCluster(cluster))) } - return LoggerFrom(ctx, "caller", colorizeRuntime(runtime)) } // LoggerFrom returns a logr.Logger with predefined values from a context.Context. diff --git a/cmd/timoni/main_test.go b/cmd/timoni/main_test.go index 9289ea23..301e5e04 100644 --- a/cmd/timoni/main_test.go +++ b/cmd/timoni/main_test.go @@ -128,6 +128,7 @@ func resetCmdArgs() { listArgs = listFlags{} pullModArgs = pullModFlags{} pushModArgs = pushModFlags{} + bundleArgs = bundleFlags{} bundleApplyArgs = bundleApplyFlags{} bundleVetArgs = bundleVetFlags{} bundleDelArgs = bundleDelFlags{} @@ -136,6 +137,7 @@ func resetCmdArgs() { vendorK8sArgs = vendorK8sFlags{} pushArtifactArgs = pushArtifactFlags{} pullArtifactArgs = pullArtifactFlags{} + runtimeBuildArgs = runtimeBuildFlags{} } func rnd(prefix string, n int) string { diff --git a/cmd/timoni/runtime_build.go b/cmd/timoni/runtime_build.go index 32aee668..3ab38feb 100644 --- a/cmd/timoni/runtime_build.go +++ b/cmd/timoni/runtime_build.go @@ -41,7 +41,9 @@ var runtimeBuildCmd = &cobra.Command{ } type runtimeBuildFlags struct { - files []string + files []string + clusterSelector string + clusterGroupSelector string } var runtimeBuildArgs runtimeBuildFlags @@ -49,6 +51,10 @@ var runtimeBuildArgs runtimeBuildFlags func init() { runtimeBuildCmd.Flags().StringSliceVarP(&runtimeBuildArgs.files, "file", "f", nil, "The local path to runtime.cue files.") + runtimeBuildCmd.Flags().StringVar(&runtimeBuildArgs.clusterSelector, "cluster", "*", + "Select cluster by name.") + runtimeBuildCmd.Flags().StringVar(&runtimeBuildArgs.clusterGroupSelector, "cluster-group", "*", + "Select clusters by group name.") runtimeCmd.AddCommand(runtimeBuildCmd) } @@ -77,38 +83,55 @@ func runRuntimeBuildCmd(cmd *cobra.Command, args []string) error { return err } - log := LoggerRuntime(cmd.Context(), rt.Name) - ctx, cancel := context.WithTimeout(cmd.Context(), rootArgs.timeout) defer cancel() - rm, err := runtime.NewResourceManager(kubeconfigArgs) - if err != nil { - return err + clusters := rt.SelectClusters(runtimeBuildArgs.clusterSelector, runtimeBuildArgs.clusterGroupSelector) + if len(clusters) == 0 { + return fmt.Errorf("no cluster found") } - reader := runtime.NewResourceReader(rm) + for _, cluster := range clusters { + log := LoggerRuntime(cmd.Context(), rt.Name, cluster.Name) - values, err := reader.Read(ctx, rt.Refs) - if err != nil { - return err - } + kubeconfigArgs.Context = &cluster.KubeContext + rm, err := runtime.NewResourceManager(kubeconfigArgs) + if err != nil { + return err + } - keys := make([]string, 0, len(values)) + reader := runtime.NewResourceReader(rm) - for k := range values { - keys = append(keys, k) - } - sort.Strings(keys) + values, err := reader.Read(ctx, rt.Refs) + if err != nil { + return err + } + + keys := make([]string, 0, len(values)) + + for k := range values { + keys = append(keys, k) + } + sort.Strings(keys) - for _, k := range keys { - log.Info(fmt.Sprintf("%s: %s", colorizeSubject(k), values[k])) + for _, k := range keys { + log.Info(fmt.Sprintf("%s: %s", colorizeSubject(k), values[k])) + } + + if len(values) == 0 { + log.Info("no values defined") + } } return nil } func buildRuntime(files []string) (*apiv1.Runtime, error) { + defaultRuntime := 
apiv1.DefaultRuntime(*kubeconfigArgs.Context) + if len(files) == 0 { + return defaultRuntime, nil + } + tmpDir, err := os.MkdirTemp("", apiv1.FieldManager) if err != nil { return nil, err @@ -127,5 +150,13 @@ func buildRuntime(files []string) (*apiv1.Runtime, error) { return nil, describeErr(tmpDir, "failed to parse runtime", err) } - return rb.GetRuntime(v) + rt, err := rb.GetRuntime(v) + if err != nil { + return nil, err + } + + if len(rt.Clusters) == 0 { + rt.Clusters = defaultRuntime.Clusters + } + return rt, nil } diff --git a/cmd/timoni/runtime_build_test.go b/cmd/timoni/runtime_build_test.go index ba615edd..a353e39d 100644 --- a/cmd/timoni/runtime_build_test.go +++ b/cmd/timoni/runtime_build_test.go @@ -17,10 +17,12 @@ limitations under the License. package main import ( + "bufio" "context" "fmt" "os" "path/filepath" + "strings" "testing" . "github.com/onsi/gomega" @@ -123,3 +125,103 @@ runtime: { g.Expect(output).To(ContainSubstring("sc.local")) }) } + +func Test_RuntimeBuild_Clusters(t *testing.T) { + g := NewWithT(t) + + runtimeData := ` +runtime: { + apiVersion: "v1alpha1" + name: "fleet" + clusters: { + "staging": { + group: "staging" + kubeContext: "envtest" + } + "production": { + group: "production" + kubeContext: "envtest" + } + } + values: [ + { + query: "k8s:v1:Namespace:kube-system" + for: { + "CLUSTER_UID": "obj.metadata.uid" + } + }, + ] +} +` + + runtimePath := filepath.Join(t.TempDir(), "runtime.cue") + err := os.WriteFile(runtimePath, []byte(runtimeData), 0644) + g.Expect(err).ToNot(HaveOccurred()) + + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kube-system", + }, + } + + err = envTestClient.Get(context.Background(), client.ObjectKeyFromObject(ns), ns) + g.Expect(err).ToNot(HaveOccurred()) + + t.Run("builds runtime for all clusters", func(t *testing.T) { + g := NewWithT(t) + + output, err := executeCommandWithIn("runtime build -f-", strings.NewReader(runtimeData)) + g.Expect(err).ToNot(HaveOccurred()) + t.Log("\n", output) + + scanner := bufio.NewScanner(strings.NewReader(output)) + var i int + for scanner.Scan() { + i++ + txt := scanner.Text() + g.Expect(txt).To(ContainSubstring(string(ns.UID))) + if i == 1 { + g.Expect(txt).To(MatchRegexp("staging.*CLUSTER_UID")) + } + if i == 2 { + g.Expect(txt).To(MatchRegexp("production.*CLUSTER_UID")) + } + } + g.Expect(scanner.Err()).ToNot(HaveOccurred()) + g.Expect(i).To(BeEquivalentTo(2)) + }) + + t.Run("builds runtime for selected cluster", func(t *testing.T) { + g := NewWithT(t) + + output, err := executeCommandWithIn("runtime build --cluster=staging -f-", strings.NewReader(runtimeData)) + g.Expect(err).ToNot(HaveOccurred()) + t.Log("\n", output) + + scanner := bufio.NewScanner(strings.NewReader(output)) + var i int + for scanner.Scan() { + i++ + g.Expect(scanner.Text()).To(MatchRegexp("staging.*CLUSTER_UID.*%s", string(ns.UID))) + } + g.Expect(scanner.Err()).ToNot(HaveOccurred()) + g.Expect(i).To(BeEquivalentTo(1)) + }) + + t.Run("builds runtime for selected group", func(t *testing.T) { + g := NewWithT(t) + + output, err := executeCommandWithIn("runtime build --cluster-group=production -f-", strings.NewReader(runtimeData)) + g.Expect(err).ToNot(HaveOccurred()) + t.Log("\n", output) + + scanner := bufio.NewScanner(strings.NewReader(output)) + var i int + for scanner.Scan() { + i++ + g.Expect(scanner.Text()).To(MatchRegexp("production.*CLUSTER_UID.*%s", string(ns.UID))) + } + g.Expect(scanner.Err()).ToNot(HaveOccurred()) + g.Expect(i).To(BeEquivalentTo(1)) + }) +} diff --git 
a/docs/bundle-multi-cluster.md b/docs/bundle-multi-cluster.md
new file mode 100644
index 00000000..79fde5f6
--- /dev/null
+++ b/docs/bundle-multi-cluster.md
@@ -0,0 +1,366 @@
+# Multi-cluster Deployments
+
+Timoni offers a declarative way of managing app delivery across environments.
+The Timoni [Runtime](bundle-runtime.md) allows defining groups of clusters where apps are deployed.
+The Timoni [Bundle](bundle.md) supports customising the app configuration based on the target
+environment (group of clusters), and even for a specific cluster in a group.
+
+```mermaid
+flowchart LR
+
+A((User)) --> B
+B(Bundle + Runtime) --> C(((Timoni)))
+C --> D(Staging)
+D --> E[1. Region-A]
+D --> F[2. Region-B]
+C --> G(Production)
+G --> H[3. Region-A]
+G --> I[4. Region-B]
+```
+
+When applying a Bundle to multiple clusters, Timoni iterates over the clusters
+in the order defined in the Runtime definition. It connects to each cluster,
+deploys the app changes, and runs health checks and e2e tests before moving to the next cluster.
+
+## Multi-cluster definitions
+
+### Runtime definition
+
+The following is an example of a Runtime definition containing a list of clusters:
+
+```cue
+runtime: {
+	apiVersion: "v1alpha1"
+	name: "fleet"
+	clusters: {
+		"preview-eu-1": {
+			group: "staging"
+			kubeContext: "eks-eu-west-2"
+		}
+		"preview-us-1": {
+			group: "staging"
+			kubeContext: "eks-us-west-2"
+		}
+		"prod-eu-1": {
+			group: "production"
+			kubeContext: "eks-eu-west-1"
+		}
+		"prod-us-1": {
+			group: "production"
+			kubeContext: "eks-us-west-1"
+		}
+	}
+	values: [
+		{
+			query: "k8s:v1:Namespace:default"
+			for: {
+				"CLUSTER_UID": "obj.metadata.uid"
+			}
+		},
+	]
+}
+```
+
+A cluster entry is composed of:
+
+- The name of the cluster, used to uniquely identify a Kubernetes cluster.
+- The group, used to denote the environment a cluster belongs to.
+- The kubeContext, used to select a context from the kubeconfig file.
+
+!!! tip "kubeconfig"
+
+    Note that all clusters defined in the Runtime file must have a
+    corresponding context in the kubeconfig file.
+    By default, Timoni looks for a file named `config` in the `$HOME/.kube` directory.
+    You can specify another kubeconfig file by setting the `KUBECONFIG` environment
+    variable or by setting the `--kubeconfig` flag.
+
+The `values` list can be used to query each cluster to extract values needed to
+configure Ingress, TLS, auth, etc., during the app deployment. For more information
+on how to query a cluster, please see the [runtime values doc](bundle-runtime.md#values).
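+
+To verify that each `kubeContext` referenced in the Runtime exists, you can list
+the context names defined in your kubeconfig with kubectl:
+
+```shell
+kubectl config get-contexts -o name
+```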
+
+### Bundle definition
+
+The following is an example of a Bundle definition that uses the cluster attributes
+to set the number of replicas to different values for staging and production:
+
+```cue
+bundle: {
+	_cluster: {
+		name: string @timoni(runtime:string:TIMONI_CLUSTER_NAME)
+		group: string @timoni(runtime:string:TIMONI_CLUSTER_GROUP)
+		uid: string @timoni(runtime:string:CLUSTER_UID)
+	}
+
+	apiVersion: "v1alpha1"
+	name: "apps"
+	instances: {
+		podinfo: {
+			module: url: "oci://ghcr.io/stefanprodan/modules/podinfo"
+			namespace: "apps"
+			values: {
+				ui: message: "Hosted by \(_cluster.name) id \(_cluster.uid)"
+				if _cluster.group == "staging" {
+					replicas: 1
+				}
+				if _cluster.group == "production" {
+					replicas: 2
+				}
+			}
+		}
+	}
+}
+```
+
+The cluster name and group are mapped to fields in a Bundle using the following attributes:
+
+- `@timoni(runtime:string:TIMONI_CLUSTER_NAME)`
+- `@timoni(runtime:string:TIMONI_CLUSTER_GROUP)`
+
+## Multi-cluster operations
+
+### Validation
+
+Build the Runtime definition to verify the connectivity to each cluster:
+
+=== "command"
+
+    ```shell
+    timoni runtime build -f runtime.cue
+    ```
+
+=== "output"
+
+    ```text
+    r:fleet > c:preview-eu-1 > CLUSTER_UID: bc83fc97-3cb9-42ca-ae38-cc09501e01e3
+    r:fleet > c:prod-eu-1 > CLUSTER_UID: 61fad037-bc8a-420e-a7b2-1d72fdc17e61
+    ```
+
+Print the Bundle variants to verify the final values used for each cluster:
+
+=== "command"
+
+    ```shell
+    timoni bundle vet --print-value -f bundle.cue -r runtime.cue
+    ```
+
+=== "output"
+
+    ```cue
+    "preview-eu-1": bundle: {
+    	apiVersion: "v1alpha1"
+    	name: "apps"
+    	instances: {
+    		podinfo: {
+    			module: {
+    				url: "oci://ghcr.io/stefanprodan/modules/podinfo"
+    				version: *"latest" | string
+    			}
+    			namespace: "apps"
+    			values: {
+    				test: {
+    					enabled: true
+    				}
+    				replicas: 1
+    				ui: {
+    					message: "Hosted by preview-eu-1 id bc83fc97-3cb9-42ca-ae38-cc09501e01e3"
+    				}
+    			}
+    		}
+    	}
+    }
+    "prod-eu-1": bundle: {
+    	apiVersion: "v1alpha1"
+    	name: "apps"
+    	instances: {
+    		podinfo: {
+    			module: {
+    				url: "oci://ghcr.io/stefanprodan/modules/podinfo"
+    				version: *"latest" | string
+    			}
+    			namespace: "apps"
+    			values: {
+    				test: {
+    					enabled: true
+    				}
+    				replicas: 2
+    				ui: {
+    					message: "Hosted by prod-eu-1 id 61fad037-bc8a-420e-a7b2-1d72fdc17e61"
+    				}
+    			}
+    		}
+    	}
+    }
+    ```
+
+Perform a dry-run apply of the Bundle to review the changes across clusters:
+
+=== "command"
+
+    ```shell
+    timoni bundle apply --dry-run -f bundle.cue -r runtime.cue
+    ```
+
+=== "output"
+
+    ```text
+    b:apps > c:preview-eu-1 > applying 1 instance(s) on staging (server dry run)
+    b:apps > c:preview-eu-1 > i:podinfo > applying module timoni.sh/podinfo version 6.5.3
+    b:apps > c:preview-eu-1 > i:podinfo > Namespace/apps created (server dry run)
+    b:apps > c:preview-eu-1 > i:podinfo > ServiceAccount/apps/podinfo created (server dry run)
+    b:apps > c:preview-eu-1 > i:podinfo > Service/apps/podinfo created (server dry run)
+    b:apps > c:preview-eu-1 > i:podinfo > Deployment/apps/podinfo created (server dry run)
+    b:apps > c:preview-eu-1 > i:podinfo > Job/apps/podinfo-test created (server dry run)
+    b:apps > c:preview-eu-1 > i:podinfo > applied successfully (server dry run)
+    b:apps > c:preview-eu-1 > applied successfully (server dry run)
+    b:apps > c:prod-eu-1 > applying 1 instance(s) on production (server dry run)
+    b:apps > c:prod-eu-1 > i:podinfo > applying module timoni.sh/podinfo version 6.5.3
+    b:apps > c:prod-eu-1 > i:podinfo > Namespace/apps created (server dry run)
+    b:apps > c:prod-eu-1 > i:podinfo > ServiceAccount/apps/podinfo
created (server dry run) + b:apps > c:prod-eu-1 > i:podinfo > Service/apps/podinfo created (server dry run) + b:apps > c:prod-eu-1 > i:podinfo > Deployment/apps/podinfo created (server dry run) + b:apps > c:prod-eu-1 > i:podinfo > Job/apps/podinfo-test created (server dry run) + b:apps > c:prod-eu-1 > i:podinfo > applied successfully (server dry run) + b:apps > c:prod-eu-1 > applied successfully (server dry run) + ``` + +### Install and Upgrade + +To install or upgrade the instances defined in the Bundle to all clusters: + +=== "command" + + ```shell + timoni bundle apply -f bundle.cue -r runtime.cue + ``` + +=== "output" + + ```text + b:apps > c:preview-eu-1 > applying 1 instance(s) on staging + b:apps > c:preview-eu-1 > i:podinfo > applying module timoni.sh/podinfo version 6.5.3 + b:apps > c:preview-eu-1 > i:podinfo > installing podinfo in namespace apps + b:apps > c:preview-eu-1 > i:podinfo > Namespace/apps created + b:apps > c:preview-eu-1 > i:podinfo > applying app + b:apps > c:preview-eu-1 > i:podinfo > ServiceAccount/apps/podinfo created + b:apps > c:preview-eu-1 > i:podinfo > Service/apps/podinfo created + b:apps > c:preview-eu-1 > i:podinfo > Deployment/apps/podinfo created + b:apps > c:preview-eu-1 > i:podinfo > app resources ready + b:apps > c:preview-eu-1 > i:podinfo > applying test + b:apps > c:preview-eu-1 > i:podinfo > Job/apps/podinfo-test created + b:apps > c:preview-eu-1 > i:podinfo > test resources ready + b:apps > c:preview-eu-1 > applied successfully in 22s + b:apps > c:prod-eu-1 > applying 1 instance(s) on production + b:apps > c:prod-eu-1 > i:podinfo > applying module timoni.sh/podinfo version 6.5.3 + b:apps > c:prod-eu-1 > i:podinfo > installing podinfo in namespace apps + b:apps > c:prod-eu-1 > i:podinfo > Namespace/apps created + b:apps > c:prod-eu-1 > i:podinfo > applying app + b:apps > c:prod-eu-1 > i:podinfo > ServiceAccount/apps/podinfo created + b:apps > c:prod-eu-1 > i:podinfo > Service/apps/podinfo created + b:apps > c:prod-eu-1 > i:podinfo > Deployment/apps/podinfo created + b:apps > c:prod-eu-1 > i:podinfo > app resources ready + b:apps > c:prod-eu-1 > i:podinfo > applying test + b:apps > c:prod-eu-1 > i:podinfo > Job/apps/podinfo-test created + b:apps > c:prod-eu-1 > i:podinfo > test resources ready + b:apps > c:prod-eu-1 > applied successfully in 44s + ``` + +Note that Timoni deploys the app instances to all the clusters, in the order +defined in the Runtime. If the apply fails on a staging cluster, +Timoni will stop the execution and not continue with production. 
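+
+When a Runtime defines both staging and production groups, the rollout can also be
+staged manually by filtering on the cluster group (see the cluster filtering
+section below), for example:
+
+```shell
+timoni bundle apply -f bundle.cue -r runtime.cue --runtime-group staging && \
+timoni bundle apply -f bundle.cue -r runtime.cue --runtime-group production
+```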
+
+After editing a Bundle file, you can review the changes that would
+be made on all clusters with a dry-run diff:
+
+```shell
+timoni bundle apply -f bundle.cue -r runtime.cue --dry-run --diff
+```
+
+### Status
+
+To list the current status of the deployed apps on all clusters:
+
+=== "command"
+
+    ```shell
+    timoni bundle status -f bundle.cue -r runtime.cue
+    ```
+
+=== "output"
+
+    ```text
+    b:apps > c:preview-eu-1 > i:podinfo > last applied 2023-11-25T12:50:02Z
+    b:apps > c:preview-eu-1 > i:podinfo > module oci://ghcr.io/stefanprodan/modules/podinfo:6.5.3
+    b:apps > c:preview-eu-1 > i:podinfo > digest sha256:54d38b407012ccfb42badf0974ba70f9ae229ecd38f17e8a1f4e7189283b924f
+    b:apps > c:preview-eu-1 > i:podinfo > container image ghcr.io/curl/curl-container/curl-multi:master
+    b:apps > c:preview-eu-1 > i:podinfo > container image ghcr.io/stefanprodan/podinfo:6.5.3
+    b:apps > c:preview-eu-1 > i:podinfo > ServiceAccount/apps/podinfo Current - Resource is current
+    b:apps > c:preview-eu-1 > i:podinfo > Service/apps/podinfo Current - Service is ready
+    b:apps > c:preview-eu-1 > i:podinfo > Deployment/apps/podinfo Current - Deployment is available. Replicas: 1
+    b:apps > c:preview-eu-1 > i:podinfo > Job/apps/podinfo-test Current - Job Completed. succeeded: 1/1
+    b:apps > c:prod-eu-1 > i:podinfo > last applied 2023-11-25T12:50:24Z
+    b:apps > c:prod-eu-1 > i:podinfo > module oci://ghcr.io/stefanprodan/modules/podinfo:6.5.3
+    b:apps > c:prod-eu-1 > i:podinfo > digest sha256:54d38b407012ccfb42badf0974ba70f9ae229ecd38f17e8a1f4e7189283b924f
+    b:apps > c:prod-eu-1 > i:podinfo > container image ghcr.io/curl/curl-container/curl-multi:master
+    b:apps > c:prod-eu-1 > i:podinfo > container image ghcr.io/stefanprodan/podinfo:6.5.3
+    b:apps > c:prod-eu-1 > i:podinfo > ServiceAccount/apps/podinfo Current - Resource is current
+    b:apps > c:prod-eu-1 > i:podinfo > Service/apps/podinfo Current - Service is ready
+    b:apps > c:prod-eu-1 > i:podinfo > Deployment/apps/podinfo Current - Deployment is available. Replicas: 2
+    b:apps > c:prod-eu-1 > i:podinfo > Job/apps/podinfo-test Current - Job Completed. succeeded: 1/1
+    ```
+
+Or using the bundle name:
+
+```shell
+timoni bundle status my-bundle -r runtime.cue
+```
+
+### Uninstall
+
+To delete all deployed apps on all clusters:
+
+=== "command"
+
+    ```shell
+    timoni bundle delete -f bundle.cue -r runtime.cue
+    ```
+
+=== "output"
+
+    ```text
+    b:apps > c:preview-eu-1 > deleting instance podinfo in namespace apps
+    b:apps > c:preview-eu-1 > Job/apps/podinfo-test deleted
+    b:apps > c:preview-eu-1 > Deployment/apps/podinfo deleted
+    b:apps > c:preview-eu-1 > Service/apps/podinfo deleted
+    b:apps > c:preview-eu-1 > ServiceAccount/apps/podinfo deleted
+    b:apps > c:preview-eu-1 > all resources have been deleted
+    b:apps > c:prod-eu-1 > deleting instance podinfo in namespace apps
+    b:apps > c:prod-eu-1 > Job/apps/podinfo-test deleted
+    b:apps > c:prod-eu-1 > Deployment/apps/podinfo deleted
+    b:apps > c:prod-eu-1 > Service/apps/podinfo deleted
+    b:apps > c:prod-eu-1 > ServiceAccount/apps/podinfo deleted
+    b:apps > c:prod-eu-1 > all resources have been deleted
+    ```
+
+Or using the bundle name:
+
+```shell
+timoni bundle delete my-bundle -r runtime.cue
+```
+
+### Cluster filtering
+
+To perform an apply only on a group of clusters:
+
+```shell
+timoni bundle apply -f bundle.cue -r runtime.cue --runtime-group staging
+```
+
+To perform an apply only on a single cluster:
+
+```shell
+timoni bundle apply -f bundle.cue -r runtime.cue --runtime-cluster prod-eu-1
+```
+
+Note that all `timoni bundle` commands support filtering by cluster name and group.
diff --git a/docs/bundle-runtime.md b/docs/bundle-runtime.md
index 5db740bf..fbb7d166 100644
--- a/docs/bundle-runtime.md
+++ b/docs/bundle-runtime.md
@@ -2,7 +2,7 @@
 While Timoni [Bundles](bundle.md) offer a way to specify the config values in declarative manner,
 not all the configuration values of an application are known ahead of time.
-Some values may be available at runtime, in the Kubernetes cluster where the Bundle is applied.
+Some values may be available at runtime, in the Kubernetes clusters where the Bundle is applied.
 
 For example, the API token for some backend service that your app consumes is
 stored in a Kubernetes Secret in-cluster. When installing the application with Timoni,
@@ -19,6 +19,9 @@ The following is an example of a Runtime definition that extracts values from th
 runtime: {
 	apiVersion: "v1alpha1"
 	name:       "production"
+	clusters: {
+		// using the cluster set in kubeconfig current context
+	}
 	values: [
 		{
 			query: "k8s:v1:ConfigMap:infra:aws-info"
@@ -130,7 +133,13 @@ A Runtime file must contain a definition that matches the following schema:
 #Runtime: {
 	apiVersion: string
 	name:       string
-	values: [...#RuntimeValue]
+
+	clusters?: [string]: {
+		group!:       string
+		kubeContext!: string
+	}
+
+	values?: [...#RuntimeValue]
 }
 
 #RuntimeValue: {
@@ -150,11 +159,88 @@ Currently, the only supported value is `v1alpha1`.
 
 The `name` is a required field used to identify the Runtime.
 
+### Clusters
+
+The `clusters` field defines the target clusters and
+environments (groups of clusters) where a Bundle is applied.
+
+A cluster entry must specify the `group` and `kubeContext` fields.
+The `kubeContext` value must match a context name from the `.kube/config` file.
+
+!!! tip "Default cluster"
+
+    When no clusters are defined in the Runtime, Timoni will use the
+    current context from the kubeconfig, unless the context is specified
+    using the `--kube-context` flag.
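+
+Since each `kubeContext` must name a context from the kubeconfig, a quick way
+to sanity-check the cluster entries is to load the kubeconfig with client-go
+and look the contexts up. A minimal sketch (the cluster entries below are
+illustrative, and this helper is not part of Timoni itself):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"k8s.io/client-go/tools/clientcmd"
+)
+
+func main() {
+	// Load the kubeconfig the same way kubectl does
+	// (the KUBECONFIG env var or ~/.kube/config).
+	config, err := clientcmd.NewDefaultClientConfigLoadingRules().Load()
+	if err != nil {
+		panic(err)
+	}
+
+	// Hypothetical Runtime cluster entries: name -> kubeContext.
+	clusters := map[string]string{
+		"preview-eu-1": "eks-eu-west-2",
+		"prod-eu-1":    "eks-eu-west-1",
+	}
+
+	for name, kctx := range clusters {
+		if _, ok := config.Contexts[kctx]; !ok {
+			fmt.Printf("cluster %q: context %q not found in kubeconfig\n", name, kctx)
+			continue
+		}
+		fmt.Printf("cluster %q: context %q OK\n", name, kctx)
+	}
+}
+```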
+
+An example of a Runtime definition with clusters grouped into staging and production:
+
+```cue
+runtime: {
+	apiVersion: "v1alpha1"
+	name:       "fleet"
+	clusters: {
+		"preview-us-1": {
+			group:       "staging"
+			kubeContext: "eks-us-west-2"
+		}
+		"prod-us-1": {
+			group:       "production"
+			kubeContext: "eks-us-west-1"
+		}
+		"prod-eu-1": {
+			group:       "production"
+			kubeContext: "eks-eu-west-1"
+		}
+	}
+}
+```
+
+The cluster name and group can be mapped to fields in a Bundle using `@timoni()` attributes:
+
+```cue
+bundle: {
+	_cluster: string @timoni(runtime:string:TIMONI_CLUSTER_NAME)
+	_env:     string @timoni(runtime:string:TIMONI_CLUSTER_GROUP)
+
+	apiVersion: "v1alpha1"
+	name:       "apps"
+	instances: {
+		app: {
+			module: url: "oci://ghcr.io/stefanprodan/modules/podinfo"
+			namespace: "apps"
+			values: {
+				ui: message: "Hosted by \(_cluster)"
+				if _env == "staging" {
+					replicas: 1
+				}
+				if _env == "production" {
+					replicas: 2
+				}
+			}
+		}
+	}
+}
+```
+
+When applying the above Bundle, Timoni deploys the app instances on all the
+clusters, in the order defined in the Runtime. If the apply fails on a staging cluster,
+Timoni stops the execution and does not continue with production.
+
+For more details please see the [multi-cluster deployments guide](bundle-multi-cluster.md).
+
 ### Values
 
-The `values` array is a required field that specifies the list of Kubernetes resources and the fields to be extracted.
+The `values` array is an optional field that specifies
+the list of Kubernetes resources and the fields to be extracted.
+
+#### Query
+
+The `values.query` is a required field that specifies the Kubernetes resource.
-A Runtime must contain at least one value with the following required fields:
+The `query` field must be in the format `k8s:<apiVersion>:<kind>:<namespace>:<name>`.
+
+Example:
 
 ```cue
 runtime: {
@@ -171,13 +257,7 @@ runtime: {
 	}
 }
 
-#### Query
-
-The `values.query` is a required field that specifies the Kubernetes resource.
-
-The `query` field must be in the format `k8s:<apiVersion>:<kind>:<namespace>:<name>`.
-
-If the Kubernetes resource is global, the format is `k8s:<apiVersion>:<kind>:<name>`.
+If the Kubernetes resource is global, the `query` format is `k8s:<apiVersion>:<kind>:<name>`.
 
 Example:
diff --git a/hack/Makefile b/hack/Makefile
index 86d8a3aa..79590533 100644
--- a/hack/Makefile
+++ b/hack/Makefile
@@ -14,13 +14,24 @@ tools: # Install required tools with Homebrew
 	brew bundle
 
 .PHONY: up
-up: # Start a local Kind clusters and a container registry on port 5555
+up: # Start a local Kind cluster and a container registry on port 5555
 	$(REPOSITORY_ROOT)/hack/local/kind-up.sh
 
 .PHONY: down
 down: # Teardown the Kind cluster and registry
 	$(REPOSITORY_ROOT)/hack/local/kind-down.sh
 
+.PHONY: fleet-up
+fleet-up: # Start local Kind clusters (staging and production) and a container registry on port 5555
+	CLUSTER_NAME=timoni-staging $(REPOSITORY_ROOT)/hack/local/kind-up.sh
+	CLUSTER_NAME=timoni-production $(REPOSITORY_ROOT)/hack/local/kind-up.sh
+
+.PHONY: fleet-down
+fleet-down: # Teardown the Kind clusters and registry
+	kind delete cluster --name timoni-staging
+	kind delete cluster --name timoni-production
+	docker rm -f timoni-registry
+
 .PHONY: push
 push: # Push the example modules to the local registry
 	$(REPOSITORY_ROOT)/hack/local/mod-push.sh
diff --git a/hack/local/kind-down.sh b/hack/local/kind-down.sh
index 1c01c994..f0c5a600 100755
--- a/hack/local/kind-down.sh
+++ b/hack/local/kind-down.sh
@@ -5,9 +5,9 @@ set -o errexit
 
-cluster_name="timoni"
+CLUSTER_NAME="${CLUSTER_NAME:=timoni}"
 reg_name='timoni-registry'
 
-kind delete cluster --name ${cluster_name}
+kind delete cluster --name ${CLUSTER_NAME}
 
 docker rm -f ${reg_name}
diff --git a/hack/local/kind-up.sh b/hack/local/kind-up.sh
index 40e3fc37..4ff6e7c6 100755
--- a/hack/local/kind-up.sh
+++ b/hack/local/kind-up.sh
@@ -6,6 +6,7 @@ set -o errexit
 
 CLUSTER_NAME="${CLUSTER_NAME:=timoni}"
+cluster_version="v1.28.0"
 reg_name='timoni-registry'
 reg_localhost_port='5555'
 reg_cluster_port='5000'
@@ -20,12 +21,15 @@ containerdConfigPatches:
     endpoint = ["http://${reg_name}:${reg_cluster_port}"]
 nodes:
 - role: control-plane
+  image: kindest/node:${cluster_version}
   kubeadmConfigPatches:
   - |
     kind: InitConfiguration
     nodeRegistration:
       kubeletExtraArgs:
        node-labels: "ingress-ready=true"
+- role: worker
+  image: kindest/node:${cluster_version}
 EOF
 }
diff --git a/hack/local/mod-push.sh b/hack/local/mod-push.sh
index 53e50727..0e50f70e 100755
--- a/hack/local/mod-push.sh
+++ b/hack/local/mod-push.sh
@@ -8,14 +8,6 @@ set -o errexit
 
 reg_localhost_port='5555'
 repo_root=$(git rev-parse --show-toplevel)
 
-PODINFO_VER=$(cat $repo_root/examples/podinfo/templates/config.cue | awk '/tag:/ {print $2}' | tr -d '*"')
-timoni mod push $repo_root/examples/podinfo oci://localhost:${reg_localhost_port}/modules/podinfo -v ${PODINFO_VER} --latest \
-  --source https://github.com/stefanprodan/podinfo \
-  -a 'org.opencontainers.image.description=A timoni.sh module for deploying Podinfo.' \
-  -a 'org.opencontainers.image.documentation=https://github.com/stefanprodan/timoni/blob/main/examples/podinfo/README.md'
-
-REDIS_VER=$(cat $repo_root/examples/redis/templates/config.cue | awk '/tag:/ {print $2}' | tr -d '*"')
-timoni mod push $repo_root/examples/redis oci://localhost:${reg_localhost_port}/modules/redis -v ${REDIS_VER} --latest \
-  --source https://github.com/stefanprodan/timoni/tree/main/examples/redis \
-  -a 'org.opencontainers.image.description=A timoni.sh module for deploying Redis master-replica clusters.' \
-  -a 'org.opencontainers.image.documentation=https://github.com/stefanprodan/timoni/blob/main/examples/redis/README.md'
+crane copy ghcr.io/stefanprodan/modules/podinfo localhost:${reg_localhost_port}/modules/podinfo -a
+crane copy ghcr.io/stefanprodan/modules/redis localhost:${reg_localhost_port}/modules/redis -a
+crane copy ghcr.io/stefanprodan/timoni/minimal localhost:${reg_localhost_port}/modules/nginx -a
diff --git a/hack/local/podinfo.bundle.cue b/hack/local/podinfo.bundle.cue
index d63a6a78..3463c3af 100644
--- a/hack/local/podinfo.bundle.cue
+++ b/hack/local/podinfo.bundle.cue
@@ -23,6 +23,7 @@ bundle: {
 				enabled:  true
 				redisURL: "tcp://cache:6379"
 			}
+			values: test: enabled: true
 		}
 		frontend: {
 			module: {
@@ -47,6 +48,7 @@ bundle: {
 				capabilities: drop: ["ALL"]
 				seccompProfile: type: "RuntimeDefault"
 			}
+			test: enabled: true
 		}
 	}
 }
diff --git a/internal/engine/bundle_builder.go b/internal/engine/bundle_builder.go
index 0ac161f2..d467eedf 100644
--- a/internal/engine/bundle_builder.go
+++ b/internal/engine/bundle_builder.go
@@ -46,6 +46,7 @@ type Bundle struct {
 type BundleInstance struct {
 	Bundle    string
+	Cluster   string
 	Name      string
 	Namespace string
 	Module    apiv1.ModuleReference
diff --git a/internal/engine/runtime_builder.go b/internal/engine/runtime_builder.go
index bd6bac83..93f01622 100644
--- a/internal/engine/runtime_builder.go
+++ b/internal/engine/runtime_builder.go
@@ -145,31 +145,54 @@ func (b *RuntimeBuilder) GetRuntime(v cue.Value) (*apiv1.Runtime, error) {
 		return nil, fmt.Errorf("lookup %s failed: %w", apiv1.RuntimeName.String(), runtimeNameValue.Err())
 	}
 
-	runtimeValuesCue := v.LookupPath(cue.ParsePath(apiv1.RuntimeValuesSelector.String()))
-	if runtimeValuesCue.Err() != nil {
-		return nil, fmt.Errorf("lookup %s failed: %w", apiv1.RuntimeValuesSelector.String(), runtimeValuesCue.Err())
-	}
+	clusters := []apiv1.RuntimeCluster{}
+	clustersCue := v.LookupPath(cue.ParsePath(apiv1.RuntimeClustersSelector.String()))
+	if clustersCue.Err() == nil {
+		iter, err := clustersCue.Fields(cue.Concrete(true))
+		if err != nil {
+			return nil, err
+		}
 
-	runtimeValues := []apiv1.RuntimeValue{}
+		for iter.Next() {
+			name := iter.Selector().Unquoted()
+			expr := iter.Value()
 
-	err = runtimeValuesCue.Decode(&runtimeValues)
-	if err != nil {
-		return nil, fmt.Errorf("values decoding failed: %w", err)
+			vGroup := expr.LookupPath(cue.ParsePath("group"))
+			group, _ := vGroup.String()
+
+			vkc := expr.LookupPath(cue.ParsePath("kubeContext"))
+			kc, _ := vkc.String()
+
+			clusters = append(clusters, apiv1.RuntimeCluster{
+				Name:        name,
+				Group:       group,
+				KubeContext: kc,
+			})
+		}
 	}
 
 	var refs []apiv1.RuntimeResourceRef
-
-	for _, rv := range runtimeValues {
-		ref, err := rv.ToResourceRef()
+	runtimeValuesCue := v.LookupPath(cue.ParsePath(apiv1.RuntimeValuesSelector.String()))
+	if runtimeValuesCue.Err() == nil {
+		runtimeValues := []apiv1.RuntimeValue{}
+		err = runtimeValuesCue.Decode(&runtimeValues)
 		if err != nil {
-			return nil, fmt.Errorf("value decoding failed: %w", err)
+			return nil, fmt.Errorf("values decoding failed: %w", err)
 		}
 
-		refs = append(refs, *ref)
+		for _, rv := range runtimeValues {
+			ref, err := rv.ToResourceRef()
+			if err != nil {
+				return nil, fmt.Errorf("value decoding failed: %w", err)
+			}
+
+			refs = append(refs, *ref)
+		}
 	}
 
 	return &apiv1.Runtime{
-		Name: runtimeName,
-		Refs: refs,
+		Name:     runtimeName,
+		Clusters: clusters,
+		Refs:     refs,
 	}, nil
 }
diff --git a/internal/engine/runtime_builder_test.go b/internal/engine/runtime_builder_test.go
index 37125765..27740528 100644
--- a/internal/engine/runtime_builder_test.go
+++ b/internal/engine/runtime_builder_test.go
@@ -26,7 +26,24 @@ import (
 	apiv1 "github.com/stefanprodan/timoni/api/v1alpha1"
 )
 
-func TestGetRuntime(t *testing.T) {
+func TestRuntimeBuilder_Minimal(t *testing.T) {
+	g := NewWithT(t)
+	ctx := cuecontext.New()
+
+	rt := `
+runtime: {
+	apiVersion: "v1alpha1"
+	name: "minimal"
+}
+`
+	v := ctx.CompileString(rt)
+	builder := NewRuntimeBuilder(ctx, []string{})
+	b, err := builder.GetRuntime(v)
+	g.Expect(err).ToNot(HaveOccurred())
+	g.Expect(b.Name).To(BeEquivalentTo("minimal"))
+}
+
+func TestRuntimeBuilder_Values(t *testing.T) {
 	g := NewWithT(t)
 	ctx := cuecontext.New()
 
@@ -81,3 +98,49 @@ runtime: {
 	}))
 	g.Expect(b.Refs[2].Namespace).To(BeEmpty())
 }
+
+func TestRuntimeBuilder_Clusters(t *testing.T) {
+	g := NewWithT(t)
+	ctx := cuecontext.New()
+
+	rt := `
+runtime: {
+	apiVersion: "v1alpha1"
+	name: "fleet"
+	clusters: {
+		"staging-eu": {
+			group:       "staging"
+			kubeContext: "eu-central-1:staging"
+		}
+		"staging-us": {
+			group:       "staging"
+			kubeContext: "us-west-1:staging"
+		}
+		"production-eu": {
+			group:       "production"
+			kubeContext: "eu-central-1:production"
+		}
+		"production-us": {
+			group:       "production"
+			kubeContext: "us-west-1:production"
+		}
+	}
+}
+`
+	v := ctx.CompileString(rt)
+	builder := NewRuntimeBuilder(ctx, []string{})
+	b, err := builder.GetRuntime(v)
+	g.Expect(err).ToNot(HaveOccurred())
+	g.Expect(b.Name).To(BeEquivalentTo("fleet"))
+	g.Expect(len(b.Clusters)).To(BeEquivalentTo(4))
+	g.Expect(b.Clusters[0]).To(BeEquivalentTo(apiv1.RuntimeCluster{
+		Name:        "staging-eu",
+		Group:       "staging",
+		KubeContext: "eu-central-1:staging",
+	}))
+	g.Expect(b.Clusters[3]).To(BeEquivalentTo(apiv1.RuntimeCluster{
+		Name:        "production-us",
+		Group:       "production",
+		KubeContext: "us-west-1:production",
+	}))
+}
diff --git a/mkdocs.yml b/mkdocs.yml
index 789e58f9..6c0506f7 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -95,6 +95,7 @@ nav:
     - Bundle runtime: bundle-runtime.md
     - Bundle distribution: bundle-distribution.md
     - Bundle secrets injection: bundle-secrets.md
+    - Multi-cluster deployments: bundle-multi-cluster.md
   - Module Development:
     - Module structure: module.md
     - Module distribution: module-distribution.md