diff --git a/cmd/openshift/operator/kodata/openshift/00-prereconcile/openshift-pipelines-scc.yaml b/cmd/openshift/operator/kodata/tekton-pipeline/00-prereconcile/openshift-pipelines-scc.yaml similarity index 100% rename from cmd/openshift/operator/kodata/openshift/00-prereconcile/openshift-pipelines-scc.yaml rename to cmd/openshift/operator/kodata/tekton-pipeline/00-prereconcile/openshift-pipelines-scc.yaml diff --git a/config/crs/kubernetes/config/all/operator_v1alpha1_config_cr.yaml b/config/crs/kubernetes/config/all/operator_v1alpha1_config_cr.yaml index d5dca8ccc2..25728dff2c 100644 --- a/config/crs/kubernetes/config/all/operator_v1alpha1_config_cr.yaml +++ b/config/crs/kubernetes/config/all/operator_v1alpha1_config_cr.yaml @@ -19,3 +19,9 @@ metadata: spec: profile: all targetNamespace: tekton-pipelines + pruner: + resources: + - pipelinerun + - taskrun + keep: 2 + schedule: "0 8 * * *" \ No newline at end of file diff --git a/config/crs/kubernetes/config/basic/operator_v1alpha1_config_cr.yaml b/config/crs/kubernetes/config/basic/operator_v1alpha1_config_cr.yaml index 510cba558f..784a390ca9 100644 --- a/config/crs/kubernetes/config/basic/operator_v1alpha1_config_cr.yaml +++ b/config/crs/kubernetes/config/basic/operator_v1alpha1_config_cr.yaml @@ -19,3 +19,9 @@ metadata: spec: profile: basic targetNamespace: tekton-pipelines + pruner: + resources: + - pipelinerun + - taskrun + keep: 2 + schedule: "0 8 * * *" \ No newline at end of file diff --git a/config/crs/kubernetes/config/lite/operator_v1alpha1_config_cr.yaml b/config/crs/kubernetes/config/lite/operator_v1alpha1_config_cr.yaml index 4341dfe6e3..bff92430c6 100644 --- a/config/crs/kubernetes/config/lite/operator_v1alpha1_config_cr.yaml +++ b/config/crs/kubernetes/config/lite/operator_v1alpha1_config_cr.yaml @@ -19,3 +19,9 @@ metadata: spec: profile: lite targetNamespace: tekton-pipelines + pruner: + resources: + - pipelinerun + - taskrun + keep: 2 + schedule: "0 8 * * *" \ No newline at end of file diff --git a/config/crs/openshift/config/all/operator_v1alpha1_config_cr.yaml b/config/crs/openshift/config/all/operator_v1alpha1_config_cr.yaml index 38432513c6..37d07694c5 100644 --- a/config/crs/openshift/config/all/operator_v1alpha1_config_cr.yaml +++ b/config/crs/openshift/config/all/operator_v1alpha1_config_cr.yaml @@ -25,3 +25,9 @@ spec: value: "true" - name: pipelineTemplates value: "true" + pruner: + resources: + - pipelinerun + - taskrun + keep: 2 + schedule: "0 8 * * *" diff --git a/config/crs/openshift/config/basic/operator_v1alpha1_config_cr.yaml b/config/crs/openshift/config/basic/operator_v1alpha1_config_cr.yaml index 05c66c1f47..59007d8cb1 100644 --- a/config/crs/openshift/config/basic/operator_v1alpha1_config_cr.yaml +++ b/config/crs/openshift/config/basic/operator_v1alpha1_config_cr.yaml @@ -19,3 +19,9 @@ metadata: spec: profile: basic targetNamespace: openshift-pipelines + pruner: + resources: + - pipelinerun + - taskrun + keep: 2 + schedule: "0 8 * * *" \ No newline at end of file diff --git a/config/crs/openshift/config/lite/operator_v1alpha1_config_cr.yaml b/config/crs/openshift/config/lite/operator_v1alpha1_config_cr.yaml index 82c3b8bf39..c2e2aba541 100644 --- a/config/crs/openshift/config/lite/operator_v1alpha1_config_cr.yaml +++ b/config/crs/openshift/config/lite/operator_v1alpha1_config_cr.yaml @@ -19,3 +19,9 @@ metadata: spec: profile: lite targetNamespace: openshift-pipelines + pruner: + resources: + - pipelinerun + - taskrun + keep: 2 + schedule: "0 8 * * *" \ No newline at end of file diff --git 
a/pkg/apis/operator/v1alpha1/tektonconfig_types.go b/pkg/apis/operator/v1alpha1/tektonconfig_types.go index ba7dac5db5..5a42886489 100644 --- a/pkg/apis/operator/v1alpha1/tektonconfig_types.go +++ b/pkg/apis/operator/v1alpha1/tektonconfig_types.go @@ -58,7 +58,12 @@ type Prune struct { Resources []string `json:"resources,omitempty"` // The number of resource to keep // You dont want to delete all the pipelinerun/taskrun's by a cron + // +optional Keep *uint `json:"keep,omitempty"` + // KeepSince keeps the resources younger than the specified value + // Its value is taken in minutes + // +optional + KeepSince *uint `json:"keep-since,omitempty"` // How frequent pruning should happen Schedule string `json:"schedule,omitempty"` } diff --git a/pkg/apis/operator/v1alpha1/tektonconfig_validation.go b/pkg/apis/operator/v1alpha1/tektonconfig_validation.go index 87ab426dcd..c583160416 100644 --- a/pkg/apis/operator/v1alpha1/tektonconfig_validation.go +++ b/pkg/apis/operator/v1alpha1/tektonconfig_validation.go @@ -62,10 +62,12 @@ func (p Prune) validate() *apis.FieldError { errs = errs.Also(apis.ErrMissingField("spec.pruner.resources")) } - if p.Keep == nil { - errs = errs.Also(apis.ErrMissingField("spec.pruner.keep")) - } else if *p.Keep == 0 { + if p.Keep == nil && p.KeepSince == nil { + errs = errs.Also(apis.ErrMissingOneOf("spec.pruner.keep", "spec.pruner.keep-since")) + } else if p.Keep != nil && *p.Keep == 0 { errs = errs.Also(apis.ErrInvalidValue(*p.Keep, "spec.pruner.keep")) + } else if p.KeepSince != nil && *p.KeepSince == 0 { + errs = errs.Also(apis.ErrInvalidValue(*p.KeepSince, "spec.pruner.keep-since")) } if p.Schedule == "" { diff --git a/pkg/apis/operator/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/operator/v1alpha1/zz_generated.deepcopy.go index e7e8d54687..4a83a10e8d 100644 --- a/pkg/apis/operator/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/operator/v1alpha1/zz_generated.deepcopy.go @@ -281,6 +281,11 @@ func (in *Prune) DeepCopyInto(out *Prune) { *out = new(uint) **out = **in } + if in.KeepSince != nil { + in, out := &in.KeepSince, &out.KeepSince + *out = new(uint) + **out = **in + } return } diff --git a/pkg/reconciler/common/prune.go b/pkg/reconciler/common/prune.go index c6240ecb2e..862dae8375 100644 --- a/pkg/reconciler/common/prune.go +++ b/pkg/reconciler/common/prune.go @@ -21,11 +21,12 @@ import ( "fmt" "os" "regexp" + "strconv" "strings" "github.com/tektoncd/operator/pkg/apis/operator/v1alpha1" batchv1 "k8s.io/api/batch/v1" - v1beta1 "k8s.io/api/batch/v1beta1" + "k8s.io/api/batch/v1beta1" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -33,24 +34,57 @@ import ( ) const ( - tektonSA = "tekton-pipelines-controller" - CronName = "resource-pruner" - JobsTKNImageName = "IMAGE_JOB_PRUNER_TKN" - ownerAPIVer = "operator.tekton.dev/v1alpha1" - ownerKind = "TektonConfig" + tektonSA = "tekton-pipelines-controller" + CronName = "resource-pruner" + JobsTKNImageName = "IMAGE_JOB_PRUNER_TKN" + ownerAPIVer = "operator.tekton.dev/v1alpha1" + ownerKind = "TektonConfig" + pruneSkip = "operator.tekton.dev/prune.skip" + pruneResources = "operator.tekton.dev/prune.resources" + pruneResourcesPipelineNames = "operator.tekton.dev/prune.resources.pipelineNames" + pruneResourcesTaskNames = "operator.tekton.dev/prune.resources.taskNames" + pruneKeep = "operator.tekton.dev/prune.keep" + pruneKeepSince = "operator.tekton.dev/prune.keepsince" + pruneSchedule = "operator.tekton.dev/prune.schedule" ) +type Pruner struct { + kc 
kubernetes.Interface + context context.Context + tknImage string +} +type pruneConfigPerNS struct { + config v1alpha1.Prune + relatedPipelineNames []string + relatedTaskNames []string +} + +// all the namespaces, both default and annotation-based +type pruningNs struct { + commonScheduleNs map[string]*pruneConfigPerNS + uniqueScheduleNS map[string]*pruneConfigPerNS +} + func Prune(k kubernetes.Interface, ctx context.Context, tC *v1alpha1.TektonConfig) error { + pruner := &Pruner{kc: k, context: ctx} + // may be reconciled by removing the pruning spec from TektonConfig + if pruner.removedFromTektonConfig(tC.Spec.Pruner) { + return pruner.checkAndDelete(tC.Spec.TargetNamespace) + } - if len(tC.Spec.Pruner.Resources) == 0 || tC.Spec.Pruner.Schedule == "" { - return checkAndDelete(k, ctx, tC.Spec.TargetNamespace) + if namespaces, ok := pruner.removedNSAnnotation(); ok { + for _, ns := range namespaces { + if err := pruner.checkAndDelete(ns); err != nil { + return err + } + } } - tknImage := os.Getenv(JobsTKNImageName) - if tknImage == "" { + tknImageFromEnv := os.Getenv(JobsTKNImageName) + if tknImageFromEnv == "" { return fmt.Errorf("%s environment variable not found", JobsTKNImageName) } - pru := tC.Spec.Pruner + pruner.tknImage = tknImageFromEnv logger := logging.FromContext(ctx) ownerRef := v1.OwnerReference{ APIVersion: ownerAPIVer, @@ -58,38 +92,128 @@ func Prune(k kubernetes.Interface, ctx context.Context, tC *v1alpha1.TektonConfi Name: tC.Name, UID: tC.ObjectMeta.UID, } - - pruningNamespaces, err := GetPrunableNamespaces(k, ctx) + // for the default config from the tektonconfig + pruningNamespaces, err := prunableNamespaces(k, ctx, tC.Spec.Pruner) if err != nil { return err } - if err := createCronJob(k, ctx, pru, tC.Spec.TargetNamespace, pruningNamespaces, ownerRef, tknImage); err != nil { - logger.Error("failed to create cronjob ", err) + if pruningNamespaces.commonScheduleNs != nil && len(pruningNamespaces.commonScheduleNs) > 0 { + jobs := pruner.createAllJobs(pruningNamespaces.commonScheduleNs) + + if err := pruner.createCronJob(tC.Spec.TargetNamespace, tC.Spec.Pruner.Schedule, jobs, ownerRef); err != nil { + logger.Error("failed to create cronjob ", err) + } + } + if pruningNamespaces.uniqueScheduleNS != nil { + for ns, con := range pruningNamespaces.uniqueScheduleNS { + jobs := pruner.createJobs(con, ns) + + if err := pruner.createCronJob(ns, con.config.Schedule, jobs, ownerRef); err != nil { + logger.Errorf("failed to create cronjob in ns %s: %v", ns, err) + } + } } return nil } -func GetPrunableNamespaces(k kubernetes.Interface, ctx context.Context) ([]string, error) { +func prunableNamespaces(k kubernetes.Interface, ctx context.Context, defaultPruneConfig v1alpha1.Prune) (pruningNs, error) { nsList, err := k.CoreV1().Namespaces().List(ctx, v1.ListOptions{}) if err != nil { - return nil, err + return pruningNs{}, err } - - var allNameSpaces []string + var prunableNs pruningNs + commonSchedule := make(map[string]*pruneConfigPerNS) + uniqueSchedule := make(map[string]*pruneConfigPerNS) re := regexp.MustCompile(NamespaceIgnorePattern) for _, ns := range nsList.Items { if ignore := re.MatchString(ns.GetName()); ignore { continue } - allNameSpaces = append(allNameSpaces, ns.Name) + nsAnnotations := ns.GetAnnotations() + // skip the namespace if annotated with prune skip + if nsAnnotations[pruneSkip] != "" && nsAnnotations[pruneSkip] == "true" { + continue + } + pc := &pruneConfigPerNS{ + config: v1alpha1.Prune{}, + relatedPipelineNames: []string{}, + relatedTaskNames: []string{}, + } + + 
if nsAnnotations[pruneResourcesPipelineNames] != "" { + pc.relatedPipelineNames = strings.Split(nsAnnotations[pruneResourcesPipelineNames], ",") + } + if nsAnnotations[pruneResourcesTaskNames] != "" { + pc.relatedTaskNames = strings.Split(nsAnnotations[pruneResourcesTaskNames], ",") + } + if nsAnnotations[pruneResources] != "" { + pc.config.Resources = strings.Split(nsAnnotations[pruneResources], ",") + } else { + pc.config.Resources = defaultPruneConfig.Resources + } + if nsAnnotations[pruneKeepSince] != "" { + keepsince, _ := strconv.Atoi(nsAnnotations[pruneKeepSince]) + uintKeepSince := uint(keepsince) + pc.config.KeepSince = &uintKeepSince + pc.config.Keep = nil + } else if defaultPruneConfig.KeepSince != nil && nsAnnotations[pruneKeep] == "" { + pc.config.KeepSince = defaultPruneConfig.KeepSince + } + if nsAnnotations[pruneKeep] != "" { + keep, _ := strconv.Atoi(nsAnnotations[pruneKeep]) + uintKeep := uint(keep) + pc.config.Keep = &uintKeep + pc.config.KeepSince = nil + } else if defaultPruneConfig.Keep != nil && nsAnnotations[pruneKeepSince] == "" { + pc.config.Keep = defaultPruneConfig.Keep + } + + // if a different schedule then create a new cronJob + if nsAnnotations[pruneSchedule] != "" { + if nsAnnotations[pruneSchedule] != defaultPruneConfig.Schedule { + pc.config.Schedule = nsAnnotations[pruneSchedule] + uniqueSchedule[ns.Name] = pc + delete(commonSchedule, ns.Name) + continue + } + } + + commonSchedule[ns.Name] = pc } - return allNameSpaces, nil + prunableNs.commonScheduleNs = commonSchedule + prunableNs.uniqueScheduleNS = uniqueSchedule + return prunableNs, nil } -func createCronJob(k kubernetes.Interface, ctx context.Context, pru v1alpha1.Prune, targetNs string, pruningNs []string, oR v1.OwnerReference, tknImage string) error { - pruneContainers := getPruningContainers(pru.Resources, pruningNs, *pru.Keep, tknImage) +func (pruner *Pruner) createAllJobs(nsConfig map[string]*pruneConfigPerNS) []corev1.Container { + var containers []corev1.Container + for ns, con := range nsConfig { + jobContainers := pruner.createJobs(con, ns) + containers = append(containers, jobContainers...) 
+ } + return containers +} + +func (pruner *Pruner) createJobs(nsConfig *pruneConfigPerNS, ns string) []corev1.Container { + var containers []corev1.Container + + cmdArgs := pruneCommand(nsConfig, ns) + jobName := SimpleNameGenerator.RestrictLengthWithRandomSuffix("pruner-tkn-" + ns) + container := corev1.Container{ + Name: jobName, + Image: pruner.tknImage, + Command: []string{"/bin/sh", "-c"}, + Args: []string{cmdArgs}, + TerminationMessagePolicy: "FallbackToLogsOnError", + } + containers = append(containers, container) + + return containers +} + +func (pruner *Pruner) createCronJob(targetNs, schedule string, jobs []corev1.Container, oR v1.OwnerReference) error { backOffLimit := int32(3) ttlSecondsAfterFinished := int32(3600) cj := &v1beta1.CronJob{ @@ -98,11 +222,11 @@ func createCronJob(k kubernetes.Interface, ctx context.Context, pru v1alpha1.Pru APIVersion: "batch/v1beta1", }, ObjectMeta: v1.ObjectMeta{ - Name: CronName, + Name: CronName + "-" + targetNs, OwnerReferences: []v1.OwnerReference{oR}, }, Spec: v1beta1.CronJobSpec{ - Schedule: pru.Schedule, + Schedule: schedule, ConcurrencyPolicy: "Forbid", JobTemplate: v1beta1.JobTemplateSpec{ @@ -112,7 +236,7 @@ func createCronJob(k kubernetes.Interface, ctx context.Context, pru v1alpha1.Pru Template: corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ - Containers: pruneContainers, + Containers: jobs, RestartPolicy: "OnFailure", ServiceAccountName: tektonSA, }, @@ -122,9 +246,9 @@ func createCronJob(k kubernetes.Interface, ctx context.Context, pru v1alpha1.Pru }, } - if _, err := k.BatchV1beta1().CronJobs(targetNs).Create(ctx, cj, v1.CreateOptions{}); err != nil { + if _, err := pruner.kc.BatchV1beta1().CronJobs(targetNs).Create(pruner.context, cj, v1.CreateOptions{}); err != nil { if strings.Contains(err.Error(), "already exists") { - if _, err := k.BatchV1beta1().CronJobs(targetNs).Update(ctx, cj, v1.UpdateOptions{}); err != nil { + if _, err := pruner.kc.BatchV1beta1().CronJobs(targetNs).Update(pruner.context, cj, v1.UpdateOptions{}); err != nil { return err } } @@ -133,35 +257,42 @@ func createCronJob(k kubernetes.Interface, ctx context.Context, pru v1alpha1.Pru return nil } -func getPruningContainers(resources, namespaces []string, keep uint, tknImage string) []corev1.Container { - containers := []corev1.Container{} - for _, ns := range namespaces { - cmdArgs := deleteCommand(resources, keep, ns) - jobName := SimpleNameGenerator.RestrictLengthWithRandomSuffix("pruner-tkn-" + ns) - container := corev1.Container{ - Name: jobName, - Image: tknImage, - Command: []string{"/bin/sh", "-c"}, - Args: []string{cmdArgs}, - TerminationMessagePolicy: "FallbackToLogsOnError", +func pruneCommand(pru *pruneConfigPerNS, ns string) string { + var cmdArgs string + for _, resource := range pru.config.Resources { + res := strings.TrimSpace(resource) + var keepCmd string + if pru.config.Keep != nil { + keepCmd = "--keep=" + fmt.Sprint(*pru.config.Keep) } - containers = append(containers, container) - } - - return containers -} + if pru.config.Keep == nil && pru.config.KeepSince != nil { + keepCmd = "--keep-since=" + fmt.Sprint(*pru.config.KeepSince) + } + cmd := "tkn " + strings.ToLower(res) + " delete " + keepCmd + if strings.ToLower(res) == "pipelinerun" { + if len(pru.relatedPipelineNames) > 0 { + for _, pipeline := range pru.relatedPipelineNames { + cmdArgs = cmdArgs + cmd + " -p=" + strings.TrimSpace(pipeline) + " -n=" + ns + " -f ; " + } + } else { + cmdArgs = cmdArgs + cmd + " -n=" + ns + " -f ; " + } -func deleteCommand(resources []string, keep uint, 
ns string) string { - var cmdArgs string - for _, res := range resources { - cmd := "tkn " + strings.ToLower(res) + " delete --keep=" + fmt.Sprint(keep) + " -n " + ns + " -f ; " - cmdArgs = cmdArgs + cmd + } else if strings.ToLower(res) == "taskrun" { + if len(pru.relatedTaskNames) > 0 { + for _, task := range pru.relatedTaskNames { + cmdArgs = cmdArgs + cmd + " -t=" + strings.TrimSpace(task) + " -n=" + ns + " -f ; " + } + } else { + cmdArgs = cmdArgs + cmd + " -n=" + ns + " -f ; " + } + } } return cmdArgs } -func checkAndDelete(k kubernetes.Interface, ctx context.Context, targetNamespace string) error { - if _, err := k.BatchV1beta1().CronJobs(targetNamespace).Get(ctx, CronName, v1.GetOptions{}); err != nil { +func (pruner *Pruner) checkAndDelete(ns string) error { + if _, err := pruner.kc.BatchV1beta1().CronJobs(ns).Get(pruner.context, CronName+"-"+ns, v1.GetOptions{}); err != nil { if strings.Contains(err.Error(), "not found") { return nil } else { @@ -170,5 +301,39 @@ func checkAndDelete(k kubernetes.Interface, ctx context.Context, targetNamespace } //if there is no error it means cron is exists, but no prune in config it means delete it - return k.BatchV1beta1().CronJobs(targetNamespace).Delete(ctx, CronName, v1.DeleteOptions{}) + return pruner.kc.BatchV1beta1().CronJobs(ns).Delete(pruner.context, CronName+"-"+ns, v1.DeleteOptions{}) +} + +func (pruner *Pruner) removedFromTektonConfig(prune v1alpha1.Prune) bool { + if len(prune.Resources) == 0 || prune.Schedule == "" { + return true + } + return false +} + +func (pruner *Pruner) removedNSAnnotation() ([]string, bool) { + var namespaces []string + cronJobs, err := pruner.kc.BatchV1beta1().CronJobs("").List(pruner.context, v1.ListOptions{}) + if err != nil { + return nil, false + } + for _, cronjob := range cronJobs.Items { + if strings.HasPrefix(cronjob.Name, CronName) { + name := strings.TrimPrefix(cronjob.Name, CronName+"-") + ns, err := pruner.kc.CoreV1().Namespaces().Get(pruner.context, name, v1.GetOptions{}) + if err != nil { + return nil, false + } + ano := ns.GetAnnotations() + if _, ok := ano[pruneSchedule]; !ok || ano[pruneSkip] == "true" { + namespaces = append(namespaces, name) + } + } + } + + if len(namespaces) > 0 { + return namespaces, true + } + return nil, false + } diff --git a/pkg/reconciler/common/prune_test.go b/pkg/reconciler/common/prune_test.go index ced95ebb09..4adaf4f044 100644 --- a/pkg/reconciler/common/prune_test.go +++ b/pkg/reconciler/common/prune_test.go @@ -18,20 +18,36 @@ package common import ( "context" + "errors" "fmt" + "os" + "strings" "testing" "github.com/tektoncd/operator/pkg/apis/operator/v1alpha1" "gotest.tools/v3/assert" - batchv1 "k8s.io/api/batch/v1" - "k8s.io/api/batch/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" ) +const ( + scheduleCommon = "*/2 * * * *" + scheduleUnique = "*/4 * * * *" +) + func TestGetPrunableNamespaces(t *testing.T) { - expected := []string{"ns-one", "ns-two"} + keep := uint(3) + anno1 := map[string]string{pruneSchedule: scheduleCommon, pruneResourcesPipelineNames: "pa, pb"} + anno2 := map[string]string{pruneSchedule: scheduleUnique} + defaultPrune := v1alpha1.Prune{ + Resources: []string{"something"}, + Keep: &keep, + Schedule: scheduleCommon, + } + expected1 := map[string]struct{}{"ns-one": struct{}{}, "ns-two": struct{}{}, "ns-three": struct{}{}} + expected2 := map[string]struct{}{"ns-four": struct{}{}} + client := fake.NewSimpleClientset( &corev1.Namespace{ObjectMeta: 
metav1.ObjectMeta{Name: "openshift-api"}}, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "openshift-api-url"}}, @@ -39,57 +55,244 @@ func TestGetPrunableNamespaces(t *testing.T) { &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "kube-api"}}, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-one"}}, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-two"}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-three", Annotations: anno1}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-four", Annotations: anno2}}, ) - prunableNSList, err := GetPrunableNamespaces(client, context.TODO()) + pruningNamespaces, err := prunableNamespaces(client, context.TODO(), defaultPrune) if err != nil { assert.Error(t, err, "unable to get ns list") } - assert.Equal(t, fmt.Sprint(expected), fmt.Sprint(prunableNSList)) + assert.Equal(t, len(expected1), len(pruningNamespaces.commonScheduleNs)) + for ns := range pruningNamespaces.commonScheduleNs { + if _, ok := expected1[ns]; !ok { + assert.Error(t, errors.New("namespace not found"), ns) + } + } + assert.Equal(t, len(expected2), len(pruningNamespaces.uniqueScheduleNS)) + for ns := range pruningNamespaces.uniqueScheduleNS { + if _, ok := expected2[ns]; !ok { + assert.Error(t, errors.New("namespace not found"), ns) + } + } } -func TestCreateCronJob(t *testing.T) { - cronName := "resource-pruner" - resource := []string{"pipelinerun", "taskrun"} - cronJob := &v1beta1.CronJob{ - TypeMeta: metav1.TypeMeta{ - Kind: "CronJob", - APIVersion: "batch/v1beta1", - }, +func TestCompleteFlowPrune(t *testing.T) { + + keep := uint(3) + anno1 := map[string]string{pruneSchedule: scheduleCommon, pruneResourcesPipelineNames: "pa, pb"} + anno2 := map[string]string{pruneSchedule: scheduleUnique} + anno3 := map[string]string{pruneResourcesPipelineNames: "pv, pz"} + anno4 := map[string]string{pruneSchedule: scheduleCommon} + + defaultPrune := &v1alpha1.Prune{ + Resources: []string{"pipelinerun"}, + Keep: &keep, + Schedule: scheduleCommon, + } + config := &v1alpha1.TektonConfig{ ObjectMeta: metav1.ObjectMeta{ - Name: cronName, + Name: "config", }, - Spec: v1beta1.CronJobSpec{ - JobTemplate: v1beta1.JobTemplateSpec{ - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{}, - }, + Spec: v1alpha1.TektonConfigSpec{ + Profile: "all", + Pruner: *defaultPrune, + CommonSpec: v1alpha1.CommonSpec{TargetNamespace: "openshift-pipelines"}, + }, + } + client := fake.NewSimpleClientset( + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "openshift-api"}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "openshift-api-url"}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "kube-system"}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "kube-api"}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-one"}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-two", Annotations: anno2}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-three", Annotations: anno1}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-four", Annotations: anno4}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-five", Annotations: anno3}}, + ) + os.Setenv(JobsTKNImageName, "some") + + err := Prune(client, context.TODO(), config) + if err != nil { + assert.Error(t, err, "unable to initiate prune") + } + cronjobs, err := client.BatchV1beta1().CronJobs("").List(context.TODO(), metav1.ListOptions{}) + if err != nil { + assert.Error(t, err, "unable to get cronjobs ") + } + // Only one ns with unique 
schedule than default + if len(cronjobs.Items) != 2 { + assert.Error(t, err, "number of cronjobs not correct") + } +} + +func TestPruneCommands(t *testing.T) { + keep := uint(2) + keepsince := uint(300) + expected := []string{ + "tkn pipelinerun delete --keep=2 -n=ns -f ; tkn taskrun delete --keep=2 -n=ns -f ; ", + "tkn pipelinerun delete --keep=2 -p=pipelineFoo -n=ns -f ; tkn pipelinerun delete --keep=2 -p=pipelineBar -n=ns -f ; tkn taskrun delete --keep=2 -n=ns -f ; ", + "tkn pipelinerun delete --keep=2 -p=pipelineFoo -n=ns -f ; tkn pipelinerun delete --keep=2 -p=pipelineBar -n=ns -f ; tkn taskrun delete --keep=2 -t=taskCat -n=ns -f ; tkn taskrun delete --keep=2 -t=taskDog -n=ns -f ; ", + "tkn pipelinerun delete --keep=2 -n=ns -f ; tkn taskrun delete --keep=2 -t=taskCat -n=ns -f ; tkn taskrun delete --keep=2 -t=taskDog -n=ns -f ; ", + "tkn pipelinerun delete --keep=2 -n=ns -f ; tkn taskrun delete --keep=2 -t=taskCat -n=ns -f ; tkn taskrun delete --keep=2 -t=taskDog -n=ns -f ; ", + "tkn pipelinerun delete --keep-since=300 -n=ns -f ; tkn taskrun delete --keep-since=300 -t=taskCat -n=ns -f ; tkn taskrun delete --keep-since=300 -t=taskDog -n=ns -f ; ", + } + ns := "ns" + configs := []*pruneConfigPerNS{ + { + config: v1alpha1.Prune{ + Resources: []string{"pipelinerun", "taskrun"}, + Keep: &keep, + KeepSince: nil, + Schedule: scheduleCommon, }, + relatedPipelineNames: nil, + relatedTaskNames: nil, + }, + { + config: v1alpha1.Prune{ + Resources: []string{"pipelinerun", "taskrun"}, + Keep: &keep, + KeepSince: nil, + Schedule: scheduleCommon, + }, + relatedPipelineNames: []string{"pipelineFoo", "pipelineBar"}, + relatedTaskNames: nil, + }, + { + config: v1alpha1.Prune{ + Resources: []string{"pipelinerun", "taskrun"}, + Keep: &keep, + KeepSince: nil, + Schedule: scheduleCommon, + }, + relatedPipelineNames: []string{"pipelineFoo", "pipelineBar"}, + relatedTaskNames: []string{"taskCat", "taskDog"}, + }, + { + config: v1alpha1.Prune{ + Resources: []string{"pipelinerun", "taskrun"}, + Keep: &keep, + KeepSince: nil, + Schedule: scheduleCommon, + }, + relatedPipelineNames: nil, + relatedTaskNames: []string{"taskCat", "taskDog"}, + }, + { + config: v1alpha1.Prune{ + Resources: []string{"pipelinerun", "taskrun"}, + Keep: &keep, + KeepSince: &keepsince, + Schedule: scheduleCommon, + }, + relatedPipelineNames: nil, + relatedTaskNames: []string{"taskCat", "taskDog"}, + }, + { + config: v1alpha1.Prune{ + Resources: []string{"pipelinerun", "taskrun"}, + Keep: nil, + KeepSince: &keepsince, + Schedule: scheduleCommon, + }, + relatedPipelineNames: nil, + relatedTaskNames: []string{"taskCat", "taskDog"}, }, } - nsObj1 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-one"}} + for i, config := range configs { + cmd := pruneCommand(config, ns) + assert.Equal(t, cmd, expected[i]) + } +} - var keep = uint(2) - pru := v1alpha1.Prune{ - Resources: resource, +func TestAnnotationCmd(t *testing.T) { + keep := uint(3) + annoCommonSchedulePNames := map[string]string{pruneSchedule: scheduleCommon, pruneResourcesPipelineNames: "pa, pb"} + annoUniqueSchedule := map[string]string{pruneSchedule: scheduleUnique} + //no need to add preune resource- pr as its in default + annoPNames := map[string]string{pruneResourcesPipelineNames: "pipelineA, pipelineB"} + // prune resource overridden by annotatoins + annoTNames := map[string]string{pruneResourcesTaskNames: "taskA, taskB", pruneResources: "taskrun"} + // need to add resources as its overridden by annotations + annoTNamesPNames := 
map[string]string{pruneResourcesPipelineNames: "pipelineA, pipelineB", pruneResourcesTaskNames: "taskA, taskB", pruneResources: "taskrun, pipelinerun"} + annoCommonSchedule := map[string]string{pruneSchedule: scheduleCommon} + //keepsince should appear in cmd + annoKeepSince := map[string]string{pruneKeepSince: "3200"} + // only keep should appear in cmd + annoKeepSinceAndKeep := map[string]string{pruneKeepSince: "3200", pruneKeep: "5"} + annoResourceTr := map[string]string{pruneResources: "taskrun"} + annoResourceTrPr := map[string]string{pruneResources: "taskrun, pipelinerun"} + annoSkip := map[string]string{pruneSkip: "true"} + defaultPrune := &v1alpha1.Prune{ + Resources: []string{"pipelinerun"}, Keep: &keep, - Schedule: "*/5 * * * *", + Schedule: scheduleCommon, } - client := fake.NewSimpleClientset(cronJob, nsObj1) - nsList := []string{"ns-one"} - if err := createCronJob(client, context.TODO(), pru, nsList[0], nsList, metav1.OwnerReference{}, "some-image"); err != nil { - t.Error("failed creating cronjob") + config := &v1alpha1.TektonConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: "config", + }, + Spec: v1alpha1.TektonConfigSpec{ + Profile: "all", + Pruner: *defaultPrune, + CommonSpec: v1alpha1.CommonSpec{TargetNamespace: "openshift-pipelines"}, + }, } - cron, err := client.BatchV1beta1().CronJobs(nsList[0]).Get(context.TODO(), cronName, metav1.GetOptions{}) + client := fake.NewSimpleClientset( + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "openshift-api"}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "openshift-api-url"}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "kube-system"}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "kube-api"}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-one"}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-two", Annotations: annoUniqueSchedule}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-three", Annotations: annoCommonSchedulePNames}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-four", Annotations: annoCommonSchedule}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-five", Annotations: annoPNames}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-six", Annotations: annoSkip}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-seven", Annotations: annoKeepSince}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-eight", Annotations: annoKeepSinceAndKeep}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-nine", Annotations: annoResourceTr}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-ten", Annotations: annoResourceTrPr}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-eleven", Annotations: annoTNames}}, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-twelve", Annotations: annoTNamesPNames}}, + ) + expected := map[string]string{ + "one-" + scheduleCommon: "tkn pipelinerun delete --keep=3 -n=ns-one -f ; ", + "two-" + scheduleUnique: "tkn pipelinerun delete --keep=3 -n=ns-two -f ; ", + "three-" + scheduleCommon: "tkn pipelinerun delete --keep=3 -p=pa -n=ns-three -f ; tkn pipelinerun delete --keep=3 -p=pb -n=ns-three -f ; ", + "four-" + scheduleCommon: "tkn pipelinerun delete --keep=3 -n=ns-four -f ; ", + "five-" + scheduleCommon: "tkn pipelinerun delete --keep=3 -p=pipelineA -n=ns-five -f ; tkn pipelinerun delete --keep=3 -p=pipelineB -n=ns-five -f ; ", + "seven-" + scheduleCommon: "tkn pipelinerun delete --keep-since=3200 -n=ns-seven -f ; ", + 
"eight-" + scheduleCommon: "tkn pipelinerun delete --keep=5 -n=ns-eight -f ; ", + "nine-" + scheduleCommon: "tkn taskrun delete --keep=3 -n=ns-nine -f ; ", + "ten-" + scheduleCommon: "tkn taskrun delete --keep=3 -n=ns-ten -f ; tkn pipelinerun delete --keep=3 -n=ns-ten -f ; ", + "eleven-" + scheduleCommon: "tkn taskrun delete --keep=3 -t=taskA -n=ns-eleven -f ; tkn taskrun delete --keep=3 -t=taskB -n=ns-eleven -f ; ", + "twelve-" + scheduleCommon: "tkn taskrun delete --keep=3 -t=taskA -n=ns-twelve -f ; tkn taskrun delete --keep=3 -t=taskB -n=ns-twelve -f ; tkn pipelinerun delete --keep=3 -p=pipelineA -n=ns-twelve -f ; tkn pipelinerun delete --keep=3 -p=pipelineB -n=ns-twelve -f ; ", + } + os.Setenv(JobsTKNImageName, "some") + + err := Prune(client, context.TODO(), config) if err != nil { - t.Error("failed getting cronjob") + assert.Error(t, err, "unable to get ns list") } - if cron.Name != cronName { - t.Error("cronjob not matched") + cronjobs, err := client.BatchV1beta1().CronJobs("").List(context.TODO(), metav1.ListOptions{}) + if err != nil { + assert.Error(t, err, "unable to get ns list") + } + // Only one ns with unique schedule than default + if len(cronjobs.Items) != 2 { + assert.Error(t, err, "unable to get ns list") } - jobName := "pruner-tkn-" + nsList[0] - nameAfterRemovingRand := cron.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Name[:len(jobName)] - if nameAfterRemovingRand != jobName { - t.Error("Job Name not matched") + for _, cronjob := range cronjobs.Items { + for _, container := range cronjob.Spec.JobTemplate.Spec.Template.Spec.Containers { + if _, ok := expected[container.Name[14:len(container.Name)-5]+cronjob.Spec.Schedule]; ok { + if expected[container.Name[14:len(container.Name)-5]+cronjob.Spec.Schedule] != strings.Join(container.Args, " ") { + msg := fmt.Sprintf("expected : %s\n actual : %s \n", expected[container.Name[14:len(container.Name)-5]+cronjob.Spec.Schedule], strings.Join(container.Args, " ")) + assert.Error(t, errors.New("command created is not as expected"), msg) + } + } + } } } diff --git a/pkg/reconciler/kubernetes/tektonconfig/controller.go b/pkg/reconciler/kubernetes/tektonconfig/controller.go index 88acfc6c8f..c56a4c5a23 100644 --- a/pkg/reconciler/kubernetes/tektonconfig/controller.go +++ b/pkg/reconciler/kubernetes/tektonconfig/controller.go @@ -18,7 +18,6 @@ package tektonconfig import ( "context" - "k8s.io/client-go/tools/cache" "github.com/tektoncd/operator/pkg/apis/operator/v1alpha1" @@ -31,7 +30,6 @@ import ( // NewController initializes the controller and is called by the generated code // Registers eventhandlers to enqueue events func NewController(ctx context.Context, cmw configmap.Watcher) *controller.Impl { - ctrl := tektonconfig.NewExtensibleController(KubernetesExtension)(ctx, cmw) tektonDashboardinformer.Get(ctx).Informer().AddEventHandler(cache.FilteringResourceEventHandler{ FilterFunc: controller.FilterControllerGVK(v1alpha1.SchemeGroupVersion.WithKind("TektonConfig")), diff --git a/pkg/reconciler/openshift/tektonconfig/controller.go b/pkg/reconciler/openshift/tektonconfig/controller.go index c667921d92..b6a445e65e 100644 --- a/pkg/reconciler/openshift/tektonconfig/controller.go +++ b/pkg/reconciler/openshift/tektonconfig/controller.go @@ -21,23 +21,16 @@ import ( "github.com/tektoncd/operator/pkg/apis/operator/v1alpha1" tektonAddoninformer "github.com/tektoncd/operator/pkg/client/injection/informers/operator/v1alpha1/tektonaddon" - "github.com/tektoncd/operator/pkg/reconciler/common" 
"github.com/tektoncd/operator/pkg/reconciler/shared/tektonconfig" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/cache" - namespaceinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/namespace" "knative.dev/pkg/configmap" "knative.dev/pkg/controller" - "knative.dev/pkg/kmeta" ) // NewController initializes the controller and is called by the generated code // Registers eventhandlers to enqueue events func NewController(ctx context.Context, cmw configmap.Watcher) *controller.Impl { ctrl := tektonconfig.NewExtensibleController(OpenShiftExtension)(ctx, cmw) - namespaceInformer := namespaceinformer.Get(ctx) - namespaceInformer.Informer().AddEventHandler(controller.HandleAll(enqueueCustomName(ctrl, common.ConfigResourceName))) - tektonAddoninformer.Get(ctx).Informer().AddEventHandler(cache.FilteringResourceEventHandler{ FilterFunc: controller.FilterControllerGVK(v1alpha1.SchemeGroupVersion.WithKind("TektonConfig")), Handler: controller.HandleAll(ctrl.EnqueueControllerOf), @@ -45,19 +38,3 @@ func NewController(ctx context.Context, cmw configmap.Watcher) *controller.Impl return ctrl } - -// enqueueCustomName adds an event with name `config` in work queue so that -// whenever a namespace event occurs, the TektonConfig reconciler get triggered. -// This is required because we want to get our TektonConfig reconciler triggered -// for already existing and new namespaces, without manual intervention like adding -// a label/annotation on namespace to make it manageable by Tekton controller. -// This will also filter the namespaces by regex `^(openshift|kube)-` -// and enqueue only when namespace doesn't match the regex -func enqueueCustomName(impl *controller.Impl, name string) func(obj interface{}) { - return func(obj interface{}) { - object, err := kmeta.DeletionHandlingAccessor(obj) - if err == nil && !nsRegex.MatchString(object.GetName()) { - impl.EnqueueKey(types.NamespacedName{Namespace: "", Name: name}) - } - } -} diff --git a/pkg/reconciler/shared/tektonconfig/controller.go b/pkg/reconciler/shared/tektonconfig/controller.go index 4aed1e80ec..254b84f4f1 100644 --- a/pkg/reconciler/shared/tektonconfig/controller.go +++ b/pkg/reconciler/shared/tektonconfig/controller.go @@ -18,6 +18,8 @@ package tektonconfig import ( "context" + "k8s.io/apimachinery/pkg/types" + namespaceinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/namespace" "os" "github.com/go-logr/zapr" @@ -47,6 +49,8 @@ func NewExtensibleController(generator common.ExtensionGenerator) injection.Cont tektonPipelineInformer := tektonPipelineinformer.Get(ctx) tektonTriggerInformer := tektonTriggerinformer.Get(ctx) deploymentInformer := deploymentinformer.Get(ctx) + namespaceInformer := namespaceinformer.Get(ctx) + kubeClient := kubeclient.Get(ctx) logger := logging.FromContext(ctx) @@ -87,6 +91,8 @@ func NewExtensibleController(generator common.ExtensionGenerator) injection.Cont Handler: controller.HandleAll(impl.EnqueueControllerOf), }) + namespaceInformer.Informer().AddEventHandler(controller.HandleAll(enqueueCustomName(impl, common.ConfigResourceName))) + if os.Getenv("AUTOINSTALL_COMPONENTS") == "true" { // try to ensure that there is an instance of tektonConfig newTektonConfig(operatorclient.Get(ctx), kubeclient.Get(ctx), manifest).ensureInstance(ctx) @@ -95,3 +101,14 @@ func NewExtensibleController(generator common.ExtensionGenerator) injection.Cont return impl } } + +// enqueueCustomName adds an event with name `config` in work queue so that +// whenever a namespace event 
occurs, the TektonConfig reconciler gets triggered. +// This is required because we want to get our TektonConfig reconciler triggered +// for already existing and new namespaces, without manual intervention like adding +// a label/annotation on a namespace to make it manageable by the Tekton controller. +func enqueueCustomName(impl *controller.Impl, name string) func(obj interface{}) { + return func(obj interface{}) { + impl.EnqueueKey(types.NamespacedName{Namespace: "", Name: name}) + } +} diff --git a/third_party/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/third_party/github.com/hashicorp/golang-lru/simplelru/lru_interface.go index a0b97e3f77..92d70934d6 100644 --- a/third_party/github.com/hashicorp/golang-lru/simplelru/lru_interface.go +++ b/third_party/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -34,6 +34,6 @@ type LRUCache interface { // Clears all cache entries. Purge() - // Resizes cache, returning number evicted - Resize(int) int + // Resizes cache, returning number evicted + Resize(int) int }
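
For reference, the keep-since support added in tektonconfig_types.go can be set instead of keep. A minimal sketch of a TektonConfig built against these types; the target namespace, schedule, and value here are illustrative and not taken from the patch:

package main

import (
	"fmt"

	"github.com/tektoncd/operator/pkg/apis/operator/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// KeepSince is expressed in minutes; 1440 keeps roughly the last day of runs
	// and, per the validation change above, it may be set without Keep.
	keepSince := uint(1440)
	cfg := &v1alpha1.TektonConfig{
		ObjectMeta: metav1.ObjectMeta{Name: "config"},
		Spec: v1alpha1.TektonConfigSpec{
			Profile:    "all",
			CommonSpec: v1alpha1.CommonSpec{TargetNamespace: "tekton-pipelines"},
			Pruner: v1alpha1.Prune{
				Resources: []string{"pipelinerun", "taskrun"},
				KeepSince: &keepSince,
				Schedule:  "0 8 * * *",
			},
		},
	}
	fmt.Println(cfg.Spec.Pruner.Schedule, *cfg.Spec.Pruner.KeepSince)
}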
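
The per-namespace behaviour is driven entirely by the annotation keys defined in prune.go (operator.tekton.dev/prune.skip, prune.resources, prune.resources.pipelineNames, prune.resources.taskNames, prune.keep, prune.keepsince, prune.schedule). A hedged client-go sketch of opting a namespace into its own pruning settings; annotateForPruning, the namespace name, and the values are hypothetical, only the annotation keys come from the patch:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// annotateForPruning is a hypothetical helper, not part of this PR: it patches
// onto an existing namespace the annotations that prunableNamespaces() reads.
func annotateForPruning(ctx context.Context, kc kubernetes.Interface, ns string) error {
	patch := []byte(`{"metadata":{"annotations":{` +
		`"operator.tekton.dev/prune.schedule":"0 1 * * *",` +
		`"operator.tekton.dev/prune.keep":"5",` +
		`"operator.tekton.dev/prune.resources":"pipelinerun,taskrun"}}}`)
	_, err := kc.CoreV1().Namespaces().Patch(ctx, ns, types.StrategicMergePatchType, patch, metav1.PatchOptions{})
	return err
}

func main() {
	// Exercised against a fake clientset here; a real client is used the same way.
	kc := fake.NewSimpleClientset(&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "team-a"}})
	if err := annotateForPruning(context.TODO(), kc, "team-a"); err != nil {
		panic(err)
	}
	got, _ := kc.CoreV1().Namespaces().Get(context.TODO(), "team-a", metav1.GetOptions{})
	fmt.Println(got.Annotations["operator.tekton.dev/prune.keep"]) // 5
}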
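
And a sketch of what pruneCommand generates for a namespace that keeps runs by age rather than by count. Because pruneCommand and pruneConfigPerNS are unexported, a snippet like this would have to live in a _test.go file under pkg/reconciler/common; the namespace and task names are made up:

package common

import (
	"fmt"

	"github.com/tektoncd/operator/pkg/apis/operator/v1alpha1"
)

// ExamplePruneCommandKeepSince shows the tkn invocation built when only
// keep-since is set and the namespace lists related task names.
func ExamplePruneCommandKeepSince() {
	keepSince := uint(300)
	cfg := &pruneConfigPerNS{
		config: v1alpha1.Prune{
			Resources: []string{"taskrun"},
			KeepSince: &keepSince,
		},
		relatedTaskNames: []string{"build", "deploy"},
	}
	fmt.Print(pruneCommand(cfg, "team-a"))
	// Output: tkn taskrun delete --keep-since=300 -t=build -n=team-a -f ; tkn taskrun delete --keep-since=300 -t=deploy -n=team-a -f ;
}

The resulting string has the same shape as the commands asserted in TestPruneCommands in this patch, one delete invocation per related task name.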