From 356cde2aa3708f40809a79d155a5b2075f38977d Mon Sep 17 00:00:00 2001
From: Jan Chaloupka
Date: Mon, 27 Feb 2023 15:05:21 +0100
Subject: [PATCH 01/15] e2e: inject static config manifests if present

---
 test/extended/apiserver/api_requests.go       |    4 +-
 .../apiserver/graceful_termination.go         |    2 +-
 test/extended/apiserver/kubeconfigs.go        |    2 +-
 test/extended/authentication/front_proxy.go   |    2 +-
 test/extended/authorization/authorization.go  |    4 +-
 test/extended/baremetal/hosts.go              |    6 +-
 test/extended/ci/job_names.go                 |    4 +-
 test/extended/cli/explain.go                  |    2 +-
 test/extended/cli/mustgather.go               |    6 +-
 test/extended/cloud_controller_manager/ccm.go |    2 +-
 test/extended/cmd/cmd.go                      |    2 +-
 test/extended/csrapprover/csrapprover.go      |    4 +-
 test/extended/etcd/etcd_test_runner.go        |    2 +-
 test/extended/etcd/leader_changes.go          |    2 +-
 test/extended/etcd/vertical_scaling.go        |    2 +-
 test/extended/networking/egress_firewall.go   |    2 +-
 test/extended/networking/egressip.go          |    2 +-
 test/extended/oauth/client_secret.go          |    2 +-
 test/extended/oauth/oauthcertfallback.go      |    2 +-
 test/extended/oauth/well_known.go             |    2 +-
 test/extended/olm/olm.go                      |    4 +-
 test/extended/operators/daemon_set.go         |    2 +-
 test/extended/prometheus/prometheus.go        |    2 +-
 test/extended/prometheus/prometheus_builds.go |    2 +-
 test/extended/router/grpc-interop.go          |    2 +-
 test/extended/router/h2spec.go                |    2 +-
 test/extended/router/headers.go               |    2 +-
 test/extended/router/idle.go                  |    2 +-
 test/extended/router/metrics.go               |    2 +-
 test/extended/security/fips.go                |    2 +-
 test/extended/single_node/topology.go         |    2 +-
 .../generated/zz_generated.annotations.go     | 1652 ++---------------
 test/extended/util/annotate/rules.go          |   10 +-
 test/extended/util/client.go                  |   64 +-
 test/extended/util/configv1shim.go            |  271 +++
 test/extended/util/configv1shim_test.go       |   63 +
 test/extended/util/framework.go               |   73 +
 .../versioned/fake/clientset_generated.go     |   76 +
 .../config/clientset/versioned/fake/doc.go    |    4 +
 .../clientset/versioned/fake/register.go      |   42 +
 .../versioned/typed/config/v1/fake/doc.go     |    4 +
 .../typed/config/v1/fake/fake_apiserver.go    |  163 ++
 .../config/v1/fake/fake_authentication.go     |  163 ++
 .../typed/config/v1/fake/fake_build.go        |  130 ++
 .../config/v1/fake/fake_clusteroperator.go    |  163 ++
 .../config/v1/fake/fake_clusterversion.go     |  163 ++
 .../config/v1/fake/fake_config_client.go      |  104 ++
 .../typed/config/v1/fake/fake_console.go      |  163 ++
 .../typed/config/v1/fake/fake_dns.go          |  163 ++
 .../typed/config/v1/fake/fake_featuregate.go  |  163 ++
 .../typed/config/v1/fake/fake_image.go        |  163 ++
 .../config/v1/fake/fake_imagecontentpolicy.go |  130 ++
 .../v1/fake/fake_imagedigestmirrorset.go      |  163 ++
 .../config/v1/fake/fake_imagetagmirrorset.go  |  163 ++
 .../config/v1/fake/fake_infrastructure.go     |  163 ++
 .../typed/config/v1/fake/fake_ingress.go      |  163 ++
 .../typed/config/v1/fake/fake_network.go      |  163 ++
 .../typed/config/v1/fake/fake_node.go         |  163 ++
 .../typed/config/v1/fake/fake_oauth.go        |  163 ++
 .../typed/config/v1/fake/fake_operatorhub.go  |  163 ++
 .../typed/config/v1/fake/fake_project.go      |  163 ++
 .../typed/config/v1/fake/fake_proxy.go        |  163 ++
 .../typed/config/v1/fake/fake_scheduler.go    |  163 ++
 .../typed/config/v1alpha1/fake/doc.go         |    4 +
 .../v1alpha1/fake/fake_config_client.go       |   24 +
 .../v1alpha1/fake/fake_insightsdatagather.go  |  163 ++
 vendor/modules.txt                            |    3 +
 67 files changed, 4385 insertions(+), 1609 deletions(-)
 create mode 100644 test/extended/util/configv1shim.go
 create mode 100644 test/extended/util/configv1shim_test.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/fake/clientset_generated.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/fake/doc.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/fake/register.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/doc.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_apiserver.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_authentication.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_build.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_clusteroperator.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_clusterversion.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_console.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_dns.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_featuregate.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_image.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagecontentpolicy.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagedigestmirrorset.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagetagmirrorset.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_infrastructure.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_ingress.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_network.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_node.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_oauth.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_operatorhub.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_project.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_proxy.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_scheduler.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/doc.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_config_client.go
 create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_insightsdatagather.go
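The subject line and the diffstat above tell the story: a new test/extended/util/configv1shim.go (271 lines) plus the vendored fake config clientset let the e2e utilities answer config.openshift.io reads from static, on-disk manifests when the live API group is not served. A minimal sketch of that idea follows; it assumes only the generated fake clientset's NewSimpleClientset constructor, and the package name, helper name, single-manifest scope, and YAML decoding are illustrative, not the patch's actual implementation:

package e2eutil // hypothetical package, not part of the patch

import (
	"os"

	configv1 "github.com/openshift/api/config/v1"
	configclient "github.com/openshift/client-go/config/clientset/versioned"
	fakeconfig "github.com/openshift/client-go/config/clientset/versioned/fake"
	"sigs.k8s.io/yaml"
)

// configClientFromStaticManifest (hypothetical) decodes one static
// Infrastructure manifest and seeds a fake clientset with it. The generated
// fake implements the same versioned Interface as the real client, so code
// that only reads config resources works against either.
func configClientFromStaticManifest(path string) (configclient.Interface, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	infra := &configv1.Infrastructure{}
	if err := yaml.Unmarshal(data, infra); err != nil {
		return nil, err
	}
	// The fake's object tracker serves Get/List/Watch for the seeded object.
	return fakeconfig.NewSimpleClientset(infra), nil
}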
diff --git a/test/extended/apiserver/api_requests.go b/test/extended/apiserver/api_requests.go
index 4c6264a43693..b4bc487fa7c7 100644
--- a/test/extended/apiserver/api_requests.go
+++ b/test/extended/apiserver/api_requests.go
@@ -25,7 +25,7 @@ var _ = g.Describe("[sig-arch][Late]", func() {
 
 	oc := exutil.NewCLIWithoutNamespace("api-requests")
 
-	g.It("clients should not use APIs that are removed in upcoming releases [apigroup:config.openshift.io]", func() {
+	g.It("clients should not use APIs that are removed in upcoming releases [apigroup:apiserver.openshift.io]", func() {
 		ctx := context.Background()
 		apirequestCountClient, err := apiserverclientv1.NewForConfig(oc.AdminConfig())
 		o.Expect(err).NotTo(o.HaveOccurred())
@@ -91,7 +91,7 @@ var _ = g.Describe("[sig-arch][Late]", func() {
 		}
 	})
 
-	g.It("operators should not create watch channels very often [apigroup:config.openshift.io]", func() {
+	g.It("operators should not create watch channels very often [apigroup:apiserver.openshift.io]", func() {
 		ctx := context.Background()
 		apirequestCountClient, err := apiserverclientv1.NewForConfig(oc.AdminConfig())
 		o.Expect(err).NotTo(o.HaveOccurred())
diff --git a/test/extended/apiserver/graceful_termination.go b/test/extended/apiserver/graceful_termination.go
index baf25fe4ff38..209824b0f773 100644
--- a/test/extended/apiserver/graceful_termination.go
+++ b/test/extended/apiserver/graceful_termination.go
@@ -144,7 +144,7 @@ var _ = g.Describe("[sig-api-machinery][Feature:APIServer][Late]", func() {
 		}
 	})
 
-	g.It("API LBs follow /readyz of kube-apiserver and stop sending requests before server shutdowns for external clients [apigroup:config.openshift.io]", func() {
+	g.It("API LBs follow /readyz of kube-apiserver and stop sending requests before server shutdowns for external clients", func() {
 		controlPlaneTopology, err := exutil.GetControlPlaneTopology(oc)
 		o.Expect(err).NotTo(o.HaveOccurred())
diff --git a/test/extended/apiserver/kubeconfigs.go b/test/extended/apiserver/kubeconfigs.go
index fdd50d8898d9..7f17d8fad337 100644
--- a/test/extended/apiserver/kubeconfigs.go
+++ b/test/extended/apiserver/kubeconfigs.go
@@ -31,7 +31,7 @@ var _ = g.Describe("[Conformance][sig-api-machinery][Feature:APIServer] local ku
 		"localhost-recovery.kubeconfig",
 	} {
 		kubeconfig := kc
-		g.It(fmt.Sprintf("%q should be present on all masters and work [apigroup:config.openshift.io]", kubeconfig), func() {
+		g.It(fmt.Sprintf("%q should be present on all masters and work", kubeconfig), func() {
 			// external controlplane topology doesn't have master nodes
 			controlPlaneTopology, err := exutil.GetControlPlaneTopology(oc)
 			o.Expect(err).NotTo(o.HaveOccurred())
diff --git a/test/extended/authentication/front_proxy.go b/test/extended/authentication/front_proxy.go
index 83787b669f9b..f9e376f228e0 100644
--- a/test/extended/authentication/front_proxy.go
+++ b/test/extended/authentication/front_proxy.go
@@ -26,7 +26,7 @@ var _ = g.Describe("[sig-auth][Feature:Authentication] ", func() {
 	oc := exutil.NewCLI("project-api")
 
 	g.Describe("TestFrontProxy", func() {
-		g.It(fmt.Sprintf("should succeed [apigroup:config.openshift.io]"), func() {
+		g.It(fmt.Sprintf("should succeed"), func() {
 			controlPlaneTopology, err := exutil.GetControlPlaneTopology(oc)
 			o.Expect(err).NotTo(o.HaveOccurred())
diff --git a/test/extended/authorization/authorization.go b/test/extended/authorization/authorization.go
index 80c5d4f9a941..2d0de4a00013 100644
--- a/test/extended/authorization/authorization.go
+++ b/test/extended/authorization/authorization.go
@@ -233,7 +233,7 @@ func (test resourceAccessReviewTest) run() {
 	failMessage := ""
 	var err error
 
-	g.By("resourceAccessReviewTest - "+test.description+" [apigroup:config.openshift.io]", func() {
+	g.By("resourceAccessReviewTest - "+test.description, func() {
 		// keep trying the test until you get a success or you timeout.  Every time you have a failure, set the fail message
 		// so that if you never have a success, we can call t.Errorf with a reasonable message
 		// exiting the poll with `failMessage=""` indicates success.
@@ -314,7 +314,7 @@ func (test localResourceAccessReviewTest) run() {
 	failMessage := ""
 	var err error
 
-	g.By("localResourceAccessReviewTest - "+test.description+" [apigroup:authorization.openshift.io][apigroup:config.openshift.io]", func() {
+	g.By("localResourceAccessReviewTest - "+test.description+" [apigroup:authorization.openshift.io]", func() {
 		// keep trying the test until you get a success or you timeout.  Every time you have a failure, set the fail message
 		// so that if you never have a success, we can call t.Errorf with a reasonable message
 		// exiting the poll with `failMessage=""` indicates success.
diff --git a/test/extended/baremetal/hosts.go b/test/extended/baremetal/hosts.go
index 9d98f5691561..5f26edf235b9 100644
--- a/test/extended/baremetal/hosts.go
+++ b/test/extended/baremetal/hosts.go
@@ -14,7 +14,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-var _ = g.Describe("[sig-installer][Feature:baremetal] Baremetal/OpenStack/vSphere/None/AWS/Azure/GCP platforms [apigroup:config.openshift.io]", func() {
+var _ = g.Describe("[sig-installer][Feature:baremetal] Baremetal/OpenStack/vSphere/None/AWS/Azure/GCP platforms", func() {
 	defer g.GinkgoRecover()
 
 	var (
@@ -37,7 +37,7 @@ var _ = g.Describe("[sig-installer][Feature:baremetal] Baremetal/OpenStack/vSphe
 	})
 })
 
-var _ = g.Describe("[sig-installer][Feature:baremetal] Baremetal platform should [apigroup:config.openshift.io]", func() {
+var _ = g.Describe("[sig-installer][Feature:baremetal] Baremetal platform should", func() {
 	defer g.GinkgoRecover()
 
 	oc := exutil.NewCLI("baremetal")
@@ -147,7 +147,7 @@ var _ = g.Describe("[sig-installer][Feature:baremetal] Baremetal platform should
 
 // This block must be used for the serial tests. Any eventual extra worker deployed will be
 // automatically deleted during the AfterEach
-var _ = g.Describe("[sig-installer][Feature:baremetal][Serial] Baremetal platform should [apigroup:config.openshift.io]", func() {
+var _ = g.Describe("[sig-installer][Feature:baremetal][Serial] Baremetal platform should", func() {
 	defer g.GinkgoRecover()
 
 	var (
diff --git a/test/extended/ci/job_names.go b/test/extended/ci/job_names.go
index 98887905dee0..e610bd013f5f 100644
--- a/test/extended/ci/job_names.go
+++ b/test/extended/ci/job_names.go
@@ -33,7 +33,7 @@ var _ = g.Describe("[sig-ci] [Early] prow job name", func() {
 	}
 	isPeriodic := strings.HasPrefix(jobName, "periodic-")
 
-	g.It("should match platform type [apigroup:config.openshift.io]", func() {
+	g.It("should match platform type", func() {
 		if jobName == "" {
 			e2eskipper.Skipf("JOB_NAME env var not set, skipping")
 		} else if strings.Contains(jobName, "agnostic") {
@@ -66,7 +66,7 @@ var _ = g.Describe("[sig-ci] [Early] prow job name", func() {
 
 	})
 
-	g.It("should match network type [apigroup:config.openshift.io]", func() {
+	g.It("should match network type", func() {
 		if jobName == "" {
 			e2eskipper.Skipf("JOB_NAME env var not set, skipping")
 		}
diff --git a/test/extended/cli/explain.go b/test/extended/cli/explain.go
index a73a4b57bd96..f518cf0630d3 100644
--- a/test/extended/cli/explain.go
+++ b/test/extended/cli/explain.go
@@ -569,7 +569,7 @@ var _ = g.Describe("[sig-cli] oc explain", func() {
 		})
 	}
 
-	g.It("should contain proper spec+status for CRDs [apigroup:config.openshift.io]", func() {
+	g.It("should contain proper spec+status for CRDs", func() {
 		crdClient := apiextensionsclientset.NewForConfigOrDie(oc.AdminConfig())
 		crdTypesTest := getCrdTypes(oc)
 		controlPlaneTopology, err := exutil.GetControlPlaneTopology(oc)
diff --git a/test/extended/cli/mustgather.go b/test/extended/cli/mustgather.go
index 8125ea7d7a59..0fa338bf742b 100644
--- a/test/extended/cli/mustgather.go
+++ b/test/extended/cli/mustgather.go
@@ -36,7 +36,7 @@ var _ = g.Describe("[sig-cli] oc adm must-gather", func() {
 		o.Expect(err).NotTo(o.HaveOccurred())
 	})
 
-	g.It("runs successfully [apigroup:config.openshift.io]", func() {
+	g.It("runs successfully", func() {
 		controlPlaneTopology, err := exutil.GetControlPlaneTopology(oc)
 		o.Expect(err).NotTo(o.HaveOccurred())
 		if *controlPlaneTopology == configv1.ExternalTopologyMode {
@@ -101,7 +101,7 @@ var _ = g.Describe("[sig-cli] oc adm must-gather", func() {
 		}
 	})
 
-	g.It("runs successfully with options [apigroup:config.openshift.io]", func() {
+	g.It("runs successfully with options", func() {
 		controlPlaneTopology, err := exutil.GetControlPlaneTopology(oc)
 		o.Expect(err).NotTo(o.HaveOccurred())
 		if *controlPlaneTopology == configv1.ExternalTopologyMode {
@@ -301,7 +301,7 @@ var _ = g.Describe("[sig-cli] oc adm must-gather", func() {
 
 	g.When("looking at the audit logs", func() {
 		g.Describe("[sig-node] kubelet", func() {
-			g.It("runs apiserver processes strictly sequentially in order to not risk audit log corruption [apigroup:config.openshift.io]", func() {
+			g.It("runs apiserver processes strictly sequentially in order to not risk audit log corruption", func() {
 				controlPlaneTopology, err := exutil.GetControlPlaneTopology(oc)
 				o.Expect(err).NotTo(o.HaveOccurred())
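One pattern recurs in nearly every hunk above and below: tests that must keep working on external-control-plane clusters first probe the Infrastructure topology and skip themselves. Condensed into a single hedged sketch (the package and helper names are invented here; the three calls mirror the hunks):

package e2etests // hypothetical package for this sketch

import (
	o "github.com/onsi/gomega"

	configv1 "github.com/openshift/api/config/v1"
	exutil "github.com/openshift/origin/test/extended/util"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)

// skipIfExternalControlPlane (hypothetical) bundles the guard used across
// these tests: resolve the control-plane topology from the cluster
// Infrastructure config, then skip when the control plane runs off-cluster.
func skipIfExternalControlPlane(oc *exutil.CLI) {
	controlPlaneTopology, err := exutil.GetControlPlaneTopology(oc)
	o.Expect(err).NotTo(o.HaveOccurred())
	if *controlPlaneTopology == configv1.ExternalTopologyMode {
		e2eskipper.Skipf("external control-plane topology, skipping")
	}
}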
g.Describe("[sig-cloud-provider][Feature:OpenShiftCloudControllerManager defer g.GinkgoRecover() oc := exutil.NewCLI("ccm") - g.It("Deploy an external cloud provider [apigroup:config.openshift.io][apigroup:machineconfiguration.openshift.io]", func() { + g.It("Deploy an external cloud provider [apigroup:machineconfiguration.openshift.io]", func() { infra, err := oc.AdminConfigClient().ConfigV1().Infrastructures().Get(context.Background(), "cluster", metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) diff --git a/test/extended/cmd/cmd.go b/test/extended/cmd/cmd.go index e45182126f4c..b26e4f271390 100644 --- a/test/extended/cmd/cmd.go +++ b/test/extended/cmd/cmd.go @@ -42,7 +42,7 @@ var _ = g.Describe("[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] te continue } - g.It("test/cmd/"+currFilename+" [apigroup:image.openshift.io][apigroup:config.openshift.io]", func() { + g.It("test/cmd/"+currFilename+" [apigroup:image.openshift.io]", func() { testsDir := exutil.FixturePath("testdata", "cmd", "test", "cmd") oc.AddExplicitResourceToDelete(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}, "", "cmd-"+currFilename[0:len(currFilename)-3]) diff --git a/test/extended/csrapprover/csrapprover.go b/test/extended/csrapprover/csrapprover.go index 8be766fb2deb..c1b119e8f3f8 100644 --- a/test/extended/csrapprover/csrapprover.go +++ b/test/extended/csrapprover/csrapprover.go @@ -37,7 +37,7 @@ var _ = g.Describe("[sig-cluster-lifecycle]", func() { oc := exutil.NewCLIWithPodSecurityLevel("cluster-client-cert", admissionapi.LevelBaseline) defer g.GinkgoRecover() - g.It("Pods cannot access the /config/master API endpoint [apigroup:config.openshift.io]", func() { + g.It("Pods cannot access the /config/master API endpoint", func() { controlPlaneTopology, err := exutil.GetControlPlaneTopology(oc) o.Expect(err).NotTo(o.HaveOccurred()) @@ -73,7 +73,7 @@ var _ = g.Describe("[sig-cluster-lifecycle]", func() { o.Expect(curlOutput).To(o.ContainSubstring("Connection refused")) }) - g.It("CSRs from machines that are not recognized by the cloud provider are not approved [apigroup:config.openshift.io]", func() { + g.It("CSRs from machines that are not recognized by the cloud provider are not approved", func() { controlPlaneTopology, err := exutil.GetControlPlaneTopology(oc) o.Expect(err).NotTo(o.HaveOccurred()) diff --git a/test/extended/etcd/etcd_test_runner.go b/test/extended/etcd/etcd_test_runner.go index d6c3c90bb29b..439676b298f5 100644 --- a/test/extended/etcd/etcd_test_runner.go +++ b/test/extended/etcd/etcd_test_runner.go @@ -30,7 +30,7 @@ var _ = g.Describe("[sig-api-machinery] API data in etcd", func() { oc := exutil.NewCLIWithPodSecurityLevel("etcd-storage-path", psapi.LevelBaseline).AsAdmin() - _ = g.It("should be stored at the correct location and version for all resources [Serial][apigroup:config.openshift.io]", func() { + _ = g.It("should be stored at the correct location and version for all resources [Serial]", func() { controlPlaneTopology, err := exutil.GetControlPlaneTopology(oc) o.Expect(err).NotTo(o.HaveOccurred()) diff --git a/test/extended/etcd/leader_changes.go b/test/extended/etcd/leader_changes.go index c27b386bccce..7046b3586cff 100644 --- a/test/extended/etcd/leader_changes.go +++ b/test/extended/etcd/leader_changes.go @@ -24,7 +24,7 @@ var _ = g.Describe("[sig-etcd] etcd", func() { earlyTimeStamp = time.Now() }) - g.It("leader changes are not excessive [Late][apigroup:config.openshift.io]", func() { + g.It("leader changes are not excessive [Late]", 
func() { controlPlaneTopology, err := exutil.GetControlPlaneTopology(oc) o.Expect(err).NotTo(o.HaveOccurred()) if *controlPlaneTopology == configv1.ExternalTopologyMode { diff --git a/test/extended/etcd/vertical_scaling.go b/test/extended/etcd/vertical_scaling.go index da83d2c5dc42..3b9c31f4951b 100644 --- a/test/extended/etcd/vertical_scaling.go +++ b/test/extended/etcd/vertical_scaling.go @@ -18,7 +18,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" ) -var _ = g.Describe("[sig-etcd][Feature:EtcdVerticalScaling][Suite:openshift/etcd/scaling] etcd [apigroup:config.openshift.io]", func() { +var _ = g.Describe("[sig-etcd][Feature:EtcdVerticalScaling][Suite:openshift/etcd/scaling] etcd", func() { defer g.GinkgoRecover() oc := exutil.NewCLIWithoutNamespace("etcd-scaling").AsAdmin() diff --git a/test/extended/networking/egress_firewall.go b/test/extended/networking/egress_firewall.go index 200691eee969..eb5aac600316 100644 --- a/test/extended/networking/egress_firewall.go +++ b/test/extended/networking/egress_firewall.go @@ -50,7 +50,7 @@ var _ = g.Describe("[sig-network][Feature:EgressFirewall]", func() { ) noegFwoc := exutil.NewCLIWithPodSecurityLevel(noEgressFWE2E, admissionapi.LevelBaseline) noegFwf := noegFwoc.KubeFramework() - g.It("egressFirewall should have no impact outside its namespace [apigroup:config.openshift.io]", func() { + g.It("egressFirewall should have no impact outside its namespace", func() { g.By("creating test pod") pod := "dummy" o.Expect(createTestEgressFw(noegFwf, pod)).To(o.Succeed()) diff --git a/test/extended/networking/egressip.go b/test/extended/networking/egressip.go index eaa3b169a2d8..d0e20af1c2f8 100644 --- a/test/extended/networking/egressip.go +++ b/test/extended/networking/egressip.go @@ -43,7 +43,7 @@ const ( egressUpdateTimeout = 180 ) -var _ = g.Describe("[sig-network][Feature:EgressIP][apigroup:config.openshift.io]", func() { +var _ = g.Describe("[sig-network][Feature:EgressIP][apigroup:operator.openshift.io]", func() { oc := exutil.NewCLIWithPodSecurityLevel(namespacePrefix, admissionapi.LevelPrivileged) portAllocator := NewPortAllocator(egressIPTargetHostPortMin, egressIPTargetHostPortMax) diff --git a/test/extended/oauth/client_secret.go b/test/extended/oauth/client_secret.go index b82f1455b65d..c51cd49c7823 100644 --- a/test/extended/oauth/client_secret.go +++ b/test/extended/oauth/client_secret.go @@ -31,7 +31,7 @@ var _ = g.Describe("[sig-auth][Feature:OAuthServer]", func() { ctx := context.Background() g.Describe("ClientSecretWithPlus", func() { - g.It(fmt.Sprintf("should create oauthclient [apigroup:config.openshift.io][apigroup:oauth.openshift.io][apigroup:user.openshift.io]"), func() { + g.It(fmt.Sprintf("should create oauthclient [apigroup:oauth.openshift.io][apigroup:user.openshift.io]"), func() { controlPlaneTopology, err := exutil.GetControlPlaneTopology(oc) o.Expect(err).NotTo(o.HaveOccurred()) diff --git a/test/extended/oauth/oauthcertfallback.go b/test/extended/oauth/oauthcertfallback.go index 73583edc4470..4c99f705e3be 100644 --- a/test/extended/oauth/oauthcertfallback.go +++ b/test/extended/oauth/oauthcertfallback.go @@ -34,7 +34,7 @@ var _ = g.Describe("[sig-auth][Feature:OAuthServer] OAuth server", func() { defer g.GinkgoRecover() oc := exutil.NewCLI("oauth") - g.It("has the correct token and certificate fallback semantics [apigroup:config.openshift.io][apigroup:user.openshift.io]", func() { + g.It("has the correct token and certificate fallback semantics [apigroup:user.openshift.io]", func() { controlPlaneTopology, err := 
exutil.GetControlPlaneTopology(oc) o.Expect(err).NotTo(o.HaveOccurred()) diff --git a/test/extended/oauth/well_known.go b/test/extended/oauth/well_known.go index 872e7cd04603..857e7f9096c8 100644 --- a/test/extended/oauth/well_known.go +++ b/test/extended/oauth/well_known.go @@ -27,7 +27,7 @@ var _ = g.Describe("[sig-auth][Feature:OAuthServer] well-known endpoint", func() oauthNamespace = "openshift-authentication" ) - g.It("should be reachable [apigroup:config.openshift.io][apigroup:route.openshift.io]", func() { + g.It("should be reachable [apigroup:route.openshift.io]", func() { metadataJSON, err := oc.Run("get").Args("--raw", "/.well-known/oauth-authorization-server").Output() o.Expect(err).NotTo(o.HaveOccurred()) diff --git a/test/extended/olm/olm.go b/test/extended/olm/olm.go index f31ff38134db..43f5af9bf778 100644 --- a/test/extended/olm/olm.go +++ b/test/extended/olm/olm.go @@ -86,7 +86,7 @@ var _ = g.Describe("[sig-operator] OLM should", func() { // OCP-24061 - [bz 1685230] OLM operator should use imagePullPolicy: IfNotPresent // author: bandrade@redhat.com - g.It("have imagePullPolicy:IfNotPresent on thier deployments [apigroup:config.openshift.io]", func() { + g.It("have imagePullPolicy:IfNotPresent on thier deployments", func() { oc := oc.AsAdmin().WithoutNamespace() namespace := "openshift-operator-lifecycle-manager" @@ -142,7 +142,7 @@ var _ = g.Describe("[sig-arch] ocp payload should be based on existing source", // TODO: This test should be more generic and across components // OCP-20981, [BZ 1626434]The olm/catalog binary should output the exact version info // author: jiazha@redhat.com - g.It("OLM version should contain the source commit id [apigroup:config.openshift.io]", func() { + g.It("OLM version should contain the source commit id", func() { oc := oc namespace := "openshift-operator-lifecycle-manager" diff --git a/test/extended/operators/daemon_set.go b/test/extended/operators/daemon_set.go index b5d9b7ae1a79..f6ed034f022f 100644 --- a/test/extended/operators/daemon_set.go +++ b/test/extended/operators/daemon_set.go @@ -41,7 +41,7 @@ var _ = g.Describe("[sig-arch] Managed cluster", func() { // during an upgrade as new pods start and then establish connections. // // Currently only applies to daemonsets that don't explicitly target the control plane. 
- g.It("should only include cluster daemonsets that have maxUnavailable update of 10 or 33 percent [apigroup:config.openshift.io]", func() { + g.It("should only include cluster daemonsets that have maxUnavailable update of 10 or 33 percent", func() { // iterate over the references to find valid images daemonSets, err := oc.KubeFramework().ClientSet.AppsV1().DaemonSets("").List(context.Background(), metav1.ListOptions{}) if err != nil { diff --git a/test/extended/prometheus/prometheus.go b/test/extended/prometheus/prometheus.go index 612c20afe40f..cd511dba8447 100644 --- a/test/extended/prometheus/prometheus.go +++ b/test/extended/prometheus/prometheus.go @@ -203,7 +203,7 @@ var _ = g.Describe("[sig-instrumentation][Late] Alerts", func() { oc = exutil.NewCLIWithoutNamespace("prometheus") ) - g.It("shouldn't report any unexpected alerts in firing or pending state [apigroup:config.openshift.io]", func() { + g.It("shouldn't report any unexpected alerts in firing or pending state", func() { // we only consider samples since the beginning of the test testDuration := exutil.DurationSinceStartInSeconds() alerts.CheckAlerts(alerts.AllowedAlertsDuringConformance, oc.AdminConfig(), oc.NewPrometheusClient(context.TODO()), oc.AdminConfigClient(), testDuration, nil) diff --git a/test/extended/prometheus/prometheus_builds.go b/test/extended/prometheus/prometheus_builds.go index f90e3b24a6ec..85f859da9cec 100644 --- a/test/extended/prometheus/prometheus_builds.go +++ b/test/extended/prometheus/prometheus_builds.go @@ -31,7 +31,7 @@ var _ = g.Describe("[sig-instrumentation][sig-builds][Feature:Builds] Prometheus }) g.Describe("when installed on the cluster", func() { - g.It("should start and expose a secured proxy and verify build metrics [apigroup:config.openshift.io][apigroup:build.openshift.io]", func() { + g.It("should start and expose a secured proxy and verify build metrics [apigroup:build.openshift.io]", func() { controlPlaneTopology, infra_err := exutil.GetControlPlaneTopology(oc) o.Expect(infra_err).NotTo(o.HaveOccurred()) diff --git a/test/extended/router/grpc-interop.go b/test/extended/router/grpc-interop.go index 86922372124a..e83863006fb5 100644 --- a/test/extended/router/grpc-interop.go +++ b/test/extended/router/grpc-interop.go @@ -40,7 +40,7 @@ var _ = g.Describe("[sig-network-edge][Conformance][Area:Networking][Feature:Rou }) g.Describe("The HAProxy router", func() { - g.It("should pass the gRPC interoperability tests [apigroup:config.openshift.io][apigroup:route.openshift.io][apigroup:operator.openshift.io]", func() { + g.It("should pass the gRPC interoperability tests [apigroup:route.openshift.io][apigroup:operator.openshift.io]", func() { isProxyJob, err := exutil.IsClusterProxyEnabled(oc) o.Expect(err).NotTo(o.HaveOccurred(), "failed to get proxy configuration") if isProxyJob { diff --git a/test/extended/router/h2spec.go b/test/extended/router/h2spec.go index 18759cb24950..3c29b8f58b7c 100644 --- a/test/extended/router/h2spec.go +++ b/test/extended/router/h2spec.go @@ -57,7 +57,7 @@ var _ = g.Describe("[sig-network-edge][Conformance][Area:Networking][Feature:Rou }) g.Describe("The HAProxy router", func() { - g.It("should pass the h2spec conformance tests [apigroup:config.openshift.io][apigroup:authorization.openshift.io][apigroup:user.openshift.io][apigroup:security.openshift.io][apigroup:operator.openshift.io]", func() { + g.It("should pass the h2spec conformance tests 
[apigroup:authorization.openshift.io][apigroup:user.openshift.io][apigroup:security.openshift.io][apigroup:operator.openshift.io]", func() { isProxyJob, err := exutil.IsClusterProxyEnabled(oc) o.Expect(err).NotTo(o.HaveOccurred(), "failed to get proxy configuration") if isProxyJob { diff --git a/test/extended/router/headers.go b/test/extended/router/headers.go index 9d03e21da06c..8450cecb2255 100644 --- a/test/extended/router/headers.go +++ b/test/extended/router/headers.go @@ -24,7 +24,7 @@ import ( exutil "github.com/openshift/origin/test/extended/util" ) -var _ = g.Describe("[sig-network][Feature:Router][apigroup:config.openshift.io][apigroup:operator.openshift.io]", func() { +var _ = g.Describe("[sig-network][Feature:Router][apigroup:operator.openshift.io]", func() { defer g.GinkgoRecover() var ( configPath = exutil.FixturePath("testdata", "router", "router-http-echo-server.yaml") diff --git a/test/extended/router/idle.go b/test/extended/router/idle.go index 096f33a7af14..6375d3cc28ef 100644 --- a/test/extended/router/idle.go +++ b/test/extended/router/idle.go @@ -45,7 +45,7 @@ var _ = g.Describe("[sig-network-edge][Conformance][Area:Networking][Feature:Rou }) g.Describe("The HAProxy router", func() { - g.It("should be able to connect to a service that is idled because a GET on the route will unidle it [apigroup:config.openshift.io]", func() { + g.It("should be able to connect to a service that is idled because a GET on the route will unidle it", func() { network, err := oc.AdminConfigClient().ConfigV1().Networks().Get(context.Background(), "cluster", metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred(), "failed to get cluster network configuration") if !(network.Status.NetworkType == "OVNKubernetes" || network.Status.NetworkType == "OpenShiftSDN") { diff --git a/test/extended/router/metrics.go b/test/extended/router/metrics.go index 364634aaf638..453f1b724f80 100644 --- a/test/extended/router/metrics.go +++ b/test/extended/router/metrics.go @@ -37,7 +37,7 @@ import ( routev1client "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1" ) -var _ = g.Describe("[sig-network][Feature:Router][apigroup:config.openshift.io]", func() { +var _ = g.Describe("[sig-network][Feature:Router]", func() { defer g.GinkgoRecover() var ( oc = exutil.NewCLIWithPodSecurityLevel("router-metrics", admissionapi.LevelBaseline) diff --git a/test/extended/security/fips.go b/test/extended/security/fips.go index 6a3c5976713d..973354101097 100644 --- a/test/extended/security/fips.go +++ b/test/extended/security/fips.go @@ -52,7 +52,7 @@ var _ = g.Describe("[sig-arch] [Conformance] FIPS", func() { defer g.GinkgoRecover() oc := exutil.NewCLIWithPodSecurityLevel("fips", admissionapi.LevelPrivileged) - g.It("TestFIPS [apigroup:config.openshift.io]", func() { + g.It("TestFIPS", func() { controlPlaneTopology, err := exutil.GetControlPlaneTopology(oc) o.Expect(err).NotTo(o.HaveOccurred()) clusterAdminKubeClientset := oc.AdminKubeClient() diff --git a/test/extended/single_node/topology.go b/test/extended/single_node/topology.go index 98643e0a8271..a7a6a4f9f75a 100644 --- a/test/extended/single_node/topology.go +++ b/test/extended/single_node/topology.go @@ -128,7 +128,7 @@ func validateDeploymentReplicas(deployment appsv1.Deployment, validateReplicas(deployment.Name, deployment.Namespace, int(*deployment.Spec.Replicas)) } -var _ = ginkgo.Describe("[sig-arch] Cluster topology single node tests [apigroup:config.openshift.io]", func() { +var _ = ginkgo.Describe("[sig-arch] Cluster 
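The 1652-line swing in zz_generated.annotations.go below is mechanical fallout from the renames above: the generated map is keyed by the complete Ginkgo test name, tags included, so dropping or swapping an [apigroup:...] tag in a g.It or g.Describe string retires the old key and the file is regenerated. The shape of the file, reduced to one entry copied from the hunks below:

var Annotations = map[string]string{
	// key: full test name as registered with Ginkgo; value: suite labels
	// appended by the annotate tool when the suite is assembled.
	"[sig-arch] Managed cluster should set requests but not limits": " [Suite:openshift/conformance/parallel]",
}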
diff --git a/test/extended/util/annotate/generated/zz_generated.annotations.go b/test/extended/util/annotate/generated/zz_generated.annotations.go
index ab12137b7c7d..649208ab423b 100644
--- a/test/extended/util/annotate/generated/zz_generated.annotations.go
+++ b/test/extended/util/annotate/generated/zz_generated.annotations.go
@@ -7,28 +7,12 @@ import (
 )
 
 var Annotations = map[string]string{
-	"[Conformance][sig-api-machinery][Feature:APIServer] local kubeconfig \"lb-ext.kubeconfig\" should be present on all masters and work [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel/minimal]",
-
-	"[Conformance][sig-api-machinery][Feature:APIServer] local kubeconfig \"lb-int.kubeconfig\" should be present on all masters and work [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel/minimal]",
-
-	"[Conformance][sig-api-machinery][Feature:APIServer] local kubeconfig \"localhost-recovery.kubeconfig\" should be present on all masters and work [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel/minimal]",
-
-	"[Conformance][sig-api-machinery][Feature:APIServer] local kubeconfig \"localhost.kubeconfig\" should be present on all masters and work [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel/minimal]",
-
-	"[Conformance][sig-sno][Serial] Cluster should allow a fast rollout of kube-apiserver with no pods restarts during API disruption [apigroup:config.openshift.io][apigroup:operator.openshift.io]": " [Suite:openshift/conformance/serial/minimal]",
-
-	"[Serial] [sig-auth][Feature:OAuthServer] [RequestHeaders] [IdP] test RequestHeaders IdP [apigroup:config.openshift.io][apigroup:user.openshift.io][apigroup:apps.openshift.io]": " [Suite:openshift/conformance/serial]",
-
-	"[sig-api-machinery] API data in etcd should be stored at the correct location and version for all resources [Serial][apigroup:config.openshift.io]": " [Suite:openshift/conformance/serial]",
-
 	"[sig-api-machinery] API priority and fairness should ensure that requests can be classified by adding FlowSchema and PriorityLevelConfiguration": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
 	"[sig-api-machinery] API priority and fairness should ensure that requests can't be drowned out (fairness)": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
 	"[sig-api-machinery] API priority and fairness should ensure that requests can't be drowned out (priority)": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
-	"[sig-api-machinery] APIServer CR fields validation additionalCORSAllowedOrigins [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
 	"[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing mutating webhooks should work [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
 
 	"[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing validating webhooks should work [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
@@ -275,62 +259,6 @@ var Annotations = map[string]string{
 
 	"[sig-api-machinery] server version should find the server version [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
 
-	"[sig-api-machinery][Feature:APIServer] TestTLSDefaults": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:APIServer] anonymous browsers should get a 403 from /": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:APIServer] authenticated browser should get a 200 from /": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:APIServer][Late] API LBs follow /readyz of kube-apiserver and don't send request early": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:APIServer][Late] API LBs follow /readyz of kube-apiserver and stop sending requests before server shutdowns for external clients [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:APIServer][Late] API LBs follow /readyz of kube-apiserver and stop sending requests": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:APIServer][Late] kube-apiserver terminates within graceful termination period": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:APIServer][Late] kubelet terminates kube-apiserver gracefully": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:Audit] Basic audit should audit API calls": " [Disabled:SpecialConfig]",
-
-	"[sig-api-machinery][Feature:ClusterResourceQuota] Cluster resource quota should control resource limits across namespaces [apigroup:quota.openshift.io][apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:ResourceQuota] Object count should properly count the number of imagestreams resources [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for apps.openshift.io/v1, Resource=deploymentconfigs [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for build.openshift.io/v1, Resource=buildconfigs [apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for build.openshift.io/v1, Resource=builds [apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for image.openshift.io/v1, Resource=images [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for image.openshift.io/v1, Resource=imagestreams [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for oauth.openshift.io/v1, Resource=oauthaccesstokens [apigroup:oauth.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for oauth.openshift.io/v1, Resource=oauthauthorizetokens [apigroup:oauth.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for oauth.openshift.io/v1, Resource=oauthclientauthorizations [apigroup:oauth.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for oauth.openshift.io/v1, Resource=oauthclients [apigroup:oauth.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for route.openshift.io/v1, Resource=routes [apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for security.openshift.io/v1, Resource=rangeallocations [apigroup:security.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for template.openshift.io/v1, Resource=brokertemplateinstances [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for template.openshift.io/v1, Resource=templateinstances [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for template.openshift.io/v1, Resource=templates [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for user.openshift.io/v1, Resource=groups [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for user.openshift.io/v1, Resource=identities [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for user.openshift.io/v1, Resource=users [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
 	"[sig-apps] ControllerRevision [Serial] should manage the lifecycle of a ControllerRevision [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]",
 
 	"[sig-apps] CronJob should be able to schedule after more than 100 missed schedule": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -547,132 +475,6 @@ var Annotations = map[string]string{
 
 	"[sig-apps] TTLAfterFinished job should be deleted once it finishes after TTL seconds": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs adoption will orphan all RCs and adopt them back when recreated [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs generation should deploy based on a status version bump [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs ignores deployer and lets the config with a NewReplicationControllerCreated reason should let the deployment config with a NewReplicationControllerCreated reason [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs initially should not deploy if pods never transition to ready [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs keep the deployer pod invariant valid should deal with cancellation after deployer pod succeeded [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs keep the deployer pod invariant valid should deal with cancellation of running deployment [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs keep the deployer pod invariant valid should deal with config change in case the deployment is still running [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs paused should disable actions on deployments [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs reaper [Slow] should delete all failed deployer pods and hook pods [apigroup:apps.openshift.io]": "",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs rolled back should rollback to an older deployment [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs should adhere to Three Laws of Controllers [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs should respect image stream tag reference policy resolve the image pull spec [apigroup:apps.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs viewing rollout history should print the rollout history [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs when changing image change trigger should successfully trigger from an updated image [apigroup:apps.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs when run iteratively should immediately start a new deployment [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs when run iteratively should only deploy the last deployment [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs when tagging images should successfully tag the deployed image [apigroup:apps.openshift.io][apigroup:authorization.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs with custom deployments should run the custom deployment steps [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs with enhanced status should include various info in status [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs with env in params referencing the configmap should expand the config map key to a value [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs with failing hook should get all logs from retried hooks [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs with minimum ready seconds set should not transition the deployment to Complete before satisfied [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs with multiple image change triggers should run a successful deployment with a trigger used by different containers [apigroup:apps.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs with multiple image change triggers should run a successful deployment with multiple triggers [apigroup:apps.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs with revision history limits should never persist more old deployments than acceptable after being observed by the controller [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs with test deployments should run a deployment to completion and then scale to zero [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:DeploymentConfig] deploymentconfigs won't deploy RC with unresolved images when patched with empty image [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:Jobs] Users should be able to create and run a job in a user project": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:OpenShiftControllerManager] TestDeployScale [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:OpenShiftControllerManager] TestDeploymentConfigDefaults [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:OpenShiftControllerManager] TestTriggers_MultipleICTs [apigroup:apps.openshift.io][apigroup:images.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:OpenShiftControllerManager] TestTriggers_configChange [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:OpenShiftControllerManager] TestTriggers_imageChange [apigroup:apps.openshift.io][apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:OpenShiftControllerManager] TestTriggers_imageChange_nonAutomatic [apigroup:image.openshift.io][apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-apps][Feature:OpenShiftControllerManager] TestTriggers_manual [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-arch] Cluster topology single node tests [apigroup:config.openshift.io] Verify that OpenShift components deploy one replica in SingleReplica topology mode": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-arch] ClusterOperators [apigroup:config.openshift.io] should define at least one namespace in their lists of related objects": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-arch] ClusterOperators [apigroup:config.openshift.io] should define at least one related object that is not a namespace": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-arch] ClusterOperators [apigroup:config.openshift.io] should define valid related objects": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-arch] Managed cluster should ensure control plane operators do not make themselves unevictable": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-arch] Managed cluster should ensure control plane pods do not run in best-effort QoS": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-arch] Managed cluster should ensure platform components have system-* priority class associated": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-arch] Managed cluster should ensure pods use downstream images from our release image with proper ImagePullPolicy [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-arch] Managed cluster should expose cluster services outside the cluster [apigroup:route.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-arch] Managed cluster should have operators on the cluster version [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-arch] Managed cluster should only include cluster daemonsets that have maxUnavailable update of 10 or 33 percent [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-arch] Managed cluster should recover when operator-owned objects are deleted [Disruptive][apigroup:config.openshift.io]": " [Serial]",
-
-	"[sig-arch] Managed cluster should set requests but not limits": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-arch] [Conformance] FIPS TestFIPS [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel/minimal]",
-
-	"[sig-arch] [Conformance] sysctl pod should not start for sysctl not on whitelist kernel.msgmax": " [Suite:openshift/conformance/parallel/minimal]",
-
-	"[sig-arch] [Conformance] sysctl pod should not start for sysctl not on whitelist net.ipv4.ip_dynaddr": " [Suite:openshift/conformance/parallel/minimal]",
-
-	"[sig-arch] [Conformance] sysctl whitelists kernel.shm_rmid_forced": " [Suite:openshift/conformance/parallel/minimal]",
-
-	"[sig-arch] [Conformance] sysctl whitelists net.ipv4.ip_local_port_range": " [Suite:openshift/conformance/parallel/minimal]",
-
-	"[sig-arch] [Conformance] sysctl whitelists net.ipv4.ip_unprivileged_port_start": " [Suite:openshift/conformance/parallel/minimal]",
-
-	"[sig-arch] [Conformance] sysctl whitelists net.ipv4.ping_group_range": " [Suite:openshift/conformance/parallel/minimal]",
-
-	"[sig-arch] [Conformance] sysctl whitelists net.ipv4.tcp_syncookies": " [Suite:openshift/conformance/parallel/minimal]",
-
-	"[sig-arch] ocp payload should be based on existing source OLM version should contain the source commit id [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-arch][Early] CRDs for openshift.io should have a status in the CRD schema": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-arch][Early] CRDs for openshift.io should have subresource.status": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-arch][Early] Managed cluster should [apigroup:config.openshift.io] start all core operators": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-arch][Feature:ClusterUpgrade] Cluster should remain functional during upgrade [Disruptive]": " [Serial]",
-
-	"[sig-arch][Late] clients should not use APIs that are removed in upcoming releases [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-arch][Late] operators should not create watch channels very often [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
 	"[sig-auth] Certificates API [Privileged:ClusterAdmin] should support CSR API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
 
 	"[sig-auth] Certificates API [Privileged:ClusterAdmin] should support building a client with a CSR": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
@@ -717,138 +519,6 @@ var Annotations = map[string]string{
 
 	"[sig-auth] [Feature:NodeAuthorizer] Getting an existing secret should exit with the Forbidden error": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]",
 
-	"[sig-auth][Feature:Authentication] TestFrontProxy should succeed [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:BootstrapUser] The bootstrap user should successfully login with password decoded from kubeadmin secret [Disruptive]": " [Serial]",
-
-	"[sig-auth][Feature:HTPasswdAuth] HTPasswd IDP should successfully configure htpasswd and be responsive [apigroup:user.openshift.io][apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:LDAP] LDAP IDP should authenticate against an ldap server [apigroup:user.openshift.io][apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:LDAP] LDAP should start an OpenLDAP test server [apigroup:user.openshift.io][apigroup:security.openshift.io][apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:LDAP][Serial] ldap group sync can sync groups from ldap [apigroup:user.openshift.io][apigroup:authorization.openshift.io][apigroup:security.openshift.io]": " [Suite:openshift/conformance/serial]",
-
-	"[sig-auth][Feature:OAuthServer] ClientSecretWithPlus should create oauthclient [apigroup:config.openshift.io][apigroup:oauth.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OAuthServer] OAuth Authenticator accepts sha256 access tokens [apigroup:user.openshift.io][apigroup:oauth.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OAuthServer] OAuth server [apigroup:auth.openshift.io] should use http1.1 only to prevent http2 connection reuse": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OAuthServer] OAuth server has the correct token and certificate fallback semantics [apigroup:config.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OAuthServer] [Headers][apigroup:route.openshift.io][apigroup:config.openshift.io][apigroup:oauth.openshift.io] expected headers returned from the authorize URL": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OAuthServer] [Headers][apigroup:route.openshift.io][apigroup:config.openshift.io][apigroup:oauth.openshift.io] expected headers returned from the grant URL": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OAuthServer] [Headers][apigroup:route.openshift.io][apigroup:config.openshift.io][apigroup:oauth.openshift.io] expected headers returned from the login URL for the allow all IDP": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OAuthServer] [Headers][apigroup:route.openshift.io][apigroup:config.openshift.io][apigroup:oauth.openshift.io] expected headers returned from the login URL for the bootstrap IDP": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OAuthServer] [Headers][apigroup:route.openshift.io][apigroup:config.openshift.io][apigroup:oauth.openshift.io] expected headers returned from the login URL for when there is only one IDP": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OAuthServer] [Headers][apigroup:route.openshift.io][apigroup:config.openshift.io][apigroup:oauth.openshift.io] expected headers returned from the logout URL": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OAuthServer] [Headers][apigroup:route.openshift.io][apigroup:config.openshift.io][apigroup:oauth.openshift.io] expected headers returned from the root URL": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OAuthServer] [Headers][apigroup:route.openshift.io][apigroup:config.openshift.io][apigroup:oauth.openshift.io] expected headers returned from the token URL": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OAuthServer] [Headers][apigroup:route.openshift.io][apigroup:config.openshift.io][apigroup:oauth.openshift.io] expected headers returned from the token request URL": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age [apigroup:oauth.openshift.io] to generate tokens that do not expire works as expected when using a code authorization flow [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age [apigroup:oauth.openshift.io] to generate tokens that do not expire works as expected when using a token authorization flow [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age [apigroup:oauth.openshift.io] to generate tokens that expire shortly works as expected when using a code authorization flow [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age [apigroup:oauth.openshift.io] to generate tokens that expire shortly works as expected when using a token authorization flow [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OAuthServer] well-known endpoint should be reachable [apigroup:config.openshift.io][apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OpenShiftAuthorization] RBAC proxy for openshift authz RunLegacyClusterRoleBindingEndpoint should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OpenShiftAuthorization] RBAC proxy for openshift authz RunLegacyClusterRoleEndpoint should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OpenShiftAuthorization] RBAC proxy for openshift authz RunLegacyEndpointConfirmNoEscalation [apigroup:authorization.openshift.io] should succeed": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OpenShiftAuthorization] RBAC proxy for openshift authz RunLegacyLocalRoleBindingEndpoint should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OpenShiftAuthorization] RBAC proxy for openshift authz RunLegacyLocalRoleEndpoint should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OpenShiftAuthorization] The default cluster RBAC policy should have correct RBAC rules": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OpenShiftAuthorization] authorization TestAuthorizationSubjectAccessReview should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OpenShiftAuthorization] authorization TestAuthorizationSubjectAccessReviewAPIGroup should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OpenShiftAuthorization] authorization TestBrowserSafeAuthorizer should succeed [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OpenShiftAuthorization] authorization TestClusterReaderCoverage should succeed": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OpenShiftAuthorization] scopes TestScopeEscalations should succeed [apigroup:user.openshift.io][apigroup:authorization.openshift.io][apigroup:build.openshift.io][apigroup:oauth.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OpenShiftAuthorization] scopes TestScopedImpersonation should succeed [apigroup:user.openshift.io][apigroup:authorization.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OpenShiftAuthorization] scopes TestScopedTokens should succeed [apigroup:user.openshift.io][apigroup:authorization.openshift.io][apigroup:oauth.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OpenShiftAuthorization] scopes TestTokensWithIllegalScopes should succeed [apigroup:oauth.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OpenShiftAuthorization] scopes TestUnknownScopes should succeed [apigroup:user.openshift.io][apigroup:authorization.openshift.io][apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OpenShiftAuthorization] self-SAR compatibility TestBootstrapPolicySelfSubjectAccessReviews should succeed [apigroup:user.openshift.io][apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OpenShiftAuthorization] self-SAR compatibility TestSelfSubjectAccessReviewsNonExistingNamespace should succeed [apigroup:user.openshift.io][apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:OpenShiftAuthorization][Serial] authorization TestAuthorizationResourceAccessReview should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/serial]",
-
-	"[sig-auth][Feature:PodSecurity] restricted-v2 SCC should mutate empty securityContext to match restricted PSa profile": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:PodSecurity][Feature:SCC] creating pod controllers": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:ProjectAPI] TestInvalidRoleRefs should succeed [apigroup:authorization.openshift.io][apigroup:user.openshift.io][apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:ProjectAPI] TestProjectIsNamespace should succeed [apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:ProjectAPI] TestProjectWatch should succeed [apigroup:project.openshift.io][apigroup:authorization.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-auth][Feature:ProjectAPI] TestProjectWatchWithSelectionPredicate
should succeed [apigroup:project.openshift.io][apigroup:authorization.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-auth][Feature:ProjectAPI] TestScopedProjectAccess should succeed [apigroup:user.openshift.io][apigroup:project.openshift.io][apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-auth][Feature:ProjectAPI] TestUnprivilegedNewProject [apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-auth][Feature:ProjectAPI][Serial] TestUnprivilegedNewProjectDenied [apigroup:authorization.openshift.io][apigroup:project.openshift.io]": " [Suite:openshift/conformance/serial]", - - "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Create a RBAC rolebinding when subject is not already bound and is not permitted by any RBR should fail [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Create a rolebinding that also contains system:non-existing users should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Create a rolebinding when subject is already bound should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Create a rolebinding when subject is not already bound and is not permitted by any RBR should fail [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Create a rolebinding when subject is permitted by RBR should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Create a rolebinding when there are no restrictions should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Rolebinding restrictions tests single project should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-auth][Feature:SCC][Early] should not have pod creation failures during install": " [Suite:openshift/conformance/parallel]", - - "[sig-auth][Feature:SecurityContextConstraints] TestAllowedSCCViaRBAC [apigroup:project.openshift.io][apigroup:user.openshift.io][apigroup:authorization.openshift.io][apigroup:security.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-auth][Feature:SecurityContextConstraints] TestAllowedSCCViaRBAC with service account [apigroup:security.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-auth][Feature:SecurityContextConstraints] TestPodDefaultCapabilities": " [Suite:openshift/conformance/parallel]", - - "[sig-auth][Feature:SecurityContextConstraints] TestPodUpdateSCCEnforcement [apigroup:user.openshift.io][apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-auth][Feature:SecurityContextConstraints] TestPodUpdateSCCEnforcement with service account": " [Suite:openshift/conformance/parallel]", - - "[sig-auth][Feature:UserAPI] groups should 
work [apigroup:user.openshift.io][apigroup:project.openshift.io][apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-auth][Feature:UserAPI] users can manipulate groups [apigroup:user.openshift.io][apigroup:authorization.openshift.io][apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-autoscaling] Cluster size autoscaler scalability [Slow] CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6]": " [Suite:k8s]", "[sig-autoscaling] Cluster size autoscaler scalability [Slow] should scale down empty nodes [Feature:ClusterAutoscalerScalability3]": " [Suite:k8s]", @@ -907,1035 +577,225 @@ var Annotations = map[string]string{ "[sig-autoscaling] Cluster size autoscaling [Slow] should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]", - "[sig-autoscaling] Cluster size autoscaling [Slow] should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", - - "[sig-autoscaling] Cluster size autoscaling [Slow] should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", - - "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain[Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]", - - "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", - - "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]", - - "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", - - "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", - - "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", - - "[sig-autoscaling] DNS horizontal autoscaling [Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed": " [Disabled:SpecialConfig] [Suite:k8s]", - - "[sig-autoscaling] DNS horizontal autoscaling kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios": " [Disabled:SpecialConfig] [Suite:k8s]", - - "[sig-autoscaling] [Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling Autoscaling a service from 1 pod and 3 nodes to 8 pods and >=4 nodes takes less than 15 minutes": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) CustomResourceDefinition Should scale with a CRD targetRef": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Should scale from 1 pod to 2 pods": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) ReplicationController light [Slow] Should scale from 2 pods to 1 pod": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Container Resource) 
Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod using Average Utilization for aggregation": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case) Should not scale up on a busy sidecar with an idle application": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods on a busy application with an idle sidecar container": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicationController Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods and verify decision stability": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicationController Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod and verify decision stability": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " 
[Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with autoscaling disabled shouldn't scale down": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with autoscaling disabled shouldn't scale up": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with both scale up and down controls configured should keep recommendation within the range over two stabilization windows": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with both scale up and down controls configured should keep recommendation within the range with stabilization window and pod limit rate": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with long upscale stabilization window should scale up only after the stabilization period": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by number of Pods rate should scale down no more than given number of Pods per minute": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by number of Pods rate should scale up no more than given number of Pods per minute": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by percentage should scale down no more than given percentage of current Pods per minute": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by percentage should scale up no more than given percentage of current Pods per minute": " [Suite:k8s]", - - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with short downscale stabilization window should scale down soon after the stabilization period": " [Suite:k8s]", - - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Object from Stackdriver should scale down to 0": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Object from Stackdriver should scale down": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Pod from Stackdriver should scale down with Prometheus": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Pod from Stackdriver should scale down": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Pod from Stackdriver should scale up with two metrics": " 
[Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with External Metric from Stackdriver should scale down with target average value": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with External Metric from Stackdriver should scale down with target value": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with External Metric from Stackdriver should scale up with two metrics": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should not scale down when one metric is missing (Container Resource and External Metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should not scale down when one metric is missing (Pod and Object Metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should scale up when one metric is missing (Pod and External metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should scale up when one metric is missing (Resource and Object metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", - - "[sig-builds][Feature:Builds] Multi-stage image builds should succeed [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] Optimized image builds should succeed [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] build can reference a cluster service with a build being created from new-build should be able to run a build that references a cluster service [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Skipped:Proxy] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] build have source revision metadata started build should contain source revision information [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] build with empty source started build should build even with an empty source in build config [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] build without output image building from templates should create an image from a S2i template without an output image reference defined [apigroup:build.openshift.io]": " 
[Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] build without output image building from templates should create an image from a docker template without an output image reference defined [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] buildconfig secret injector should inject secrets to the appropriate buildconfigs [apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] custom build with buildah being created from new-build should complete build with custom builder image [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] imagechangetriggers imagechangetriggers should trigger builds of all types [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] oc new-app should fail with a --name longer than 58 characters [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] oc new-app should succeed with a --name of 58 characters [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Skipped:Proxy] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] oc new-app should succeed with an imagestream [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] prune builds based on settings in the buildconfig buildconfigs should have a default history limit set when created via the group api [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] prune builds based on settings in the buildconfig should prune builds after a buildConfig change [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] prune builds based on settings in the buildconfig should prune canceled builds based on the failedBuildsHistoryLimit setting [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] prune builds based on settings in the buildconfig should prune completed builds based on the successfulBuildsHistoryLimit setting [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] prune builds based on settings in the buildconfig should prune errored builds based on the failedBuildsHistoryLimit setting [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] prune builds based on settings in the buildconfig should prune failed builds based on the failedBuildsHistoryLimit setting [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] remove all builds when build configuration is removed oc delete buildconfig should start builds and delete the buildconfig [apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] result image should have proper labels set Docker build from a template should create a image from \"test-docker-build.json\" template with proper Docker labels 
[apigroup:build.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] result image should have proper labels set S2I build from a template should create a image from \"test-s2i-build.json\" template with proper Docker labels [apigroup:build.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] s2i build with a quota Building from a template should create an s2i build with a quota and run it [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] s2i build with a root user image should create a root build and fail without a privileged SCC [apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] s2i build with a root user image should create a root build and pass with a privileged SCC [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] verify /run filesystem contents are writeable using a simple Docker Strategy Build [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds] verify /run filesystem contents do not have unexpected content using a simple Docker Strategy Build [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration build config no ocm rollout [apigroup:config.openshift.io] Apply default proxy configuration to docker build pod through env vars [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration build config no ocm rollout [apigroup:config.openshift.io] Apply default proxy configuration to source build pod through env vars [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration build config no ocm rollout [apigroup:config.openshift.io] Apply git proxy configuration to build pod [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration build config with ocm rollout [apigroup:config.openshift.io] Apply default image label configuration to build pod": "", - - "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration build config with ocm rollout [apigroup:config.openshift.io] Apply env configuration to build pod": "", - - "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration build config with ocm rollout [apigroup:config.openshift.io] Apply node selector configuration to build pod": "", - - "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration build config with ocm rollout [apigroup:config.openshift.io] Apply override image label configuration to build pod [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration build config with ocm rollout [apigroup:config.openshift.io] Apply resource configuration to build pod": "", - - "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration build config with ocm rollout 
[apigroup:config.openshift.io] Apply toleration override configuration to build pod": "", - - "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration registries config context should allow registries to be blacklisted": "", - - "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration registries config context should allow registries to be whitelisted": "", - - "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration registries config context should default registry search to docker.io for image pulls": "", - - "[sig-builds][Feature:Builds][Slow] Capabilities should be dropped for s2i builders s2i build with a rootable builder should not be able to switch to root with an assemble script [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] build can have Dockerfile input being created from new-build should be able to start a build from Dockerfile with FROM reference to scratch [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] build can have Dockerfile input being created from new-build should create a image via new-build [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] build can have Dockerfile input being created from new-build should create a image via new-build and infer the origin tag [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] build can have Dockerfile input being created from new-build testing build image with dockerfile contains a file path uses a variable in its name [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] build can have Dockerfile input being created from new-build testing build image with invalid dockerfile content [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] build can have container image source [apigroup:image.openshift.io] buildconfig with input source image and docker strategy should complete successfully and contain the expected file [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] build can have container image source [apigroup:image.openshift.io] buildconfig with input source image and s2i strategy should complete successfully and contain the expected file [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] build can have container image source [apigroup:image.openshift.io] creating a build with an input source image and custom strategy should resolve the imagestream references and secrets [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] build can have container image source [apigroup:image.openshift.io] creating a build with an input source image and docker strategy should resolve the imagestream references and secrets [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] build can have container image source [apigroup:image.openshift.io] creating a build with an input source image and s2i strategy should resolve the imagestream references and secrets [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] build controller RunBuildCompletePodDeleteTest should succeed [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] build controller RunBuildDeleteTest should succeed [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] build controller RunBuildRunningPodDeleteTest 
should succeed [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] builds should have deadlines oc start-build docker-build --wait Docker: should start a build and wait for the build failed and build pod being killed by kubelet [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] builds should have deadlines oc start-build source-build --wait Source: should start a build and wait for the build failed and build pod being killed by kubelet [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] builds should support proxies start build with broken proxy and a no_proxy override should start a docker build and wait for the build to succeed [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] builds should support proxies start build with broken proxy and a no_proxy override should start an s2i build and wait for the build to succeed [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] builds should support proxies start build with broken proxy should start a build and wait for the build to fail [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] builds should support proxies start build with cluster-wide custom PKI should mount the custom PKI into the build if specified [apigroup:config.openshift.io][apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] builds with a context directory docker context directory build should docker build an application using a context directory [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] builds with a context directory s2i context directory build should s2i build an application using a context directory [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] can use build secrets build with secrets and configMaps should contain secrets during the docker strategy build": "", - - "[sig-builds][Feature:Builds][Slow] can use build secrets build with secrets and configMaps should contain secrets during the source strategy build [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] can use private repositories as build input build using an HTTP token should be able to clone source code via an HTTP token [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] can use private repositories as build input build using an ssh private key should be able to clone source code via ssh [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] can use private repositories as build input build using an ssh private key should be able to clone source code via ssh using SCP-style URIs [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] completed builds should have digest of the image in their status Docker build started with log level >5 should save the image digest when finished [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] completed builds should have digest of the image in their status Docker build started with normal log level should save the image digest when finished [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] completed builds should have digest of the image in their status S2I build started with log level >5 should save the image digest when finished [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", - - 
"[sig-builds][Feature:Builds][Slow] completed builds should have digest of the image in their status S2I build started with normal log level should save the image digest when finished [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] incremental s2i build Building from a template should create a build from \"incremental-auth-build.json\" template and run it [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] s2i build with environment file in sources Building from a template should create a image from \"test-env-build.json\" template and run it in a pod [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context Setting build-args on Docker builds Should accept build args that are specified in the Dockerfile [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context Setting build-args on Docker builds Should complete with a warning on non-existent build-arg [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context Setting build-args on Docker builds Should copy build args from BuildConfig to Build [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context Trigger builds with branch refs matching directories on master branch Should checkout the config branch, not config directory [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context binary builds shoud accept --from-archive with https URL as an input [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context binary builds shoud accept --from-file with https URL as an input [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context binary builds should accept --from-dir as input [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context binary builds should accept --from-file as input [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context binary builds should accept --from-repo as input [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context binary builds should accept --from-repo with --commit as input [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context binary builds should reject binary build requests without a --from-xxxx value [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context cancel a binary build that doesn't start running in 5 minutes should start a build and wait for the build to be cancelled [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context cancel a build started by oc start-build --wait should start a build and wait for the build to cancel [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context oc start-build 
--wait should start a build and wait for the build to complete [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context oc start-build --wait should start a build and wait for the build to fail [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context oc start-build with pr ref should start a build from a PR ref, wait for the build to complete, and confirm the right level was used [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context override environment BUILD_LOGLEVEL in buildconfig can be overridden by build-loglevel [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context override environment BUILD_LOGLEVEL in buildconfig should create verbose output [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context override environment should accept environment variables [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context s2i build maintaining symlinks should s2i build image and maintain symlinks [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context start a build via a webhook should be able to start builds via the webhook with valid secrets and fail with invalid secrets [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] testing build configuration hooks testing postCommit hook should run docker postCommit hooks [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] testing build configuration hooks testing postCommit hook should run s2i postCommit hooks [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] update failure status Build status Docker fetch image content failure should contain the Docker build fetch image content reason and message [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] update failure status Build status Docker fetch source failure should contain the Docker build fetch source failure reason and message [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] update failure status Build status OutOfMemoryKilled should contain OutOfMemoryKilled failure reason and message [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] update failure status Build status S2I bad context dir failure should contain the S2I bad context dir failure reason and message [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] update failure status Build status S2I fetch source failure should contain the S2I fetch source failure reason and message [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] update failure status Build status failed assemble container should contain the failure reason related to an assemble script failing in s2i [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] update failure status Build status failed https proxy invalid url should contain the generic failure reason and message [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] update failure status Build status fetch builder image failure 
should contain the fetch builder image failure reason and message [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] update failure status Build status postcommit hook failure should contain the post commit hook failure reason and message [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] update failure status Build status push image to registry failure should contain the image push to registry failure reason and message [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] using build configuration runPolicy build configuration with Parallel build run policy runs the builds in parallel [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] using build configuration runPolicy build configuration with Serial build run policy handling cancellation starts the next build immediately after one is canceled [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] using build configuration runPolicy build configuration with Serial build run policy handling deletion starts the next build immediately after running one is deleted [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] using build configuration runPolicy build configuration with Serial build run policy handling failure starts the next build immediately after one fails [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] using build configuration runPolicy build configuration with Serial build run policy runs the builds in serial order [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] using build configuration runPolicy build configuration with SerialLatestOnly build run policy runs the builds in serial order but cancel previous builds [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] using pull secrets in a build start-build test context binary builds should be able to run a build that is implicitly pulling from the internal registry [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] using pull secrets in a build start-build test context pulling from an external authenticated registry should be able to use a pull secret in a build [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][Slow] using pull secrets in a build start-build test context pulling from an external authenticated registry should be able to use a pull secret linked to the builder service account [apigroup:build.openshift.io]": "", - - "[sig-builds][Feature:Builds][pullsearch] docker build where the registry is not specified Building from a Dockerfile whose FROM image ref does not specify the image registry should create a docker build that has buildah search from our predefined list of image registries and succeed [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds][pullsecret] docker build using a pull secret Building from a template should create a docker build that pulls using a secret run it [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds][timing] capture build stages and durations should record build stages and durations for docker [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds][timing] capture build stages and durations should record build stages and durations for s2i 
[apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds][valueFrom] process valueFrom in build strategy environment variables should fail resolving unresolvable valueFrom in docker build environment variable references [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds][valueFrom] process valueFrom in build strategy environment variables should fail resolving unresolvable valueFrom in sti build environment variable references [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds][valueFrom] process valueFrom in build strategy environment variables should successfully resolve valueFrom in docker build environment variables [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds][valueFrom] process valueFrom in build strategy environment variables should successfully resolve valueFrom in s2i build environment variables [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds][volumes] build volumes should mount given secrets and configmaps into the build pod for docker strategy builds [apigroup:image.openshift.io][apigroup:build.openshift.io][apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds][volumes] build volumes should mount given secrets and configmaps into the build pod for source strategy builds [apigroup:image.openshift.io][apigroup:build.openshift.io][apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds][volumes] csi build volumes within Tech Preview enabled cluster [apigroup:config.openshift.io] should mount given csi shared resource secret into the build pod for docker strategy builds [apigroup:image.openshift.io][apigroup:build.openshift.io][apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds][volumes] csi build volumes within Tech Preview enabled cluster [apigroup:config.openshift.io] should mount given csi shared resource secret into the build pod for source strategy builds [apigroup:image.openshift.io][apigroup:build.openshift.io][apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds][volumes] csi build volumes within Tech Preview enabled cluster [apigroup:config.openshift.io] should mount given csi shared resource secret without resource refresh into the build pod for docker strategy builds [apigroup:image.openshift.io][apigroup:build.openshift.io][apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds][volumes] csi build volumes within Tech Preview enabled cluster [apigroup:config.openshift.io] should mount given csi shared resource secret without resource refresh into the build pod for source strategy builds [apigroup:image.openshift.io][apigroup:build.openshift.io][apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds][webhook] TestWebhook [apigroup:build.openshift.io][apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-builds][Feature:Builds][webhook] TestWebhookGitHubPing [apigroup:image.openshift.io][apigroup:build.openshift.io]": " 
[Suite:openshift/conformance/parallel]",
-
-	"[sig-builds][Feature:Builds][webhook] TestWebhookGitHubPushWithImage [apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-builds][Feature:Builds][webhook] TestWebhookGitHubPushWithImageStream [apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-builds][Feature:JenkinsRHELImagesOnly][Feature:Jenkins][Feature:Builds][sig-devex][Slow] openshift pipeline build jenkins pipeline build config strategy using a jenkins instance launched with the ephemeral template [apigroup:build.openshift.io]": "",
-
-	"[sig-ci] [Early] prow job name should match cluster version [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-ci] [Early] prow job name should match network type [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-ci] [Early] prow job name should match platform type [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 should support forwarding over websockets": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects NO client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a client that connects, sends NO DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl Port forwarding With a server listening on localhost should support forwarding over websockets": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects NO client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends NO DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Guestbook application should create and stop a working application [Conformance]": " [Slow] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl api-versions should check if v1 is in available api versions [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl apply apply set/view last-applied": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl apply should apply a new configuration to an existing RC": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl apply should reuse port when apply to an existing SVC": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl cluster-info dump should check if cluster-info dump succeeds": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes control plane services is included in cluster-info [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl copy should copy a file from a running Pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl create quota should create a quota with scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl create quota should create a quota without scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl create quota should reject quota with invalid scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for cronjob": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for rc and pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl diff should check if kubectl diff finds a difference for Deployments [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl events should show event when pod is created": " [Disabled:RebaseInProgress] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl expose should create services for rc [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl get componentstatuses should get componentstatuses": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl label should update the label on a resource [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl logs should be able to retrieve and filter logs [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl patch should add annotations for pods in rc [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl replace should update a single-container pod's image [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl run pod should create a pod from an image when restart is Never [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl server-side dry-run should check if kubectl can dry-run update Pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl taint [Serial] should remove all the taints with the same key off a node": " [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl taint [Serial] should update the taint on a node": " [Suite:openshift/conformance/serial] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl validation should create/apply a CR with unknown fields for CRD with no validation schema": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl validation should create/apply a valid CR for CRD with validation schema": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl validation should create/apply an invalid/valid CR with arbitrary-extra properties for CRD with partially-specified validation schema": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl validation should detect unknown metadata fields in both the root and embedded object of a CR": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl validation should detect unknown metadata fields of a typed object": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Kubectl version should check is all data is printed [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Proxy server should support --unix-socket=/path [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Proxy server should support proxy with --port 0 [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Simple pod should contain last line of the log": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Simple pod should handle in-cluster config": " [Disabled:Broken] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Simple pod should return command exit codes [Slow] running a failing command with --leave-stdin-open": " [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Simple pod should return command exit codes [Slow] running a failing command without --restart=Never": " [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Simple pod should return command exit codes [Slow] running a failing command without --restart=Never, but with --rm": " [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a failing command": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a successful command": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Simple pod should return command exit codes running a failing command": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Simple pod should return command exit codes running a successful command": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Simple pod should support exec through an HTTP proxy": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Simple pod should support exec through kubectl proxy": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Simple pod should support exec using resource/name": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Simple pod should support exec": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Simple pod should support inline execution and attach": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Simple pod should support port-forward": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Update Demo should create and stop a replication controller [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client Update Demo should scale a replication controller [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-
-	"[sig-cli] Kubectl client kubectl wait should ignore not found error with --for=delete": " [Disabled:Broken] [Suite:k8s]",
-
-	"[sig-cli] oc --request-timeout works as expected [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc adm build-chain [apigroup:build.openshift.io][apigroup:image.openshift.io][apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc adm cluster-role-reapers [Serial][apigroup:authorization.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/serial]",
-
-	"[sig-cli] oc adm groups [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc adm images [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc adm must-gather runs successfully [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc adm must-gather runs successfully for audit logs [apigroup:config.openshift.io][apigroup:oauth.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc adm must-gather runs successfully with options [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc adm must-gather when looking at the audit logs [sig-node] kubelet runs apiserver processes strictly sequentially in order to not risk audit log corruption [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc adm new-project [apigroup:project.openshift.io][apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc adm node-logs": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc adm policy [apigroup:authorization.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc adm role-reapers [apigroup:authorization.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc adm role-selectors [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc adm serviceaccounts": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc adm storage-admin [apigroup:authorization.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc adm ui-project-commands [apigroup:project.openshift.io][apigroup:authorization.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc adm user-creation [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc adm who-can [apigroup:authorization.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc annotate pod": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc api-resources can output expected information about api-resources": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc api-resources can output expected information about build.openshift.io api-resources [apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc api-resources can output expected information about image.openshift.io api-resources [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc api-resources can output expected information about operator.openshift.io api-resources [apigroup:operator.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc api-resources can output expected information about project.openshift.io api-resources [apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc api-resources can output expected information about route.openshift.io api-resources and api-version [apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc api-resources can output expected information about snapshot.storage.k8s.io api-resources [apigroup:snapshot.storage.k8s.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc basics can create and interact with a list of resources": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc basics can create deploymentconfig and clusterquota [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc basics can describe an OAuth access token [apigroup:oauth.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc basics can get version information from API": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc basics can get version information from CLI": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc basics can output expected --dry-run text": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc basics can patch resources [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc basics can process templates [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc basics can show correct whoami result with console": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc basics can show correct whoami result": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc builds complex build start-build [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc builds complex build webhooks CRUD [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc builds get buildconfig [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc builds new-build [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc builds patch buildconfig [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc can get list of nodes": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc can route traffic to services [apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc can run inside of a busybox container [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc completion returns expected help messages": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc debug deployment configs from a build [apigroup:image.openshift.io][apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc debug dissect deployment config debug [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc debug does not require a real resource on the server": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc debug ensure debug does not depend on a container actually existing for the selected resource [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc debug ensure it works with image streams [apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc env can set environment variables [apigroup:apps.openshift.io][apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain list uncovered GroupVersionResources": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain networking types when using openshift-sdn should contain proper fields description for special networking types": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain proper fields description for apps.openshift.io [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain proper fields description for authorization.openshift.io [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain proper fields description for config.openshift.io [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain proper fields description for console.openshift.io [apigroup:console.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain proper fields description for image.openshift.io [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain proper fields description for oauth.openshift.io [apigroup:oauth.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain proper fields description for project.openshift.io [apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain proper fields description for route.openshift.io [apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain proper fields description for security.internal.openshift.io [apigroup:security.internal.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain proper fields description for security.openshift.io [apigroup:security.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain proper fields description for template.openshift.io [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain proper fields description for user.openshift.io [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain proper spec+status for CRDs [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain spec+status for apps.openshift.io [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain spec+status for build.openshift.io [apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain spec+status for image.openshift.io [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain spec+status for project.openshift.io [apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain spec+status for route.openshift.io [apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain spec+status for security.openshift.io [apigroup:security.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc explain should contain spec+status for template.openshift.io [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc expose can ensure the expose command is functioning as expected [apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc help works as expected": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc idle [apigroup:apps.openshift.io][apigroup:route.openshift.io][apigroup:project.openshift.io][apigroup:image.openshift.io] by all": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc idle [apigroup:apps.openshift.io][apigroup:route.openshift.io][apigroup:project.openshift.io][apigroup:image.openshift.io] by checking previous scale": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc idle [apigroup:apps.openshift.io][apigroup:route.openshift.io][apigroup:project.openshift.io][apigroup:image.openshift.io] by label": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc idle [apigroup:apps.openshift.io][apigroup:route.openshift.io][apigroup:project.openshift.io][apigroup:image.openshift.io] by name": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc label pod": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc observe works as expected with cluster operators [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc observe works as expected": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc probe can ensure the probe command is functioning as expected on deploymentconfigs [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc probe can ensure the probe command is functioning as expected on pods": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc project --show-labels works for projects [apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc project can switch between different projects [apigroup:authorization.openshift.io][apigroup:user.openshift.io][apigroup:project.openshift.io][Serial]": " [Suite:openshift/conformance/serial]",
-
-	"[sig-cli] oc rsh specific flags should work well when access to a remote shell": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc run can use --image flag correctly [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc secret creates and retrieves expected": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc service creates and deletes services": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc statefulset creates and deletes statefulsets": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] oc status can show correct status after switching between projects [apigroup:project.openshift.io][apigroup:image.openshift.io][Serial]": " [Suite:openshift/conformance/serial]",
-
-	"[sig-cli] oc status returns expected help messages [apigroup:project.openshift.io][apigroup:build.openshift.io][apigroup:image.openshift.io][apigroup:apps.openshift.io][apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] policy scc-subject-review, scc-review [apigroup:authorization.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] templates different namespaces [apigroup:user.openshift.io][apigroup:project.openshift.io][apigroup:template.openshift.io][apigroup:authorization.openshift.io][Skipped:Disconnected]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli] templates process [apigroup:apps.openshift.io][apigroup:template.openshift.io][Skipped:Disconnected]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/authentication.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/builds.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/config.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/deployments.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/describer.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/edit.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/env.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/framework-test.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/get.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/image-lookup.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/images.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/printer.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/quota.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/secrets.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/set-data.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/set-image.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/set-liveness-probe.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/setbuildhook.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/setbuildsecret.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/triggers.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/volumes.sh [apigroup:image.openshift.io][apigroup:config.openshift.io]": "",
-
-	"[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] copy by strategy should copy files with the rsync strategy": "",
-
-	"[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] copy by strategy should copy files with the rsync-daemon strategy": "",
-
-	"[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] copy by strategy should copy files with the tar strategy": "",
-
-	"[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] rsync specific flags should honor multiple --exclude flags": "",
-
-	"[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] rsync specific flags should honor multiple --include flags": "",
-
-	"[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] rsync specific flags should honor the --exclude flag": "",
-
-	"[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] rsync specific flags should honor the --include flag": "",
-
-	"[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] rsync specific flags should honor the --no-perms flag": "",
-
-	"[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] rsync specific flags should honor the --progress flag": "",
-
-	"[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] using a watch should watch for changes and rsync them": "",
-
-	"[sig-cloud-provider][Feature:OpenShiftCloudControllerManager][Late] Deploy an external cloud provider [apigroup:config.openshift.io][apigroup:machineconfiguration.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cluster-lifecycle] CSRs from machines that are not recognized by the cloud provider are not approved [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cluster-lifecycle] Pods cannot access the /config/master API endpoint [apigroup:config.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-cluster-lifecycle] TestAdminAck should succeed [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cluster-lifecycle][Feature:DisasterRecovery][Disruptive] [Feature:NodeRecovery] Cluster should survive master and worker failure and recover with machine health checks [apigroup:machine.openshift.io]": " [Serial]",
-
-	"[sig-cluster-lifecycle][Feature:Machines] Managed cluster should have machine resources [apigroup:machine.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cluster-lifecycle][Feature:Machines][Disruptive] Managed cluster should recover from deleted worker machines [apigroup:machine.openshift.io]": " [Serial]",
-
-	"[sig-cluster-lifecycle][Feature:Machines][Early] Managed cluster should have same number of Machines and Nodes [apigroup:machine.openshift.io]": " [Suite:openshift/conformance/parallel]",
-
-	"[sig-cluster-lifecycle][Feature:Machines][Serial] Managed cluster should grow and decrease when scaling different machineSets simultaneously [Timeout:30m][apigroup:machine.openshift.io]": " [Suite:openshift/conformance/serial]",
-
-	"[sig-coreos] [Conformance] CoreOS bootimages TestBootimagesPresent [apigroup:machineconfiguration.openshift.io]": " [Suite:openshift/conformance/parallel/minimal]",
-
-	"[sig-devex] check registry.redhat.io is available and samples operator can import sample imagestreams run sample related validations [apigroup:config.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/dotnet:6.0-ubi8\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/nginx:1.20-ubi7\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/nginx:1.20-ubi8\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/nodejs:14-ubi7\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/nodejs:14-ubi8\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/nodejs:16-ubi8\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/perl:5.26-ubi8\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/perl:5.30-el7\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/perl:5.30-ubi8\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/perl:5.32-ubi8\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/php:7.3-ubi7\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/php:7.4-ubi8\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/php:8.0-ubi8\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/python:2.7-ubi8\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/python:3.6-ubi8\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/python:3.8-ubi7\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/python:3.8-ubi8\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/python:3.9-ubi8\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:2.5-ubi8\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:2.7-ubi7\" should print the usage": "",
-
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:2.7-ubi8\" should print the usage": "",
+	"[sig-autoscaling] Cluster size autoscaling [Slow] should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:3.0-ubi7\" should print the usage": "",
+	"[sig-autoscaling] Cluster size autoscaling [Slow] should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:3.0-ubi8\" should print the usage": "",
+	"[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain[Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/dotnet:6.0-ubi8\" should be SCL enabled": "",
+	"[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/nginx:1.20-ubi7\" should be SCL enabled": "",
+	"[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/nginx:1.20-ubi8\" should be SCL enabled": "",
+	"[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/nodejs:14-ubi7\" should be SCL enabled": "",
+	"[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/nodejs:14-ubi8\" should be SCL enabled": "",
+	"[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/nodejs:16-ubi8\" should be SCL enabled": "",
+	"[sig-autoscaling] DNS horizontal autoscaling [Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed": " [Disabled:SpecialConfig] [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/perl:5.26-ubi8\" should be SCL enabled": "",
+	"[sig-autoscaling] DNS horizontal autoscaling kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios": " [Disabled:SpecialConfig] [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/perl:5.30-el7\" should be SCL enabled": "",
+	"[sig-autoscaling] [Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling Autoscaling a service from 1 pod and 3 nodes to 8 pods and >=4 nodes takes less than 15 minutes": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/perl:5.30-ubi8\" should be SCL enabled": "",
+	"[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) CustomResourceDefinition Should scale with a CRD targetRef": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/perl:5.32-ubi8\" should be SCL enabled": "",
+	"[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Should scale from 1 pod to 2 pods": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/php:7.3-ubi7\" should be SCL enabled": "",
+	"[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) ReplicationController light [Slow] Should scale from 2 pods to 1 pod": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/php:7.4-ubi8\" should be SCL enabled": "",
+	"[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/php:8.0-ubi8\" should be SCL enabled": "",
+	"[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/python:2.7-ubi8\" should be SCL enabled": "",
+	"[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/python:3.6-ubi8\" should be SCL enabled": "",
+	"[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/python:3.8-ubi7\" should be SCL enabled": "",
+	"[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod using Average Utilization for aggregation": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/python:3.8-ubi8\" should be SCL enabled": "",
+	"[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/python:3.9-ubi8\" should be SCL enabled": "",
+	"[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:2.5-ubi8\" should be SCL enabled": "",
+	"[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case) Should not scale up on a busy sidecar with an idle application": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:2.7-ubi7\" should be SCL enabled": "",
+	"[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods on a busy application with an idle sidecar container": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:2.7-ubi8\" should be SCL enabled": "",
+	"[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicationController Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods and verify decision stability": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:3.0-ubi7\" should be SCL enabled": "",
+	"[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicationController Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod and verify decision stability": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:3.0-ubi8\" should be SCL enabled": "",
+	"[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift sample application repositories [sig-devex][Feature:ImageEcosystem][nodejs] test nodejs images with nodejs-rest-http-crud db repo Building nodejs-postgresql app from new-app should build a nodejs-postgresql image and run it in a pod [apigroup:build.openshift.io]": "",
+	"[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift sample application repositories [sig-devex][Feature:ImageEcosystem][php] test php images with cakephp-ex db repo Building cakephp-mysql app from new-app should build a cakephp-mysql image and run it in a pod [apigroup:build.openshift.io]": "",
+	"[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift sample application repositories [sig-devex][Feature:ImageEcosystem][python] test python images with django-ex db repo Building django-psql app from new-app should build a django-psql image and run it in a pod [apigroup:build.openshift.io]": "",
+	"[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][Slow] openshift sample application repositories [sig-devex][Feature:ImageEcosystem][ruby] test ruby images with rails-ex db repo Building rails-postgresql app from new-app should build a rails-postgresql image and run it in a pod [apigroup:build.openshift.io]": "",
+	"[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with autoscaling disabled shouldn't scale down": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][mariadb][Slow] openshift mariadb image Creating from a template should instantiate the template [apigroup:image.openshift.io][apigroup:operator.openshift.io][apigroup:config.openshift.io][apigroup:apps.openshift.io]": " [Disabled:Broken]",
+	"[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with autoscaling disabled shouldn't scale up": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][mysql][Slow] openshift mysql image Creating from a template should instantiate the template [apigroup:apps.openshift.io]": " [Disabled:Broken]",
+	"[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with both scale up and down controls configured should keep recommendation within the range over two stabilization windows": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][perl][Slow] hot deploy for openshift perl image hot deploy test should work [apigroup:image.openshift.io][apigroup:operator.openshift.io][apigroup:config.openshift.io][apigroup:build.openshift.io][apigroup:apps.openshift.io]": "",
+	"[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with both scale up and down controls configured should keep recommendation within the range with stabilization window and pod limit rate": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][php][Slow] hot deploy for openshift php image CakePHP example should work with hot deploy [apigroup:image.openshift.io][apigroup:operator.openshift.io][apigroup:config.openshift.io][apigroup:build.openshift.io]": "",
+	"[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with long upscale stabilization window should scale up only after the stabilization period": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][python][Slow] hot deploy for openshift python image Django example should work with hot deploy [apigroup:image.openshift.io][apigroup:operator.openshift.io][apigroup:config.openshift.io][apigroup:build.openshift.io]": "",
+	"[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by number of Pods rate should scale down no more than given number of Pods per minute": " [Suite:k8s]",
-	"[sig-devex][Feature:ImageEcosystem][ruby][Slow] hot deploy for openshift ruby image Rails example should work with hot deploy [apigroup:image.openshift.io][apigroup:operator.openshift.io][apigroup:config.openshift.io][apigroup:build.openshift.io]": "",
+	"[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by number of Pods rate should scale up no more than given number of Pods per minute": " [Suite:k8s]",
-	"[sig-devex][Feature:OpenShiftControllerManager] TestAutomaticCreationOfPullSecrets [apigroup:config.openshift.io][apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by percentage should scale down no more than given percentage of current Pods per minute": " [Suite:k8s]",
-	"[sig-devex][Feature:OpenShiftControllerManager] TestDockercfgTokenDeletedController [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by percentage should scale up no more than given percentage of current Pods per minute": " [Suite:k8s]",
-	"[sig-devex][Feature:Templates] template-api TestTemplate [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with short downscale stabilization window should scale down soon after the stabilization period": " [Suite:k8s]",
-	"[sig-devex][Feature:Templates] template-api TestTemplateTransformationFromConfig [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Object from Stackdriver should scale down to 0": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-devex][Feature:Templates] templateinstance creation with invalid object reports error should report a failure on creation [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Object from Stackdriver should scale down": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-devex][Feature:Templates] templateinstance cross-namespace test should create and delete objects across namespaces [apigroup:user.openshift.io][apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Pod from Stackdriver should scale down with Prometheus": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-devex][Feature:Templates] templateinstance impersonation tests [apigroup:user.openshift.io][apigroup:authorization.openshift.io] should pass impersonation creation tests [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Pod from Stackdriver should scale down": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-devex][Feature:Templates] templateinstance impersonation tests [apigroup:user.openshift.io][apigroup:authorization.openshift.io] should pass impersonation deletion tests [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Pod from Stackdriver should scale up with two metrics": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-devex][Feature:Templates] templateinstance impersonation tests [apigroup:user.openshift.io][apigroup:authorization.openshift.io] should pass impersonation update tests [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with External Metric from Stackdriver should scale down with target average value": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-devex][Feature:Templates] templateinstance object kinds test should create and delete objects from varying API groups [apigroup:template.openshift.io][apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with External Metric from Stackdriver should scale down with target value": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-devex][Feature:Templates] templateinstance readiness test should report failed soon after an annotated objects has failed [apigroup:template.openshift.io][apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
+	"[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with External Metric from Stackdriver should scale up with two metrics": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-devex][Feature:Templates] templateinstance readiness test should report ready soon after all annotated objects are ready [apigroup:template.openshift.io][apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
+	"[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should not scale down when one metric is missing (Container Resource and External Metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-devex][Feature:Templates] templateinstance security tests [apigroup:authorization.openshift.io][apigroup:template.openshift.io] should pass security tests [apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should not scale down when one metric is missing (Pod and Object Metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-etcd] etcd cluster has the same number of master nodes and voting members from the endpoints configmap [Early][apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should scale up when one metric is missing (Pod and External metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-etcd] etcd leader changes are not excessive [Late][apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should scale up when one metric is missing (Resource and Object metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-etcd] etcd record the start revision of the etcd-operator [Early]": " [Suite:openshift/conformance/parallel]",
+	"[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 should support forwarding over websockets": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-etcd][Feature:DisasterRecovery][Disruptive] [Feature:EtcdRecovery] Cluster should recover from a backup taken on one node and recovered on another [apigroup:operator.openshift.io]": " [Serial]",
+	"[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects NO client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-etcd][Feature:DisasterRecovery][Disruptive] [Feature:EtcdRecovery] Cluster should restore itself after quorum loss [apigroup:machine.openshift.io][apigroup:operator.openshift.io]": " [Serial]",
+	"[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-etcd][Feature:EtcdVerticalScaling][Suite:openshift/etcd/scaling] etcd [apigroup:config.openshift.io] is able to vertically scale up and down with a single node [Timeout:60m][apigroup:machine.openshift.io]": "",
+	"[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a client that connects, sends NO DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-imageregistry] Image registry [apigroup:route.openshift.io] should redirect on blob pull [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-cli] Kubectl Port forwarding With a server listening on localhost should support forwarding over websockets": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageAppend] Image append should create images by appending them [apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
+	"[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects NO client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageExtract] Image extract should extract content from an image [apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
+	"[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageInfo] Image info should display information about images [apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
+	"[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends NO DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageLayers] Image layer subresource should identify a deleted image as missing [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-cli] Kubectl client Guestbook application should create and stop a working application [Conformance]": " [Slow] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageLayers] Image layer subresource should return layers from tagged images [apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
+	"[sig-cli] Kubectl client Kubectl api-versions should check if v1 is in available api versions [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageLookup] Image policy should perform lookup when the Deployment gets the resolve-names annotation later [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-cli] Kubectl client Kubectl apply apply set/view last-applied": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageLookup] Image policy should perform lookup when the object has the resolve-names annotation [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-cli] Kubectl client Kubectl apply should apply a new configuration to an existing RC": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageLookup] Image policy should update OpenShift object image fields when local names are on [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-cli] Kubectl client Kubectl apply should reuse port when apply to an existing SVC": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageLookup] Image policy should update standard Kube object image fields when local names are on [apigroup:image.openshift.io][apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-cli] Kubectl client Kubectl cluster-info dump should check if cluster-info dump succeeds": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageMirror][Slow] Image mirror mirror image from integrated registry into few external registries [apigroup:image.openshift.io][apigroup:build.openshift.io]": "",
+	"[sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes control plane services is included in cluster-info [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageMirror][Slow] Image mirror mirror image from integrated registry to external registry [apigroup:image.openshift.io][apigroup:build.openshift.io]": "",
+	"[sig-cli] Kubectl client Kubectl copy should copy a file from a running Pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImagePrune][Serial][Suite:openshift/registry/serial][Local] Image hard prune [apigroup:apps.openshift.io][apigroup:user.openshift.io] should delete orphaned blobs [apigroup:image.openshift.io]": "",
+	"[sig-cli] Kubectl client Kubectl create quota should create a quota with scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImagePrune][Serial][Suite:openshift/registry/serial][Local] Image hard prune [apigroup:apps.openshift.io][apigroup:user.openshift.io] should show orphaned blob deletions in dry-run mode [apigroup:image.openshift.io]": "",
+	"[sig-cli] Kubectl client Kubectl create quota should create a quota without scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImagePrune][Serial][Suite:openshift/registry/serial][Local] Image prune [apigroup:user.openshift.io] of schema 1 should prune old image [apigroup:build.openshift.io][apigroup:image.openshift.io]": "",
+	"[sig-cli] Kubectl client Kubectl create quota should reject quota with invalid scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImagePrune][Serial][Suite:openshift/registry/serial][Local] Image prune [apigroup:user.openshift.io] of schema 2 should prune old image with config [apigroup:build.openshift.io][apigroup:image.openshift.io]": "",
+	"[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for cronjob": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImagePrune][Serial][Suite:openshift/registry/serial][Local] Image prune [apigroup:user.openshift.io] with --all=false flag should prune only internally managed images [apigroup:build.openshift.io][apigroup:image.openshift.io]": "",
+	"[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for rc and pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImagePrune][Serial][Suite:openshift/registry/serial][Local] Image prune [apigroup:user.openshift.io] with --prune-registry==false should prune old image but skip registry [apigroup:build.openshift.io][apigroup:image.openshift.io]": "",
+	"[sig-cli] Kubectl client Kubectl diff should check if kubectl diff finds a difference for Deployments [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImagePrune][Serial][Suite:openshift/registry/serial][Local] Image prune [apigroup:user.openshift.io] with default --all flag should prune both internally managed and external images [apigroup:build.openshift.io][apigroup:image.openshift.io]": "",
+	"[sig-cli] Kubectl client Kubectl events should show event when pod is created": " [Disabled:RebaseInProgress] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageQuota] Image resource quota should deny a push of built image exceeding openshift.io/imagestreams quota [apigroup:image.openshift.io]": " [Disabled:SpecialConfig]",
+	"[sig-cli] Kubectl client Kubectl expose should create services for rc [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageQuota][Serial][Suite:openshift/registry/serial] Image limit range [apigroup:config.openshift.io][apigroup:image.openshift.io][apigroup:operator.openshift.io] should deny a container image reference exceeding limit on openshift.io/image-tags resource [apigroup:build.openshift.io]": " [Disabled:SpecialConfig]",
+	"[sig-cli] Kubectl client Kubectl get componentstatuses should get componentstatuses": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageQuota][Serial][Suite:openshift/registry/serial] Image limit range [apigroup:config.openshift.io][apigroup:image.openshift.io][apigroup:operator.openshift.io] should deny a push of built image exceeding limit on openshift.io/images resource [apigroup:build.openshift.io]": " [Disabled:SpecialConfig]",
+	"[sig-cli] Kubectl client Kubectl label should update the label on a resource [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageQuota][Serial][Suite:openshift/registry/serial] Image limit range [apigroup:config.openshift.io][apigroup:image.openshift.io][apigroup:operator.openshift.io] should deny a push of built image exceeding openshift.io/Image limit [apigroup:build.openshift.io]": " [Disabled:SpecialConfig]",
+	"[sig-cli] Kubectl client Kubectl logs should be able to retrieve and filter logs [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageQuota][Serial][Suite:openshift/registry/serial] Image limit range [apigroup:config.openshift.io][apigroup:image.openshift.io][apigroup:operator.openshift.io] should deny an import of a repository exceeding limit on openshift.io/image-tags resource [apigroup:build.openshift.io]": " [Disabled:SpecialConfig]",
+	"[sig-cli] Kubectl client Kubectl patch should add annotations for pods in rc [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageStreamImport][Serial][Slow] ImageStream API [apigroup:config.openshift.io] TestImportImageFromBlockedRegistry [apigroup:image.openshift.io]": "",
+	"[sig-cli] Kubectl client Kubectl replace should update a single-container pod's image [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageStreamImport][Serial][Slow] ImageStream API [apigroup:config.openshift.io] TestImportImageFromInsecureRegistry [apigroup:image.openshift.io]": "",
+	"[sig-cli] Kubectl client Kubectl run pod should create a pod from an image when restart is Never [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageStreamImport][Serial][Slow] ImageStream API [apigroup:config.openshift.io] TestImportRepositoryFromBlockedRegistry [apigroup:image.openshift.io]": "",
+	"[sig-cli] Kubectl client Kubectl server-side dry-run should check if kubectl can dry-run update Pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageStreamImport][Serial][Slow] ImageStream API [apigroup:config.openshift.io] TestImportRepositoryFromInsecureRegistry [apigroup:image.openshift.io]": "",
+	"[sig-cli] Kubectl client Kubectl taint [Serial] should remove all the taints with the same key off a node": " [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageTriggers] Annotation trigger reconciles after the image is overwritten [apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]",
+	"[sig-cli] Kubectl client Kubectl taint [Serial] should update the taint on a node": " [Suite:openshift/conformance/serial] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestMultipleImageChangeBuildTriggers [apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]",
+	"[sig-cli] Kubectl client Kubectl validation should create/apply a CR with unknown fields for CRD with no validation schema": " [Suite:openshift/conformance/parallel] [Suite:k8s]",
-	"[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestSimpleImageChangeBuildTriggerFromImageStreamTagCustom
[apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", + "[sig-cli] Kubectl client Kubectl validation should create/apply a valid CR for CRD with validation schema": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestSimpleImageChangeBuildTriggerFromImageStreamTagCustomWithConfigChange [apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", + "[sig-cli] Kubectl client Kubectl validation should create/apply an invalid/valid CR with arbitrary-extra properties for CRD with partially-specified validation schema": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestSimpleImageChangeBuildTriggerFromImageStreamTagDocker [apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", + "[sig-cli] Kubectl client Kubectl validation should detect unknown metadata fields in both the root and embedded object of a CR": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestSimpleImageChangeBuildTriggerFromImageStreamTagDockerWithConfigChange [apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", + "[sig-cli] Kubectl client Kubectl validation should detect unknown metadata fields of a typed object": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestSimpleImageChangeBuildTriggerFromImageStreamTagSTI [apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", + "[sig-cli] Kubectl client Kubectl version should check is all data is printed [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", - "[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestSimpleImageChangeBuildTriggerFromImageStreamTagSTIWithConfigChange [apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", + "[sig-cli] Kubectl client Proxy server should support --unix-socket=/path [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", - "[sig-imageregistry][Feature:ImageTriggers][Serial] ImageStream API TestImageStreamMappingCreate [apigroup:image.openshift.io]": " [Suite:openshift/conformance/serial]", + "[sig-cli] Kubectl client Proxy server should support proxy with --port 0 [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", - "[sig-imageregistry][Feature:ImageTriggers][Serial] ImageStream API TestImageStreamTagLifecycleHook [apigroup:image.openshift.io]": " [Suite:openshift/conformance/serial]", + "[sig-cli] Kubectl client Simple pod should contain last line of the log": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-imageregistry][Feature:ImageTriggers][Serial] ImageStream API TestImageStreamWithoutDockerImageConfig [apigroup:image.openshift.io]": " [Suite:openshift/conformance/serial]", + "[sig-cli] Kubectl client Simple pod should handle in-cluster config": " [Disabled:Broken] [Suite:k8s]", - "[sig-imageregistry][Feature:ImageTriggers][Serial] ImageStream admission TestImageStreamAdmitSpecUpdate [apigroup:image.openshift.io]": " [Suite:openshift/conformance/serial]", + "[sig-cli] Kubectl client Simple pod should return command exit 
codes [Slow] running a failing command with --leave-stdin-open": " [Suite:k8s]", - "[sig-imageregistry][Feature:ImageTriggers][Serial] ImageStream admission TestImageStreamAdmitStatusUpdate [apigroup:image.openshift.io]": " [Suite:openshift/conformance/serial]", + "[sig-cli] Kubectl client Simple pod should return command exit codes [Slow] running a failing command without --restart=Never": " [Suite:k8s]", - "[sig-imageregistry][Feature:ImageTriggers][Serial] ImageStream admission TestImageStreamTagsAdmission [apigroup:image.openshift.io]": " [Suite:openshift/conformance/serial]", + "[sig-cli] Kubectl client Simple pod should return command exit codes [Slow] running a failing command without --restart=Never, but with --rm": " [Suite:k8s]", - "[sig-imageregistry][Feature:Image] oc tag should change image reference for internal images [apigroup:build.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + "[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a failing command": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-imageregistry][Feature:Image] oc tag should preserve image reference for external images [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", + "[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a successful command": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-imageregistry][Feature:Image] oc tag should work when only imagestreams api is available [apigroup:image.openshift.io][apigroup:authorization.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + "[sig-cli] Kubectl client Simple pod should return command exit codes running a failing command": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-imageregistry][Feature:Image] signature TestImageAddSignature [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", + "[sig-cli] Kubectl client Simple pod should return command exit codes running a successful command": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-imageregistry][Feature:Image] signature TestImageRemoveSignature [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", + "[sig-cli] Kubectl client Simple pod should support exec through an HTTP proxy": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-imageregistry][Serial] Image signature workflow can push a signed image to openshift registry and verify it [apigroup:user.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/serial]", + "[sig-cli] Kubectl client Simple pod should support exec through kubectl proxy": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-installer][Feature:baremetal] Baremetal platform should [apigroup:config.openshift.io] have baremetalhost resources": " [Suite:openshift/conformance/parallel]", + "[sig-cli] Kubectl client Simple pod should support exec using resource/name": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-installer][Feature:baremetal] Baremetal platform should [apigroup:config.openshift.io] have hostfirmwaresetting resources": " [Suite:openshift/conformance/parallel]", + "[sig-cli] Kubectl client Simple pod should support exec": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-installer][Feature:baremetal] Baremetal platform should 
[apigroup:config.openshift.io] have preprovisioning images for workers": " [Suite:openshift/conformance/parallel]", + "[sig-cli] Kubectl client Simple pod should support inline execution and attach": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-installer][Feature:baremetal] Baremetal platform should [apigroup:config.openshift.io] not allow updating BootMacAddress": " [Suite:openshift/conformance/parallel]", + "[sig-cli] Kubectl client Simple pod should support port-forward": " [Suite:openshift/conformance/parallel] [Suite:k8s]", - "[sig-installer][Feature:baremetal] Baremetal/OpenStack/vSphere/None/AWS/Azure/GCP platforms [apigroup:config.openshift.io] have a metal3 deployment": " [Suite:openshift/conformance/parallel]", + "[sig-cli] Kubectl client Update Demo should create and stop a replication controller [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", - "[sig-installer][Feature:baremetal][Serial] Baremetal platform should [apigroup:config.openshift.io] skip inspection when disabled by annotation": " [Suite:openshift/conformance/serial]", + "[sig-cli] Kubectl client Update Demo should scale a replication controller [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", - "[sig-installer][Feature:baremetal][Serial] Baremetal platform should ensure [apigroup:config.openshift.io] cluster baremetal operator and metal3 deployment return back healthy after they are deleted": " [Suite:openshift/conformance/serial]", + "[sig-cli] Kubectl client kubectl wait should ignore not found error with --for=delete": " [Disabled:Broken] [Suite:k8s]", "[sig-instrumentation] Events API should delete a collection of events [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", @@ -1955,26 +815,6 @@ var Annotations = map[string]string{ "[sig-instrumentation] MetricsGrabber should grab all metrics from a Scheduler.": " [Disabled:Broken] [Suite:k8s]", - "[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster should have a AlertmanagerReceiversNotConfigured alert in firing state": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster should have important platform topology metrics [apigroup:config.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster should have non-Pod host cAdvisor metrics": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster should provide ingress metrics": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster should provide named network metrics [apigroup:project.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster should report telemetry [Late]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster should start and expose a secured proxy and unsecured metrics [apigroup:config.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - 
"[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster shouldn't have failing rules evaluation": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster shouldn't report any alerts in firing state apart from Watchdog and AlertmanagerReceiversNotConfigured [Early][apigroup:config.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster when using openshift-sdn should be able to get the sdn ovs flows": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - "[sig-instrumentation] Stackdriver Monitoring should have accelerator metrics [Feature:StackdriverAcceleratorMonitoring]": " [Disabled:Unimplemented] [Suite:k8s]", "[sig-instrumentation] Stackdriver Monitoring should have cluster metrics [Feature:StackdriverMonitoring]": " [Disabled:Unimplemented] [Suite:k8s]", @@ -1987,46 +827,6 @@ var Annotations = map[string]string{ "[sig-instrumentation] Stackdriver Monitoring should run Stackdriver Metadata Agent [Feature:StackdriverMetadataAgent]": " [Disabled:Unimplemented] [Suite:k8s]", - "[sig-instrumentation][Late] Alerts shouldn't exceed the 650 series limit of total series sent via telemetry from each cluster": " [Suite:openshift/conformance/parallel]", - - "[sig-instrumentation][Late] Alerts shouldn't report any unexpected alerts in firing or pending state [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-instrumentation][Late] OpenShift alerting rules [apigroup:image.openshift.io] should have a runbook_url annotation if the alert is critical": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-instrumentation][Late] OpenShift alerting rules [apigroup:image.openshift.io] should have a valid severity label": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-instrumentation][Late] OpenShift alerting rules [apigroup:image.openshift.io] should have description and summary annotations": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-instrumentation][sig-builds][Feature:Builds] Prometheus when installed on the cluster should start and expose a secured proxy and verify build metrics [apigroup:config.openshift.io][apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-network-edge] DNS should answer A and AAAA queries for a dual-stack service [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-network-edge] DNS should answer endpoint and wildcard queries for the cluster": " [Disabled:Broken]", - - "[sig-network-edge] DNS should answer queries using the local DNS endpoint": " [Suite:openshift/conformance/parallel]", - - "[sig-network-edge][Conformance][Area:Networking][Feature:Router] The HAProxy router should be able to connect to a service that is idled because a GET on the route will unidle it [apigroup:config.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel/minimal]", - - "[sig-network-edge][Conformance][Area:Networking][Feature:Router] The HAProxy router should pass the gRPC interoperability tests [apigroup:config.openshift.io][apigroup:route.openshift.io][apigroup:operator.openshift.io]": " [Suite:openshift/conformance/parallel/minimal]", - - 
"[sig-network-edge][Conformance][Area:Networking][Feature:Router][apigroup:route.openshift.io] The HAProxy router should pass the h2spec conformance tests [apigroup:config.openshift.io][apigroup:authorization.openshift.io][apigroup:user.openshift.io][apigroup:security.openshift.io][apigroup:operator.openshift.io]": " [Suite:openshift/conformance/parallel/minimal]", - - "[sig-network-edge][Conformance][Area:Networking][Feature:Router][apigroup:route.openshift.io][apigroup:config.openshift.io] The HAProxy router should pass the http2 tests [apigroup:image.openshift.io][apigroup:operator.openshift.io]": " [Suite:openshift/conformance/parallel/minimal]", - - "[sig-network-edge][Feature:Idling] Idling with a single service and DeploymentConfig [apigroup:route.openshift.io] should idle the service and DeploymentConfig properly [apigroup:apps.openshift.io]": " [Disabled:Broken]", - - "[sig-network-edge][Feature:Idling] Idling with a single service and ReplicationController should idle the service and ReplicationController properly": " [Suite:openshift/conformance/parallel]", - - "[sig-network-edge][Feature:Idling] Unidling [apigroup:apps.openshift.io][apigroup:route.openshift.io] should handle many TCP connections by possibly dropping those over a certain bound [Serial]": " [Suite:openshift/conformance/serial]", - - "[sig-network-edge][Feature:Idling] Unidling [apigroup:apps.openshift.io][apigroup:route.openshift.io] should handle many UDP senders (by continuing to drop all packets on the floor) [Serial]": " [Suite:openshift/conformance/serial]", - - "[sig-network-edge][Feature:Idling] Unidling [apigroup:apps.openshift.io][apigroup:route.openshift.io] should work with TCP (when fully idled)": " [Suite:openshift/conformance/parallel]", - - "[sig-network-edge][Feature:Idling] Unidling [apigroup:apps.openshift.io][apigroup:route.openshift.io] should work with TCP (while idling)": " [Disabled:Broken]", - - "[sig-network-edge][Feature:Idling] Unidling [apigroup:apps.openshift.io][apigroup:route.openshift.io] should work with UDP": " [Suite:openshift/conformance/parallel]", - "[sig-network] CVE-2021-29923 IPv4 Service Type ClusterIP with leading zeros should work interpreted as decimal": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-network] ClusterDns [Feature:Example] should create pod that uses dns": " [Disabled:Broken] [Suite:k8s]", @@ -2101,8 +901,6 @@ var Annotations = map[string]string{ "[sig-network] IngressClass [Feature:Ingress] should set default value on new IngressClass [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", - "[sig-network] Internal connectivity for TCP and UDP on ports 9000-9999 is allowed [Serial:Self]": " [Suite:openshift/conformance/parallel]", - "[sig-network] KubeProxy should set TCP CLOSE_WAIT timeout [Privileged]": " [Disabled:Broken] [Suite:k8s]", "[sig-network] LoadBalancers ESIPP [Slow] should handle updates to ExternalTrafficPolicy field": " [Suite:k8s]", @@ -2555,148 +1353,6 @@ var Annotations = map[string]string{ "[sig-network] [Feature:Topology Hints] should distribute endpoints evenly": " [Disabled:SpecialConfig] [Suite:k8s]", - "[sig-network] external gateway address when using openshift ovn-kubernetes should match the address family of the pod": " [Suite:openshift/conformance/parallel]", - - "[sig-network] load balancer should be managed by OpenShift": " [Suite:openshift/conformance/parallel]", - - "[sig-network] load balancer should not be managed by OpenShift": " [Suite:openshift/conformance/parallel]", - - "[sig-network] 
multicast when using one of the OpenshiftSDN modes 'redhat/openshift-ovs-multitenant, redhat/openshift-ovs-networkpolicy' should allow multicast traffic in namespaces where it is enabled": " [Suite:openshift/conformance/parallel]", - - "[sig-network] multicast when using one of the OpenshiftSDN modes 'redhat/openshift-ovs-multitenant, redhat/openshift-ovs-networkpolicy' should block multicast traffic in namespaces where it is disabled": " [Suite:openshift/conformance/parallel]", - - "[sig-network] multicast when using one of the OpenshiftSDN modes 'redhat/openshift-ovs-subnet' should block multicast traffic": " [Suite:openshift/conformance/parallel]", - - "[sig-network] network isolation when using a plugin in a mode that does not isolate namespaces by default should allow communication between pods in different namespaces on different nodes": " [Suite:openshift/conformance/parallel]", - - "[sig-network] network isolation when using a plugin in a mode that does not isolate namespaces by default should allow communication between pods in different namespaces on the same node": " [Suite:openshift/conformance/parallel]", - - "[sig-network] network isolation when using a plugin in a mode that isolates namespaces by default should allow communication from default to non-default namespace on a different node": " [Suite:openshift/conformance/parallel]", - - "[sig-network] network isolation when using a plugin in a mode that isolates namespaces by default should allow communication from default to non-default namespace on the same node": " [Suite:openshift/conformance/parallel]", - - "[sig-network] network isolation when using a plugin in a mode that isolates namespaces by default should allow communication from non-default to default namespace on a different node": " [Suite:openshift/conformance/parallel]", - - "[sig-network] network isolation when using a plugin in a mode that isolates namespaces by default should allow communication from non-default to default namespace on the same node": " [Suite:openshift/conformance/parallel]", - - "[sig-network] network isolation when using a plugin in a mode that isolates namespaces by default should prevent communication between pods in different namespaces on different nodes": " [Suite:openshift/conformance/parallel]", - - "[sig-network] network isolation when using a plugin in a mode that isolates namespaces by default should prevent communication between pods in different namespaces on the same node": " [Suite:openshift/conformance/parallel]", - - "[sig-network] services basic functionality should allow connections to another pod on a different node via a service IP": " [Suite:openshift/conformance/parallel]", - - "[sig-network] services basic functionality should allow connections to another pod on the same node via a service IP": " [Suite:openshift/conformance/parallel]", - - "[sig-network] services when running openshift ipv4 cluster ensures external ip policy is configured correctly on the cluster [apigroup:config.openshift.io] [Serial]": " [Suite:openshift/conformance/serial]", - - "[sig-network] services when running openshift ipv4 cluster on bare metal [apigroup:config.openshift.io] ensures external auto assign cidr is configured correctly on the cluster [apigroup:config.openshift.io] [Serial]": " [Suite:openshift/conformance/serial]", - - "[sig-network] services when using a plugin in a mode that does not isolate namespaces by default should allow connections to pods in different namespaces on different nodes via service IPs": " 
[Suite:openshift/conformance/parallel]", - - "[sig-network] services when using a plugin in a mode that does not isolate namespaces by default should allow connections to pods in different namespaces on the same node via service IPs": " [Suite:openshift/conformance/parallel]", - - "[sig-network] services when using a plugin in a mode that isolates namespaces by default should allow connections from pods in the default namespace to a service in another namespace on a different node": " [Suite:openshift/conformance/parallel]", - - "[sig-network] services when using a plugin in a mode that isolates namespaces by default should allow connections from pods in the default namespace to a service in another namespace on the same node": " [Suite:openshift/conformance/parallel]", - - "[sig-network] services when using a plugin in a mode that isolates namespaces by default should allow connections to services in the default namespace from a pod in another namespace on a different node": " [Suite:openshift/conformance/parallel]", - - "[sig-network] services when using a plugin in a mode that isolates namespaces by default should allow connections to services in the default namespace from a pod in another namespace on the same node": " [Suite:openshift/conformance/parallel]", - - "[sig-network] services when using a plugin in a mode that isolates namespaces by default should prevent connections to pods in different namespaces on different nodes via service IPs": " [Suite:openshift/conformance/parallel]", - - "[sig-network] services when using a plugin in a mode that isolates namespaces by default should prevent connections to pods in different namespaces on the same node via service IPs": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:EgressFirewall] egressFirewall should have no impact outside its namespace [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:EgressFirewall] when using openshift ovn-kubernetes should ensure egressfirewall is created": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:EgressFirewall] when using openshift-sdn should ensure egressnetworkpolicy is created [apigroup:network.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:EgressIP][apigroup:config.openshift.io] [external-targets][apigroup:user.openshift.io][apigroup:security.openshift.io] EgressIPs can be assigned automatically [Skipped:Network/OVNKubernetes]": " [Serial] [Suite:openshift/conformance/serial]", - - "[sig-network][Feature:EgressIP][apigroup:config.openshift.io] [external-targets][apigroup:user.openshift.io][apigroup:security.openshift.io] only pods matched by the pod selector should have the EgressIPs [Skipped:Network/OpenShiftSDN]": " [Serial] [Suite:openshift/conformance/serial]", - - "[sig-network][Feature:EgressIP][apigroup:config.openshift.io] [external-targets][apigroup:user.openshift.io][apigroup:security.openshift.io] pods should have the assigned EgressIPs and EgressIPs can be deleted and recreated [Skipped:azure][apigroup:route.openshift.io]": " [Serial] [Suite:openshift/conformance/serial]", - - "[sig-network][Feature:EgressIP][apigroup:config.openshift.io] [external-targets][apigroup:user.openshift.io][apigroup:security.openshift.io] pods should have the assigned EgressIPs and EgressIPs can be updated [Skipped:Network/OpenShiftSDN]": " [Serial] [Suite:openshift/conformance/serial]", - - "[sig-network][Feature:EgressIP][apigroup:config.openshift.io] 
[external-targets][apigroup:user.openshift.io][apigroup:security.openshift.io] pods should keep the assigned EgressIPs when being rescheduled to another node": " [Serial] [Suite:openshift/conformance/serial]", - - "[sig-network][Feature:EgressIP][apigroup:config.openshift.io] [internal-targets] EgressIP pods should query hostNetwork pods with the local node's SNAT": " [Disabled:Broken] [Serial]", - - "[sig-network][Feature:EgressRouterCNI] should ensure ipv4 egressrouter cni resources are created [apigroup:operator.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:EgressRouterCNI] when using openshift ovn-kubernetes should ensure ipv6 egressrouter cni resources are created [apigroup:operator.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Multus] should use multus to create net1 device from network-attachment-definition [apigroup:k8s.cni.cncf.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Network Policy Audit logging] when using openshift ovn-kubernetes should ensure acl logs are created and correct [apigroup:project.openshift.io][apigroup:network.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Router][apigroup:config.openshift.io] The HAProxy router should enable openshift-monitoring to pull metrics": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Router][apigroup:config.openshift.io] The HAProxy router should expose a health check on the metrics port": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Router][apigroup:config.openshift.io] The HAProxy router should expose prometheus metrics for a route [apigroup:route.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Router][apigroup:config.openshift.io] The HAProxy router should expose the profiling endpoints": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Router][apigroup:config.openshift.io][apigroup:operator.openshift.io] The HAProxy router should set Forwarded headers appropriately": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Router][apigroup:image.openshift.io] The HAProxy router should serve a route that points to two services and respect weights": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Router][apigroup:operator.openshift.io] The HAProxy router should respond with 503 to unrecognized hosts": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Router][apigroup:operator.openshift.io] The HAProxy router should serve routes that were created from an ingress [apigroup:route.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Router][apigroup:route.openshift.io] The HAProxy router converges when multiple routers are writing conflicting status": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Router][apigroup:route.openshift.io] The HAProxy router converges when multiple routers are writing status": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Router][apigroup:route.openshift.io] The HAProxy router reports the expected host names in admitted routes' statuses": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Router][apigroup:route.openshift.io] 
The HAProxy router should override the route host for overridden domains with a custom value [apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Router][apigroup:route.openshift.io] The HAProxy router should override the route host with a custom value": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Router][apigroup:route.openshift.io] The HAProxy router should run even if it has no access to update status [apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Router][apigroup:route.openshift.io] The HAProxy router should serve the correct routes when running with the haproxy config manager": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Router][apigroup:route.openshift.io] The HAProxy router should serve the correct routes when scoped to a single namespace and label set": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Router][apigroup:route.openshift.io] when FIPS is disabled the HAProxy router should serve routes when configured with a 1024-bit RSA key": " [Feature:Networking-IPv4] [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Router][apigroup:route.openshift.io] when FIPS is enabled the HAProxy router should not work when configured with a 1024-bit RSA key": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Router][apigroup:route.openshift.io][apigroup:operator.openshift.io] The HAProxy router should support reencrypt to services backed by a serving certificate automatically": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Whereabouts] should assign unique IP addresses to each pod in the event of a race condition case [apigroup:k8s.cni.cncf.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:Whereabouts] should use whereabouts net-attach-def to limit IP ranges for newly created pods [apigroup:k8s.cni.cncf.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:bond] should create a pod with bond interface [apigroup:k8s.cni.cncf.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:tap] should create a pod with a tap interface [apigroup:k8s.cni.cncf.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:tuning] pod should not start for sysctls not on whitelist [apigroup:k8s.cni.cncf.io] net.ipv4.conf.IFNAME.arp_filter": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:tuning] pod should not start for sysctls not on whitelist [apigroup:k8s.cni.cncf.io] net.ipv4.conf.all.send_redirects": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:tuning] pod should start with all sysctl on whitelist [apigroup:k8s.cni.cncf.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:tuning] pod sysctl should not affect existing pods [apigroup:k8s.cni.cncf.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:tuning] pod sysctl should not affect newly created pods [apigroup:k8s.cni.cncf.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:tuning] pod sysctls should not affect node [apigroup:k8s.cni.cncf.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-network][Feature:tuning] sysctl allowlist update should start a pod with custom sysctl only when the sysctl is added 
to whitelist": " [Suite:openshift/conformance/parallel]", - - "[sig-network][endpoints] admission [apigroup:config.openshift.io] blocks manual creation of EndpointSlices pointing to the cluster or service network": " [Suite:openshift/conformance/parallel]", - - "[sig-network][endpoints] admission [apigroup:config.openshift.io] blocks manual creation of Endpoints pointing to the cluster or service network": " [Suite:openshift/conformance/parallel]", - "[sig-node] AppArmor load AppArmor profiles can disable an AppArmor profile, using unconfined": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-node] AppArmor load AppArmor profiles should enforce an AppArmor profile": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -2799,10 +1455,6 @@ var Annotations = map[string]string{ "[sig-node] Lease lease API should be available [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", - "[sig-node] Managed cluster record the number of nodes at the beginning of the tests [Early]": " [Suite:openshift/conformance/parallel]", - - "[sig-node] Managed cluster should report ready nodes the entire duration of the test run [Late][apigroup:monitoring.coreos.com]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - "[sig-node] Mount propagation should propagate mounts within defined scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-node] NoExecuteTaintManager Multiple Pods [Serial] evicts pods with minTolerationSeconds [Disruptive] [Conformance]": " [Skipped:SingleReplicaTopology] [Suite:k8s]", @@ -3061,40 +1713,6 @@ var Annotations = map[string]string{ "[sig-node] kubelet kubectl node-logs [Feature:add node log viewer] should return the logs for the requested service": " [Disabled:Alpha] [Suite:k8s]", - "[sig-node] should override timeoutGracePeriodSeconds when annotation is set": " [Suite:openshift/conformance/parallel]", - - "[sig-node] supplemental groups Ensure supplemental groups propagate to docker should propagate requested groups to the container [apigroup:security.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-node][Late] should not have pod creation failures due to systemd timeouts": " [Suite:openshift/conformance/parallel]", - - "[sig-operator] OLM should Implement packages API server and list packagemanifest info with namespace not NULL [apigroup:packages.operators.coreos.com]": " [Suite:openshift/conformance/parallel]", - - "[sig-operator] OLM should be installed with catalogsources at version v1alpha1 [apigroup:operators.coreos.com]": " [Suite:openshift/conformance/parallel]", - - "[sig-operator] OLM should be installed with clusterserviceversions at version v1alpha1 [apigroup:operators.coreos.com]": " [Suite:openshift/conformance/parallel]", - - "[sig-operator] OLM should be installed with installplans at version v1alpha1 [apigroup:operators.coreos.com]": " [Suite:openshift/conformance/parallel]", - - "[sig-operator] OLM should be installed with operatorgroups at version v1 [apigroup:operators.coreos.com]": " [Suite:openshift/conformance/parallel]", - - "[sig-operator] OLM should be installed with packagemanifests at version v1 [apigroup:packages.operators.coreos.com]": " [Suite:openshift/conformance/parallel]", - - "[sig-operator] OLM should be installed with subscriptions at version v1alpha1 [apigroup:operators.coreos.com]": " [Suite:openshift/conformance/parallel]", - - "[sig-operator] OLM should have imagePullPolicy:IfNotPresent on thier deployments [apigroup:config.openshift.io]": " 
[Suite:openshift/conformance/parallel]", - - "[sig-operator] an end user can use OLM Report Upgradeable in OLM ClusterOperators status [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]", - - "[sig-operator] an end user can use OLM can subscribe to the operator [apigroup:config.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-scalability][Feature:Performance] Load cluster should populate the cluster [Slow][Serial][apigroup:template.openshift.io][apigroup:apps.openshift.io][apigroup:build.openshift.io]": "", - - "[sig-scalability][Feature:Performance][Serial][Slow] Load cluster concurrently with templates": "", - - "[sig-scalability][Feature:Performance][Serial][Slow] Mirror cluster it should read the cluster apps [apigroup:apps.openshift.io]": "", - - "[sig-scalability][Feature:Performance][Serial][Slow] Mirror cluster it should read the node info": "", - "[sig-scheduling] GPUDevicePluginAcrossRecreate [Feature:Recreate] run Nvidia GPU Device Plugin tests with a recreation": " [Disabled:SpecialConfig] [Suite:k8s]", "[sig-scheduling] LimitRange should create a LimitRange with defaults and ensure pod has those defaults applied. [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", @@ -3151,28 +1769,6 @@ var Annotations = map[string]string{ "[sig-scheduling] [Feature:GPUDevicePlugin] run Nvidia GPU Device Plugin tests": " [Disabled:SpecialConfig] [Suite:k8s]", - "[sig-scheduling][Early] The HAProxy router pods [apigroup:route.openshift.io] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", - - "[sig-scheduling][Early] The openshift-apiserver pods [apigroup:apps.openshift.io][apigroup:authorization.openshift.io][apigroup:build.openshift.io][apigroup:image.openshift.io][apigroup:project.openshift.io][apigroup:quota.openshift.io][apigroup:route.openshift.io][apigroup:security.openshift.io][apigroup:template.openshift.io] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", - - "[sig-scheduling][Early] The openshift-authentication pods [apigroup:oauth.openshift.io] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", - - "[sig-scheduling][Early] The openshift-console console pods [apigroup:console.openshift.io] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", - - "[sig-scheduling][Early] The openshift-console downloads pods [apigroup:console.openshift.io] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", - - "[sig-scheduling][Early] The openshift-etcd pods [apigroup:operator.openshift.io] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", - - "[sig-scheduling][Early] The openshift-image-registry pods [apigroup:imageregistry.operator.openshift.io] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", - - "[sig-scheduling][Early] The openshift-monitoring prometheus-adapter pods [apigroup:monitoring.coreos.com] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", - - "[sig-scheduling][Early] The openshift-monitoring thanos-querier pods [apigroup:monitoring.coreos.com] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", - - "[sig-scheduling][Early] The openshift-oauth-apiserver pods [apigroup:oauth.openshift.io][apigroup:user.openshift.io] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", - - 
"[sig-scheduling][Early] The openshift-operator-lifecycle-manager pods [apigroup:packages.operators.coreos.com] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", - "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -15845,8 +14441,6 @@ var Annotations = map[string]string{ "[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (xfs)][Slow] volumes should store data": " [Suite:k8s]", - "[sig-storage] Managed cluster should have no crashlooping recycler pods over four minutes": " [Suite:openshift/conformance/parallel]", - "[sig-storage] Mounted volume expand [Feature:StorageProvider] Should verify mounted devices can be resized": " [Flaky] [Skipped:NoOptionalCapabilities] [Suite:k8s]", "[sig-storage] Multi-AZ Cluster Volumes should schedule pods in the same zones as statically provisioned PVs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -16384,32 +14978,6 @@ var Annotations = map[string]string{ "[sig-storage] vsphere cloud provider stress [Feature:vsphere] vsphere stress tests": " [Disabled:Unsupported] [Suite:k8s]", "[sig-storage] vsphere statefulset [Feature:vsphere] vsphere statefulset testing": " [Disabled:Unsupported] [Suite:k8s]", - - "[sig-storage][Feature:CSIInlineVolumeAdmission][Serial] baseline namespace should allow pods with inline volumes when the driver uses the baseline label": " [Suite:openshift/conformance/serial]", - - "[sig-storage][Feature:CSIInlineVolumeAdmission][Serial] baseline namespace should allow pods with inline volumes when the driver uses the restricted label": " [Suite:openshift/conformance/serial]", - - "[sig-storage][Feature:CSIInlineVolumeAdmission][Serial] baseline namespace should deny pods with inline volumes when the driver uses the privileged label": " [Suite:openshift/conformance/serial]", - - "[sig-storage][Feature:CSIInlineVolumeAdmission][Serial] privileged namespace should allow pods with inline volumes when the driver uses the privileged label": " [Suite:openshift/conformance/serial]", - - "[sig-storage][Feature:CSIInlineVolumeAdmission][Serial] privileged namespace should allow pods with inline volumes when the driver uses the restricted label": " [Suite:openshift/conformance/serial]", - - "[sig-storage][Feature:CSIInlineVolumeAdmission][Serial] restricted namespace should allow pods with inline volumes when the driver uses the restricted label": " [Suite:openshift/conformance/serial]", - - "[sig-storage][Feature:CSIInlineVolumeAdmission][Serial] restricted namespace should deny pods with inline volumes when the driver uses the baseline label": " [Suite:openshift/conformance/serial]", - - "[sig-storage][Feature:CSIInlineVolumeAdmission][Serial] restricted namespace should deny pods with inline volumes when the driver uses the privileged label": " [Suite:openshift/conformance/serial]", - - "[sig-storage][Feature:DisableStorageClass][Serial] should not reconcile the StorageClass when StorageClassState is Unmanaged": " [Suite:openshift/conformance/serial]", - - "[sig-storage][Feature:DisableStorageClass][Serial] should reconcile the StorageClass when StorageClassState is Managed": " 
[Suite:openshift/conformance/serial]", - - "[sig-storage][Feature:DisableStorageClass][Serial] should remove the StorageClass when StorageClassState is Removed": " [Suite:openshift/conformance/serial]", - - "[sig-storage][Late] Metrics should report short attach times": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - - "[sig-storage][Late] Metrics should report short mount times": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", } func init() { diff --git a/test/extended/util/annotate/rules.go b/test/extended/util/annotate/rules.go index 8a21cbbb6cc0..e711a2ab4274 100644 --- a/test/extended/util/annotate/rules.go +++ b/test/extended/util/annotate/rules.go @@ -276,10 +276,10 @@ var ( `\[sig-instrumentation\]\[Late\] Alerts shouldn't report any alerts in firing or pending state apart from Watchdog and AlertmanagerReceiversNotConfigured and have no gaps in Watchdog firing`, `\[sig-instrumentation\]\[sig-builds\]\[Feature:Builds\] Prometheus when installed on the cluster should start and expose a secured proxy and verify build metrics`, `\[sig-network-edge\]\[Conformance\]\[Area:Networking\]\[Feature:Router\] The HAProxy router should be able to connect to a service that is idled because a GET on the route will unidle it`, - `\[sig-network\]\[Feature:Router\]\[apigroup:config.openshift.io\] The HAProxy router should enable openshift-monitoring to pull metrics`, - `\[sig-network\]\[Feature:Router\]\[apigroup:config.openshift.io\] The HAProxy router should expose a health check on the metrics port`, - `\[sig-network\]\[Feature:Router\]\[apigroup:config.openshift.io\] The HAProxy router should expose prometheus metrics for a route`, - `\[sig-network\]\[Feature:Router\]\[apigroup:config.openshift.io\] The HAProxy router should expose the profiling endpoints`, + `\[sig-network\]\[Feature:Router\] The HAProxy router should enable openshift-monitoring to pull metrics`, + `\[sig-network\]\[Feature:Router\] The HAProxy router should expose a health check on the metrics port`, + `\[sig-network\]\[Feature:Router\] The HAProxy router should expose prometheus metrics for a route`, + `\[sig-network\]\[Feature:Router\] The HAProxy router should expose the profiling endpoints`, `\[sig-network\]\[Feature:Router\]\[apigroup:route.openshift.io\] The HAProxy router should override the route host for overridden domains with a custom value`, `\[sig-network\]\[Feature:Router\]\[apigroup:route.openshift.io\] The HAProxy router should override the route host with a custom value`, `\[sig-network\]\[Feature:Router\]\[apigroup:operator.openshift.io\] The HAProxy router should respond with 503 to unrecognized hosts`, @@ -287,7 +287,7 @@ var ( `\[sig-network\]\[Feature:Router\]\[apigroup:image.openshift.io\] The HAProxy router should serve a route that points to two services and respect weights`, `\[sig-network\]\[Feature:Router\]\[apigroup:operator.openshift.io\] The HAProxy router should serve routes that were created from an ingress`, `\[sig-network\]\[Feature:Router\]\[apigroup:route.openshift.io\] The HAProxy router should serve the correct routes when scoped to a single namespace and label set`, - `\[sig-network\]\[Feature:Router\]\[apigroup:config.openshift.io\]\[apigroup:operator.openshift.io\] The HAProxy router should set Forwarded headers appropriately`, + `\[sig-network\]\[Feature:Router\]\[apigroup:operator.openshift.io\] The HAProxy router should set Forwarded headers appropriately`, 
`\[sig-network\]\[Feature:Router\]\[apigroup:route.openshift.io\]\[apigroup:operator.openshift.io\] The HAProxy router should support reencrypt to services backed by a serving certificate automatically`,
 	`\[sig-network\] Networking should provide Internet connection for containers \[Feature:Networking-IPv6\]`,
 	`\[sig-node\] Managed cluster should report ready nodes the entire duration of the test run`,
diff --git a/test/extended/util/client.go b/test/extended/util/client.go
index def8cd152a18..067e0019a87a 100644
--- a/test/extended/util/client.go
+++ b/test/extended/util/client.go
@@ -64,6 +64,7 @@ import (
 	authorizationv1client "github.com/openshift/client-go/authorization/clientset/versioned"
 	buildv1client "github.com/openshift/client-go/build/clientset/versioned"
 	configv1client "github.com/openshift/client-go/config/clientset/versioned"
+	fakeconfigv1client "github.com/openshift/client-go/config/clientset/versioned/fake"
 	imagev1client "github.com/openshift/client-go/image/clientset/versioned"
 	oauthv1client "github.com/openshift/client-go/oauth/clientset/versioned"
 	operatorv1client "github.com/openshift/client-go/operator/clientset/versioned"
@@ -79,23 +80,26 @@ import (
 // CLI provides function to call the OpenShift CLI and Kubernetes and OpenShift
 // clients.
 type CLI struct {
-	execPath           string
-	verb               string
-	configPath         string
-	adminConfigPath    string
-	token              string
-	username           string
-	globalArgs         []string
-	commandArgs        []string
-	finalArgs          []string
-	namespacesToDelete []string
-	stdin              *bytes.Buffer
-	stdout             io.Writer
-	stderr             io.Writer
-	verbose            bool
-	withoutNamespace   bool
-	kubeFramework      *framework.Framework
-
+	execPath                string
+	verb                    string
+	configPath              string
+	adminConfigPath         string
+	staticConfigManifestDir string
+	token                   string
+	username                string
+	globalArgs              []string
+	commandArgs             []string
+	finalArgs               []string
+	namespacesToDelete      []string
+	stdin                   *bytes.Buffer
+	stdout                  io.Writer
+	stderr                  io.Writer
+	verbose                 bool
+	withoutNamespace        bool
+	kubeFramework           *framework.Framework
+
+	// configObjects are read from the static manifest dir (set through the STATIC_CONFIG_MANIFEST_DIR env var)
+	configObjects     []runtime.Object
 	resourcesToDelete []resourceRef
 }
@@ -114,6 +118,9 @@ func NewCLIWithFramework(kubeFramework *framework.Framework) *CLI {
 		execPath:        "oc",
 		adminConfigPath: KubeConfigPath(),
 	}
+	cli.staticConfigManifestDir = StaticConfigManifestDir()
+	// Called only once (assuming the objects never get modified)
+	// TODO: run in every BeforeEach
+	cli.setupStaticConfigsFromManifests()
 	return cli
 }
@@ -150,13 +157,17 @@ func NewCLIWithoutNamespace(project string) *CLI {
 			},
 			Timeouts: framework.NewTimeoutContextWithDefaults(),
 		},
-		username:         "admin",
-		execPath:         "oc",
-		adminConfigPath:  KubeConfigPath(),
-		withoutNamespace: true,
+		username:                "admin",
+		execPath:                "oc",
+		adminConfigPath:         KubeConfigPath(),
+		staticConfigManifestDir: StaticConfigManifestDir(),
+		withoutNamespace:        true,
 	}
 	g.BeforeEach(cli.kubeFramework.BeforeEach)
 
+	// Called only once (assuming the objects never get modified)
+	cli.setupStaticConfigsFromManifests()
+
 	// we can't use k8s initialization method to inject these into framework.NewFrameworkExtensions
 	// because we need to have an instance of CLI, so we're rely on the less optimal ginkgo.AfterEach
 	// in case where this method fails, framework cleans up the entire namespace so we should be
@@ -563,6 +574,12 @@ func (c *CLI) RESTMapper() meta.RESTMapper {
 	return ret
 }
 
+func (c *CLI) setupStaticConfigsFromManifests() {
+	err, objects := collectConfigManifestsFromDir(c.staticConfigManifestDir)
+	o.Expect(err).ToNot(o.HaveOccurred())
+	c.configObjects = objects
+}
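+
+// Illustrative example (the directory and manifest below are assumptions for
+// documentation, not part of this change). With
+//
+//	export STATIC_CONFIG_MANIFEST_DIR=/tmp/static-manifests
+//
+// and /tmp/static-manifests/infrastructure.yaml containing
+//
+//	apiVersion: config.openshift.io/v1
+//	kind: Infrastructure
+//	metadata:
+//	  name: cluster
+//
+// each CLI instance decodes that manifest into configObjects once, and
+// AdminConfigClient() then serves Infrastructure reads from a fake clientset
+// seeded with it instead of querying the live cluster.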
+
 func (c *CLI) AppsClient() appsv1client.Interface {
 	return appsv1client.NewForConfigOrDie(c.UserConfig())
 }
 
@@ -608,7 +625,10 @@ func (c *CLI) AdminBuildClient() buildv1client.Interface {
 }
 
 func (c *CLI) AdminConfigClient() configv1client.Interface {
-	return configv1client.NewForConfigOrDie(c.AdminConfig())
+	// record which config v1 kinds have a static manifest, so the shim can
+	// route them to the fake client (assumes decoded objects keep their Kind set)
+	v1Kinds := map[string]bool{}
+	for _, obj := range c.configObjects {
+		v1Kinds[obj.GetObjectKind().GroupVersionKind().Kind] = true
+	}
+	return &ConfigClientShim{
+		adminConfig: c.AdminConfig(),
+		v1Kinds:     v1Kinds,
+		fakeClient:  fakeconfigv1client.NewSimpleClientset(c.configObjects...),
+	}
 }
 
 func (c *CLI) AdminImageClient() imagev1client.Interface {
diff --git a/test/extended/util/configv1shim.go b/test/extended/util/configv1shim.go
new file mode 100644
index 000000000000..8d92dfa49915
--- /dev/null
+++ b/test/extended/util/configv1shim.go
@@ -0,0 +1,271 @@
+package util
+
+import (
+	"fmt"
+
+	configv1client "github.com/openshift/client-go/config/clientset/versioned"
+	fakeconfigv1client "github.com/openshift/client-go/config/clientset/versioned/fake"
+	configv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
+	configv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/rest"
+	clienttesting "k8s.io/client-go/testing"
+)
+
+// ConfigClientShim makes sure that whenever a static manifest
+// is present for a config v1 kind, the fake client is used
+// instead of the real one.
+type ConfigClientShim struct {
+	adminConfig *rest.Config
+	v1Kinds     map[string]bool
+	fakeClient  *fakeconfigv1client.Clientset
+}
+
+func (c *ConfigClientShim) Discovery() discovery.DiscoveryInterface {
+	return configv1client.NewForConfigOrDie(c.adminConfig).Discovery()
+}
+func (c *ConfigClientShim) ConfigV1() configv1.ConfigV1Interface {
+	return &ConfigV1ClientShim{
+		configv1:           func() configv1.ConfigV1Interface { return configv1client.NewForConfigOrDie(c.adminConfig).ConfigV1() },
+		v1Kinds:            c.v1Kinds,
+		fakeConfigV1Client: c.fakeClient.ConfigV1(),
+	}
+}
+func (c *ConfigClientShim) ConfigV1alpha1() configv1alpha1.ConfigV1alpha1Interface {
+	return configv1client.NewForConfigOrDie(c.adminConfig).ConfigV1alpha1()
+}
+
+var _ configv1client.Interface = &ConfigClientShim{}
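+
+// A minimal usage sketch (ctx, metav1, and staticInfra are illustrative
+// placeholders, not part of this change):
+//
+//	shim := &ConfigClientShim{
+//		adminConfig: adminConfig,
+//		v1Kinds:     map[string]bool{"Infrastructure": true},
+//		fakeClient:  fakeconfigv1client.NewSimpleClientset(staticInfra),
+//	}
+//	// served from the injected static manifest, no API server round trip:
+//	infra, err := shim.ConfigV1().Infrastructures().Get(ctx, "cluster", metav1.GetOptions{})
+//	// no static Proxy manifest was injected, so this goes to the real cluster:
+//	proxy, err := shim.ConfigV1().Proxies().Get(ctx, "cluster", metav1.GetOptions{})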
+type ConfigV1ClientShim struct {
+	configv1           func() configv1.ConfigV1Interface
+	v1Kinds            map[string]bool
+	fakeConfigV1Client configv1.ConfigV1Interface
+}
+
+func (c *ConfigV1ClientShim) APIServers() configv1.APIServerInterface {
+	if c.v1Kinds["APIServer"] {
+		return c.fakeConfigV1Client.APIServers()
+	}
+	return c.configv1().APIServers()
+}
+
+func (c *ConfigV1ClientShim) Authentications() configv1.AuthenticationInterface {
+	if c.v1Kinds["Authentication"] {
+		return c.fakeConfigV1Client.Authentications()
+	}
+	return c.configv1().Authentications()
+}
+
+func (c *ConfigV1ClientShim) Builds() configv1.BuildInterface {
+	if c.v1Kinds["Build"] {
+		return c.fakeConfigV1Client.Builds()
+	}
+	return c.configv1().Builds()
+}
+
+func (c *ConfigV1ClientShim) ClusterOperators() configv1.ClusterOperatorInterface {
+	if c.v1Kinds["ClusterOperator"] {
+		return c.fakeConfigV1Client.ClusterOperators()
+	}
+	return c.configv1().ClusterOperators()
+}
+
+func (c *ConfigV1ClientShim) ClusterVersions() configv1.ClusterVersionInterface {
+	if c.v1Kinds["ClusterVersion"] {
+		return c.fakeConfigV1Client.ClusterVersions()
+	}
+	return c.configv1().ClusterVersions()
+}
+
+func (c *ConfigV1ClientShim) Consoles() configv1.ConsoleInterface {
+	if c.v1Kinds["Console"] {
+		return c.fakeConfigV1Client.Consoles()
+	}
+	return c.configv1().Consoles()
+}
+
+func (c *ConfigV1ClientShim) DNSes() configv1.DNSInterface {
+	if c.v1Kinds["DNS"] {
+		return c.fakeConfigV1Client.DNSes()
+	}
+	return c.configv1().DNSes()
+}
+
+func (c *ConfigV1ClientShim) FeatureGates() configv1.FeatureGateInterface {
+	if c.v1Kinds["FeatureGate"] {
+		return c.fakeConfigV1Client.FeatureGates()
+	}
+	return c.configv1().FeatureGates()
+}
+
+func (c *ConfigV1ClientShim) Images() configv1.ImageInterface {
+	if c.v1Kinds["Image"] {
+		return c.fakeConfigV1Client.Images()
+	}
+	return c.configv1().Images()
+}
+
+func (c *ConfigV1ClientShim) ImageContentPolicies() configv1.ImageContentPolicyInterface {
+	if c.v1Kinds["ImageContentPolicy"] {
+		return c.fakeConfigV1Client.ImageContentPolicies()
+	}
+	return c.configv1().ImageContentPolicies()
+}
+
+func (c *ConfigV1ClientShim) ImageDigestMirrorSets() configv1.ImageDigestMirrorSetInterface {
+	if c.v1Kinds["ImageDigestMirrorSet"] {
+		return c.fakeConfigV1Client.ImageDigestMirrorSets()
+	}
+	return c.configv1().ImageDigestMirrorSets()
+}
+
+func (c *ConfigV1ClientShim) ImageTagMirrorSets() configv1.ImageTagMirrorSetInterface {
+	if c.v1Kinds["ImageTagMirrorSet"] {
+		return c.fakeConfigV1Client.ImageTagMirrorSets()
+	}
+	return c.configv1().ImageTagMirrorSets()
+}
+
+func (c *ConfigV1ClientShim) Infrastructures() configv1.InfrastructureInterface {
+	if c.v1Kinds["Infrastructure"] {
+		return c.fakeConfigV1Client.Infrastructures()
+	}
+	return c.configv1().Infrastructures()
+}
+
+func (c *ConfigV1ClientShim) Ingresses() configv1.IngressInterface {
+	if c.v1Kinds["Ingress"] {
+		return c.fakeConfigV1Client.Ingresses()
+	}
+	return c.configv1().Ingresses()
+}
+
+func (c *ConfigV1ClientShim) Networks() configv1.NetworkInterface {
+	if c.v1Kinds["Network"] {
+		return c.fakeConfigV1Client.Networks()
+	}
+	return c.configv1().Networks()
+}
+
+func (c *ConfigV1ClientShim) Nodes() configv1.NodeInterface {
+	if c.v1Kinds["Node"] {
+		return c.fakeConfigV1Client.Nodes()
+	}
+	return c.configv1().Nodes()
+}
+
+func (c *ConfigV1ClientShim) OAuths() configv1.OAuthInterface {
+	if c.v1Kinds["OAuth"] {
+		return c.fakeConfigV1Client.OAuths()
+	}
+	return c.configv1().OAuths()
+}
+
+func (c *ConfigV1ClientShim) OperatorHubs() configv1.OperatorHubInterface {
+	if c.v1Kinds["OperatorHub"] {
+		return c.fakeConfigV1Client.OperatorHubs()
+	}
+	return c.configv1().OperatorHubs()
+}
+
+func (c *ConfigV1ClientShim) Projects() configv1.ProjectInterface {
+	if c.v1Kinds["Project"] {
+		return c.fakeConfigV1Client.Projects()
+	}
+	return c.configv1().Projects()
+}
+
+func (c *ConfigV1ClientShim) Proxies() configv1.ProxyInterface {
+	if c.v1Kinds["Proxy"] {
+		return c.fakeConfigV1Client.Proxies()
+	}
+	return c.configv1().Proxies()
+}
+
+func (c *ConfigV1ClientShim) Schedulers() configv1.SchedulerInterface {
+	if c.v1Kinds["Scheduler"] {
+		return c.fakeConfigV1Client.Schedulers()
+	}
+	return c.configv1().Schedulers()
+}
+
+func (c *ConfigV1ClientShim) RESTClient() rest.Interface {
+	return c.configv1().RESTClient()
+}
+
+var _ configv1.ConfigV1Interface = &ConfigV1ClientShim{}
+
+var kind2resourceMapping = map[string]string{
+	"APIServer":            "apiservers",
+	"Authentication":       "authentications",
+	"Build":                "builds",
+	"ClusterOperator":      "clusteroperators",
+	"ClusterVersion":       "clusterversions",
+	"Console":              "consoles",
+	"DNS":                  "dnses",
+	"FeatureGate":          "featuregates",
+	"Image":                "images",
+	"ImageContentPolicy":   "imagecontentpolicies",
+	"ImageDigestMirrorSet": "imagedigestmirrorsets",
+	"ImageTagMirrorSet":    "imagetagmirrorsets",
+	"Infrastructure":       "infrastructures",
+	"Ingress":              "ingresses",
+	"Network":              "networks",
+	"Node":                 "nodes",
+	"OAuth":                "oauths",
+	"OperatorHub":          "operatorhubs",
+	"Project":              "projects",
+	"Proxy":                "proxies",
+	"Scheduler":            "schedulers",
+}
+
+type OperationNotPermitted struct {
+	Action string
+}
+
+func (e OperationNotPermitted) Error() string {
+	return fmt.Sprintf("operation %q not permitted", e.Action)
+}
+
+func NewConfigClientShim(
+	adminConfig *rest.Config,
+	objects []runtime.Object,
+) (error, *ConfigClientShim) {
+	fakeClient := fakeconfigv1client.NewSimpleClientset(objects...)
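+	// The fake client above is pre-seeded with the static objects, so reads of
+	// those kinds are served from the manifests; the reactors registered in the
+	// loop below reject any update/delete with OperationNotPermitted.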
+
+	v1Kinds := make(map[string]bool)
+	// make sure every mutating operation on a statically-defined kind is rejected
+	for _, object := range objects {
+		objectKind := object.GetObjectKind().GroupVersionKind()
+		// currently supporting only the config.openshift.io/v1 apiversion
+		if objectKind.Group != "config.openshift.io" {
+			return fmt.Errorf("unknown group: %v", objectKind.Group), nil
+		}
+		if objectKind.Version != "v1" {
+			return fmt.Errorf("unknown version: %v", objectKind.Version), nil
+		}
+		resource, exists := kind2resourceMapping[objectKind.Kind]
+		if !exists {
+			return fmt.Errorf("unknown kind mapping for %v", objectKind.Kind), nil
+		}
+		fakeClient.Fake.PrependReactor("update", resource, func(action clienttesting.Action) (bool, runtime.Object, error) {
+			return true, nil, &OperationNotPermitted{Action: "update"}
+		})
+		fakeClient.Fake.PrependReactor("delete", resource, func(action clienttesting.Action) (bool, runtime.Object, error) {
+			return true, nil, &OperationNotPermitted{Action: "delete"}
+		})
+		v1Kinds[objectKind.Kind] = true
+	}
+
+	return nil, &ConfigClientShim{
+		adminConfig: adminConfig,
+		v1Kinds:     v1Kinds,
+		fakeClient:  fakeClient,
+	}
+}
diff --git a/test/extended/util/configv1shim_test.go b/test/extended/util/configv1shim_test.go
new file mode 100644
index 000000000000..60ede083197c
--- /dev/null
+++ b/test/extended/util/configv1shim_test.go
@@ -0,0 +1,63 @@
+package util
+
+import (
+	"context"
+	"testing"
+
+	configv1 "github.com/openshift/api/config/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+func TestConfigClientShimErrorOnMutation(t *testing.T) {
+	object := &configv1.Infrastructure{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "config.openshift.io/v1",
+			Kind:       "Infrastructure",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "cluster",
+		},
+		Spec: configv1.InfrastructureSpec{
+			PlatformSpec: configv1.PlatformSpec{
+				Type: configv1.AWSPlatformType,
+			},
+		},
+		Status: configv1.InfrastructureStatus{
+			APIServerInternalURL:   "https://api-int.jchaloup-20230222.group-b.devcluster.openshift.com:6443",
+			APIServerURL:           "https://api.jchaloup-20230222.group-b.devcluster.openshift.com:6443",
+			ControlPlaneTopology:   configv1.HighlyAvailableTopologyMode,
+			EtcdDiscoveryDomain:    "",
+			InfrastructureName:     "jchaloup-20230222-cvx5s",
+			InfrastructureTopology: configv1.HighlyAvailableTopologyMode,
+			Platform:               configv1.AWSPlatformType,
+			PlatformStatus: &configv1.PlatformStatus{
+				Type: configv1.AWSPlatformType,
+				AWS: &configv1.AWSPlatformStatus{
+					Region: "us-east-1",
+				},
+			},
+		},
+	}
+
+	err, client := NewConfigClientShim(
+		nil,
+		[]runtime.Object{object},
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	updateNotPermitted := OperationNotPermitted{Action: "update"}
+	deleteNotPermitted := OperationNotPermitted{Action: "delete"}
+
+	_, err = client.ConfigV1().Infrastructures().Update(context.TODO(), object, metav1.UpdateOptions{})
+	if err == nil || err.Error() != updateNotPermitted.Error() {
+		t.Fatalf("Expected %q error, got %q instead", updateNotPermitted.Error(), err)
+	}
+
+	err = client.ConfigV1().Infrastructures().Delete(context.TODO(), object.Name, metav1.DeleteOptions{})
+	if err == nil || err.Error() != deleteNotPermitted.Error() {
+		t.Fatalf("Expected %q error, got %q instead", deleteNotPermitted.Error(), err)
+	}
+}
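Before the framework.go changes below, a hedged sketch of how the new manifest collector is meant to be driven; the example function and file names are illustrative, while collectConfigManifestsFromDir itself appears in the diff that follows:

    package util // sketch only, not part of this patch

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // exampleCollectManifests demonstrates the collector's contract: only
    // config.openshift.io/v1 Infrastructure and Network manifests are accepted,
    // and duplicates (same kind/namespace/name) are rejected.
    func exampleCollectManifests() {
        dir, _ := os.MkdirTemp("", "static-config")
        defer os.RemoveAll(dir)
        manifest := []byte("apiVersion: config.openshift.io/v1\nkind: Infrastructure\nmetadata:\n  name: cluster\n")
        _ = os.WriteFile(filepath.Join(dir, "infrastructure.yaml"), manifest, 0o644)
        err, objects := collectConfigManifestsFromDir(dir)
        fmt.Println(err, len(objects)) // expected: <nil> 1
    }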
"time" + "github.com/ghodss/yaml" g "github.com/onsi/ginkgo/v2" o "github.com/onsi/gomega" @@ -30,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" @@ -1390,6 +1392,11 @@ func KubeConfigPath() string { return os.Getenv("KUBECONFIG") } +// StaticConfigManifestDir returns the value of STATIC_CONFIG_MANIFEST_DIR environment variable +func StaticConfigManifestDir() string { + return os.Getenv("STATIC_CONFIG_MANIFEST_DIR") +} + // ArtifactDirPath returns the value of ARTIFACT_DIR environment variable func ArtifactDirPath() string { path := os.Getenv("ARTIFACT_DIR") @@ -2075,3 +2082,69 @@ func DoesApiResourceExist(config *rest.Config, apiResourceName, groupVersionName func groupName(groupVersionName string) string { return strings.Split(groupVersionName, "/")[0] } + +type staticObject struct { + APIVersion, Kind, Namespace, Name string +} + +func collectConfigManifestsFromDir(configManifestsDir string) (error, []runtime.Object) { + objects := []runtime.Object{} + knownObjects := make(map[staticObject]string) + + err := filepath.Walk(configManifestsDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + + body, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + object := &metav1.TypeMeta{} + err = yaml.Unmarshal(body, &object) + if err != nil { + return err + } + + if object.APIVersion == "config.openshift.io/v1" { + switch object.Kind { + case "Infrastructure": + config := &configv1.Infrastructure{} + err = yaml.Unmarshal(body, &config) + if err != nil { + return err + } + key := staticObject{APIVersion: object.APIVersion, Kind: object.Kind, Namespace: config.Namespace, Name: config.Name} + if objPath, exists := knownObjects[key]; exists { + return fmt.Errorf("object %v duplicated under %v", path, objPath) + } + objects = append(objects, config) + knownObjects[key] = path + case "Network": + config := &configv1.Network{} + err = yaml.Unmarshal(body, &config) + if err != nil { + return err + } + key := staticObject{APIVersion: object.APIVersion, Kind: object.Kind, Namespace: config.Namespace, Name: config.Name} + if objPath, exists := knownObjects[key]; exists { + return fmt.Errorf("object %v duplicated under %v", path, objPath) + } + objects = append(objects, config) + knownObjects[key] = path + default: + return fmt.Errorf("unknown 'config.openshift.io/v1' kind: %v", object.Kind) + } + } else { + return fmt.Errorf("unknown apiversion: %v", object.APIVersion) + } + + return nil + }) + + return err, objects +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 000000000000..cb8a9224bbfe --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,76 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + clientset "github.com/openshift/client-go/config/clientset/versioned" + configv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + fakeconfigv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake" + configv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" + fakeconfigv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// ConfigV1 retrieves the ConfigV1Client +func (c *Clientset) ConfigV1() configv1.ConfigV1Interface { + return &fakeconfigv1.FakeConfigV1{Fake: &c.Fake} +} + +// ConfigV1alpha1 retrieves the ConfigV1alpha1Client +func (c *Clientset) ConfigV1alpha1() configv1alpha1.ConfigV1alpha1Interface { + return &fakeconfigv1alpha1.FakeConfigV1alpha1{Fake: &c.Fake} +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/doc.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/doc.go new file mode 100644 index 000000000000..3630ed1cd17d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/register.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/register.go new file mode 100644 index 000000000000..669301d58c23 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/fake/register.go @@ -0,0 +1,42 @@ +// Code generated by client-gen. 
DO NOT EDIT. + +package fake + +import ( + configv1 "github.com/openshift/api/config/v1" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + configv1.AddToScheme, + configv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/doc.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/doc.go new file mode 100644 index 000000000000..2b5ba4c8e442 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_apiserver.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_apiserver.go new file mode 100644 index 000000000000..96d7b673fa08 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_apiserver.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeAPIServers implements APIServerInterface +type FakeAPIServers struct { + Fake *FakeConfigV1 +} + +var apiserversResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "apiservers"} + +var apiserversKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "APIServer"} + +// Get takes name of the aPIServer, and returns the corresponding aPIServer object, and an error if there is any. +func (c *FakeAPIServers) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.APIServer, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootGetAction(apiserversResource, name), &configv1.APIServer{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.APIServer), err +} + +// List takes label and field selectors, and returns the list of APIServers that match those selectors. +func (c *FakeAPIServers) List(ctx context.Context, opts v1.ListOptions) (result *configv1.APIServerList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(apiserversResource, apiserversKind, opts), &configv1.APIServerList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.APIServerList{ListMeta: obj.(*configv1.APIServerList).ListMeta} + for _, item := range obj.(*configv1.APIServerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested aPIServers. +func (c *FakeAPIServers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(apiserversResource, opts)) +} + +// Create takes the representation of a aPIServer and creates it. Returns the server's representation of the aPIServer, and an error, if there is any. +func (c *FakeAPIServers) Create(ctx context.Context, aPIServer *configv1.APIServer, opts v1.CreateOptions) (result *configv1.APIServer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(apiserversResource, aPIServer), &configv1.APIServer{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.APIServer), err +} + +// Update takes the representation of a aPIServer and updates it. Returns the server's representation of the aPIServer, and an error, if there is any. +func (c *FakeAPIServers) Update(ctx context.Context, aPIServer *configv1.APIServer, opts v1.UpdateOptions) (result *configv1.APIServer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(apiserversResource, aPIServer), &configv1.APIServer{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.APIServer), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeAPIServers) UpdateStatus(ctx context.Context, aPIServer *configv1.APIServer, opts v1.UpdateOptions) (*configv1.APIServer, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(apiserversResource, "status", aPIServer), &configv1.APIServer{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.APIServer), err +} + +// Delete takes name of the aPIServer and deletes it. Returns an error if one occurs. +func (c *FakeAPIServers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(apiserversResource, name, opts), &configv1.APIServer{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeAPIServers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(apiserversResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.APIServerList{}) + return err +} + +// Patch applies the patch and returns the patched aPIServer. 
+func (c *FakeAPIServers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.APIServer, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(apiserversResource, name, pt, data, subresources...), &configv1.APIServer{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.APIServer), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied aPIServer. +func (c *FakeAPIServers) Apply(ctx context.Context, aPIServer *applyconfigurationsconfigv1.APIServerApplyConfiguration, opts v1.ApplyOptions) (result *configv1.APIServer, err error) { + if aPIServer == nil { + return nil, fmt.Errorf("aPIServer provided to Apply must not be nil") + } + data, err := json.Marshal(aPIServer) + if err != nil { + return nil, err + } + name := aPIServer.Name + if name == nil { + return nil, fmt.Errorf("aPIServer.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(apiserversResource, *name, types.ApplyPatchType, data), &configv1.APIServer{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.APIServer), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeAPIServers) ApplyStatus(ctx context.Context, aPIServer *applyconfigurationsconfigv1.APIServerApplyConfiguration, opts v1.ApplyOptions) (result *configv1.APIServer, err error) { + if aPIServer == nil { + return nil, fmt.Errorf("aPIServer provided to Apply must not be nil") + } + data, err := json.Marshal(aPIServer) + if err != nil { + return nil, err + } + name := aPIServer.Name + if name == nil { + return nil, fmt.Errorf("aPIServer.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(apiserversResource, *name, types.ApplyPatchType, data, "status"), &configv1.APIServer{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.APIServer), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_authentication.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_authentication.go new file mode 100644 index 000000000000..5f37b1752ca4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_authentication.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeAuthentications implements AuthenticationInterface +type FakeAuthentications struct { + Fake *FakeConfigV1 +} + +var authenticationsResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "authentications"} + +var authenticationsKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Authentication"} + +// Get takes name of the authentication, and returns the corresponding authentication object, and an error if there is any. +func (c *FakeAuthentications) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.Authentication, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(authenticationsResource, name), &configv1.Authentication{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Authentication), err +} + +// List takes label and field selectors, and returns the list of Authentications that match those selectors. +func (c *FakeAuthentications) List(ctx context.Context, opts v1.ListOptions) (result *configv1.AuthenticationList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(authenticationsResource, authenticationsKind, opts), &configv1.AuthenticationList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.AuthenticationList{ListMeta: obj.(*configv1.AuthenticationList).ListMeta} + for _, item := range obj.(*configv1.AuthenticationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested authentications. +func (c *FakeAuthentications) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(authenticationsResource, opts)) +} + +// Create takes the representation of a authentication and creates it. Returns the server's representation of the authentication, and an error, if there is any. +func (c *FakeAuthentications) Create(ctx context.Context, authentication *configv1.Authentication, opts v1.CreateOptions) (result *configv1.Authentication, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(authenticationsResource, authentication), &configv1.Authentication{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Authentication), err +} + +// Update takes the representation of a authentication and updates it. Returns the server's representation of the authentication, and an error, if there is any. +func (c *FakeAuthentications) Update(ctx context.Context, authentication *configv1.Authentication, opts v1.UpdateOptions) (result *configv1.Authentication, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateAction(authenticationsResource, authentication), &configv1.Authentication{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Authentication), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeAuthentications) UpdateStatus(ctx context.Context, authentication *configv1.Authentication, opts v1.UpdateOptions) (*configv1.Authentication, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(authenticationsResource, "status", authentication), &configv1.Authentication{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Authentication), err +} + +// Delete takes name of the authentication and deletes it. Returns an error if one occurs. +func (c *FakeAuthentications) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(authenticationsResource, name, opts), &configv1.Authentication{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeAuthentications) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(authenticationsResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.AuthenticationList{}) + return err +} + +// Patch applies the patch and returns the patched authentication. +func (c *FakeAuthentications) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.Authentication, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(authenticationsResource, name, pt, data, subresources...), &configv1.Authentication{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Authentication), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied authentication. +func (c *FakeAuthentications) Apply(ctx context.Context, authentication *applyconfigurationsconfigv1.AuthenticationApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Authentication, err error) { + if authentication == nil { + return nil, fmt.Errorf("authentication provided to Apply must not be nil") + } + data, err := json.Marshal(authentication) + if err != nil { + return nil, err + } + name := authentication.Name + if name == nil { + return nil, fmt.Errorf("authentication.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(authenticationsResource, *name, types.ApplyPatchType, data), &configv1.Authentication{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Authentication), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+func (c *FakeAuthentications) ApplyStatus(ctx context.Context, authentication *applyconfigurationsconfigv1.AuthenticationApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Authentication, err error) { + if authentication == nil { + return nil, fmt.Errorf("authentication provided to Apply must not be nil") + } + data, err := json.Marshal(authentication) + if err != nil { + return nil, err + } + name := authentication.Name + if name == nil { + return nil, fmt.Errorf("authentication.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(authenticationsResource, *name, types.ApplyPatchType, data, "status"), &configv1.Authentication{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Authentication), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_build.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_build.go new file mode 100644 index 000000000000..1133fb58d755 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_build.go @@ -0,0 +1,130 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeBuilds implements BuildInterface +type FakeBuilds struct { + Fake *FakeConfigV1 +} + +var buildsResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "builds"} + +var buildsKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Build"} + +// Get takes name of the build, and returns the corresponding build object, and an error if there is any. +func (c *FakeBuilds) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(buildsResource, name), &configv1.Build{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Build), err +} + +// List takes label and field selectors, and returns the list of Builds that match those selectors. +func (c *FakeBuilds) List(ctx context.Context, opts v1.ListOptions) (result *configv1.BuildList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(buildsResource, buildsKind, opts), &configv1.BuildList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.BuildList{ListMeta: obj.(*configv1.BuildList).ListMeta} + for _, item := range obj.(*configv1.BuildList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested builds. +func (c *FakeBuilds) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(buildsResource, opts)) +} + +// Create takes the representation of a build and creates it. 
Returns the server's representation of the build, and an error, if there is any. +func (c *FakeBuilds) Create(ctx context.Context, build *configv1.Build, opts v1.CreateOptions) (result *configv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(buildsResource, build), &configv1.Build{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Build), err +} + +// Update takes the representation of a build and updates it. Returns the server's representation of the build, and an error, if there is any. +func (c *FakeBuilds) Update(ctx context.Context, build *configv1.Build, opts v1.UpdateOptions) (result *configv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(buildsResource, build), &configv1.Build{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Build), err +} + +// Delete takes name of the build and deletes it. Returns an error if one occurs. +func (c *FakeBuilds) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(buildsResource, name, opts), &configv1.Build{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeBuilds) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(buildsResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.BuildList{}) + return err +} + +// Patch applies the patch and returns the patched build. +func (c *FakeBuilds) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.Build, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(buildsResource, name, pt, data, subresources...), &configv1.Build{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Build), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied build. +func (c *FakeBuilds) Apply(ctx context.Context, build *applyconfigurationsconfigv1.BuildApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Build, err error) { + if build == nil { + return nil, fmt.Errorf("build provided to Apply must not be nil") + } + data, err := json.Marshal(build) + if err != nil { + return nil, err + } + name := build.Name + if name == nil { + return nil, fmt.Errorf("build.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(buildsResource, *name, types.ApplyPatchType, data), &configv1.Build{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Build), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_clusteroperator.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_clusteroperator.go new file mode 100644 index 000000000000..35d57940d9ea --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_clusteroperator.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClusterOperators implements ClusterOperatorInterface +type FakeClusterOperators struct { + Fake *FakeConfigV1 +} + +var clusteroperatorsResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "clusteroperators"} + +var clusteroperatorsKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "ClusterOperator"} + +// Get takes name of the clusterOperator, and returns the corresponding clusterOperator object, and an error if there is any. +func (c *FakeClusterOperators) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.ClusterOperator, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(clusteroperatorsResource, name), &configv1.ClusterOperator{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ClusterOperator), err +} + +// List takes label and field selectors, and returns the list of ClusterOperators that match those selectors. +func (c *FakeClusterOperators) List(ctx context.Context, opts v1.ListOptions) (result *configv1.ClusterOperatorList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(clusteroperatorsResource, clusteroperatorsKind, opts), &configv1.ClusterOperatorList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.ClusterOperatorList{ListMeta: obj.(*configv1.ClusterOperatorList).ListMeta} + for _, item := range obj.(*configv1.ClusterOperatorList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterOperators. +func (c *FakeClusterOperators) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(clusteroperatorsResource, opts)) +} + +// Create takes the representation of a clusterOperator and creates it. Returns the server's representation of the clusterOperator, and an error, if there is any. +func (c *FakeClusterOperators) Create(ctx context.Context, clusterOperator *configv1.ClusterOperator, opts v1.CreateOptions) (result *configv1.ClusterOperator, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clusteroperatorsResource, clusterOperator), &configv1.ClusterOperator{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ClusterOperator), err +} + +// Update takes the representation of a clusterOperator and updates it. Returns the server's representation of the clusterOperator, and an error, if there is any. +func (c *FakeClusterOperators) Update(ctx context.Context, clusterOperator *configv1.ClusterOperator, opts v1.UpdateOptions) (result *configv1.ClusterOperator, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateAction(clusteroperatorsResource, clusterOperator), &configv1.ClusterOperator{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ClusterOperator), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeClusterOperators) UpdateStatus(ctx context.Context, clusterOperator *configv1.ClusterOperator, opts v1.UpdateOptions) (*configv1.ClusterOperator, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(clusteroperatorsResource, "status", clusterOperator), &configv1.ClusterOperator{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ClusterOperator), err +} + +// Delete takes name of the clusterOperator and deletes it. Returns an error if one occurs. +func (c *FakeClusterOperators) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(clusteroperatorsResource, name, opts), &configv1.ClusterOperator{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterOperators) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clusteroperatorsResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.ClusterOperatorList{}) + return err +} + +// Patch applies the patch and returns the patched clusterOperator. +func (c *FakeClusterOperators) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.ClusterOperator, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clusteroperatorsResource, name, pt, data, subresources...), &configv1.ClusterOperator{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ClusterOperator), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied clusterOperator. +func (c *FakeClusterOperators) Apply(ctx context.Context, clusterOperator *applyconfigurationsconfigv1.ClusterOperatorApplyConfiguration, opts v1.ApplyOptions) (result *configv1.ClusterOperator, err error) { + if clusterOperator == nil { + return nil, fmt.Errorf("clusterOperator provided to Apply must not be nil") + } + data, err := json.Marshal(clusterOperator) + if err != nil { + return nil, err + } + name := clusterOperator.Name + if name == nil { + return nil, fmt.Errorf("clusterOperator.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clusteroperatorsResource, *name, types.ApplyPatchType, data), &configv1.ClusterOperator{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ClusterOperator), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+func (c *FakeClusterOperators) ApplyStatus(ctx context.Context, clusterOperator *applyconfigurationsconfigv1.ClusterOperatorApplyConfiguration, opts v1.ApplyOptions) (result *configv1.ClusterOperator, err error) { + if clusterOperator == nil { + return nil, fmt.Errorf("clusterOperator provided to Apply must not be nil") + } + data, err := json.Marshal(clusterOperator) + if err != nil { + return nil, err + } + name := clusterOperator.Name + if name == nil { + return nil, fmt.Errorf("clusterOperator.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clusteroperatorsResource, *name, types.ApplyPatchType, data, "status"), &configv1.ClusterOperator{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ClusterOperator), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_clusterversion.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_clusterversion.go new file mode 100644 index 000000000000..46d7fbdddacd --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_clusterversion.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeClusterVersions implements ClusterVersionInterface +type FakeClusterVersions struct { + Fake *FakeConfigV1 +} + +var clusterversionsResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "clusterversions"} + +var clusterversionsKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "ClusterVersion"} + +// Get takes name of the clusterVersion, and returns the corresponding clusterVersion object, and an error if there is any. +func (c *FakeClusterVersions) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.ClusterVersion, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(clusterversionsResource, name), &configv1.ClusterVersion{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ClusterVersion), err +} + +// List takes label and field selectors, and returns the list of ClusterVersions that match those selectors. +func (c *FakeClusterVersions) List(ctx context.Context, opts v1.ListOptions) (result *configv1.ClusterVersionList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(clusterversionsResource, clusterversionsKind, opts), &configv1.ClusterVersionList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.ClusterVersionList{ListMeta: obj.(*configv1.ClusterVersionList).ListMeta} + for _, item := range obj.(*configv1.ClusterVersionList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterVersions. 
+func (c *FakeClusterVersions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(clusterversionsResource, opts)) +} + +// Create takes the representation of a clusterVersion and creates it. Returns the server's representation of the clusterVersion, and an error, if there is any. +func (c *FakeClusterVersions) Create(ctx context.Context, clusterVersion *configv1.ClusterVersion, opts v1.CreateOptions) (result *configv1.ClusterVersion, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clusterversionsResource, clusterVersion), &configv1.ClusterVersion{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ClusterVersion), err +} + +// Update takes the representation of a clusterVersion and updates it. Returns the server's representation of the clusterVersion, and an error, if there is any. +func (c *FakeClusterVersions) Update(ctx context.Context, clusterVersion *configv1.ClusterVersion, opts v1.UpdateOptions) (result *configv1.ClusterVersion, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clusterversionsResource, clusterVersion), &configv1.ClusterVersion{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ClusterVersion), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeClusterVersions) UpdateStatus(ctx context.Context, clusterVersion *configv1.ClusterVersion, opts v1.UpdateOptions) (*configv1.ClusterVersion, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(clusterversionsResource, "status", clusterVersion), &configv1.ClusterVersion{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ClusterVersion), err +} + +// Delete takes name of the clusterVersion and deletes it. Returns an error if one occurs. +func (c *FakeClusterVersions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(clusterversionsResource, name, opts), &configv1.ClusterVersion{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterVersions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clusterversionsResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.ClusterVersionList{}) + return err +} + +// Patch applies the patch and returns the patched clusterVersion. +func (c *FakeClusterVersions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.ClusterVersion, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clusterversionsResource, name, pt, data, subresources...), &configv1.ClusterVersion{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ClusterVersion), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied clusterVersion. 
+func (c *FakeClusterVersions) Apply(ctx context.Context, clusterVersion *applyconfigurationsconfigv1.ClusterVersionApplyConfiguration, opts v1.ApplyOptions) (result *configv1.ClusterVersion, err error) { + if clusterVersion == nil { + return nil, fmt.Errorf("clusterVersion provided to Apply must not be nil") + } + data, err := json.Marshal(clusterVersion) + if err != nil { + return nil, err + } + name := clusterVersion.Name + if name == nil { + return nil, fmt.Errorf("clusterVersion.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clusterversionsResource, *name, types.ApplyPatchType, data), &configv1.ClusterVersion{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ClusterVersion), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeClusterVersions) ApplyStatus(ctx context.Context, clusterVersion *applyconfigurationsconfigv1.ClusterVersionApplyConfiguration, opts v1.ApplyOptions) (result *configv1.ClusterVersion, err error) { + if clusterVersion == nil { + return nil, fmt.Errorf("clusterVersion provided to Apply must not be nil") + } + data, err := json.Marshal(clusterVersion) + if err != nil { + return nil, err + } + name := clusterVersion.Name + if name == nil { + return nil, fmt.Errorf("clusterVersion.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clusterversionsResource, *name, types.ApplyPatchType, data, "status"), &configv1.ClusterVersion{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ClusterVersion), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go new file mode 100644 index 000000000000..b105e491cf0d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go @@ -0,0 +1,104 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeConfigV1 struct { + *testing.Fake +} + +func (c *FakeConfigV1) APIServers() v1.APIServerInterface { + return &FakeAPIServers{c} +} + +func (c *FakeConfigV1) Authentications() v1.AuthenticationInterface { + return &FakeAuthentications{c} +} + +func (c *FakeConfigV1) Builds() v1.BuildInterface { + return &FakeBuilds{c} +} + +func (c *FakeConfigV1) ClusterOperators() v1.ClusterOperatorInterface { + return &FakeClusterOperators{c} +} + +func (c *FakeConfigV1) ClusterVersions() v1.ClusterVersionInterface { + return &FakeClusterVersions{c} +} + +func (c *FakeConfigV1) Consoles() v1.ConsoleInterface { + return &FakeConsoles{c} +} + +func (c *FakeConfigV1) DNSes() v1.DNSInterface { + return &FakeDNSes{c} +} + +func (c *FakeConfigV1) FeatureGates() v1.FeatureGateInterface { + return &FakeFeatureGates{c} +} + +func (c *FakeConfigV1) Images() v1.ImageInterface { + return &FakeImages{c} +} + +func (c *FakeConfigV1) ImageContentPolicies() v1.ImageContentPolicyInterface { + return &FakeImageContentPolicies{c} +} + +func (c *FakeConfigV1) ImageDigestMirrorSets() v1.ImageDigestMirrorSetInterface { + return &FakeImageDigestMirrorSets{c} +} + +func (c *FakeConfigV1) ImageTagMirrorSets() v1.ImageTagMirrorSetInterface { + return &FakeImageTagMirrorSets{c} +} + +func (c *FakeConfigV1) Infrastructures() v1.InfrastructureInterface { + return &FakeInfrastructures{c} +} + +func (c *FakeConfigV1) Ingresses() v1.IngressInterface { + return &FakeIngresses{c} +} + +func (c *FakeConfigV1) Networks() v1.NetworkInterface { + return &FakeNetworks{c} +} + +func (c *FakeConfigV1) Nodes() v1.NodeInterface { + return &FakeNodes{c} +} + +func (c *FakeConfigV1) OAuths() v1.OAuthInterface { + return &FakeOAuths{c} +} + +func (c *FakeConfigV1) OperatorHubs() v1.OperatorHubInterface { + return &FakeOperatorHubs{c} +} + +func (c *FakeConfigV1) Projects() v1.ProjectInterface { + return &FakeProjects{c} +} + +func (c *FakeConfigV1) Proxies() v1.ProxyInterface { + return &FakeProxies{c} +} + +func (c *FakeConfigV1) Schedulers() v1.SchedulerInterface { + return &FakeSchedulers{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeConfigV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_console.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_console.go new file mode 100644 index 000000000000..ab075436a69f --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_console.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeConsoles implements ConsoleInterface +type FakeConsoles struct { + Fake *FakeConfigV1 +} + +var consolesResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "consoles"} + +var consolesKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Console"} + +// Get takes name of the console, and returns the corresponding console object, and an error if there is any. +func (c *FakeConsoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.Console, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(consolesResource, name), &configv1.Console{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Console), err +} + +// List takes label and field selectors, and returns the list of Consoles that match those selectors. +func (c *FakeConsoles) List(ctx context.Context, opts v1.ListOptions) (result *configv1.ConsoleList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(consolesResource, consolesKind, opts), &configv1.ConsoleList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.ConsoleList{ListMeta: obj.(*configv1.ConsoleList).ListMeta} + for _, item := range obj.(*configv1.ConsoleList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested consoles. +func (c *FakeConsoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(consolesResource, opts)) +} + +// Create takes the representation of a console and creates it. Returns the server's representation of the console, and an error, if there is any. +func (c *FakeConsoles) Create(ctx context.Context, console *configv1.Console, opts v1.CreateOptions) (result *configv1.Console, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(consolesResource, console), &configv1.Console{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Console), err +} + +// Update takes the representation of a console and updates it. Returns the server's representation of the console, and an error, if there is any. +func (c *FakeConsoles) Update(ctx context.Context, console *configv1.Console, opts v1.UpdateOptions) (result *configv1.Console, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(consolesResource, console), &configv1.Console{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Console), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeConsoles) UpdateStatus(ctx context.Context, console *configv1.Console, opts v1.UpdateOptions) (*configv1.Console, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateSubresourceAction(consolesResource, "status", console), &configv1.Console{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Console), err +} + +// Delete takes name of the console and deletes it. Returns an error if one occurs. +func (c *FakeConsoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(consolesResource, name, opts), &configv1.Console{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeConsoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(consolesResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.ConsoleList{}) + return err +} + +// Patch applies the patch and returns the patched console. +func (c *FakeConsoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.Console, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(consolesResource, name, pt, data, subresources...), &configv1.Console{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Console), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied console. +func (c *FakeConsoles) Apply(ctx context.Context, console *applyconfigurationsconfigv1.ConsoleApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Console, err error) { + if console == nil { + return nil, fmt.Errorf("console provided to Apply must not be nil") + } + data, err := json.Marshal(console) + if err != nil { + return nil, err + } + name := console.Name + if name == nil { + return nil, fmt.Errorf("console.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(consolesResource, *name, types.ApplyPatchType, data), &configv1.Console{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Console), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeConsoles) ApplyStatus(ctx context.Context, console *applyconfigurationsconfigv1.ConsoleApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Console, err error) { + if console == nil { + return nil, fmt.Errorf("console provided to Apply must not be nil") + } + data, err := json.Marshal(console) + if err != nil { + return nil, err + } + name := console.Name + if name == nil { + return nil, fmt.Errorf("console.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(consolesResource, *name, types.ApplyPatchType, data, "status"), &configv1.Console{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Console), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_dns.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_dns.go new file mode 100644 index 000000000000..b67ce1c2c1b3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_dns.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. 
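Every generated method above funnels through c.Fake.Invokes, which consults registered reactors before falling back to the tracker. That makes error injection a one-liner in tests; a sketch, assuming the NewSimpleClientset constructor from this patch's fake package:

package example

import (
	"context"
	"errors"

	fakeconfig "github.com/openshift/client-go/config/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ktesting "k8s.io/client-go/testing"
)

func consoleGetFailure() error {
	client := fakeconfig.NewSimpleClientset()
	// Reactors run in order ahead of the default tracker reaction, so a
	// prepended reactor can simulate an error path for "get consoles".
	client.PrependReactor("get", "consoles", func(action ktesting.Action) (bool, runtime.Object, error) {
		return true, nil, errors.New("simulated outage")
	})
	_, err := client.ConfigV1().Consoles().Get(context.TODO(), "cluster", metav1.GetOptions{})
	return err // "simulated outage"
}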
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeDNSes implements DNSInterface +type FakeDNSes struct { + Fake *FakeConfigV1 +} + +var dnsesResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "dnses"} + +var dnsesKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "DNS"} + +// Get takes name of the dNS, and returns the corresponding dNS object, and an error if there is any. +func (c *FakeDNSes) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.DNS, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(dnsesResource, name), &configv1.DNS{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.DNS), err +} + +// List takes label and field selectors, and returns the list of DNSes that match those selectors. +func (c *FakeDNSes) List(ctx context.Context, opts v1.ListOptions) (result *configv1.DNSList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(dnsesResource, dnsesKind, opts), &configv1.DNSList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.DNSList{ListMeta: obj.(*configv1.DNSList).ListMeta} + for _, item := range obj.(*configv1.DNSList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested dNSes. +func (c *FakeDNSes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(dnsesResource, opts)) +} + +// Create takes the representation of a dNS and creates it. Returns the server's representation of the dNS, and an error, if there is any. +func (c *FakeDNSes) Create(ctx context.Context, dNS *configv1.DNS, opts v1.CreateOptions) (result *configv1.DNS, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(dnsesResource, dNS), &configv1.DNS{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.DNS), err +} + +// Update takes the representation of a dNS and updates it. Returns the server's representation of the dNS, and an error, if there is any. +func (c *FakeDNSes) Update(ctx context.Context, dNS *configv1.DNS, opts v1.UpdateOptions) (result *configv1.DNS, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(dnsesResource, dNS), &configv1.DNS{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.DNS), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeDNSes) UpdateStatus(ctx context.Context, dNS *configv1.DNS, opts v1.UpdateOptions) (*configv1.DNS, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateSubresourceAction(dnsesResource, "status", dNS), &configv1.DNS{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.DNS), err +} + +// Delete takes name of the dNS and deletes it. Returns an error if one occurs. +func (c *FakeDNSes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(dnsesResource, name, opts), &configv1.DNS{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeDNSes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(dnsesResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.DNSList{}) + return err +} + +// Patch applies the patch and returns the patched dNS. +func (c *FakeDNSes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.DNS, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(dnsesResource, name, pt, data, subresources...), &configv1.DNS{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.DNS), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied dNS. +func (c *FakeDNSes) Apply(ctx context.Context, dNS *applyconfigurationsconfigv1.DNSApplyConfiguration, opts v1.ApplyOptions) (result *configv1.DNS, err error) { + if dNS == nil { + return nil, fmt.Errorf("dNS provided to Apply must not be nil") + } + data, err := json.Marshal(dNS) + if err != nil { + return nil, err + } + name := dNS.Name + if name == nil { + return nil, fmt.Errorf("dNS.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(dnsesResource, *name, types.ApplyPatchType, data), &configv1.DNS{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.DNS), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeDNSes) ApplyStatus(ctx context.Context, dNS *applyconfigurationsconfigv1.DNSApplyConfiguration, opts v1.ApplyOptions) (result *configv1.DNS, err error) { + if dNS == nil { + return nil, fmt.Errorf("dNS provided to Apply must not be nil") + } + data, err := json.Marshal(dNS) + if err != nil { + return nil, err + } + name := dNS.Name + if name == nil { + return nil, fmt.Errorf("dNS.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(dnsesResource, *name, types.ApplyPatchType, data, "status"), &configv1.DNS{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.DNS), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_featuregate.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_featuregate.go new file mode 100644 index 000000000000..b9b281144178 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_featuregate.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. 
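Note the shape of the generated List: only the label selector survives ExtractFromListOptions, so field selectors are silently dropped by these fakes. A sketch that leans on the label filtering (names and labels are illustrative):

package example

import (
	"context"

	configv1 "github.com/openshift/api/config/v1"
	fakeconfig "github.com/openshift/client-go/config/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func labelledDNSes(ctx context.Context) (int, error) {
	client := fakeconfig.NewSimpleClientset(
		&configv1.DNS{ObjectMeta: metav1.ObjectMeta{Name: "cluster", Labels: map[string]string{"tier": "prod"}}},
		&configv1.DNS{ObjectMeta: metav1.ObjectMeta{Name: "scratch"}},
	)
	// Only the label selector is honoured by the fake List; a field
	// selector in the same options would be ignored.
	list, err := client.ConfigV1().DNSes().List(ctx, metav1.ListOptions{LabelSelector: "tier=prod"})
	if err != nil {
		return 0, err
	}
	return len(list.Items), nil // 1
}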
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeFeatureGates implements FeatureGateInterface +type FakeFeatureGates struct { + Fake *FakeConfigV1 +} + +var featuregatesResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "featuregates"} + +var featuregatesKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "FeatureGate"} + +// Get takes name of the featureGate, and returns the corresponding featureGate object, and an error if there is any. +func (c *FakeFeatureGates) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.FeatureGate, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(featuregatesResource, name), &configv1.FeatureGate{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.FeatureGate), err +} + +// List takes label and field selectors, and returns the list of FeatureGates that match those selectors. +func (c *FakeFeatureGates) List(ctx context.Context, opts v1.ListOptions) (result *configv1.FeatureGateList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(featuregatesResource, featuregatesKind, opts), &configv1.FeatureGateList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.FeatureGateList{ListMeta: obj.(*configv1.FeatureGateList).ListMeta} + for _, item := range obj.(*configv1.FeatureGateList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested featureGates. +func (c *FakeFeatureGates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(featuregatesResource, opts)) +} + +// Create takes the representation of a featureGate and creates it. Returns the server's representation of the featureGate, and an error, if there is any. +func (c *FakeFeatureGates) Create(ctx context.Context, featureGate *configv1.FeatureGate, opts v1.CreateOptions) (result *configv1.FeatureGate, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(featuregatesResource, featureGate), &configv1.FeatureGate{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.FeatureGate), err +} + +// Update takes the representation of a featureGate and updates it. Returns the server's representation of the featureGate, and an error, if there is any. +func (c *FakeFeatureGates) Update(ctx context.Context, featureGate *configv1.FeatureGate, opts v1.UpdateOptions) (result *configv1.FeatureGate, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(featuregatesResource, featureGate), &configv1.FeatureGate{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.FeatureGate), err +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeFeatureGates) UpdateStatus(ctx context.Context, featureGate *configv1.FeatureGate, opts v1.UpdateOptions) (*configv1.FeatureGate, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(featuregatesResource, "status", featureGate), &configv1.FeatureGate{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.FeatureGate), err +} + +// Delete takes name of the featureGate and deletes it. Returns an error if one occurs. +func (c *FakeFeatureGates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(featuregatesResource, name, opts), &configv1.FeatureGate{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeFeatureGates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(featuregatesResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.FeatureGateList{}) + return err +} + +// Patch applies the patch and returns the patched featureGate. +func (c *FakeFeatureGates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.FeatureGate, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(featuregatesResource, name, pt, data, subresources...), &configv1.FeatureGate{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.FeatureGate), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied featureGate. +func (c *FakeFeatureGates) Apply(ctx context.Context, featureGate *applyconfigurationsconfigv1.FeatureGateApplyConfiguration, opts v1.ApplyOptions) (result *configv1.FeatureGate, err error) { + if featureGate == nil { + return nil, fmt.Errorf("featureGate provided to Apply must not be nil") + } + data, err := json.Marshal(featureGate) + if err != nil { + return nil, err + } + name := featureGate.Name + if name == nil { + return nil, fmt.Errorf("featureGate.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(featuregatesResource, *name, types.ApplyPatchType, data), &configv1.FeatureGate{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.FeatureGate), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeFeatureGates) ApplyStatus(ctx context.Context, featureGate *applyconfigurationsconfigv1.FeatureGateApplyConfiguration, opts v1.ApplyOptions) (result *configv1.FeatureGate, err error) { + if featureGate == nil { + return nil, fmt.Errorf("featureGate provided to Apply must not be nil") + } + data, err := json.Marshal(featureGate) + if err != nil { + return nil, err + } + name := featureGate.Name + if name == nil { + return nil, fmt.Errorf("featureGate.Name must be provided to Apply") + } + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(featuregatesResource, *name, types.ApplyPatchType, data, "status"), &configv1.FeatureGate{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.FeatureGate), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_image.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_image.go new file mode 100644 index 000000000000..02e8925c228a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_image.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeImages implements ImageInterface +type FakeImages struct { + Fake *FakeConfigV1 +} + +var imagesResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "images"} + +var imagesKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Image"} + +// Get takes name of the image, and returns the corresponding image object, and an error if there is any. +func (c *FakeImages) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(imagesResource, name), &configv1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Image), err +} + +// List takes label and field selectors, and returns the list of Images that match those selectors. +func (c *FakeImages) List(ctx context.Context, opts v1.ListOptions) (result *configv1.ImageList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(imagesResource, imagesKind, opts), &configv1.ImageList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.ImageList{ListMeta: obj.(*configv1.ImageList).ListMeta} + for _, item := range obj.(*configv1.ImageList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested images. +func (c *FakeImages) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(imagesResource, opts)) +} + +// Create takes the representation of a image and creates it. Returns the server's representation of the image, and an error, if there is any. +func (c *FakeImages) Create(ctx context.Context, image *configv1.Image, opts v1.CreateOptions) (result *configv1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(imagesResource, image), &configv1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Image), err +} + +// Update takes the representation of a image and updates it. Returns the server's representation of the image, and an error, if there is any. 
+func (c *FakeImages) Update(ctx context.Context, image *configv1.Image, opts v1.UpdateOptions) (result *configv1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(imagesResource, image), &configv1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Image), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeImages) UpdateStatus(ctx context.Context, image *configv1.Image, opts v1.UpdateOptions) (*configv1.Image, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(imagesResource, "status", image), &configv1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Image), err +} + +// Delete takes name of the image and deletes it. Returns an error if one occurs. +func (c *FakeImages) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(imagesResource, name, opts), &configv1.Image{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeImages) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(imagesResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.ImageList{}) + return err +} + +// Patch applies the patch and returns the patched image. +func (c *FakeImages) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.Image, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(imagesResource, name, pt, data, subresources...), &configv1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Image), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied image. +func (c *FakeImages) Apply(ctx context.Context, image *applyconfigurationsconfigv1.ImageApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Image, err error) { + if image == nil { + return nil, fmt.Errorf("image provided to Apply must not be nil") + } + data, err := json.Marshal(image) + if err != nil { + return nil, err + } + name := image.Name + if name == nil { + return nil, fmt.Errorf("image.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(imagesResource, *name, types.ApplyPatchType, data), &configv1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Image), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeImages) ApplyStatus(ctx context.Context, image *applyconfigurationsconfigv1.ImageApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Image, err error) { + if image == nil { + return nil, fmt.Errorf("image provided to Apply must not be nil") + } + data, err := json.Marshal(image) + if err != nil { + return nil, err + } + name := image.Name + if name == nil { + return nil, fmt.Errorf("image.Name must be provided to Apply") + } + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(imagesResource, *name, types.ApplyPatchType, data, "status"), &configv1.Image{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Image), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagecontentpolicy.go new file mode 100644 index 000000000000..a93d4d351a5e --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagecontentpolicy.go @@ -0,0 +1,130 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeImageContentPolicies implements ImageContentPolicyInterface +type FakeImageContentPolicies struct { + Fake *FakeConfigV1 +} + +var imagecontentpoliciesResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "imagecontentpolicies"} + +var imagecontentpoliciesKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "ImageContentPolicy"} + +// Get takes name of the imageContentPolicy, and returns the corresponding imageContentPolicy object, and an error if there is any. +func (c *FakeImageContentPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.ImageContentPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(imagecontentpoliciesResource, name), &configv1.ImageContentPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageContentPolicy), err +} + +// List takes label and field selectors, and returns the list of ImageContentPolicies that match those selectors. +func (c *FakeImageContentPolicies) List(ctx context.Context, opts v1.ListOptions) (result *configv1.ImageContentPolicyList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(imagecontentpoliciesResource, imagecontentpoliciesKind, opts), &configv1.ImageContentPolicyList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.ImageContentPolicyList{ListMeta: obj.(*configv1.ImageContentPolicyList).ListMeta} + for _, item := range obj.(*configv1.ImageContentPolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested imageContentPolicies. +func (c *FakeImageContentPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(imagecontentpoliciesResource, opts)) +} + +// Create takes the representation of a imageContentPolicy and creates it. Returns the server's representation of the imageContentPolicy, and an error, if there is any. 
+func (c *FakeImageContentPolicies) Create(ctx context.Context, imageContentPolicy *configv1.ImageContentPolicy, opts v1.CreateOptions) (result *configv1.ImageContentPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(imagecontentpoliciesResource, imageContentPolicy), &configv1.ImageContentPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageContentPolicy), err +} + +// Update takes the representation of a imageContentPolicy and updates it. Returns the server's representation of the imageContentPolicy, and an error, if there is any. +func (c *FakeImageContentPolicies) Update(ctx context.Context, imageContentPolicy *configv1.ImageContentPolicy, opts v1.UpdateOptions) (result *configv1.ImageContentPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(imagecontentpoliciesResource, imageContentPolicy), &configv1.ImageContentPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageContentPolicy), err +} + +// Delete takes name of the imageContentPolicy and deletes it. Returns an error if one occurs. +func (c *FakeImageContentPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(imagecontentpoliciesResource, name, opts), &configv1.ImageContentPolicy{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeImageContentPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(imagecontentpoliciesResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.ImageContentPolicyList{}) + return err +} + +// Patch applies the patch and returns the patched imageContentPolicy. +func (c *FakeImageContentPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.ImageContentPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(imagecontentpoliciesResource, name, pt, data, subresources...), &configv1.ImageContentPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageContentPolicy), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied imageContentPolicy. +func (c *FakeImageContentPolicies) Apply(ctx context.Context, imageContentPolicy *applyconfigurationsconfigv1.ImageContentPolicyApplyConfiguration, opts v1.ApplyOptions) (result *configv1.ImageContentPolicy, err error) { + if imageContentPolicy == nil { + return nil, fmt.Errorf("imageContentPolicy provided to Apply must not be nil") + } + data, err := json.Marshal(imageContentPolicy) + if err != nil { + return nil, err + } + name := imageContentPolicy.Name + if name == nil { + return nil, fmt.Errorf("imageContentPolicy.Name must be provided to Apply") + } + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(imagecontentpoliciesResource, *name, types.ApplyPatchType, data), &configv1.ImageContentPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageContentPolicy), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagedigestmirrorset.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagedigestmirrorset.go new file mode 100644 index 000000000000..c61807ce06cb --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagedigestmirrorset.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeImageDigestMirrorSets implements ImageDigestMirrorSetInterface +type FakeImageDigestMirrorSets struct { + Fake *FakeConfigV1 +} + +var imagedigestmirrorsetsResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "imagedigestmirrorsets"} + +var imagedigestmirrorsetsKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "ImageDigestMirrorSet"} + +// Get takes name of the imageDigestMirrorSet, and returns the corresponding imageDigestMirrorSet object, and an error if there is any. +func (c *FakeImageDigestMirrorSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.ImageDigestMirrorSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(imagedigestmirrorsetsResource, name), &configv1.ImageDigestMirrorSet{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageDigestMirrorSet), err +} + +// List takes label and field selectors, and returns the list of ImageDigestMirrorSets that match those selectors. +func (c *FakeImageDigestMirrorSets) List(ctx context.Context, opts v1.ListOptions) (result *configv1.ImageDigestMirrorSetList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(imagedigestmirrorsetsResource, imagedigestmirrorsetsKind, opts), &configv1.ImageDigestMirrorSetList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.ImageDigestMirrorSetList{ListMeta: obj.(*configv1.ImageDigestMirrorSetList).ListMeta} + for _, item := range obj.(*configv1.ImageDigestMirrorSetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested imageDigestMirrorSets. +func (c *FakeImageDigestMirrorSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(imagedigestmirrorsetsResource, opts)) +} + +// Create takes the representation of a imageDigestMirrorSet and creates it. Returns the server's representation of the imageDigestMirrorSet, and an error, if there is any. 
+func (c *FakeImageDigestMirrorSets) Create(ctx context.Context, imageDigestMirrorSet *configv1.ImageDigestMirrorSet, opts v1.CreateOptions) (result *configv1.ImageDigestMirrorSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(imagedigestmirrorsetsResource, imageDigestMirrorSet), &configv1.ImageDigestMirrorSet{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageDigestMirrorSet), err +} + +// Update takes the representation of a imageDigestMirrorSet and updates it. Returns the server's representation of the imageDigestMirrorSet, and an error, if there is any. +func (c *FakeImageDigestMirrorSets) Update(ctx context.Context, imageDigestMirrorSet *configv1.ImageDigestMirrorSet, opts v1.UpdateOptions) (result *configv1.ImageDigestMirrorSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(imagedigestmirrorsetsResource, imageDigestMirrorSet), &configv1.ImageDigestMirrorSet{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageDigestMirrorSet), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeImageDigestMirrorSets) UpdateStatus(ctx context.Context, imageDigestMirrorSet *configv1.ImageDigestMirrorSet, opts v1.UpdateOptions) (*configv1.ImageDigestMirrorSet, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(imagedigestmirrorsetsResource, "status", imageDigestMirrorSet), &configv1.ImageDigestMirrorSet{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageDigestMirrorSet), err +} + +// Delete takes name of the imageDigestMirrorSet and deletes it. Returns an error if one occurs. +func (c *FakeImageDigestMirrorSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(imagedigestmirrorsetsResource, name, opts), &configv1.ImageDigestMirrorSet{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeImageDigestMirrorSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(imagedigestmirrorsetsResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.ImageDigestMirrorSetList{}) + return err +} + +// Patch applies the patch and returns the patched imageDigestMirrorSet. +func (c *FakeImageDigestMirrorSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.ImageDigestMirrorSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(imagedigestmirrorsetsResource, name, pt, data, subresources...), &configv1.ImageDigestMirrorSet{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageDigestMirrorSet), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied imageDigestMirrorSet. 
+func (c *FakeImageDigestMirrorSets) Apply(ctx context.Context, imageDigestMirrorSet *applyconfigurationsconfigv1.ImageDigestMirrorSetApplyConfiguration, opts v1.ApplyOptions) (result *configv1.ImageDigestMirrorSet, err error) { + if imageDigestMirrorSet == nil { + return nil, fmt.Errorf("imageDigestMirrorSet provided to Apply must not be nil") + } + data, err := json.Marshal(imageDigestMirrorSet) + if err != nil { + return nil, err + } + name := imageDigestMirrorSet.Name + if name == nil { + return nil, fmt.Errorf("imageDigestMirrorSet.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(imagedigestmirrorsetsResource, *name, types.ApplyPatchType, data), &configv1.ImageDigestMirrorSet{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageDigestMirrorSet), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeImageDigestMirrorSets) ApplyStatus(ctx context.Context, imageDigestMirrorSet *applyconfigurationsconfigv1.ImageDigestMirrorSetApplyConfiguration, opts v1.ApplyOptions) (result *configv1.ImageDigestMirrorSet, err error) { + if imageDigestMirrorSet == nil { + return nil, fmt.Errorf("imageDigestMirrorSet provided to Apply must not be nil") + } + data, err := json.Marshal(imageDigestMirrorSet) + if err != nil { + return nil, err + } + name := imageDigestMirrorSet.Name + if name == nil { + return nil, fmt.Errorf("imageDigestMirrorSet.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(imagedigestmirrorsetsResource, *name, types.ApplyPatchType, data, "status"), &configv1.ImageDigestMirrorSet{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageDigestMirrorSet), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagetagmirrorset.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagetagmirrorset.go new file mode 100644 index 000000000000..e6aebaa4f8df --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagetagmirrorset.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeImageTagMirrorSets implements ImageTagMirrorSetInterface +type FakeImageTagMirrorSets struct { + Fake *FakeConfigV1 +} + +var imagetagmirrorsetsResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "imagetagmirrorsets"} + +var imagetagmirrorsetsKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "ImageTagMirrorSet"} + +// Get takes name of the imageTagMirrorSet, and returns the corresponding imageTagMirrorSet object, and an error if there is any. 
+func (c *FakeImageTagMirrorSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.ImageTagMirrorSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(imagetagmirrorsetsResource, name), &configv1.ImageTagMirrorSet{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageTagMirrorSet), err +} + +// List takes label and field selectors, and returns the list of ImageTagMirrorSets that match those selectors. +func (c *FakeImageTagMirrorSets) List(ctx context.Context, opts v1.ListOptions) (result *configv1.ImageTagMirrorSetList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(imagetagmirrorsetsResource, imagetagmirrorsetsKind, opts), &configv1.ImageTagMirrorSetList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.ImageTagMirrorSetList{ListMeta: obj.(*configv1.ImageTagMirrorSetList).ListMeta} + for _, item := range obj.(*configv1.ImageTagMirrorSetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested imageTagMirrorSets. +func (c *FakeImageTagMirrorSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(imagetagmirrorsetsResource, opts)) +} + +// Create takes the representation of a imageTagMirrorSet and creates it. Returns the server's representation of the imageTagMirrorSet, and an error, if there is any. +func (c *FakeImageTagMirrorSets) Create(ctx context.Context, imageTagMirrorSet *configv1.ImageTagMirrorSet, opts v1.CreateOptions) (result *configv1.ImageTagMirrorSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(imagetagmirrorsetsResource, imageTagMirrorSet), &configv1.ImageTagMirrorSet{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageTagMirrorSet), err +} + +// Update takes the representation of a imageTagMirrorSet and updates it. Returns the server's representation of the imageTagMirrorSet, and an error, if there is any. +func (c *FakeImageTagMirrorSets) Update(ctx context.Context, imageTagMirrorSet *configv1.ImageTagMirrorSet, opts v1.UpdateOptions) (result *configv1.ImageTagMirrorSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(imagetagmirrorsetsResource, imageTagMirrorSet), &configv1.ImageTagMirrorSet{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageTagMirrorSet), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeImageTagMirrorSets) UpdateStatus(ctx context.Context, imageTagMirrorSet *configv1.ImageTagMirrorSet, opts v1.UpdateOptions) (*configv1.ImageTagMirrorSet, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(imagetagmirrorsetsResource, "status", imageTagMirrorSet), &configv1.ImageTagMirrorSet{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageTagMirrorSet), err +} + +// Delete takes name of the imageTagMirrorSet and deletes it. Returns an error if one occurs. +func (c *FakeImageTagMirrorSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewRootDeleteActionWithOptions(imagetagmirrorsetsResource, name, opts), &configv1.ImageTagMirrorSet{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeImageTagMirrorSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(imagetagmirrorsetsResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.ImageTagMirrorSetList{}) + return err +} + +// Patch applies the patch and returns the patched imageTagMirrorSet. +func (c *FakeImageTagMirrorSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.ImageTagMirrorSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(imagetagmirrorsetsResource, name, pt, data, subresources...), &configv1.ImageTagMirrorSet{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageTagMirrorSet), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied imageTagMirrorSet. +func (c *FakeImageTagMirrorSets) Apply(ctx context.Context, imageTagMirrorSet *applyconfigurationsconfigv1.ImageTagMirrorSetApplyConfiguration, opts v1.ApplyOptions) (result *configv1.ImageTagMirrorSet, err error) { + if imageTagMirrorSet == nil { + return nil, fmt.Errorf("imageTagMirrorSet provided to Apply must not be nil") + } + data, err := json.Marshal(imageTagMirrorSet) + if err != nil { + return nil, err + } + name := imageTagMirrorSet.Name + if name == nil { + return nil, fmt.Errorf("imageTagMirrorSet.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(imagetagmirrorsetsResource, *name, types.ApplyPatchType, data), &configv1.ImageTagMirrorSet{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageTagMirrorSet), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeImageTagMirrorSets) ApplyStatus(ctx context.Context, imageTagMirrorSet *applyconfigurationsconfigv1.ImageTagMirrorSetApplyConfiguration, opts v1.ApplyOptions) (result *configv1.ImageTagMirrorSet, err error) { + if imageTagMirrorSet == nil { + return nil, fmt.Errorf("imageTagMirrorSet provided to Apply must not be nil") + } + data, err := json.Marshal(imageTagMirrorSet) + if err != nil { + return nil, err + } + name := imageTagMirrorSet.Name + if name == nil { + return nil, fmt.Errorf("imageTagMirrorSet.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(imagetagmirrorsetsResource, *name, types.ApplyPatchType, data, "status"), &configv1.ImageTagMirrorSet{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageTagMirrorSet), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_infrastructure.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_infrastructure.go new file mode 100644 index 000000000000..b5ca5879334e --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_infrastructure.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. 
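Watch on these fakes is backed by the default watch reactor that NewSimpleClientset installs, which replays subsequent tracker mutations to any open watcher. A sketch, assuming that default wiring:

package example

import (
	"context"
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	fakeconfig "github.com/openshift/client-go/config/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func watchMirrorSets(ctx context.Context) error {
	client := fakeconfig.NewSimpleClientset()
	w, err := client.ConfigV1().ImageTagMirrorSets().Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	// The default watch reactor replays tracker mutations, so this Create
	// surfaces on the open watch as an Added event.
	if _, err := client.ConfigV1().ImageTagMirrorSets().Create(ctx,
		&configv1.ImageTagMirrorSet{ObjectMeta: metav1.ObjectMeta{Name: "mirrors"}},
		metav1.CreateOptions{}); err != nil {
		return err
	}
	ev := <-w.ResultChan()
	fmt.Println(ev.Type) // ADDED
	return nil
}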
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeInfrastructures implements InfrastructureInterface +type FakeInfrastructures struct { + Fake *FakeConfigV1 +} + +var infrastructuresResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "infrastructures"} + +var infrastructuresKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Infrastructure"} + +// Get takes name of the infrastructure, and returns the corresponding infrastructure object, and an error if there is any. +func (c *FakeInfrastructures) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.Infrastructure, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(infrastructuresResource, name), &configv1.Infrastructure{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Infrastructure), err +} + +// List takes label and field selectors, and returns the list of Infrastructures that match those selectors. +func (c *FakeInfrastructures) List(ctx context.Context, opts v1.ListOptions) (result *configv1.InfrastructureList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(infrastructuresResource, infrastructuresKind, opts), &configv1.InfrastructureList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.InfrastructureList{ListMeta: obj.(*configv1.InfrastructureList).ListMeta} + for _, item := range obj.(*configv1.InfrastructureList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested infrastructures. +func (c *FakeInfrastructures) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(infrastructuresResource, opts)) +} + +// Create takes the representation of a infrastructure and creates it. Returns the server's representation of the infrastructure, and an error, if there is any. +func (c *FakeInfrastructures) Create(ctx context.Context, infrastructure *configv1.Infrastructure, opts v1.CreateOptions) (result *configv1.Infrastructure, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(infrastructuresResource, infrastructure), &configv1.Infrastructure{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Infrastructure), err +} + +// Update takes the representation of a infrastructure and updates it. Returns the server's representation of the infrastructure, and an error, if there is any. +func (c *FakeInfrastructures) Update(ctx context.Context, infrastructure *configv1.Infrastructure, opts v1.UpdateOptions) (result *configv1.Infrastructure, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateAction(infrastructuresResource, infrastructure), &configv1.Infrastructure{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Infrastructure), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeInfrastructures) UpdateStatus(ctx context.Context, infrastructure *configv1.Infrastructure, opts v1.UpdateOptions) (*configv1.Infrastructure, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(infrastructuresResource, "status", infrastructure), &configv1.Infrastructure{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Infrastructure), err +} + +// Delete takes name of the infrastructure and deletes it. Returns an error if one occurs. +func (c *FakeInfrastructures) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(infrastructuresResource, name, opts), &configv1.Infrastructure{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeInfrastructures) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(infrastructuresResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.InfrastructureList{}) + return err +} + +// Patch applies the patch and returns the patched infrastructure. +func (c *FakeInfrastructures) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.Infrastructure, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(infrastructuresResource, name, pt, data, subresources...), &configv1.Infrastructure{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Infrastructure), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied infrastructure. +func (c *FakeInfrastructures) Apply(ctx context.Context, infrastructure *applyconfigurationsconfigv1.InfrastructureApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Infrastructure, err error) { + if infrastructure == nil { + return nil, fmt.Errorf("infrastructure provided to Apply must not be nil") + } + data, err := json.Marshal(infrastructure) + if err != nil { + return nil, err + } + name := infrastructure.Name + if name == nil { + return nil, fmt.Errorf("infrastructure.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(infrastructuresResource, *name, types.ApplyPatchType, data), &configv1.Infrastructure{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Infrastructure), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+func (c *FakeInfrastructures) ApplyStatus(ctx context.Context, infrastructure *applyconfigurationsconfigv1.InfrastructureApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Infrastructure, err error) { + if infrastructure == nil { + return nil, fmt.Errorf("infrastructure provided to Apply must not be nil") + } + data, err := json.Marshal(infrastructure) + if err != nil { + return nil, err + } + name := infrastructure.Name + if name == nil { + return nil, fmt.Errorf("infrastructure.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(infrastructuresResource, *name, types.ApplyPatchType, data, "status"), &configv1.Infrastructure{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Infrastructure), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_ingress.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_ingress.go new file mode 100644 index 000000000000..6c41b126587d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_ingress.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeIngresses implements IngressInterface +type FakeIngresses struct { + Fake *FakeConfigV1 +} + +var ingressesResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "ingresses"} + +var ingressesKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Ingress"} + +// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. +func (c *FakeIngresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.Ingress, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(ingressesResource, name), &configv1.Ingress{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Ingress), err +} + +// List takes label and field selectors, and returns the list of Ingresses that match those selectors. +func (c *FakeIngresses) List(ctx context.Context, opts v1.ListOptions) (result *configv1.IngressList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(ingressesResource, ingressesKind, opts), &configv1.IngressList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.IngressList{ListMeta: obj.(*configv1.IngressList).ListMeta} + for _, item := range obj.(*configv1.IngressList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ingresses. +func (c *FakeIngresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewRootWatchAction(ingressesResource, opts)) +} + +// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *FakeIngresses) Create(ctx context.Context, ingress *configv1.Ingress, opts v1.CreateOptions) (result *configv1.Ingress, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(ingressesResource, ingress), &configv1.Ingress{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Ingress), err +} + +// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *FakeIngresses) Update(ctx context.Context, ingress *configv1.Ingress, opts v1.UpdateOptions) (result *configv1.Ingress, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(ingressesResource, ingress), &configv1.Ingress{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Ingress), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *configv1.Ingress, opts v1.UpdateOptions) (*configv1.Ingress, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(ingressesResource, "status", ingress), &configv1.Ingress{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Ingress), err +} + +// Delete takes name of the ingress and deletes it. Returns an error if one occurs. +func (c *FakeIngresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ingressesResource, name, opts), &configv1.Ingress{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(ingressesResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.IngressList{}) + return err +} + +// Patch applies the patch and returns the patched ingress. +func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.Ingress, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(ingressesResource, name, pt, data, subresources...), &configv1.Ingress{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Ingress), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied ingress. +func (c *FakeIngresses) Apply(ctx context.Context, ingress *applyconfigurationsconfigv1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Ingress, err error) { + if ingress == nil { + return nil, fmt.Errorf("ingress provided to Apply must not be nil") + } + data, err := json.Marshal(ingress) + if err != nil { + return nil, err + } + name := ingress.Name + if name == nil { + return nil, fmt.Errorf("ingress.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(ingressesResource, *name, types.ApplyPatchType, data), &configv1.Ingress{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Ingress), err +} + +// ApplyStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeIngresses) ApplyStatus(ctx context.Context, ingress *applyconfigurationsconfigv1.IngressApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Ingress, err error) { + if ingress == nil { + return nil, fmt.Errorf("ingress provided to Apply must not be nil") + } + data, err := json.Marshal(ingress) + if err != nil { + return nil, err + } + name := ingress.Name + if name == nil { + return nil, fmt.Errorf("ingress.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(ingressesResource, *name, types.ApplyPatchType, data, "status"), &configv1.Ingress{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Ingress), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_network.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_network.go new file mode 100644 index 000000000000..83a223c8048d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_network.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeNetworks implements NetworkInterface +type FakeNetworks struct { + Fake *FakeConfigV1 +} + +var networksResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "networks"} + +var networksKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Network"} + +// Get takes name of the network, and returns the corresponding network object, and an error if there is any. +func (c *FakeNetworks) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.Network, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(networksResource, name), &configv1.Network{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Network), err +} + +// List takes label and field selectors, and returns the list of Networks that match those selectors. +func (c *FakeNetworks) List(ctx context.Context, opts v1.ListOptions) (result *configv1.NetworkList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(networksResource, networksKind, opts), &configv1.NetworkList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.NetworkList{ListMeta: obj.(*configv1.NetworkList).ListMeta} + for _, item := range obj.(*configv1.NetworkList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested networks. +func (c *FakeNetworks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewRootWatchAction(networksResource, opts)) +} + +// Create takes the representation of a network and creates it. Returns the server's representation of the network, and an error, if there is any. +func (c *FakeNetworks) Create(ctx context.Context, network *configv1.Network, opts v1.CreateOptions) (result *configv1.Network, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(networksResource, network), &configv1.Network{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Network), err +} + +// Update takes the representation of a network and updates it. Returns the server's representation of the network, and an error, if there is any. +func (c *FakeNetworks) Update(ctx context.Context, network *configv1.Network, opts v1.UpdateOptions) (result *configv1.Network, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(networksResource, network), &configv1.Network{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Network), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeNetworks) UpdateStatus(ctx context.Context, network *configv1.Network, opts v1.UpdateOptions) (*configv1.Network, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(networksResource, "status", network), &configv1.Network{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Network), err +} + +// Delete takes name of the network and deletes it. Returns an error if one occurs. +func (c *FakeNetworks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(networksResource, name, opts), &configv1.Network{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeNetworks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(networksResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.NetworkList{}) + return err +} + +// Patch applies the patch and returns the patched network. +func (c *FakeNetworks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.Network, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(networksResource, name, pt, data, subresources...), &configv1.Network{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Network), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied network. +func (c *FakeNetworks) Apply(ctx context.Context, network *applyconfigurationsconfigv1.NetworkApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Network, err error) { + if network == nil { + return nil, fmt.Errorf("network provided to Apply must not be nil") + } + data, err := json.Marshal(network) + if err != nil { + return nil, err + } + name := network.Name + if name == nil { + return nil, fmt.Errorf("network.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(networksResource, *name, types.ApplyPatchType, data), &configv1.Network{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Network), err +} + +// ApplyStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeNetworks) ApplyStatus(ctx context.Context, network *applyconfigurationsconfigv1.NetworkApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Network, err error) { + if network == nil { + return nil, fmt.Errorf("network provided to Apply must not be nil") + } + data, err := json.Marshal(network) + if err != nil { + return nil, err + } + name := network.Name + if name == nil { + return nil, fmt.Errorf("network.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(networksResource, *name, types.ApplyPatchType, data, "status"), &configv1.Network{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Network), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_node.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_node.go new file mode 100644 index 000000000000..6346fbe59a3d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_node.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeNodes implements NodeInterface +type FakeNodes struct { + Fake *FakeConfigV1 +} + +var nodesResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "nodes"} + +var nodesKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Node"} + +// Get takes name of the node, and returns the corresponding node object, and an error if there is any. +func (c *FakeNodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.Node, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(nodesResource, name), &configv1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Node), err +} + +// List takes label and field selectors, and returns the list of Nodes that match those selectors. +func (c *FakeNodes) List(ctx context.Context, opts v1.ListOptions) (result *configv1.NodeList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(nodesResource, nodesKind, opts), &configv1.NodeList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.NodeList{ListMeta: obj.(*configv1.NodeList).ListMeta} + for _, item := range obj.(*configv1.NodeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested nodes. +func (c *FakeNodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(nodesResource, opts)) +} + +// Create takes the representation of a node and creates it. 
+// Returns the server's representation of the node, and an error, if there is any.
+func (c *FakeNodes) Create(ctx context.Context, node *configv1.Node, opts v1.CreateOptions) (result *configv1.Node, err error) {
+	obj, err := c.Fake.
+		Invokes(testing.NewRootCreateAction(nodesResource, node), &configv1.Node{})
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*configv1.Node), err
+}
+
+// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any.
+func (c *FakeNodes) Update(ctx context.Context, node *configv1.Node, opts v1.UpdateOptions) (result *configv1.Node, err error) {
+	obj, err := c.Fake.
+		Invokes(testing.NewRootUpdateAction(nodesResource, node), &configv1.Node{})
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*configv1.Node), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeNodes) UpdateStatus(ctx context.Context, node *configv1.Node, opts v1.UpdateOptions) (*configv1.Node, error) {
+	obj, err := c.Fake.
+		Invokes(testing.NewRootUpdateSubresourceAction(nodesResource, "status", node), &configv1.Node{})
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*configv1.Node), err
+}
+
+// Delete takes name of the node and deletes it. Returns an error if one occurs.
+func (c *FakeNodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+	_, err := c.Fake.
+		Invokes(testing.NewRootDeleteActionWithOptions(nodesResource, name, opts), &configv1.Node{})
+	return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeNodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+	action := testing.NewRootDeleteCollectionAction(nodesResource, listOpts)
+
+	_, err := c.Fake.Invokes(action, &configv1.NodeList{})
+	return err
+}
+
+// Patch applies the patch and returns the patched node.
+func (c *FakeNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.Node, err error) {
+	obj, err := c.Fake.
+		Invokes(testing.NewRootPatchSubresourceAction(nodesResource, name, pt, data, subresources...), &configv1.Node{})
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*configv1.Node), err
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied node.
+func (c *FakeNodes) Apply(ctx context.Context, node *applyconfigurationsconfigv1.NodeApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Node, err error) {
+	if node == nil {
+		return nil, fmt.Errorf("node provided to Apply must not be nil")
+	}
+	data, err := json.Marshal(node)
+	if err != nil {
+		return nil, err
+	}
+	name := node.Name
+	if name == nil {
+		return nil, fmt.Errorf("node.Name must be provided to Apply")
+	}
+	obj, err := c.Fake.
+		Invokes(testing.NewRootPatchSubresourceAction(nodesResource, *name, types.ApplyPatchType, data), &configv1.Node{})
+	if obj == nil {
+		return nil, err
+	}
+	return obj.(*configv1.Node), err
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
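// ---------------------------------------------------------------------------
// A sketch, not generated output, of asserting on the actions a fake records
// (same assumed imports as the Infrastructure sketch earlier): every method in
// this file funnels through Invokes(), so each call is appended to the
// embedded testing.Fake's action log.
func ExampleRecordedActions() {
	client := fake.NewSimpleClientset()
	// The lookup fails with NotFound, but the action is still recorded.
	_, _ = client.ConfigV1().Nodes().Get(context.TODO(), "cluster", metav1.GetOptions{})
	actions := client.Actions()
	if len(actions) != 1 || !actions[0].Matches("get", "nodes") {
		panic("expected a single get action against nodes")
	}
}
// ---------------------------------------------------------------------------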
+func (c *FakeNodes) ApplyStatus(ctx context.Context, node *applyconfigurationsconfigv1.NodeApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Node, err error) { + if node == nil { + return nil, fmt.Errorf("node provided to Apply must not be nil") + } + data, err := json.Marshal(node) + if err != nil { + return nil, err + } + name := node.Name + if name == nil { + return nil, fmt.Errorf("node.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(nodesResource, *name, types.ApplyPatchType, data, "status"), &configv1.Node{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Node), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_oauth.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_oauth.go new file mode 100644 index 000000000000..75f5b4fe97fa --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_oauth.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeOAuths implements OAuthInterface +type FakeOAuths struct { + Fake *FakeConfigV1 +} + +var oauthsResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "oauths"} + +var oauthsKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "OAuth"} + +// Get takes name of the oAuth, and returns the corresponding oAuth object, and an error if there is any. +func (c *FakeOAuths) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.OAuth, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(oauthsResource, name), &configv1.OAuth{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.OAuth), err +} + +// List takes label and field selectors, and returns the list of OAuths that match those selectors. +func (c *FakeOAuths) List(ctx context.Context, opts v1.ListOptions) (result *configv1.OAuthList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(oauthsResource, oauthsKind, opts), &configv1.OAuthList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.OAuthList{ListMeta: obj.(*configv1.OAuthList).ListMeta} + for _, item := range obj.(*configv1.OAuthList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested oAuths. +func (c *FakeOAuths) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(oauthsResource, opts)) +} + +// Create takes the representation of a oAuth and creates it. Returns the server's representation of the oAuth, and an error, if there is any. 
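// ---------------------------------------------------------------------------
// A sketch, not generated output: reactors let a test override the default
// tracker behaviour, for example to make the Create method below fail.
// Assumed extra imports: clienttesting "k8s.io/client-go/testing";
// apierrors "k8s.io/apimachinery/pkg/api/errors";
// runtime "k8s.io/apimachinery/pkg/runtime".
func ExampleCreateReactor() {
	client := fake.NewSimpleClientset()
	client.PrependReactor("create", "oauths",
		func(action clienttesting.Action) (bool, runtime.Object, error) {
			// handled == true short-circuits the default object-tracker reaction.
			return true, nil, apierrors.NewBadRequest("simulated failure")
		})
	_, err := client.ConfigV1().OAuths().Create(context.TODO(),
		&configv1.OAuth{ObjectMeta: metav1.ObjectMeta{Name: "cluster"}}, metav1.CreateOptions{})
	if err == nil {
		panic("expected the injected error")
	}
}
// ---------------------------------------------------------------------------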
+func (c *FakeOAuths) Create(ctx context.Context, oAuth *configv1.OAuth, opts v1.CreateOptions) (result *configv1.OAuth, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(oauthsResource, oAuth), &configv1.OAuth{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.OAuth), err +} + +// Update takes the representation of a oAuth and updates it. Returns the server's representation of the oAuth, and an error, if there is any. +func (c *FakeOAuths) Update(ctx context.Context, oAuth *configv1.OAuth, opts v1.UpdateOptions) (result *configv1.OAuth, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(oauthsResource, oAuth), &configv1.OAuth{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.OAuth), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeOAuths) UpdateStatus(ctx context.Context, oAuth *configv1.OAuth, opts v1.UpdateOptions) (*configv1.OAuth, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(oauthsResource, "status", oAuth), &configv1.OAuth{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.OAuth), err +} + +// Delete takes name of the oAuth and deletes it. Returns an error if one occurs. +func (c *FakeOAuths) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(oauthsResource, name, opts), &configv1.OAuth{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeOAuths) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(oauthsResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.OAuthList{}) + return err +} + +// Patch applies the patch and returns the patched oAuth. +func (c *FakeOAuths) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.OAuth, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(oauthsResource, name, pt, data, subresources...), &configv1.OAuth{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.OAuth), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied oAuth. +func (c *FakeOAuths) Apply(ctx context.Context, oAuth *applyconfigurationsconfigv1.OAuthApplyConfiguration, opts v1.ApplyOptions) (result *configv1.OAuth, err error) { + if oAuth == nil { + return nil, fmt.Errorf("oAuth provided to Apply must not be nil") + } + data, err := json.Marshal(oAuth) + if err != nil { + return nil, err + } + name := oAuth.Name + if name == nil { + return nil, fmt.Errorf("oAuth.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(oauthsResource, *name, types.ApplyPatchType, data), &configv1.OAuth{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.OAuth), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+func (c *FakeOAuths) ApplyStatus(ctx context.Context, oAuth *applyconfigurationsconfigv1.OAuthApplyConfiguration, opts v1.ApplyOptions) (result *configv1.OAuth, err error) { + if oAuth == nil { + return nil, fmt.Errorf("oAuth provided to Apply must not be nil") + } + data, err := json.Marshal(oAuth) + if err != nil { + return nil, err + } + name := oAuth.Name + if name == nil { + return nil, fmt.Errorf("oAuth.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(oauthsResource, *name, types.ApplyPatchType, data, "status"), &configv1.OAuth{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.OAuth), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_operatorhub.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_operatorhub.go new file mode 100644 index 000000000000..37b5e621c3b1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_operatorhub.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeOperatorHubs implements OperatorHubInterface +type FakeOperatorHubs struct { + Fake *FakeConfigV1 +} + +var operatorhubsResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "operatorhubs"} + +var operatorhubsKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "OperatorHub"} + +// Get takes name of the operatorHub, and returns the corresponding operatorHub object, and an error if there is any. +func (c *FakeOperatorHubs) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.OperatorHub, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(operatorhubsResource, name), &configv1.OperatorHub{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.OperatorHub), err +} + +// List takes label and field selectors, and returns the list of OperatorHubs that match those selectors. +func (c *FakeOperatorHubs) List(ctx context.Context, opts v1.ListOptions) (result *configv1.OperatorHubList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(operatorhubsResource, operatorhubsKind, opts), &configv1.OperatorHubList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.OperatorHubList{ListMeta: obj.(*configv1.OperatorHubList).ListMeta} + for _, item := range obj.(*configv1.OperatorHubList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested operatorHubs. +func (c *FakeOperatorHubs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewRootWatchAction(operatorhubsResource, opts)) +} + +// Create takes the representation of a operatorHub and creates it. Returns the server's representation of the operatorHub, and an error, if there is any. +func (c *FakeOperatorHubs) Create(ctx context.Context, operatorHub *configv1.OperatorHub, opts v1.CreateOptions) (result *configv1.OperatorHub, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(operatorhubsResource, operatorHub), &configv1.OperatorHub{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.OperatorHub), err +} + +// Update takes the representation of a operatorHub and updates it. Returns the server's representation of the operatorHub, and an error, if there is any. +func (c *FakeOperatorHubs) Update(ctx context.Context, operatorHub *configv1.OperatorHub, opts v1.UpdateOptions) (result *configv1.OperatorHub, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(operatorhubsResource, operatorHub), &configv1.OperatorHub{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.OperatorHub), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeOperatorHubs) UpdateStatus(ctx context.Context, operatorHub *configv1.OperatorHub, opts v1.UpdateOptions) (*configv1.OperatorHub, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(operatorhubsResource, "status", operatorHub), &configv1.OperatorHub{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.OperatorHub), err +} + +// Delete takes name of the operatorHub and deletes it. Returns an error if one occurs. +func (c *FakeOperatorHubs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(operatorhubsResource, name, opts), &configv1.OperatorHub{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeOperatorHubs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(operatorhubsResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.OperatorHubList{}) + return err +} + +// Patch applies the patch and returns the patched operatorHub. +func (c *FakeOperatorHubs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.OperatorHub, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(operatorhubsResource, name, pt, data, subresources...), &configv1.OperatorHub{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.OperatorHub), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied operatorHub. +func (c *FakeOperatorHubs) Apply(ctx context.Context, operatorHub *applyconfigurationsconfigv1.OperatorHubApplyConfiguration, opts v1.ApplyOptions) (result *configv1.OperatorHub, err error) { + if operatorHub == nil { + return nil, fmt.Errorf("operatorHub provided to Apply must not be nil") + } + data, err := json.Marshal(operatorHub) + if err != nil { + return nil, err + } + name := operatorHub.Name + if name == nil { + return nil, fmt.Errorf("operatorHub.Name must be provided to Apply") + } + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(operatorhubsResource, *name, types.ApplyPatchType, data), &configv1.OperatorHub{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.OperatorHub), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeOperatorHubs) ApplyStatus(ctx context.Context, operatorHub *applyconfigurationsconfigv1.OperatorHubApplyConfiguration, opts v1.ApplyOptions) (result *configv1.OperatorHub, err error) { + if operatorHub == nil { + return nil, fmt.Errorf("operatorHub provided to Apply must not be nil") + } + data, err := json.Marshal(operatorHub) + if err != nil { + return nil, err + } + name := operatorHub.Name + if name == nil { + return nil, fmt.Errorf("operatorHub.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(operatorhubsResource, *name, types.ApplyPatchType, data, "status"), &configv1.OperatorHub{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.OperatorHub), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_project.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_project.go new file mode 100644 index 000000000000..cbd186030587 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_project.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeProjects implements ProjectInterface +type FakeProjects struct { + Fake *FakeConfigV1 +} + +var projectsResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "projects"} + +var projectsKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Project"} + +// Get takes name of the project, and returns the corresponding project object, and an error if there is any. +func (c *FakeProjects) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.Project, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(projectsResource, name), &configv1.Project{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Project), err +} + +// List takes label and field selectors, and returns the list of Projects that match those selectors. +func (c *FakeProjects) List(ctx context.Context, opts v1.ListOptions) (result *configv1.ProjectList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(projectsResource, projectsKind, opts), &configv1.ProjectList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.ProjectList{ListMeta: obj.(*configv1.ProjectList).ListMeta} + for _, item := range obj.(*configv1.ProjectList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested projects. +func (c *FakeProjects) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(projectsResource, opts)) +} + +// Create takes the representation of a project and creates it. Returns the server's representation of the project, and an error, if there is any. +func (c *FakeProjects) Create(ctx context.Context, project *configv1.Project, opts v1.CreateOptions) (result *configv1.Project, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(projectsResource, project), &configv1.Project{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Project), err +} + +// Update takes the representation of a project and updates it. Returns the server's representation of the project, and an error, if there is any. +func (c *FakeProjects) Update(ctx context.Context, project *configv1.Project, opts v1.UpdateOptions) (result *configv1.Project, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(projectsResource, project), &configv1.Project{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Project), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeProjects) UpdateStatus(ctx context.Context, project *configv1.Project, opts v1.UpdateOptions) (*configv1.Project, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(projectsResource, "status", project), &configv1.Project{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Project), err +} + +// Delete takes name of the project and deletes it. Returns an error if one occurs. +func (c *FakeProjects) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(projectsResource, name, opts), &configv1.Project{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeProjects) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(projectsResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.ProjectList{}) + return err +} + +// Patch applies the patch and returns the patched project. +func (c *FakeProjects) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.Project, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(projectsResource, name, pt, data, subresources...), &configv1.Project{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Project), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied project. 
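// ---------------------------------------------------------------------------
// A sketch, not generated output, of the client-side label filtering that the
// List method above implements: objects whose labels do not match the selector
// are dropped from the returned list (same assumed imports as earlier; note
// that field selectors are ignored by this generated fake).
func ExampleListWithSelector() {
	client := fake.NewSimpleClientset(
		&configv1.Project{ObjectMeta: metav1.ObjectMeta{Name: "a", Labels: map[string]string{"team": "x"}}},
		&configv1.Project{ObjectMeta: metav1.ObjectMeta{Name: "b"}},
	)
	list, err := client.ConfigV1().Projects().List(context.TODO(),
		metav1.ListOptions{LabelSelector: "team=x"})
	if err != nil || len(list.Items) != 1 || list.Items[0].Name != "a" {
		panic("expected only the labelled project to survive filtering")
	}
}
// ---------------------------------------------------------------------------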
+func (c *FakeProjects) Apply(ctx context.Context, project *applyconfigurationsconfigv1.ProjectApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Project, err error) { + if project == nil { + return nil, fmt.Errorf("project provided to Apply must not be nil") + } + data, err := json.Marshal(project) + if err != nil { + return nil, err + } + name := project.Name + if name == nil { + return nil, fmt.Errorf("project.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(projectsResource, *name, types.ApplyPatchType, data), &configv1.Project{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Project), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeProjects) ApplyStatus(ctx context.Context, project *applyconfigurationsconfigv1.ProjectApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Project, err error) { + if project == nil { + return nil, fmt.Errorf("project provided to Apply must not be nil") + } + data, err := json.Marshal(project) + if err != nil { + return nil, err + } + name := project.Name + if name == nil { + return nil, fmt.Errorf("project.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(projectsResource, *name, types.ApplyPatchType, data, "status"), &configv1.Project{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Project), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_proxy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_proxy.go new file mode 100644 index 000000000000..bc54d5cc8fcd --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_proxy.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeProxies implements ProxyInterface +type FakeProxies struct { + Fake *FakeConfigV1 +} + +var proxiesResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "proxies"} + +var proxiesKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Proxy"} + +// Get takes name of the proxy, and returns the corresponding proxy object, and an error if there is any. +func (c *FakeProxies) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.Proxy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(proxiesResource, name), &configv1.Proxy{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Proxy), err +} + +// List takes label and field selectors, and returns the list of Proxies that match those selectors. +func (c *FakeProxies) List(ctx context.Context, opts v1.ListOptions) (result *configv1.ProxyList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(proxiesResource, proxiesKind, opts), &configv1.ProxyList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.ProxyList{ListMeta: obj.(*configv1.ProxyList).ListMeta} + for _, item := range obj.(*configv1.ProxyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested proxies. +func (c *FakeProxies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(proxiesResource, opts)) +} + +// Create takes the representation of a proxy and creates it. Returns the server's representation of the proxy, and an error, if there is any. +func (c *FakeProxies) Create(ctx context.Context, proxy *configv1.Proxy, opts v1.CreateOptions) (result *configv1.Proxy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(proxiesResource, proxy), &configv1.Proxy{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Proxy), err +} + +// Update takes the representation of a proxy and updates it. Returns the server's representation of the proxy, and an error, if there is any. +func (c *FakeProxies) Update(ctx context.Context, proxy *configv1.Proxy, opts v1.UpdateOptions) (result *configv1.Proxy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(proxiesResource, proxy), &configv1.Proxy{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Proxy), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeProxies) UpdateStatus(ctx context.Context, proxy *configv1.Proxy, opts v1.UpdateOptions) (*configv1.Proxy, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(proxiesResource, "status", proxy), &configv1.Proxy{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Proxy), err +} + +// Delete takes name of the proxy and deletes it. Returns an error if one occurs. +func (c *FakeProxies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(proxiesResource, name, opts), &configv1.Proxy{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeProxies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(proxiesResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.ProxyList{}) + return err +} + +// Patch applies the patch and returns the patched proxy. +func (c *FakeProxies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.Proxy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(proxiesResource, name, pt, data, subresources...), &configv1.Proxy{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Proxy), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied proxy. 
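// ---------------------------------------------------------------------------
// A sketch, not generated output, of calling the Apply method below with a
// generated apply configuration. It assumes the constructor
// applyconfigv1.Proxy(name) from
// "github.com/openshift/client-go/config/applyconfigurations/config/v1"; as
// the method body shows, the fake reduces Apply to an ApplyPatchType patch
// against its tracker, so the object is seeded first to keep the sketch safe.
func ExampleApplyProxy() {
	client := fake.NewSimpleClientset(&configv1.Proxy{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
	})
	cfg := applyconfigv1.Proxy("cluster") // carries kind, apiVersion, and name
	if _, err := client.ConfigV1().Proxies().Apply(context.TODO(), cfg,
		metav1.ApplyOptions{FieldManager: "example-test"}); err != nil {
		panic(err)
	}
}
// ---------------------------------------------------------------------------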
+func (c *FakeProxies) Apply(ctx context.Context, proxy *applyconfigurationsconfigv1.ProxyApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Proxy, err error) { + if proxy == nil { + return nil, fmt.Errorf("proxy provided to Apply must not be nil") + } + data, err := json.Marshal(proxy) + if err != nil { + return nil, err + } + name := proxy.Name + if name == nil { + return nil, fmt.Errorf("proxy.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(proxiesResource, *name, types.ApplyPatchType, data), &configv1.Proxy{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Proxy), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeProxies) ApplyStatus(ctx context.Context, proxy *applyconfigurationsconfigv1.ProxyApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Proxy, err error) { + if proxy == nil { + return nil, fmt.Errorf("proxy provided to Apply must not be nil") + } + data, err := json.Marshal(proxy) + if err != nil { + return nil, err + } + name := proxy.Name + if name == nil { + return nil, fmt.Errorf("proxy.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(proxiesResource, *name, types.ApplyPatchType, data, "status"), &configv1.Proxy{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Proxy), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_scheduler.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_scheduler.go new file mode 100644 index 000000000000..bf4cc94169bf --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_scheduler.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + applyconfigurationsconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeSchedulers implements SchedulerInterface +type FakeSchedulers struct { + Fake *FakeConfigV1 +} + +var schedulersResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "schedulers"} + +var schedulersKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Scheduler"} + +// Get takes name of the scheduler, and returns the corresponding scheduler object, and an error if there is any. +func (c *FakeSchedulers) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.Scheduler, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(schedulersResource, name), &configv1.Scheduler{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Scheduler), err +} + +// List takes label and field selectors, and returns the list of Schedulers that match those selectors. +func (c *FakeSchedulers) List(ctx context.Context, opts v1.ListOptions) (result *configv1.SchedulerList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(schedulersResource, schedulersKind, opts), &configv1.SchedulerList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.SchedulerList{ListMeta: obj.(*configv1.SchedulerList).ListMeta} + for _, item := range obj.(*configv1.SchedulerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested schedulers. +func (c *FakeSchedulers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(schedulersResource, opts)) +} + +// Create takes the representation of a scheduler and creates it. Returns the server's representation of the scheduler, and an error, if there is any. +func (c *FakeSchedulers) Create(ctx context.Context, scheduler *configv1.Scheduler, opts v1.CreateOptions) (result *configv1.Scheduler, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(schedulersResource, scheduler), &configv1.Scheduler{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Scheduler), err +} + +// Update takes the representation of a scheduler and updates it. Returns the server's representation of the scheduler, and an error, if there is any. +func (c *FakeSchedulers) Update(ctx context.Context, scheduler *configv1.Scheduler, opts v1.UpdateOptions) (result *configv1.Scheduler, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(schedulersResource, scheduler), &configv1.Scheduler{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Scheduler), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeSchedulers) UpdateStatus(ctx context.Context, scheduler *configv1.Scheduler, opts v1.UpdateOptions) (*configv1.Scheduler, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(schedulersResource, "status", scheduler), &configv1.Scheduler{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Scheduler), err +} + +// Delete takes name of the scheduler and deletes it. Returns an error if one occurs. +func (c *FakeSchedulers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(schedulersResource, name, opts), &configv1.Scheduler{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeSchedulers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(schedulersResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.SchedulerList{}) + return err +} + +// Patch applies the patch and returns the patched scheduler. +func (c *FakeSchedulers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.Scheduler, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(schedulersResource, name, pt, data, subresources...), &configv1.Scheduler{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Scheduler), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied scheduler. +func (c *FakeSchedulers) Apply(ctx context.Context, scheduler *applyconfigurationsconfigv1.SchedulerApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Scheduler, err error) { + if scheduler == nil { + return nil, fmt.Errorf("scheduler provided to Apply must not be nil") + } + data, err := json.Marshal(scheduler) + if err != nil { + return nil, err + } + name := scheduler.Name + if name == nil { + return nil, fmt.Errorf("scheduler.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(schedulersResource, *name, types.ApplyPatchType, data), &configv1.Scheduler{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Scheduler), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeSchedulers) ApplyStatus(ctx context.Context, scheduler *applyconfigurationsconfigv1.SchedulerApplyConfiguration, opts v1.ApplyOptions) (result *configv1.Scheduler, err error) { + if scheduler == nil { + return nil, fmt.Errorf("scheduler provided to Apply must not be nil") + } + data, err := json.Marshal(scheduler) + if err != nil { + return nil, err + } + name := scheduler.Name + if name == nil { + return nil, fmt.Errorf("scheduler.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(schedulersResource, *name, types.ApplyPatchType, data, "status"), &configv1.Scheduler{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.Scheduler), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/doc.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/doc.go new file mode 100644 index 000000000000..2b5ba4c8e442 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_config_client.go new file mode 100644 index 000000000000..54d32f5eeeea --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_config_client.go @@ -0,0 +1,24 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeConfigV1alpha1 struct { + *testing.Fake +} + +func (c *FakeConfigV1alpha1) InsightsDataGathers() v1alpha1.InsightsDataGatherInterface { + return &FakeInsightsDataGathers{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
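// ---------------------------------------------------------------------------
// A caveat worth knowing, not part of the generated output: as with the v1
// fake, the RESTClient method below returns a typed-nil *rest.RESTClient. The
// interface value wrapping it is non-nil, so the clientset contract is
// satisfied, but it must never be used to issue real requests; the fake
// answers everything through Invokes() instead.
// ---------------------------------------------------------------------------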
+func (c *FakeConfigV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_insightsdatagather.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_insightsdatagather.go new file mode 100644 index 000000000000..d3e4c8955f29 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake/fake_insightsdatagather.go @@ -0,0 +1,163 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1alpha1 "github.com/openshift/api/config/v1alpha1" + configv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeInsightsDataGathers implements InsightsDataGatherInterface +type FakeInsightsDataGathers struct { + Fake *FakeConfigV1alpha1 +} + +var insightsdatagathersResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1alpha1", Resource: "insightsdatagathers"} + +var insightsdatagathersKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1alpha1", Kind: "InsightsDataGather"} + +// Get takes name of the insightsDataGather, and returns the corresponding insightsDataGather object, and an error if there is any. +func (c *FakeInsightsDataGathers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.InsightsDataGather, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(insightsdatagathersResource, name), &v1alpha1.InsightsDataGather{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.InsightsDataGather), err +} + +// List takes label and field selectors, and returns the list of InsightsDataGathers that match those selectors. +func (c *FakeInsightsDataGathers) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.InsightsDataGatherList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(insightsdatagathersResource, insightsdatagathersKind, opts), &v1alpha1.InsightsDataGatherList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.InsightsDataGatherList{ListMeta: obj.(*v1alpha1.InsightsDataGatherList).ListMeta} + for _, item := range obj.(*v1alpha1.InsightsDataGatherList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested insightsDataGathers. +func (c *FakeInsightsDataGathers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(insightsdatagathersResource, opts)) +} + +// Create takes the representation of a insightsDataGather and creates it. Returns the server's representation of the insightsDataGather, and an error, if there is any. 
+func (c *FakeInsightsDataGathers) Create(ctx context.Context, insightsDataGather *v1alpha1.InsightsDataGather, opts v1.CreateOptions) (result *v1alpha1.InsightsDataGather, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(insightsdatagathersResource, insightsDataGather), &v1alpha1.InsightsDataGather{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.InsightsDataGather), err +} + +// Update takes the representation of a insightsDataGather and updates it. Returns the server's representation of the insightsDataGather, and an error, if there is any. +func (c *FakeInsightsDataGathers) Update(ctx context.Context, insightsDataGather *v1alpha1.InsightsDataGather, opts v1.UpdateOptions) (result *v1alpha1.InsightsDataGather, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(insightsdatagathersResource, insightsDataGather), &v1alpha1.InsightsDataGather{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.InsightsDataGather), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeInsightsDataGathers) UpdateStatus(ctx context.Context, insightsDataGather *v1alpha1.InsightsDataGather, opts v1.UpdateOptions) (*v1alpha1.InsightsDataGather, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(insightsdatagathersResource, "status", insightsDataGather), &v1alpha1.InsightsDataGather{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.InsightsDataGather), err +} + +// Delete takes name of the insightsDataGather and deletes it. Returns an error if one occurs. +func (c *FakeInsightsDataGathers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(insightsdatagathersResource, name, opts), &v1alpha1.InsightsDataGather{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeInsightsDataGathers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(insightsdatagathersResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.InsightsDataGatherList{}) + return err +} + +// Patch applies the patch and returns the patched insightsDataGather. +func (c *FakeInsightsDataGathers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.InsightsDataGather, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(insightsdatagathersResource, name, pt, data, subresources...), &v1alpha1.InsightsDataGather{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.InsightsDataGather), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied insightsDataGather. 
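+// Note: the fake approximates server-side apply by marshalling the apply
+// configuration to JSON and submitting it as an ApplyPatchType patch to the
+// object tracker, so field-manager conflict semantics are not fully emulated.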
+func (c *FakeInsightsDataGathers) Apply(ctx context.Context, insightsDataGather *configv1alpha1.InsightsDataGatherApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.InsightsDataGather, err error) { + if insightsDataGather == nil { + return nil, fmt.Errorf("insightsDataGather provided to Apply must not be nil") + } + data, err := json.Marshal(insightsDataGather) + if err != nil { + return nil, err + } + name := insightsDataGather.Name + if name == nil { + return nil, fmt.Errorf("insightsDataGather.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(insightsdatagathersResource, *name, types.ApplyPatchType, data), &v1alpha1.InsightsDataGather{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.InsightsDataGather), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeInsightsDataGathers) ApplyStatus(ctx context.Context, insightsDataGather *configv1alpha1.InsightsDataGatherApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.InsightsDataGather, err error) { + if insightsDataGather == nil { + return nil, fmt.Errorf("insightsDataGather provided to Apply must not be nil") + } + data, err := json.Marshal(insightsDataGather) + if err != nil { + return nil, err + } + name := insightsDataGather.Name + if name == nil { + return nil, fmt.Errorf("insightsDataGather.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(insightsdatagathersResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.InsightsDataGather{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.InsightsDataGather), err +} diff --git a/vendor/modules.txt b/vendor/modules.txt index fc5e53b7cc13..419e3892aa71 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -854,9 +854,12 @@ github.com/openshift/client-go/config/applyconfigurations/config/v1 github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1 github.com/openshift/client-go/config/applyconfigurations/internal github.com/openshift/client-go/config/clientset/versioned +github.com/openshift/client-go/config/clientset/versioned/fake github.com/openshift/client-go/config/clientset/versioned/scheme github.com/openshift/client-go/config/clientset/versioned/typed/config/v1 +github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1 +github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/fake github.com/openshift/client-go/config/informers/externalversions github.com/openshift/client-go/config/informers/externalversions/config github.com/openshift/client-go/config/informers/externalversions/config/v1 From 19621bb76a1fe2d3b115b842120874e08e552c02 Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Wed, 1 Mar 2023 22:44:45 +0100 Subject: [PATCH 02/15] Apply request on the level of individual config v1 kinds --- test/extended/util/client.go | 9 +- test/extended/util/configv1shim.go | 446 +++++++++++++++++++----- test/extended/util/configv1shim_test.go | 233 ++++++++++++- 3 files changed, 581 insertions(+), 107 deletions(-) diff --git a/test/extended/util/client.go b/test/extended/util/client.go index 067e0019a87a..d5ef8271ac97 100644 --- 
a/test/extended/util/client.go +++ b/test/extended/util/client.go @@ -64,7 +64,6 @@ import ( authorizationv1client "github.com/openshift/client-go/authorization/clientset/versioned" buildv1client "github.com/openshift/client-go/build/clientset/versioned" configv1client "github.com/openshift/client-go/config/clientset/versioned" - fakeconfigv1client "github.com/openshift/client-go/config/clientset/versioned/fake" imagev1client "github.com/openshift/client-go/image/clientset/versioned" oauthv1client "github.com/openshift/client-go/oauth/clientset/versioned" operatorv1client "github.com/openshift/client-go/operator/clientset/versioned" @@ -625,10 +624,10 @@ func (c *CLI) AdminBuildClient() buildv1client.Interface { } func (c *CLI) AdminConfigClient() configv1client.Interface { - return &ConfigClientShim{ - adminConfig: c.AdminConfig(), - fakeClient: fakeconfigv1client.NewSimpleClientset(c.configObjects...), - } + return NewConfigClientShim( + configv1client.NewForConfigOrDie(c.AdminConfig()), + c.configObjects, + ) } func (c *CLI) AdminImageClient() imagev1client.Interface { diff --git a/test/extended/util/configv1shim.go b/test/extended/util/configv1shim.go index 8d92dfa49915..b180cb12dd73 100644 --- a/test/extended/util/configv1shim.go +++ b/test/extended/util/configv1shim.go @@ -1,40 +1,50 @@ package util import ( + "context" "fmt" + "sync" + apiconfigv1 "github.com/openshift/api/config/v1" + applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" configv1client "github.com/openshift/client-go/config/clientset/versioned" fakeconfigv1client "github.com/openshift/client-go/config/clientset/versioned/fake" configv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" configv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" + "k8s.io/klog/v2" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" "k8s.io/client-go/rest" - clienttesting "k8s.io/client-go/testing" ) // ConfigClientShim makes sure whenever there's a static // manifest present for a config v1 kind, fake client is used // instead of the real one. 
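+// For kinds backed by a dedicated shim, reads are served from the fake
+// client seeded with the static manifests, while mutations of those objects
+// are rejected with OperationNotPermitted so tests cannot drift from the
+// injected configuration.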
type ConfigClientShim struct { - adminConfig *rest.Config - v1Kinds map[string]bool - fakeClient *fakeconfigv1client.Clientset + configClient configv1client.Interface + v1Kinds map[string]bool + fakeClient *fakeconfigv1client.Clientset } func (c *ConfigClientShim) Discovery() discovery.DiscoveryInterface { - return configv1client.NewForConfigOrDie(c.adminConfig).Discovery() + return c.configClient.Discovery() } func (c *ConfigClientShim) ConfigV1() configv1.ConfigV1Interface { return &ConfigV1ClientShim{ - configv1: func() configv1.ConfigV1Interface { return configv1client.NewForConfigOrDie(c.adminConfig).ConfigV1() }, + configv1: c.configClient.ConfigV1(), v1Kinds: c.v1Kinds, fakeConfigV1Client: c.fakeClient.ConfigV1(), } } func (c *ConfigClientShim) ConfigV1alpha1() configv1alpha1.ConfigV1alpha1Interface { - return configv1client.NewForConfigOrDie(c.adminConfig).ConfigV1alpha1() + return c.configClient.ConfigV1alpha1() } var _ configv1client.Interface = &ConfigClientShim{} @@ -43,186 +53,448 @@ var _ configv1client.Interface = &ConfigClientShim{} // manifest present for a config v1 kind, fake client is used // instead of the real one. type ConfigV1ClientShim struct { - configv1 func() configv1.ConfigV1Interface + configv1 configv1.ConfigV1Interface v1Kinds map[string]bool fakeConfigV1Client configv1.ConfigV1Interface } func (c *ConfigV1ClientShim) APIServers() configv1.APIServerInterface { if c.v1Kinds["APIServer"] { - return c.fakeConfigV1Client.APIServers() + panic(fmt.Errorf("APIServer not implemented")) } - return c.configv1().APIServers() + return c.configv1.APIServers() } func (c *ConfigV1ClientShim) Authentications() configv1.AuthenticationInterface { if c.v1Kinds["Authentication"] { - return c.fakeConfigV1Client.Authentications() + panic(fmt.Errorf("Authentication not implemented")) } - return c.configv1().Authentications() + return c.configv1.Authentications() } func (c *ConfigV1ClientShim) Builds() configv1.BuildInterface { if c.v1Kinds["Build"] { - return c.fakeConfigV1Client.Builds() + panic(fmt.Errorf("Build not implemented")) } - return c.configv1().Builds() + return c.configv1.Builds() } func (c *ConfigV1ClientShim) ClusterOperators() configv1.ClusterOperatorInterface { if c.v1Kinds["ClusterOperator"] { - return c.fakeConfigV1Client.ClusterOperators() + panic(fmt.Errorf("ClusterOperator not implemented")) } - return c.configv1().ClusterOperators() + return c.configv1.ClusterOperators() } func (c *ConfigV1ClientShim) ClusterVersions() configv1.ClusterVersionInterface { if c.v1Kinds["ClusterVersion"] { - return c.fakeConfigV1Client.ClusterVersions() + panic(fmt.Errorf("ClusterVersion not implemented")) } - return c.configv1().ClusterVersions() + return c.configv1.ClusterVersions() } func (c *ConfigV1ClientShim) Consoles() configv1.ConsoleInterface { if c.v1Kinds["Console"] { - return c.fakeConfigV1Client.Consoles() + panic(fmt.Errorf("Console not implemented")) } - return c.configv1().Consoles() + return c.configv1.Consoles() } func (c *ConfigV1ClientShim) DNSes() configv1.DNSInterface { if c.v1Kinds["DNS"] { - return c.fakeConfigV1Client.DNSes() + panic(fmt.Errorf("DNS not implemented")) } - return c.configv1().DNSes() + return c.configv1.DNSes() } func (c *ConfigV1ClientShim) FeatureGates() configv1.FeatureGateInterface { if c.v1Kinds["FeatureGate"] { - return c.fakeConfigV1Client.FeatureGates() + panic(fmt.Errorf("FeatureGate not implemented")) } - return c.configv1().FeatureGates() + return c.configv1.FeatureGates() } func (c *ConfigV1ClientShim) Images() 
configv1.ImageInterface { if c.v1Kinds["Image"] { - return c.fakeConfigV1Client.Images() + panic(fmt.Errorf("Image not implemented")) } - return c.configv1().Images() + return c.configv1.Images() } func (c *ConfigV1ClientShim) ImageContentPolicies() configv1.ImageContentPolicyInterface { if c.v1Kinds["ImageContentPolicie"] { - return c.fakeConfigV1Client.ImageContentPolicies() + panic(fmt.Errorf("ImageContentPolicie not implemented")) } - return c.configv1().ImageContentPolicies() + return c.configv1.ImageContentPolicies() } func (c *ConfigV1ClientShim) ImageDigestMirrorSets() configv1.ImageDigestMirrorSetInterface { if c.v1Kinds["ImageDigestMirrorSet"] { - return c.fakeConfigV1Client.ImageDigestMirrorSets() + panic(fmt.Errorf("ImageDigestMirrorSet not implemented")) } - return c.configv1().ImageDigestMirrorSets() + return c.configv1.ImageDigestMirrorSets() } func (c *ConfigV1ClientShim) ImageTagMirrorSets() configv1.ImageTagMirrorSetInterface { if c.v1Kinds["ImageTagMirrorSet"] { - return c.fakeConfigV1Client.ImageTagMirrorSets() + panic(fmt.Errorf("ImageTagMirrorSet not implemented")) } - return c.configv1().ImageTagMirrorSets() + return c.configv1.ImageTagMirrorSets() } func (c *ConfigV1ClientShim) Infrastructures() configv1.InfrastructureInterface { - if c.v1Kinds["Infrastructure"] { - return c.fakeConfigV1Client.Infrastructures() + return &ConfigV1InfrastructuresClientShim{ + fakeConfigV1InfrastructuresClient: c.fakeConfigV1Client.Infrastructures(), + configV1InfrastructuresClient: c.configv1.Infrastructures(), } - return c.configv1().Infrastructures() } func (c *ConfigV1ClientShim) Ingresses() configv1.IngressInterface { if c.v1Kinds["Ingresse"] { - return c.fakeConfigV1Client.Ingresses() + panic(fmt.Errorf("Ingresse not implemented")) } - return c.configv1().Ingresses() + return c.configv1.Ingresses() } func (c *ConfigV1ClientShim) Networks() configv1.NetworkInterface { if c.v1Kinds["Network"] { - return c.fakeConfigV1Client.Networks() + panic(fmt.Errorf("Network not implemented")) } - return c.configv1().Networks() + return c.configv1.Networks() } func (c *ConfigV1ClientShim) Nodes() configv1.NodeInterface { if c.v1Kinds["Node"] { - return c.fakeConfigV1Client.Nodes() + panic(fmt.Errorf("Node not implemented")) } - return c.configv1().Nodes() + return c.configv1.Nodes() } func (c *ConfigV1ClientShim) OAuths() configv1.OAuthInterface { if c.v1Kinds["OAuth"] { - return c.fakeConfigV1Client.OAuths() + panic(fmt.Errorf("OAuth not implemented")) } - return c.configv1().OAuths() + return c.configv1.OAuths() } func (c *ConfigV1ClientShim) OperatorHubs() configv1.OperatorHubInterface { if c.v1Kinds["OperatorHub"] { - return c.fakeConfigV1Client.OperatorHubs() + panic(fmt.Errorf("OperatorHub not implemented")) } - return c.configv1().OperatorHubs() + return c.configv1.OperatorHubs() } func (c *ConfigV1ClientShim) Projects() configv1.ProjectInterface { if c.v1Kinds["Project"] { - return c.fakeConfigV1Client.Projects() + panic(fmt.Errorf("Project not implemented")) } - return c.configv1().Projects() + return c.configv1.Projects() } func (c *ConfigV1ClientShim) Proxies() configv1.ProxyInterface { if c.v1Kinds["Proxie"] { - return c.fakeConfigV1Client.Proxies() + panic(fmt.Errorf("Proxie not implemented")) } - return c.configv1().Proxies() + return c.configv1.Proxies() } func (c *ConfigV1ClientShim) Schedulers() configv1.SchedulerInterface { if c.v1Kinds["Scheduler"] { - return c.fakeConfigV1Client.Schedulers() + panic(fmt.Errorf("Scheduler not implemented")) } - return 
c.configv1().Schedulers() + return c.configv1.Schedulers() } func (c *ConfigV1ClientShim) RESTClient() rest.Interface { - return c.configv1().RESTClient() + return c.configv1.RESTClient() } var _ configv1.ConfigV1Interface = &ConfigV1ClientShim{} -var kind2resourceMapping = map[string]string{ - "APIServer": "apiservers", - "Authentication": "authentications", - "Build": "build", - "ClusterOperator": "clusteroperators", - "ClusterVersion": "clusterversions", - "Console": "consoles", - "DNS": "dnses", - "FeatureGate": "featuregates", - "Image": "images", - "ImageContentPolicie": "imagecontentpolicies", - "ImageDigestMirrorSet": "imagedigestmirrorsetes", - "ImageTagMirrorSet": "imagetagmirrorsetes", - "Infrastructure": "infrastructures", - "Ingresse": "ingresses", - "Network": "networks", - "Node": "nodes", - "OAuth": "oauths", - "OperatorHub": "operatorhub", - "Project": "projects", - "Proxie": "proxies", - "Scheduler": "schedulers", +type ConfigV1InfrastructuresClientShim struct { + fakeConfigV1InfrastructuresClient configv1.InfrastructureInterface + configV1InfrastructuresClient configv1.InfrastructureInterface +} + +var _ configv1.InfrastructureInterface = &ConfigV1InfrastructuresClientShim{} + +func (c *ConfigV1InfrastructuresClientShim) Create(ctx context.Context, infrastructure *apiconfigv1.Infrastructure, opts metav1.CreateOptions) (*apiconfigv1.Infrastructure, error) { + _, err := c.fakeConfigV1InfrastructuresClient.Get(ctx, infrastructure.Name, metav1.GetOptions{}) + if err == nil { + return nil, &OperationNotPermitted{Action: "create"} + } + if apierrors.IsNotFound(err) { + return c.configV1InfrastructuresClient.Create(ctx, infrastructure, opts) + } + return nil, err +} + +func (c *ConfigV1InfrastructuresClientShim) Update(ctx context.Context, infrastructure *apiconfigv1.Infrastructure, opts metav1.UpdateOptions) (*apiconfigv1.Infrastructure, error) { + _, err := c.fakeConfigV1InfrastructuresClient.Get(ctx, infrastructure.Name, metav1.GetOptions{}) + if err == nil { + return nil, &OperationNotPermitted{Action: "update"} + } + if apierrors.IsNotFound(err) { + return c.configV1InfrastructuresClient.Update(ctx, infrastructure, opts) + } + return nil, err +} + +func (c *ConfigV1InfrastructuresClientShim) UpdateStatus(ctx context.Context, infrastructure *apiconfigv1.Infrastructure, opts metav1.UpdateOptions) (*apiconfigv1.Infrastructure, error) { + _, err := c.fakeConfigV1InfrastructuresClient.Get(ctx, infrastructure.Name, metav1.GetOptions{}) + if err == nil { + return nil, &OperationNotPermitted{Action: "updatestatus"} + } + if apierrors.IsNotFound(err) { + return c.configV1InfrastructuresClient.UpdateStatus(ctx, infrastructure, opts) + } + return nil, err +} + +func (c *ConfigV1InfrastructuresClientShim) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.fakeConfigV1InfrastructuresClient.Get(ctx, name, metav1.GetOptions{}) + if err == nil { + return &OperationNotPermitted{Action: "delete"} + } + if apierrors.IsNotFound(err) { + return c.configV1InfrastructuresClient.Delete(ctx, name, opts) + } + return err +} + +func (c *ConfigV1InfrastructuresClientShim) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + list, err := c.fakeConfigV1InfrastructuresClient.List(ctx, listOpts) + if err != nil { + return fmt.Errorf("unable to list objects during DeleteCollection request: %v", err) + } + // if either of the static manifests is expected to be deleted, the whole request is invalid + if len(list.Items) 
> 0 { + return &OperationNotPermitted{Action: "deletecollection"} + } + return c.configV1InfrastructuresClient.DeleteCollection(ctx, opts, listOpts) +} + +func (c *ConfigV1InfrastructuresClientShim) Get(ctx context.Context, name string, opts metav1.GetOptions) (*apiconfigv1.Infrastructure, error) { + obj, err := c.fakeConfigV1InfrastructuresClient.Get(ctx, name, metav1.GetOptions{}) + if err == nil { + return obj, nil + } + if apierrors.IsNotFound(err) { + return c.configV1InfrastructuresClient.Get(ctx, name, opts) + } + return nil, err +} + +func (c *ConfigV1InfrastructuresClientShim) List(ctx context.Context, opts metav1.ListOptions) (*apiconfigv1.InfrastructureList, error) { + // INFO: field selectors will not work + staticObjList, err := c.fakeConfigV1InfrastructuresClient.List(ctx, opts) + if err != nil { + return nil, err + } + objList, err := c.configV1InfrastructuresClient.List(ctx, opts) + if err != nil { + return nil, err + } + + items := []apiconfigv1.Infrastructure{} + knownKeys := make(map[string]struct{}) + for _, item := range staticObjList.Items { + items = append(items, item) + knownKeys[item.Name] = struct{}{} + } + + for _, item := range objList.Items { + // skip objects with corresponding static manifests + if _, exists := knownKeys[item.Name]; exists { + continue + } + items = append(items, item) + knownKeys[item.Name] = struct{}{} + } + + return &apiconfigv1.InfrastructureList{ + TypeMeta: objList.TypeMeta, + ListMeta: objList.ListMeta, + Items: items, + }, nil +} + +func (c *ConfigV1InfrastructuresClientShim) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + // static manifests do not produce any watch event besides create + // If the object exists, no need to generate the ADDED watch event + + // INFO: field selectors will not work + staticObjList, err := c.fakeConfigV1InfrastructuresClient.List(ctx, opts) + if err != nil { + return nil, err + } + + resourceWatcher, err := c.configV1InfrastructuresClient.Watch(ctx, opts) + if err != nil { + return nil, err + } + + if len(staticObjList.Items) == 0 { + return resourceWatcher, nil + } + + objs := []runtime.Object{} + watcher := NewFakeWatcher() + // Produce ADDED watch event types for the static manifests + for _, item := range staticObjList.Items { + // make shallow copy + obj := item + watcher.Action(watch.Added, &obj) + objs = append(objs, &obj) + } + + go func() { + err := watcher.Follow(resourceWatcher, objs...) + if err != nil { + klog.Errorf("Config shim fake watcher returned prematurely: %v", err) + } + watcher.Stop() + }() + + return watcher, nil +} + +func (c *ConfigV1InfrastructuresClientShim) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*apiconfigv1.Infrastructure, error) { + _, err := c.fakeConfigV1InfrastructuresClient.Get(ctx, name, metav1.GetOptions{}) + if err == nil { + return nil, &OperationNotPermitted{Action: "patch"} + } + if apierrors.IsNotFound(err) { + return c.configV1InfrastructuresClient.Patch(ctx, name, pt, data, opts, subresources...) 
+	}
+	return nil, err
+}
+
+func (c *ConfigV1InfrastructuresClientShim) Apply(ctx context.Context, infrastructure *applyconfigv1.InfrastructureApplyConfiguration, opts metav1.ApplyOptions) (*apiconfigv1.Infrastructure, error) {
+	// Unable to determine existence of a static manifest
+	if infrastructure == nil || infrastructure.Name == nil {
+		return c.configV1InfrastructuresClient.Apply(ctx, infrastructure, opts)
+	}
+	_, err := c.fakeConfigV1InfrastructuresClient.Get(ctx, *infrastructure.Name, metav1.GetOptions{})
+	if err == nil {
+		return nil, &OperationNotPermitted{Action: "apply"}
+	}
+	if apierrors.IsNotFound(err) {
+		return c.configV1InfrastructuresClient.Apply(ctx, infrastructure, opts)
+	}
+	return nil, err
+}
+
+func (c *ConfigV1InfrastructuresClientShim) ApplyStatus(ctx context.Context, infrastructure *applyconfigv1.InfrastructureApplyConfiguration, opts metav1.ApplyOptions) (*apiconfigv1.Infrastructure, error) {
+	// Unable to determine existence of a static manifest
+	if infrastructure == nil || infrastructure.Name == nil {
+		return c.configV1InfrastructuresClient.ApplyStatus(ctx, infrastructure, opts)
+	}
+	_, err := c.fakeConfigV1InfrastructuresClient.Get(ctx, *infrastructure.Name, metav1.GetOptions{})
+	if err == nil {
+		return nil, &OperationNotPermitted{Action: "applystatus"}
+	}
+	if apierrors.IsNotFound(err) {
+		return c.configV1InfrastructuresClient.ApplyStatus(ctx, infrastructure, opts)
+	}
+	return nil, err
+}
+
+const (
+	DefaultChanSize = 1000
+)
+
+// FakeWatcher lets you test anything that consumes a watch.Interface; threadsafe.
+type FakeWatcher struct {
+	result  chan watch.Event
+	Stopped bool
+	sync.Mutex
+}
+
+var _ watch.Interface = &FakeWatcher{}
+
+func NewFakeWatcher() *FakeWatcher {
+	return &FakeWatcher{
+		result: make(chan watch.Event, DefaultChanSize),
+	}
+}
+
+// Stop implements Interface.Stop().
+func (f *FakeWatcher) Stop() {
+	f.Lock()
+	defer f.Unlock()
+	if !f.Stopped {
+		klog.V(4).Infof("Stopping fake watcher.")
+		close(f.result)
+		f.Stopped = true
+	}
+}
+
+func (f *FakeWatcher) IsStopped() bool {
+	f.Lock()
+	defer f.Unlock()
+	return f.Stopped
+}
+
+// Reset prepares the watcher to be reused.
+func (f *FakeWatcher) Reset() {
+	f.Lock()
+	defer f.Unlock()
+	f.Stopped = false
+	f.result = make(chan watch.Event, DefaultChanSize)
+}
+
+func (f *FakeWatcher) ResultChan() <-chan watch.Event {
+	f.Lock()
+	defer f.Unlock()
+	return f.result
+}
+
+type itemKey struct {
+	namespace, name string
+}
+
+// Follow relays events from inputChan to this watcher, dropping events for
+// the objects in ignoreList. It returns once the followed watcher is closed
+// or this watcher is stopped.
+func (f *FakeWatcher) Follow(inputChan watch.Interface, ignoreList ...runtime.Object) error {
+	ignore := make(map[itemKey]struct{})
+
+	for _, item := range ignoreList {
+		accessor, err := apimeta.Accessor(item)
+		if err != nil {
+			return fmt.Errorf("unable to construct meta accessor: %v", err)
+		}
+		ignore[itemKey{namespace: accessor.GetNamespace(), name: accessor.GetName()}] = struct{}{}
+	}
+
+	for {
+		if f.IsStopped() {
+			return nil
+		}
+		item, ok := <-inputChan.ResultChan()
+		if !ok {
+			// the followed watcher was closed, there is nothing left to relay
+			return nil
+		}
+		accessor, err := apimeta.Accessor(item.Object)
+		if err != nil {
+			return fmt.Errorf("unable to construct meta accessor: %v", err)
+		}
+		// skip events for objects served from static manifests
+		if _, exists := ignore[itemKey{namespace: accessor.GetNamespace(), name: accessor.GetName()}]; exists {
+			continue
+		}
+		klog.V(4).Infof("FakeWatcher.Follow: item.Type: %v, item.Object: %v\n", item.Type, item.Object)
+		f.Action(item.Type, item.Object)
+	}
+}
+
+// Action sends an event of the requested type, for table-based testing.
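+// The backing channel is buffered (DefaultChanSize); when the buffer is full
+// Action panics instead of blocking, so a slow consumer fails loudly in tests.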
+func (f *FakeWatcher) Action(action watch.EventType, obj runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- watch.Event{action, obj}: + return + default: + panic(fmt.Errorf("channel full")) + } + } } type OperationNotPermitted struct { @@ -234,9 +506,9 @@ func (e OperationNotPermitted) Error() string { } func NewConfigClientShim( - adminConfig *rest.Config, + configClient configv1client.Interface, objects []runtime.Object, -) (error, *ConfigClientShim) { +) *ConfigClientShim { fakeClient := fakeconfigv1client.NewSimpleClientset(objects...) v1Kinds := make(map[string]bool) @@ -245,27 +517,17 @@ func NewConfigClientShim( objectKind := object.GetObjectKind().GroupVersionKind() // currently supportig only config.openshift.io/v1 apiversion if objectKind.Group != "config.openshift.io" { - return fmt.Errorf("unknown group: %v", objectKind.Group), nil + continue } if objectKind.Version != "v1" { - return fmt.Errorf("unknown version: %v", objectKind.Version), nil - } - resource, exists := kind2resourceMapping[objectKind.Kind] - if !exists { - return fmt.Errorf("unknown kind mapping for %v", objectKind.Kind), nil + continue } - fakeClient.Fake.PrependReactor("update", resource, func(action clienttesting.Action) (bool, runtime.Object, error) { - return true, nil, &OperationNotPermitted{Action: "update"} - }) - fakeClient.Fake.PrependReactor("delete", resource, func(action clienttesting.Action) (bool, runtime.Object, error) { - return true, nil, &OperationNotPermitted{Action: "delete"} - }) v1Kinds[objectKind.Kind] = true } - return nil, &ConfigClientShim{ - adminConfig: adminConfig, - v1Kinds: v1Kinds, - fakeClient: fakeClient, + return &ConfigClientShim{ + configClient: configClient, + v1Kinds: v1Kinds, + fakeClient: fakeClient, } } diff --git a/test/extended/util/configv1shim_test.go b/test/extended/util/configv1shim_test.go index 60ede083197c..42e90a41b9e1 100644 --- a/test/extended/util/configv1shim_test.go +++ b/test/extended/util/configv1shim_test.go @@ -2,21 +2,31 @@ package util import ( "context" + "encoding/json" "testing" + "time" + "github.com/google/go-cmp/cmp" configv1 "github.com/openshift/api/config/v1" + applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" + fakeconfigv1client "github.com/openshift/client-go/config/clientset/versioned/fake" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/watch" ) -func TestConfigClientShimErrorOnMutation(t *testing.T) { - object := &configv1.Infrastructure{ +func createInfrastructureObject(name string) *configv1.Infrastructure { + return &configv1.Infrastructure{ TypeMeta: metav1.TypeMeta{ APIVersion: "config.openshift.io/v1", Kind: "Infrastructure", }, ObjectMeta: metav1.ObjectMeta{ - Name: "cluster", + Name: name, + Labels: map[string]string{}, }, Spec: configv1.InfrastructureSpec{ PlatformSpec: configv1.PlatformSpec{ @@ -39,25 +49,228 @@ func TestConfigClientShimErrorOnMutation(t *testing.T) { }, }, } +} + +func TestConfigClientShimErrorOnMutation(t *testing.T) { + updateNotPermitted := OperationNotPermitted{Action: "update"} + updatestatusNotPermitted := OperationNotPermitted{Action: "updatestatus"} + patchNotPermitted := OperationNotPermitted{Action: "patch"} + applyNotPermitted := OperationNotPermitted{Action: "apply"} + applyStatusNotPermitted := 
OperationNotPermitted{Action: "applystatus"} + deleteNotPermitted := OperationNotPermitted{Action: "delete"} + deleteCollectionNotPermitted := OperationNotPermitted{Action: "deletecollection"} + + object := createInfrastructureObject("cluster") + object.Labels["deleteLabel"] = "somevalue" + object2 := createInfrastructureObject("cluster2") + object2.Labels["deleteLabel"] = "somevalue2" - err, client := NewConfigClientShim( - nil, + configClient := fakeconfigv1client.NewSimpleClientset( + object2, + ) + + client := NewConfigClientShim( + configClient, []runtime.Object{object}, ) + + _, err := client.ConfigV1().Infrastructures().Get(context.TODO(), object.Name, metav1.GetOptions{}) if err != nil { - t.Fatal(err) + t.Fatalf("Expected no error for a Get request, got %q instead", err) } - updateNotPermitted := OperationNotPermitted{Action: "update"} - deleteNotPermitted := OperationNotPermitted{Action: "delete"} + _, err = client.ConfigV1().Infrastructures().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + t.Fatalf("Expected no error for a List request, got %q instead", err) + } _, err = client.ConfigV1().Infrastructures().Update(context.TODO(), object, metav1.UpdateOptions{}) if err == nil || err.Error() != updateNotPermitted.Error() { - t.Fatalf("Expected %q error, got %q instead", updateNotPermitted.Error(), err) + t.Fatalf("Expected %q error for an Update request, got %q instead", updateNotPermitted.Error(), err) + } + + _, err = client.ConfigV1().Infrastructures().Update(context.TODO(), object2, metav1.UpdateOptions{}) + if err != nil { + t.Fatalf("Expected no error for an Update request, got %q instead", err) + } + + _, err = client.ConfigV1().Infrastructures().UpdateStatus(context.TODO(), object, metav1.UpdateOptions{}) + if err == nil || err.Error() != updatestatusNotPermitted.Error() { + t.Fatalf("Expected %q error for an UpdateStatus request, got %q instead", updatestatusNotPermitted.Error(), err) + } + + _, err = client.ConfigV1().Infrastructures().UpdateStatus(context.TODO(), object2, metav1.UpdateOptions{}) + if err != nil { + t.Fatalf("Expected no error for an UpdateStatus request, got %q instead", err) + } + + oldData, err := json.Marshal(object) + if err != nil { + t.Fatalf("Unable to marshal an object: %v", err) + } + + object.Labels["key"] = "value" + newData, err := json.Marshal(object) + if err != nil { + t.Fatalf("Unable to marshal an object: %v", err) + } + delete(object.Labels, "key") + + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &configv1.Infrastructure{}) + if err != nil { + t.Fatalf("Unable to create a patch: %v", err) + } + + _, err = client.ConfigV1().Infrastructures().Patch(context.TODO(), object.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + if err == nil || err.Error() != patchNotPermitted.Error() { + t.Fatalf("Expected %q error for a Patch request, got %q instead", patchNotPermitted.Error(), err) + } + + _, err = client.ConfigV1().Infrastructures().Patch(context.TODO(), object2.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + if err != nil { + t.Fatalf("Expected no error for a Patch request, got %q instead", err) + } + + applyConfig, err := applyconfigv1.ExtractInfrastructure(object, "test-mgr") + if err != nil { + t.Fatalf("Unable to construct an apply config for %v: %v", object.Name, err) + } + _, err = client.ConfigV1().Infrastructures().Apply(context.TODO(), applyConfig, metav1.ApplyOptions{FieldManager: "test-mgr", Force: true}) + if err == nil || err.Error() 
!= applyNotPermitted.Error() {
+		t.Fatalf("Expected %q error for an Apply request, got %q instead", applyNotPermitted.Error(), err)
+	}
+
+	applyConfig2, err := applyconfigv1.ExtractInfrastructure(object2, "test-mgr")
+	if err != nil {
+		t.Fatalf("Unable to construct an apply config for %v: %v", object2.Name, err)
+	}
+	_, err = client.ConfigV1().Infrastructures().Apply(context.TODO(), applyConfig2, metav1.ApplyOptions{FieldManager: "test-mgr", Force: true})
+	if err != nil {
+		t.Fatalf("Expected no error for an Apply request, got %q instead", err)
+	}
+
+	applyStatusConfig, err := applyconfigv1.ExtractInfrastructureStatus(object, "test-mgr")
+	if err != nil {
+		t.Fatalf("Unable to construct an apply status config for %v: %v", object.Name, err)
+	}
+	_, err = client.ConfigV1().Infrastructures().ApplyStatus(context.TODO(), applyStatusConfig, metav1.ApplyOptions{FieldManager: "test-mgr", Force: true})
+	if err == nil || err.Error() != applyStatusNotPermitted.Error() {
+		t.Fatalf("Expected %q error for an ApplyStatus request, got %q instead", applyStatusNotPermitted.Error(), err)
+	}
+
+	applyStatusConfig2, err := applyconfigv1.ExtractInfrastructureStatus(object2, "test-mgr")
+	if err != nil {
+		t.Fatalf("Unable to construct an apply status config for %v: %v", object2.Name, err)
+	}
+	_, err = client.ConfigV1().Infrastructures().ApplyStatus(context.TODO(), applyStatusConfig2, metav1.ApplyOptions{FieldManager: "test-mgr", Force: true})
+	if err != nil {
+		t.Fatalf("Expected no error for an ApplyStatus request, got %q instead", err)
 	}
 
 	err = client.ConfigV1().Infrastructures().Delete(context.TODO(), object.Name, metav1.DeleteOptions{})
 	if err == nil || err.Error() != deleteNotPermitted.Error() {
-		t.Fatalf("Expected %q error, got %q instead", deleteNotPermitted.Error(), err)
+		t.Fatalf("Expected %q error for a Delete request, got %q instead", deleteNotPermitted.Error(), err)
+	}
+
+	err = client.ConfigV1().Infrastructures().Delete(context.TODO(), object2.Name, metav1.DeleteOptions{})
+	if err != nil {
+		t.Fatalf("Expected no error for a Delete request, got %q instead", err)
+	}
+
+	err = client.ConfigV1().Infrastructures().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{
+		LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"deleteLabel": "somevalue"})).String(),
+	})
+	if err == nil || err.Error() != deleteCollectionNotPermitted.Error() {
+		t.Fatalf("Expected %q error for a DeleteCollection request, got %q instead", deleteCollectionNotPermitted.Error(), err)
+	}
+
+	err = client.ConfigV1().Infrastructures().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{
+		LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"deleteLabel": "somevalue2"})).String(),
+	})
+	if err != nil {
+		t.Fatalf("Expected no error for a DeleteCollection request, got %q instead", err)
+	}
+}
+
+func TestConfigClientShimWatchRequest(t *testing.T) {
+	tests := []struct {
+		name                string
+		staticObjects       []runtime.Object
+		realObjects         []runtime.Object
+		expectedWatchEvents []watch.Event
+	}{
+		{
+			name: "merging static and real objects, no object override",
+			staticObjects: []runtime.Object{
+				createInfrastructureObject("cluster"),
+			},
+			realObjects: []runtime.Object{
+				createInfrastructureObject("cluster2"),
+				createInfrastructureObject("cluster3"),
+			},
+			expectedWatchEvents: []watch.Event{
+				{Type: watch.Added, Object: createInfrastructureObject("cluster")},
+				{Type: watch.Added, Object: createInfrastructureObject("cluster2")},
+				{Type:
watch.Added, Object: createInfrastructureObject("cluster3")},
+			},
+		},
+		{
+			name: "merging static and real objects, static object override",
+			staticObjects: []runtime.Object{
+				createInfrastructureObject("cluster"),
+			},
+			realObjects: []runtime.Object{
+				createInfrastructureObject("cluster"),
+				createInfrastructureObject("cluster2"),
+			},
+			expectedWatchEvents: []watch.Event{
+				{Type: watch.Added, Object: createInfrastructureObject("cluster")},
+				{Type: watch.Added, Object: createInfrastructureObject("cluster2")},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			configClient := fakeconfigv1client.NewSimpleClientset()
+
+			client := NewConfigClientShim(
+				configClient,
+				test.staticObjects,
+			)
+
+			// The watch request has to be created first when using the fake clientset
+			resultChan, err := client.ConfigV1().Infrastructures().Watch(context.TODO(), metav1.ListOptions{})
+			if err != nil {
+				t.Fatalf("Expected no error, got %q instead", err)
+			}
+			defer resultChan.Stop()
+
+			// And only then the fake clientset can be populated to generate the watch events
+			for _, obj := range test.realObjects {
+				configClient.Tracker().Add(obj)
+			}
+
+			// verify the watch events
+			ticker := time.NewTicker(500 * time.Millisecond)
+			defer ticker.Stop()
+			size := len(test.expectedWatchEvents)
+			eventCounter := 0
+			for i := 0; i < size; i++ {
+				select {
+				case item := <-resultChan.ResultChan():
+					diff := cmp.Diff(test.expectedWatchEvents[i], item)
+					if diff != "" {
+						t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", test.name, diff)
+					}
+					eventCounter++
+				case <-ticker.C:
+					t.Errorf("failed waiting for watch event")
+				}
+			}
+			if eventCounter < size {
+				t.Errorf("Expected %v watch events, got %v instead", size, eventCounter)
+			}
+		})
 	}
 }

From dba439bd9bed355caee54588ba6cd1de1bd7df53 Mon Sep 17 00:00:00 2001
From: Jan Chaloupka
Date: Thu, 2 Mar 2023 20:20:52 +0100
Subject: [PATCH 03/15] Add config.openshift.io/v1 Networks client shim

---
 test/extended/util/configv1shim.go | 195 ++++++++++++++++++++++++++++-
 1 file changed, 192 insertions(+), 3 deletions(-)

diff --git a/test/extended/util/configv1shim.go b/test/extended/util/configv1shim.go
index b180cb12dd73..f71a5d8e90a4 100644
--- a/test/extended/util/configv1shim.go
+++ b/test/extended/util/configv1shim.go
@@ -157,10 +157,10 @@ func (c *ConfigV1ClientShim) Ingresses() configv1.IngressInterface {
 }
 
 func (c *ConfigV1ClientShim) Networks() configv1.NetworkInterface {
-	if c.v1Kinds["Network"] {
-		panic(fmt.Errorf("Network not implemented"))
+	return &ConfigV1NetworksClientShim{
+		fakeConfigV1NetworksClient: c.fakeConfigV1Client.Networks(),
+		configV1NetworksClient:     c.configv1.Networks(),
 	}
-	return c.configv1.Networks()
 }
 
 func (c *ConfigV1ClientShim) Nodes() configv1.NodeInterface {
@@ -400,6 +400,195 @@ func (c *ConfigV1InfrastructuresClientShim) ApplyStatus(ctx context.Context, inf
 	return nil, err
 }
 
+type ConfigV1NetworksClientShim struct {
+	fakeConfigV1NetworksClient configv1.NetworkInterface
+	configV1NetworksClient     configv1.NetworkInterface
+}
+
+var _ configv1.NetworkInterface = &ConfigV1NetworksClientShim{}
+
+func (c *ConfigV1NetworksClientShim) Create(ctx context.Context, infrastructure *apiconfigv1.Network, opts metav1.CreateOptions) (*apiconfigv1.Network, error) {
+	_, err := c.fakeConfigV1NetworksClient.Get(ctx, infrastructure.Name, metav1.GetOptions{})
+	if err == nil {
+		return nil, &OperationNotPermitted{Action: "create"}
+	}
+	if apierrors.IsNotFound(err) {
+		return
c.configV1NetworksClient.Create(ctx, infrastructure, opts) + } + return nil, err +} + +func (c *ConfigV1NetworksClientShim) Update(ctx context.Context, infrastructure *apiconfigv1.Network, opts metav1.UpdateOptions) (*apiconfigv1.Network, error) { + _, err := c.fakeConfigV1NetworksClient.Get(ctx, infrastructure.Name, metav1.GetOptions{}) + if err == nil { + return nil, &OperationNotPermitted{Action: "update"} + } + if apierrors.IsNotFound(err) { + return c.configV1NetworksClient.Update(ctx, infrastructure, opts) + } + return nil, err +} + +func (c *ConfigV1NetworksClientShim) UpdateStatus(ctx context.Context, infrastructure *apiconfigv1.Network, opts metav1.UpdateOptions) (*apiconfigv1.Network, error) { + _, err := c.fakeConfigV1NetworksClient.Get(ctx, infrastructure.Name, metav1.GetOptions{}) + if err == nil { + return nil, &OperationNotPermitted{Action: "updatestatus"} + } + if apierrors.IsNotFound(err) { + return c.configV1NetworksClient.UpdateStatus(ctx, infrastructure, opts) + } + return nil, err +} + +func (c *ConfigV1NetworksClientShim) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.fakeConfigV1NetworksClient.Get(ctx, name, metav1.GetOptions{}) + if err == nil { + return &OperationNotPermitted{Action: "delete"} + } + if apierrors.IsNotFound(err) { + return c.configV1NetworksClient.Delete(ctx, name, opts) + } + return err +} + +func (c *ConfigV1NetworksClientShim) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + list, err := c.fakeConfigV1NetworksClient.List(ctx, listOpts) + if err != nil { + return fmt.Errorf("unable to list objects during DeleteCollection request: %v", err) + } + // if either of the static manifests is expected to be deleted, the whole request is invalid + if len(list.Items) > 0 { + return &OperationNotPermitted{Action: "deletecollection"} + } + return c.configV1NetworksClient.DeleteCollection(ctx, opts, listOpts) +} + +func (c *ConfigV1NetworksClientShim) Get(ctx context.Context, name string, opts metav1.GetOptions) (*apiconfigv1.Network, error) { + obj, err := c.fakeConfigV1NetworksClient.Get(ctx, name, metav1.GetOptions{}) + if err == nil { + return obj, nil + } + if apierrors.IsNotFound(err) { + return c.configV1NetworksClient.Get(ctx, name, opts) + } + return nil, err +} + +func (c *ConfigV1NetworksClientShim) List(ctx context.Context, opts metav1.ListOptions) (*apiconfigv1.NetworkList, error) { + // INFO: field selectors will not work + staticObjList, err := c.fakeConfigV1NetworksClient.List(ctx, opts) + if err != nil { + return nil, err + } + objList, err := c.configV1NetworksClient.List(ctx, opts) + if err != nil { + return nil, err + } + + items := []apiconfigv1.Network{} + knownKeys := make(map[string]struct{}) + for _, item := range staticObjList.Items { + items = append(items, item) + knownKeys[item.Name] = struct{}{} + } + + for _, item := range objList.Items { + // skip objects with corresponding static manifests + if _, exists := knownKeys[item.Name]; exists { + continue + } + items = append(items, item) + knownKeys[item.Name] = struct{}{} + } + + return &apiconfigv1.NetworkList{ + TypeMeta: objList.TypeMeta, + ListMeta: objList.ListMeta, + Items: items, + }, nil +} + +func (c *ConfigV1NetworksClientShim) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + // static manifests do not produce any watch event besides create + // If the object exists, no need to generate the ADDED watch event + + // INFO: field selectors will not 
work + staticObjList, err := c.fakeConfigV1NetworksClient.List(ctx, opts) + if err != nil { + return nil, err + } + + resourceWatcher, err := c.configV1NetworksClient.Watch(ctx, opts) + if err != nil { + return nil, err + } + + if len(staticObjList.Items) == 0 { + return resourceWatcher, nil + } + + objs := []runtime.Object{} + watcher := NewFakeWatcher() + // Produce ADDED watch event types for the static manifests + for _, item := range staticObjList.Items { + // make shallow copy + obj := item + watcher.Action(watch.Added, &obj) + objs = append(objs, &obj) + } + + go func() { + err := watcher.Follow(resourceWatcher, objs...) + if err != nil { + klog.Errorf("Config shim fake watcher returned prematurely: %v", err) + } + watcher.Stop() + }() + + return watcher, nil +} + +func (c *ConfigV1NetworksClientShim) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*apiconfigv1.Network, error) { + _, err := c.fakeConfigV1NetworksClient.Get(ctx, name, metav1.GetOptions{}) + if err == nil { + return nil, &OperationNotPermitted{Action: "patch"} + } + if apierrors.IsNotFound(err) { + return c.configV1NetworksClient.Patch(ctx, name, pt, data, opts, subresources...) + } + return nil, err +} + +func (c *ConfigV1NetworksClientShim) Apply(ctx context.Context, infrastructure *applyconfigv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (*apiconfigv1.Network, error) { + // Unable to determine existence of a static manifest + if infrastructure == nil || infrastructure.Name == nil { + return c.configV1NetworksClient.Apply(ctx, infrastructure, opts) + } + _, err := c.fakeConfigV1NetworksClient.Get(ctx, *infrastructure.Name, metav1.GetOptions{}) + if err == nil { + return nil, &OperationNotPermitted{Action: "apply"} + } + if apierrors.IsNotFound(err) { + return c.configV1NetworksClient.Apply(ctx, infrastructure, opts) + } + return nil, err +} + +func (c *ConfigV1NetworksClientShim) ApplyStatus(ctx context.Context, infrastructure *applyconfigv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (*apiconfigv1.Network, error) { + // Unable to determine existence of a static manifest + if infrastructure == nil || infrastructure.Name == nil { + return c.configV1NetworksClient.ApplyStatus(ctx, infrastructure, opts) + } + _, err := c.fakeConfigV1NetworksClient.Get(ctx, *infrastructure.Name, metav1.GetOptions{}) + if err == nil { + return nil, &OperationNotPermitted{Action: "applystatus"} + } + if apierrors.IsNotFound(err) { + return c.configV1NetworksClient.ApplyStatus(ctx, infrastructure, opts) + } + return nil, err +} + const ( DefaultChanSize = 1000 ) From a01eeebd94f57da57e9588fbc5b17286cb32ba9f Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Fri, 3 Mar 2023 11:11:52 +0100 Subject: [PATCH 04/15] Collect static manifests only when the manifest directory is not empty --- test/extended/util/client.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/test/extended/util/client.go b/test/extended/util/client.go index d5ef8271ac97..86882bda3384 100644 --- a/test/extended/util/client.go +++ b/test/extended/util/client.go @@ -574,9 +574,11 @@ func (c *CLI) RESTMapper() meta.RESTMapper { } func (c *CLI) setupStaticConfigsFromManifests() { - err, objects := collectConfigManifestsFromDir(c.staticConfigManifestDir) - o.Expect(err).ToNot(o.HaveOccurred()) - c.configObjects = objects + if len(c.staticConfigManifestDir) > 0 { + err, objects := collectConfigManifestsFromDir(c.staticConfigManifestDir) + 
o.Expect(err).ToNot(o.HaveOccurred()) + c.configObjects = objects + } } func (c *CLI) AppsClient() appsv1client.Interface { From a79aa621f0ee0f702e219998d679a8d56a739c05 Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Fri, 3 Mar 2023 12:34:19 +0100 Subject: [PATCH 05/15] Make make verify happy --- .../generated/zz_generated.annotations.go | 1652 +++++++++++++++-- test/extended/util/configv1shim.go | 2 +- 2 files changed, 1543 insertions(+), 111 deletions(-) diff --git a/test/extended/util/annotate/generated/zz_generated.annotations.go b/test/extended/util/annotate/generated/zz_generated.annotations.go index 649208ab423b..4d605d4205e2 100644 --- a/test/extended/util/annotate/generated/zz_generated.annotations.go +++ b/test/extended/util/annotate/generated/zz_generated.annotations.go @@ -7,12 +7,28 @@ import ( ) var Annotations = map[string]string{ + "[Conformance][sig-api-machinery][Feature:APIServer] local kubeconfig \"lb-ext.kubeconfig\" should be present on all masters and work": " [Suite:openshift/conformance/parallel/minimal]", + + "[Conformance][sig-api-machinery][Feature:APIServer] local kubeconfig \"lb-int.kubeconfig\" should be present on all masters and work": " [Suite:openshift/conformance/parallel/minimal]", + + "[Conformance][sig-api-machinery][Feature:APIServer] local kubeconfig \"localhost-recovery.kubeconfig\" should be present on all masters and work": " [Suite:openshift/conformance/parallel/minimal]", + + "[Conformance][sig-api-machinery][Feature:APIServer] local kubeconfig \"localhost.kubeconfig\" should be present on all masters and work": " [Suite:openshift/conformance/parallel/minimal]", + + "[Conformance][sig-sno][Serial] Cluster should allow a fast rollout of kube-apiserver with no pods restarts during API disruption [apigroup:config.openshift.io][apigroup:operator.openshift.io]": " [Suite:openshift/conformance/serial/minimal]", + + "[Serial] [sig-auth][Feature:OAuthServer] [RequestHeaders] [IdP] test RequestHeaders IdP [apigroup:config.openshift.io][apigroup:user.openshift.io][apigroup:apps.openshift.io]": " [Suite:openshift/conformance/serial]", + + "[sig-api-machinery] API data in etcd should be stored at the correct location and version for all resources [Serial]": " [Suite:openshift/conformance/serial]", + "[sig-api-machinery] API priority and fairness should ensure that requests can be classified by adding FlowSchema and PriorityLevelConfiguration": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-api-machinery] API priority and fairness should ensure that requests can't be drowned out (fairness)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-api-machinery] API priority and fairness should ensure that requests can't be drowned out (priority)": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-api-machinery] APIServer CR fields validation additionalCORSAllowedOrigins [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]", + "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing mutating webhooks should work [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", "[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing validating webhooks should work [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", @@ -259,6 +275,62 @@ var Annotations = map[string]string{ "[sig-api-machinery] server version should find the server version [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + 
"[sig-api-machinery][Feature:APIServer] TestTLSDefaults": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:APIServer] anonymous browsers should get a 403 from /": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:APIServer] authenticated browser should get a 200 from /": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:APIServer][Late] API LBs follow /readyz of kube-apiserver and don't send request early": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:APIServer][Late] API LBs follow /readyz of kube-apiserver and stop sending requests before server shutdowns for external clients": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:APIServer][Late] API LBs follow /readyz of kube-apiserver and stop sending requests": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:APIServer][Late] kube-apiserver terminates within graceful termination period": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:APIServer][Late] kubelet terminates kube-apiserver gracefully": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:Audit] Basic audit should audit API calls": " [Disabled:SpecialConfig]", + + "[sig-api-machinery][Feature:ClusterResourceQuota] Cluster resource quota should control resource limits across namespaces [apigroup:quota.openshift.io][apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:ResourceQuota] Object count should properly count the number of imagestreams resources [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for apps.openshift.io/v1, Resource=deploymentconfigs [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for build.openshift.io/v1, Resource=buildconfigs [apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for build.openshift.io/v1, Resource=builds [apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for image.openshift.io/v1, Resource=images [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for image.openshift.io/v1, Resource=imagestreams [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for oauth.openshift.io/v1, Resource=oauthaccesstokens [apigroup:oauth.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for oauth.openshift.io/v1, Resource=oauthauthorizetokens [apigroup:oauth.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for oauth.openshift.io/v1, Resource=oauthclientauthorizations [apigroup:oauth.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for oauth.openshift.io/v1, Resource=oauthclients [apigroup:oauth.openshift.io]": " 
[Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for route.openshift.io/v1, Resource=routes [apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for security.openshift.io/v1, Resource=rangeallocations [apigroup:security.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for template.openshift.io/v1, Resource=brokertemplateinstances [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for template.openshift.io/v1, Resource=templateinstances [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for template.openshift.io/v1, Resource=templates [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for user.openshift.io/v1, Resource=groups [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for user.openshift.io/v1, Resource=identities [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-api-machinery][Feature:ServerSideApply] Server-Side Apply should work for user.openshift.io/v1, Resource=users [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + "[sig-apps] ControllerRevision [Serial] should manage the lifecycle of a ControllerRevision [Conformance]": " [Suite:openshift/conformance/serial/minimal] [Suite:k8s]", "[sig-apps] CronJob should be able to schedule after more than 100 missed schedule": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -475,6 +547,132 @@ var Annotations = map[string]string{ "[sig-apps] TTLAfterFinished job should be deleted once it finishes after TTL seconds": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs adoption will orphan all RCs and adopt them back when recreated [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs generation should deploy based on a status version bump [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs ignores deployer and lets the config with a NewReplicationControllerCreated reason should let the deployment config with a NewReplicationControllerCreated reason [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs initially should not deploy if pods never transition to ready [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs keep the deployer pod invariant valid should deal with cancellation after deployer pod succeeded [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs keep the deployer pod invariant valid should deal with cancellation of running deployment 
[apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs keep the deployer pod invariant valid should deal with config change in case the deployment is still running [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs paused should disable actions on deployments [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs reaper [Slow] should delete all failed deployer pods and hook pods [apigroup:apps.openshift.io]": "", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs rolled back should rollback to an older deployment [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs should adhere to Three Laws of Controllers [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs should respect image stream tag reference policy resolve the image pull spec [apigroup:apps.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs viewing rollout history should print the rollout history [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs when changing image change trigger should successfully trigger from an updated image [apigroup:apps.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs when run iteratively should immediately start a new deployment [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs when run iteratively should only deploy the last deployment [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs when tagging images should successfully tag the deployed image [apigroup:apps.openshift.io][apigroup:authorization.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs with custom deployments should run the custom deployment steps [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs with enhanced status should include various info in status [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs with env in params referencing the configmap should expand the config map key to a value [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs with failing hook should get all logs from retried hooks [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs with minimum ready seconds set should not transition the 
deployment to Complete before satisfied [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs with multiple image change triggers should run a successful deployment with a trigger used by different containers [apigroup:apps.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs with multiple image change triggers should run a successful deployment with multiple triggers [apigroup:apps.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs with revision history limits should never persist more old deployments than acceptable after being observed by the controller [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs with test deployments should run a deployment to completion and then scale to zero [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:DeploymentConfig] deploymentconfigs won't deploy RC with unresolved images when patched with empty image [apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:Jobs] Users should be able to create and run a job in a user project": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:OpenShiftControllerManager] TestDeployScale [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:OpenShiftControllerManager] TestDeploymentConfigDefaults [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:OpenShiftControllerManager] TestTriggers_MultipleICTs [apigroup:apps.openshift.io][apigroup:images.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:OpenShiftControllerManager] TestTriggers_configChange [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:OpenShiftControllerManager] TestTriggers_imageChange [apigroup:apps.openshift.io][apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:OpenShiftControllerManager] TestTriggers_imageChange_nonAutomatic [apigroup:image.openshift.io][apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-apps][Feature:OpenShiftControllerManager] TestTriggers_manual [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-arch] Cluster topology single node tests Verify that OpenShift components deploy one replica in SingleReplica topology mode": " [Suite:openshift/conformance/parallel]", + + "[sig-arch] ClusterOperators [apigroup:config.openshift.io] should define at least one namespace in their lists of related objects": " [Suite:openshift/conformance/parallel]", + + "[sig-arch] ClusterOperators [apigroup:config.openshift.io] should define at least one related object that is not a namespace": " [Suite:openshift/conformance/parallel]", + + "[sig-arch] ClusterOperators [apigroup:config.openshift.io] should define valid related objects": " [Suite:openshift/conformance/parallel]", + + "[sig-arch] Managed cluster should ensure control plane operators do not make themselves 
unevictable": " [Suite:openshift/conformance/parallel]", + + "[sig-arch] Managed cluster should ensure control plane pods do not run in best-effort QoS": " [Suite:openshift/conformance/parallel]", + + "[sig-arch] Managed cluster should ensure platform components have system-* priority class associated": " [Suite:openshift/conformance/parallel]", + + "[sig-arch] Managed cluster should ensure pods use downstream images from our release image with proper ImagePullPolicy [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-arch] Managed cluster should expose cluster services outside the cluster [apigroup:route.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-arch] Managed cluster should have operators on the cluster version [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-arch] Managed cluster should only include cluster daemonsets that have maxUnavailable update of 10 or 33 percent": " [Suite:openshift/conformance/parallel]", + + "[sig-arch] Managed cluster should recover when operator-owned objects are deleted [Disruptive][apigroup:config.openshift.io]": " [Serial]", + + "[sig-arch] Managed cluster should set requests but not limits": " [Suite:openshift/conformance/parallel]", + + "[sig-arch] [Conformance] FIPS TestFIPS": " [Suite:openshift/conformance/parallel/minimal]", + + "[sig-arch] [Conformance] sysctl pod should not start for sysctl not on whitelist kernel.msgmax": " [Suite:openshift/conformance/parallel/minimal]", + + "[sig-arch] [Conformance] sysctl pod should not start for sysctl not on whitelist net.ipv4.ip_dynaddr": " [Suite:openshift/conformance/parallel/minimal]", + + "[sig-arch] [Conformance] sysctl whitelists kernel.shm_rmid_forced": " [Suite:openshift/conformance/parallel/minimal]", + + "[sig-arch] [Conformance] sysctl whitelists net.ipv4.ip_local_port_range": " [Suite:openshift/conformance/parallel/minimal]", + + "[sig-arch] [Conformance] sysctl whitelists net.ipv4.ip_unprivileged_port_start": " [Suite:openshift/conformance/parallel/minimal]", + + "[sig-arch] [Conformance] sysctl whitelists net.ipv4.ping_group_range": " [Suite:openshift/conformance/parallel/minimal]", + + "[sig-arch] [Conformance] sysctl whitelists net.ipv4.tcp_syncookies": " [Suite:openshift/conformance/parallel/minimal]", + + "[sig-arch] ocp payload should be based on existing source OLM version should contain the source commit id": " [Suite:openshift/conformance/parallel]", + + "[sig-arch][Early] CRDs for openshift.io should have a status in the CRD schema": " [Suite:openshift/conformance/parallel]", + + "[sig-arch][Early] CRDs for openshift.io should have subresource.status": " [Suite:openshift/conformance/parallel]", + + "[sig-arch][Early] Managed cluster should [apigroup:config.openshift.io] start all core operators": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-arch][Feature:ClusterUpgrade] Cluster should remain functional during upgrade [Disruptive]": " [Serial]", + + "[sig-arch][Late] clients should not use APIs that are removed in upcoming releases [apigroup:apiserver.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-arch][Late] operators should not create watch channels very often [apigroup:apiserver.openshift.io]": " [Suite:openshift/conformance/parallel]", + "[sig-auth] Certificates API [Privileged:ClusterAdmin] should support CSR API operations [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", "[sig-auth] 
Certificates API [Privileged:ClusterAdmin] should support building a client with a CSR": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -519,6 +717,138 @@ var Annotations = map[string]string{ "[sig-auth] [Feature:NodeAuthorizer] Getting an existing secret should exit with the Forbidden error": " [Skipped:ibmroks] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-auth][Feature:Authentication] TestFrontProxy should succeed": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:BootstrapUser] The bootstrap user should successfully login with password decoded from kubeadmin secret [Disruptive]": " [Serial]", + + "[sig-auth][Feature:HTPasswdAuth] HTPasswd IDP should successfully configure htpasswd and be responsive [apigroup:user.openshift.io][apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:LDAP] LDAP IDP should authenticate against an ldap server [apigroup:user.openshift.io][apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:LDAP] LDAP should start an OpenLDAP test server [apigroup:user.openshift.io][apigroup:security.openshift.io][apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:LDAP][Serial] ldap group sync can sync groups from ldap [apigroup:user.openshift.io][apigroup:authorization.openshift.io][apigroup:security.openshift.io]": " [Suite:openshift/conformance/serial]", + + "[sig-auth][Feature:OAuthServer] ClientSecretWithPlus should create oauthclient [apigroup:oauth.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OAuthServer] OAuth Authenticator accepts sha256 access tokens [apigroup:user.openshift.io][apigroup:oauth.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OAuthServer] OAuth server [apigroup:auth.openshift.io] should use http1.1 only to prevent http2 connection reuse": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OAuthServer] OAuth server has the correct token and certificate fallback semantics [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OAuthServer] [Headers][apigroup:route.openshift.io][apigroup:config.openshift.io][apigroup:oauth.openshift.io] expected headers returned from the authorize URL": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OAuthServer] [Headers][apigroup:route.openshift.io][apigroup:config.openshift.io][apigroup:oauth.openshift.io] expected headers returned from the grant URL": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OAuthServer] [Headers][apigroup:route.openshift.io][apigroup:config.openshift.io][apigroup:oauth.openshift.io] expected headers returned from the login URL for the allow all IDP": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OAuthServer] [Headers][apigroup:route.openshift.io][apigroup:config.openshift.io][apigroup:oauth.openshift.io] expected headers returned from the login URL for the bootstrap IDP": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OAuthServer] [Headers][apigroup:route.openshift.io][apigroup:config.openshift.io][apigroup:oauth.openshift.io] expected headers returned from the login URL for when there is only one IDP": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OAuthServer] 
[Headers][apigroup:route.openshift.io][apigroup:config.openshift.io][apigroup:oauth.openshift.io] expected headers returned from the logout URL": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OAuthServer] [Headers][apigroup:route.openshift.io][apigroup:config.openshift.io][apigroup:oauth.openshift.io] expected headers returned from the root URL": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OAuthServer] [Headers][apigroup:route.openshift.io][apigroup:config.openshift.io][apigroup:oauth.openshift.io] expected headers returned from the token URL": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OAuthServer] [Headers][apigroup:route.openshift.io][apigroup:config.openshift.io][apigroup:oauth.openshift.io] expected headers returned from the token request URL": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age [apigroup:oauth.openshift.io] to generate tokens that do not expire works as expected when using a code authorization flow [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age [apigroup:oauth.openshift.io] to generate tokens that do not expire works as expected when using a token authorization flow [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age [apigroup:oauth.openshift.io] to generate tokens that expire shortly works as expected when using a code authorization flow [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OAuthServer] [Token Expiration] Using a OAuth client with a non-default token max age [apigroup:oauth.openshift.io] to generate tokens that expire shortly works as expected when using a token authorization flow [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OAuthServer] well-known endpoint should be reachable [apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OpenShiftAuthorization] RBAC proxy for openshift authz RunLegacyClusterRoleBindingEndpoint should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OpenShiftAuthorization] RBAC proxy for openshift authz RunLegacyClusterRoleEndpoint should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OpenShiftAuthorization] RBAC proxy for openshift authz RunLegacyEndpointConfirmNoEscalation [apigroup:authorization.openshift.io] should succeed": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OpenShiftAuthorization] RBAC proxy for openshift authz RunLegacyLocalRoleBindingEndpoint should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OpenShiftAuthorization] RBAC proxy for openshift authz RunLegacyLocalRoleEndpoint should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OpenShiftAuthorization] The default cluster RBAC policy should have correct RBAC rules": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OpenShiftAuthorization] authorization 
TestAuthorizationSubjectAccessReview should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OpenShiftAuthorization] authorization TestAuthorizationSubjectAccessReviewAPIGroup should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OpenShiftAuthorization] authorization TestBrowserSafeAuthorizer should succeed [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OpenShiftAuthorization] authorization TestClusterReaderCoverage should succeed": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OpenShiftAuthorization] scopes TestScopeEscalations should succeed [apigroup:user.openshift.io][apigroup:authorization.openshift.io][apigroup:build.openshift.io][apigroup:oauth.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OpenShiftAuthorization] scopes TestScopedImpersonation should succeed [apigroup:user.openshift.io][apigroup:authorization.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OpenShiftAuthorization] scopes TestScopedTokens should succeed [apigroup:user.openshift.io][apigroup:authorization.openshift.io][apigroup:oauth.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OpenShiftAuthorization] scopes TestTokensWithIllegalScopes should succeed [apigroup:oauth.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OpenShiftAuthorization] scopes TestUnknownScopes should succeed [apigroup:user.openshift.io][apigroup:authorization.openshift.io][apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OpenShiftAuthorization] self-SAR compatibility TestBootstrapPolicySelfSubjectAccessReviews should succeed [apigroup:user.openshift.io][apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OpenShiftAuthorization] self-SAR compatibility TestSelfSubjectAccessReviewsNonExistingNamespace should succeed [apigroup:user.openshift.io][apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:OpenShiftAuthorization][Serial] authorization TestAuthorizationResourceAccessReview should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/serial]", + + "[sig-auth][Feature:PodSecurity] restricted-v2 SCC should mutate empty securityContext to match restricted PSa profile": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:PodSecurity][Feature:SCC] creating pod controllers": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:ProjectAPI] TestInvalidRoleRefs should succeed [apigroup:authorization.openshift.io][apigroup:user.openshift.io][apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:ProjectAPI] TestProjectIsNamespace should succeed [apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:ProjectAPI] TestProjectWatch should succeed [apigroup:project.openshift.io][apigroup:authorization.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:ProjectAPI] TestProjectWatchWithSelectionPredicate should succeed 
[apigroup:project.openshift.io][apigroup:authorization.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:ProjectAPI] TestScopedProjectAccess should succeed [apigroup:user.openshift.io][apigroup:project.openshift.io][apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:ProjectAPI] TestUnprivilegedNewProject [apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:ProjectAPI][Serial] TestUnprivilegedNewProjectDenied [apigroup:authorization.openshift.io][apigroup:project.openshift.io]": " [Suite:openshift/conformance/serial]", + + "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Create a RBAC rolebinding when subject is not already bound and is not permitted by any RBR should fail [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Create a rolebinding that also contains system:non-existing users should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Create a rolebinding when subject is already bound should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Create a rolebinding when subject is not already bound and is not permitted by any RBR should fail [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Create a rolebinding when subject is permitted by RBR should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Create a rolebinding when there are no restrictions should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:RoleBindingRestrictions] RoleBindingRestrictions should be functional Rolebinding restrictions tests single project should succeed [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:SCC][Early] should not have pod creation failures during install": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:SecurityContextConstraints] TestAllowedSCCViaRBAC [apigroup:project.openshift.io][apigroup:user.openshift.io][apigroup:authorization.openshift.io][apigroup:security.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:SecurityContextConstraints] TestAllowedSCCViaRBAC with service account [apigroup:security.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:SecurityContextConstraints] TestPodDefaultCapabilities": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:SecurityContextConstraints] TestPodUpdateSCCEnforcement [apigroup:user.openshift.io][apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:SecurityContextConstraints] TestPodUpdateSCCEnforcement with service account": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:UserAPI] groups should work 
[apigroup:user.openshift.io][apigroup:project.openshift.io][apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-auth][Feature:UserAPI] users can manipulate groups [apigroup:user.openshift.io][apigroup:authorization.openshift.io][apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]", + "[sig-autoscaling] Cluster size autoscaler scalability [Slow] CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6]": " [Suite:k8s]", "[sig-autoscaling] Cluster size autoscaler scalability [Slow] should scale down empty nodes [Feature:ClusterAutoscalerScalability3]": " [Suite:k8s]", @@ -577,225 +907,1035 @@ var Annotations = map[string]string{ "[sig-autoscaling] Cluster size autoscaling [Slow] should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]", - "[sig-autoscaling] Cluster size autoscaling [Slow] should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + "[sig-autoscaling] Cluster size autoscaling [Slow] should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain[Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + + "[sig-autoscaling] DNS horizontal autoscaling [Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-autoscaling] DNS horizontal autoscaling kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios": " [Disabled:SpecialConfig] [Suite:k8s]", + + "[sig-autoscaling] [Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling Autoscaling a service from 1 pod and 3 nodes to 8 pods and >=4 nodes takes less than 15 minutes": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) CustomResourceDefinition Should scale with a CRD targetRef": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Should scale from 1 pod to 2 pods": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) ReplicationController light [Slow] Should scale from 2 pods to 1 pod": " 
[Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod using Average Utilization for aggregation": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case) Should not scale up on a busy sidecar with an idle application": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods on a busy application with an idle sidecar container": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicationController Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods and verify decision stability": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicationController Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod and verify decision stability": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] 
[Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with autoscaling disabled shouldn't scale down": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with autoscaling disabled shouldn't scale up": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with both scale up and down controls configured should keep recommendation within the range over two stabilization windows": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with both scale up and down controls configured should keep recommendation within the range with stabilization window and pod limit rate": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with long upscale stabilization window should scale up only after the stabilization period": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by number of Pods rate should scale down no more than given number of Pods per minute": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by number of Pods rate should scale up no more than given number of Pods per minute": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by percentage should scale down no more than given percentage of current Pods per minute": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by percentage should scale up no more than given percentage of current Pods per minute": " [Suite:k8s]", + + "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with short downscale stabilization window should scale down soon after the stabilization period": " [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Object from Stackdriver should scale down to 0": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Object from Stackdriver should scale down": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Pod from Stackdriver should scale down with Prometheus": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Pod from Stackdriver should scale down": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling 
(scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Pod from Stackdriver should scale up with two metrics": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with External Metric from Stackdriver should scale down with target average value": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with External Metric from Stackdriver should scale down with target value": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with External Metric from Stackdriver should scale up with two metrics": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should not scale down when one metric is missing (Container Resource and External Metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should not scale down when one metric is missing (Pod and Object Metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should scale up when one metric is missing (Pod and External metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should scale up when one metric is missing (Resource and Object metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-builds][Feature:Builds] Multi-stage image builds should succeed [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] Optimized image builds should succeed [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] build can reference a cluster service with a build being created from new-build should be able to run a build that references a cluster service [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Skipped:Proxy] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] build have source revision metadata started build should contain source revision information [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] build with empty source started build should build even with an empty source in build config [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] build without output image building from templates 
should create an image from a S2i template without an output image reference defined [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] build without output image building from templates should create an image from a docker template without an output image reference defined [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] buildconfig secret injector should inject secrets to the appropriate buildconfigs [apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] custom build with buildah being created from new-build should complete build with custom builder image [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] imagechangetriggers imagechangetriggers should trigger builds of all types [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] oc new-app should fail with a --name longer than 58 characters [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] oc new-app should succeed with a --name of 58 characters [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Skipped:Proxy] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] oc new-app should succeed with an imagestream [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] prune builds based on settings in the buildconfig buildconfigs should have a default history limit set when created via the group api [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] prune builds based on settings in the buildconfig should prune builds after a buildConfig change [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] prune builds based on settings in the buildconfig should prune canceled builds based on the failedBuildsHistoryLimit setting [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] prune builds based on settings in the buildconfig should prune completed builds based on the successfulBuildsHistoryLimit setting [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] prune builds based on settings in the buildconfig should prune errored builds based on the failedBuildsHistoryLimit setting [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] prune builds based on settings in the buildconfig should prune failed builds based on the failedBuildsHistoryLimit setting [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] remove all builds when build configuration is removed oc delete buildconfig should start builds and delete the buildconfig [apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] result image should have proper labels set Docker build from a template should create a image from 
\"test-docker-build.json\" template with proper Docker labels [apigroup:build.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] result image should have proper labels set S2I build from a template should create a image from \"test-s2i-build.json\" template with proper Docker labels [apigroup:build.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] s2i build with a quota Building from a template should create an s2i build with a quota and run it [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] s2i build with a root user image should create a root build and fail without a privileged SCC [apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] s2i build with a root user image should create a root build and pass with a privileged SCC [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] verify /run filesystem contents are writeable using a simple Docker Strategy Build [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds] verify /run filesystem contents do not have unexpected content using a simple Docker Strategy Build [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration build config no ocm rollout [apigroup:config.openshift.io] Apply default proxy configuration to docker build pod through env vars [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration build config no ocm rollout [apigroup:config.openshift.io] Apply default proxy configuration to source build pod through env vars [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration build config no ocm rollout [apigroup:config.openshift.io] Apply git proxy configuration to build pod [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration build config with ocm rollout [apigroup:config.openshift.io] Apply default image label configuration to build pod": "", + + "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration build config with ocm rollout [apigroup:config.openshift.io] Apply env configuration to build pod": "", + + "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration build config with ocm rollout [apigroup:config.openshift.io] Apply node selector configuration to build pod": "", + + "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration build config with ocm rollout [apigroup:config.openshift.io] Apply override image label configuration to build pod [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration build config with ocm rollout [apigroup:config.openshift.io] Apply resource configuration to build pod": "", + + "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster 
configuration build config with ocm rollout [apigroup:config.openshift.io] Apply toleration override configuration to build pod": "", + + "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration registries config context should allow registries to be blacklisted": "", + + "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration registries config context should allow registries to be whitelisted": "", + + "[sig-builds][Feature:Builds][Serial][Slow][Disruptive] alter builds via cluster configuration registries config context should default registry search to docker.io for image pulls": "", + + "[sig-builds][Feature:Builds][Slow] Capabilities should be dropped for s2i builders s2i build with a rootable builder should not be able to switch to root with an assemble script [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] build can have Dockerfile input being created from new-build should be able to start a build from Dockerfile with FROM reference to scratch [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] build can have Dockerfile input being created from new-build should create a image via new-build [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] build can have Dockerfile input being created from new-build should create a image via new-build and infer the origin tag [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] build can have Dockerfile input being created from new-build testing build image with dockerfile contains a file path uses a variable in its name [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] build can have Dockerfile input being created from new-build testing build image with invalid dockerfile content [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] build can have container image source [apigroup:image.openshift.io] buildconfig with input source image and docker strategy should complete successfully and contain the expected file [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] build can have container image source [apigroup:image.openshift.io] buildconfig with input source image and s2i strategy should complete successfully and contain the expected file [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] build can have container image source [apigroup:image.openshift.io] creating a build with an input source image and custom strategy should resolve the imagestream references and secrets [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] build can have container image source [apigroup:image.openshift.io] creating a build with an input source image and docker strategy should resolve the imagestream references and secrets [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] build can have container image source [apigroup:image.openshift.io] creating a build with an input source image and s2i strategy should resolve the imagestream references and secrets [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] build controller RunBuildCompletePodDeleteTest should succeed [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] build controller RunBuildDeleteTest should succeed [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] build 
controller RunBuildRunningPodDeleteTest should succeed [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] builds should have deadlines oc start-build docker-build --wait Docker: should start a build and wait for the build failed and build pod being killed by kubelet [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] builds should have deadlines oc start-build source-build --wait Source: should start a build and wait for the build failed and build pod being killed by kubelet [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] builds should support proxies start build with broken proxy and a no_proxy override should start a docker build and wait for the build to succeed [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] builds should support proxies start build with broken proxy and a no_proxy override should start an s2i build and wait for the build to succeed [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] builds should support proxies start build with broken proxy should start a build and wait for the build to fail [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] builds should support proxies start build with cluster-wide custom PKI should mount the custom PKI into the build if specified [apigroup:config.openshift.io][apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] builds with a context directory docker context directory build should docker build an application using a context directory [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] builds with a context directory s2i context directory build should s2i build an application using a context directory [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] can use build secrets build with secrets and configMaps should contain secrets during the docker strategy build": "", + + "[sig-builds][Feature:Builds][Slow] can use build secrets build with secrets and configMaps should contain secrets during the source strategy build [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] can use private repositories as build input build using an HTTP token should be able to clone source code via an HTTP token [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] can use private repositories as build input build using an ssh private key should be able to clone source code via ssh [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] can use private repositories as build input build using an ssh private key should be able to clone source code via ssh using SCP-style URIs [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] completed builds should have digest of the image in their status Docker build started with log level >5 should save the image digest when finished [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] completed builds should have digest of the image in their status Docker build started with normal log level should save the image digest when finished [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] completed builds should have digest of the image in their status S2I build started with log level >5 should save the image digest when finished 
[apigroup:build.openshift.io][apigroup:image.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] completed builds should have digest of the image in their status S2I build started with normal log level should save the image digest when finished [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] incremental s2i build Building from a template should create a build from \"incremental-auth-build.json\" template and run it [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] s2i build with environment file in sources Building from a template should create a image from \"test-env-build.json\" template and run it in a pod [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context Setting build-args on Docker builds Should accept build args that are specified in the Dockerfile [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context Setting build-args on Docker builds Should complete with a warning on non-existent build-arg [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context Setting build-args on Docker builds Should copy build args from BuildConfig to Build [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context Trigger builds with branch refs matching directories on master branch Should checkout the config branch, not config directory [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context binary builds shoud accept --from-archive with https URL as an input [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context binary builds shoud accept --from-file with https URL as an input [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context binary builds should accept --from-dir as input [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context binary builds should accept --from-file as input [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context binary builds should accept --from-repo as input [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context binary builds should accept --from-repo with --commit as input [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context binary builds should reject binary build requests without a --from-xxxx value [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context cancel a binary build that doesn't start running in 5 minutes should start a build and wait for the build to be cancelled [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context cancel a build started by oc start-build --wait should start a build and wait for the build to cancel [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] 
starting a build using CLI start-build test context oc start-build --wait should start a build and wait for the build to complete [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context oc start-build --wait should start a build and wait for the build to fail [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context oc start-build with pr ref should start a build from a PR ref, wait for the build to complete, and confirm the right level was used [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context override environment BUILD_LOGLEVEL in buildconfig can be overridden by build-loglevel [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context override environment BUILD_LOGLEVEL in buildconfig should create verbose output [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context override environment should accept environment variables [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context s2i build maintaining symlinks should s2i build image and maintain symlinks [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] starting a build using CLI start-build test context start a build via a webhook should be able to start builds via the webhook with valid secrets and fail with invalid secrets [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] testing build configuration hooks testing postCommit hook should run docker postCommit hooks [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] testing build configuration hooks testing postCommit hook should run s2i postCommit hooks [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] update failure status Build status Docker fetch image content failure should contain the Docker build fetch image content reason and message [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] update failure status Build status Docker fetch source failure should contain the Docker build fetch source failure reason and message [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] update failure status Build status OutOfMemoryKilled should contain OutOfMemoryKilled failure reason and message [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] update failure status Build status S2I bad context dir failure should contain the S2I bad context dir failure reason and message [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] update failure status Build status S2I fetch source failure should contain the S2I fetch source failure reason and message [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] update failure status Build status failed assemble container should contain the failure reason related to an assemble script failing in s2i [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] update failure status Build status failed https proxy invalid url should contain the generic failure reason and message [apigroup:build.openshift.io]": "", + + 
"[sig-builds][Feature:Builds][Slow] update failure status Build status fetch builder image failure should contain the fetch builder image failure reason and message [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] update failure status Build status postcommit hook failure should contain the post commit hook failure reason and message [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] update failure status Build status push image to registry failure should contain the image push to registry failure reason and message [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] using build configuration runPolicy build configuration with Parallel build run policy runs the builds in parallel [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] using build configuration runPolicy build configuration with Serial build run policy handling cancellation starts the next build immediately after one is canceled [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] using build configuration runPolicy build configuration with Serial build run policy handling deletion starts the next build immediately after running one is deleted [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] using build configuration runPolicy build configuration with Serial build run policy handling failure starts the next build immediately after one fails [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] using build configuration runPolicy build configuration with Serial build run policy runs the builds in serial order [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] using build configuration runPolicy build configuration with SerialLatestOnly build run policy runs the builds in serial order but cancel previous builds [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] using pull secrets in a build start-build test context binary builds should be able to run a build that is implicitly pulling from the internal registry [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] using pull secrets in a build start-build test context pulling from an external authenticated registry should be able to use a pull secret in a build [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][Slow] using pull secrets in a build start-build test context pulling from an external authenticated registry should be able to use a pull secret linked to the builder service account [apigroup:build.openshift.io]": "", + + "[sig-builds][Feature:Builds][pullsearch] docker build where the registry is not specified Building from a Dockerfile whose FROM image ref does not specify the image registry should create a docker build that has buildah search from our predefined list of image registries and succeed [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds][pullsecret] docker build using a pull secret Building from a template should create a docker build that pulls using a secret run it [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds][timing] capture build stages and durations should record build stages and durations for docker [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + 
"[sig-builds][Feature:Builds][timing] capture build stages and durations should record build stages and durations for s2i [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds][valueFrom] process valueFrom in build strategy environment variables should fail resolving unresolvable valueFrom in docker build environment variable references [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds][valueFrom] process valueFrom in build strategy environment variables should fail resolving unresolvable valueFrom in sti build environment variable references [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds][valueFrom] process valueFrom in build strategy environment variables should successfully resolve valueFrom in docker build environment variables [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds][valueFrom] process valueFrom in build strategy environment variables should successfully resolve valueFrom in s2i build environment variables [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds][volumes] build volumes should mount given secrets and configmaps into the build pod for docker strategy builds [apigroup:image.openshift.io][apigroup:build.openshift.io][apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds][volumes] build volumes should mount given secrets and configmaps into the build pod for source strategy builds [apigroup:image.openshift.io][apigroup:build.openshift.io][apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds][volumes] csi build volumes within Tech Preview enabled cluster [apigroup:config.openshift.io] should mount given csi shared resource secret into the build pod for docker strategy builds [apigroup:image.openshift.io][apigroup:build.openshift.io][apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds][volumes] csi build volumes within Tech Preview enabled cluster [apigroup:config.openshift.io] should mount given csi shared resource secret into the build pod for source strategy builds [apigroup:image.openshift.io][apigroup:build.openshift.io][apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds][volumes] csi build volumes within Tech Preview enabled cluster [apigroup:config.openshift.io] should mount given csi shared resource secret without resource refresh into the build pod for docker strategy builds [apigroup:image.openshift.io][apigroup:build.openshift.io][apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds][volumes] csi build volumes within Tech Preview enabled cluster [apigroup:config.openshift.io] should mount given csi shared resource secret without resource refresh into the build pod for source strategy builds [apigroup:image.openshift.io][apigroup:build.openshift.io][apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds][webhook] TestWebhook [apigroup:build.openshift.io][apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", + + 
"[sig-builds][Feature:Builds][webhook] TestWebhookGitHubPing [apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds][webhook] TestWebhookGitHubPushWithImage [apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:Builds][webhook] TestWebhookGitHubPushWithImageStream [apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-builds][Feature:JenkinsRHELImagesOnly][Feature:Jenkins][Feature:Builds][sig-devex][Slow] openshift pipeline build jenkins pipeline build config strategy using a jenkins instance launched with the ephemeral template [apigroup:build.openshift.io]": "", + + "[sig-ci] [Early] prow job name should match cluster version [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-ci] [Early] prow job name should match network type": " [Suite:openshift/conformance/parallel]", + + "[sig-ci] [Early] prow job name should match platform type": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 should support forwarding over websockets": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects NO client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a client that connects, sends NO DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl Port forwarding With a server listening on localhost should support forwarding over websockets": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects NO client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends NO DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Guestbook application should create and stop a working application [Conformance]": " [Slow] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl api-versions should check if v1 is in available api versions [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl apply apply set/view last-applied": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl apply should apply a new configuration to an existing RC": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl apply should 
reuse port when apply to an existing SVC": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl cluster-info dump should check if cluster-info dump succeeds": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes control plane services is included in cluster-info [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl copy should copy a file from a running Pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl create quota should create a quota with scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl create quota should create a quota without scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl create quota should reject quota with invalid scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for cronjob": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for rc and pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl diff should check if kubectl diff finds a difference for Deployments [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl events should show event when pod is created": " [Disabled:RebaseInProgress] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl expose should create services for rc [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl get componentstatuses should get componentstatuses": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl label should update the label on a resource [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl logs should be able to retrieve and filter logs [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl patch should add annotations for pods in rc [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl replace should update a single-container pod's image [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl run pod should create a pod from an image when restart is Never [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl server-side dry-run should check if kubectl can dry-run update Pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl taint [Serial] should remove all the taints with the same key off a node": " [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl taint [Serial] should update the taint on a node": " [Suite:openshift/conformance/serial] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl validation should create/apply a CR with unknown fields for CRD with no validation schema": " 
[Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl validation should create/apply a valid CR for CRD with validation schema": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl validation should create/apply an invalid/valid CR with arbitrary-extra properties for CRD with partially-specified validation schema": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl validation should detect unknown metadata fields in both the root and embedded object of a CR": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl validation should detect unknown metadata fields of a typed object": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Kubectl version should check is all data is printed [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Proxy server should support --unix-socket=/path [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Proxy server should support proxy with --port 0 [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should contain last line of the log": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should handle in-cluster config": " [Disabled:Broken] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should return command exit codes [Slow] running a failing command with --leave-stdin-open": " [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should return command exit codes [Slow] running a failing command without --restart=Never": " [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should return command exit codes [Slow] running a failing command without --restart=Never, but with --rm": " [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a failing command": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a successful command": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should return command exit codes running a failing command": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should return command exit codes running a successful command": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should support exec through an HTTP proxy": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should support exec through kubectl proxy": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should support exec using resource/name": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should support exec": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should support inline execution and attach": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Simple pod should support port-forward": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + + "[sig-cli] Kubectl client Update Demo should create and 
stop a replication controller [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client Update Demo should scale a replication controller [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + + "[sig-cli] Kubectl client kubectl wait should ignore not found error with --for=delete": " [Disabled:Broken] [Suite:k8s]", + + "[sig-cli] oc --request-timeout works as expected [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc adm build-chain [apigroup:build.openshift.io][apigroup:image.openshift.io][apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc adm cluster-role-reapers [Serial][apigroup:authorization.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/serial]", + + "[sig-cli] oc adm groups [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc adm images [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc adm must-gather runs successfully for audit logs [apigroup:config.openshift.io][apigroup:oauth.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc adm must-gather runs successfully with options": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc adm must-gather runs successfully": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc adm must-gather when looking at the audit logs [sig-node] kubelet runs apiserver processes strictly sequentially in order to not risk audit log corruption": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc adm new-project [apigroup:project.openshift.io][apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc adm node-logs": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc adm policy [apigroup:authorization.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc adm role-reapers [apigroup:authorization.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc adm role-selectors [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc adm serviceaccounts": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc adm storage-admin [apigroup:authorization.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc adm ui-project-commands [apigroup:project.openshift.io][apigroup:authorization.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc adm user-creation [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc adm who-can [apigroup:authorization.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc annotate pod": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc api-resources can output expected information about api-resources": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc api-resources can output expected information about build.openshift.io api-resources [apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc api-resources can output expected information about image.openshift.io api-resources [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc api-resources can output 
expected information about operator.openshift.io api-resources [apigroup:operator.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc api-resources can output expected information about project.openshift.io api-resources [apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc api-resources can output expected information about route.openshift.io api-resources and api-version [apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc api-resources can output expected information about snapshot.storage.k8s.io api-resources [apigroup:snapshot.storage.k8s.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc basics can create and interact with a list of resources": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc basics can create deploymentconfig and clusterquota [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc basics can describe an OAuth access token [apigroup:oauth.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc basics can get version information from API": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc basics can get version information from CLI": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc basics can output expected --dry-run text": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc basics can patch resources [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc basics can process templates [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc basics can show correct whoami result with console": " [Skipped:NoOptionalCapabilities] [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc basics can show correct whoami result": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc builds complex build start-build [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc builds complex build webhooks CRUD [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc builds get buildconfig [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc builds new-build [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc builds patch buildconfig [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc can get list of nodes": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc can route traffic to services [apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc can run inside of a busybox container [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc completion returns expected help messages": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc debug deployment configs from a build [apigroup:image.openshift.io][apigroup:apps.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc debug dissect deployment config debug [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc debug does not require a real resource on the server": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc debug ensure debug does not depend on 
a container actually existing for the selected resource [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc debug ensure it works with image streams [apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc env can set environment variables [apigroup:apps.openshift.io][apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain list uncovered GroupVersionResources": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain networking types when using openshift-sdn should contain proper fields description for special networking types": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should contain proper fields description for apps.openshift.io [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should contain proper fields description for authorization.openshift.io [apigroup:authorization.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should contain proper fields description for config.openshift.io [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should contain proper fields description for console.openshift.io [apigroup:console.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should contain proper fields description for image.openshift.io [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should contain proper fields description for oauth.openshift.io [apigroup:oauth.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should contain proper fields description for project.openshift.io [apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should contain proper fields description for route.openshift.io [apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should contain proper fields description for security.internal.openshift.io [apigroup:security.internal.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should contain proper fields description for security.openshift.io [apigroup:security.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should contain proper fields description for template.openshift.io [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should contain proper fields description for user.openshift.io [apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should contain proper spec+status for CRDs": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should contain spec+status for apps.openshift.io [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should contain spec+status for build.openshift.io [apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should contain spec+status for image.openshift.io [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should contain spec+status for project.openshift.io [apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should 
contain spec+status for route.openshift.io [apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should contain spec+status for security.openshift.io [apigroup:security.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc explain should contain spec+status for template.openshift.io [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc expose can ensure the expose command is functioning as expected [apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc help works as expected": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc idle [apigroup:apps.openshift.io][apigroup:route.openshift.io][apigroup:project.openshift.io][apigroup:image.openshift.io] by all": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc idle [apigroup:apps.openshift.io][apigroup:route.openshift.io][apigroup:project.openshift.io][apigroup:image.openshift.io] by checking previous scale": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc idle [apigroup:apps.openshift.io][apigroup:route.openshift.io][apigroup:project.openshift.io][apigroup:image.openshift.io] by label": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc idle [apigroup:apps.openshift.io][apigroup:route.openshift.io][apigroup:project.openshift.io][apigroup:image.openshift.io] by name": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc label pod": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc observe works as expected with cluster operators [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc observe works as expected": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc probe can ensure the probe command is functioning as expected on deploymentconfigs [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc probe can ensure the probe command is functioning as expected on pods": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc project --show-labels works for projects [apigroup:project.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc project can switch between different projects [apigroup:authorization.openshift.io][apigroup:user.openshift.io][apigroup:project.openshift.io][Serial]": " [Suite:openshift/conformance/serial]", + + "[sig-cli] oc rsh specific flags should work well when access to a remote shell": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc run can use --image flag correctly [apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc secret creates and retrieves expected": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc service creates and deletes services": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc statefulset creates and deletes statefulsets": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] oc status can show correct status after switching between projects [apigroup:project.openshift.io][apigroup:image.openshift.io][Serial]": " [Suite:openshift/conformance/serial]", + + "[sig-cli] oc status returns expected help messages [apigroup:project.openshift.io][apigroup:build.openshift.io][apigroup:image.openshift.io][apigroup:apps.openshift.io][apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] policy scc-subject-review, scc-review 
[apigroup:authorization.openshift.io][apigroup:user.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] templates different namespaces [apigroup:user.openshift.io][apigroup:project.openshift.io][apigroup:template.openshift.io][apigroup:authorization.openshift.io][Skipped:Disconnected]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli] templates process [apigroup:apps.openshift.io][apigroup:template.openshift.io][Skipped:Disconnected]": " [Suite:openshift/conformance/parallel]", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/authentication.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/builds.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/config.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/deployments.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/describer.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/edit.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/env.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/framework-test.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/get.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/image-lookup.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/images.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/printer.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/quota.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/secrets.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/set-data.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/set-image.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/set-liveness-probe.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/setbuildhook.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/setbuildsecret.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/triggers.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Feature:LegacyCommandTests][Disruptive][Serial] test-cmd: test/cmd/volumes.sh [apigroup:image.openshift.io]": "", + + "[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] copy by strategy should copy files with the rsync strategy": "", + + "[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] copy by strategy should copy files with the 
rsync-daemon strategy": "", + + "[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] copy by strategy should copy files with the tar strategy": "", + + "[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] rsync specific flags should honor multiple --exclude flags": "", + + "[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] rsync specific flags should honor multiple --include flags": "", + + "[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] rsync specific flags should honor the --exclude flag": "", + + "[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] rsync specific flags should honor the --include flag": "", + + "[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] rsync specific flags should honor the --no-perms flag": "", + + "[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] rsync specific flags should honor the --progress flag": "", + + "[sig-cli][Slow] can use rsync to upload files to pods [apigroup:template.openshift.io] using a watch should watch for changes and rsync them": "", + + "[sig-cloud-provider][Feature:OpenShiftCloudControllerManager][Late] Deploy an external cloud provider [apigroup:machineconfiguration.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cluster-lifecycle] CSRs from machines that are not recognized by the cloud provider are not approved": " [Suite:openshift/conformance/parallel]", + + "[sig-cluster-lifecycle] Pods cannot access the /config/master API endpoint": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-cluster-lifecycle] TestAdminAck should succeed [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cluster-lifecycle][Feature:DisasterRecovery][Disruptive] [Feature:NodeRecovery] Cluster should survive master and worker failure and recover with machine health checks [apigroup:machine.openshift.io]": " [Serial]", + + "[sig-cluster-lifecycle][Feature:Machines] Managed cluster should have machine resources [apigroup:machine.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cluster-lifecycle][Feature:Machines][Disruptive] Managed cluster should recover from deleted worker machines [apigroup:machine.openshift.io]": " [Serial]", + + "[sig-cluster-lifecycle][Feature:Machines][Early] Managed cluster should have same number of Machines and Nodes [apigroup:machine.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-cluster-lifecycle][Feature:Machines][Serial] Managed cluster should grow and decrease when scaling different machineSets simultaneously [Timeout:30m][apigroup:machine.openshift.io]": " [Suite:openshift/conformance/serial]", + + "[sig-coreos] [Conformance] CoreOS bootimages TestBootimagesPresent [apigroup:machineconfiguration.openshift.io]": " [Suite:openshift/conformance/parallel/minimal]", + + "[sig-devex] check registry.redhat.io is available and samples operator can import sample imagestreams run sample related validations [apigroup:config.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/dotnet:6.0-ubi8\" should print the usage": "", + + 
"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/nginx:1.20-ubi7\" should print the usage": "", + + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/nginx:1.20-ubi8\" should print the usage": "", + + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/nodejs:14-ubi7\" should print the usage": "", + + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/nodejs:14-ubi8\" should print the usage": "", + + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/nodejs:16-ubi8\" should print the usage": "", + + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/perl:5.26-ubi8\" should print the usage": "", + + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/perl:5.30-el7\" should print the usage": "", + + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/perl:5.30-ubi8\" should print the usage": "", + + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/perl:5.32-ubi8\" should print the usage": "", + + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/php:7.3-ubi7\" should print the usage": "", + + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/php:7.4-ubi8\" should print the usage": "", + + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/php:8.0-ubi8\" should print the usage": "", + + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/python:2.7-ubi8\" should print the usage": "", + + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/python:3.6-ubi8\" should print the usage": "", + + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/python:3.8-ubi7\" should print the usage": "", + + 
"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/python:3.8-ubi8\" should print the usage": "", + + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/python:3.9-ubi8\" should print the usage": "", + + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:2.5-ubi8\" should print the usage": "", + + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:2.7-ubi7\" should print the usage": "", + + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:2.7-ubi8\" should print the usage": "", - "[sig-autoscaling] Cluster size autoscaling [Slow] should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:3.0-ubi7\" should print the usage": "", - "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain[Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled returning s2i usage when running the image \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:3.0-ubi8\" should print the usage": "", - "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/dotnet:6.0-ubi8\" should be SCL enabled": "", - "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/nginx:1.20-ubi7\" should be SCL enabled": "", - "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/nginx:1.20-ubi8\" should be SCL enabled": "", - "[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/nodejs:14-ubi7\" should be SCL enabled": "", - 
"[sig-autoscaling] Cluster size autoscaling [Slow] shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp]": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/nodejs:14-ubi8\" should be SCL enabled": "", - "[sig-autoscaling] DNS horizontal autoscaling [Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed": " [Disabled:SpecialConfig] [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/nodejs:16-ubi8\" should be SCL enabled": "", - "[sig-autoscaling] DNS horizontal autoscaling kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios": " [Disabled:SpecialConfig] [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/perl:5.26-ubi8\" should be SCL enabled": "", - "[sig-autoscaling] [Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling Autoscaling a service from 1 pod and 3 nodes to 8 pods and >=4 nodes takes less than 15 minutes": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/perl:5.30-el7\" should be SCL enabled": "", - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) CustomResourceDefinition Should scale with a CRD targetRef": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/perl:5.30-ubi8\" should be SCL enabled": "", - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) ReplicationController light Should scale from 1 pod to 2 pods": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/perl:5.32-ubi8\" should be SCL enabled": "", - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) ReplicationController light [Slow] Should scale from 2 pods to 1 pod": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/php:7.3-ubi7\" should be SCL enabled": "", - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/php:7.4-ubi8\" should be SCL enabled": "", - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]", + 
"[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/php:8.0-ubi8\" should be SCL enabled": "", - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/python:2.7-ubi8\" should be SCL enabled": "", - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/python:3.6-ubi8\" should be SCL enabled": "", - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] Deployment (Pod Resource) Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod using Average Utilization for aggregation": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/python:3.8-ubi7\" should be SCL enabled": "", - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/python:3.8-ubi8\" should be SCL enabled": "", - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/python:3.9-ubi8\" should be SCL enabled": "", - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case) Should not scale up on a busy sidecar with an idle application": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:2.5-ubi8\" should be SCL enabled": "", - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods on a busy application with an idle sidecar container": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:2.7-ubi7\" should be SCL enabled": "", - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicationController Should scale from 1 
pod to 3 pods and then from 3 pods to 5 pods and verify decision stability": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:2.7-ubi8\" should be SCL enabled": "", - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow] ReplicationController Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod and verify decision stability": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:3.0-ubi7\" should be SCL enabled": "", - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift images should be SCL enabled using the SCL in s2i images \"image-registry.openshift-image-registry.svc:5000/openshift/ruby:3.0-ubi8\" should be SCL enabled": "", - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Container Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift sample application repositories [sig-devex][Feature:ImageEcosystem][nodejs] test nodejs images with nodejs-rest-http-crud db repo Building nodejs-postgresql app from new-app should build a nodejs-postgresql image and run it in a pod [apigroup:build.openshift.io]": "", - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Utilization for aggregation": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift sample application repositories [sig-devex][Feature:ImageEcosystem][php] test php images with cakephp-ex db repo Building cakephp-mysql app from new-app should build a cakephp-mysql image and run it in a pod [apigroup:build.openshift.io]": "", - "[sig-autoscaling] [Feature:HPA] Horizontal pod autoscaling (scale resource: Memory) [Serial] [Slow] Deployment (Pod Resource) Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods using Average Value for aggregation": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift sample application repositories [sig-devex][Feature:ImageEcosystem][python] test python images with django-ex db repo Building django-psql app from new-app should build a django-psql image and run it in a pod [apigroup:build.openshift.io]": "", - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with autoscaling disabled shouldn't scale down": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][Slow] openshift sample application repositories [sig-devex][Feature:ImageEcosystem][ruby] test ruby images with rails-ex db repo Building rails-postgresql app from new-app should build a rails-postgresql image and run it in a pod [apigroup:build.openshift.io]": "", - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with autoscaling disabled shouldn't scale up": " [Suite:k8s]", + 
"[sig-devex][Feature:ImageEcosystem][mariadb][Slow] openshift mariadb image Creating from a template should instantiate the template [apigroup:image.openshift.io][apigroup:operator.openshift.io][apigroup:config.openshift.io][apigroup:apps.openshift.io]": " [Disabled:Broken]", - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with both scale up and down controls configured should keep recommendation within the range over two stabilization windows": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][mysql][Slow] openshift mysql image Creating from a template should instantiate the template [apigroup:apps.openshift.io]": " [Disabled:Broken]", - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with both scale up and down controls configured should keep recommendation within the range with stabilization window and pod limit rate": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][perl][Slow] hot deploy for openshift perl image hot deploy test should work [apigroup:image.openshift.io][apigroup:operator.openshift.io][apigroup:config.openshift.io][apigroup:build.openshift.io][apigroup:apps.openshift.io]": "", - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with long upscale stabilization window should scale up only after the stabilization period": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][php][Slow] hot deploy for openshift php image CakePHP example should work with hot deploy [apigroup:image.openshift.io][apigroup:operator.openshift.io][apigroup:config.openshift.io][apigroup:build.openshift.io]": "", - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by number of Pods rate should scale down no more than given number of Pods per minute": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][python][Slow] hot deploy for openshift python image Django example should work with hot deploy [apigroup:image.openshift.io][apigroup:operator.openshift.io][apigroup:config.openshift.io][apigroup:build.openshift.io]": "", - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by number of Pods rate should scale up no more than given number of Pods per minute": " [Suite:k8s]", + "[sig-devex][Feature:ImageEcosystem][ruby][Slow] hot deploy for openshift ruby image Rails example should work with hot deploy [apigroup:image.openshift.io][apigroup:operator.openshift.io][apigroup:config.openshift.io][apigroup:build.openshift.io]": "", - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by percentage should scale down no more than given percentage of current Pods per minute": " [Suite:k8s]", + "[sig-devex][Feature:OpenShiftControllerManager] TestAutomaticCreationOfPullSecrets [apigroup:config.openshift.io][apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior) with scale limited by percentage should scale up no more than given percentage of current Pods per minute": " [Suite:k8s]", + "[sig-devex][Feature:OpenShiftControllerManager] TestDockercfgTokenDeletedController [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-autoscaling] [Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default 
behavior) with short downscale stabilization window should scale down soon after the stabilization period": " [Suite:k8s]", + "[sig-devex][Feature:Templates] template-api TestTemplate [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Object from Stackdriver should scale down to 0": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-devex][Feature:Templates] template-api TestTemplateTransformationFromConfig [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Object from Stackdriver should scale down": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-devex][Feature:Templates] templateinstance creation with invalid object reports error should report a failure on creation [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Pod from Stackdriver should scale down with Prometheus": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-devex][Feature:Templates] templateinstance cross-namespace test should create and delete objects across namespaces [apigroup:user.openshift.io][apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Pod from Stackdriver should scale down": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-devex][Feature:Templates] templateinstance impersonation tests [apigroup:user.openshift.io][apigroup:authorization.openshift.io] should pass impersonation creation tests [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with Custom Metric of type Pod from Stackdriver should scale up with two metrics": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-devex][Feature:Templates] templateinstance impersonation tests [apigroup:user.openshift.io][apigroup:authorization.openshift.io] should pass impersonation deletion tests [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with External Metric from Stackdriver should scale down with target average value": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-devex][Feature:Templates] templateinstance impersonation tests [apigroup:user.openshift.io][apigroup:authorization.openshift.io] should pass impersonation update tests [apigroup:template.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with External Metric from Stackdriver should scale down with target 
value": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-devex][Feature:Templates] templateinstance object kinds test should create and delete objects from varying API groups [apigroup:template.openshift.io][apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with External Metric from Stackdriver should scale up with two metrics": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-devex][Feature:Templates] templateinstance readiness test should report failed soon after an annotated objects has failed [apigroup:template.openshift.io][apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should not scale down when one metric is missing (Container Resource and External Metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-devex][Feature:Templates] templateinstance readiness test should report ready soon after all annotated objects are ready [apigroup:template.openshift.io][apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should not scale down when one metric is missing (Pod and Object Metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-devex][Feature:Templates] templateinstance security tests [apigroup:authorization.openshift.io][apigroup:template.openshift.io] should pass security tests [apigroup:route.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should scale up when one metric is missing (Pod and External metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-etcd] etcd cluster has the same number of master nodes and voting members from the endpoints configmap [Early][apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-autoscaling] [HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver) with multiple metrics of different types should scale up when one metric is missing (Resource and Object metrics)": " [Skipped:gce] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-etcd] etcd leader changes are not excessive [Late]": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 should support forwarding over websockets": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-etcd] etcd record the start revision of the etcd-operator [Early]": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects NO client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-etcd][Feature:DisasterRecovery][Disruptive] 
[Feature:EtcdRecovery] Cluster should recover from a backup taken on one node and recovered on another [apigroup:operator.openshift.io]": " [Serial]", - "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-etcd][Feature:DisasterRecovery][Disruptive] [Feature:EtcdRecovery] Cluster should restore itself after quorum loss [apigroup:machine.openshift.io][apigroup:operator.openshift.io]": " [Serial]", - "[sig-cli] Kubectl Port forwarding With a server listening on 0.0.0.0 that expects a client request should support a client that connects, sends NO DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-etcd][Feature:EtcdVerticalScaling][Suite:openshift/etcd/scaling] etcd is able to vertically scale up and down with a single node [Timeout:60m][apigroup:machine.openshift.io]": "", - "[sig-cli] Kubectl Port forwarding With a server listening on localhost should support forwarding over websockets": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry] Image registry [apigroup:route.openshift.io] should redirect on blob pull [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects NO client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageAppend] Image append should create images by appending them [apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageExtract] Image extract should extract content from an image [apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl Port forwarding With a server listening on localhost that expects a client request should support a client that connects, sends NO DATA, and disconnects": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageInfo] Image info should display information about images [apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Guestbook application should create and stop a working application [Conformance]": " [Slow] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageLayers] Image layer subresource should identify a deleted image as missing [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Kubectl api-versions should check if v1 is in available api versions [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageLayers] Image layer subresource should return layers from tagged images [apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Kubectl apply apply set/view last-applied": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageLookup] Image policy should perform 
lookup when the Deployment gets the resolve-names annotation later [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Kubectl apply should apply a new configuration to an existing RC": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageLookup] Image policy should perform lookup when the object has the resolve-names annotation [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Kubectl apply should reuse port when apply to an existing SVC": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageLookup] Image policy should update OpenShift object image fields when local names are on [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Kubectl cluster-info dump should check if cluster-info dump succeeds": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageLookup] Image policy should update standard Kube object image fields when local names are on [apigroup:image.openshift.io][apigroup:apps.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes control plane services is included in cluster-info [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageMirror][Slow] Image mirror mirror image from integrated registry into few external registries [apigroup:image.openshift.io][apigroup:build.openshift.io]": "", - "[sig-cli] Kubectl client Kubectl copy should copy a file from a running Pod": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageMirror][Slow] Image mirror mirror image from integrated registry to external registry [apigroup:image.openshift.io][apigroup:build.openshift.io]": "", - "[sig-cli] Kubectl client Kubectl create quota should create a quota with scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:ImagePrune][Serial][Suite:openshift/registry/serial][Local] Image hard prune [apigroup:apps.openshift.io][apigroup:user.openshift.io] should delete orphaned blobs [apigroup:image.openshift.io]": "", - "[sig-cli] Kubectl client Kubectl create quota should create a quota without scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:ImagePrune][Serial][Suite:openshift/registry/serial][Local] Image hard prune [apigroup:apps.openshift.io][apigroup:user.openshift.io] should show orphaned blob deletions in dry-run mode [apigroup:image.openshift.io]": "", - "[sig-cli] Kubectl client Kubectl create quota should reject quota with invalid scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:ImagePrune][Serial][Suite:openshift/registry/serial][Local] Image prune [apigroup:user.openshift.io] of schema 1 should prune old image [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", - "[sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for cronjob": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:ImagePrune][Serial][Suite:openshift/registry/serial][Local] Image prune [apigroup:user.openshift.io] of schema 2 should prune old image with config [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", - "[sig-cli] Kubectl client Kubectl 
describe should check if kubectl describe prints relevant information for rc and pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[sig-imageregistry][Feature:ImagePrune][Serial][Suite:openshift/registry/serial][Local] Image prune [apigroup:user.openshift.io] with --all=false flag should prune only internally managed images [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", - "[sig-cli] Kubectl client Kubectl diff should check if kubectl diff finds a difference for Deployments [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[sig-imageregistry][Feature:ImagePrune][Serial][Suite:openshift/registry/serial][Local] Image prune [apigroup:user.openshift.io] with --prune-registry==false should prune old image but skip registry [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", - "[sig-cli] Kubectl client Kubectl events should show event when pod is created": " [Disabled:RebaseInProgress] [Suite:k8s]", + "[sig-imageregistry][Feature:ImagePrune][Serial][Suite:openshift/registry/serial][Local] Image prune [apigroup:user.openshift.io] with default --all flag should prune both internally managed and external images [apigroup:build.openshift.io][apigroup:image.openshift.io]": "", - "[sig-cli] Kubectl client Kubectl expose should create services for rc [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageQuota] Image resource quota should deny a push of built image exceeding openshift.io/imagestreams quota [apigroup:image.openshift.io]": " [Disabled:SpecialConfig]", - "[sig-cli] Kubectl client Kubectl get componentstatuses should get componentstatuses": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageQuota][Serial][Suite:openshift/registry/serial] Image limit range [apigroup:config.openshift.io][apigroup:image.openshift.io][apigroup:operator.openshift.io] should deny a container image reference exceeding limit on openshift.io/image-tags resource [apigroup:build.openshift.io]": " [Disabled:SpecialConfig]", - "[sig-cli] Kubectl client Kubectl label should update the label on a resource [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageQuota][Serial][Suite:openshift/registry/serial] Image limit range [apigroup:config.openshift.io][apigroup:image.openshift.io][apigroup:operator.openshift.io] should deny a push of built image exceeding limit on openshift.io/images resource [apigroup:build.openshift.io]": " [Disabled:SpecialConfig]", - "[sig-cli] Kubectl client Kubectl logs should be able to retrieve and filter logs [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageQuota][Serial][Suite:openshift/registry/serial] Image limit range [apigroup:config.openshift.io][apigroup:image.openshift.io][apigroup:operator.openshift.io] should deny a push of built image exceeding openshift.io/Image limit [apigroup:build.openshift.io]": " [Disabled:SpecialConfig]", - "[sig-cli] Kubectl client Kubectl patch should add annotations for pods in rc [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageQuota][Serial][Suite:openshift/registry/serial] Image limit range [apigroup:config.openshift.io][apigroup:image.openshift.io][apigroup:operator.openshift.io] should deny an import of a repository exceeding limit on openshift.io/image-tags resource 
[apigroup:build.openshift.io]": " [Disabled:SpecialConfig]", - "[sig-cli] Kubectl client Kubectl replace should update a single-container pod's image [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageStreamImport][Serial][Slow] ImageStream API [apigroup:config.openshift.io] TestImportImageFromBlockedRegistry [apigroup:image.openshift.io]": "", - "[sig-cli] Kubectl client Kubectl run pod should create a pod from an image when restart is Never [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageStreamImport][Serial][Slow] ImageStream API [apigroup:config.openshift.io] TestImportImageFromInsecureRegistry [apigroup:image.openshift.io]": "", - "[sig-cli] Kubectl client Kubectl server-side dry-run should check if kubectl can dry-run update Pods [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageStreamImport][Serial][Slow] ImageStream API [apigroup:config.openshift.io] TestImportRepositoryFromBlockedRegistry [apigroup:image.openshift.io]": "", - "[sig-cli] Kubectl client Kubectl taint [Serial] should remove all the taints with the same key off a node": " [Skipped:SingleReplicaTopology] [Suite:openshift/conformance/serial] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageStreamImport][Serial][Slow] ImageStream API [apigroup:config.openshift.io] TestImportRepositoryFromInsecureRegistry [apigroup:image.openshift.io]": "", - "[sig-cli] Kubectl client Kubectl taint [Serial] should update the taint on a node": " [Suite:openshift/conformance/serial] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageTriggers] Annotation trigger reconciles after the image is overwritten [apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Kubectl validation should create/apply a CR with unknown fields for CRD with no validation schema": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestMultipleImageChangeBuildTriggers [apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Kubectl validation should create/apply a valid CR for CRD with validation schema": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestSimpleImageChangeBuildTriggerFromImageStreamTagCustom [apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Kubectl validation should create/apply an invalid/valid CR with arbitrary-extra properties for CRD with partially-specified validation schema": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestSimpleImageChangeBuildTriggerFromImageStreamTagCustomWithConfigChange [apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Kubectl validation should detect unknown metadata fields in both the root and embedded object of a CR": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestSimpleImageChangeBuildTriggerFromImageStreamTagDocker [apigroup:image.openshift.io][apigroup:build.openshift.io]": " 
[Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Kubectl validation should detect unknown metadata fields of a typed object": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestSimpleImageChangeBuildTriggerFromImageStreamTagDockerWithConfigChange [apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Kubectl version should check is all data is printed [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestSimpleImageChangeBuildTriggerFromImageStreamTagSTI [apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Proxy server should support --unix-socket=/path [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageTriggers] Image change build triggers TestSimpleImageChangeBuildTriggerFromImageStreamTagSTIWithConfigChange [apigroup:image.openshift.io][apigroup:build.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Proxy server should support proxy with --port 0 [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageTriggers][Serial] ImageStream API TestImageStreamMappingCreate [apigroup:image.openshift.io]": " [Suite:openshift/conformance/serial]", - "[sig-cli] Kubectl client Simple pod should contain last line of the log": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageTriggers][Serial] ImageStream API TestImageStreamTagLifecycleHook [apigroup:image.openshift.io]": " [Suite:openshift/conformance/serial]", - "[sig-cli] Kubectl client Simple pod should handle in-cluster config": " [Disabled:Broken] [Suite:k8s]", + "[sig-imageregistry][Feature:ImageTriggers][Serial] ImageStream API TestImageStreamWithoutDockerImageConfig [apigroup:image.openshift.io]": " [Suite:openshift/conformance/serial]", - "[sig-cli] Kubectl client Simple pod should return command exit codes [Slow] running a failing command with --leave-stdin-open": " [Suite:k8s]", + "[sig-imageregistry][Feature:ImageTriggers][Serial] ImageStream admission TestImageStreamAdmitSpecUpdate [apigroup:image.openshift.io]": " [Suite:openshift/conformance/serial]", - "[sig-cli] Kubectl client Simple pod should return command exit codes [Slow] running a failing command without --restart=Never": " [Suite:k8s]", + "[sig-imageregistry][Feature:ImageTriggers][Serial] ImageStream admission TestImageStreamAdmitStatusUpdate [apigroup:image.openshift.io]": " [Suite:openshift/conformance/serial]", - "[sig-cli] Kubectl client Simple pod should return command exit codes [Slow] running a failing command without --restart=Never, but with --rm": " [Suite:k8s]", + "[sig-imageregistry][Feature:ImageTriggers][Serial] ImageStream admission TestImageStreamTagsAdmission [apigroup:image.openshift.io]": " [Suite:openshift/conformance/serial]", - "[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a failing command": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:Image] oc tag should change image reference for internal images [apigroup:build.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] 
[Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Simple pod should return command exit codes execing into a container with a successful command": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:Image] oc tag should preserve image reference for external images [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Simple pod should return command exit codes running a failing command": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:Image] oc tag should work when only imagestreams api is available [apigroup:image.openshift.io][apigroup:authorization.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Simple pod should return command exit codes running a successful command": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:Image] signature TestImageAddSignature [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Simple pod should support exec through an HTTP proxy": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Feature:Image] signature TestImageRemoveSignature [apigroup:image.openshift.io]": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Simple pod should support exec through kubectl proxy": " [Skipped:Proxy] [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-imageregistry][Serial] Image signature workflow can push a signed image to openshift registry and verify it [apigroup:user.openshift.io][apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/serial]", - "[sig-cli] Kubectl client Simple pod should support exec using resource/name": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-installer][Feature:baremetal] Baremetal platform should have baremetalhost resources": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Simple pod should support exec": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-installer][Feature:baremetal] Baremetal platform should have hostfirmwaresetting resources": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Simple pod should support inline execution and attach": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-installer][Feature:baremetal] Baremetal platform should have preprovisioning images for workers": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Simple pod should support port-forward": " [Suite:openshift/conformance/parallel] [Suite:k8s]", + "[sig-installer][Feature:baremetal] Baremetal platform should not allow updating BootMacAddress": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Update Demo should create and stop a replication controller [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[sig-installer][Feature:baremetal] Baremetal/OpenStack/vSphere/None/AWS/Azure/GCP platforms have a metal3 deployment": " [Suite:openshift/conformance/parallel]", - "[sig-cli] Kubectl client Update Demo should scale a replication controller [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[sig-installer][Feature:baremetal][Serial] Baremetal platform should ensure [apigroup:config.openshift.io] cluster baremetal operator and metal3 deployment return back healthy after they are 
deleted": " [Suite:openshift/conformance/serial]", - "[sig-cli] Kubectl client kubectl wait should ignore not found error with --for=delete": " [Disabled:Broken] [Suite:k8s]", + "[sig-installer][Feature:baremetal][Serial] Baremetal platform should skip inspection when disabled by annotation": " [Suite:openshift/conformance/serial]", "[sig-instrumentation] Events API should delete a collection of events [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", @@ -815,6 +1955,26 @@ var Annotations = map[string]string{ "[sig-instrumentation] MetricsGrabber should grab all metrics from a Scheduler.": " [Disabled:Broken] [Suite:k8s]", + "[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster should have a AlertmanagerReceiversNotConfigured alert in firing state": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster should have important platform topology metrics [apigroup:config.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster should have non-Pod host cAdvisor metrics": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster should provide ingress metrics": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster should provide named network metrics [apigroup:project.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster should report telemetry [Late]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster should start and expose a secured proxy and unsecured metrics [apigroup:config.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster shouldn't have failing rules evaluation": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster shouldn't report any alerts in firing state apart from Watchdog and AlertmanagerReceiversNotConfigured [Early][apigroup:config.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-instrumentation] Prometheus [apigroup:image.openshift.io] when installed on the cluster when using openshift-sdn should be able to get the sdn ovs flows": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + "[sig-instrumentation] Stackdriver Monitoring should have accelerator metrics [Feature:StackdriverAcceleratorMonitoring]": " [Disabled:Unimplemented] [Suite:k8s]", "[sig-instrumentation] Stackdriver Monitoring should have cluster metrics [Feature:StackdriverMonitoring]": " [Disabled:Unimplemented] [Suite:k8s]", @@ -827,6 +1987,46 @@ var Annotations = map[string]string{ "[sig-instrumentation] Stackdriver Monitoring should run Stackdriver Metadata Agent [Feature:StackdriverMetadataAgent]": " [Disabled:Unimplemented] [Suite:k8s]", + "[sig-instrumentation][Late] Alerts 
shouldn't exceed the 650 series limit of total series sent via telemetry from each cluster": " [Suite:openshift/conformance/parallel]", + + "[sig-instrumentation][Late] Alerts shouldn't report any unexpected alerts in firing or pending state": " [Suite:openshift/conformance/parallel]", + + "[sig-instrumentation][Late] OpenShift alerting rules [apigroup:image.openshift.io] should have a runbook_url annotation if the alert is critical": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-instrumentation][Late] OpenShift alerting rules [apigroup:image.openshift.io] should have a valid severity label": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-instrumentation][Late] OpenShift alerting rules [apigroup:image.openshift.io] should have description and summary annotations": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-instrumentation][sig-builds][Feature:Builds] Prometheus when installed on the cluster should start and expose a secured proxy and verify build metrics [apigroup:build.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-network-edge] DNS should answer A and AAAA queries for a dual-stack service [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-network-edge] DNS should answer endpoint and wildcard queries for the cluster": " [Disabled:Broken]", + + "[sig-network-edge] DNS should answer queries using the local DNS endpoint": " [Suite:openshift/conformance/parallel]", + + "[sig-network-edge][Conformance][Area:Networking][Feature:Router] The HAProxy router should be able to connect to a service that is idled because a GET on the route will unidle it": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel/minimal]", + + "[sig-network-edge][Conformance][Area:Networking][Feature:Router] The HAProxy router should pass the gRPC interoperability tests [apigroup:route.openshift.io][apigroup:operator.openshift.io]": " [Suite:openshift/conformance/parallel/minimal]", + + "[sig-network-edge][Conformance][Area:Networking][Feature:Router][apigroup:route.openshift.io] The HAProxy router should pass the h2spec conformance tests [apigroup:authorization.openshift.io][apigroup:user.openshift.io][apigroup:security.openshift.io][apigroup:operator.openshift.io]": " [Suite:openshift/conformance/parallel/minimal]", + + "[sig-network-edge][Conformance][Area:Networking][Feature:Router][apigroup:route.openshift.io][apigroup:config.openshift.io] The HAProxy router should pass the http2 tests [apigroup:image.openshift.io][apigroup:operator.openshift.io]": " [Suite:openshift/conformance/parallel/minimal]", + + "[sig-network-edge][Feature:Idling] Idling with a single service and DeploymentConfig [apigroup:route.openshift.io] should idle the service and DeploymentConfig properly [apigroup:apps.openshift.io]": " [Disabled:Broken]", + + "[sig-network-edge][Feature:Idling] Idling with a single service and ReplicationController should idle the service and ReplicationController properly": " [Suite:openshift/conformance/parallel]", + + "[sig-network-edge][Feature:Idling] Unidling [apigroup:apps.openshift.io][apigroup:route.openshift.io] should handle many TCP connections by possibly dropping those over a certain bound [Serial]": " [Suite:openshift/conformance/serial]", + + "[sig-network-edge][Feature:Idling] Unidling [apigroup:apps.openshift.io][apigroup:route.openshift.io] should handle many UDP senders (by continuing to drop all packets on the floor) 
[Serial]": " [Suite:openshift/conformance/serial]", + + "[sig-network-edge][Feature:Idling] Unidling [apigroup:apps.openshift.io][apigroup:route.openshift.io] should work with TCP (when fully idled)": " [Suite:openshift/conformance/parallel]", + + "[sig-network-edge][Feature:Idling] Unidling [apigroup:apps.openshift.io][apigroup:route.openshift.io] should work with TCP (while idling)": " [Disabled:Broken]", + + "[sig-network-edge][Feature:Idling] Unidling [apigroup:apps.openshift.io][apigroup:route.openshift.io] should work with UDP": " [Suite:openshift/conformance/parallel]", + "[sig-network] CVE-2021-29923 IPv4 Service Type ClusterIP with leading zeros should work interpreted as decimal": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-network] ClusterDns [Feature:Example] should create pod that uses dns": " [Disabled:Broken] [Suite:k8s]", @@ -901,6 +2101,8 @@ var Annotations = map[string]string{ "[sig-network] IngressClass [Feature:Ingress] should set default value on new IngressClass [Serial]": " [Suite:openshift/conformance/serial] [Suite:k8s]", + "[sig-network] Internal connectivity for TCP and UDP on ports 9000-9999 is allowed [Serial:Self]": " [Suite:openshift/conformance/parallel]", + "[sig-network] KubeProxy should set TCP CLOSE_WAIT timeout [Privileged]": " [Disabled:Broken] [Suite:k8s]", "[sig-network] LoadBalancers ESIPP [Slow] should handle updates to ExternalTrafficPolicy field": " [Suite:k8s]", @@ -1353,6 +2555,148 @@ var Annotations = map[string]string{ "[sig-network] [Feature:Topology Hints] should distribute endpoints evenly": " [Disabled:SpecialConfig] [Suite:k8s]", + "[sig-network] external gateway address when using openshift ovn-kubernetes should match the address family of the pod": " [Suite:openshift/conformance/parallel]", + + "[sig-network] load balancer should be managed by OpenShift": " [Suite:openshift/conformance/parallel]", + + "[sig-network] load balancer should not be managed by OpenShift": " [Suite:openshift/conformance/parallel]", + + "[sig-network] multicast when using one of the OpenshiftSDN modes 'redhat/openshift-ovs-multitenant, redhat/openshift-ovs-networkpolicy' should allow multicast traffic in namespaces where it is enabled": " [Suite:openshift/conformance/parallel]", + + "[sig-network] multicast when using one of the OpenshiftSDN modes 'redhat/openshift-ovs-multitenant, redhat/openshift-ovs-networkpolicy' should block multicast traffic in namespaces where it is disabled": " [Suite:openshift/conformance/parallel]", + + "[sig-network] multicast when using one of the OpenshiftSDN modes 'redhat/openshift-ovs-subnet' should block multicast traffic": " [Suite:openshift/conformance/parallel]", + + "[sig-network] network isolation when using a plugin in a mode that does not isolate namespaces by default should allow communication between pods in different namespaces on different nodes": " [Suite:openshift/conformance/parallel]", + + "[sig-network] network isolation when using a plugin in a mode that does not isolate namespaces by default should allow communication between pods in different namespaces on the same node": " [Suite:openshift/conformance/parallel]", + + "[sig-network] network isolation when using a plugin in a mode that isolates namespaces by default should allow communication from default to non-default namespace on a different node": " [Suite:openshift/conformance/parallel]", + + "[sig-network] network isolation when using a plugin in a mode that isolates namespaces by default should allow communication from default to 
non-default namespace on the same node": " [Suite:openshift/conformance/parallel]", + + "[sig-network] network isolation when using a plugin in a mode that isolates namespaces by default should allow communication from non-default to default namespace on a different node": " [Suite:openshift/conformance/parallel]", + + "[sig-network] network isolation when using a plugin in a mode that isolates namespaces by default should allow communication from non-default to default namespace on the same node": " [Suite:openshift/conformance/parallel]", + + "[sig-network] network isolation when using a plugin in a mode that isolates namespaces by default should prevent communication between pods in different namespaces on different nodes": " [Suite:openshift/conformance/parallel]", + + "[sig-network] network isolation when using a plugin in a mode that isolates namespaces by default should prevent communication between pods in different namespaces on the same node": " [Suite:openshift/conformance/parallel]", + + "[sig-network] services basic functionality should allow connections to another pod on a different node via a service IP": " [Suite:openshift/conformance/parallel]", + + "[sig-network] services basic functionality should allow connections to another pod on the same node via a service IP": " [Suite:openshift/conformance/parallel]", + + "[sig-network] services when running openshift ipv4 cluster ensures external ip policy is configured correctly on the cluster [apigroup:config.openshift.io] [Serial]": " [Suite:openshift/conformance/serial]", + + "[sig-network] services when running openshift ipv4 cluster on bare metal [apigroup:config.openshift.io] ensures external auto assign cidr is configured correctly on the cluster [apigroup:config.openshift.io] [Serial]": " [Suite:openshift/conformance/serial]", + + "[sig-network] services when using a plugin in a mode that does not isolate namespaces by default should allow connections to pods in different namespaces on different nodes via service IPs": " [Suite:openshift/conformance/parallel]", + + "[sig-network] services when using a plugin in a mode that does not isolate namespaces by default should allow connections to pods in different namespaces on the same node via service IPs": " [Suite:openshift/conformance/parallel]", + + "[sig-network] services when using a plugin in a mode that isolates namespaces by default should allow connections from pods in the default namespace to a service in another namespace on a different node": " [Suite:openshift/conformance/parallel]", + + "[sig-network] services when using a plugin in a mode that isolates namespaces by default should allow connections from pods in the default namespace to a service in another namespace on the same node": " [Suite:openshift/conformance/parallel]", + + "[sig-network] services when using a plugin in a mode that isolates namespaces by default should allow connections to services in the default namespace from a pod in another namespace on a different node": " [Suite:openshift/conformance/parallel]", + + "[sig-network] services when using a plugin in a mode that isolates namespaces by default should allow connections to services in the default namespace from a pod in another namespace on the same node": " [Suite:openshift/conformance/parallel]", + + "[sig-network] services when using a plugin in a mode that isolates namespaces by default should prevent connections to pods in different namespaces on different nodes via service IPs": " [Suite:openshift/conformance/parallel]", + + 
"[sig-network] services when using a plugin in a mode that isolates namespaces by default should prevent connections to pods in different namespaces on the same node via service IPs": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:EgressFirewall] egressFirewall should have no impact outside its namespace": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:EgressFirewall] when using openshift ovn-kubernetes should ensure egressfirewall is created": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:EgressFirewall] when using openshift-sdn should ensure egressnetworkpolicy is created [apigroup:network.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:EgressIP][apigroup:operator.openshift.io] [external-targets][apigroup:user.openshift.io][apigroup:security.openshift.io] EgressIPs can be assigned automatically [Skipped:Network/OVNKubernetes]": " [Serial] [Suite:openshift/conformance/serial]", + + "[sig-network][Feature:EgressIP][apigroup:operator.openshift.io] [external-targets][apigroup:user.openshift.io][apigroup:security.openshift.io] only pods matched by the pod selector should have the EgressIPs [Skipped:Network/OpenShiftSDN]": " [Serial] [Suite:openshift/conformance/serial]", + + "[sig-network][Feature:EgressIP][apigroup:operator.openshift.io] [external-targets][apigroup:user.openshift.io][apigroup:security.openshift.io] pods should have the assigned EgressIPs and EgressIPs can be deleted and recreated [Skipped:azure][apigroup:route.openshift.io]": " [Serial] [Suite:openshift/conformance/serial]", + + "[sig-network][Feature:EgressIP][apigroup:operator.openshift.io] [external-targets][apigroup:user.openshift.io][apigroup:security.openshift.io] pods should have the assigned EgressIPs and EgressIPs can be updated [Skipped:Network/OpenShiftSDN]": " [Serial] [Suite:openshift/conformance/serial]", + + "[sig-network][Feature:EgressIP][apigroup:operator.openshift.io] [external-targets][apigroup:user.openshift.io][apigroup:security.openshift.io] pods should keep the assigned EgressIPs when being rescheduled to another node": " [Serial] [Suite:openshift/conformance/serial]", + + "[sig-network][Feature:EgressIP][apigroup:operator.openshift.io] [internal-targets] EgressIP pods should query hostNetwork pods with the local node's SNAT": " [Serial] [Suite:openshift/conformance/serial]", + + "[sig-network][Feature:EgressRouterCNI] should ensure ipv4 egressrouter cni resources are created [apigroup:operator.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:EgressRouterCNI] when using openshift ovn-kubernetes should ensure ipv6 egressrouter cni resources are created [apigroup:operator.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Multus] should use multus to create net1 device from network-attachment-definition [apigroup:k8s.cni.cncf.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Network Policy Audit logging] when using openshift ovn-kubernetes should ensure acl logs are created and correct [apigroup:project.openshift.io][apigroup:network.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Router] The HAProxy router should enable openshift-monitoring to pull metrics": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Router] The HAProxy router should expose a health check on the metrics port": " [Skipped:Disconnected] 
[Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Router] The HAProxy router should expose prometheus metrics for a route [apigroup:route.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Router] The HAProxy router should expose the profiling endpoints": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Router][apigroup:image.openshift.io] The HAProxy router should serve a route that points to two services and respect weights": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Router][apigroup:operator.openshift.io] The HAProxy router should respond with 503 to unrecognized hosts": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Router][apigroup:operator.openshift.io] The HAProxy router should serve routes that were created from an ingress [apigroup:route.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Router][apigroup:operator.openshift.io] The HAProxy router should set Forwarded headers appropriately": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Router][apigroup:route.openshift.io] The HAProxy router converges when multiple routers are writing conflicting status": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Router][apigroup:route.openshift.io] The HAProxy router converges when multiple routers are writing status": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Router][apigroup:route.openshift.io] The HAProxy router reports the expected host names in admitted routes' statuses": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Router][apigroup:route.openshift.io] The HAProxy router should override the route host for overridden domains with a custom value [apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Router][apigroup:route.openshift.io] The HAProxy router should override the route host with a custom value": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Router][apigroup:route.openshift.io] The HAProxy router should run even if it has no access to update status [apigroup:image.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Router][apigroup:route.openshift.io] The HAProxy router should serve the correct routes when running with the haproxy config manager": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Router][apigroup:route.openshift.io] The HAProxy router should serve the correct routes when scoped to a single namespace and label set": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Router][apigroup:route.openshift.io] when FIPS is disabled the HAProxy router should serve routes when configured with a 1024-bit RSA key": " [Feature:Networking-IPv4] [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Router][apigroup:route.openshift.io] when FIPS is enabled the HAProxy router should not work when configured with a 1024-bit RSA key": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Router][apigroup:route.openshift.io][apigroup:operator.openshift.io] The HAProxy router should support reencrypt to services backed by a serving 
certificate automatically": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Whereabouts] should assign unique IP addresses to each pod in the event of a race condition case [apigroup:k8s.cni.cncf.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:Whereabouts] should use whereabouts net-attach-def to limit IP ranges for newly created pods [apigroup:k8s.cni.cncf.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:bond] should create a pod with bond interface [apigroup:k8s.cni.cncf.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:tap] should create a pod with a tap interface [apigroup:k8s.cni.cncf.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:tuning] pod should not start for sysctls not on whitelist [apigroup:k8s.cni.cncf.io] net.ipv4.conf.IFNAME.arp_filter": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:tuning] pod should not start for sysctls not on whitelist [apigroup:k8s.cni.cncf.io] net.ipv4.conf.all.send_redirects": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:tuning] pod should start with all sysctl on whitelist [apigroup:k8s.cni.cncf.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:tuning] pod sysctl should not affect existing pods [apigroup:k8s.cni.cncf.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:tuning] pod sysctl should not affect newly created pods [apigroup:k8s.cni.cncf.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:tuning] pod sysctls should not affect node [apigroup:k8s.cni.cncf.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-network][Feature:tuning] sysctl allowlist update should start a pod with custom sysctl only when the sysctl is added to whitelist": " [Suite:openshift/conformance/parallel]", + + "[sig-network][endpoints] admission [apigroup:config.openshift.io] blocks manual creation of EndpointSlices pointing to the cluster or service network": " [Suite:openshift/conformance/parallel]", + + "[sig-network][endpoints] admission [apigroup:config.openshift.io] blocks manual creation of Endpoints pointing to the cluster or service network": " [Suite:openshift/conformance/parallel]", + "[sig-node] AppArmor load AppArmor profiles can disable an AppArmor profile, using unconfined": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-node] AppArmor load AppArmor profiles should enforce an AppArmor profile": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -1455,6 +2799,10 @@ var Annotations = map[string]string{ "[sig-node] Lease lease API should be available [Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", + "[sig-node] Managed cluster record the number of nodes at the beginning of the tests [Early]": " [Suite:openshift/conformance/parallel]", + + "[sig-node] Managed cluster should report ready nodes the entire duration of the test run [Late][apigroup:monitoring.coreos.com]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + "[sig-node] Mount propagation should propagate mounts within defined scopes": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-node] NoExecuteTaintManager Multiple Pods [Serial] evicts pods with minTolerationSeconds [Disruptive] [Conformance]": " [Skipped:SingleReplicaTopology] [Suite:k8s]", @@ -1713,6 +3061,40 @@ var Annotations = map[string]string{ "[sig-node] kubelet kubectl node-logs 
[Feature:add node log viewer] should return the logs for the requested service": " [Disabled:Alpha] [Suite:k8s]", + "[sig-node] should override timeoutGracePeriodSeconds when annotation is set": " [Suite:openshift/conformance/parallel]", + + "[sig-node] supplemental groups Ensure supplemental groups propagate to docker should propagate requested groups to the container [apigroup:security.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-node][Late] should not have pod creation failures due to systemd timeouts": " [Suite:openshift/conformance/parallel]", + + "[sig-operator] OLM should Implement packages API server and list packagemanifest info with namespace not NULL [apigroup:packages.operators.coreos.com]": " [Suite:openshift/conformance/parallel]", + + "[sig-operator] OLM should be installed with catalogsources at version v1alpha1 [apigroup:operators.coreos.com]": " [Suite:openshift/conformance/parallel]", + + "[sig-operator] OLM should be installed with clusterserviceversions at version v1alpha1 [apigroup:operators.coreos.com]": " [Suite:openshift/conformance/parallel]", + + "[sig-operator] OLM should be installed with installplans at version v1alpha1 [apigroup:operators.coreos.com]": " [Suite:openshift/conformance/parallel]", + + "[sig-operator] OLM should be installed with operatorgroups at version v1 [apigroup:operators.coreos.com]": " [Suite:openshift/conformance/parallel]", + + "[sig-operator] OLM should be installed with packagemanifests at version v1 [apigroup:packages.operators.coreos.com]": " [Suite:openshift/conformance/parallel]", + + "[sig-operator] OLM should be installed with subscriptions at version v1alpha1 [apigroup:operators.coreos.com]": " [Suite:openshift/conformance/parallel]", + + "[sig-operator] OLM should have imagePullPolicy:IfNotPresent on thier deployments": " [Suite:openshift/conformance/parallel]", + + "[sig-operator] an end user can use OLM Report Upgradeable in OLM ClusterOperators status [apigroup:config.openshift.io]": " [Suite:openshift/conformance/parallel]", + + "[sig-operator] an end user can use OLM can subscribe to the operator [apigroup:config.openshift.io]": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-scalability][Feature:Performance] Load cluster should populate the cluster [Slow][Serial][apigroup:template.openshift.io][apigroup:apps.openshift.io][apigroup:build.openshift.io]": "", + + "[sig-scalability][Feature:Performance][Serial][Slow] Load cluster concurrently with templates": "", + + "[sig-scalability][Feature:Performance][Serial][Slow] Mirror cluster it should read the cluster apps [apigroup:apps.openshift.io]": "", + + "[sig-scalability][Feature:Performance][Serial][Slow] Mirror cluster it should read the node info": "", + "[sig-scheduling] GPUDevicePluginAcrossRecreate [Feature:Recreate] run Nvidia GPU Device Plugin tests with a recreation": " [Disabled:SpecialConfig] [Suite:k8s]", "[sig-scheduling] LimitRange should create a LimitRange with defaults and ensure pod has those defaults applied. 
[Conformance]": " [Suite:openshift/conformance/parallel/minimal] [Suite:k8s]", @@ -1769,6 +3151,28 @@ var Annotations = map[string]string{ "[sig-scheduling] [Feature:GPUDevicePlugin] run Nvidia GPU Device Plugin tests": " [Disabled:SpecialConfig] [Suite:k8s]", + "[sig-scheduling][Early] The HAProxy router pods [apigroup:route.openshift.io] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", + + "[sig-scheduling][Early] The openshift-apiserver pods [apigroup:apps.openshift.io][apigroup:authorization.openshift.io][apigroup:build.openshift.io][apigroup:image.openshift.io][apigroup:project.openshift.io][apigroup:quota.openshift.io][apigroup:route.openshift.io][apigroup:security.openshift.io][apigroup:template.openshift.io] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", + + "[sig-scheduling][Early] The openshift-authentication pods [apigroup:oauth.openshift.io] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", + + "[sig-scheduling][Early] The openshift-console console pods [apigroup:console.openshift.io] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", + + "[sig-scheduling][Early] The openshift-console downloads pods [apigroup:console.openshift.io] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", + + "[sig-scheduling][Early] The openshift-etcd pods [apigroup:operator.openshift.io] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", + + "[sig-scheduling][Early] The openshift-image-registry pods [apigroup:imageregistry.operator.openshift.io] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", + + "[sig-scheduling][Early] The openshift-monitoring prometheus-adapter pods [apigroup:monitoring.coreos.com] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", + + "[sig-scheduling][Early] The openshift-monitoring thanos-querier pods [apigroup:monitoring.coreos.com] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", + + "[sig-scheduling][Early] The openshift-oauth-apiserver pods [apigroup:oauth.openshift.io][apigroup:user.openshift.io] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", + + "[sig-scheduling][Early] The openshift-operator-lifecycle-manager pods [apigroup:packages.operators.coreos.com] should be scheduled on different nodes": " [Suite:openshift/conformance/parallel]", + "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read-only inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: CSI Ephemeral-volume (default fs)] ephemeral should create read/write inline ephemeral volume": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -14441,6 +15845,8 @@ var Annotations = map[string]string{ "[sig-storage] In-tree Volumes [Driver: windows-gcepd] [Testpattern: Pre-provisioned PV (xfs)][Slow] volumes should store data": " [Suite:k8s]", + "[sig-storage] Managed cluster should have no crashlooping recycler pods over four minutes": " [Suite:openshift/conformance/parallel]", + "[sig-storage] Mounted volume expand [Feature:StorageProvider] Should verify mounted devices can be resized": " [Flaky] [Skipped:NoOptionalCapabilities] [Suite:k8s]", "[sig-storage] Multi-AZ Cluster Volumes should schedule pods in the 
same zones as statically provisioned PVs": " [Suite:openshift/conformance/parallel] [Suite:k8s]", @@ -14978,6 +16384,32 @@ var Annotations = map[string]string{ "[sig-storage] vsphere cloud provider stress [Feature:vsphere] vsphere stress tests": " [Disabled:Unsupported] [Suite:k8s]", "[sig-storage] vsphere statefulset [Feature:vsphere] vsphere statefulset testing": " [Disabled:Unsupported] [Suite:k8s]", + + "[sig-storage][Feature:CSIInlineVolumeAdmission][Serial] baseline namespace should allow pods with inline volumes when the driver uses the baseline label": " [Suite:openshift/conformance/serial]", + + "[sig-storage][Feature:CSIInlineVolumeAdmission][Serial] baseline namespace should allow pods with inline volumes when the driver uses the restricted label": " [Suite:openshift/conformance/serial]", + + "[sig-storage][Feature:CSIInlineVolumeAdmission][Serial] baseline namespace should deny pods with inline volumes when the driver uses the privileged label": " [Suite:openshift/conformance/serial]", + + "[sig-storage][Feature:CSIInlineVolumeAdmission][Serial] privileged namespace should allow pods with inline volumes when the driver uses the privileged label": " [Suite:openshift/conformance/serial]", + + "[sig-storage][Feature:CSIInlineVolumeAdmission][Serial] privileged namespace should allow pods with inline volumes when the driver uses the restricted label": " [Suite:openshift/conformance/serial]", + + "[sig-storage][Feature:CSIInlineVolumeAdmission][Serial] restricted namespace should allow pods with inline volumes when the driver uses the restricted label": " [Suite:openshift/conformance/serial]", + + "[sig-storage][Feature:CSIInlineVolumeAdmission][Serial] restricted namespace should deny pods with inline volumes when the driver uses the baseline label": " [Suite:openshift/conformance/serial]", + + "[sig-storage][Feature:CSIInlineVolumeAdmission][Serial] restricted namespace should deny pods with inline volumes when the driver uses the privileged label": " [Suite:openshift/conformance/serial]", + + "[sig-storage][Feature:DisableStorageClass][Serial] should not reconcile the StorageClass when StorageClassState is Unmanaged": " [Suite:openshift/conformance/serial]", + + "[sig-storage][Feature:DisableStorageClass][Serial] should reconcile the StorageClass when StorageClassState is Managed": " [Suite:openshift/conformance/serial]", + + "[sig-storage][Feature:DisableStorageClass][Serial] should remove the StorageClass when StorageClassState is Removed": " [Suite:openshift/conformance/serial]", + + "[sig-storage][Late] Metrics should report short attach times": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", + + "[sig-storage][Late] Metrics should report short mount times": " [Skipped:Disconnected] [Suite:openshift/conformance/parallel]", } func init() { diff --git a/test/extended/util/configv1shim.go b/test/extended/util/configv1shim.go index f71a5d8e90a4..d07111985332 100644 --- a/test/extended/util/configv1shim.go +++ b/test/extended/util/configv1shim.go @@ -678,7 +678,7 @@ func (f *FakeWatcher) Action(action watch.EventType, obj runtime.Object) { defer f.Unlock() if !f.Stopped { select { - case f.result <- watch.Event{action, obj}: + case f.result <- watch.Event{Type: action, Object: obj}: return default: panic(fmt.Errorf("channel full")) From 37fafc87bbec9968ebd0fb460a2d7cdacb9704bb Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Mon, 6 Mar 2023 19:32:45 +0100 Subject: [PATCH 06/15] Address comments Add support for field selectors for List and Watch requests Fix 
naming --- test/extended/util/configv1shim.go | 161 +++++++--- test/extended/util/configv1shim_test.go | 379 +++++++++++++++++++++--- test/extended/util/framework.go | 2 +- 3 files changed, 461 insertions(+), 81 deletions(-) diff --git a/test/extended/util/configv1shim.go b/test/extended/util/configv1shim.go index d07111985332..db7ea857601e 100644 --- a/test/extended/util/configv1shim.go +++ b/test/extended/util/configv1shim.go @@ -17,6 +17,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/watch" @@ -122,7 +123,7 @@ func (c *ConfigV1ClientShim) Images() configv1.ImageInterface { } func (c *ConfigV1ClientShim) ImageContentPolicies() configv1.ImageContentPolicyInterface { - if c.v1Kinds["ImageContentPolicie"] { + if c.v1Kinds["ImageContentPolicy"] { panic(fmt.Errorf("ImageContentPolicie not implemented")) } return c.configv1.ImageContentPolicies() @@ -150,7 +151,7 @@ func (c *ConfigV1ClientShim) Infrastructures() configv1.InfrastructureInterface } func (c *ConfigV1ClientShim) Ingresses() configv1.IngressInterface { - if c.v1Kinds["Ingresse"] { + if c.v1Kinds["Ingress"] { panic(fmt.Errorf("Ingresse not implemented")) } return c.configv1.Ingresses() @@ -192,7 +193,7 @@ func (c *ConfigV1ClientShim) Projects() configv1.ProjectInterface { } func (c *ConfigV1ClientShim) Proxies() configv1.ProxyInterface { - if c.v1Kinds["Proxie"] { + if c.v1Kinds["Proxy"] { panic(fmt.Errorf("Proxie not implemented")) } return c.configv1.Proxies() @@ -286,23 +287,49 @@ func (c *ConfigV1InfrastructuresClientShim) Get(ctx context.Context, name string } func (c *ConfigV1InfrastructuresClientShim) List(ctx context.Context, opts metav1.ListOptions) (*apiconfigv1.InfrastructureList, error) { - // INFO: field selectors will not work - staticObjList, err := c.fakeConfigV1InfrastructuresClient.List(ctx, opts) - if err != nil { - return nil, err + items := []apiconfigv1.Infrastructure{} + knownKeys := make(map[string]struct{}) + + if len(opts.FieldSelector) > 0 { + sel, err := fields.ParseSelector(opts.FieldSelector) + if err != nil { + return nil, fmt.Errorf("unable to parse field selector") + } + // list all objects + staticObjList, err := c.fakeConfigV1InfrastructuresClient.List(ctx, metav1.ListOptions{LabelSelector: opts.LabelSelector}) + if err != nil { + return nil, err + } + for _, item := range staticObjList.Items { + // existing item is still a known item even though it does not match + // the field selector. It still needs to be excluded from the real objects. 
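Editorial note on the List changes above: the shim matches on metadata.name only because, per the linked converter code, the apiserver supports just metadata.name and metadata.namespace as field selectors for CRD-backed resources, and the config.openshift.io kinds handled here are cluster scoped. The filtering step can be reproduced in isolation; a minimal sketch, assuming nothing beyond k8s.io/apimachinery (filterByName is an illustrative helper, not code from this patch):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

// filterByName mirrors the shim's List path: parse the selector once, then
// match each cluster-scoped object on metadata.name, the only field the
// apiserver exposes to field selectors for cluster-scoped custom resources.
func filterByName(names []string, fieldSelector string) ([]string, error) {
	sel := fields.Everything()
	if len(fieldSelector) > 0 {
		parsed, err := fields.ParseSelector(fieldSelector)
		if err != nil {
			return nil, fmt.Errorf("unable to parse field selector: %w", err)
		}
		sel = parsed
	}
	var matched []string
	for _, name := range names {
		if sel.Matches(fields.Set{"metadata.name": name}) {
			matched = append(matched, name)
		}
	}
	return matched, nil
}

func main() {
	matched, err := filterByName([]string{"cluster", "extra"}, "metadata.name=cluster")
	if err != nil {
		panic(err)
	}
	fmt.Println(matched) // [cluster]
}

For these cluster-scoped kinds nothing other than metadata.name can ever match, so restricting the match set to that single key loses no information.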
+ knownKeys[item.Name] = struct{}{} + + // Based on https://github.com/openshift/origin/pull/27714 and + // https://github.com/kubernetes/kubernetes/blob/f14cc7fdfcfcedafc7910f043ec6eb74930cfee7/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go#L128-L138 + // only metadata.Name and metadata.Namespaces are supported for CRDs + // Infrastructure is cluster scoped -> no metadata.namespace + if !sel.Matches(fields.Set(map[string]string{"metadata.name": item.Name})) { + continue + } + items = append(items, item) + } + } else { + staticObjList, err := c.fakeConfigV1InfrastructuresClient.List(ctx, opts) + if err != nil { + return nil, err + } + for _, item := range staticObjList.Items { + items = append(items, item) + knownKeys[item.Name] = struct{}{} + } } + objList, err := c.configV1InfrastructuresClient.List(ctx, opts) if err != nil { return nil, err } - items := []apiconfigv1.Infrastructure{} - knownKeys := make(map[string]struct{}) - for _, item := range staticObjList.Items { - items = append(items, item) - knownKeys[item.Name] = struct{}{} - } - for _, item := range objList.Items { // skip objects with corresponding static manifests if _, exists := knownKeys[item.Name]; exists { @@ -338,13 +365,30 @@ func (c *ConfigV1InfrastructuresClientShim) Watch(ctx context.Context, opts meta return resourceWatcher, nil } + // Only reduces the set of objects generating the ADD event + // The Follow will still get the full list of ignored objects + watchSel := fields.Everything() + if len(opts.FieldSelector) > 0 { + sel, err := fields.ParseSelector(opts.FieldSelector) + if err != nil { + return nil, fmt.Errorf("unable to parse field selector") + } + watchSel = sel + } + objs := []runtime.Object{} watcher := NewFakeWatcher() // Produce ADDED watch event types for the static manifests for _, item := range staticObjList.Items { // make shallow copy obj := item - watcher.Action(watch.Added, &obj) + // Based on https://github.com/openshift/origin/pull/27714 and + // https://github.com/kubernetes/kubernetes/blob/f14cc7fdfcfcedafc7910f043ec6eb74930cfee7/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go#L128-L138 + // only metadata.Name and metadata.Namespaces are supported for CRDs + // Infrastructure is cluster scoped -> no metadata.namespace + if watchSel.Matches(fields.Set(map[string]string{"metadata.name": item.Name})) { + watcher.Action(watch.Added, &obj) + } objs = append(objs, &obj) } @@ -475,23 +519,49 @@ func (c *ConfigV1NetworksClientShim) Get(ctx context.Context, name string, opts } func (c *ConfigV1NetworksClientShim) List(ctx context.Context, opts metav1.ListOptions) (*apiconfigv1.NetworkList, error) { - // INFO: field selectors will not work - staticObjList, err := c.fakeConfigV1NetworksClient.List(ctx, opts) - if err != nil { - return nil, err + items := []apiconfigv1.Network{} + knownKeys := make(map[string]struct{}) + + if len(opts.FieldSelector) > 0 { + sel, err := fields.ParseSelector(opts.FieldSelector) + if err != nil { + return nil, fmt.Errorf("unable to parse field selector") + } + // list all objects + staticObjList, err := c.fakeConfigV1NetworksClient.List(ctx, metav1.ListOptions{LabelSelector: opts.LabelSelector}) + if err != nil { + return nil, err + } + for _, item := range staticObjList.Items { + // existing item is still a known item even though it does not match + // the field selector. It still needs to be excluded from the real objects. 
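Both List implementations rely on the same precedence rule: every static manifest is kept, its name goes into knownKeys, and a live object is dropped whenever a static manifest of the same name shadows it. Stripped of the client plumbing, the rule reduces to the following standalone sketch (obj and mergeStaticAndReal are illustrative names, not from the patch):

package main

import "fmt"

// obj stands in for any cluster-scoped config object keyed by name.
type obj struct {
	Name   string
	Static bool
}

// mergeStaticAndReal applies the shim's precedence rule: every static
// manifest is kept, and a live object survives only when no static
// manifest of the same name shadows it.
func mergeStaticAndReal(static, live []obj) []obj {
	known := make(map[string]struct{}, len(static))
	merged := append([]obj{}, static...)
	for _, s := range static {
		known[s.Name] = struct{}{}
	}
	for _, l := range live {
		if _, shadowed := known[l.Name]; shadowed {
			continue // the static manifest overrides the live object
		}
		merged = append(merged, l)
	}
	return merged
}

func main() {
	static := []obj{{Name: "cluster", Static: true}}
	live := []obj{{Name: "cluster"}, {Name: "other"}}
	for _, o := range mergeStaticAndReal(static, live) {
		fmt.Printf("%s static=%v\n", o.Name, o.Static)
	}
	// cluster static=true
	// other static=false
}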
+ knownKeys[item.Name] = struct{}{} + + // Based on https://github.com/openshift/origin/pull/27714 and + // https://github.com/kubernetes/kubernetes/blob/f14cc7fdfcfcedafc7910f043ec6eb74930cfee7/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go#L128-L138 + // only metadata.Name and metadata.Namespaces are supported for CRDs + // Network is cluster scoped -> no metadata.namespace + if !sel.Matches(fields.Set(map[string]string{"metadata.name": item.Name})) { + continue + } + items = append(items, item) + } + } else { + staticObjList, err := c.fakeConfigV1NetworksClient.List(ctx, opts) + if err != nil { + return nil, err + } + for _, item := range staticObjList.Items { + items = append(items, item) + knownKeys[item.Name] = struct{}{} + } } + objList, err := c.configV1NetworksClient.List(ctx, opts) if err != nil { return nil, err } - items := []apiconfigv1.Network{} - knownKeys := make(map[string]struct{}) - for _, item := range staticObjList.Items { - items = append(items, item) - knownKeys[item.Name] = struct{}{} - } - for _, item := range objList.Items { // skip objects with corresponding static manifests if _, exists := knownKeys[item.Name]; exists { @@ -527,13 +597,30 @@ func (c *ConfigV1NetworksClientShim) Watch(ctx context.Context, opts metav1.List return resourceWatcher, nil } + // Only reduces the set of objects generating the ADD event + // The Follow will still get the full list of ignored objects + watchSel := fields.Everything() + if len(opts.FieldSelector) > 0 { + sel, err := fields.ParseSelector(opts.FieldSelector) + if err != nil { + return nil, fmt.Errorf("unable to parse field selector") + } + watchSel = sel + } + objs := []runtime.Object{} watcher := NewFakeWatcher() // Produce ADDED watch event types for the static manifests for _, item := range staticObjList.Items { // make shallow copy obj := item - watcher.Action(watch.Added, &obj) + // Based on https://github.com/openshift/origin/pull/27714 and + // https://github.com/kubernetes/kubernetes/blob/f14cc7fdfcfcedafc7910f043ec6eb74930cfee7/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion/converter.go#L128-L138 + // only metadata.Name and metadata.Namespaces are supported for CRDs + // Network is cluster scoped -> no metadata.namespace + if watchSel.Matches(fields.Set(map[string]string{"metadata.name": item.Name})) { + watcher.Action(watch.Added, &obj) + } objs = append(objs, &obj) } @@ -559,32 +646,32 @@ func (c *ConfigV1NetworksClientShim) Patch(ctx context.Context, name string, pt return nil, err } -func (c *ConfigV1NetworksClientShim) Apply(ctx context.Context, infrastructure *applyconfigv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (*apiconfigv1.Network, error) { +func (c *ConfigV1NetworksClientShim) Apply(ctx context.Context, network *applyconfigv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (*apiconfigv1.Network, error) { // Unable to determine existence of a static manifest - if infrastructure == nil || infrastructure.Name == nil { - return c.configV1NetworksClient.Apply(ctx, infrastructure, opts) + if network == nil || network.Name == nil { - return c.configV1NetworksClient.Apply(ctx, network, opts) } - _, err := c.fakeConfigV1NetworksClient.Get(ctx, *infrastructure.Name, metav1.GetOptions{}) + _, err := c.fakeConfigV1NetworksClient.Get(ctx, *network.Name, metav1.GetOptions{}) if err == nil { return nil, &OperationNotPermitted{Action: "apply"} } if apierrors.IsNotFound(err) { - return
c.configV1NetworksClient.Apply(ctx, infrastructure, opts) + return c.configV1NetworksClient.Apply(ctx, network, opts) } return nil, err } -func (c *ConfigV1NetworksClientShim) ApplyStatus(ctx context.Context, infrastructure *applyconfigv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (*apiconfigv1.Network, error) { +func (c *ConfigV1NetworksClientShim) ApplyStatus(ctx context.Context, network *applyconfigv1.NetworkApplyConfiguration, opts metav1.ApplyOptions) (*apiconfigv1.Network, error) { // Unable to determine existence of a static manifest - if infrastructure == nil || infrastructure.Name == nil { - return c.configV1NetworksClient.ApplyStatus(ctx, infrastructure, opts) + if network == nil || network.Name == nil { + return c.configV1NetworksClient.ApplyStatus(ctx, network, opts) } - _, err := c.fakeConfigV1NetworksClient.Get(ctx, *infrastructure.Name, metav1.GetOptions{}) + _, err := c.fakeConfigV1NetworksClient.Get(ctx, *network.Name, metav1.GetOptions{}) if err == nil { return nil, &OperationNotPermitted{Action: "applystatus"} } if apierrors.IsNotFound(err) { - return c.configV1NetworksClient.ApplyStatus(ctx, infrastructure, opts) + return c.configV1NetworksClient.ApplyStatus(ctx, network, opts) } return nil, err } diff --git a/test/extended/util/configv1shim_test.go b/test/extended/util/configv1shim_test.go index 42e90a41b9e1..8140211ceb5c 100644 --- a/test/extended/util/configv1shim_test.go +++ b/test/extended/util/configv1shim_test.go @@ -51,6 +51,40 @@ func createInfrastructureObject(name string) *configv1.Infrastructure { } } +func createNetworkObject(name string) *configv1.Network { + return &configv1.Network{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "config.openshift.io/v1", + Kind: "Infrastructure", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{}, + }, + Spec: configv1.NetworkSpec{ + ClusterNetwork: []configv1.ClusterNetworkEntry{ + { + CIDR: "10.128.0.0/14", + HostPrefix: 23, + }, + }, + NetworkType: "OVNKubernetes", + ServiceNetwork: []string{"172.30.0.0/16"}, + }, + Status: configv1.NetworkStatus{ + ClusterNetwork: []configv1.ClusterNetworkEntry{ + { + CIDR: "10.128.0.0/14", + HostPrefix: 23, + }, + }, + ClusterNetworkMTU: 8901, + NetworkType: "OVNKubernetes", + ServiceNetwork: []string{"172.30.0.0/16"}, + }, + } +} + func TestConfigClientShimErrorOnMutation(t *testing.T) { updateNotPermitted := OperationNotPermitted{Action: "update"} updatestatusNotPermitted := OperationNotPermitted{Action: "updatestatus"} @@ -60,21 +94,21 @@ func TestConfigClientShimErrorOnMutation(t *testing.T) { deleteNotPermitted := OperationNotPermitted{Action: "delete"} deleteCollectionNotPermitted := OperationNotPermitted{Action: "deletecollection"} - object := createInfrastructureObject("cluster") - object.Labels["deleteLabel"] = "somevalue" - object2 := createInfrastructureObject("cluster2") - object2.Labels["deleteLabel"] = "somevalue2" + staticObject := createInfrastructureObject("staticObject") + staticObject.Labels["deleteLabel"] = "somevalue" + realObject := createInfrastructureObject("realObject") + realObject.Labels["deleteLabel"] = "somevalue2" configClient := fakeconfigv1client.NewSimpleClientset( - object2, + realObject, ) client := NewConfigClientShim( configClient, - []runtime.Object{object}, + []runtime.Object{staticObject}, ) - _, err := client.ConfigV1().Infrastructures().Get(context.TODO(), object.Name, metav1.GetOptions{}) + _, err := client.ConfigV1().Infrastructures().Get(context.TODO(), staticObject.Name, metav1.GetOptions{}) if err != 
nil { t.Fatalf("Expected no error for a Get request, got %q instead", err) } @@ -84,95 +118,95 @@ func TestConfigClientShimErrorOnMutation(t *testing.T) { t.Fatalf("Expected no error for a List request, got %q instead", err) } - _, err = client.ConfigV1().Infrastructures().Update(context.TODO(), object, metav1.UpdateOptions{}) + _, err = client.ConfigV1().Infrastructures().Update(context.TODO(), staticObject, metav1.UpdateOptions{}) if err == nil || err.Error() != updateNotPermitted.Error() { t.Fatalf("Expected %q error for an Update request, got %q instead", updateNotPermitted.Error(), err) } - _, err = client.ConfigV1().Infrastructures().Update(context.TODO(), object2, metav1.UpdateOptions{}) + _, err = client.ConfigV1().Infrastructures().Update(context.TODO(), realObject, metav1.UpdateOptions{}) if err != nil { t.Fatalf("Expected no error for an Update request, got %q instead", err) } - _, err = client.ConfigV1().Infrastructures().UpdateStatus(context.TODO(), object, metav1.UpdateOptions{}) + _, err = client.ConfigV1().Infrastructures().UpdateStatus(context.TODO(), staticObject, metav1.UpdateOptions{}) if err == nil || err.Error() != updatestatusNotPermitted.Error() { t.Fatalf("Expected %q error for an UpdateStatus request, got %q instead", updatestatusNotPermitted.Error(), err) } - _, err = client.ConfigV1().Infrastructures().UpdateStatus(context.TODO(), object2, metav1.UpdateOptions{}) + _, err = client.ConfigV1().Infrastructures().UpdateStatus(context.TODO(), realObject, metav1.UpdateOptions{}) if err != nil { t.Fatalf("Expected no error for an UpdateStatus request, got %q instead", err) } - oldData, err := json.Marshal(object) + oldData, err := json.Marshal(staticObject) if err != nil { - t.Fatalf("Unable to marshal an object: %v", err) + t.Fatalf("Unable to marshal the staticObject: %v", err) } - object.Labels["key"] = "value" - newData, err := json.Marshal(object) + staticObject.Labels["key"] = "value" + newData, err := json.Marshal(staticObject) if err != nil { t.Fatalf("Unable to marshal an object: %v", err) } - delete(object.Labels, "key") + delete(staticObject.Labels, "key") patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &configv1.Infrastructure{}) if err != nil { t.Fatalf("Unable to create a patch: %v", err) } - _, err = client.ConfigV1().Infrastructures().Patch(context.TODO(), object.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + _, err = client.ConfigV1().Infrastructures().Patch(context.TODO(), staticObject.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) if err == nil || err.Error() != patchNotPermitted.Error() { t.Fatalf("Expected %q error for a Patch request, got %q instead", patchNotPermitted.Error(), err) } - _, err = client.ConfigV1().Infrastructures().Patch(context.TODO(), object2.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + _, err = client.ConfigV1().Infrastructures().Patch(context.TODO(), realObject.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) if err != nil { t.Fatalf("Expected no error for a Patch request, got %q instead", err) } - applyConfig, err := applyconfigv1.ExtractInfrastructure(object, "test-mgr") + applyConfig, err := applyconfigv1.ExtractInfrastructure(staticObject, "test-mgr") if err != nil { - t.Fatalf("Unable to construct an apply config for %v: %v", object.Name, err) + t.Fatalf("Unable to construct an apply config for %v: %v", staticObject.Name, err) } _, err = client.ConfigV1().Infrastructures().Apply(context.TODO(),
applyConfig, metav1.ApplyOptions{FieldManager: "test-mgr", Force: true}) if err == nil || err.Error() != applyNotPermitted.Error() { t.Fatalf("Expected %q error for an Apply request, got %q instead", applyNotPermitted.Error(), err) } - applyConfig2, err := applyconfigv1.ExtractInfrastructure(object2, "test-mgr") + applyConfig2, err := applyconfigv1.ExtractInfrastructure(realObject, "test-mgr") if err != nil { - t.Fatalf("Unable to construct an apply config for %v: %v", object2.Name, err) + t.Fatalf("Unable to construct an apply config for %v: %v", realObject.Name, err) } _, err = client.ConfigV1().Infrastructures().Apply(context.TODO(), applyConfig2, metav1.ApplyOptions{FieldManager: "test-mgr", Force: true}) if err != nil { t.Fatalf("Expected no error for an Apply request, got %q instead", err) } - applyStatusConfig, err := applyconfigv1.ExtractInfrastructureStatus(object, "test-mgr") + applyStatusConfig, err := applyconfigv1.ExtractInfrastructureStatus(staticObject, "test-mgr") if err != nil { - t.Fatalf("Unable to construct an apply status config for %v: %v", object.Name, err) + t.Fatalf("Unable to construct an apply status config for %v: %v", staticObject.Name, err) } _, err = client.ConfigV1().Infrastructures().ApplyStatus(context.TODO(), applyStatusConfig, metav1.ApplyOptions{FieldManager: "test-mgr", Force: true}) if err == nil || err.Error() != applyStatusNotPermitted.Error() { t.Fatalf("Expected %q error for an ApplyStatus request, got %q instead", applyStatusNotPermitted.Error(), err) } - applyStatusConfig2, err := applyconfigv1.ExtractInfrastructureStatus(object2, "test-mgr") + applyStatusConfig2, err := applyconfigv1.ExtractInfrastructureStatus(realObject, "test-mgr") if err != nil { - t.Fatalf("Unable to construct an apply status config for %v: %v", object2.Name, err) + t.Fatalf("Unable to construct an apply status config for %v: %v", realObject.Name, err) } _, err = client.ConfigV1().Infrastructures().ApplyStatus(context.TODO(), applyStatusConfig2, metav1.ApplyOptions{FieldManager: "test-mgr", Force: true}) if err != nil { t.Fatalf("Expected no error for an ApplyStatus request, got %q instead", err) } - err = client.ConfigV1().Infrastructures().Delete(context.TODO(), object.Name, metav1.DeleteOptions{}) + err = client.ConfigV1().Infrastructures().Delete(context.TODO(), staticObject.Name, metav1.DeleteOptions{}) if err == nil || err.Error() != deleteNotPermitted.Error() { t.Fatalf("Expected %q error for a Delete request, got %q instead", deleteNotPermitted.Error(), err) } - err = client.ConfigV1().Infrastructures().Delete(context.TODO(), object2.Name, metav1.DeleteOptions{}) + err = client.ConfigV1().Infrastructures().Delete(context.TODO(), realObject.Name, metav1.DeleteOptions{}) if err != nil { t.Fatalf("Expected no error for a Delete request, got %q instead", err) } @@ -197,37 +231,140 @@ func TestConfigClientShimWatchRequest(t *testing.T) { name string staticObjects []runtime.Object realObjects []runtime.Object + fieldSelector string + watch func(client *ConfigClientShim, ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) expectedWatchEvents []watch.Event }{ { - name: "merging static and real objects, not object override", + name: "merging static and real infrastructure objects, not object override", staticObjects: []runtime.Object{ - createInfrastructureObject("cluster"), + createInfrastructureObject("staticObject"), }, realObjects: []runtime.Object{ - createInfrastructureObject("cluster2"), - createInfrastructureObject("cluster3"), + 
createInfrastructureObject("realObject"), + createInfrastructureObject("realObject2"), + }, + watch: func(client *ConfigClientShim, ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return client.ConfigV1().Infrastructures().Watch(ctx, opts) }, expectedWatchEvents: []watch.Event{ - {Type: watch.Added, Object: createInfrastructureObject("cluster")}, - {Type: watch.Added, Object: createInfrastructureObject("cluster2")}, - {Type: watch.Added, Object: createInfrastructureObject("cluster3")}, + {Type: watch.Added, Object: createInfrastructureObject("staticObject")}, + {Type: watch.Added, Object: createInfrastructureObject("realObject")}, + {Type: watch.Added, Object: createInfrastructureObject("realObject2")}, }, }, { - name: "merging static and real objects, static object override", + name: "merging static and real infrastructure objects, static object override", staticObjects: []runtime.Object{ - createInfrastructureObject("cluster"), + createInfrastructureObject("staticObject"), }, realObjects: []runtime.Object{ - createInfrastructureObject("cluster"), - createInfrastructureObject("cluster2"), + createInfrastructureObject("staticObject"), + createInfrastructureObject("realObject"), + }, + watch: func(client *ConfigClientShim, ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return client.ConfigV1().Infrastructures().Watch(ctx, opts) }, expectedWatchEvents: []watch.Event{ - {Type: watch.Added, Object: createInfrastructureObject("cluster")}, - {Type: watch.Added, Object: createInfrastructureObject("cluster2")}, + {Type: watch.Added, Object: createInfrastructureObject("staticObject")}, + {Type: watch.Added, Object: createInfrastructureObject("realObject")}, }, }, + { + name: "merging static and real infrastructure objects, field selector match", + staticObjects: []runtime.Object{ + createInfrastructureObject("staticObject"), + }, + realObjects: []runtime.Object{ + createInfrastructureObject("staticObject"), + }, + watch: func(client *ConfigClientShim, ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return client.ConfigV1().Infrastructures().Watch(ctx, opts) + }, + fieldSelector: "metadata.name==staticObject", + expectedWatchEvents: []watch.Event{ + {Type: watch.Added, Object: createInfrastructureObject("staticObject")}, + }, + }, + { + name: "merging static and real infrastructure objects, field selector no match", + staticObjects: []runtime.Object{ + createInfrastructureObject("staticObject"), + }, + realObjects: []runtime.Object{ + createInfrastructureObject("staticObject"), + }, + watch: func(client *ConfigClientShim, ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return client.ConfigV1().Infrastructures().Watch(ctx, opts) + }, + fieldSelector: "metadata.name=!staticObject", + expectedWatchEvents: []watch.Event{}, + }, + { + name: "merging static and real network objects, not object override", + staticObjects: []runtime.Object{ + createNetworkObject("staticObject"), + }, + realObjects: []runtime.Object{ + createNetworkObject("realObject"), + createNetworkObject("realObject2"), + }, + watch: func(client *ConfigClientShim, ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return client.ConfigV1().Networks().Watch(ctx, opts) + }, + expectedWatchEvents: []watch.Event{ + {Type: watch.Added, Object: createNetworkObject("staticObject")}, + {Type: watch.Added, Object: createNetworkObject("realObject")}, + {Type: watch.Added, Object: createNetworkObject("realObject2")}, + 
}, + }, + { + name: "merging static and real network objects, static object override", + staticObjects: []runtime.Object{ + createNetworkObject("staticObject"), + }, + realObjects: []runtime.Object{ + createNetworkObject("staticObject"), + createNetworkObject("realObject"), + }, + watch: func(client *ConfigClientShim, ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return client.ConfigV1().Networks().Watch(ctx, opts) + }, + expectedWatchEvents: []watch.Event{ + {Type: watch.Added, Object: createNetworkObject("staticObject")}, + {Type: watch.Added, Object: createNetworkObject("realObject")}, + }, + }, + { + name: "merging static and real network objects, field selector match", + staticObjects: []runtime.Object{ + createNetworkObject("staticObject"), + }, + realObjects: []runtime.Object{ + createNetworkObject("staticObject"), + }, + watch: func(client *ConfigClientShim, ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return client.ConfigV1().Networks().Watch(ctx, opts) + }, + fieldSelector: "metadata.name==staticObject", + expectedWatchEvents: []watch.Event{ + {Type: watch.Added, Object: createNetworkObject("staticObject")}, + }, + }, + { + name: "merging static and real network objects, field selector no match", + staticObjects: []runtime.Object{ + createNetworkObject("staticObject"), + }, + realObjects: []runtime.Object{ + createNetworkObject("staticObject"), + }, + watch: func(client *ConfigClientShim, ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return client.ConfigV1().Networks().Watch(ctx, opts) + }, + fieldSelector: "metadata.name=!staticObject", + expectedWatchEvents: []watch.Event{}, + }, } for _, test := range tests { @@ -240,7 +377,7 @@ func TestConfigClientShimWatchRequest(t *testing.T) { ) // The watch request has to created first when using the fake clientset - resultChan, err := client.ConfigV1().Infrastructures().Watch(context.TODO(), metav1.ListOptions{}) + resultChan, err := test.watch(client, context.TODO(), metav1.ListOptions{FieldSelector: test.fieldSelector}) if err != nil { t.Fatalf("Expected no error, got %q instead", err) } @@ -267,10 +404,166 @@ t.Errorf("failed waiting for watch event") } } - if eventCounter < size { + if eventCounter != size { t.Errorf("Expected %v watch events, got %v instead", eventCounter, size) + }) + } +} + +func TestConfigClientShimList(t *testing.T) { + + staticObject := createInfrastructureObject("staticObject") + staticObject.Labels["static"] = "true" + + realObject1 := createInfrastructureObject("staticObject") + realObject1.Labels["static"] = "false" + + realObject2 := createInfrastructureObject("realObject") + realObject2.Labels["static"] = "false" + + configClient := fakeconfigv1client.NewSimpleClientset( + realObject1, + realObject2, + ) + + client := NewConfigClientShim( + configClient, + []runtime.Object{staticObject}, + ) + + listItems, err := client.ConfigV1().Infrastructures().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + t.Fatalf("Expected no error for a List request, got %q instead", err) + } + + if len(listItems.Items) != 2 { + t.Fatalf("Expected exactly two items in the list, got %v instead", len(listItems.Items)) + } + + var staticObj *configv1.Infrastructure + + for _, item := range listItems.Items { + if item.Name == "staticObject" { + obj := item + staticObj = &obj + break + } + } + + if staticObj == nil { + t.Fatalf("Expected to find a static object, found none") +
} + + if staticObj.Labels["static"] == "false" { + t.Fatalf("Expected static object, not real object") + } +} + +func TestConfigClientShimListInfrastructureFieldSelector(t *testing.T) { + + tests := []struct { + name string + fieldSelector string + expectedLen int + }{ + { + name: "field selector matches", + fieldSelector: "metadata.name=staticObject", + expectedLen: 1, + }, + { + name: "field selector does not match", + fieldSelector: "metadata.name!=staticObject", + expectedLen: 0, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + staticObject := createInfrastructureObject("staticObject") + staticObject.Labels["static"] = "true" + + realObject := createInfrastructureObject("staticObject") + realObject.Labels["static"] = "false" + + configClient := fakeconfigv1client.NewSimpleClientset( + realObject, + ) + + client := NewConfigClientShim( + configClient, + []runtime.Object{staticObject}, + ) + + listItems, err := client.ConfigV1().Infrastructures().List(context.TODO(), metav1.ListOptions{FieldSelector: test.fieldSelector}) + if err != nil { + t.Fatalf("Expected no error for a List request, got %q instead", err) + } + + if len(listItems.Items) != test.expectedLen { + t.Fatalf("Expected %v items in the list, got %v instead", test.expectedLen, len(listItems.Items)) + } + + if test.expectedLen > 0 { + if listItems.Items[0].Labels["static"] == "false" { + t.Fatalf("Expected static object, not real object") + } + } + }) + } +} + +func TestConfigClientShimListNetworkFieldSelector(t *testing.T) { + + tests := []struct { + name string + fieldSelector string + expectedLen int + }{ + { + name: "field selector matches", + fieldSelector: "metadata.name=staticObject", + expectedLen: 1, + }, + { + name: "field selector does not match", + fieldSelector: "metadata.name!=staticObject", + expectedLen: 0, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + staticObject := createNetworkObject("staticObject") + staticObject.Labels["static"] = "true" + + realObject := createNetworkObject("staticObject") + realObject.Labels["static"] = "false" + + configClient := fakeconfigv1client.NewSimpleClientset( + realObject, + ) + + client := NewConfigClientShim( + configClient, + []runtime.Object{staticObject}, + ) + + listItems, err := client.ConfigV1().Networks().List(context.TODO(), metav1.ListOptions{FieldSelector: test.fieldSelector}) + if err != nil { + t.Fatalf("Expected no error for a List request, got %q instead", err) + } + if len(listItems.Items) != test.expectedLen { + t.Fatalf("Expected %v items in the list, got %v instead", test.expectedLen, len(listItems.Items)) + } + + if test.expectedLen > 0 { + if listItems.Items[0].Labels["static"] == "false" { + t.Fatalf("Expected static object, not real object") + } + } }) } } diff --git a/test/extended/util/framework.go b/test/extended/util/framework.go index 9309d6e9e86a..67eaa6f7a50c 100644 --- a/test/extended/util/framework.go +++ b/test/extended/util/framework.go @@ -2099,7 +2099,7 @@ func collectConfigManifestsFromDir(configManifestsDir string) (error, []runtime. 
return nil } - body, err := ioutil.ReadFile(path) + body, err := os.ReadFile(path) if err != nil { return err } From 413c8cbf586ee4c2f669d114526328e41a1723e7 Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Fri, 10 Mar 2023 16:57:18 +0100 Subject: [PATCH 07/15] Document expected content of the static manifest directory --- test/extended/util/client.go | 36 ++++++++++++++++++--------------- test/extended/util/framework.go | 2 ++ 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/test/extended/util/client.go b/test/extended/util/client.go index 86882bda3384..39bbcfa236f4 100644 --- a/test/extended/util/client.go +++ b/test/extended/util/client.go @@ -79,23 +79,27 @@ import ( // CLI provides function to call the OpenShift CLI and Kubernetes and OpenShift // clients. type CLI struct { - execPath string - verb string - configPath string - adminConfigPath string + execPath string + verb string + configPath string + adminConfigPath string + + // directory with static manifests, each file is expected to be a single manifest + // manifest files can be stored under directory tree staticConfigManifestDir string - token string - username string - globalArgs []string - commandArgs []string - finalArgs []string - namespacesToDelete []string - stdin *bytes.Buffer - stdout io.Writer - stderr io.Writer - verbose bool - withoutNamespace bool - kubeFramework *framework.Framework + + token string + username string + globalArgs []string + commandArgs []string + finalArgs []string + namespacesToDelete []string + stdin *bytes.Buffer + stdout io.Writer + stderr io.Writer + verbose bool + withoutNamespace bool + kubeFramework *framework.Framework // read from a static manifest directory (set through STATIC_CONFIG_MANIFEST_DIR env) configObjects []runtime.Object diff --git a/test/extended/util/framework.go b/test/extended/util/framework.go index 67eaa6f7a50c..f577f4ee8891 100644 --- a/test/extended/util/framework.go +++ b/test/extended/util/framework.go @@ -1393,6 +1393,8 @@ func KubeConfigPath() string { } // StaticConfigManifestDir returns the value of STATIC_CONFIG_MANIFEST_DIR environment variable +// It points to a directory with static manifests, each file is expected to be a single manifest. +// Manifest files can be stored under directory tree. 
func StaticConfigManifestDir() string { return os.Getenv("STATIC_CONFIG_MANIFEST_DIR") } From 19713581844abd22f06c6809f3bb043f406b36e9 Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Fri, 10 Mar 2023 16:57:42 +0100 Subject: [PATCH 08/15] Discovery shim for config.openshift.io/v1 --- test/extended/util/configv1shim.go | 217 ++++++++++++++++++++++++++++- 1 file changed, 215 insertions(+), 2 deletions(-) diff --git a/test/extended/util/configv1shim.go b/test/extended/util/configv1shim.go index db7ea857601e..9b2d43bd4def 100644 --- a/test/extended/util/configv1shim.go +++ b/test/extended/util/configv1shim.go @@ -12,19 +12,76 @@ import ( configv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" configv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" - "k8s.io/klog/v2" + openapi_v2 "github.com/google/gnostic/openapiv2" + "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/version" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" + "k8s.io/client-go/openapi" "k8s.io/client-go/rest" + restclient "k8s.io/client-go/rest" + "k8s.io/klog/v2" +) + +var ( + configGroup = "config.openshift.io" + configVersion = "v1" + configGroupVersion = "config.openshift.io/v1" ) +func configAPIGroup() *metav1.APIGroup { + return &metav1.APIGroup{ + Name: configGroup, + Versions: []metav1.GroupVersionForDiscovery{ + { + GroupVersion: configGroupVersion, + Version: configVersion, + }, + }, + PreferredVersion: metav1.GroupVersionForDiscovery{ + GroupVersion: configGroupVersion, + Version: configVersion, + }, + // ServerAddressByClientCIDRs is empty + } +} + +func configGroupVersionForDiscovery() metav1.GroupVersionForDiscovery { + return metav1.GroupVersionForDiscovery{ + GroupVersion: configGroupVersion, + Version: configVersion, + } +} + +func mergeResources(aList, bList []metav1.APIResource) []metav1.APIResource { + knownKinds := make(map[string]struct{}) + for _, item := range aList { + knownKinds[item.Kind] = struct{}{} + } + resources := append([]metav1.APIResource{}, bList...) + for _, item := range bList { + if _, exists := knownKinds[item.Kind]; exists { + continue + } + resources = append(resources, item) + } + return resources +} + +func newAPIResourceList(groupVersion string, aList, bList []metav1.APIResource) *metav1.APIResourceList { + return &metav1.APIResourceList{ + GroupVersion: groupVersion, + APIResources: mergeResources(aList, bList), + } +} + // ConfigClientShim makes sure whenever there's a static // manifest present for a config v1 kind, fake client is used // instead of the real one. 
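To make the directory contract documented above concrete: a loader satisfying it could look roughly like the following. This is a hedged sketch, not the patch's collectConfigManifestsFromDir; the scheme wiring through configv1.Install and the choice of the universal deserializer are assumptions.

package main

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"

	configv1 "github.com/openshift/api/config/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

// loadStaticManifests honors the documented contract: walk the directory
// tree named by STATIC_CONFIG_MANIFEST_DIR and decode each file as exactly
// one config.openshift.io manifest.
func loadStaticManifests() ([]runtime.Object, error) {
	dir := os.Getenv("STATIC_CONFIG_MANIFEST_DIR")
	if dir == "" {
		return nil, nil // nothing to inject
	}
	scheme := runtime.NewScheme()
	if err := configv1.Install(scheme); err != nil {
		return nil, err
	}
	decoder := serializer.NewCodecFactory(scheme).UniversalDeserializer()

	var objects []runtime.Object
	err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
		if err != nil || d.IsDir() {
			return err
		}
		body, err := os.ReadFile(path)
		if err != nil {
			return err
		}
		obj, _, err := decoder.Decode(body, nil, nil)
		if err != nil {
			return fmt.Errorf("unable to decode %s: %w", path, err)
		}
		objects = append(objects, obj)
		return nil
	})
	return objects, err
}

func main() {
	objects, err := loadStaticManifests()
	if err != nil {
		panic(err)
	}
	fmt.Printf("injected %d static manifest(s)\n", len(objects))
}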
@@ -35,8 +92,164 @@ type ConfigClientShim struct { } func (c *ConfigClientShim) Discovery() discovery.DiscoveryInterface { - return c.configClient.Discovery() + return &ConfigV1DiscoveryClientShim{ + configClient: c.configClient, + fakeClient: c.fakeClient, + } +} + +type ConfigV1DiscoveryClientShim struct { + configClient configv1client.Interface + fakeClient *fakeconfigv1client.Clientset +} + +func (c *ConfigV1DiscoveryClientShim) ServerGroups() (*metav1.APIGroupList, error) { + groups, err := c.configClient.Discovery().ServerGroups() + if err != nil { + return groups, err + } + hasConfigGroup := false + for i, group := range groups.Groups { + if group.Name == configGroup { + hasConfigGroup = true + hasV1Version := false + for _, version := range group.Versions { + if version.Version == configVersion { + hasV1Version = true + break + } + } + if !hasV1Version { + groups.Groups[i].Versions = append(groups.Groups[i].Versions, configGroupVersionForDiscovery()) + } + break + } + } + if !hasConfigGroup { + groups.Groups = append(groups.Groups, *configAPIGroup()) + } + return groups, nil +} + +func (c *ConfigV1DiscoveryClientShim) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + if groupVersion != configGroupVersion { + return c.configClient.Discovery().ServerResourcesForGroupVersion(groupVersion) + } + fakeList, err := c.fakeClient.Discovery().ServerResourcesForGroupVersion(groupVersion) + if err != nil { + return nil, err + } + realList, err := c.configClient.Discovery().ServerResourcesForGroupVersion(groupVersion) + if err == nil { + return newAPIResourceList(groupVersion, fakeList.APIResources, realList.APIResources), nil + } + if errors.IsNotFound(err) { + return newAPIResourceList(groupVersion, []metav1.APIResource{}, fakeList.APIResources), nil + } + return realList, err +} + +func (c *ConfigV1DiscoveryClientShim) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + groups, resources, err := c.configClient.Discovery().ServerGroupsAndResources() + if err != nil { + return groups, resources, err + } + if groups != nil { + hasConfigGroup := false + for i, group := range groups { + if group.Name == configGroup { + hasConfigGroup = true + hasV1Version := false + for _, version := range group.Versions { + if version.Version == configVersion { + hasV1Version = true + break + } + } + if !hasV1Version { + groups[i].Versions = append(groups[i].Versions, configGroupVersionForDiscovery()) + } + break + } + } + if !hasConfigGroup { + groups = append(groups, configAPIGroup()) + } + } + + if resources != nil { + fakeList, err := c.fakeClient.Discovery().ServerResourcesForGroupVersion(configGroupVersion) + if err != nil { + return nil, nil, err + } + hasConfigGroup := false + for i, resource := range resources { + if resource.GroupVersion == configGroupVersion { + hasConfigGroup = true + resources[i].APIResources = mergeResources(fakeList.APIResources, resources[i].APIResources) + } + } + if !hasConfigGroup { + resources = append(resources, newAPIResourceList(configGroupVersion, []metav1.APIResource{}, fakeList.APIResources)) + } + } + + return groups, resources, nil +} + +func (c *ConfigV1DiscoveryClientShim) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + resources, err := c.configClient.Discovery().ServerPreferredResources() + if err != nil { + return nil, err + } + + fakeList, err := c.fakeClient.Discovery().ServerResourcesForGroupVersion(configGroupVersion) + if err != nil { + return nil, err + } + + 
hasConfigGroup := false + for i, resource := range resources { + if resource.GroupVersion == configGroupVersion { + hasConfigGroup = true + resources[i].APIResources = mergeResources(fakeList.APIResources, resources[i].APIResources) + } + } + + if !hasConfigGroup { + resources = append(resources, newAPIResourceList(configGroupVersion, []metav1.APIResource{}, fakeList.APIResources)) + } + + return resources, nil +} + +func (c *ConfigV1DiscoveryClientShim) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + return c.configClient.Discovery().ServerPreferredNamespacedResources() +} + +func (c *ConfigV1DiscoveryClientShim) ServerVersion() (*version.Info, error) { + return c.configClient.Discovery().ServerVersion() +} + +func (c *ConfigV1DiscoveryClientShim) OpenAPISchema() (*openapi_v2.Document, error) { + // TODO(jchaloup): once needed implement this method as well + panic(fmt.Errorf("APIServer not implemented")) } + +func (c *ConfigV1DiscoveryClientShim) OpenAPIV3() openapi.Client { + return c.configClient.Discovery().OpenAPIV3() +} + +func (c *ConfigV1DiscoveryClientShim) WithLegacy() discovery.DiscoveryInterface { + return c.configClient.Discovery().WithLegacy() +} + +func (c *ConfigV1DiscoveryClientShim) RESTClient() restclient.Interface { + return c.configClient.Discovery().RESTClient() +} + +var _ discovery.DiscoveryInterface = &ConfigV1DiscoveryClientShim{} + func (c *ConfigClientShim) ConfigV1() configv1.ConfigV1Interface { return &ConfigV1ClientShim{ configv1: c.configClient.ConfigV1(), From 588e1c804557b03f60559eedffb47ecc93bbcdbd Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Fri, 10 Mar 2023 17:04:34 +0100 Subject: [PATCH 09/15] Check abundance of events, check existence of a real object in a list --- go.mod | 2 +- test/extended/util/configv1shim_test.go | 17 ++++++++++++++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 3553602b20af..d8cf4377813a 100644 --- a/go.mod +++ b/go.mod @@ -13,6 +13,7 @@ require ( github.com/go-bindata/go-bindata v3.1.2+incompatible github.com/go-ldap/ldap/v3 v3.4.3 github.com/golang/protobuf v1.5.2 + github.com/google/gnostic v0.5.7-v3refs github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.1.2 github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.3.0 @@ -138,7 +139,6 @@ require ( github.com/google/btree v1.0.1 // indirect github.com/google/cadvisor v0.46.0 // indirect github.com/google/cel-go v0.12.6 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/googleapis/gax-go/v2 v2.1.1 // indirect diff --git a/test/extended/util/configv1shim_test.go b/test/extended/util/configv1shim_test.go index 8140211ceb5c..8ae837845e88 100644 --- a/test/extended/util/configv1shim_test.go +++ b/test/extended/util/configv1shim_test.go @@ -391,6 +391,7 @@ func TestConfigClientShimWatchRequest(t *testing.T) { // verify the watch events ticker := time.NewTicker(500 * time.Millisecond) size := len(test.expectedWatchEvents) + eventsSize := len(resultChan.ResultChan()) eventCounter := 0 for i := 0; i < size; i++ { select { @@ -404,8 +405,11 @@ func TestConfigClientShimWatchRequest(t *testing.T) { t.Errorf("failed waiting for watch event") } } - if eventCounter != size { - t.Errorf("Expected %v watch events, got %v instead", eventCounter, size) 
+ if eventCounter < size { + t.Errorf("Expected %v watch events, got %v instead", size, eventCounter) + } + if eventsSize > size { + t.Errorf("Expected %v watch events, got %v instead", size, eventsSize) } }) } @@ -442,12 +446,15 @@ func TestConfigClientShimList(t *testing.T) { } var staticObj *configv1.Infrastructure + realObjFound := false for _, item := range listItems.Items { if item.Name == "staticObject" { obj := item staticObj = &obj - break + } + if item.Name == "realObject" { + realObjFound = true } } @@ -458,6 +465,10 @@ func TestConfigClientShimList(t *testing.T) { if staticObj.Labels["static"] == "false" { t.Fatalf("Expected static object, not real object") } + + if !realObjFound { + t.Fatalf("Unable to find a real object in the list") + } } func TestConfigClientShimListInfrastructureFieldSelector(t *testing.T) { From 7aa71fd4fcaf0e64cf3fe5d44ea6101bee944303 Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Mon, 20 Mar 2023 14:08:54 +0100 Subject: [PATCH 10/15] Unit test config v1 shim discovery methods --- test/extended/util/configv1shim.go | 114 ++++-- test/extended/util/configv1shim_test.go | 439 +++++++++++++++++++++++- 2 files changed, 521 insertions(+), 32 deletions(-) diff --git a/test/extended/util/configv1shim.go b/test/extended/util/configv1shim.go index 9b2d43bd4def..bfaabfae0938 100644 --- a/test/extended/util/configv1shim.go +++ b/test/extended/util/configv1shim.go @@ -65,7 +65,7 @@ func mergeResources(aList, bList []metav1.APIResource) []metav1.APIResource { for _, item := range aList { knownKinds[item.Kind] = struct{}{} } - resources := append([]metav1.APIResource{}, bList...) + resources := append([]metav1.APIResource{}, aList...) for _, item := range bList { if _, exists := knownKinds[item.Kind]; exists { continue @@ -93,14 +93,16 @@ type ConfigClientShim struct { func (c *ConfigClientShim) Discovery() discovery.DiscoveryInterface { return &ConfigV1DiscoveryClientShim{ - configClient: c.configClient, - fakeClient: c.fakeClient, + configClient: c.configClient, + fakeClient: c.fakeClient, + hasConfigV1Kinds: len(c.v1Kinds) > 0, } } type ConfigV1DiscoveryClientShim struct { - configClient configv1client.Interface - fakeClient *fakeconfigv1client.Clientset + configClient configv1client.Interface + fakeClient *fakeconfigv1client.Clientset + hasConfigV1Kinds bool } func (c *ConfigV1DiscoveryClientShim) ServerGroups() (*metav1.APIGroupList, error) { @@ -119,20 +121,20 @@ func (c *ConfigV1DiscoveryClientShim) ServerGroups() (*metav1.APIGroupList, erro break } } - if !hasV1Version { + if c.hasConfigV1Kinds && !hasV1Version { groups.Groups[i].Versions = append(groups.Groups[i].Versions, configGroupVersionForDiscovery()) } break } } - if !hasConfigGroup { + if c.hasConfigV1Kinds && !hasConfigGroup { groups.Groups = append(groups.Groups, *configAPIGroup()) } return groups, nil } func (c *ConfigV1DiscoveryClientShim) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { - if groupVersion != configGroupVersion { + if !c.hasConfigV1Kinds || groupVersion != configGroupVersion { return c.configClient.Discovery().ServerResourcesForGroupVersion(groupVersion) } fakeList, err := c.fakeClient.Discovery().ServerResourcesForGroupVersion(groupVersion) @@ -166,13 +168,13 @@ func (c *ConfigV1DiscoveryClientShim) ServerGroupsAndResources() ([]*metav1.APIG break } } - if !hasV1Version { + if c.hasConfigV1Kinds && !hasV1Version { groups[i].Versions = append(groups[i].Versions, configGroupVersionForDiscovery()) } break } } - if !hasConfigGroup { + if 
c.hasConfigV1Kinds && !hasConfigGroup { groups = append(groups, configAPIGroup()) } } @@ -186,10 +188,12 @@ func (c *ConfigV1DiscoveryClientShim) ServerGroupsAndResources() ([]*metav1.APIG for i, resource := range resources { if resource.GroupVersion == configGroupVersion { hasConfigGroup = true - resources[i].APIResources = mergeResources(fakeList.APIResources, resources[i].APIResources) + if c.hasConfigV1Kinds { + resources[i].APIResources = mergeResources(fakeList.APIResources, resources[i].APIResources) + } } } - if !hasConfigGroup { + if c.hasConfigV1Kinds && !hasConfigGroup { resources = append(resources, newAPIResourceList(configGroupVersion, []metav1.APIResource{}, fakeList.APIResources)) } } @@ -212,11 +216,13 @@ func (c *ConfigV1DiscoveryClientShim) ServerPreferredResources() ([]*metav1.APIR for i, resource := range resources { if resource.GroupVersion == configGroupVersion { hasConfigGroup = true - resources[i].APIResources = mergeResources(fakeList.APIResources, resources[i].APIResources) + if c.hasConfigV1Kinds { + resources[i].APIResources = mergeResources(fakeList.APIResources, resources[i].APIResources) + } } } - if !hasConfigGroup { + if c.hasConfigV1Kinds && !hasConfigGroup { resources = append(resources, newAPIResourceList(configGroupVersion, []metav1.APIResource{}, fakeList.APIResources)) } @@ -237,15 +243,18 @@ func (c *ConfigV1DiscoveryClientShim) OpenAPISchema() (*openapi_v2.Document, err } func (c *ConfigV1DiscoveryClientShim) OpenAPIV3() openapi.Client { - return c.configClient.Discovery().OpenAPIV3() + // TODO(jchaloup): once needed implement this method as well + panic(fmt.Errorf("APIServer not implemented")) } func (c *ConfigV1DiscoveryClientShim) WithLegacy() discovery.DiscoveryInterface { - return c.configClient.Discovery().WithLegacy() + // TODO(jchaloup): once needed implement this method as well + panic(fmt.Errorf("APIServer not implemented")) } func (c *ConfigV1DiscoveryClientShim) RESTClient() restclient.Interface { - return c.configClient.Discovery().RESTClient() + // TODO(jchaloup): once needed implement this method as well + panic(fmt.Errorf("APIServer not implemented")) } var _ discovery.DiscoveryInterface = &ConfigV1DiscoveryClientShim{} @@ -563,7 +572,6 @@ func (c *ConfigV1InfrastructuresClientShim) Watch(ctx context.Context, opts meta // static manifests do not produce any watch event besides create // If the object exists, no need to generate the ADDED watch event - // INFO: field selectors will not work staticObjList, err := c.fakeConfigV1InfrastructuresClient.List(ctx, opts) if err != nil { return nil, err @@ -664,35 +672,35 @@ type ConfigV1NetworksClientShim struct { var _ configv1.NetworkInterface = &ConfigV1NetworksClientShim{} -func (c *ConfigV1NetworksClientShim) Create(ctx context.Context, infrastructure *apiconfigv1.Network, opts metav1.CreateOptions) (*apiconfigv1.Network, error) { - _, err := c.fakeConfigV1NetworksClient.Get(ctx, infrastructure.Name, metav1.GetOptions{}) +func (c *ConfigV1NetworksClientShim) Create(ctx context.Context, network *apiconfigv1.Network, opts metav1.CreateOptions) (*apiconfigv1.Network, error) { + _, err := c.fakeConfigV1NetworksClient.Get(ctx, network.Name, metav1.GetOptions{}) if err == nil { return nil, &OperationNotPermitted{Action: "create"} } if apierrors.IsNotFound(err) { - return c.configV1NetworksClient.Create(ctx, infrastructure, opts) + return c.configV1NetworksClient.Create(ctx, network, opts) } return nil, err } -func (c *ConfigV1NetworksClientShim) Update(ctx context.Context, infrastructure 
*apiconfigv1.Network, opts metav1.UpdateOptions) (*apiconfigv1.Network, error) { - _, err := c.fakeConfigV1NetworksClient.Get(ctx, infrastructure.Name, metav1.GetOptions{}) +func (c *ConfigV1NetworksClientShim) Update(ctx context.Context, network *apiconfigv1.Network, opts metav1.UpdateOptions) (*apiconfigv1.Network, error) { + _, err := c.fakeConfigV1NetworksClient.Get(ctx, network.Name, metav1.GetOptions{}) if err == nil { return nil, &OperationNotPermitted{Action: "update"} } if apierrors.IsNotFound(err) { - return c.configV1NetworksClient.Update(ctx, infrastructure, opts) + return c.configV1NetworksClient.Update(ctx, network, opts) } return nil, err } -func (c *ConfigV1NetworksClientShim) UpdateStatus(ctx context.Context, infrastructure *apiconfigv1.Network, opts metav1.UpdateOptions) (*apiconfigv1.Network, error) { - _, err := c.fakeConfigV1NetworksClient.Get(ctx, infrastructure.Name, metav1.GetOptions{}) +func (c *ConfigV1NetworksClientShim) UpdateStatus(ctx context.Context, network *apiconfigv1.Network, opts metav1.UpdateOptions) (*apiconfigv1.Network, error) { + _, err := c.fakeConfigV1NetworksClient.Get(ctx, network.Name, metav1.GetOptions{}) if err == nil { return nil, &OperationNotPermitted{Action: "updatestatus"} } if apierrors.IsNotFound(err) { - return c.configV1NetworksClient.UpdateStatus(ctx, infrastructure, opts) + return c.configV1NetworksClient.UpdateStatus(ctx, network, opts) } return nil, err } @@ -795,7 +803,6 @@ func (c *ConfigV1NetworksClientShim) Watch(ctx context.Context, opts metav1.List // static manifests do not produce any watch event besides create // If the object exists, no need to generate the ADDED watch event - // INFO: field selectors will not work staticObjList, err := c.fakeConfigV1NetworksClient.List(ctx, opts) if err != nil { return nil, err @@ -994,6 +1001,41 @@ func (e OperationNotPermitted) Error() string { return fmt.Sprintf("operation %q not permitted", e.Action) } +func configV1InfrastructureAPIResources() []metav1.APIResource { + return []metav1.APIResource{metav1.APIResource{ + Name: "infrastructures", + SingularName: "infrastructure", + Namespaced: false, + Kind: "Infrastructure", + Verbs: []string{ + "get", "list", "watch", // only read-only requests permitted + }, + }, + metav1.APIResource{ + Name: "infrastructures/status", + SingularName: "", + Namespaced: false, + Kind: "Infrastructure", + Verbs: []string{ + "get", // only read-only requests permitted + }, + }, + } +} + +func configV1NetworkAPIResources() []metav1.APIResource { + return []metav1.APIResource{metav1.APIResource{ + Name: "networks", + SingularName: "network", + Namespaced: false, + Kind: "Network", + Verbs: []string{ + "get", "list", "watch", // only read-only requests permitted + }, + }, + } +} + func NewConfigClientShim( configClient configv1client.Interface, objects []runtime.Object, @@ -1014,6 +1056,22 @@ func NewConfigClientShim( v1Kinds[objectKind.Kind] = true } + apiResourceList := &metav1.APIResourceList{ + GroupVersion: configGroupVersion, + APIResources: []metav1.APIResource{}, + } + + for kind := range v1Kinds { + switch kind { + case "Infrastructure": + apiResourceList.APIResources = append(apiResourceList.APIResources, configV1InfrastructureAPIResources()...) + case "Network": + apiResourceList.APIResources = append(apiResourceList.APIResources, configV1NetworkAPIResources()...) 
+ } + } + + fakeClient.Fake.Resources = []*metav1.APIResourceList{apiResourceList} + return &ConfigClientShim{ configClient: configClient, v1Kinds: v1Kinds, diff --git a/test/extended/util/configv1shim_test.go b/test/extended/util/configv1shim_test.go index 8ae837845e88..880c2d7f10d5 100644 --- a/test/extended/util/configv1shim_test.go +++ b/test/extended/util/configv1shim_test.go @@ -3,6 +3,8 @@ package util import ( "context" "encoding/json" + "fmt" + "sort" "testing" "time" @@ -10,6 +12,7 @@ import ( configv1 "github.com/openshift/api/config/v1" applyconfigv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1" fakeconfigv1client "github.com/openshift/client-go/config/clientset/versioned/fake" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -55,7 +58,7 @@ func createNetworkObject(name string) *configv1.Network { return &configv1.Network{ TypeMeta: metav1.TypeMeta{ APIVersion: "config.openshift.io/v1", - Kind: "Infrastructure", + Kind: "Network", }, ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -391,7 +394,6 @@ func TestConfigClientShimWatchRequest(t *testing.T) { // verify the watch events ticker := time.NewTicker(500 * time.Millisecond) size := len(test.expectedWatchEvents) - eventsSize := len(resultChan.ResultChan()) eventCounter := 0 for i := 0; i < size; i++ { select { @@ -405,11 +407,15 @@ func TestConfigClientShimWatchRequest(t *testing.T) { t.Errorf("failed waiting for watch event") } } + if eventCounter < size { t.Errorf("Expected %v watch events, got %v instead", size, eventCounter) } - if eventsSize > size { - t.Errorf("Expected %v watch events, got %v instead", size, eventsSize) + + select { + case <-resultChan.ResultChan(): + t.Errorf("Expected no additional watch event") + case <-ticker.C: } }) } @@ -578,3 +584,428 @@ func TestConfigClientShimListNetworkFieldSelector(t *testing.T) { }) } } + +// the fake discovery's list of resources can not be constructed from +// populated objects. They need to be hand crafted. +// defaultFakeDiscoveryResources creates a default set of resources +// that are considered as real resources wherever a fake clientset is +// used instead of the real one. 
+func defaultFakeDiscoveryResources() []*metav1.APIResourceList {
+	return []*metav1.APIResourceList{
+		{
+			GroupVersion: "operator.openshift.io/v1",
+			APIResources: []metav1.APIResource{
+				{
+					Name:         "kubestorageversionmigrators",
+					SingularName: "kubestorageversionmigrator",
+					Namespaced:   false,
+					Kind:         "KubeStorageVersionMigrator",
+					Verbs: []string{
+						"delete", "deletecollection", "get", "list", "patch", "create", "update", "watch",
+					},
+				},
+				{
+					Name:         "kubestorageversionmigrators/status",
+					SingularName: "",
+					Namespaced:   false,
+					Kind:         "KubeStorageVersionMigrator",
+					Verbs: []string{
+						"get", "patch", "update",
+					},
+				},
+			},
+		},
+	}
+}
+
+func TestConfigClientShimDiscoveryServerGroups(t *testing.T) {
+	tests := []struct {
+		name               string
+		hasConfigV1Version bool
+		objects            []runtime.Object
+		fakeResources      []*metav1.APIResourceList
+	}{
+		{
+			name:               "no config v1 found with default kinds",
+			hasConfigV1Version: false,
+			fakeResources:      defaultFakeDiscoveryResources(),
+		},
+		{
+			name:               "config v1 found with default kinds",
+			hasConfigV1Version: true,
+			objects: []runtime.Object{
+				createInfrastructureObject("staticObject"),
+			},
+			fakeResources: defaultFakeDiscoveryResources(),
+		},
+		{
+			name:               "config v1 already exists",
+			hasConfigV1Version: true,
+			objects: []runtime.Object{
+				createInfrastructureObject("staticObject"),
+			},
+			fakeResources: []*metav1.APIResourceList{
+				{
+					GroupVersion: "config.openshift.io/v1",
+					APIResources: configV1InfrastructureAPIResources(),
+				},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			configClient := fakeconfigv1client.NewSimpleClientset()
+
+			client := NewConfigClientShim(
+				configClient,
+				test.objects,
+			)
+
+			configClient.Fake.Resources = test.fakeResources
+
+			groupList, err := client.Discovery().ServerGroups()
+			if err != nil {
+				t.Fatalf("Expected no error for a Discovery().ServerGroups() request, got %q instead", err)
+			}
+
+			hasConfigV1Version := false
+			for _, group := range groupList.Groups {
+				if group.Name != configGroup {
+					continue
+				}
+				for _, version := range group.Versions {
+					if version.Version == configVersion {
+						// duplicated
+						if hasConfigV1Version {
+							t.Fatalf("config v1 version duplicated")
+						}
+						hasConfigV1Version = true
+					}
+				}
+			}
+
+			if test.hasConfigV1Version && !hasConfigV1Version {
+				t.Fatalf("Expected config v1 version to exist, but it is missing")
+			}
+			if !test.hasConfigV1Version && hasConfigV1Version {
+				t.Fatalf("Expected no config v1 version, but found one")
+			}
+		})
+	}
+
+}
+
+func TestConfigClientShimDiscoveryServerResourcesForGroupVersion(t *testing.T) {
+	tests := []struct {
+		name               string
+		hasConfigV1Version bool
+		expectedResources  []string
+		objects            []runtime.Object
+		fakeResources      []*metav1.APIResourceList
+	}{
+		{
+			name:               "no config v1 found with default kinds",
+			hasConfigV1Version: false,
+			fakeResources:      defaultFakeDiscoveryResources(),
+		},
+		{
+			name:               "config v1 found with infrastructure kind with default kinds",
+			hasConfigV1Version: true,
+			objects: []runtime.Object{
+				createInfrastructureObject("staticObject"),
+			},
+			expectedResources: []string{"config.openshift.io/v1/infrastructures", "config.openshift.io/v1/infrastructures/status"},
+			fakeResources:     defaultFakeDiscoveryResources(),
+		},
+		{
+			name:               "config v1 found with network kind with default kinds",
+			hasConfigV1Version: true,
+			objects: []runtime.Object{
+				createNetworkObject("staticObject"),
+			},
+			expectedResources: []string{"config.openshift.io/v1/networks"},
+			fakeResources:     defaultFakeDiscoveryResources(),
+		},
+		{
+			name:               "config v1 found with infrastructure and network kind with default kinds",
+			hasConfigV1Version: true,
+			objects: []runtime.Object{
+				createInfrastructureObject("staticObject"),
+				createNetworkObject("staticObject"),
+			},
+			expectedResources: []string{"config.openshift.io/v1/infrastructures", "config.openshift.io/v1/infrastructures/status", "config.openshift.io/v1/networks"},
+			fakeResources:     defaultFakeDiscoveryResources(),
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			configClient := fakeconfigv1client.NewSimpleClientset()
+
+			client := NewConfigClientShim(
+				configClient,
+				test.objects,
+			)
+
+			configClient.Fake.Resources = test.fakeResources
+
+			resourceList, err := client.Discovery().ServerResourcesForGroupVersion(configGroupVersion)
+			if !test.hasConfigV1Version {
+				if err == nil {
+					t.Fatalf("Expected error for a Discovery().ServerResourcesForGroupVersion() request")
+				} else if !errors.IsNotFound(err) {
+					t.Fatalf("Expected not found error for config.openshift.io/v1 for a Discovery().ServerResourcesForGroupVersion() request, got %v instead", err)
+				}
+				return
+			}
+
+			if err != nil {
+				t.Fatalf("Expected no error for a Discovery().ServerResourcesForGroupVersion() request, got %v instead", err)
+			}
+
+			hasConfigV1Version := false
+			if resourceList.GroupVersion == configGroupVersion {
+				hasConfigV1Version = true
+			}
+
+			if test.hasConfigV1Version && !hasConfigV1Version {
+				t.Fatalf("Expected config v1 version to exist, but it is missing")
+			}
+			if !test.hasConfigV1Version && hasConfigV1Version {
+				t.Fatalf("Expected no config v1 version, but found one")
+			}
+
+			resources := []string{}
+			for _, resource := range resourceList.APIResources {
+				resources = append(resources, fmt.Sprintf("%v/%v", resourceList.GroupVersion, resource.Name))
+			}
+
+			sort.Strings(test.expectedResources)
+			sort.Strings(resources)
+
+			diff := cmp.Diff(test.expectedResources, resources)
+			if diff != "" {
+				t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", test.name, diff)
+			}
+
+		})
+	}
+
+}
+
+func TestConfigClientShimDiscoveryServerGroupsAndResources(t *testing.T) {
+	tests := []struct {
+		name              string
+		hasConfigV1Group  bool
+		expectedResources []string
+		objects           []runtime.Object
+		fakeResources     []*metav1.APIResourceList
+	}{
+		{
+			name:              "no config v1 found with default kinds",
+			hasConfigV1Group:  false,
+			expectedResources: []string{"operator.openshift.io/v1/kubestorageversionmigrators", "operator.openshift.io/v1/kubestorageversionmigrators/status"},
+			fakeResources:     defaultFakeDiscoveryResources(),
+		},
+		{
+			name:             "config v1 found with infrastructure kinds",
+			hasConfigV1Group: true,
+			objects: []runtime.Object{
+				createInfrastructureObject("staticObject"),
+			},
+			expectedResources: []string{"operator.openshift.io/v1/kubestorageversionmigrators", "operator.openshift.io/v1/kubestorageversionmigrators/status", "config.openshift.io/v1/infrastructures", "config.openshift.io/v1/infrastructures/status"},
+			fakeResources:     defaultFakeDiscoveryResources(),
+		},
+		{
+			name:             "config v1 found with network kinds",
+			hasConfigV1Group: true,
+			objects: []runtime.Object{
+				createNetworkObject("staticObject"),
+			},
+			expectedResources: []string{"operator.openshift.io/v1/kubestorageversionmigrators", "operator.openshift.io/v1/kubestorageversionmigrators/status", "config.openshift.io/v1/networks"},
+			fakeResources:     defaultFakeDiscoveryResources(),
+		},
+		{
+			name:             "config v1 found with infrastructure and network kinds",
+			hasConfigV1Group: true,
+			objects: []runtime.Object{
+				createInfrastructureObject("staticInfrastructureObject"),
+				createNetworkObject("staticNetworkObject"),
+			},
+			expectedResources: []string{"operator.openshift.io/v1/kubestorageversionmigrators", "operator.openshift.io/v1/kubestorageversionmigrators/status", "config.openshift.io/v1/infrastructures", "config.openshift.io/v1/infrastructures/status", "config.openshift.io/v1/networks"},
+			fakeResources:     defaultFakeDiscoveryResources(),
+		},
+		{
+			name:             "config v1 already exists",
+			hasConfigV1Group: true,
+			objects: []runtime.Object{
+				createInfrastructureObject("staticObject"),
+			},
+			expectedResources: []string{"config.openshift.io/v1/infrastructures", "config.openshift.io/v1/infrastructures/status"},
+			fakeResources: []*metav1.APIResourceList{
+				{
+					GroupVersion: "config.openshift.io/v1",
+					APIResources: configV1InfrastructureAPIResources(),
+				},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			configClient := fakeconfigv1client.NewSimpleClientset()
+
+			configClient.Fake.Resources = test.fakeResources
+
+			client := NewConfigClientShim(
+				configClient,
+				test.objects,
+			)
+
+			groups, resourceList, err := client.Discovery().ServerGroupsAndResources()
+			if err != nil {
+				t.Fatalf("Expected no error for a Discovery().ServerGroupsAndResources() request, got %q instead", err)
+			}
+
+			hasConfigV1Version := false
+			for _, group := range groups {
+				if group.Name != configGroup {
+					continue
+				}
+				for _, version := range group.Versions {
+					if version.Version == configVersion {
+						// duplicated
+						if hasConfigV1Version {
+							t.Fatalf("config v1 version duplicated")
+						}
+						hasConfigV1Version = true
+					}
+				}
+			}
+
+			if test.hasConfigV1Group && !hasConfigV1Version {
+				t.Fatalf("Expected config v1 version to exist, but it is missing")
+			}
+			if !test.hasConfigV1Group && hasConfigV1Version {
+				t.Fatalf("Expected no config v1 version, but found one")
+			}
+
+			resources := []string{}
+			for _, item := range resourceList {
+				for _, resource := range item.APIResources {
+					resources = append(resources, fmt.Sprintf("%v/%v", item.GroupVersion, resource.Name))
+				}
+			}
+
+			sort.Strings(test.expectedResources)
+			sort.Strings(resources)
+
+			diff := cmp.Diff(test.expectedResources, resources)
+			if diff != "" {
+				t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", test.name, diff)
+			}
+		})
+	}
+
+}
+
+func TestConfigClientShimDiscoveryServerPreferredResources(t *testing.T) {
+	// Note: FakeDiscovery's ServerPreferredResources returns nil, nil.
+	// Thus, there's currently no way to simulate the real client-side behavior.
+	tests := []struct {
+		name              string
+		hasConfigV1Group  bool
+		expectedResources []string
+		objects           []runtime.Object
+		fakeResources     []*metav1.APIResourceList
+	}{
+		{
+			name:              "no config v1 found with default kinds",
+			hasConfigV1Group:  false,
+			expectedResources: []string{},
+			fakeResources:     defaultFakeDiscoveryResources(),
+		},
+		{
+			name:             "config v1 found with infrastructure kinds",
+			hasConfigV1Group: true,
+			objects: []runtime.Object{
+				createInfrastructureObject("staticObject"),
+			},
+			expectedResources: []string{"config.openshift.io/v1/infrastructures", "config.openshift.io/v1/infrastructures/status"},
+			fakeResources:     defaultFakeDiscoveryResources(),
+		},
+		{
+			name:             "config v1 found with network kinds",
+			hasConfigV1Group: true,
+			objects: []runtime.Object{
+				createNetworkObject("staticObject"),
+			},
+			expectedResources: []string{"config.openshift.io/v1/networks"},
+			fakeResources:     defaultFakeDiscoveryResources(),
+		},
+		{
+			name:             "config v1 found with infrastructure and network kinds",
+			hasConfigV1Group: true,
+			objects: []runtime.Object{
+				createInfrastructureObject("staticInfrastructureObject"),
+				createNetworkObject("staticNetworkObject"),
+			},
+			expectedResources: []string{"config.openshift.io/v1/infrastructures", "config.openshift.io/v1/infrastructures/status", "config.openshift.io/v1/networks"},
+			fakeResources:     defaultFakeDiscoveryResources(),
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			configClient := fakeconfigv1client.NewSimpleClientset()
+
+			configClient.Fake.Resources = test.fakeResources
+
+			client := NewConfigClientShim(
+				configClient,
+				test.objects,
+			)
+
+			resourceList, err := client.Discovery().ServerPreferredResources()
+			if err != nil {
+				t.Fatalf("Expected no error for a Discovery().ServerPreferredResources() request, got %q instead", err)
+			}
+
+			hasConfigV1Version := false
+			for _, item := range resourceList {
+				if item.GroupVersion != configGroupVersion {
+					continue
+				}
+				// duplicated
+				if hasConfigV1Version {
+					t.Fatalf("config v1 version duplicated")
+				}
+				hasConfigV1Version = true
+			}
+
+			if test.hasConfigV1Group && !hasConfigV1Version {
+				t.Fatalf("Expected config v1 version to exist, but it is missing")
+			}
+			if !test.hasConfigV1Group && hasConfigV1Version {
+				t.Fatalf("Expected no config v1 version, but found one")
+			}
+
+			resources := []string{}
+			for _, item := range resourceList {
+				for _, resource := range item.APIResources {
+					resources = append(resources, fmt.Sprintf("%v/%v", item.GroupVersion, resource.Name))
+				}
+			}
+
+			sort.Strings(test.expectedResources)
+			sort.Strings(resources)
+
+			diff := cmp.Diff(test.expectedResources, resources)
+			if diff != "" {
+				t.Errorf("test '%s' failed. Results are not deep equal. 
mismatch (-want +got):\n%s", test.name, diff) + } + }) + } + +} From 278330cb4345ed3d74199d88c5484ca0cf4a806b Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Mon, 20 Mar 2023 22:37:49 +0100 Subject: [PATCH 11/15] make update-gofmt --- test/extended/util/configv1shim.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/extended/util/configv1shim.go b/test/extended/util/configv1shim.go index bfaabfae0938..d29586aaaa95 100644 --- a/test/extended/util/configv1shim.go +++ b/test/extended/util/configv1shim.go @@ -1002,7 +1002,7 @@ func (e OperationNotPermitted) Error() string { } func configV1InfrastructureAPIResources() []metav1.APIResource { - return []metav1.APIResource{metav1.APIResource{ + return []metav1.APIResource{{ Name: "infrastructures", SingularName: "infrastructure", Namespaced: false, @@ -1011,7 +1011,7 @@ func configV1InfrastructureAPIResources() []metav1.APIResource { "get", "list", "watch", // only read-only requests permitted }, }, - metav1.APIResource{ + { Name: "infrastructures/status", SingularName: "", Namespaced: false, @@ -1024,7 +1024,7 @@ func configV1InfrastructureAPIResources() []metav1.APIResource { } func configV1NetworkAPIResources() []metav1.APIResource { - return []metav1.APIResource{metav1.APIResource{ + return []metav1.APIResource{{ Name: "networks", SingularName: "network", Namespaced: false, From c7b130ab2a2061e2994f60b0f1926dd0e50c5337 Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Tue, 21 Mar 2023 09:55:12 +0100 Subject: [PATCH 12/15] Return early without config v1 kinds --- test/extended/util/configv1shim.go | 34 ++++++++++++++++++------------ 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/test/extended/util/configv1shim.go b/test/extended/util/configv1shim.go index d29586aaaa95..f3cdcd4e37ca 100644 --- a/test/extended/util/configv1shim.go +++ b/test/extended/util/configv1shim.go @@ -110,6 +110,11 @@ func (c *ConfigV1DiscoveryClientShim) ServerGroups() (*metav1.APIGroupList, erro if err != nil { return groups, err } + + if !c.hasConfigV1Kinds { + return groups, nil + } + hasConfigGroup := false for i, group := range groups.Groups { if group.Name == configGroup { @@ -121,13 +126,13 @@ func (c *ConfigV1DiscoveryClientShim) ServerGroups() (*metav1.APIGroupList, erro break } } - if c.hasConfigV1Kinds && !hasV1Version { + if !hasV1Version { groups.Groups[i].Versions = append(groups.Groups[i].Versions, configGroupVersionForDiscovery()) } break } } - if c.hasConfigV1Kinds && !hasConfigGroup { + if !hasConfigGroup { groups.Groups = append(groups.Groups, *configAPIGroup()) } return groups, nil @@ -156,7 +161,8 @@ func (c *ConfigV1DiscoveryClientShim) ServerGroupsAndResources() ([]*metav1.APIG if err != nil { return groups, resources, err } - if groups != nil { + + if c.hasConfigV1Kinds && groups != nil { hasConfigGroup := false for i, group := range groups { if group.Name == configGroup { @@ -168,18 +174,18 @@ func (c *ConfigV1DiscoveryClientShim) ServerGroupsAndResources() ([]*metav1.APIG break } } - if c.hasConfigV1Kinds && !hasV1Version { + if !hasV1Version { groups[i].Versions = append(groups[i].Versions, configGroupVersionForDiscovery()) } break } } - if c.hasConfigV1Kinds && !hasConfigGroup { + if !hasConfigGroup { groups = append(groups, configAPIGroup()) } } - if resources != nil { + if c.hasConfigV1Kinds && resources != nil { fakeList, err := c.fakeClient.Discovery().ServerResourcesForGroupVersion(configGroupVersion) if err != nil { return nil, nil, err @@ -188,12 +194,10 @@ func (c 
*ConfigV1DiscoveryClientShim) ServerGroupsAndResources() ([]*metav1.APIG for i, resource := range resources { if resource.GroupVersion == configGroupVersion { hasConfigGroup = true - if c.hasConfigV1Kinds { - resources[i].APIResources = mergeResources(fakeList.APIResources, resources[i].APIResources) - } + resources[i].APIResources = mergeResources(fakeList.APIResources, resources[i].APIResources) } } - if c.hasConfigV1Kinds && !hasConfigGroup { + if !hasConfigGroup { resources = append(resources, newAPIResourceList(configGroupVersion, []metav1.APIResource{}, fakeList.APIResources)) } } @@ -207,6 +211,10 @@ func (c *ConfigV1DiscoveryClientShim) ServerPreferredResources() ([]*metav1.APIR return nil, err } + if !c.hasConfigV1Kinds { + return resources, nil + } + fakeList, err := c.fakeClient.Discovery().ServerResourcesForGroupVersion(configGroupVersion) if err != nil { return nil, err @@ -216,13 +224,11 @@ func (c *ConfigV1DiscoveryClientShim) ServerPreferredResources() ([]*metav1.APIR for i, resource := range resources { if resource.GroupVersion == configGroupVersion { hasConfigGroup = true - if c.hasConfigV1Kinds { - resources[i].APIResources = mergeResources(fakeList.APIResources, resources[i].APIResources) - } + resources[i].APIResources = mergeResources(fakeList.APIResources, resources[i].APIResources) } } - if c.hasConfigV1Kinds && !hasConfigGroup { + if !hasConfigGroup { resources = append(resources, newAPIResourceList(configGroupVersion, []metav1.APIResource{}, fakeList.APIResources)) } From ffd5e00dbec5e43c5273e87a4866a8bdfe4d92ea Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Fri, 24 Mar 2023 14:15:33 +0100 Subject: [PATCH 13/15] Do not use ginkgo.Expect for checking collectConfigManifestsFromDir error The ginkgo handler is not yet initialized properly --- test/extended/util/client.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/extended/util/client.go b/test/extended/util/client.go index 39bbcfa236f4..9498e61af297 100644 --- a/test/extended/util/client.go +++ b/test/extended/util/client.go @@ -580,7 +580,9 @@ func (c *CLI) RESTMapper() meta.RESTMapper { func (c *CLI) setupStaticConfigsFromManifests() { if len(c.staticConfigManifestDir) > 0 { err, objects := collectConfigManifestsFromDir(c.staticConfigManifestDir) - o.Expect(err).ToNot(o.HaveOccurred()) + if err != nil { + panic(err) + } c.configObjects = objects } } From 398aba32cd1134a79dfd3187ff76f7af58996fd8 Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Fri, 24 Mar 2023 15:12:48 +0100 Subject: [PATCH 14/15] Set staticConfigManifestDir for NewCLIWithFramework --- test/extended/util/client.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/test/extended/util/client.go b/test/extended/util/client.go index 9498e61af297..6a8d83d668b9 100644 --- a/test/extended/util/client.go +++ b/test/extended/util/client.go @@ -116,10 +116,11 @@ type resourceRef struct { // framework. It can be called inside of a Ginkgo .It() function. 
func NewCLIWithFramework(kubeFramework *framework.Framework) *CLI { cli := &CLI{ - kubeFramework: kubeFramework, - username: "admin", - execPath: "oc", - adminConfigPath: KubeConfigPath(), + kubeFramework: kubeFramework, + username: "admin", + execPath: "oc", + adminConfigPath: KubeConfigPath(), + staticConfigManifestDir: StaticConfigManifestDir(), } // Called only once (assumed the objects will never get modified) // TODO: run in every BeforeEach From c568b580f54cf07da2e66488fe300a197679df9d Mon Sep 17 00:00:00 2001 From: Jan Chaloupka Date: Wed, 29 Mar 2023 09:43:19 +0200 Subject: [PATCH 15/15] Update annotate rules to exclude [sig-network][Feature:EgressIP][apigroup:operator.openshift.io] [internal-targets] --- .../util/annotate/generated/zz_generated.annotations.go | 2 +- test/extended/util/annotate/rules.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/extended/util/annotate/generated/zz_generated.annotations.go b/test/extended/util/annotate/generated/zz_generated.annotations.go index 4d605d4205e2..733440a886e3 100644 --- a/test/extended/util/annotate/generated/zz_generated.annotations.go +++ b/test/extended/util/annotate/generated/zz_generated.annotations.go @@ -2623,7 +2623,7 @@ var Annotations = map[string]string{ "[sig-network][Feature:EgressIP][apigroup:operator.openshift.io] [external-targets][apigroup:user.openshift.io][apigroup:security.openshift.io] pods should keep the assigned EgressIPs when being rescheduled to another node": " [Serial] [Suite:openshift/conformance/serial]", - "[sig-network][Feature:EgressIP][apigroup:operator.openshift.io] [internal-targets] EgressIP pods should query hostNetwork pods with the local node's SNAT": " [Serial] [Suite:openshift/conformance/serial]", + "[sig-network][Feature:EgressIP][apigroup:operator.openshift.io] [internal-targets] EgressIP pods should query hostNetwork pods with the local node's SNAT": " [Disabled:Broken] [Serial]", "[sig-network][Feature:EgressRouterCNI] should ensure ipv4 egressrouter cni resources are created [apigroup:operator.openshift.io]": " [Suite:openshift/conformance/parallel]", diff --git a/test/extended/util/annotate/rules.go b/test/extended/util/annotate/rules.go index e711a2ab4274..e466da4bdc42 100644 --- a/test/extended/util/annotate/rules.go +++ b/test/extended/util/annotate/rules.go @@ -68,7 +68,7 @@ var ( `\[sig-network-edge\]\[Feature:Idling\] Unidling \[apigroup:apps.openshift.io\]\[apigroup:route.openshift.io\] should work with TCP \(while idling\)`, // https://bugzilla.redhat.com/show_bug.cgi?id=2070929 - `\[sig-network\]\[Feature:EgressIP\]\[apigroup:config.openshift.io\] \[internal-targets\]`, + `\[sig-network\]\[Feature:EgressIP\]\[apigroup:operator.openshift.io\] \[internal-targets\]`, // https://issues.redhat.com/browse/OCPBUGS-967 `\[sig-network\] IngressClass \[Feature:Ingress\] should prevent Ingress creation if more than 1 IngressClass marked as default`,
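A note on the net effect of the series: a CLI built via NewCLIWithFramework (or NewCLI) now collects manifests from StaticConfigManifestDir() and wraps its config client in the shim, so reads of the shimmed config.openshift.io/v1 kinds are answered from the injected manifests while mutations of those objects are rejected. The snippet below is a minimal usage sketch, not part of the patches; it assumes ConfigClientShim satisfies the generated config clientset interface (the ConfigV1() accessor) and that reads of shimmed kinds are served from the injected objects, which the Create/Update guards in the diffs above imply. The Infrastructure literal is hand-built for illustration.

package util

import (
	"context"
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	fakeconfigv1client "github.com/openshift/client-go/config/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func ExampleNewConfigClientShim() {
	// Stand-in for a manifest collected from the static config manifest dir.
	static := &configv1.Infrastructure{
		TypeMeta:   metav1.TypeMeta{APIVersion: "config.openshift.io/v1", Kind: "Infrastructure"},
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
	}

	// In the e2e code the first argument is the real cluster client; a fake
	// stands in here so the sketch is self-contained.
	client := NewConfigClientShim(
		fakeconfigv1client.NewSimpleClientset(),
		[]runtime.Object{static},
	)

	// Reads of a shimmed kind are expected to be served from the injected manifest.
	infra, err := client.ConfigV1().Infrastructures().Get(context.TODO(), "cluster", metav1.GetOptions{})
	if err == nil {
		fmt.Println(infra.Name) // "cluster"
	}

	// Mutating a shimmed object is rejected with OperationNotPermitted,
	// per the guards shown in the shim's Create/Update/UpdateStatus methods.
	_, err = client.ConfigV1().Infrastructures().Update(context.TODO(), static, metav1.UpdateOptions{})
	fmt.Println(err) // operation "update" not permitted
}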