diff --git a/controllers/apps/transformer_component_workload.go b/controllers/apps/transformer_component_workload.go
index da69abe4d30..928b3a8557a 100644
--- a/controllers/apps/transformer_component_workload.go
+++ b/controllers/apps/transformer_component_workload.go
@@ -26,6 +26,7 @@ import (
 	"reflect"
 	"slices"
 	"strings"
+	"time"
 
 	"github.com/spf13/viper"
 	"golang.org/x/exp/maps"
@@ -451,6 +452,10 @@ func (r *componentWorkloadOps) expandVolume() error {
 // horizontalScale handles workload horizontal scale
 func (r *componentWorkloadOps) horizontalScale() error {
 	its := r.runningITS
+	// handle memberjoin lifecycle action
+	if err := r.checkAndDoMemberJoin(); err != nil {
+		return err
+	}
 	doScaleOut, doScaleIn := r.horizontalScaling()
 	if !doScaleOut && !doScaleIn {
 		if err := r.postScaleIn(); err != nil {
@@ -561,6 +566,9 @@ func (r *componentWorkloadOps) scaleOut(itsObj *workloads.InstanceSet) error {
 	if *itsObj.Spec.Replicas == 0 {
 		return nil
 	}
+
+	r.annotateInstanceSetForMemberJoin()
+
 	graphCli := model.NewGraphClient(r.cli)
 	graphCli.Noop(r.dag, r.protoITS)
 	d, err := newDataClone(r.reqCtx, r.cli, r.cluster, r.synthesizeComp, itsObj, r.protoITS, backupKey)
@@ -597,11 +605,84 @@ func (r *componentWorkloadOps) scaleOut(itsObj *workloads.InstanceSet) error {
 	}
 }
 
+func (r *componentWorkloadOps) annotateInstanceSetForMemberJoin() {
+	if r.synthesizeComp.LifecycleActions.MemberJoin == nil {
+		return
+	}
+
+	podsToMemberjoin := getPodsToMemberJoinFromAnno(r.runningITS)
+
+	for podName := range r.desiredCompPodNameSet {
+		if r.runningItsPodNameSet.Has(podName) {
+			continue
+		}
+		if podsToMemberjoin.Has(podName) {
+			continue
+		}
+		podsToMemberjoin.Insert(podName)
+	}
+
+	if podsToMemberjoin.Len() > 0 {
+		r.protoITS.Annotations[constant.MemberJoinStatusAnnotationKey] = strings.Join(sets.List(podsToMemberjoin), ",")
+	}
+}
+
+func getPodsToMemberJoinFromAnno(instanceSet *workloads.InstanceSet) sets.Set[string] {
+	podsToMemberjoin := sets.New[string]()
+	if instanceSet == nil {
+		return podsToMemberjoin
+	}
+
+	if instanceSet.Annotations == nil {
+		return podsToMemberjoin
+	}
+
+	if memberJoinStatus := instanceSet.Annotations[constant.MemberJoinStatusAnnotationKey]; memberJoinStatus != "" {
+		podsToMemberjoin.Insert(strings.Split(memberJoinStatus, ",")...)
+	}
+
+	return podsToMemberjoin
+}
+
 func (r *componentWorkloadOps) leaveMember4ScaleIn() error {
 	pods, err := component.ListOwnedPods(r.reqCtx.Ctx, r.cli, r.cluster.Namespace, r.cluster.Name, r.synthesizeComp.Name)
 	if err != nil {
 		return err
 	}
+
+	// TODO: Move memberLeave to the ITS controller. Instead of performing a switchover, we can directly scale down the non-leader nodes. This is because the pod ordinal is not guaranteed to be continuous.
+	podsToMemberLeave := make([]*corev1.Pod, 0)
+
+	podsToMemberjoin := getPodsToMemberJoinFromAnno(r.runningITS)
+	for _, pod := range pods {
+		// if the pod does not exist in the generated pod names, it should be a member that needs to leave
+		if _, ok := r.desiredCompPodNameSet[pod.Name]; ok {
+			continue
+		}
+		podsToMemberLeave = append(podsToMemberLeave, pod)
+	}
+
+	var leaveErrors []error
+	for _, pod := range podsToMemberLeave {
+
+		if podsToMemberjoin.Has(pod.Name) {
+			leaveErrors = append(leaveErrors, fmt.Errorf("pod %s is in memberjoin process", pod.Name))
+			continue
+		}
+
+		if err := r.leaveMemberForPod(pod, pods); err != nil {
+			leaveErrors = append(leaveErrors, err)
+		}
+
+	}
+	if len(leaveErrors) > 0 {
+		return newRequeueError(time.Second, fmt.Sprintf("%v", leaveErrors))
+	}
+
+	return nil
+}
+
+func (r *componentWorkloadOps) leaveMemberForPod(pod *corev1.Pod, pods []*corev1.Pod) error {
 	isLeader := func(pod *corev1.Pod) bool {
 		if pod == nil || len(pod.Labels) == 0 {
 			return false
@@ -635,41 +716,100 @@ func (r *componentWorkloadOps) leaveMember4ScaleIn() error {
 		return err
 	}
 
-	// TODO: Move memberLeave to the ITS controller. Instead of performing a switchover, we can directly scale down the non-leader nodes. This is because the pod ordinal is not guaranteed to be continuous.
-	podsToMemberLeave := make([]*corev1.Pod, 0)
-	for _, pod := range pods {
-		// if the pod not exists in the generated pod names, it should be a member that needs to leave
-		if _, ok := r.desiredCompPodNameSet[pod.Name]; ok {
-			continue
-		}
-		podsToMemberLeave = append(podsToMemberLeave, pod)
+	if !(isLeader(pod) || // if the pod is leader, it needs to call switchover
+		(r.synthesizeComp.LifecycleActions != nil && r.synthesizeComp.LifecycleActions.MemberLeave != nil)) { // if the memberLeave action is defined, it needs to call it
+		return nil
 	}
-	for _, pod := range podsToMemberLeave {
-		if !(isLeader(pod) || // if the pod is leader, it needs to call switchover
-			(r.synthesizeComp.LifecycleActions != nil && r.synthesizeComp.LifecycleActions.MemberLeave != nil)) { // if the memberLeave action is defined, it needs to call it
-			continue
+
+	lfa, err := lifecycle.New(r.synthesizeComp, pod, pods...)
+	if err != nil {
+		return err
+	}
+
+	// switchover if the leaving pod is leader
+	if switchoverErr := tryToSwitchover(lfa, pod); switchoverErr != nil {
+		return switchoverErr
+	}
+
+	if err = lfa.MemberLeave(r.reqCtx.Ctx, r.cli, nil); err != nil {
+		if !errors.Is(err, lifecycle.ErrActionNotDefined) {
+			return err
 		}
+	}
+	return nil
+}
 
-		lfa, err1 := lifecycle.New(r.synthesizeComp, pod, pods...)
-		if err1 != nil {
-			if err == nil {
-				err = err1
-			}
+func (r *componentWorkloadOps) checkAndDoMemberJoin() error {
+	// just wait for memberjoin anno to be updated
+	if r.protoITS.Annotations[constant.MemberJoinStatusAnnotationKey] != "" {
+		return nil
+	}
+
+	podsToMemberjoin := getPodsToMemberJoinFromAnno(r.runningITS)
+	if len(podsToMemberjoin) == 0 {
+		return nil
+	}
+
+	if r.synthesizeComp.LifecycleActions == nil || r.synthesizeComp.LifecycleActions.MemberJoin == nil {
+		podsToMemberjoin.Clear()
+	}
+	err := r.doMemberJoin(podsToMemberjoin)
+	if err != nil {
+		return err
+	}
+
+	if podsToMemberjoin.Len() == 0 {
+		// Anno will be merged later, so it should be deleted from both protoITS and runningITS
+		delete(r.protoITS.Annotations, constant.MemberJoinStatusAnnotationKey)
+		delete(r.runningITS.Annotations, constant.MemberJoinStatusAnnotationKey)
+	} else {
+		r.protoITS.Annotations[constant.MemberJoinStatusAnnotationKey] = strings.Join(sets.List(podsToMemberjoin), ",")
+	}
+	return nil
+}
+
+func (r *componentWorkloadOps) doMemberJoin(podSet sets.Set[string]) error {
+	if len(podSet) == 0 {
+		return nil
+	}
+
+	runningPods, err := component.ListOwnedPods(r.reqCtx.Ctx, r.cli, r.cluster.Namespace, r.cluster.Name, r.synthesizeComp.Name)
+	if err != nil {
+		return err
+	}
+
+	var joinErrors []error
+	for _, pod := range runningPods {
+		if !podSet.Has(pod.Name) {
 			continue
 		}
-		// switchover if the leaving pod is leader
-		if switchoverErr := tryToSwitchover(lfa, pod); switchoverErr != nil {
-			return switchoverErr
+		if err := r.joinMemberForPod(pod, runningPods); err != nil {
+			joinErrors = append(joinErrors, fmt.Errorf("pod %s: %w", pod.Name, err))
+		} else {
+			podSet.Delete(pod.Name)
 		}
+	}
 
-		if err2 := lfa.MemberLeave(r.reqCtx.Ctx, r.cli, nil); err2 != nil {
-			if !errors.Is(err2, lifecycle.ErrActionNotDefined) && err == nil {
-				err = err2
-			}
+	if len(joinErrors) > 0 {
+		return newRequeueError(time.Second, fmt.Sprintf("%v", joinErrors))
+	}
+	return nil
+}
+
+func (r *componentWorkloadOps) joinMemberForPod(pod *corev1.Pod, pods []*corev1.Pod) error {
+	lfa, err := lifecycle.New(r.synthesizeComp, pod, pods...)
+	if err != nil {
+		return err
+	}
+
+	if err = lfa.MemberJoin(r.reqCtx.Ctx, r.cli, nil); err != nil {
+		if !errors.Is(err, lifecycle.ErrActionNotDefined) {
+			return err
 		}
 	}
-	return err // TODO: use requeue-after
+
+	return nil
 }
 
 func (r *componentWorkloadOps) deletePVCs4ScaleIn(itsObj *workloads.InstanceSet) error {
diff --git a/controllers/apps/transformer_component_workload_test.go b/controllers/apps/transformer_component_workload_test.go
new file mode 100644
index 00000000000..39349712e6b
--- /dev/null
+++ b/controllers/apps/transformer_component_workload_test.go
@@ -0,0 +1,291 @@
+/*
+Copyright (C) 2022-2024 ApeCloud Co., Ltd
+This file is part of KubeBlocks project
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package apps
+
+import (
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/util/sets"
+
+	kbappsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
+	workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
+	testk8s "github.com/apecloud/kubeblocks/pkg/testutil/k8s"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
+	"github.com/apecloud/kubeblocks/pkg/constant"
+	"github.com/apecloud/kubeblocks/pkg/controller/component"
+	"github.com/apecloud/kubeblocks/pkg/controller/graph"
+	"github.com/apecloud/kubeblocks/pkg/controller/model"
+	intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil"
+	testapps "github.com/apecloud/kubeblocks/pkg/testutil/apps"
+)
+
+var _ = Describe("Component Workload Operations Test", func() {
+	const (
+		clusterName    = "test-cluster"
+		compName       = "test-comp"
+		kubeblocksName = "kubeblocks"
+	)
+
+	var (
+		reader         *mockReader
+		dag            *graph.DAG
+		synthesizeComp *component.SynthesizedComponent
+	)
+
+	newDAG := func(graphCli model.GraphClient, comp *appsv1alpha1.Component) *graph.DAG {
+		d := graph.NewDAG()
+		graphCli.Root(d, comp, comp, model.ActionStatusPtr())
+		return d
+	}
+
+	BeforeEach(func() {
+		reader = &mockReader{}
+		comp := &appsv1alpha1.Component{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: testCtx.DefaultNamespace,
+				Name:      constant.GenerateClusterComponentName(clusterName, compName),
+				Labels: map[string]string{
+					constant.AppManagedByLabelKey:   constant.AppName,
+					constant.AppInstanceLabelKey:    clusterName,
+					constant.KBAppComponentLabelKey: compName,
+				},
+			},
+			Spec: appsv1alpha1.ComponentSpec{},
+		}
+
+		synthesizeComp = &component.SynthesizedComponent{
+			Namespace:   testCtx.DefaultNamespace,
+			ClusterName: clusterName,
+			Name:        compName,
+			Roles: []kbappsv1.ReplicaRole{
+				{Name: "leader", Serviceable: true, Writable: true, Votable: true},
+				{Name: "follower", Serviceable: false, Writable: false, Votable: false},
+			},
+			LifecycleActions: &kbappsv1.ComponentLifecycleActions{
+				MemberJoin: &kbappsv1.Action{
+					Exec: &kbappsv1.ExecAction{
+						Image: "test-image",
+					},
+				},
+				MemberLeave: &kbappsv1.Action{
+					Exec: &kbappsv1.ExecAction{
+						Image: "test-image",
+					},
+				},
+				Switchover: &kbappsv1.Action{
+					Exec: &kbappsv1.ExecAction{
+						Image: "test-image",
+					},
+				},
+			},
+		}
+
+		graphCli := model.NewGraphClient(reader)
+		dag = newDAG(graphCli, comp)
+	})
+
+	Context("Member Leave Operations", func() {
+		var (
+			ops  *componentWorkloadOps
+			pod0 *corev1.Pod
+			pod1 *corev1.Pod
+			pods []*corev1.Pod
+		)
+
+		BeforeEach(func() {
+			pod0 = testapps.NewPodFactory(testCtx.DefaultNamespace, "test-pod-0").
+				AddAnnotations(constant.MemberJoinStatusAnnotationKey, "test-pod").
+				AddContainer(corev1.Container{
+					Image: "test-image",
+					Name:  "test-container",
+				}).
+				AddLabels(
+					constant.AppManagedByLabelKey, kubeblocksName,
+					constant.AppInstanceLabelKey, clusterName,
+					constant.KBAppComponentLabelKey, compName,
+				).
+				GetObject()
+
+			pod1 = testapps.NewPodFactory(testCtx.DefaultNamespace, "test-pod-1").
+				AddAnnotations(constant.MemberJoinStatusAnnotationKey, "test-pod").
+				AddContainer(corev1.Container{
+					Image: "test-image",
+					Name:  "test-container",
+				}).
+				AddLabels(
+					constant.AppManagedByLabelKey, kubeblocksName,
+					constant.AppInstanceLabelKey, clusterName,
+					constant.KBAppComponentLabelKey, compName,
+				).
+				GetObject()
+
+			pods = []*corev1.Pod{}
+			pods = append(pods, pod0)
+			pods = append(pods, pod1)
+
+			container := corev1.Container{
+				Name:            "mock-container-name",
+				Image:           testapps.ApeCloudMySQLImage,
+				ImagePullPolicy: corev1.PullIfNotPresent,
+			}
+
+			mockITS := testapps.NewInstanceSetFactory(testCtx.DefaultNamespace,
+				"test-its", clusterName, compName).
+				AddFinalizers([]string{constant.DBClusterFinalizerName}).
+				AddContainer(container).
+				AddAppInstanceLabel(clusterName).
+				AddAppComponentLabel(compName).
+				AddAppManagedByLabel().
+				AddAnnotations(constant.MemberJoinStatusAnnotationKey, "").
+				SetReplicas(2).
+				SetRoles([]workloads.ReplicaRole{
+					{Name: "leader", AccessMode: workloads.ReadWriteMode, CanVote: true, IsLeader: true},
+					{Name: "follower", AccessMode: workloads.ReadonlyMode, CanVote: true, IsLeader: false},
+				}).
+				GetObject()
+
+			mockCluster := testapps.NewClusterFactory(testCtx.DefaultNamespace, "test-cluster", "test-def").
+				GetObject()
+
+			ops = &componentWorkloadOps{
+				cli:            k8sClient,
+				reqCtx:         intctrlutil.RequestCtx{Ctx: ctx, Log: logger},
+				synthesizeComp: synthesizeComp,
+				cluster:        mockCluster,
+				runningITS:     mockITS,
+				protoITS:       mockITS.DeepCopy(),
+				dag:            dag,
+			}
+
+			testapps.MockKBAgentClient4Workload(&testCtx, pods)
+		})
+
+		It("should handle member leave process correctly", func() {
+			for _, pod := range pods {
+				Expect(ops.cli.Create(ctx, pod)).Should(BeNil())
+			}
+
+			ops.desiredCompPodNameSet = make(sets.Set[string])
+			ops.desiredCompPodNameSet.Insert(pod0.Name)
+
+			By("setting up member join status")
+			ops.runningITS.Annotations[constant.MemberJoinStatusAnnotationKey] = ""
+
+			By("executing leave member operation")
+			err := ops.leaveMember4ScaleIn()
+			Expect(err).Should(BeNil())
+			Expect(pod0.Labels["test.kubeblock.io/memberleave-completed"]).Should(Equal(""))
+			Expect(pod1.Labels["test.kubeblock.io/memberleave-completed"]).ShouldNot(Equal(""))
+
+			for _, pod := range pods {
+				Expect(ops.cli.Delete(ctx, pod)).Should(BeNil())
+			}
+
+		})
+
+		It("should return requeueError when memberLeave is executed while memberJoin is in progress", func() {
+			for _, pod := range pods {
+				Expect(ops.cli.Create(ctx, pod)).Should(BeNil())
+			}
+
+			ops.desiredCompPodNameSet = make(sets.Set[string])
+			ops.desiredCompPodNameSet.Insert(pod0.Name)
+
+			By("setting up member join status")
+			ops.runningITS.Annotations[constant.MemberJoinStatusAnnotationKey] = pod1.Name
+
+			By("executing leave member operation")
+			err := ops.leaveMember4ScaleIn()
+			Expect(err).ShouldNot(BeNil())
+			Expect(pod0.Labels["test.kubeblock.io/memberleave-completed"]).Should(Equal(""))
+			Expect(pod1.Labels["test.kubeblock.io/memberleave-completed"]).Should(Equal(""))
+
+			for _, pod := range pods {
+				Expect(ops.cli.Delete(ctx, pod)).Should(BeNil())
+			}
+		})
+
+		It("should handle switchover for leader pod", func() {
+			By("setting up leader pod")
+			pod0.Labels[constant.RoleLabelKey] = "follower"
+			pod1.Labels[constant.RoleLabelKey] = "leader"
+
+			for _, pod := range pods {
+				Expect(ops.cli.Create(ctx, pod)).Should(BeNil())
+			}
+
+			ops.desiredCompPodNameSet = make(sets.Set[string])
+			ops.desiredCompPodNameSet.Insert(pod0.Name)
+
+			By("executing leave member for leader")
+			err := ops.leaveMemberForPod(pod1, []*corev1.Pod{pod1})
+			Expect(err).ShouldNot(BeNil())
+			Expect(pod0.Labels[constant.RoleLabelKey]).Should(Equal("leader"))
+			Expect(pod1.Labels[constant.RoleLabelKey]).ShouldNot(Equal("leader"))
+
+			for _, pod := range pods {
+				Expect(ops.cli.Delete(ctx, pod)).Should(BeNil())
+			}
+		})
+
+		It("should handle member join process correctly", func() {
+
+			for _, pod := range pods {
+				Expect(ops.cli.Create(ctx, pod)).Should(BeNil())
+			}
+
+			ops.desiredCompPodNameSet = make(sets.Set[string])
+			ops.desiredCompPodNameSet.Insert(pod0.Name)
+
+			By("setting up pod status")
+			ops.runningITS.Annotations[constant.MemberJoinStatusAnnotationKey] = pod1.Name
+			testk8s.MockPodIsRunning(ctx, testCtx, pod1)
+
+			By("executing member join operation")
+			err := ops.checkAndDoMemberJoin()
+			Expect(err).Should(BeNil())
+			Expect(pod0.Labels["test.kubeblock.io/memberjoin-completed"]).Should(Equal(""))
+			Expect(pod1.Labels["test.kubeblock.io/memberjoin-completed"]).ShouldNot(Equal(""))
+			Expect(ops.protoITS.Annotations[constant.MemberJoinStatusAnnotationKey]).Should(Equal(""))
+
+			for _, pod := range pods {
+				Expect(ops.cli.Delete(ctx, pod)).Should(BeNil())
+			}
+		})
+
+		It("should annotate instance for member join correctly", func() {
+			Expect(ops.cli.Create(ctx, pod0)).Should(BeNil())
+
+			ops.desiredCompPodNameSet = make(sets.Set[string])
+			ops.desiredCompPodNameSet.Insert(pod0.Name)
+			ops.desiredCompPodNameSet.Insert(pod1.Name)
+
+			ops.runningItsPodNameSet = make(sets.Set[string])
+			ops.runningItsPodNameSet.Insert(pod0.Name)
+
+			ops.annotateInstanceSetForMemberJoin()
+
+			Expect(ops.protoITS.Annotations[constant.MemberJoinStatusAnnotationKey]).Should(Equal(pod1.Name))
+
+			Expect(ops.cli.Delete(ctx, pod0)).Should(BeNil())
+
+		})
+	})
+})
diff --git a/controllers/apps/utils.go b/controllers/apps/utils.go
index a40c7df63e1..72beb7f3756 100644
--- a/controllers/apps/utils.go
+++ b/controllers/apps/utils.go
@@ -86,7 +86,6 @@ func clientOption(v *model.ObjectVertex) *multicluster.ClientOption {
 	}
 	return multicluster.InControlContext()
 }
-
 func resolveServiceDefaultFields(oldSpec, newSpec *corev1.ServiceSpec) {
 	var exist *corev1.ServicePort
 	for i, port := range newSpec.Ports {
diff --git a/pkg/constant/annotations.go b/pkg/constant/annotations.go
index c2dd7c352cc..07acd923571 100644
--- a/pkg/constant/annotations.go
+++ b/pkg/constant/annotations.go
@@ -46,6 +46,7 @@ const (
 
 	// NodeSelectorOnceAnnotationKey adds nodeSelector in podSpec for one pod exactly once
 	NodeSelectorOnceAnnotationKey = "workloads.kubeblocks.io/node-selector-once"
+	MemberJoinStatusAnnotationKey = "workloads.kubeblocks.io/memberjoin-pod-list"
 )
 
 // annotations for multi-cluster
diff --git a/pkg/testutil/apps/kb_agent_util.go b/pkg/testutil/apps/kb_agent_util.go
index 3dd9d9e84e7..7e8af1b7a6e 100644
--- a/pkg/testutil/apps/kb_agent_util.go
+++ b/pkg/testutil/apps/kb_agent_util.go
@@ -83,3 +83,85 @@ func MockKBAgentClient4HScale(testCtx *testutil.TestContext, clusterKey types.Na
 	}).AnyTimes()
 	})
 }
+
+func MockKBAgentClient4Workload(testCtx *testutil.TestContext, pods []*corev1.Pod) {
+	const (
+		memberJoinCompletedLabel  = "test.kubeblock.io/memberjoin-completed"
+		memberLeaveCompletedLabel = "test.kubeblock.io/memberleave-completed"
+	)
+
+	rsp := kbagentproto.ActionResponse{Message: "mock success"}
+	handleMemberLeave := func(podName string) (kbagentproto.ActionResponse, error) {
+		for _, pod := range pods {
+			if pod.Name != podName {
+				continue
+			}
+			pod.Labels[memberLeaveCompletedLabel] = "true"
+			err := testCtx.Cli.Update(testCtx.Ctx, pod)
+			if err != nil {
+				return kbagentproto.ActionResponse{}, err
+			}
+		}
+		return rsp, nil
+	}
+
+	handleMemberJoin := func(podName string) (kbagentproto.ActionResponse, error) {
+		for _, pod := range pods {
+			if pod.Name != podName {
+				continue
+			}
+			pod.Labels[memberJoinCompletedLabel] = "true"
+			err := testCtx.Cli.Update(testCtx.Ctx, pod)
+			if err != nil {
+				return kbagentproto.ActionResponse{}, err
+			}
+		}
+		return rsp, nil
+	}
+
+	handleSwitchOver := func(podName string) (kbagentproto.ActionResponse, error) {
+		for _, pod := range pods {
+			if pod.Name != podName {
+				continue
+			}
+			if pod.Labels[constant.RoleLabelKey] != "leader" {
+				return rsp, nil
+			}
+			pod.Labels[constant.RoleLabelKey] = "follower"
+			err := testCtx.Cli.Update(testCtx.Ctx, pod)
+			if err != nil {
+				return kbagentproto.ActionResponse{}, err
+			}
+		}
+
+		for _, pod := range pods {
+			if pod.Name == podName {
+				continue
+			}
+			pod.Labels[constant.RoleLabelKey] = "leader"
+			err := testCtx.Cli.Update(testCtx.Ctx, pod)
+			if err != nil {
+				return kbagentproto.ActionResponse{}, err
+			}
+		}
+		return rsp, nil
+	}
+
+	MockKBAgentClient(func(recorder *kbagent.MockClientMockRecorder) {
+		recorder.Action(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, req kbagentproto.ActionRequest) (kbagentproto.ActionResponse, error) {
+			switch req.Action {
+			case "memberLeave":
+				podName := req.Parameters["KB_LEAVE_MEMBER_POD_NAME"]
+				return handleMemberLeave(podName)
+			case "memberJoin":
+				podName := req.Parameters["KB_JOIN_MEMBER_POD_NAME"]
+				return handleMemberJoin(podName)
+			case "switchover":
+				podName := req.Parameters["KB_LEADER_POD_NAME"]
+				return handleSwitchOver(podName)
+			default:
+				return rsp, nil
+			}
+		}).AnyTimes()
+	})
+}
diff --git a/pkg/testutil/k8s/pod_util.go b/pkg/testutil/k8s/pod_util.go
index 44c9f100e6e..5e36bb25319 100644
--- a/pkg/testutil/k8s/pod_util.go
+++ b/pkg/testutil/k8s/pod_util.go
@@ -91,6 +91,12 @@ func MockPodIsFailed(ctx context.Context, testCtx testutil.TestContext, pod *cor
 	gomega.Expect(testCtx.Cli.Status().Patch(ctx, pod, patch)).Should(gomega.Succeed())
 }
 
+func MockPodIsRunning(ctx context.Context, testCtx testutil.TestContext, pod *corev1.Pod) {
+	patch := client.MergeFrom(pod.DeepCopy())
+	pod.Status.Phase = corev1.PodRunning
+	gomega.Expect(testCtx.Cli.Status().Patch(ctx, pod, patch)).Should(gomega.Succeed())
+}
+
 // RemovePodFinalizer removes the pod finalizer to delete the pod finally.
 func RemovePodFinalizer(ctx context.Context, testCtx testutil.TestContext, pod *corev1.Pod) {
 	patch := client.MergeFrom(pod.DeepCopy())