OCPVE-669: chore: refactor e2e tests #424

Merged
controllers/lvmcluster_controller.go (4 changes: 2 additions & 2 deletions)

@@ -155,10 +155,10 @@ func (r *LVMClusterReconciler) reconcile(ctx context.Context, instance *lvmv1alp
 	}
 	if lvsExist {
 		waitForLVRemoval := time.Second * 10
-		err := fmt.Errorf("found PVCs provisioned by topolvm, waiting %s for their deletion: %w", waitForLVRemoval, err)
+		err := fmt.Errorf("found PVCs provisioned by topolvm, waiting %s for their deletion", waitForLVRemoval)
 		r.WarningEvent(ctx, instance, EventReasonErrorDeletionPending, err)
 		// check every 10 seconds if there are still PVCs present
-		return ctrl.Result{RequeueAfter: waitForLVRemoval}, err
+		return ctrl.Result{RequeueAfter: waitForLVRemoval}, nil
 	}
 
 	logger.Info("processing LVMCluster deletion")
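Review note: this change follows the standard controller-runtime convention. Returning a non-nil error makes the manager log it and requeue with exponential backoff, while returning nil together with RequeueAfter schedules a quiet, fixed-interval re-check, which fits a "waiting for external cleanup" state. A minimal sketch of the pattern; the helper name and wiring are illustrative, not code from this PR:

package controllers

import (
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

// requeueWhilePending is a hypothetical helper showing the pattern used
// above: surface the pending state via an event or log entry, then return
// a nil error so controller-runtime schedules a clean fixed-interval
// requeue instead of an error-backoff requeue.
func requeueWhilePending(pending bool) (ctrl.Result, error) {
	if pending {
		// re-check every 10 seconds rather than failing the reconcile
		return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
	}
	return ctrl.Result{}, nil
}

The WarningEvent call still surfaces the pending state to users, so no information is lost by dropping the returned error.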
test/e2e/aws_disk.go → test/e2e/aws_disk_test.go (9 changes: 6 additions & 3 deletions)

@@ -22,12 +22,15 @@ import (
"strings"
"time"

. "github.com/onsi/ginkgo/v2"

"github.com/go-logr/logr"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/go-logr/logr"
"github.com/onsi/ginkgo/v2"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
@@ -211,7 +214,7 @@ func getEC2Client(ctx context.Context, region string) (*ec2.EC2, error) {
 		Region:      aws.String(region),
 		Credentials: credentials.NewStaticCredentials(string(id), string(key), ""),
 		Logger: aws.LoggerFunc(func(args ...interface{}) {
-			ginkgo.GinkgoLogr.Info(fmt.Sprint(args), "source", "aws")
+			GinkgoLogr.Info(fmt.Sprint(args), "source", "aws")
 		}),
 	})
 	if err != nil {
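Review note: with the file renamed to aws_disk_test.go and Ginkgo dot-imported, DSL identifiers such as GinkgoLogr no longer need the ginkgo. qualifier. A minimal sketch of that convention, assuming a standard Ginkgo v2 suite bootstrap (not this PR's actual suite file):

package e2e

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// TestE2E wires Ginkgo into "go test"; with the dot import, the DSL
// identifiers (Describe, By, GinkgoLogr, ...) are available unqualified
// throughout the package.
func TestE2E(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "LVM Operator e2e suite")
}

var _ = Describe("logging", func() {
	It("writes through GinkgoLogr", func() {
		GinkgoLogr.Info("hello from the suite", "source", "example")
	})
})

The _test.go suffix also keeps these helpers out of the compiled operator binary, since Go builds them only for tests.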
test/e2e/config.go → test/e2e/config_test.go (5 changes: 4 additions & 1 deletion)

@@ -24,16 +24,19 @@ import (
 	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
 	configv1 "github.com/openshift/api/config/v1"
 	secv1 "github.com/openshift/api/security/v1"
-	lvmv1 "github.com/openshift/lvm-operator/api/v1alpha1"
+
 	operatorv1 "github.com/operator-framework/api/pkg/operators/v1"
 	operatorv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
+
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/serializer"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	k8sscheme "k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	crclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+	lvmv1 "github.com/openshift/lvm-operator/api/v1alpha1"
 )
 
 const (
test/e2e/disk_setup.go → test/e2e/disk_setup_test.go (16 changes: 4 additions & 12 deletions)

@@ -27,13 +27,11 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)

func diskSetup(ctx context.Context) error {
func diskSetup(ctx context.Context) {
// get nodes
By(fmt.Sprintf("getting all worker nodes by label %s", labelNodeRoleWorker))
nodeList := &corev1.NodeList{}
if err := crClient.List(ctx, nodeList, client.HasLabels{labelNodeRoleWorker}); err != nil {
return fmt.Errorf("could not list worker nodes nodes for Disk setup: %w", err)
}
Expect(crClient.List(ctx, nodeList, client.HasLabels{labelNodeRoleWorker})).To(Succeed())

By("getting AWS region info from the first Node spec")
nodeInfo, err := getAWSNodeInfo(nodeList.Items[0])
Expand All @@ -51,8 +49,6 @@ func diskSetup(ctx context.Context) error {
 	// create and attach volumes
 	By("creating and attaching Disks")
 	Expect(NewAWSDiskManager(ec2, GinkgoLogr).CreateAndAttachAWSVolumes(ctx, nodeEnv)).To(Succeed())
-
-	return nil
 }

 func getNodeEnvironmentFromNodeList(nodeList *corev1.NodeList) ([]NodeDisks, error) {

@@ -76,13 +72,11 @@ func getNodeEnvironmentFromNodeList(nodeList *corev1.NodeList) ([]NodeDisks, err
 	return nodeEnv, nil
 }
 
-func diskRemoval(ctx context.Context) error {
+func diskTeardown(ctx context.Context) {
 	// get nodes
 	By(fmt.Sprintf("getting all worker nodes by label %s", labelNodeRoleWorker))
 	nodeList := &corev1.NodeList{}
-	if err := crClient.List(ctx, nodeList, client.HasLabels{labelNodeRoleWorker}); err != nil {
-		return fmt.Errorf("could not list worker nodes nodes for Disk setup: %w", err)
-	}
+	Expect(crClient.List(ctx, nodeList, client.HasLabels{labelNodeRoleWorker})).To(Succeed())
 
 	By("getting AWS region info from the first Node spec")
 	nodeInfo, err := getAWSNodeInfo(nodeList.Items[0])

@@ -96,6 +90,4 @@ func diskRemoval(ctx context.Context) error {
 	// cleaning Disk
 	By("cleaning up Disks")
 	Expect(NewAWSDiskManager(ec2, GinkgoLogr).cleanupAWSDisks(ctx)).To(Succeed())
-
-	return err
 }
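Review note: dropping the error returns in favor of inline Gomega assertions is idiomatic for helpers that only run inside Ginkgo specs: a failed Expect aborts the spec at that step, so callers no longer need error plumbing. A minimal before/after sketch under that assumption; the names here are illustrative, not this PR's code:

package e2e

import (
	"context"
	"fmt"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// doWork is a hypothetical stand-in for a real setup call.
func doWork(ctx context.Context) error { return nil }

// Before: the helper returns an error that every caller must propagate.
func setupReturningError(ctx context.Context) error {
	if err := doWork(ctx); err != nil {
		return fmt.Errorf("setup failed: %w", err)
	}
	return nil
}

// After: the helper asserts inline; a failure stops the spec immediately,
// with the By() step name recorded in the failure output.
func setupAsserting(ctx context.Context) {
	By("doing the work")
	Expect(doWork(ctx)).To(Succeed())
}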