🐛 Remove logger from scope #1913

Merged · 1 commit · Feb 28, 2024
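This PR applies one pattern across every reconciler: the `ScopeFactory` now returns a plain client scope without a logger, and each caller wraps that scope together with its own per-reconcile logger via `scope.NewWithLogger` before handing it to the services. The wrapper itself lives in `pkg/scope` and is not part of this diff, so the sketch below is only an assumed shape, reconstructed from how `scope.NewWithLogger(clientScope, log)` and `scope.Logger()` are used at the call sites below; names and fields are guesses, not the committed implementation.

```go
package scope

import "github.com/go-logr/logr"

// Scope is the client scope produced by the ScopeFactory. Its real
// methods (OpenStack clients, project info, CA certificates, ...) are
// elided in this sketch.
type Scope interface{}

// WithLogger pairs a client Scope with the logger of the current
// reconcile call, now that Scope itself no longer carries a logger.
type WithLogger struct {
	Scope
	logger logr.Logger
}

// NewWithLogger is what the reconcilers call immediately after the
// factory returns a client scope.
func NewWithLogger(clientScope Scope, logger logr.Logger) *WithLogger {
	return &WithLogger{Scope: clientScope, logger: logger}
}

// Logger replaces the logger that used to be read from the Scope; the
// reconcile helpers call it as scope.Logger().Info(...).
func (s *WithLogger) Logger() logr.Logger {
	return s.logger
}
```

In the tests this also removes the need to build mock scope factories with a discarded logger: the factory is constructed without one, and each test wraps it with `GinkgoLogr` or `testr.New(t)` at the point of use.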
15 changes: 8 additions & 7 deletions controllers/openstackcluster_controller.go
@@ -116,10 +116,11 @@ func (r *OpenStackClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req
}
}()

scope, err := r.ScopeFactory.NewClientScopeFromCluster(ctx, r.Client, openStackCluster, r.CaCertificates, log)
clientScope, err := r.ScopeFactory.NewClientScopeFromCluster(ctx, r.Client, openStackCluster, r.CaCertificates, log)
if err != nil {
return reconcile.Result{}, err
}
scope := scope.NewWithLogger(clientScope, log)

// Resolve and store referenced & dependent resources for the bastion
if openStackCluster.Spec.Bastion != nil && openStackCluster.Spec.Bastion.Enabled {
@@ -154,7 +155,7 @@ func (r *OpenStackClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req
return reconcileNormal(scope, cluster, openStackCluster)
}

func (r *OpenStackClusterReconciler) reconcileDelete(ctx context.Context, scope scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) {
func (r *OpenStackClusterReconciler) reconcileDelete(ctx context.Context, scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) {
scope.Logger().Info("Reconciling Cluster delete")

// Wait for machines to be deleted before removing the finalizer as they
@@ -232,7 +233,7 @@ func contains(arr []string, target string) bool {
return false
}

func deleteBastion(scope scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
func deleteBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
scope.Logger().Info("Deleting Bastion")

computeService, err := compute.NewService(scope)
@@ -313,7 +314,7 @@ func deleteBastion(scope scope.Scope, cluster *clusterv1.Cluster, openStackClust
return nil
}

func reconcileNormal(scope scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) { //nolint:unparam
func reconcileNormal(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) { //nolint:unparam
scope.Logger().Info("Reconciling Cluster")

// If the OpenStackCluster doesn't have our finalizer, add it.
@@ -364,7 +365,7 @@ func reconcileNormal(scope scope.Scope, cluster *clusterv1.Cluster, openStackClu
return reconcile.Result{}, nil
}

func reconcileBastion(scope scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) {
func reconcileBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) {
scope.Logger().Info("Reconciling Bastion")

if openStackCluster.Spec.Bastion == nil || !openStackCluster.Spec.Bastion.Enabled {
@@ -542,7 +543,7 @@ func getBastionSecurityGroups(openStackCluster *infrav1.OpenStackCluster) []infr
return instanceSpecSecurityGroups
}

func getOrCreateBastionPorts(scope scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, networkingService *networking.Service, clusterName string) error {
func getOrCreateBastionPorts(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, networkingService *networking.Service, clusterName string) error {
scope.Logger().Info("Reconciling ports for bastion", "bastion", bastionName(openStackCluster.Name))

if openStackCluster.Status.Bastion == nil {
@@ -584,7 +585,7 @@ func bastionHashHasChanged(computeHash string, clusterAnnotations map[string]str
return latestHash != computeHash
}

func reconcileNetworkComponents(scope scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
func reconcileNetworkComponents(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
clusterName := fmt.Sprintf("%s-%s", cluster.Namespace, cluster.Name)

networkingService, err := networking.NewService(scope)
38 changes: 28 additions & 10 deletions controllers/openstackcluster_controller_test.go
@@ -22,7 +22,6 @@ import (
"reflect"
"testing"

"github.com/go-logr/logr"
"github.com/golang/mock/gomock"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
@@ -105,7 +104,7 @@ var _ = Describe("OpenStackCluster controller", func() {
framework.CreateNamespace(ctx, input)

mockCtrl = gomock.NewController(GinkgoT())
mockScopeFactory = scope.NewMockScopeFactory(mockCtrl, "", logr.Discard())
mockScopeFactory = scope.NewMockScopeFactory(mockCtrl, "")
reconciler = func() *OpenStackClusterReconciler {
return &OpenStackClusterReconciler{
Client: k8sClient,
@@ -207,8 +206,10 @@ var _ = Describe("OpenStackCluster controller", func() {
}
err = k8sClient.Status().Update(ctx, testCluster)
Expect(err).To(BeNil())
scope, err := mockScopeFactory.NewClientScopeFromCluster(ctx, k8sClient, testCluster, nil, logr.Discard())
log := GinkgoLogr
clientScope, err := mockScopeFactory.NewClientScopeFromCluster(ctx, k8sClient, testCluster, nil, log)
Expect(err).To(BeNil())
scope := scope.NewWithLogger(clientScope, log)

computeClientRecorder := mockScopeFactory.ComputeClient.EXPECT()
computeClientRecorder.GetServer("bastion-uuid").Return(nil, gophercloud.ErrResourceNotFound{})
@@ -258,8 +259,10 @@ var _ = Describe("OpenStackCluster controller", func() {
err = k8sClient.Status().Update(ctx, testCluster)
Expect(err).To(BeNil())

scope, err := mockScopeFactory.NewClientScopeFromCluster(ctx, k8sClient, testCluster, nil, logr.Discard())
log := GinkgoLogr
clientScope, err := mockScopeFactory.NewClientScopeFromCluster(ctx, k8sClient, testCluster, nil, log)
Expect(err).To(BeNil())
scope := scope.NewWithLogger(clientScope, log)

server := clients.ServerExt{}
server.ID = "adopted-bastion-uuid"
@@ -342,8 +345,10 @@ var _ = Describe("OpenStackCluster controller", func() {
err = k8sClient.Status().Update(ctx, testCluster)
Expect(err).To(BeNil())

scope, err := mockScopeFactory.NewClientScopeFromCluster(ctx, k8sClient, testCluster, nil, logr.Discard())
log := GinkgoLogr
clientScope, err := mockScopeFactory.NewClientScopeFromCluster(ctx, k8sClient, testCluster, nil, log)
Expect(err).To(BeNil())
scope := scope.NewWithLogger(clientScope, log)

server := clients.ServerExt{}
server.ID = "adopted-fip-bastion-uuid"
@@ -425,8 +430,10 @@ var _ = Describe("OpenStackCluster controller", func() {
err = k8sClient.Status().Update(ctx, testCluster)
Expect(err).To(BeNil())

scope, err := mockScopeFactory.NewClientScopeFromCluster(ctx, k8sClient, testCluster, nil, logr.Discard())
log := GinkgoLogr
clientScope, err := mockScopeFactory.NewClientScopeFromCluster(ctx, k8sClient, testCluster, nil, log)
Expect(err).To(BeNil())
scope := scope.NewWithLogger(clientScope, log)

server := clients.ServerExt{}
server.ID = "requeue-bastion-uuid"
@@ -484,8 +491,10 @@ var _ = Describe("OpenStackCluster controller", func() {
err = k8sClient.Status().Update(ctx, testCluster)
Expect(err).To(BeNil())

scope, err := mockScopeFactory.NewClientScopeFromCluster(ctx, k8sClient, testCluster, nil, logr.Discard())
log := GinkgoLogr
clientScope, err := mockScopeFactory.NewClientScopeFromCluster(ctx, k8sClient, testCluster, nil, log)
Expect(err).To(BeNil())
scope := scope.NewWithLogger(clientScope, log)

server := clients.ServerExt{}
server.ID = "delete-bastion-uuid"
@@ -534,8 +543,11 @@ var _ = Describe("OpenStackCluster controller", func() {
Expect(err).To(BeNil())
err = k8sClient.Create(ctx, capiCluster)
Expect(err).To(BeNil())
scope, err := mockScopeFactory.NewClientScopeFromCluster(ctx, k8sClient, testCluster, nil, logr.Discard())

log := GinkgoLogr
clientScope, err := mockScopeFactory.NewClientScopeFromCluster(ctx, k8sClient, testCluster, nil, log)
Expect(err).To(BeNil())
scope := scope.NewWithLogger(clientScope, log)

networkClientRecorder := mockScopeFactory.NetworkClient.EXPECT()

@@ -614,8 +626,11 @@ var _ = Describe("OpenStackCluster controller", func() {
Expect(err).To(BeNil())
err = k8sClient.Create(ctx, capiCluster)
Expect(err).To(BeNil())
scope, err := mockScopeFactory.NewClientScopeFromCluster(ctx, k8sClient, testCluster, nil, logr.Discard())

log := GinkgoLogr
clientScope, err := mockScopeFactory.NewClientScopeFromCluster(ctx, k8sClient, testCluster, nil, log)
Expect(err).To(BeNil())
scope := scope.NewWithLogger(clientScope, log)

networkClientRecorder := mockScopeFactory.NetworkClient.EXPECT()

@@ -675,8 +690,11 @@ var _ = Describe("OpenStackCluster controller", func() {
Expect(err).To(BeNil())
err = k8sClient.Create(ctx, capiCluster)
Expect(err).To(BeNil())
scope, err := mockScopeFactory.NewClientScopeFromCluster(ctx, k8sClient, testCluster, nil, logr.Discard())

log := GinkgoLogr
clientScope, err := mockScopeFactory.NewClientScopeFromCluster(ctx, k8sClient, testCluster, nil, log)
Expect(err).To(BeNil())
scope := scope.NewWithLogger(clientScope, log)

networkClientRecorder := mockScopeFactory.NetworkClient.EXPECT()

11 changes: 6 additions & 5 deletions controllers/openstackfloatingippool_controller.go
@@ -83,10 +83,11 @@ func (r *OpenStackFloatingIPPoolReconciler) Reconcile(ctx context.Context, req c
return ctrl.Result{}, client.IgnoreNotFound(err)
}

scope, err := r.ScopeFactory.NewClientScopeFromFloatingIPPool(ctx, r.Client, pool, r.CaCertificates, log)
clientScope, err := r.ScopeFactory.NewClientScopeFromFloatingIPPool(ctx, r.Client, pool, r.CaCertificates, log)
if err != nil {
return reconcile.Result{}, err
}
scope := scope.NewWithLogger(clientScope, log)

// This is done before deleting the pool, because we want to handle deleted IPs before we delete the pool
if err := r.reconcileIPAddresses(ctx, scope, pool); err != nil {
@@ -203,7 +204,7 @@ func (r *OpenStackFloatingIPPoolReconciler) Reconcile(ctx context.Context, req c
return ctrl.Result{}, r.Client.Status().Update(ctx, pool)
}

func (r *OpenStackFloatingIPPoolReconciler) reconcileDelete(ctx context.Context, scope scope.Scope, pool *infrav1alpha1.OpenStackFloatingIPPool) error {
func (r *OpenStackFloatingIPPoolReconciler) reconcileDelete(ctx context.Context, scope *scope.WithLogger, pool *infrav1alpha1.OpenStackFloatingIPPool) error {
log := ctrl.LoggerFrom(ctx)
ipAddresses := &ipamv1.IPAddressList{}
if err := r.Client.List(ctx, ipAddresses, client.InNamespace(pool.Namespace), client.MatchingFields{infrav1alpha1.OpenStackFloatingIPPoolNameIndex: pool.Name}); err != nil {
@@ -267,7 +268,7 @@ func diff(a []string, b []string) []string {
return result
}

func (r *OpenStackFloatingIPPoolReconciler) reconcileIPAddresses(ctx context.Context, scope scope.Scope, pool *infrav1alpha1.OpenStackFloatingIPPool) error {
func (r *OpenStackFloatingIPPoolReconciler) reconcileIPAddresses(ctx context.Context, scope *scope.WithLogger, pool *infrav1alpha1.OpenStackFloatingIPPool) error {
ipAddresses := &ipamv1.IPAddressList{}
if err := r.Client.List(ctx, ipAddresses, client.InNamespace(pool.Namespace), client.MatchingFields{infrav1alpha1.OpenStackFloatingIPPoolNameIndex: pool.Name}); err != nil {
return err
@@ -309,7 +310,7 @@ func (r *OpenStackFloatingIPPoolReconciler) reconcileIPAddresses(ctx context.Con
return nil
}

func (r *OpenStackFloatingIPPoolReconciler) getIP(ctx context.Context, scope scope.Scope, pool *infrav1alpha1.OpenStackFloatingIPPool) (string, error) {
func (r *OpenStackFloatingIPPoolReconciler) getIP(ctx context.Context, scope *scope.WithLogger, pool *infrav1alpha1.OpenStackFloatingIPPool) (string, error) {
// There's a potential leak of IPs here, if the reconcile loop fails after we claim an IP but before we create the IPAddress object.
var ip string

@@ -388,7 +389,7 @@ func (r *OpenStackFloatingIPPoolReconciler) getIP(ctx context.Context, scope sco
return ip, nil
}

func (r *OpenStackFloatingIPPoolReconciler) reconcileFloatingIPNetwork(scope scope.Scope, pool *infrav1alpha1.OpenStackFloatingIPPool) error {
func (r *OpenStackFloatingIPPoolReconciler) reconcileFloatingIPNetwork(scope *scope.WithLogger, pool *infrav1alpha1.OpenStackFloatingIPPool) error {
// If the pool already has a network, we don't need to do anything
if pool.Status.FloatingIPNetwork != nil {
return nil
11 changes: 6 additions & 5 deletions controllers/openstackmachine_controller.go
@@ -142,10 +142,11 @@ func (r *OpenStackMachineReconciler) Reconcile(ctx context.Context, req ctrl.Req
}
}()

scope, err := r.ScopeFactory.NewClientScopeFromMachine(ctx, r.Client, openStackMachine, infraCluster, r.CaCertificates, log)
clientScope, err := r.ScopeFactory.NewClientScopeFromMachine(ctx, r.Client, openStackMachine, infraCluster, r.CaCertificates, log)
if err != nil {
return reconcile.Result{}, err
}
scope := scope.NewWithLogger(clientScope, log)

// Resolve and store referenced resources
changed, err := compute.ResolveReferencedMachineResources(scope, infraCluster, &openStackMachine.Spec, &openStackMachine.Status.ReferencedResources)
@@ -240,7 +241,7 @@ func (r *OpenStackMachineReconciler) SetupWithManager(ctx context.Context, mgr c
Complete(r)
}

func (r *OpenStackMachineReconciler) reconcileDelete(scope scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) (ctrl.Result, error) { //nolint:unparam
func (r *OpenStackMachineReconciler) reconcileDelete(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) (ctrl.Result, error) { //nolint:unparam
scope.Logger().Info("Reconciling Machine delete")

clusterName := fmt.Sprintf("%s-%s", cluster.ObjectMeta.Namespace, cluster.Name)
@@ -333,7 +334,7 @@ func GetPortIDs(ports []infrav1.PortStatus) []string {
return portIDs
}

func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope scope.Scope, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) (_ ctrl.Result, reterr error) {
func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) (_ ctrl.Result, reterr error) {
var err error

// If the OpenStackMachine is in an error state, return early.
@@ -489,7 +490,7 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope
return ctrl.Result{}, nil
}

func getOrCreateMachinePorts(scope scope.Scope, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, networkingService *networking.Service, clusterName string) error {
func getOrCreateMachinePorts(scope *scope.WithLogger, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine, networkingService *networking.Service, clusterName string) error {
scope.Logger().Info("Reconciling ports for machine", "machine", machine.Name)
var machinePortsStatus []infrav1.PortStatus
var err error
@@ -647,7 +648,7 @@ func getManagedSecurityGroups(openStackCluster *infrav1.OpenStackCluster, machin
return machineSpecSecurityGroups
}

func (r *OpenStackMachineReconciler) reconcileLoadBalancerMember(scope scope.Scope, openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine, instanceNS *compute.InstanceNetworkStatus, clusterName string) error {
func (r *OpenStackMachineReconciler) reconcileLoadBalancerMember(scope *scope.WithLogger, openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine, instanceNS *compute.InstanceNetworkStatus, clusterName string) error {
ip := instanceNS.IP(openStackCluster.Status.Network.Name)
loadbalancerService, err := loadbalancer.NewService(scope)
if err != nil {
9 changes: 4 additions & 5 deletions controllers/suite_test.go
@@ -22,7 +22,6 @@ import (
"path/filepath"
"testing"

"github.com/go-logr/logr"
"github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -127,8 +126,9 @@ var _ = Describe("EnvTest sanity check", func() {
})

var _ = Describe("When calling getOrCreate", func() {
logger := GinkgoLogr

var (
logger logr.Logger
reconsiler OpenStackMachineReconciler
mockCtrl *gomock.Controller
mockScopeFactory *scope.MockScopeFactory
@@ -138,11 +138,10 @@

BeforeEach(func() {
ctx = context.Background()
logger = logr.Discard()
reconsiler = OpenStackMachineReconciler{}
mockCtrl = gomock.NewController(GinkgoT())
mockScopeFactory = scope.NewMockScopeFactory(mockCtrl, "1234", logger)
computeService, err = compute.NewService(mockScopeFactory)
mockScopeFactory = scope.NewMockScopeFactory(mockCtrl, "1234")
computeService, err = compute.NewService(scope.NewWithLogger(mockScopeFactory, logger))
Expect(err).NotTo(HaveOccurred())
})

4 changes: 2 additions & 2 deletions pkg/cloud/services/compute/dependent_resources.go
@@ -22,7 +22,7 @@ import (
"sigs.k8s.io/cluster-api-provider-openstack/pkg/scope"
)

func ResolveDependentMachineResources(scope scope.Scope, openStackMachine *infrav1.OpenStackMachine) (changed bool, err error) {
func ResolveDependentMachineResources(scope *scope.WithLogger, openStackMachine *infrav1.OpenStackMachine) (changed bool, err error) {
changed = false

networkingService, err := networking.NewService(scope)
@@ -33,7 +33,7 @@ func ResolveDependentMachineResources(scope scope.Scope, openStackMachine *infra
return networkingService.AdoptMachinePorts(scope, openStackMachine, openStackMachine.Status.ReferencedResources.PortsOpts)
}

func ResolveDependentBastionResources(scope scope.Scope, openStackCluster *infrav1.OpenStackCluster, bastionName string) (changed bool, err error) {
func ResolveDependentBastionResources(scope *scope.WithLogger, openStackCluster *infrav1.OpenStackCluster, bastionName string) (changed bool, err error) {
changed = false

networkingService, err := networking.NewService(scope)
12 changes: 7 additions & 5 deletions pkg/cloud/services/compute/dependent_resources_test.go
@@ -19,7 +19,7 @@ package compute
import (
"testing"

"github.com/go-logr/logr"
"github.com/go-logr/logr/testr"
"github.com/golang/mock/gomock"
"github.com/google/go-cmp/cmp"
. "github.com/onsi/gomega"
@@ -93,8 +93,9 @@ func Test_ResolveDependentMachineResources(t *testing.T) {
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
g := NewWithT(t)
log := testr.New(t)
mockCtrl := gomock.NewController(t)
mockScopeFactory := scope.NewMockScopeFactory(mockCtrl, "", logr.Discard())
mockScopeFactory := scope.NewMockScopeFactory(mockCtrl, "")

defaultOpenStackMachine := &infrav1.OpenStackMachine{
ObjectMeta: metav1.ObjectMeta{
@@ -103,7 +104,7 @@ func Test_ResolveDependentMachineResources(t *testing.T) {
Status: tt.openStackMachineStatus,
}

_, err := ResolveDependentMachineResources(mockScopeFactory, defaultOpenStackMachine)
_, err := ResolveDependentMachineResources(scope.NewWithLogger(mockScopeFactory, log), defaultOpenStackMachine)
if tt.wantErr {
g.Expect(err).Error()
return
@@ -192,10 +193,11 @@ func TestResolveDependentBastionResources(t *testing.T) {
for _, tt := range tests {
t.Run(tt.testName, func(t *testing.T) {
g := NewWithT(t)
log := testr.New(t)
mockCtrl := gomock.NewController(t)
mockScopeFactory := scope.NewMockScopeFactory(mockCtrl, "", logr.Discard())
mockScopeFactory := scope.NewMockScopeFactory(mockCtrl, "")

_, err := ResolveDependentBastionResources(mockScopeFactory, tt.openStackCluster, bastionName)
_, err := ResolveDependentBastionResources(scope.NewWithLogger(mockScopeFactory, log), tt.openStackCluster, bastionName)
if tt.wantErr {
g.Expect(err).Error()
return