Merge pull request #314 from shiftstack/merge-bot-main
NO-JIRA: Bump to latest CAPO v0.10
openshift-merge-bot[bot] authored May 20, 2024
2 parents be72b75 + 43c0ed5 commit 61be938
Showing 8 changed files with 120 additions and 28 deletions.
1 change: 0 additions & 1 deletion OWNERS_ALIASES
@@ -24,4 +24,3 @@ aliases:
- mdbooth
cluster-api-openstack-reviewers:
- emilienm
- dulek
25 changes: 15 additions & 10 deletions controllers/openstackmachine_controller.go
@@ -776,17 +776,22 @@ func (r *OpenStackMachineReconciler) getOrCreateInstance(logger logr.Logger, ope
}
if instanceStatus == nil {
// Check if there is an existing instance with machine name, in case where instance ID would not have been stored in machine status
if instanceStatus, err = computeService.GetInstanceStatusByName(openStackMachine, openStackMachine.Name); err == nil {
if instanceStatus != nil {
return instanceStatus, nil
}
if openStackMachine.Status.InstanceID != nil {
logger.Info("Not reconciling machine in failed state. The previously existing OpenStack instance is no longer available")
conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, clusterv1.ConditionSeverityError, "virtual machine no longer exists")
openStackMachine.SetFailure(capierrors.UpdateMachineError, errors.New("virtual machine no longer exists"))
return nil, nil
}
instanceStatus, err = computeService.GetInstanceStatusByName(openStackMachine, openStackMachine.Name)
if err != nil {
logger.Info("Unable to get OpenStack instance by name", "name", openStackMachine.Name)
conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceCreateFailedReason, clusterv1.ConditionSeverityError, err.Error())
return nil, err
}
if instanceStatus != nil {
return instanceStatus, nil
}
if openStackMachine.Status.InstanceID != nil {
logger.Info("Not reconciling machine in failed state. The previously existing OpenStack instance is no longer available")
conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, clusterv1.ConditionSeverityError, "virtual machine no longer exists")
openStackMachine.SetFailure(capierrors.UpdateMachineError, errors.New("virtual machine no longer exists"))
return nil, nil
}

instanceSpec, err := machineToInstanceSpec(openStackCluster, machine, openStackMachine, userData)
if err != nil {
return nil, err
1 change: 1 addition & 0 deletions docs/book/src/clusteropenstack/configuration.md
@@ -565,6 +565,7 @@ permitted from anywhere, as with the default rules).
We can add security group rules that authorize traffic from all nodes via `allNodesSecurityGroupRules`.
It takes a list of security group rules that should be applied to selected nodes.
The following rule fields are mutually exclusive: `remoteManagedGroups`, `remoteGroupID` and `remoteIPPrefix`.
If none of these fields are set, the rule will have a remote IP prefix of `0.0.0.0/0` per Neutron default.

Valid values for `remoteManagedGroups` are `controlplane`, `worker` and `bastion`.
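
For illustration, a minimal sketch of such a rule in an `OpenStackCluster` spec is shown below. The rule fields mirror the `SecurityGroupRuleSpec` fields exercised by the tests in this commit; the surrounding keys and the `name`, `direction` and `etherType` fields are assumptions that should be checked against the v1beta1 API reference.

```yaml
# Hypothetical sketch: allow SSH between all managed nodes.
# Verify field names and required fields against the v1beta1 CRD before use.
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: OpenStackCluster
metadata:
  name: example-cluster
spec:
  managedSecurityGroups:
    allNodesSecurityGroupRules:
    - name: allow-ssh-between-nodes   # assumed identifier field
      direction: ingress              # assumed field
      etherType: IPv4                 # assumed field
      protocol: tcp
      portRangeMin: 22
      portRangeMax: 22
      remoteManagedGroups:
      - controlplane
      - worker
```

In this sketch the single spec entry would translate into one Neutron rule per referenced managed group, so port 22 traffic from both the control plane and worker groups reaches every node.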

3 changes: 3 additions & 0 deletions docs/book/src/topics/crd-changes/v1alpha7-to-v1beta1.md
@@ -396,6 +396,9 @@ The field `managedSecurityGroups` is now a pointer to a `ManagedSecurityGroups`
Also, we can now add security group rules that authorize traffic from all nodes via `allNodesSecurityGroupRules`.
It takes a list of security group rules that should be applied to selected nodes.
The following rule fields are mutually exclusive: `remoteManagedGroups`, `remoteGroupID` and `remoteIPPrefix`.
If none of these fields are set, the rule will have a remote IP prefix of `0.0.0.0/0` per Neutron default.

Valid values for `remoteManagedGroups` are `controlplane`, `worker` and `bastion`.

Also, `OpenStackCluster.Spec.AllowAllInClusterTraffic` moved under `ManagedSecurityGroups`.
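
A hedged before/after sketch of that move follows; the exact key spelling (`allowAllInClusterTraffic`) is assumed and should be checked against the generated v1beta1 API reference.

```yaml
# v1alpha7 (before) -- assumed layout:
# spec:
#   allowAllInClusterTraffic: true
#   managedSecurityGroups: true

# v1beta1 (after) -- the flag now lives under managedSecurityGroups:
spec:
  managedSecurityGroups:
    allowAllInClusterTraffic: true
```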
5 changes: 3 additions & 2 deletions pkg/cloud/services/loadbalancer/loadbalancer.go
@@ -92,9 +92,10 @@ func (s *Service) ReconcileLoadBalancer(openStackCluster *infrav1.OpenStackClust

if lb.ProvisioningStatus != loadBalancerProvisioningStatusActive {
var err error
lb, err = s.waitForLoadBalancerActive(lb.ID)
lbID := lb.ID
lb, err = s.waitForLoadBalancerActive(lbID)
if err != nil {
return false, fmt.Errorf("load balancer %q with id %s is not active after timeout: %v", loadBalancerName, lb.ID, err)
return false, fmt.Errorf("load balancer %q with id %s is not active after timeout: %v", loadBalancerName, lbID, err)
}
}

28 changes: 28 additions & 0 deletions pkg/cloud/services/loadbalancer/loadbalancer_test.go
@@ -45,6 +45,13 @@ func Test_ReconcileLoadBalancer(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()

// Shortcut wait timeout
backoffDurationPrev := backoff.Duration
backoff.Duration = 0
defer func() {
backoff.Duration = backoffDurationPrev
}()

// Stub the call to net.LookupHost
lookupHost = func(host string) (addrs *string, err error) {
if net.ParseIP(host) != nil {
@@ -139,6 +146,27 @@
},
wantError: nil,
},
{
name: "reconcile loadbalancer in non active state should timeout",
expectNetwork: func(*mock.MockNetworkClientMockRecorder) {
// add network api call results here
},
expectLoadBalancer: func(m *mock.MockLbClientMockRecorder) {
pendingLB := loadbalancers.LoadBalancer{
ID: "aaaaaaaa-bbbb-cccc-dddd-333333333333",
Name: "k8s-clusterapi-cluster-AAAAA-kubeapi",
ProvisioningStatus: "PENDING_CREATE",
}

// return existing loadbalancer in non-active state
lbList := []loadbalancers.LoadBalancer{pendingLB}
m.ListLoadBalancers(loadbalancers.ListOpts{Name: pendingLB.Name}).Return(lbList, nil)

// wait for loadbalancer until it times out
m.GetLoadBalancer("aaaaaaaa-bbbb-cccc-dddd-333333333333").Return(&pendingLB, nil).Return(&pendingLB, nil).AnyTimes()
},
wantError: fmt.Errorf("load balancer \"k8s-clusterapi-cluster-AAAAA-kubeapi\" with id aaaaaaaa-bbbb-cccc-dddd-333333333333 is not active after timeout: timed out waiting for the condition"),
},
}
for _, tt := range lbtests {
t.Run(tt.name, func(t *testing.T) {
4 changes: 0 additions & 4 deletions pkg/cloud/services/networking/securitygroups.go
@@ -297,10 +297,6 @@ func getAllNodesRules(remoteManagedGroups map[string]string, allNodesSecurityGro

// validateRemoteManagedGroups validates that the remoteManagedGroups target existing managed security groups.
func validateRemoteManagedGroups(remoteManagedGroups map[string]string, ruleRemoteManagedGroups []infrav1.ManagedSecurityGroupName) error {
if len(ruleRemoteManagedGroups) == 0 {
return fmt.Errorf("remoteManagedGroups is required")
}

for _, group := range ruleRemoteManagedGroups {
if _, ok := remoteManagedGroups[group.String()]; !ok {
return fmt.Errorf("remoteManagedGroups: %s is not a valid remote managed security group", group)
81 changes: 70 additions & 11 deletions pkg/cloud/services/networking/securitygroups_test.go
@@ -49,19 +49,14 @@ func TestValidateRemoteManagedGroups(t *testing.T) {
wantErr: true,
},
{
name: "Valid rule with missing remoteManagedGroups",
name: "Valid rule with no remoteManagedGroups",
rule: infrav1.SecurityGroupRuleSpec{
PortRangeMin: ptr.To(22),
PortRangeMax: ptr.To(22),
Protocol: ptr.To("tcp"),
PortRangeMin: ptr.To(22),
PortRangeMax: ptr.To(22),
Protocol: ptr.To("tcp"),
RemoteIPPrefix: ptr.To("0.0.0.0/0"),
},
remoteManagedGroups: map[string]string{
"self": "self",
"controlplane": "1",
"worker": "2",
"bastion": "3",
},
wantErr: true,
wantErr: false,
},
{
name: "Valid rule with remoteManagedGroups",
@@ -171,6 +166,70 @@ func TestGetAllNodesRules(t *testing.T) {
},
},
},
{
name: "Valid remoteIPPrefix in a rule",
remoteManagedGroups: map[string]string{
"controlplane": "1",
"worker": "2",
},
allNodesSecurityGroupRules: []infrav1.SecurityGroupRuleSpec{
{
Protocol: ptr.To("tcp"),
PortRangeMin: ptr.To(22),
PortRangeMax: ptr.To(22),
RemoteIPPrefix: ptr.To("0.0.0.0/0"),
},
},
wantRules: []resolvedSecurityGroupRuleSpec{
{
Protocol: "tcp",
PortRangeMin: 22,
PortRangeMax: 22,
RemoteIPPrefix: "0.0.0.0/0",
},
},
},
{
name: "Valid allNodesSecurityGroupRules with no remote parameter",
remoteManagedGroups: map[string]string{
"controlplane": "1",
"worker": "2",
},
allNodesSecurityGroupRules: []infrav1.SecurityGroupRuleSpec{
{
Protocol: ptr.To("tcp"),
PortRangeMin: ptr.To(22),
PortRangeMax: ptr.To(22),
},
},
wantRules: []resolvedSecurityGroupRuleSpec{
{
Protocol: "tcp",
PortRangeMin: 22,
PortRangeMax: 22,
},
},
wantErr: false,
},
{
name: "Invalid allNodesSecurityGroupRules with bastion while remoteManagedGroups does not have bastion",
remoteManagedGroups: map[string]string{
"controlplane": "1",
"worker": "2",
},
allNodesSecurityGroupRules: []infrav1.SecurityGroupRuleSpec{
{
Protocol: ptr.To("tcp"),
PortRangeMin: ptr.To(22),
PortRangeMax: ptr.To(22),
RemoteManagedGroups: []infrav1.ManagedSecurityGroupName{
"bastion",
},
},
},
wantRules: nil,
wantErr: true,
},
{
name: "Invalid allNodesSecurityGroupRules with wrong remoteManagedGroups",
remoteManagedGroups: map[string]string{
