openstackcluster_controller.go
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"errors"
"fmt"
"time"
"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/networks"
"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports"
"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/subnets"
corev1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/tools/record"
"k8s.io/utils/ptr"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
capierrors "sigs.k8s.io/cluster-api/errors"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/collections"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/cluster-api/util/predicates"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
infrav1alpha1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1alpha1"
infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1"
"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/compute"
"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/loadbalancer"
"sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/networking"
"sigs.k8s.io/cluster-api-provider-openstack/pkg/scope"
utils "sigs.k8s.io/cluster-api-provider-openstack/pkg/utils/controllers"
capoerrors "sigs.k8s.io/cluster-api-provider-openstack/pkg/utils/errors"
"sigs.k8s.io/cluster-api-provider-openstack/pkg/utils/names"
)
const (
waitForBastionToReconcile = 15 * time.Second
)
// OpenStackClusterReconciler reconciles an OpenStackCluster object.
type OpenStackClusterReconciler struct {
Client client.Client
Recorder record.EventRecorder
WatchFilterValue string
ScopeFactory scope.Factory
CaCertificates []byte // PEM-encoded CA certificates.
}
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=openstackclusters,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=openstackclusters/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch
func (r *OpenStackClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, reterr error) {
log := ctrl.LoggerFrom(ctx)
// Fetch the OpenStackCluster instance
openStackCluster := &infrav1.OpenStackCluster{}
err := r.Client.Get(ctx, req.NamespacedName, openStackCluster)
if err != nil {
if apierrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// Fetch the Cluster.
cluster, err := util.GetOwnerCluster(ctx, r.Client, openStackCluster.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if cluster == nil {
log.Info("Cluster Controller has not yet set OwnerRef")
return reconcile.Result{}, nil
}
log = log.WithValues("cluster", cluster.Name)
if annotations.IsPaused(cluster, openStackCluster) {
log.Info("OpenStackCluster or linked Cluster is marked as paused. Not reconciling")
return reconcile.Result{}, nil
}
patchHelper, err := patch.NewHelper(openStackCluster, r.Client)
if err != nil {
return ctrl.Result{}, err
}
// Always patch the openStackCluster when exiting this function so we can persist any OpenStackCluster changes.
defer func() {
if err := patchHelper.Patch(ctx, openStackCluster); err != nil {
result = ctrl.Result{}
reterr = kerrors.NewAggregate([]error{reterr, fmt.Errorf("error patching OpenStackCluster %s/%s: %w", openStackCluster.Namespace, openStackCluster.Name, err)})
}
}()
clientScope, err := r.ScopeFactory.NewClientScopeFromObject(ctx, r.Client, r.CaCertificates, log, openStackCluster)
if err != nil {
return reconcile.Result{}, err
}
scope := scope.NewWithLogger(clientScope, log)
// Handle deleted clusters
if !openStackCluster.DeletionTimestamp.IsZero() {
return r.reconcileDelete(ctx, scope, cluster, openStackCluster)
}
// Handle non-deleted clusters
return r.reconcileNormal(ctx, scope, cluster, openStackCluster)
}
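// A minimal OpenStackCluster manifest that the Reconcile function above would act
// on. This is an illustrative sketch with assumed values, not an example shipped
// with this file; field names follow the v1beta1 API:
//
//	apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
//	kind: OpenStackCluster
//	metadata:
//	  name: my-cluster
//	  namespace: default
//	spec:
//	  identityRef:
//	    name: my-cluster-cloud-config
//	    cloudName: openstack
//	  apiServerLoadBalancer:
//	    enabled: true
//	  managedSubnets:
//	    - cidr: 10.6.0.0/24
//	  externalNetwork:
//	    id: 0a5d4e3c-0000-0000-0000-000000000000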
func (r *OpenStackClusterReconciler) reconcileDelete(ctx context.Context, scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) {
scope.Logger().Info("Reconciling Cluster delete")
// Wait for machines to be deleted before removing the finalizer as they
// depend on this resource to deprovision. Additionally it appears that
// allowing the Kubernetes API to vanish too quickly will upset the capi
// kubeadm control plane controller.
machines, err := collections.GetFilteredMachinesForCluster(ctx, r.Client, cluster)
if err != nil {
return ctrl.Result{}, err
}
if len(machines) != 0 {
scope.Logger().Info("Waiting for machines to be deleted", "remaining", len(machines))
return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
}
clusterResourceName := names.ClusterResourceName(cluster)
// A bastion may have been created if cluster initialisation previously reached the point of populating the network status.
// We attempt to delete it even if no status was written, just in case.
if openStackCluster.Status.Network != nil {
if err := r.deleteBastion(ctx, scope, cluster, openStackCluster); err != nil {
return reconcile.Result{}, err
}
}
// If a bastion server was found, we need to keep reconciling until it is actually deleted.
// We don't want to remove the cluster finalizer until the associated OpenStackServer resource is deleted.
bastionServer, err := r.getBastionServer(ctx, openStackCluster, cluster)
if client.IgnoreNotFound(err) != nil {
return reconcile.Result{}, err
}
if bastionServer != nil {
scope.Logger().Info("Waiting for the bastion OpenStackServer object to be deleted", "openStackServer", bastionServer.Name)
return ctrl.Result{Requeue: true}, nil
}
networkingService, err := networking.NewService(scope)
if err != nil {
return reconcile.Result{}, err
}
if openStackCluster.Spec.APIServerLoadBalancer.IsEnabled() {
loadBalancerService, err := loadbalancer.NewService(scope)
if err != nil {
return reconcile.Result{}, err
}
if err = loadBalancerService.DeleteLoadBalancer(openStackCluster, clusterResourceName); err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to delete load balancer: %w", err), false)
return reconcile.Result{}, fmt.Errorf("failed to delete load balancer: %w", err)
}
}
// If ManagedSubnets was not set, no network was created.
if len(openStackCluster.Spec.ManagedSubnets) > 0 {
if err = networkingService.DeleteRouter(openStackCluster, clusterResourceName); err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to delete router: %w", err), false)
return ctrl.Result{}, fmt.Errorf("failed to delete router: %w", err)
}
if err = networkingService.DeleteClusterPorts(openStackCluster); err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to delete ports: %w", err), false)
return reconcile.Result{}, fmt.Errorf("failed to delete ports: %w", err)
}
if err = networkingService.DeleteNetwork(openStackCluster, clusterResourceName); err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to delete network: %w", err), false)
return ctrl.Result{}, fmt.Errorf("failed to delete network: %w", err)
}
}
if err = networkingService.DeleteSecurityGroups(openStackCluster, clusterResourceName); err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to delete security groups: %w", err), false)
return reconcile.Result{}, fmt.Errorf("failed to delete security groups: %w", err)
}
// Cluster is deleted so remove the finalizer.
controllerutil.RemoveFinalizer(openStackCluster, infrav1.ClusterFinalizer)
scope.Logger().Info("Reconciled Cluster deleted successfully")
return ctrl.Result{}, nil
}
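// contains reports whether target is present in arr. Shown for illustration
// only: on Go 1.21+ the equivalent standard-library call would be
// slices.Contains(arr, target).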
func contains(arr []string, target string) bool {
for _, a := range arr {
if a == target {
return true
}
}
return false
}
func (r *OpenStackClusterReconciler) deleteBastion(ctx context.Context, scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
scope.Logger().Info("Deleting Bastion")
computeService, err := compute.NewService(scope)
if err != nil {
return err
}
networkingService, err := networking.NewService(scope)
if err != nil {
return err
}
bastionServer, err := r.getBastionServer(ctx, openStackCluster, cluster)
if client.IgnoreNotFound(err) != nil {
return err
}
if openStackCluster.Status.Bastion != nil && openStackCluster.Status.Bastion.FloatingIP != "" {
if err = networkingService.DeleteFloatingIP(openStackCluster, openStackCluster.Status.Bastion.FloatingIP); err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to delete floating IP: %w", err), false)
return fmt.Errorf("failed to delete floating IP: %w", err)
}
}
bastionStatus := openStackCluster.Status.Bastion
var instanceStatus *compute.InstanceStatus
if bastionStatus != nil && bastionServer != nil && bastionServer.Status.InstanceID != nil {
instanceStatus, err = computeService.GetInstanceStatus(*bastionServer.Status.InstanceID)
if err != nil {
return err
}
}
if instanceStatus != nil {
instanceNS, err := instanceStatus.NetworkStatus()
if err != nil {
return err
}
addresses := instanceNS.Addresses()
for _, address := range addresses {
if address.Type == corev1.NodeExternalIP {
// The floating IP may not have been properly saved in the bastion status (and thus not deleted above); delete any remaining floating IP
if err = networkingService.DeleteFloatingIP(openStackCluster, address.Address); err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to delete floating IP: %w", err), false)
return fmt.Errorf("failed to delete floating IP: %w", err)
}
}
}
}
if err := r.reconcileDeleteBastionServer(ctx, scope, openStackCluster, cluster); err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to delete bastion: %w", err), false)
return fmt.Errorf("failed to delete bastion: %w", err)
}
openStackCluster.Status.Bastion = nil
scope.Logger().Info("Deleted Bastion")
return nil
}
func (r *OpenStackClusterReconciler) reconcileNormal(ctx context.Context, scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) { //nolint:unparam
scope.Logger().Info("Reconciling Cluster")
// If the OpenStackCluster doesn't have our finalizer, add it.
if controllerutil.AddFinalizer(openStackCluster, infrav1.ClusterFinalizer) {
// Register the finalizer immediately to avoid orphaning OpenStack resources on delete
return reconcile.Result{}, nil
}
computeService, err := compute.NewService(scope)
if err != nil {
return reconcile.Result{}, err
}
err = reconcileNetworkComponents(scope, cluster, openStackCluster)
if err != nil {
return reconcile.Result{}, err
}
// TODO(emilien) we should do that separately but the reconcileBastion
// should happen after the cluster Ready is true
result, err := r.reconcileBastion(ctx, scope, cluster, openStackCluster)
if err != nil {
return reconcile.Result{}, err
}
if result != nil {
return *result, nil
}
availabilityZones, err := computeService.GetAvailabilityZones()
if err != nil {
return ctrl.Result{}, err
}
// Create a new map in case any AZs have been removed from OpenStack
openStackCluster.Status.FailureDomains = make(clusterv1.FailureDomains)
for _, az := range availabilityZones {
// By default, the AZ is used or not used for control plane nodes depending on the flag
found := !ptr.Deref(openStackCluster.Spec.ControlPlaneOmitAvailabilityZone, false)
// If explicit AZs for control plane nodes are given, they override the value
if len(openStackCluster.Spec.ControlPlaneAvailabilityZones) > 0 {
found = contains(openStackCluster.Spec.ControlPlaneAvailabilityZones, az.ZoneName)
}
// Add the AZ object to the failure domains for the cluster
openStackCluster.Status.FailureDomains[az.ZoneName] = clusterv1.FailureDomainSpec{
ControlPlane: found,
}
}
openStackCluster.Status.Ready = true
openStackCluster.Status.FailureMessage = nil
openStackCluster.Status.FailureReason = nil
scope.Logger().Info("Reconciled Cluster created successfully")
return reconcile.Result{}, nil
}
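// Worked example for the failure-domain logic in reconcileNormal above, using
// hypothetical values: with availability zones [az-1, az-2, az-3],
// ControlPlaneOmitAvailabilityZone unset and
// ControlPlaneAvailabilityZones = [az-1], the resulting status is:
//
//	Status.FailureDomains = map[string]clusterv1.FailureDomainSpec{
//		"az-1": {ControlPlane: true},
//		"az-2": {ControlPlane: false},
//		"az-3": {ControlPlane: false},
//	}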
func (r *OpenStackClusterReconciler) reconcileBastion(ctx context.Context, scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (*ctrl.Result, error) {
scope.Logger().V(4).Info("Reconciling Bastion")
clusterResourceName := names.ClusterResourceName(cluster)
computeService, err := compute.NewService(scope)
if err != nil {
return nil, err
}
networkingService, err := networking.NewService(scope)
if err != nil {
return nil, err
}
bastionServer, waitingForServer, err := r.reconcileBastionServer(ctx, scope, openStackCluster, cluster)
if err != nil || waitingForServer {
return &reconcile.Result{RequeueAfter: waitForBastionToReconcile}, err
}
if bastionServer == nil {
return nil, nil
}
var instanceStatus *compute.InstanceStatus
if bastionServer != nil && bastionServer.Status.InstanceID != nil {
if instanceStatus, err = computeService.GetInstanceStatus(*bastionServer.Status.InstanceID); err != nil {
return nil, err
}
}
if instanceStatus == nil {
// At this point we return an error if we don't have an instance status
return nil, fmt.Errorf("bastion instance status is nil")
}
// Save hash & status as soon as we know we have an instance
instanceStatus.UpdateBastionStatus(openStackCluster)
port, err := computeService.GetManagementPort(openStackCluster, instanceStatus)
if err != nil {
err = fmt.Errorf("getting management port for bastion: %w", err)
handleUpdateOSCError(openStackCluster, err, false)
return nil, err
}
return bastionAddFloatingIP(openStackCluster, clusterResourceName, port, networkingService)
}
func bastionAddFloatingIP(openStackCluster *infrav1.OpenStackCluster, clusterResourceName string, port *ports.Port, networkingService *networking.Service) (*reconcile.Result, error) {
fp, err := networkingService.GetFloatingIPByPortID(port.ID)
if err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to get or create floating IP for bastion: %w", err), false)
return nil, fmt.Errorf("failed to get floating IP for bastion port: %w", err)
}
if fp != nil {
// Floating IP is already attached to bastion, no need to proceed
openStackCluster.Status.Bastion.FloatingIP = fp.FloatingIP
return nil, nil
}
var floatingIP *string
switch {
case openStackCluster.Status.Bastion.FloatingIP != "":
// Some floating IP has already been created for this bastion, make sure we re-use it
floatingIP = &openStackCluster.Status.Bastion.FloatingIP
case openStackCluster.Spec.Bastion.FloatingIP != nil:
// Use floating IP from the spec
floatingIP = openStackCluster.Spec.Bastion.FloatingIP
}
// Check for an existing floating IP attached to the bastion, in case the FloatingIP has not yet been stored in the cluster status
fp, err = networkingService.GetOrCreateFloatingIP(openStackCluster, openStackCluster, clusterResourceName, floatingIP)
if err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to get or create floating IP for bastion: %w", err), false)
return nil, fmt.Errorf("failed to get or create floating IP for bastion: %w", err)
}
openStackCluster.Status.Bastion.FloatingIP = fp.FloatingIP
err = networkingService.AssociateFloatingIP(openStackCluster, fp, port.ID)
if err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to associate floating IP with bastion: %w", err), false)
return nil, fmt.Errorf("failed to associate floating IP with bastion: %w", err)
}
return nil, nil
}
// reconcileDeleteBastionServer reconciles deletion of the OpenStackServer object for the OpenStackCluster bastion.
// It returns nil if the OpenStackServer object is not found; otherwise it deletes the object and returns any error from the delete call.
func (r *OpenStackClusterReconciler) reconcileDeleteBastionServer(ctx context.Context, scope *scope.WithLogger, openStackCluster *infrav1.OpenStackCluster, cluster *clusterv1.Cluster) error {
scope.Logger().Info("Reconciling Bastion delete server")
server := &infrav1alpha1.OpenStackServer{}
err := r.Client.Get(ctx, client.ObjectKey{Namespace: openStackCluster.Namespace, Name: bastionName(cluster.Name)}, server)
if client.IgnoreNotFound(err) != nil {
return err
}
if apierrors.IsNotFound(err) {
return nil
}
return r.Client.Delete(ctx, server)
}
// reconcileBastionServer reconciles the OpenStackServer object for the OpenStackCluster bastion.
// It returns the OpenStackServer object, a boolean which is true when the caller should requeue and wait
// for the bastion server to converge, and an error if any.
func (r *OpenStackClusterReconciler) reconcileBastionServer(ctx context.Context, scope *scope.WithLogger, openStackCluster *infrav1.OpenStackCluster, cluster *clusterv1.Cluster) (*infrav1alpha1.OpenStackServer, bool, error) {
server, err := r.getBastionServer(ctx, openStackCluster, cluster)
if client.IgnoreNotFound(err) != nil {
scope.Logger().Error(err, "Failed to get the bastion OpenStackServer object")
return nil, true, err
}
bastionNotFound := apierrors.IsNotFound(err)
// If the bastion is not enabled, we don't need to create it and continue with the reconciliation.
if bastionNotFound && !openStackCluster.Spec.Bastion.IsEnabled() {
return nil, false, nil
}
// If the bastion is found but is not enabled, we need to delete it and reconcile.
if !bastionNotFound && !openStackCluster.Spec.Bastion.IsEnabled() {
scope.Logger().Info("Bastion is not enabled, deleting the OpenStackServer object")
if err := r.deleteBastion(ctx, scope, cluster, openStackCluster); err != nil {
return nil, true, err
}
return nil, true, nil
}
// If the bastion is found but the spec has changed, we need to delete it and reconcile.
bastionServerSpec := bastionToOpenStackServerSpec(openStackCluster)
if !bastionNotFound && server != nil && !apiequality.Semantic.DeepEqual(bastionServerSpec, &server.Spec) {
scope.Logger().Info("Bastion spec has changed, re-creating the OpenStackServer object")
if err := r.deleteBastion(ctx, scope, cluster, openStackCluster); err != nil {
return nil, true, err
}
return nil, true, nil
}
// If the bastion is not found, we need to create it.
if bastionNotFound {
scope.Logger().Info("Creating the bastion OpenStackServer object")
server, err = r.createBastionServer(ctx, openStackCluster, cluster)
if err != nil {
return nil, true, err
}
return server, true, nil
}
// If the bastion server is not ready, we need to wait for it to be ready and reconcile.
if !server.Status.Ready {
scope.Logger().Info("Waiting for the bastion OpenStackServer to be ready")
return server, true, nil
}
return server, false, nil
}
// getBastionServer fetches the OpenStackServer object for the bastion server.
// It returns the object and any error from the client, including a NotFound error if the object does not exist.
func (r *OpenStackClusterReconciler) getBastionServer(ctx context.Context, openStackCluster *infrav1.OpenStackCluster, cluster *clusterv1.Cluster) (*infrav1alpha1.OpenStackServer, error) {
bastionServer := &infrav1alpha1.OpenStackServer{}
bastionServerName := client.ObjectKey{
Namespace: openStackCluster.Namespace,
Name: bastionName(cluster.Name),
}
err := r.Client.Get(ctx, bastionServerName, bastionServer)
if err != nil {
return nil, err
}
return bastionServer, nil
}
// createBastionServer creates the OpenStackServer object for the bastion server.
// It returns the OpenStackServer object and an error if any.
func (r *OpenStackClusterReconciler) createBastionServer(ctx context.Context, openStackCluster *infrav1.OpenStackCluster, cluster *clusterv1.Cluster) (*infrav1alpha1.OpenStackServer, error) {
bastionServerSpec := bastionToOpenStackServerSpec(openStackCluster)
bastionServer := &infrav1alpha1.OpenStackServer{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
clusterv1.ClusterNameLabel: openStackCluster.Labels[clusterv1.ClusterNameLabel],
},
Name: bastionName(cluster.Name),
Namespace: openStackCluster.Namespace,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: openStackCluster.APIVersion,
Kind: openStackCluster.Kind,
Name: openStackCluster.Name,
UID: openStackCluster.UID,
},
},
},
Spec: *bastionServerSpec,
}
if err := r.Client.Create(ctx, bastionServer); err != nil {
return nil, fmt.Errorf("failed to create bastion server: %w", err)
}
return bastionServer, nil
}
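// For illustration: given a cluster named "my-cluster" in namespace "default",
// createBastionServer above creates an OpenStackServer named
// "my-cluster-bastion" in "default", labelled with
// cluster.x-k8s.io/cluster-name and owned by the OpenStackCluster, so it is
// removed by garbage collection if the owning cluster object disappears.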
// bastionToOpenStackServerSpec converts the OpenStackMachineSpec for the bastion to an OpenStackServerSpec.
// It returns the resulting OpenStackServerSpec.
func bastionToOpenStackServerSpec(openStackCluster *infrav1.OpenStackCluster) *infrav1alpha1.OpenStackServerSpec {
bastion := openStackCluster.Spec.Bastion
if bastion == nil {
bastion = &infrav1.Bastion{}
}
if bastion.Spec == nil {
// For the case when Bastion is deleted but we don't have spec, let's use an empty one.
// v1beta1 API validations prevent this from happening in normal circumstances.
bastion.Spec = &infrav1.OpenStackMachineSpec{}
}
az := ""
if bastion.AvailabilityZone != nil {
az = *bastion.AvailabilityZone
}
openStackServerSpec := openStackMachineSpecToOpenStackServerSpec(bastion.Spec, openStackCluster.Spec.IdentityRef, compute.InstanceTags(bastion.Spec, openStackCluster), az, nil, getBastionSecurityGroupID(openStackCluster), openStackCluster.Status.Network.ID)
return openStackServerSpec
}
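// bastionName returns the name of the bastion OpenStackServer for a cluster;
// for example, bastionName("my-cluster") returns "my-cluster-bastion".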
func bastionName(clusterResourceName string) string {
return fmt.Sprintf("%s-bastion", clusterResourceName)
}
// getBastionSecurityGroupID returns the ID of the bastion security group if
// managed security groups are enabled.
func getBastionSecurityGroupID(openStackCluster *infrav1.OpenStackCluster) *string {
if openStackCluster.Spec.ManagedSecurityGroups == nil {
return nil
}
if openStackCluster.Status.BastionSecurityGroup != nil {
return &openStackCluster.Status.BastionSecurityGroup.ID
}
return nil
}
func resolveLoadBalancerNetwork(openStackCluster *infrav1.OpenStackCluster, networkingService *networking.Service) error {
lbSpec := openStackCluster.Spec.APIServerLoadBalancer
if lbSpec.IsEnabled() {
lbStatus := openStackCluster.Status.APIServerLoadBalancer
if lbStatus == nil {
lbStatus = &infrav1.LoadBalancer{}
openStackCluster.Status.APIServerLoadBalancer = lbStatus
}
lbNetStatus := lbStatus.LoadBalancerNetwork
if lbNetStatus == nil {
lbNetStatus = &infrav1.NetworkStatusWithSubnets{
NetworkStatus: infrav1.NetworkStatus{},
}
}
if lbSpec.Network != nil {
lbNet, err := networkingService.GetNetworkByParam(lbSpec.Network)
if err != nil {
if errors.Is(err, capoerrors.ErrFilterMatch) {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to find loadbalancer network: %w", err), true)
}
return fmt.Errorf("failed to find network: %w", err)
}
lbNetStatus.Name = lbNet.Name
lbNetStatus.ID = lbNet.ID
lbNetStatus.Tags = lbNet.Tags
// Filter out only relevant subnets specified by the spec
lbNetStatus.Subnets = []infrav1.Subnet{}
for _, s := range lbSpec.Subnets {
matchFound := false
for _, subnetID := range lbNet.Subnets {
if s.ID != nil && subnetID == *s.ID {
matchFound = true
lbNetStatus.Subnets = append(
lbNetStatus.Subnets, infrav1.Subnet{
ID: *s.ID,
})
}
}
if !matchFound {
handleUpdateOSCError(openStackCluster, fmt.Errorf("no subnet match was found in the specified network (specified subnet: %v, available subnets: %v)", s, lbNet.Subnets), false)
return fmt.Errorf("no subnet match was found in the specified network (specified subnet: %v, available subnets: %v)", s, lbNet.Subnets)
}
}
}
}
return nil
}
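// Worked example for the subnet matching in resolveLoadBalancerNetwork above,
// with hypothetical IDs: given lbSpec.Subnets = [{ID: "subnet-a"}] and a load
// balancer network whose subnets are ["subnet-a", "subnet-b"], only
// {ID: "subnet-a"} is appended to lbNetStatus.Subnets; a spec subnet with no
// match in the network (e.g. {ID: "subnet-c"}) makes the function return an error.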
func reconcileNetworkComponents(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
clusterResourceName := names.ClusterResourceName(cluster)
networkingService, err := networking.NewService(scope)
if err != nil {
return err
}
scope.Logger().Info("Reconciling network components")
err = networkingService.ReconcileExternalNetwork(openStackCluster)
if err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to reconcile external network: %w", err), false)
return fmt.Errorf("failed to reconcile external network: %w", err)
}
if len(openStackCluster.Spec.ManagedSubnets) == 0 {
if err := reconcilePreExistingNetworkComponents(scope, networkingService, openStackCluster); err != nil {
return err
}
} else if len(openStackCluster.Spec.ManagedSubnets) == 1 {
if err := reconcileProvisionedNetworkComponents(networkingService, openStackCluster, clusterResourceName); err != nil {
return err
}
} else {
return fmt.Errorf("failed to reconcile network: ManagedSubnets only supports one element, %d provided", len(openStackCluster.Spec.ManagedSubnets))
}
err = resolveLoadBalancerNetwork(openStackCluster, networkingService)
if err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to reconcile loadbalancer network: %w", err), false)
return fmt.Errorf("failed to reconcile loadbalancer network: %w", err)
}
err = networkingService.ReconcileSecurityGroups(openStackCluster, clusterResourceName)
if err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to reconcile security groups: %w", err), false)
return fmt.Errorf("failed to reconcile security groups: %w", err)
}
return reconcileControlPlaneEndpoint(scope, networkingService, openStackCluster, clusterResourceName)
}
// reconcilePreExistingNetworkComponents reconciles the cluster network status when the cluster is
// using pre-existing networks and subnets which are not provisioned by the
// cluster controller.
func reconcilePreExistingNetworkComponents(scope *scope.WithLogger, networkingService *networking.Service, openStackCluster *infrav1.OpenStackCluster) error {
scope.Logger().V(4).Info("No need to reconcile network, searching network and subnet instead")
if openStackCluster.Status.Network == nil {
openStackCluster.Status.Network = &infrav1.NetworkStatusWithSubnets{}
}
if openStackCluster.Spec.Network != nil {
network, err := networkingService.GetNetworkByParam(openStackCluster.Spec.Network)
if err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to find network: %w", err), false)
return fmt.Errorf("error fetching cluster network: %w", err)
}
setClusterNetwork(openStackCluster, network)
}
subnets, err := getClusterSubnets(networkingService, openStackCluster)
if err != nil {
return err
}
// Populate the cluster status with the cluster subnets
capoSubnets := make([]infrav1.Subnet, len(subnets))
for i := range subnets {
subnet := &subnets[i]
capoSubnets[i] = infrav1.Subnet{
ID: subnet.ID,
Name: subnet.Name,
CIDR: subnet.CIDR,
Tags: subnet.Tags,
}
}
if err := utils.ValidateSubnets(capoSubnets); err != nil {
return err
}
openStackCluster.Status.Network.Subnets = capoSubnets
// If network is not yet populated, use networkID defined on the first
// cluster subnet to get the Network. Cluster subnets are constrained to
// be in the same network.
if openStackCluster.Status.Network.ID == "" && len(subnets) > 0 {
network, err := networkingService.GetNetworkByID(subnets[0].NetworkID)
if err != nil {
return err
}
setClusterNetwork(openStackCluster, network)
}
return nil
}
func reconcileProvisionedNetworkComponents(networkingService *networking.Service, openStackCluster *infrav1.OpenStackCluster, clusterResourceName string) error {
err := networkingService.ReconcileNetwork(openStackCluster, clusterResourceName)
if err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to reconcile network: %w", err), false)
return fmt.Errorf("failed to reconcile network: %w", err)
}
err = networkingService.ReconcileSubnet(openStackCluster, clusterResourceName)
if err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to reconcile subnets: %w", err), false)
return fmt.Errorf("failed to reconcile subnets: %w", err)
}
err = networkingService.ReconcileRouter(openStackCluster, clusterResourceName)
if err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to reconcile router: %w", err), false)
return fmt.Errorf("failed to reconcile router: %w", err)
}
return nil
}
// reconcileControlPlaneEndpoint configures the control plane endpoint for the
// cluster, creating it if necessary, and updates ControlPlaneEndpoint in the
// cluster spec.
func reconcileControlPlaneEndpoint(scope *scope.WithLogger, networkingService *networking.Service, openStackCluster *infrav1.OpenStackCluster, clusterResourceName string) error {
// Calculate the port that we will use for the API server
apiServerPort := getAPIServerPort(openStackCluster)
// host must be set by a matching control plane endpoint provider below
var host string
switch {
// API server load balancer is enabled. Create an Octavia load balancer.
// Note that we reconcile the load balancer even if the control plane
// endpoint is already set.
case openStackCluster.Spec.APIServerLoadBalancer.IsEnabled():
loadBalancerService, err := loadbalancer.NewService(scope)
if err != nil {
return err
}
terminalFailure, err := loadBalancerService.ReconcileLoadBalancer(openStackCluster, clusterResourceName, apiServerPort)
if err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("failed to reconcile load balancer: %w", err), terminalFailure)
return fmt.Errorf("failed to reconcile load balancer: %w", err)
}
// Control plane endpoint is the floating IP if one was defined, otherwise the VIP address
if openStackCluster.Status.APIServerLoadBalancer.IP != "" {
host = openStackCluster.Status.APIServerLoadBalancer.IP
} else {
host = openStackCluster.Status.APIServerLoadBalancer.InternalIP
}
// Control plane endpoint is already set
// Note that checking this here means that we don't re-execute any of
// the branches below if the control plane endpoint is already set.
case openStackCluster.Spec.ControlPlaneEndpoint != nil && openStackCluster.Spec.ControlPlaneEndpoint.IsValid():
host = openStackCluster.Spec.ControlPlaneEndpoint.Host
// API server load balancer is disabled, but floating IP is not. Create
// a floating IP to be attached directly to a control plane host.
case !ptr.Deref(openStackCluster.Spec.DisableAPIServerFloatingIP, false):
fp, err := networkingService.GetOrCreateFloatingIP(openStackCluster, openStackCluster, clusterResourceName, openStackCluster.Spec.APIServerFloatingIP)
if err != nil {
handleUpdateOSCError(openStackCluster, fmt.Errorf("floating IP cannot be got or created: %w", err), false)
return fmt.Errorf("floating IP cannot be got or created: %w", err)
}
host = fp.FloatingIP
// API server load balancer is disabled and we aren't using a control
// plane floating IP. In this case we configure APIServerFixedIP as the
// control plane endpoint and leave it to the user to configure load
// balancing.
case openStackCluster.Spec.APIServerFixedIP != nil:
host = *openStackCluster.Spec.APIServerFixedIP
// Control plane endpoint is not set, and none can be created
default:
err := fmt.Errorf("unable to determine control plane endpoint")
handleUpdateOSCError(openStackCluster, err, false)
return err
}
openStackCluster.Spec.ControlPlaneEndpoint = &clusterv1.APIEndpoint{
Host: host,
Port: int32(apiServerPort),
}
return nil
}
// getAPIServerPort returns the port to use for the API server based on the cluster spec.
func getAPIServerPort(openStackCluster *infrav1.OpenStackCluster) int {
switch {
case openStackCluster.Spec.ControlPlaneEndpoint != nil && openStackCluster.Spec.ControlPlaneEndpoint.IsValid():
return int(openStackCluster.Spec.ControlPlaneEndpoint.Port)
case openStackCluster.Spec.APIServerPort != nil:
return *openStackCluster.Spec.APIServerPort
}
return 6443
}
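// Port selection in getAPIServerPort above, shown with hypothetical values:
//
//	ControlPlaneEndpoint = {Host: "192.0.2.10", Port: 6444}  -> 6444
//	APIServerPort = ptr.To(8443), endpoint unset             -> 8443
//	neither set                                              -> 6443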
func (r *OpenStackClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
clusterToInfraFn := util.ClusterToInfrastructureMapFunc(ctx, infrav1.SchemeGroupVersion.WithKind("OpenStackCluster"), mgr.GetClient(), &infrav1.OpenStackCluster{})
log := ctrl.LoggerFrom(ctx)
return ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrav1.OpenStackCluster{}).
Watches(
&clusterv1.Cluster{},
handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []reconcile.Request {
requests := clusterToInfraFn(ctx, o)
if len(requests) < 1 {
return nil
}
c := &infrav1.OpenStackCluster{}
if err := r.Client.Get(ctx, requests[0].NamespacedName, c); err != nil {
log.V(4).Error(err, "Failed to get OpenStack cluster")
return nil
}
if annotations.IsExternallyManaged(c) {
log.V(4).Info("OpenStackCluster is externally managed, skipping mapping")
return nil
}
return requests
}),
builder.WithPredicates(predicates.ClusterUnpaused(ctrl.LoggerFrom(ctx))),
).
WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
WithEventFilter(predicates.ResourceIsNotExternallyManaged(ctrl.LoggerFrom(ctx))).
Complete(r)
}
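// Illustrative sketch of wiring this reconciler through SetupWithManager above.
// The option values and the scopeFactory variable are assumptions for the
// example, not taken from this file:
//
//	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
//	if err != nil { /* handle error */ }
//	r := &OpenStackClusterReconciler{
//		Client:       mgr.GetClient(),
//		Recorder:     mgr.GetEventRecorderFor("openstackcluster-controller"),
//		ScopeFactory: scopeFactory, // hypothetical scope.Factory implementation
//	}
//	if err := r.SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 1}); err != nil { /* handle error */ }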
func handleUpdateOSCError(openstackCluster *infrav1.OpenStackCluster, message error, isFatal bool) {
if isFatal {
err := capierrors.UpdateClusterError
openstackCluster.Status.FailureReason = &err
openstackCluster.Status.FailureMessage = ptr.To(message.Error())
}
}
// getClusterSubnets retrieves the subnets based on the Subnet filters specified on the OpenStackCluster.
func getClusterSubnets(networkingService *networking.Service, openStackCluster *infrav1.OpenStackCluster) ([]subnets.Subnet, error) {
var clusterSubnets []subnets.Subnet
var err error
openStackClusterSubnets := openStackCluster.Spec.Subnets
networkID := ""
if openStackCluster.Status.Network != nil {
networkID = openStackCluster.Status.Network.ID
}
if len(openStackClusterSubnets) == 0 {
if networkID == "" {
// This should be a validation error
return nil, fmt.Errorf("no network or subnets specified in OpenStackCluster spec")
}
listOpts := subnets.ListOpts{
NetworkID: networkID,
}
clusterSubnets, err = networkingService.GetSubnetsByFilter(listOpts)
if err != nil {
err = fmt.Errorf("failed to find subnets: %w", err)
if errors.Is(err, capoerrors.ErrFilterMatch) {
handleUpdateOSCError(openStackCluster, err, true)
}
return nil, err
}
if len(clusterSubnets) > 2 {
return nil, fmt.Errorf("more than two subnets found in the Network. Specify the subnets in the OpenStackCluster.Spec instead")
}
} else {
for subnet := range openStackClusterSubnets {
filteredSubnet, err := networkingService.GetNetworkSubnetByParam(networkID, &openStackClusterSubnets[subnet])
if err != nil {
err = fmt.Errorf("failed to find subnet %d in network %s: %w", subnet, networkID, err)
if errors.Is(err, capoerrors.ErrFilterMatch) {
handleUpdateOSCError(openStackCluster, err, true)
}
return nil, err
}
clusterSubnets = append(clusterSubnets, *filteredSubnet)
// Constrain the next search to the network of the first subnet
networkID = filteredSubnet.NetworkID
}
}
return clusterSubnets, nil
}
// setClusterNetwork sets network information in the cluster status from an OpenStack network.
func setClusterNetwork(openStackCluster *infrav1.OpenStackCluster, network *networks.Network) {
openStackCluster.Status.Network.ID = network.ID
openStackCluster.Status.Network.Name = network.Name
openStackCluster.Status.Network.Tags = network.Tags
}