dockercluster_controller.go
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package controllers implements controller functionality.
package controllers

import (
	"context"

	"github.com/go-logr/logr"
	"github.com/pkg/errors"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
	"sigs.k8s.io/cluster-api/test/infrastructure/docker/docker"
	"sigs.k8s.io/cluster-api/util"
	"sigs.k8s.io/cluster-api/util/conditions"
	"sigs.k8s.io/cluster-api/util/patch"
	"sigs.k8s.io/cluster-api/util/predicates"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// DockerClusterReconciler reconciles a DockerCluster object.
type DockerClusterReconciler struct {
	client.Client
	Log logr.Logger
}

// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=dockerclusters,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=dockerclusters/status;dockerclusters/finalizers,verbs=get;update;patch

// Reconcile reads the state of the cluster for a DockerCluster object and makes changes based on the state read
// and what is in the DockerCluster.Spec.
func (r *DockerClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, rerr error) {
	log := ctrl.LoggerFrom(ctx)

	// Fetch the DockerCluster instance.
	dockerCluster := &infrav1.DockerCluster{}
	if err := r.Client.Get(ctx, req.NamespacedName, dockerCluster); err != nil {
		if apierrors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, err
	}

	// Fetch the Cluster.
	cluster, err := util.GetOwnerCluster(ctx, r.Client, dockerCluster.ObjectMeta)
	if err != nil {
		return ctrl.Result{}, err
	}
	if cluster == nil {
		log.Info("Waiting for Cluster Controller to set OwnerRef on DockerCluster")
		return ctrl.Result{}, nil
	}

	log = log.WithValues("cluster", cluster.Name)

	// Create a helper for managing the docker container hosting the load balancer.
	externalLoadBalancer, err := docker.NewLoadBalancer(cluster, dockerCluster)
	if err != nil {
		return ctrl.Result{}, errors.Wrapf(err, "failed to create helper for managing the externalLoadBalancer")
	}

	// Initialize the patch helper.
	patchHelper, err := patch.NewHelper(dockerCluster, r.Client)
	if err != nil {
		return ctrl.Result{}, err
	}

	// Always attempt to patch the DockerCluster object and status after each reconciliation.
	defer func() {
		if err := patchDockerCluster(ctx, patchHelper, dockerCluster); err != nil {
			log.Error(err, "failed to patch DockerCluster")
			if rerr == nil {
				rerr = err
			}
		}
	}()

	// Support FailureDomains.
	// A cloud provider would typically look up which failure domains are supported and set the status accordingly.
	// In the case of Docker, failure domains don't mean much, so we simply copy the Spec into the Status.
	dockerCluster.Status.FailureDomains = dockerCluster.Spec.FailureDomains

	// Add the finalizer first if it does not exist, to avoid a race condition between init and delete.
	if !controllerutil.ContainsFinalizer(dockerCluster, infrav1.ClusterFinalizer) {
		controllerutil.AddFinalizer(dockerCluster, infrav1.ClusterFinalizer)
		return ctrl.Result{}, nil
	}

	// Handle deleted clusters.
	if !dockerCluster.DeletionTimestamp.IsZero() {
		return r.reconcileDelete(ctx, dockerCluster, externalLoadBalancer)
	}

	// Handle non-deleted clusters.
	return r.reconcileNormal(ctx, dockerCluster, externalLoadBalancer)
}

func patchDockerCluster(ctx context.Context, patchHelper *patch.Helper, dockerCluster *infrav1.DockerCluster) error {
	// Always update the readyCondition by summarizing the state of the other conditions.
	// A step counter is added to represent progress during provisioning (it is hidden during the deletion process instead).
	conditions.SetSummary(dockerCluster,
		conditions.WithConditions(
			infrav1.LoadBalancerAvailableCondition,
		),
		conditions.WithStepCounterIf(dockerCluster.ObjectMeta.DeletionTimestamp.IsZero()),
	)

	// Patch the object, ignoring conflicts on the conditions owned by this controller.
	return patchHelper.Patch(
		ctx,
		dockerCluster,
		patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
			clusterv1.ReadyCondition,
			infrav1.LoadBalancerAvailableCondition,
		}},
	)
}

func (r *DockerClusterReconciler) reconcileNormal(ctx context.Context, dockerCluster *infrav1.DockerCluster, externalLoadBalancer *docker.LoadBalancer) (ctrl.Result, error) {
	// Create the docker container hosting the load balancer.
	if err := externalLoadBalancer.Create(ctx); err != nil {
		conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, infrav1.LoadBalancerProvisioningFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
		return ctrl.Result{}, errors.Wrap(err, "failed to create load balancer")
	}

	// Set the ControlPlaneEndpoint to the load balancer IP so the Cluster API Cluster Controller can pick it up.
	lbIP, err := externalLoadBalancer.IP(ctx)
	if err != nil {
		conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, infrav1.LoadBalancerProvisioningFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
		return ctrl.Result{}, errors.Wrap(err, "failed to get ip for the load balancer")
	}
	dockerCluster.Spec.ControlPlaneEndpoint = infrav1.APIEndpoint{
		Host: lbIP,
		Port: 6443,
	}

	// Mark the dockerCluster ready.
	dockerCluster.Status.Ready = true
	conditions.MarkTrue(dockerCluster, infrav1.LoadBalancerAvailableCondition)

	return ctrl.Result{}, nil
}

func (r *DockerClusterReconciler) reconcileDelete(ctx context.Context, dockerCluster *infrav1.DockerCluster, externalLoadBalancer *docker.LoadBalancer) (ctrl.Result, error) {
	// Set the LoadBalancerAvailableCondition reporting that deletion has started, and issue a patch in order to make
	// this visible to the users.
	// NB. The operation in docker is fast, so there is a chance the user will not notice the status change;
	// nevertheless we are issuing a patch so we can exercise a pattern that will be used by other providers as well.
	patchHelper, err := patch.NewHelper(dockerCluster, r.Client)
	if err != nil {
		return ctrl.Result{}, err
	}
	conditions.MarkFalse(dockerCluster, infrav1.LoadBalancerAvailableCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "")
	if err := patchDockerCluster(ctx, patchHelper, dockerCluster); err != nil {
		return ctrl.Result{}, errors.Wrap(err, "failed to patch DockerCluster")
	}

	// Delete the docker container hosting the load balancer.
	if err := externalLoadBalancer.Delete(ctx); err != nil {
		return ctrl.Result{}, errors.Wrap(err, "failed to delete load balancer")
	}

	// The Cluster is being deleted, so remove the finalizer.
	controllerutil.RemoveFinalizer(dockerCluster, infrav1.ClusterFinalizer)

	return ctrl.Result{}, nil
}

// SetupWithManager will add watches for this controller.
func (r *DockerClusterReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
	c, err := ctrl.NewControllerManagedBy(mgr).
		For(&infrav1.DockerCluster{}).
		WithOptions(options).
		WithEventFilter(predicates.ResourceNotPaused(r.Log)).
		Build(r)
	if err != nil {
		return err
	}
	return c.Watch(
		&source.Kind{Type: &clusterv1.Cluster{}},
		handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("DockerCluster"))),
		predicates.ClusterUnpaused(r.Log),
	)
}
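
// A minimal sketch of registering this reconciler with a controller-runtime manager.
// It assumes a runtime scheme ("scheme" below) that already has the clusterv1 and infrav1
// types added; the concurrency value is an illustrative assumption, not a project default.
//
//	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
//	if err != nil {
//		return err
//	}
//	r := &DockerClusterReconciler{
//		Client: mgr.GetClient(),
//		Log:    ctrl.Log.WithName("controllers").WithName("DockerCluster"),
//	}
//	if err := r.SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: 1}); err != nil {
//		return err
//	}
//	return mgr.Start(ctrl.SetupSignalHandler())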