From 7d1ccdab131458c8d3b361b2e36d840e3c16f89a Mon Sep 17 00:00:00 2001 From: Carl Montanari Date: Sun, 17 Dec 2023 13:03:08 -0800 Subject: [PATCH] feat: ImageRequest cr instead of http request --- apis/doc.go | 4 +- apis/v1alpha1/imagerequest.go | 58 ++++ apis/v1alpha1/register.go | 2 + apis/v1alpha1/zz_generated.deepcopy.go | 98 ++++++ ...rnetes.containerlab.dev_imagerequests.yaml | 89 ++++++ ...rnetes.containerlab.dev_imagerequests.yaml | 89 ++++++ charts/clabernetes/templates/clusterrole.yaml | 44 ++- .../test-fixtures/golden/clusterrole.yaml | 40 +++ .../test-fixtures/golden/clusterrole.yaml | 35 +++ clicker/clabernetes.go | 14 +- controllers/imagerequest/controller.go | 61 ++++ controllers/imagerequest/crud.go | 83 ++++++ controllers/imagerequest/reconcile.go | 278 ++++++++++++++++++ controllers/topology/deployment.go | 2 +- controllers/topology/reconcile.go | 96 +++--- controllers/topology/reconciler.go | 71 ++++- controllers/topology/rolebinding.go | 263 +++++++++++++++++ controllers/topology/service.go | 2 +- controllers/topology/serviceaccount.go | 241 +++++++++++++++ .../render-deployment/containerlab-debug.json | 2 +- .../insecure-registries.json | 2 +- .../render-deployment/launcher-log-level.json | 2 +- .../privileged-launcher.json | 2 +- .../deployment/render-deployment/simple.json | 2 +- .../typed/apis/v1alpha1/apis_client.go | 5 + .../apis/v1alpha1/fake/fake_apis_client.go | 4 + .../apis/v1alpha1/fake/fake_imagerequest.go | 198 +++++++++++++ .../apis/v1alpha1/generated_expansion.go | 2 + .../typed/apis/v1alpha1/imagerequest.go | 244 +++++++++++++++ generated/openapi/openapi_generated.go | 223 ++++++++++++++ http/image.go | 251 ---------------- http/server.go | 5 - http/types/image.go | 13 - launcher/clabernetes.go | 42 +-- launcher/client.go | 32 ++ launcher/image.go | 168 ++++++----- manager/clabernetes.go | 20 +- manager/init.go | 13 +- manager/kubernetes.go | 10 +- manager/preinit.go | 16 +- manager/prepare.go | 16 +- manager/prestart.go | 13 +- manager/start.go | 2 + 43 files changed, 2356 insertions(+), 501 deletions(-) create mode 100644 apis/v1alpha1/imagerequest.go create mode 100644 assets/crd/clabernetes.containerlab.dev_imagerequests.yaml create mode 100644 charts/clabernetes/crds/clabernetes.containerlab.dev_imagerequests.yaml create mode 100644 controllers/imagerequest/controller.go create mode 100644 controllers/imagerequest/crud.go create mode 100644 controllers/imagerequest/reconcile.go create mode 100644 controllers/topology/rolebinding.go create mode 100644 controllers/topology/serviceaccount.go create mode 100644 generated/clientset/typed/apis/v1alpha1/fake/fake_imagerequest.go create mode 100644 generated/clientset/typed/apis/v1alpha1/imagerequest.go delete mode 100644 http/image.go delete mode 100644 http/types/image.go create mode 100644 launcher/client.go diff --git a/apis/doc.go b/apis/doc.go index eb851cb3..fd21386f 100644 --- a/apis/doc.go +++ b/apis/doc.go @@ -13,6 +13,6 @@ const ( // TopologyKindKne is the "kne" kind of topology. TopologyKindKne = "kne" - // TopologyKindUnknown is the kind of topology for unknown topologies. - TopologyKindUnknown = "unknown" + // ImageRequest is the Kind of the ImageRequest custom resource. 
+ ImageRequest = "imageRequest" ) diff --git a/apis/v1alpha1/imagerequest.go b/apis/v1alpha1/imagerequest.go new file mode 100644 index 00000000..ee517c85 --- /dev/null +++ b/apis/v1alpha1/imagerequest.go @@ -0,0 +1,58 @@ +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageRequest is an object that represents a request (from a launcher pod) to pull an image on a +// given kubernetes node such that the image can be "pulled through" into the launcher docker +// daemon. +// +k8s:openapi-gen=true +type ImageRequest struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ImageRequestSpec `json:"spec,omitempty"` + Status ImageRequestStatus `json:"status,omitempty"` +} + +// ImageRequestSpec is the spec for a Config resource. +type ImageRequestSpec struct { + // TopologyName is the name of the topology requesting the image. + TopologyName string `json:"topologyName"` + // TopologyNodeName is the name of the node in the topology (i.e. the router name in a + // containerlab topology) that the image is being requested for. + TopologyNodeName string `json:"topologyNodeName"` + // KubernetesNode is the node where the launcher pod is running and where the image should be + // pulled too. + KubernetesNode string `json:"kubernetesNode"` + // RequestedImage is the image that the launcher pod wants the controller to get pulled onto + // the specified node. + RequestedImage string `json:"requestedImage"` + // RequestedImagePullSecrets is a list of configured pull secrets to set in the pull pod spec. + // +listType=set + // +optional + RequestedImagePullSecrets []string `json:"requestedImagePullSecrets"` +} + +// ImageRequestStatus is the status for a ImageRequest resource. +type ImageRequestStatus struct { + // Accepted indicates that the ImageRequest controller has seen this image request and is going + // to process it. This can be useful to let the requesting pod know that "yep, this is in the + // works, and i can go watch the cri images on this node now". + Accepted bool `json:"accepted"` + // Complete indicates that the ImageRequest controller has seen that the puller pod has done its + // job and that the image has been pulled onto the requested node. + Complete bool `json:"complete"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageRequestList is a list of ImageRequest objects. +type ImageRequestList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + + Items []ImageRequest `json:"items"` +} diff --git a/apis/v1alpha1/register.go b/apis/v1alpha1/register.go index d8a7ece6..d0f4b363 100644 --- a/apis/v1alpha1/register.go +++ b/apis/v1alpha1/register.go @@ -44,6 +44,8 @@ func GetAPIs() (apimachineryscheme.GroupVersion, []apimachineryruntime.Object) { return SchemeGroupVersion, []apimachineryruntime.Object{ &Config{}, &ConfigList{}, + &ImageRequest{}, + &ImageRequestList{}, &Topology{}, &TopologyList{}, } diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index fb3a8494..c2bc26b0 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -394,6 +394,104 @@ func (in *ImagePull) DeepCopy() *ImagePull { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageRequest) DeepCopyInto(out *ImageRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRequest. +func (in *ImageRequest) DeepCopy() *ImageRequest { + if in == nil { + return nil + } + out := new(ImageRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRequestList) DeepCopyInto(out *ImageRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRequestList. +func (in *ImageRequestList) DeepCopy() *ImageRequestList { + if in == nil { + return nil + } + out := new(ImageRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRequestSpec) DeepCopyInto(out *ImageRequestSpec) { + *out = *in + if in.RequestedImagePullSecrets != nil { + in, out := &in.RequestedImagePullSecrets, &out.RequestedImagePullSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRequestSpec. +func (in *ImageRequestSpec) DeepCopy() *ImageRequestSpec { + if in == nil { + return nil + } + out := new(ImageRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRequestStatus) DeepCopyInto(out *ImageRequestStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRequestStatus. +func (in *ImageRequestStatus) DeepCopy() *ImageRequestStatus { + if in == nil { + return nil + } + out := new(ImageRequestStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in InsecureRegistries) DeepCopyInto(out *InsecureRegistries) { { diff --git a/assets/crd/clabernetes.containerlab.dev_imagerequests.yaml b/assets/crd/clabernetes.containerlab.dev_imagerequests.yaml new file mode 100644 index 00000000..0fbe0fdd --- /dev/null +++ b/assets/crd/clabernetes.containerlab.dev_imagerequests.yaml @@ -0,0 +1,89 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: imagerequests.clabernetes.containerlab.dev +spec: + group: clabernetes.containerlab.dev + names: + kind: ImageRequest + listKind: ImageRequestList + plural: imagerequests + singular: imagerequest + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: ImageRequest is an object that represents a request (from a launcher + pod) to pull an image on a given kubernetes node such that the image can + be "pulled through" into the launcher docker daemon. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ImageRequestSpec is the spec for a Config resource. + properties: + kubernetesNode: + description: KubernetesNode is the node where the launcher pod is + running and where the image should be pulled too. + type: string + requestedImage: + description: RequestedImage is the image that the launcher pod wants + the controller to get pulled onto the specified node. + type: string + requestedImagePullSecrets: + description: RequestedImagePullSecrets is a list of configured pull + secrets to set in the pull pod spec. + items: + type: string + type: array + x-kubernetes-list-type: set + topologyName: + description: TopologyName is the name of the topology requesting the + image. + type: string + topologyNodeName: + description: TopologyNodeName is the name of the node in the topology + (i.e. the router name in a containerlab topology) that the image + is being requested for. + type: string + required: + - kubernetesNode + - requestedImage + - topologyName + - topologyNodeName + type: object + status: + description: ImageRequestStatus is the status for a ImageRequest resource. + properties: + accepted: + description: Accepted indicates that the ImageRequest controller has + seen this image request and is going to process it. This can be + useful to let the requesting pod know that "yep, this is in the + works, and i can go watch the cri images on this node now". + type: boolean + complete: + description: Complete indicates that the ImageRequest controller has + seen that the puller pod has done its job and that the image has + been pulled onto the requested node. 
+ type: boolean + required: + - accepted + - complete + type: object + type: object + served: true + storage: true diff --git a/charts/clabernetes/crds/clabernetes.containerlab.dev_imagerequests.yaml b/charts/clabernetes/crds/clabernetes.containerlab.dev_imagerequests.yaml new file mode 100644 index 00000000..0fbe0fdd --- /dev/null +++ b/charts/clabernetes/crds/clabernetes.containerlab.dev_imagerequests.yaml @@ -0,0 +1,89 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: imagerequests.clabernetes.containerlab.dev +spec: + group: clabernetes.containerlab.dev + names: + kind: ImageRequest + listKind: ImageRequestList + plural: imagerequests + singular: imagerequest + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: ImageRequest is an object that represents a request (from a launcher + pod) to pull an image on a given kubernetes node such that the image can + be "pulled through" into the launcher docker daemon. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ImageRequestSpec is the spec for a Config resource. + properties: + kubernetesNode: + description: KubernetesNode is the node where the launcher pod is + running and where the image should be pulled too. + type: string + requestedImage: + description: RequestedImage is the image that the launcher pod wants + the controller to get pulled onto the specified node. + type: string + requestedImagePullSecrets: + description: RequestedImagePullSecrets is a list of configured pull + secrets to set in the pull pod spec. + items: + type: string + type: array + x-kubernetes-list-type: set + topologyName: + description: TopologyName is the name of the topology requesting the + image. + type: string + topologyNodeName: + description: TopologyNodeName is the name of the node in the topology + (i.e. the router name in a containerlab topology) that the image + is being requested for. + type: string + required: + - kubernetesNode + - requestedImage + - topologyName + - topologyNodeName + type: object + status: + description: ImageRequestStatus is the status for a ImageRequest resource. + properties: + accepted: + description: Accepted indicates that the ImageRequest controller has + seen this image request and is going to process it. This can be + useful to let the requesting pod know that "yep, this is in the + works, and i can go watch the cri images on this node now". + type: boolean + complete: + description: Complete indicates that the ImageRequest controller has + seen that the puller pod has done its job and that the image has + been pulled onto the requested node. 
+ type: boolean + required: + - accepted + - complete + type: object + type: object + served: true + storage: true diff --git a/charts/clabernetes/templates/clusterrole.yaml b/charts/clabernetes/templates/clusterrole.yaml index 68aac99b..bc4c3324 100644 --- a/charts/clabernetes/templates/clusterrole.yaml +++ b/charts/clabernetes/templates/clusterrole.yaml @@ -47,6 +47,7 @@ rules: - services - pods - persistentvolumeclaims + - serviceaccounts verbs: - get - list @@ -78,4 +79,45 @@ rules: - update - delete - patch - - watch \ No newline at end of file + - watch + - apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + verbs: + - get + - list + - create + - update + - delete + - patch + - watch + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + revision: "{{ .Release.Revision }}" + clabernetes/app: {{ .Values.appName }} + clabernetes/name: "{{ .Values.appName }}-launcher-role" + clabernetes/component: launcher-role + {{- if .Values.globalLabels }} +{{ .Values.globalLabels | toYaml | indent 4 }} + {{- end }} + {{- if .Values.globalAnnotations }} + annotations: +{{ .Values.globalAnnotations | toYaml | indent 4 }} + {{- end }} + name: "{{ .Values.appName }}-launcher-role" +rules: + - apiGroups: + - clabernetes.containerlab.dev + resources: + - imagerequests + verbs: + - get + - create diff --git a/charts/clabernetes/tests/clicker/test-fixtures/golden/clusterrole.yaml b/charts/clabernetes/tests/clicker/test-fixtures/golden/clusterrole.yaml index 1b34224d..afb3b9ff 100755 --- a/charts/clabernetes/tests/clicker/test-fixtures/golden/clusterrole.yaml +++ b/charts/clabernetes/tests/clicker/test-fixtures/golden/clusterrole.yaml @@ -46,6 +46,7 @@ rules: - services - pods - persistentvolumeclaims + - serviceaccounts verbs: - get - list @@ -78,3 +79,42 @@ rules: - delete - patch - watch + - apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +--- +# Source: clabernetes/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + chart: "clabernetes-0.0.0" + release: release-name + heritage: Helm + revision: "1" + clabernetes/app: clabernetes-plus-clicker + clabernetes/name: "clabernetes-plus-clicker-launcher-role" + clabernetes/component: launcher-role + anotherlabel: anotherlabelvalue + somelabel: somelabelvalue + annotations: + annotherannotation: anotherannotationvalue + someannotation: someannotationvalue + name: "clabernetes-plus-clicker-launcher-role" +rules: + - apiGroups: + - clabernetes.containerlab.dev + resources: + - imagerequests + verbs: + - get + - create diff --git a/charts/clabernetes/tests/default_vaules/test-fixtures/golden/clusterrole.yaml b/charts/clabernetes/tests/default_vaules/test-fixtures/golden/clusterrole.yaml index 6d3ab3d1..0601ef18 100755 --- a/charts/clabernetes/tests/default_vaules/test-fixtures/golden/clusterrole.yaml +++ b/charts/clabernetes/tests/default_vaules/test-fixtures/golden/clusterrole.yaml @@ -41,6 +41,7 @@ rules: - services - pods - persistentvolumeclaims + - serviceaccounts verbs: - get - list @@ -73,3 +74,37 @@ rules: - delete - patch - watch + - apiGroups: + - rbac.authorization.k8s.io + resources: + - rolebindings + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +--- +# Source: 
clabernetes/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + chart: "clabernetes-0.0.0" + release: release-name + heritage: Helm + revision: "1" + clabernetes/app: clabernetes + clabernetes/name: "clabernetes-launcher-role" + clabernetes/component: launcher-role + name: "clabernetes-launcher-role" +rules: + - apiGroups: + - clabernetes.containerlab.dev + resources: + - imagerequests + verbs: + - get + - create diff --git a/clicker/clabernetes.go b/clicker/clabernetes.go index 6a1ba1f1..891931d6 100644 --- a/clicker/clabernetes.go +++ b/clicker/clabernetes.go @@ -99,21 +99,17 @@ func (c *clabernetes) run() error { err := c.setup() if err != nil { - clabernetesutil.Panic(err.Error()) + c.logger.Fatal(err.Error()) } selfPod, err := c.getSelfPod() if err != nil { - c.logger.Criticalf("failed fetching our own pod info, err: %s", err) - - clabernetesutil.Panic(err.Error()) + c.logger.Fatalf("failed fetching our own pod info, err: %s", err) } targetNodes, err := c.getInvokeNodes() if err != nil { - c.logger.Criticalf("failed fetching cluster nodes, err: %s", err) - - clabernetesutil.Panic(err.Error()) + c.logger.Fatalf("failed fetching cluster nodes, err: %s", err) } configMap := c.buildConfigMap() @@ -122,9 +118,7 @@ func (c *clabernetes) run() error { ConfigMaps(c.namespace). Create(c.ctx, configMap, metav1.CreateOptions{}) if err != nil { - c.logger.Criticalf("failed creating clicker configmap %q, err: %s", configMap.Name, err) - - clabernetesutil.Panic(err.Error()) + c.logger.Fatalf("failed creating clicker configmap %q, err: %s", configMap.Name, err) } if !c.args.SkipConfigMapCleanup { diff --git a/controllers/imagerequest/controller.go b/controllers/imagerequest/controller.go new file mode 100644 index 00000000..5366d409 --- /dev/null +++ b/controllers/imagerequest/controller.go @@ -0,0 +1,61 @@ +package imagerequest + +import ( + clabernetesapis "github.com/srl-labs/clabernetes/apis" + clabernetesapisv1alpha1 "github.com/srl-labs/clabernetes/apis/v1alpha1" + clabernetescontrollers "github.com/srl-labs/clabernetes/controllers" + clabernetesmanagertypes "github.com/srl-labs/clabernetes/manager/types" + "k8s.io/client-go/kubernetes" + ctrlruntime "sigs.k8s.io/controller-runtime" + ctrlruntimecontroller "sigs.k8s.io/controller-runtime/pkg/controller" +) + +const ( + concurrentReconciles = 10 +) + +// NewController returns a new Controller. +func NewController( + clabernetes clabernetesmanagertypes.Clabernetes, +) clabernetescontrollers.Controller { + ctx := clabernetes.GetContext() + + baseController := clabernetescontrollers.NewBaseController( + ctx, + clabernetesapis.ImageRequest, + clabernetes.GetAppName(), + clabernetes.GetKubeConfig(), + clabernetes.GetCtrlRuntimeClient(), + ) + + c := &Controller{ + BaseController: baseController, + KubeClient: clabernetes.GetKubeClient(), + } + + return c +} + +// Controller is the Containerlab topology controller object. +type Controller struct { + *clabernetescontrollers.BaseController + // the *uncached* (non ctrl-runtime client) so we can do watches + KubeClient *kubernetes.Clientset +} + +// SetupWithManager sets up the controller with the Manager. +func (c *Controller) SetupWithManager(mgr ctrlruntime.Manager) error { + c.BaseController.Log.Infof( + "setting up %s controller with manager", + clabernetesapis.ImageRequest, + ) + + return ctrlruntime.NewControllerManagedBy(mgr). 
+ WithOptions( + ctrlruntimecontroller.Options{ + MaxConcurrentReconciles: concurrentReconciles, + }, + ). + For(&clabernetesapisv1alpha1.ImageRequest{}). + Complete(c) +} diff --git a/controllers/imagerequest/crud.go b/controllers/imagerequest/crud.go new file mode 100644 index 00000000..391c44c4 --- /dev/null +++ b/controllers/imagerequest/crud.go @@ -0,0 +1,83 @@ +package imagerequest + +import ( + "context" + + clabernetesapis "github.com/srl-labs/clabernetes/apis" + clabernetesapisv1alpha1 "github.com/srl-labs/clabernetes/apis/v1alpha1" + apimachinerytypes "k8s.io/apimachinery/pkg/types" + ctrlruntime "sigs.k8s.io/controller-runtime" +) + +// getTopologyFromReq fetches the reconcile target Topology from the Request. +func (c *Controller) getImageRequestFromReq( + ctx context.Context, + req ctrlruntime.Request, +) (*clabernetesapisv1alpha1.ImageRequest, error) { + imageRequest := &clabernetesapisv1alpha1.ImageRequest{} + + err := c.BaseController.Client.Get( + ctx, + apimachinerytypes.NamespacedName{ + Namespace: req.Namespace, + Name: req.Name, + }, + imageRequest, + ) + + return imageRequest, err +} + +func (c *Controller) update( + ctx context.Context, + imageRequest *clabernetesapisv1alpha1.ImageRequest, +) error { + c.Log.Debugf( + "updating %s '%s/%s'", + clabernetesapis.ImageRequest, + imageRequest.GetNamespace(), + imageRequest.GetName(), + ) + + err := c.Client.Update(ctx, imageRequest) + if err != nil { + c.Log.Criticalf( + "failed updating %s '%s/%s' error: %s", + clabernetesapis.ImageRequest, + imageRequest.GetNamespace(), + imageRequest.GetName(), + err, + ) + + return err + } + + return nil +} + +func (c *Controller) delete( + ctx context.Context, + imageRequest *clabernetesapisv1alpha1.ImageRequest, +) error { + c.Log.Debugf( + "deleting %s '%s/%s'", + clabernetesapis.ImageRequest, + imageRequest.GetNamespace(), + imageRequest.GetName(), + ) + + err := c.Client.Delete(ctx, imageRequest) + if err != nil { + c.Log.Criticalf( + "failed deleting %s '%s/%s' error: %s", + clabernetesapis.ImageRequest, + imageRequest.GetNamespace(), + imageRequest.GetName(), + err, + ) + + return err + } + + return nil +} diff --git a/controllers/imagerequest/reconcile.go b/controllers/imagerequest/reconcile.go new file mode 100644 index 00000000..e9452efa --- /dev/null +++ b/controllers/imagerequest/reconcile.go @@ -0,0 +1,278 @@ +package imagerequest + +import ( + "context" + "fmt" + + ctrlruntimeutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + clabernetesapisv1alpha1 "github.com/srl-labs/clabernetes/apis/v1alpha1" + clabernetesconfig "github.com/srl-labs/clabernetes/config" + clabernetesconstants "github.com/srl-labs/clabernetes/constants" + clabernetesutil "github.com/srl-labs/clabernetes/util" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + k8scorev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apimachinerywatch "k8s.io/apimachinery/pkg/watch" + + apimachineryerrors "k8s.io/apimachinery/pkg/api/errors" + + ctrlruntime "sigs.k8s.io/controller-runtime" +) + +const ( + puller = "puller" +) + +// Reconcile handles reconciliation for this controller. 
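+// The overall flow is: mark the request as accepted (and requeue), spawn a puller pod pinned
+// to the requested kubernetes node, wait for that pod to leave the pending phase (at which
+// point the image has been pulled onto the node), delete the puller pod, mark the request
+// complete, and finally delete the ImageRequest resource itself.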
+func (c *Controller) Reconcile( + ctx context.Context, + req ctrlruntime.Request, +) (ctrlruntime.Result, error) { + c.BaseController.LogReconcileStart(req) + + imageRequest, err := c.getImageRequestFromReq(ctx, req) + if err != nil { + if apimachineryerrors.IsNotFound(err) { + c.BaseController.LogReconcileCompleteObjectNotExist(req) + + return ctrlruntime.Result{}, nil + } + + c.BaseController.LogReconcileFailedGettingObject(req, err) + + return ctrlruntime.Result{}, err + } + + if imageRequest.DeletionTimestamp != nil { + return ctrlruntime.Result{}, nil + } + + if !imageRequest.Status.Accepted { + // set "accepted" so the launcher knows that the controller has seen the request + imageRequest.Status.Accepted = true + + err = c.update(ctx, imageRequest) + if err != nil { + return ctrlruntime.Result{}, err + } + + // just requeue, we won't enter this block again anyway + return ctrlruntime.Result{Requeue: true}, nil + } + + pullerPodName, err := c.spawnImagePullerPod(ctx, imageRequest) + if err != nil { + return ctrlruntime.Result{}, err + } + + err = c.waitImagePullerPodOutOfPending(ctx, imageRequest.Namespace, pullerPodName) + if err != nil { + return ctrlruntime.Result{}, err + } + + err = c.deleteImagePullerPod(ctx, imageRequest.Namespace, pullerPodName) + if err != nil { + return ctrlruntime.Result{}, err + } + + if !imageRequest.Status.Complete { + imageRequest.Status.Complete = true + + err = c.update(ctx, imageRequest) + if err != nil { + return ctrlruntime.Result{}, err + } + } + + // we've done the job of the puller pod, delete the cr + err = c.delete(ctx, imageRequest) + if err != nil { + return ctrlruntime.Result{}, err + } + + return ctrlruntime.Result{}, nil +} + +func (c *Controller) spawnImagePullerPod( + ctx context.Context, + imageRequest *clabernetesapisv1alpha1.ImageRequest, +) (string, error) { + globalAnnotations, globalLabels := clabernetesconfig.GetManager().GetAllMetadata() + + imageHash := clabernetesutil.HashBytes([]byte(imageRequest.Spec.RequestedImage)) + + selectorLabels := map[string]string{ + clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, + clabernetesconstants.LabelName: fmt.Sprintf( + "%s-%s", + clabernetesconstants.Clabernetes, + puller, + ), + clabernetesconstants.LabelTopologyOwner: imageRequest.Spec.TopologyName, + clabernetesconstants.LabelTopologyNode: imageRequest.Spec.TopologyNodeName, + clabernetesconstants.LabelPullerNodeTarget: imageRequest.Spec.KubernetesNode, + clabernetesconstants.LabelPullerImageHash: imageHash[:13], + } + + labels := make(map[string]string) + + for k, v := range selectorLabels { + labels[k] = v + } + + for k, v := range globalLabels { + labels[k] = v + } + + annotations := map[string]string{ + // image string wont be valid for a label, so we'll put it here + "clabernetes/pullerRequestedImage": imageRequest.Spec.RequestedImage, + } + + for k, v := range globalAnnotations { + annotations[k] = v + } + + requestedPullSecrets := make( + []k8scorev1.LocalObjectReference, + len(imageRequest.Spec.RequestedImagePullSecrets), + ) + + for idx, pullSecret := range imageRequest.Spec.RequestedImagePullSecrets { + requestedPullSecrets[idx] = k8scorev1.LocalObjectReference{Name: pullSecret} + } + + pullerPod := &k8scorev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: clabernetesutilkubernetes.SafeConcatNameKubernetes( + clabernetesconstants.Clabernetes, + puller, + imageRequest.Spec.KubernetesNode, + imageHash, + ), + Namespace: imageRequest.Namespace, + Annotations: annotations, + Labels: labels, + }, + Spec: 
k8scorev1.PodSpec{ + Containers: []k8scorev1.Container{ + { + Name: "puller", + Image: imageRequest.Spec.RequestedImage, + // we don't care if it runs, only care if we can pull the image... + Command: []string{ + "exit", + "0", + }, + ImagePullPolicy: "IfNotPresent", + }, + }, + NodeName: imageRequest.Spec.KubernetesNode, + ImagePullSecrets: requestedPullSecrets, + }, + } + + // always set the owner ref to ensure that when we delete this imageRequest cr the pod will + // get deleted (even if dont explicitly do so for some reason) + err := ctrlruntimeutil.SetOwnerReference(imageRequest, pullerPod, c.Client.Scheme()) + if err != nil { + return "", err + } + + err = c.Client.Create( + ctx, + pullerPod, + ) + if err != nil { + c.Log.Criticalf( + "failed creating image puller pod for image %q, node %q, error: %s", + imageRequest.Spec.RequestedImage, + imageRequest.Spec.KubernetesNode, + err, + ) + + return "", err + } + + return pullerPod.Name, nil +} + +func (c *Controller) waitImagePullerPodOutOfPending( + ctx context.Context, + namespace, pullerPodName string, +) error { + listOptions := metav1.ListOptions{ + FieldSelector: fmt.Sprintf("metadata.name=%s", pullerPodName), + Watch: true, + } + + watch, err := c.KubeClient.CoreV1().Pods(namespace).Watch(ctx, listOptions) + if err != nil { + return err + } + + for event := range watch.ResultChan() { + switch event.Type { //nolint:exhaustive + case apimachinerywatch.Added, apimachinerywatch.Modified: + pod, ok := event.Object.(*k8scorev1.Pod) + if !ok { + panic("this is a bug in the puller pod watch, this should not happen") + } + + switch pod.Status.Phase { //nolint:exhaustive + case k8scorev1.PodPending: + // pending so it hasnt been scheduled, and certainly hasnt pulled the image yet... + continue + case k8scorev1.PodRunning, k8scorev1.PodSucceeded, k8scorev1.PodFailed: + // its running/succeeded/failed any of which means the image has been pulled and + // we can be done/kill the pod now + c.Log.Infof( + "puller pod '%s/%s' has left pending state, image pulled", + namespace, + pullerPodName, + ) + + watch.Stop() + } + } + } + + return nil +} + +func (c *Controller) deleteImagePullerPod( + ctx context.Context, + namespace, pullerPodName string, +) error { + c.Log.Debugf("destroying image puller pod %q", pullerPodName) + + err := c.Client.Delete( + ctx, + &k8scorev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: pullerPodName, + }, + }, + ) + if err != nil { + if apimachineryerrors.IsNotFound(err) { + c.Log.Warnf( + "puller pod '%s/%s' not found when attempting to delete, this is probably"+ + " ok as it is either deleted or will be when we delete this image request cr", + namespace, pullerPodName, + ) + + return nil + } + + c.Log.Criticalf( + "failed deleting image puller pod '%s/%s', error: %s", namespace, pullerPodName, err, + ) + + return err + } + + return nil +} diff --git a/controllers/topology/deployment.go b/controllers/topology/deployment.go index c91ccd6b..7e3b1e09 100644 --- a/controllers/topology/deployment.go +++ b/controllers/topology/deployment.go @@ -154,7 +154,7 @@ func (r *DeploymentReconciler) renderDeploymentBase( Spec: k8scorev1.PodSpec{ Containers: []k8scorev1.Container{}, RestartPolicy: "Always", - ServiceAccountName: "default", + ServiceAccountName: launcherServiceAccountName(), Volumes: []k8scorev1.Volume{}, Hostname: nodeName, }, diff --git a/controllers/topology/reconcile.go b/controllers/topology/reconcile.go index 5ccc215a..9e5c98b4 100644 --- a/controllers/topology/reconcile.go +++ 
b/controllers/topology/reconcile.go @@ -3,6 +3,8 @@ package topology import ( "context" + clabernetesapisv1alpha1 "github.com/srl-labs/clabernetes/apis/v1alpha1" + apimachineryerrors "k8s.io/apimachinery/pkg/api/errors" ctrlruntime "sigs.k8s.io/controller-runtime" ) @@ -37,6 +39,15 @@ func (c *Controller) Reconcile( return ctrlruntime.Result{}, nil } + // we always reconcile the "namespace" resources first -- meaning the resources that exist in + // the namespace that are not 1:1 to a Topology -- for example: service account and role + // binding. These resources are created for the namespace on creation of the first Topology in + // the namespace, and are removed when the last Topology is removed from the namespace. + err = c.TopologyReconciler.ReconcileNamespaceResources(ctx, topology) + if err != nil { + return ctrlruntime.Result{}, err + } + reconcileData, err := NewReconcileData(topology) if err != nil { c.BaseController.Log.Criticalf( @@ -53,7 +64,50 @@ func (c *Controller) Reconcile( return ctrlruntime.Result{}, err } - err = c.TopologyReconciler.ReconcileConfigMap( + err = c.reconcileResources(ctx, topology, reconcileData) + if err != nil { + return ctrlruntime.Result{}, err + } + + if reconcileData.ShouldUpdateResource { + // we should update because config hash or something changed, so snag the updated status + // data out of the reconcile data, put it in the resource, and push the update + err = reconcileData.SetStatus(&topology.Status) + if err != nil { + c.BaseController.Log.Criticalf( + "failed setting object '%s/%s' status, error: %s", + topology.Namespace, + topology.Name, + err, + ) + + return ctrlruntime.Result{}, err + } + + err = c.BaseController.Client.Update(ctx, topology) + if err != nil { + c.BaseController.Log.Criticalf( + "failed updating object '%s/%s' error: %s", + topology.Namespace, + topology.Name, + err, + ) + + return ctrlruntime.Result{}, err + } + } + + c.BaseController.LogReconcileCompleteSuccess(req) + + return ctrlruntime.Result{}, nil +} + +func (c *Controller) reconcileResources( + ctx context.Context, + topology *clabernetesapisv1alpha1.Topology, + reconcileData *ReconcileData, +) error { + err := c.TopologyReconciler.ReconcileConfigMap( ctx, topology, reconcileData, @@ -64,7 +118,7 @@ func (c *Controller) Reconcile( err, ) - return ctrlruntime.Result{}, err + return err } err = c.TopologyReconciler.ReconcileServices( @@ -74,7 +128,7 @@ func (c *Controller) Reconcile( ) if err != nil { // error already logged - return ctrlruntime.Result{}, err + return err } err = c.TopologyReconciler.ReconcilePersistentVolumeClaim( @@ -85,7 +139,7 @@ func (c *Controller) Reconcile( if err != nil { c.BaseController.Log.Criticalf("failed reconciling clabernetes pvcs, error: %s", err) - return ctrlruntime.Result{}, err + return err } err = c.TopologyReconciler.ReconcileDeployments( @@ -96,38 +150,8 @@ func (c *Controller) Reconcile( if err != nil { c.BaseController.Log.Criticalf("failed reconciling clabernetes deployments, error: %s", err) - return ctrlruntime.Result{}, err + return err } - if reconcileData.ShouldUpdateResource { - // we should update because config hash or something changed, so snag the updated status - // data out of the reconcile data, put it in the resource, and push the update - err = reconcileData.SetStatus(&topology.Status) - if err != nil { - c.BaseController.Log.Criticalf( - "failed setting object '%s/%s' status, error: %s", - topology.Namespace, - topology.Name, - err, - ) - - return ctrlruntime.Result{}, err - } - - err = 
c.BaseController.Client.Update(ctx, topology) - if err != nil { - c.BaseController.Log.Criticalf( - "failed updating object '%s/%s' error: %s", - topology.Namespace, - topology.Name, - err, - ) - - return ctrlruntime.Result{}, err - } - } - - c.BaseController.LogReconcileCompleteSuccess(req) - - return ctrlruntime.Result{}, nil + return nil } diff --git a/controllers/topology/reconciler.go b/controllers/topology/reconciler.go index 83007336..c8c71dd1 100644 --- a/controllers/topology/reconciler.go +++ b/controllers/topology/reconciler.go @@ -24,13 +24,6 @@ import ( ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) -// ResourceListerFunc represents a function that can list the objects that a topology controller -// is responsible for. -type ResourceListerFunc func( - ctx context.Context, - client ctrlruntimeclient.Client, -) ([]ctrlruntimeclient.Object, error) - // NewReconciler creates a new generic Reconciler (TopologyReconciler). func NewReconciler( log claberneteslogging.Instance, @@ -43,7 +36,17 @@ func NewReconciler( return &Reconciler{ Log: log, Client: client, - + serviceAccountReconciler: NewServiceAccountReconciler( + log, + client, + configManagerGetter, + ), + roleBindingReconciler: NewRoleBindingReconciler( + log, + client, + configManagerGetter, + managerAppName, + ), configMapReconciler: NewConfigMapReconciler( log, configManagerGetter, @@ -79,10 +82,11 @@ func NewReconciler( // common/standard resources that represent a clabernetes object (configmap, deployments, // services, etc.). type Reconciler struct { - Log claberneteslogging.Instance - Client ctrlruntimeclient.Client - ResourceKind string + Log claberneteslogging.Instance + Client ctrlruntimeclient.Client + serviceAccountReconciler *ServiceAccountReconciler + roleBindingReconciler *RoleBindingReconciler configMapReconciler *ConfigMapReconciler serviceNodeAliasReconciler *ServiceNodeAliasReconciler serviceFabricReconciler *ServiceFabricReconciler @@ -91,6 +95,51 @@ type Reconciler struct { deploymentReconciler *DeploymentReconciler } +// ReconcileNamespaceResources reconciles resources that exist in a Topology's namespace but are not +// 1:1 with a Topology -- for example ServiceAccount and RoleBinding resources which are created at +// the point the first Topology in a namespace is created and exist until the final Topology in a +// namespace is being removed. +func (r *Reconciler) ReconcileNamespaceResources( + ctx context.Context, + owningTopology *clabernetesapisv1alpha1.Topology, +) error { + err := r.ReconcileServiceAccount(ctx, owningTopology) + if err != nil { + return err + } + + err = r.ReconcileRoleBinding(ctx, owningTopology) + if err != nil { + return err + } + + return nil +} + +// ReconcileServiceAccount reconciles the service account for the given namespace -- note that there +// is only *one* service account per namespace, but its simply reconciled each time a Topology is +// reconciled to make life easy. This and the RoleBinding are the only resources we need to worry +// about when deleting, a Topology resource, hence there is `deleting` arg to indicate if we should +// see if we should clean things up. 
+func (r *Reconciler) ReconcileServiceAccount( + ctx context.Context, + owningTopology *clabernetesapisv1alpha1.Topology, +) error { + return r.serviceAccountReconciler.Reconcile(ctx, owningTopology) +} + +// ReconcileRoleBinding reconciles the role binding for the given namespace -- note that there +// is only *one* role binding per namespace, but its simply reconciled each time a Topology is +// reconciled to make life easy. This and the ServiceAccount are the only resources we need to worry +// about when deleting, a Topology resource, hence there is `deleting` arg to indicate if we should +// see if we should clean things up. +func (r *Reconciler) ReconcileRoleBinding( + ctx context.Context, + owningTopology *clabernetesapisv1alpha1.Topology, +) error { + return r.roleBindingReconciler.Reconcile(ctx, owningTopology) +} + // ReconcileConfigMap reconciles the primary configmap containing clabernetes configs, tunnel // information, pull secret information, and perhaps more in the future. func (r *Reconciler) ReconcileConfigMap( diff --git a/controllers/topology/rolebinding.go b/controllers/topology/rolebinding.go new file mode 100644 index 00000000..872bc92f --- /dev/null +++ b/controllers/topology/rolebinding.go @@ -0,0 +1,263 @@ +package topology + +import ( + "context" + "fmt" + "reflect" + + clabernetesapisv1alpha1 "github.com/srl-labs/clabernetes/apis/v1alpha1" + + ctrlruntimeutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + + clabernetesconfig "github.com/srl-labs/clabernetes/config" + clabernetesconstants "github.com/srl-labs/clabernetes/constants" + claberneteslogging "github.com/srl-labs/clabernetes/logging" + k8srbacv1 "k8s.io/api/rbac/v1" + apimachineryerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apimachinerytypes "k8s.io/apimachinery/pkg/types" + + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +// NewRoleBindingReconciler returns an instance of RoleBindingReconciler. +func NewRoleBindingReconciler( + log claberneteslogging.Instance, + client ctrlruntimeclient.Client, + configManagerGetter clabernetesconfig.ManagerGetterFunc, + appName string, +) *RoleBindingReconciler { + return &RoleBindingReconciler{ + log: log, + client: client, + configManagerGetter: configManagerGetter, + appName: appName, + } +} + +func launcherRoleBindingName() string { + return fmt.Sprintf("%s-launcher-role-binding", clabernetesconstants.Clabernetes) +} + +// RoleBindingReconciler is a subcomponent of the "TopologyReconciler" but is exposed for testing +// purposes. This is the component responsible for rendering/validating (and deleting when +// necessary) the clabernetes launcher role binding for a given namespace. +type RoleBindingReconciler struct { + log claberneteslogging.Instance + client ctrlruntimeclient.Client + configManagerGetter clabernetesconfig.ManagerGetterFunc + appName string +} + +// Reconcile either enforces the RoleBinding configuration for a given namespace or removes the +// role binding if the Topology being reconciled is the last Topology resource in the namespace. 
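+// The flow is: fetch the existing role binding (creating it if it does not exist yet), render
+// the expected role binding, and push an update only if the existing object does not conform
+// to the rendered one.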
+func (r *RoleBindingReconciler) Reconcile( + ctx context.Context, + owningTopology *clabernetesapisv1alpha1.Topology, +) error { + namespace := owningTopology.Namespace + + r.log.Debugf( + "reconciling launcher role binding in namespace %q", + namespace, + ) + + existingRoleBinding, err := r.reconcileGetAndCreateIfNotExist(ctx, owningTopology) + if err != nil { + return err + } + + renderedRoleBinding, err := r.Render(owningTopology, existingRoleBinding) + if err != nil { + r.log.Criticalf( + "failed rendering role binding for namespace %q, error: %s", + namespace, + err, + ) + + return err + } + + if r.Conforms(existingRoleBinding, renderedRoleBinding, owningTopology.UID) { + r.log.Debugf( + "launcher role binding in namespace %q conforms, nothing to do", + namespace, + ) + + return nil + } + + err = r.client.Update( + ctx, + renderedRoleBinding, + ) + if err != nil { + r.log.Criticalf( + "failed updating launcher role binding in namespace %q, error: %s", + namespace, + err, + ) + + return err + } + + return nil +} + +func (r *RoleBindingReconciler) reconcileGetAndCreateIfNotExist( //nolint:dupl + ctx context.Context, + owningTopology *clabernetesapisv1alpha1.Topology, +) (*k8srbacv1.RoleBinding, error) { + namespace := owningTopology.Namespace + + existingRoleBinding := &k8srbacv1.RoleBinding{} + + err := r.client.Get( + ctx, + apimachinerytypes.NamespacedName{ + Namespace: namespace, + Name: launcherRoleBindingName(), + }, + existingRoleBinding, + ) + if err == nil { + return existingRoleBinding, nil + } + + if apimachineryerrors.IsNotFound(err) { + r.log.Infof("no launcher role binding found in namespace %q, creating...", namespace) + + var renderedRoleBinding *k8srbacv1.RoleBinding + + renderedRoleBinding, err = r.Render(owningTopology, nil) + if err != nil { + r.log.Criticalf( + "failed rendering role binding for namespace %q, error: %s", + namespace, + err, + ) + + return existingRoleBinding, err + } + + err = r.client.Create(ctx, renderedRoleBinding) + if err != nil { + r.log.Criticalf( + "failed creating role binding in namespace %q, error: %s", + namespace, + err, + ) + + return existingRoleBinding, err + } + + return existingRoleBinding, nil + } + + r.log.Debugf( + "failed getting role binding in namespace %q, error: %s", + namespace, + err, + ) + + return existingRoleBinding, err +} + +// Render renders the role binding for the given namespace. Exported for easy testing. 
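+// The rendered binding grants the launcher service account in the Topology's namespace the
+// "<appName>-launcher-role" cluster role, and carries forward any owner references already on
+// the existing binding (adding the owning Topology) since the binding is shared by all
+// Topologies in the namespace.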
+func (r *RoleBindingReconciler) Render( + owningTopology *clabernetesapisv1alpha1.Topology, + existingRoleBinding *k8srbacv1.RoleBinding, +) (*k8srbacv1.RoleBinding, error) { + annotations, globalLabels := r.configManagerGetter().GetAllMetadata() + + labels := map[string]string{ + clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, + } + + for k, v := range globalLabels { + labels[k] = v + } + + renderedRoleBinding := &k8srbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: launcherRoleBindingName(), + Namespace: owningTopology.Namespace, + Labels: labels, + Annotations: annotations, + }, + Subjects: []k8srbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: launcherServiceAccountName(), + Namespace: owningTopology.Namespace, + }, + }, + RoleRef: k8srbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: fmt.Sprintf("%s-launcher-role", r.appName), + }, + } + + // when we render we want to include any existing owner references as rolebindings (and sa) are + // owned by all topologies in a given namespace, so make sure to retain those + if existingRoleBinding != nil { + renderedRoleBinding.OwnerReferences = existingRoleBinding.GetOwnerReferences() + } + + err := ctrlruntimeutil.SetOwnerReference(owningTopology, renderedRoleBinding, r.client.Scheme()) + if err != nil { + return nil, err + } + + return renderedRoleBinding, nil +} + +// Conforms returns true if an existing RoleBinding conforms with the rendered RoleBinding. +func (r *RoleBindingReconciler) Conforms( + existingRoleBinding, + renderedRoleBinding *k8srbacv1.RoleBinding, + expectedOwnerUID apimachinerytypes.UID, +) bool { + if !reflect.DeepEqual(existingRoleBinding.RoleRef, renderedRoleBinding.RoleRef) { + return false + } + + if !reflect.DeepEqual(existingRoleBinding.Subjects, renderedRoleBinding.Subjects) { + return false + } + + if !clabernetesutilkubernetes.AnnotationsOrLabelsConform( + existingRoleBinding.ObjectMeta.Annotations, + renderedRoleBinding.ObjectMeta.Annotations, + ) { + return false + } + + if !clabernetesutilkubernetes.AnnotationsOrLabelsConform( + existingRoleBinding.ObjectMeta.Labels, + renderedRoleBinding.ObjectMeta.Labels, + ) { + return false + } + + // we need to check to make sure that *at least* our topology exists as an owner for this + if len(existingRoleBinding.ObjectMeta.OwnerReferences) == 0 { + // we should have *at least* one owner reference + return false + } + + var ourOwnerRefExists bool + + for _, ownerRef := range existingRoleBinding.OwnerReferences { + if ownerRef.UID == expectedOwnerUID { + ourOwnerRefExists = true + + break + } + } + + return ourOwnerRefExists +} diff --git a/controllers/topology/service.go b/controllers/topology/service.go index 364bdc19..3e0ed63a 100644 --- a/controllers/topology/service.go +++ b/controllers/topology/service.go @@ -71,7 +71,7 @@ func ServiceConforms( } if len(existingService.ObjectMeta.OwnerReferences) != 1 { - // we should have only one owner reference, the extractor + // we should have only one owner reference, the topology return false } diff --git a/controllers/topology/serviceaccount.go b/controllers/topology/serviceaccount.go new file mode 100644 index 00000000..0207e9f5 --- /dev/null +++ b/controllers/topology/serviceaccount.go @@ -0,0 +1,241 @@ +package topology + +import ( + "context" + "fmt" + + apimachineryerrors "k8s.io/apimachinery/pkg/api/errors" + + clabernetesapisv1alpha1 "github.com/srl-labs/clabernetes/apis/v1alpha1" + clabernetesconstants 
"github.com/srl-labs/clabernetes/constants" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + k8scorev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apimachinerytypes "k8s.io/apimachinery/pkg/types" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + ctrlruntimeutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + clabernetesconfig "github.com/srl-labs/clabernetes/config" + claberneteslogging "github.com/srl-labs/clabernetes/logging" +) + +// NewServiceAccountReconciler returns an instance of ServiceAccountReconciler. +func NewServiceAccountReconciler( + log claberneteslogging.Instance, + client ctrlruntimeclient.Client, + configManagerGetter clabernetesconfig.ManagerGetterFunc, +) *ServiceAccountReconciler { + return &ServiceAccountReconciler{ + log: log, + client: client, + configManagerGetter: configManagerGetter, + } +} + +func launcherServiceAccountName() string { + return fmt.Sprintf("%s-launcher-service-account", clabernetesconstants.Clabernetes) +} + +// ServiceAccountReconciler is a subcomponent of the "TopologyReconciler" but is exposed for testing +// purposes. This is the component responsible for rendering/validating (and deleting when +// necessary) the clabernetes launcher service account for a given namespace. +type ServiceAccountReconciler struct { + log claberneteslogging.Instance + client ctrlruntimeclient.Client + configManagerGetter clabernetesconfig.ManagerGetterFunc +} + +// Reconcile either enforces the ServiceAccount configuration for a given namespace or removes the +// service account if the Topology being reconciled is the last Topology resource in the namespace. +func (r *ServiceAccountReconciler) Reconcile( + ctx context.Context, + owningTopology *clabernetesapisv1alpha1.Topology, +) error { + namespace := owningTopology.Namespace + + r.log.Debugf( + "reconciling launcher service account in namespace %q", + namespace, + ) + + existingServiceAccount, err := r.reconcileGetAndCreateIfNotExist(ctx, owningTopology) + if err != nil { + return err + } + + renderedServiceAccount, err := r.Render(owningTopology, existingServiceAccount) + if err != nil { + r.log.Criticalf( + "failed rendering service account for namespace %q, error: %s", + namespace, + err, + ) + + return err + } + + if r.Conforms(existingServiceAccount, renderedServiceAccount, owningTopology.UID) { + r.log.Debugf( + "launcher service account in namespace %q conforms, nothing to do", + namespace, + ) + + return nil + } + + err = r.client.Update( + ctx, + renderedServiceAccount, + ) + if err != nil { + r.log.Criticalf( + "failed updating launcher service account in namespace %q, error: %s", + namespace, + err, + ) + + return err + } + + return nil +} + +func (r *ServiceAccountReconciler) reconcileGetAndCreateIfNotExist( //nolint:dupl + ctx context.Context, + owningTopology *clabernetesapisv1alpha1.Topology, +) (*k8scorev1.ServiceAccount, error) { + namespace := owningTopology.Namespace + + existingServiceAccount := &k8scorev1.ServiceAccount{} + + err := r.client.Get( + ctx, + apimachinerytypes.NamespacedName{ + Namespace: namespace, + Name: launcherServiceAccountName(), + }, + existingServiceAccount, + ) + if err == nil { + return existingServiceAccount, nil + } + + if apimachineryerrors.IsNotFound(err) { + r.log.Infof("no launcher service account found in namespace %q, creating...", namespace) + + var renderedServiceAccount *k8scorev1.ServiceAccount + + renderedServiceAccount, err = 
r.Render(owningTopology, nil) + if err != nil { + r.log.Criticalf( + "failed rendering service account for namespace %q, error: %s", + namespace, + err, + ) + + return existingServiceAccount, err + } + + err = r.client.Create(ctx, renderedServiceAccount) + if err != nil { + r.log.Criticalf( + "failed creating service account in namespace %q, error: %s", + namespace, + err, + ) + + return existingServiceAccount, err + } + + return existingServiceAccount, nil + } + + r.log.Debugf( + "failed getting service account in namespace %q, error: %s", + namespace, + err, + ) + + return existingServiceAccount, err +} + +// Render renders a service account for the given namespace. Exported for easy testing. +func (r *ServiceAccountReconciler) Render( + owningTopology *clabernetesapisv1alpha1.Topology, + existingServieAccount *k8scorev1.ServiceAccount, +) (*k8scorev1.ServiceAccount, error) { + annotations, globalLabels := r.configManagerGetter().GetAllMetadata() + + labels := map[string]string{ + clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, + } + + for k, v := range globalLabels { + labels[k] = v + } + + renderedServiceAccount := &k8scorev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: launcherServiceAccountName(), + Namespace: owningTopology.Namespace, + Labels: labels, + Annotations: annotations, + }, + } + + // when we render we want to include any existing owner references as rolebindings (and sa) are + // owned by all topologies in a given namespace, so make sure to retain those + if existingServieAccount != nil { + renderedServiceAccount.OwnerReferences = existingServieAccount.GetOwnerReferences() + } + + err := ctrlruntimeutil.SetOwnerReference( + owningTopology, + renderedServiceAccount, + r.client.Scheme(), + ) + if err != nil { + return nil, err + } + + return renderedServiceAccount, nil +} + +// Conforms returns true if an existing ServiceAccount conforms with the rendered ServiceAccount. 
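+// Conformance here means the annotations and labels line up with the rendered object and the
+// owning Topology is present in the existing service account's owner references.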
+func (r *ServiceAccountReconciler) Conforms(
+ existingServiceAccount,
+ renderedServiceAccount *k8scorev1.ServiceAccount,
+ expectedOwnerUID apimachinerytypes.UID,
+) bool {
+ if !clabernetesutilkubernetes.AnnotationsOrLabelsConform(
+ existingServiceAccount.ObjectMeta.Annotations,
+ renderedServiceAccount.ObjectMeta.Annotations,
+ ) {
+ return false
+ }
+
+ if !clabernetesutilkubernetes.AnnotationsOrLabelsConform(
+ existingServiceAccount.ObjectMeta.Labels,
+ renderedServiceAccount.ObjectMeta.Labels,
+ ) {
+ return false
+ }
+
+ // we need to check to make sure that *at least* our topology exists as an owner for this
+ if len(existingServiceAccount.ObjectMeta.OwnerReferences) == 0 {
+ // we should have *at least* one owner reference
+ return false
+ }
+
+ var ourOwnerRefExists bool
+
+ for _, ownerRef := range existingServiceAccount.OwnerReferences {
+ if ownerRef.UID == expectedOwnerUID {
+ ourOwnerRefExists = true
+
+ break
+ }
+ }
+
+ return ourOwnerRefExists
+}
diff --git a/controllers/topology/test-fixtures/golden/deployment/render-deployment/containerlab-debug.json b/controllers/topology/test-fixtures/golden/deployment/render-deployment/containerlab-debug.json
index cb73990c..710c6835 100755
--- a/controllers/topology/test-fixtures/golden/deployment/render-deployment/containerlab-debug.json
+++ b/controllers/topology/test-fixtures/golden/deployment/render-deployment/containerlab-debug.json
@@ -228,7 +228,7 @@
 }
 ],
 "restartPolicy": "Always",
- "serviceAccountName": "default",
+ "serviceAccountName": "clabernetes-launcher-service-account",
 "hostname": "srl1"
 }
 },
diff --git a/controllers/topology/test-fixtures/golden/deployment/render-deployment/insecure-registries.json b/controllers/topology/test-fixtures/golden/deployment/render-deployment/insecure-registries.json
index 2921ade2..50bdf2b1 100755
--- a/controllers/topology/test-fixtures/golden/deployment/render-deployment/insecure-registries.json
+++ b/controllers/topology/test-fixtures/golden/deployment/render-deployment/insecure-registries.json
@@ -228,7 +228,7 @@
 }
 ],
 "restartPolicy": "Always",
- "serviceAccountName": "default",
+ "serviceAccountName": "clabernetes-launcher-service-account",
 "hostname": "srl1"
 }
 },
diff --git a/controllers/topology/test-fixtures/golden/deployment/render-deployment/launcher-log-level.json b/controllers/topology/test-fixtures/golden/deployment/render-deployment/launcher-log-level.json
index d02f94a2..72cece89 100755
--- a/controllers/topology/test-fixtures/golden/deployment/render-deployment/launcher-log-level.json
+++ b/controllers/topology/test-fixtures/golden/deployment/render-deployment/launcher-log-level.json
@@ -224,7 +224,7 @@
 }
 ],
 "restartPolicy": "Always",
- "serviceAccountName": "default",
+ "serviceAccountName": "clabernetes-launcher-service-account",
 "hostname": "srl1"
 }
 },
diff --git a/controllers/topology/test-fixtures/golden/deployment/render-deployment/privileged-launcher.json b/controllers/topology/test-fixtures/golden/deployment/render-deployment/privileged-launcher.json
index fdcbe0fb..38a21d0f 100755
--- a/controllers/topology/test-fixtures/golden/deployment/render-deployment/privileged-launcher.json
+++ b/controllers/topology/test-fixtures/golden/deployment/render-deployment/privileged-launcher.json
@@ -157,7 +157,7 @@
 }
 ],
 "restartPolicy": "Always",
- "serviceAccountName": "default",
+ "serviceAccountName": "clabernetes-launcher-service-account",
 "hostname": "srl1"
 }
 },
diff --git a/controllers/topology/test-fixtures/golden/deployment/render-deployment/simple.json
b/controllers/topology/test-fixtures/golden/deployment/render-deployment/simple.json index 4c9ca75b..59970695 100755 --- a/controllers/topology/test-fixtures/golden/deployment/render-deployment/simple.json +++ b/controllers/topology/test-fixtures/golden/deployment/render-deployment/simple.json @@ -224,7 +224,7 @@ } ], "restartPolicy": "Always", - "serviceAccountName": "default", + "serviceAccountName": "clabernetes-launcher-service-account", "hostname": "srl1" } }, diff --git a/generated/clientset/typed/apis/v1alpha1/apis_client.go b/generated/clientset/typed/apis/v1alpha1/apis_client.go index 3387adf3..c9d4173f 100644 --- a/generated/clientset/typed/apis/v1alpha1/apis_client.go +++ b/generated/clientset/typed/apis/v1alpha1/apis_client.go @@ -29,6 +29,7 @@ import ( type ClabernetesV1alpha1Interface interface { RESTClient() rest.Interface ConfigsGetter + ImageRequestsGetter TopologiesGetter } @@ -41,6 +42,10 @@ func (c *ClabernetesV1alpha1Client) Configs(namespace string) ConfigInterface { return newConfigs(c, namespace) } +func (c *ClabernetesV1alpha1Client) ImageRequests(namespace string) ImageRequestInterface { + return newImageRequests(c, namespace) +} + func (c *ClabernetesV1alpha1Client) Topologies(namespace string) TopologyInterface { return newTopologies(c, namespace) } diff --git a/generated/clientset/typed/apis/v1alpha1/fake/fake_apis_client.go b/generated/clientset/typed/apis/v1alpha1/fake/fake_apis_client.go index 1ec5f915..28d3b0cd 100644 --- a/generated/clientset/typed/apis/v1alpha1/fake/fake_apis_client.go +++ b/generated/clientset/typed/apis/v1alpha1/fake/fake_apis_client.go @@ -32,6 +32,10 @@ func (c *FakeClabernetesV1alpha1) Configs(namespace string) v1alpha1.ConfigInter return &FakeConfigs{c, namespace} } +func (c *FakeClabernetesV1alpha1) ImageRequests(namespace string) v1alpha1.ImageRequestInterface { + return &FakeImageRequests{c, namespace} +} + func (c *FakeClabernetesV1alpha1) Topologies(namespace string) v1alpha1.TopologyInterface { return &FakeTopologies{c, namespace} } diff --git a/generated/clientset/typed/apis/v1alpha1/fake/fake_imagerequest.go b/generated/clientset/typed/apis/v1alpha1/fake/fake_imagerequest.go new file mode 100644 index 00000000..374d39f0 --- /dev/null +++ b/generated/clientset/typed/apis/v1alpha1/fake/fake_imagerequest.go @@ -0,0 +1,198 @@ +/* + Copyright The Kubernetes Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
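As an editorial aside before the generated fake client body: this fake is what makes the new CR easy to exercise in unit tests without a cluster. A minimal sketch, assuming client-gen also emitted the usual top-level fake package (generated/clientset/fake) with a NewSimpleClientset constructor; all names and values below are illustrative only:

package imagerequest_test

import (
	"context"
	"testing"

	clabernetesapisv1alpha1 "github.com/srl-labs/clabernetes/apis/v1alpha1"
	clabernetesfake "github.com/srl-labs/clabernetes/generated/clientset/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestImageRequestFakeClient(t *testing.T) {
	// seed the fake clientset with an ImageRequest as if a launcher had already created it
	seed := &clabernetesapisv1alpha1.ImageRequest{
		ObjectMeta: metav1.ObjectMeta{Name: "node-a-abc123", Namespace: "clabernetes"},
		Spec: clabernetesapisv1alpha1.ImageRequestSpec{
			TopologyName:     "topo1",
			TopologyNodeName: "srl1",
			KubernetesNode:   "node-a",
			RequestedImage:   "ghcr.io/nokia/srlinux:latest",
		},
	}

	client := clabernetesfake.NewSimpleClientset(seed)

	got, err := client.ClabernetesV1alpha1().
		ImageRequests("clabernetes").
		Get(context.Background(), "node-a-abc123", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error fetching seeded image request: %s", err)
	}

	if got.Spec.RequestedImage != seed.Spec.RequestedImage {
		t.Fatalf("expected image %q, got %q", seed.Spec.RequestedImage, got.Spec.RequestedImage)
	}
}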
+ +package fake + +import ( + "context" + + v1alpha1 "github.com/srl-labs/clabernetes/apis/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeImageRequests implements ImageRequestInterface +type FakeImageRequests struct { + Fake *FakeClabernetesV1alpha1 + ns string +} + +var imagerequestsResource = v1alpha1.SchemeGroupVersion.WithResource("imagerequests") + +var imagerequestsKind = v1alpha1.SchemeGroupVersion.WithKind("ImageRequest") + +// Get takes name of the imageRequest, and returns the corresponding imageRequest object, and an error if there is any. +func (c *FakeImageRequests) Get( + ctx context.Context, + name string, + options v1.GetOptions, +) (result *v1alpha1.ImageRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(imagerequestsResource, c.ns, name), &v1alpha1.ImageRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ImageRequest), err +} + +// List takes label and field selectors, and returns the list of ImageRequests that match those selectors. +func (c *FakeImageRequests) List( + ctx context.Context, + opts v1.ListOptions, +) (result *v1alpha1.ImageRequestList, err error) { + obj, err := c.Fake. + Invokes( + testing.NewListAction(imagerequestsResource, imagerequestsKind, c.ns, opts), + &v1alpha1.ImageRequestList{}, + ) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ImageRequestList{ListMeta: obj.(*v1alpha1.ImageRequestList).ListMeta} + for _, item := range obj.(*v1alpha1.ImageRequestList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested imageRequests. +func (c *FakeImageRequests) Watch( + ctx context.Context, + opts v1.ListOptions, +) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(imagerequestsResource, c.ns, opts)) + +} + +// Create takes the representation of a imageRequest and creates it. Returns the server's representation of the imageRequest, and an error, if there is any. +func (c *FakeImageRequests) Create( + ctx context.Context, + imageRequest *v1alpha1.ImageRequest, + opts v1.CreateOptions, +) (result *v1alpha1.ImageRequest, err error) { + obj, err := c.Fake. + Invokes( + testing.NewCreateAction(imagerequestsResource, c.ns, imageRequest), + &v1alpha1.ImageRequest{}, + ) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ImageRequest), err +} + +// Update takes the representation of a imageRequest and updates it. Returns the server's representation of the imageRequest, and an error, if there is any. +func (c *FakeImageRequests) Update( + ctx context.Context, + imageRequest *v1alpha1.ImageRequest, + opts v1.UpdateOptions, +) (result *v1alpha1.ImageRequest, err error) { + obj, err := c.Fake. + Invokes( + testing.NewUpdateAction(imagerequestsResource, c.ns, imageRequest), + &v1alpha1.ImageRequest{}, + ) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ImageRequest), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+func (c *FakeImageRequests) UpdateStatus( + ctx context.Context, + imageRequest *v1alpha1.ImageRequest, + opts v1.UpdateOptions, +) (*v1alpha1.ImageRequest, error) { + obj, err := c.Fake. + Invokes( + testing.NewUpdateSubresourceAction(imagerequestsResource, "status", c.ns, imageRequest), + &v1alpha1.ImageRequest{}, + ) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ImageRequest), err +} + +// Delete takes name of the imageRequest and deletes it. Returns an error if one occurs. +func (c *FakeImageRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes( + testing.NewDeleteActionWithOptions(imagerequestsResource, c.ns, name, opts), + &v1alpha1.ImageRequest{}, + ) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeImageRequests) DeleteCollection( + ctx context.Context, + opts v1.DeleteOptions, + listOpts v1.ListOptions, +) error { + action := testing.NewDeleteCollectionAction(imagerequestsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.ImageRequestList{}) + return err +} + +// Patch applies the patch and returns the patched imageRequest. +func (c *FakeImageRequests) Patch( + ctx context.Context, + name string, + pt types.PatchType, + data []byte, + opts v1.PatchOptions, + subresources ...string, +) (result *v1alpha1.ImageRequest, err error) { + obj, err := c.Fake. + Invokes( + testing.NewPatchSubresourceAction( + imagerequestsResource, + c.ns, + name, + pt, + data, + subresources...), + &v1alpha1.ImageRequest{}, + ) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ImageRequest), err +} diff --git a/generated/clientset/typed/apis/v1alpha1/generated_expansion.go b/generated/clientset/typed/apis/v1alpha1/generated_expansion.go index 48da474e..3890ff63 100644 --- a/generated/clientset/typed/apis/v1alpha1/generated_expansion.go +++ b/generated/clientset/typed/apis/v1alpha1/generated_expansion.go @@ -20,4 +20,6 @@ package v1alpha1 type ConfigExpansion interface{} +type ImageRequestExpansion interface{} + type TopologyExpansion interface{} diff --git a/generated/clientset/typed/apis/v1alpha1/imagerequest.go b/generated/clientset/typed/apis/v1alpha1/imagerequest.go new file mode 100644 index 00000000..7b5926ac --- /dev/null +++ b/generated/clientset/typed/apis/v1alpha1/imagerequest.go @@ -0,0 +1,244 @@ +/* + Copyright The Kubernetes Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/srl-labs/clabernetes/apis/v1alpha1" + scheme "github.com/srl-labs/clabernetes/generated/clientset/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ImageRequestsGetter has a method to return a ImageRequestInterface. +// A group's client should implement this interface. 
+type ImageRequestsGetter interface { + ImageRequests(namespace string) ImageRequestInterface +} + +// ImageRequestInterface has methods to work with ImageRequest resources. +type ImageRequestInterface interface { + Create( + ctx context.Context, + imageRequest *v1alpha1.ImageRequest, + opts v1.CreateOptions, + ) (*v1alpha1.ImageRequest, error) + Update( + ctx context.Context, + imageRequest *v1alpha1.ImageRequest, + opts v1.UpdateOptions, + ) (*v1alpha1.ImageRequest, error) + UpdateStatus( + ctx context.Context, + imageRequest *v1alpha1.ImageRequest, + opts v1.UpdateOptions, + ) (*v1alpha1.ImageRequest, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ImageRequest, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ImageRequestList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch( + ctx context.Context, + name string, + pt types.PatchType, + data []byte, + opts v1.PatchOptions, + subresources ...string, + ) (result *v1alpha1.ImageRequest, err error) + ImageRequestExpansion +} + +// imageRequests implements ImageRequestInterface +type imageRequests struct { + client rest.Interface + ns string +} + +// newImageRequests returns a ImageRequests +func newImageRequests(c *ClabernetesV1alpha1Client, namespace string) *imageRequests { + return &imageRequests{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the imageRequest, and returns the corresponding imageRequest object, and an error if there is any. +func (c *imageRequests) Get( + ctx context.Context, + name string, + options v1.GetOptions, +) (result *v1alpha1.ImageRequest, err error) { + result = &v1alpha1.ImageRequest{} + err = c.client.Get(). + Namespace(c.ns). + Resource("imagerequests"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ImageRequests that match those selectors. +func (c *imageRequests) List( + ctx context.Context, + opts v1.ListOptions, +) (result *v1alpha1.ImageRequestList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ImageRequestList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("imagerequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested imageRequests. +func (c *imageRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("imagerequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a imageRequest and creates it. Returns the server's representation of the imageRequest, and an error, if there is any. +func (c *imageRequests) Create( + ctx context.Context, + imageRequest *v1alpha1.ImageRequest, + opts v1.CreateOptions, +) (result *v1alpha1.ImageRequest, err error) { + result = &v1alpha1.ImageRequest{} + err = c.client.Post(). + Namespace(c.ns). 
+ Resource("imagerequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(imageRequest). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a imageRequest and updates it. Returns the server's representation of the imageRequest, and an error, if there is any. +func (c *imageRequests) Update( + ctx context.Context, + imageRequest *v1alpha1.ImageRequest, + opts v1.UpdateOptions, +) (result *v1alpha1.ImageRequest, err error) { + result = &v1alpha1.ImageRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("imagerequests"). + Name(imageRequest.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(imageRequest). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *imageRequests) UpdateStatus( + ctx context.Context, + imageRequest *v1alpha1.ImageRequest, + opts v1.UpdateOptions, +) (result *v1alpha1.ImageRequest, err error) { + result = &v1alpha1.ImageRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("imagerequests"). + Name(imageRequest.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(imageRequest). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the imageRequest and deletes it. Returns an error if one occurs. +func (c *imageRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("imagerequests"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *imageRequests) DeleteCollection( + ctx context.Context, + opts v1.DeleteOptions, + listOpts v1.ListOptions, +) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("imagerequests"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched imageRequest. +func (c *imageRequests) Patch( + ctx context.Context, + name string, + pt types.PatchType, + data []byte, + opts v1.PatchOptions, + subresources ...string, +) (result *v1alpha1.ImageRequest, err error) { + result = &v1alpha1.ImageRequest{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("imagerequests"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/generated/openapi/openapi_generated.go b/generated/openapi/openapi_generated.go index e7cf6e94..e59d644e 100644 --- a/generated/openapi/openapi_generated.go +++ b/generated/openapi/openapi_generated.go @@ -72,6 +72,18 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/srl-labs/clabernetes/apis/v1alpha1.ImagePull": schema_srl_labs_clabernetes_apis_v1alpha1_ImagePull( ref, ), + "github.com/srl-labs/clabernetes/apis/v1alpha1.ImageRequest": schema_srl_labs_clabernetes_apis_v1alpha1_ImageRequest( + ref, + ), + "github.com/srl-labs/clabernetes/apis/v1alpha1.ImageRequestList": schema_srl_labs_clabernetes_apis_v1alpha1_ImageRequestList( + ref, + ), + "github.com/srl-labs/clabernetes/apis/v1alpha1.ImageRequestSpec": schema_srl_labs_clabernetes_apis_v1alpha1_ImageRequestSpec( + ref, + ), + "github.com/srl-labs/clabernetes/apis/v1alpha1.ImageRequestStatus": schema_srl_labs_clabernetes_apis_v1alpha1_ImageRequestStatus( + ref, + ), "github.com/srl-labs/clabernetes/apis/v1alpha1.LinkEndpoint": schema_srl_labs_clabernetes_apis_v1alpha1_LinkEndpoint( ref, ), @@ -810,6 +822,217 @@ func schema_srl_labs_clabernetes_apis_v1alpha1_ImagePull( } } +func schema_srl_labs_clabernetes_apis_v1alpha1_ImageRequest( + ref common.ReferenceCallback, +) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ImageRequest is an object that represents a request (from a launcher pod) to pull an image on a given kubernetes node such that the image can be \"pulled through\" into the launcher docker daemon.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref( + "github.com/srl-labs/clabernetes/apis/v1alpha1.ImageRequestSpec", + ), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref( + "github.com/srl-labs/clabernetes/apis/v1alpha1.ImageRequestStatus", + ), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/srl-labs/clabernetes/apis/v1alpha1.ImageRequestSpec", "github.com/srl-labs/clabernetes/apis/v1alpha1.ImageRequestStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_srl_labs_clabernetes_apis_v1alpha1_ImageRequestList( + ref common.ReferenceCallback, +) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ImageRequestList is a list of ImageRequest objects.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref( + "github.com/srl-labs/clabernetes/apis/v1alpha1.ImageRequest", + ), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/srl-labs/clabernetes/apis/v1alpha1.ImageRequest", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_srl_labs_clabernetes_apis_v1alpha1_ImageRequestSpec( + ref common.ReferenceCallback, +) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ImageRequestSpec is the spec for a Config resource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "topologyName": { + SchemaProps: spec.SchemaProps{ + Description: "TopologyName is the name of the topology requesting the image.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "topologyNodeName": { + SchemaProps: spec.SchemaProps{ + Description: "TopologyNodeName is the name of the node in the topology (i.e. 
the router name in a containerlab topology) that the image is being requested for.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "kubernetesNode": { + SchemaProps: spec.SchemaProps{ + Description: "KubernetesNode is the node where the launcher pod is running and where the image should be pulled too.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "requestedImage": { + SchemaProps: spec.SchemaProps{ + Description: "RequestedImage is the image that the launcher pod wants the controller to get pulled onto the specified node.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "requestedImagePullSecrets": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "set", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "RequestedImagePullSecrets is a list of configured pull secrets to set in the pull pod spec.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + Required: []string{ + "topologyName", + "topologyNodeName", + "kubernetesNode", + "requestedImage", + }, + }, + }, + } +} + +func schema_srl_labs_clabernetes_apis_v1alpha1_ImageRequestStatus( + ref common.ReferenceCallback, +) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ImageRequestStatus is the status for a ImageRequest resource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "accepted": { + SchemaProps: spec.SchemaProps{ + Description: "Accepted indicates that the ImageRequest controller has seen this image request and is going to process it. 
This can be useful to let the requesting pod know that \"yep, this is in the works, and i can go watch the cri images on this node now\".", + Default: false, + Type: []string{"boolean"}, + Format: "", + }, + }, + "complete": { + SchemaProps: spec.SchemaProps{ + Description: "Complete indicates that the ImageRequest controller has seen that the puller pod has done its job and that the image has been pulled onto the requested node.", + Default: false, + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"accepted", "complete"}, + }, + }, + } +} + func schema_srl_labs_clabernetes_apis_v1alpha1_LinkEndpoint( ref common.ReferenceCallback, ) common.OpenAPIDefinition { diff --git a/http/image.go b/http/image.go deleted file mode 100644 index fb31ef19..00000000 --- a/http/image.go +++ /dev/null @@ -1,251 +0,0 @@ -package http - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - - apimachineryerrors "k8s.io/apimachinery/pkg/api/errors" - apimachinerywatch "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/kubernetes" - - clabernetesconfig "github.com/srl-labs/clabernetes/config" - clabernetesconstants "github.com/srl-labs/clabernetes/constants" - clabernetesutil "github.com/srl-labs/clabernetes/util" - clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" - - k8scorev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" - - claberneteshttptypes "github.com/srl-labs/clabernetes/http/types" -) - -const ( - imageRoute = "/image" - puller = "puller" -) - -func (m *manager) imageHandler(w http.ResponseWriter, r *http.Request) { - m.logRequest(r) - - imageRequest, err := processImageRequest(r) - if err != nil { - msg := fmt.Sprintf("encountered error processing image request, error: %s", err) - - m.logger.Critical(msg) - - w.WriteHeader(http.StatusInternalServerError) - - _, err = w.Write([]byte(msg)) - if err != nil { - m.logger.Criticalf( - "failed writing error message to image request response, error: %s", - err, - ) - } - - return - } - - m.logger.Debugf( - "received image pull request from pod '%s/%s' in topology %q on node %q,"+ - " requesting image %q", - imageRequest.TopologyNamespace, - imageRequest.RequestingPodName, - imageRequest.TopologyName, - imageRequest.KubernetesNodeName, - imageRequest.RequestedImageName, - ) - - // we run the spawning in the background to not block the launcher or http server. it will - // always try to clean up so it should be ok... fingers crossed?! 
- go func() { - err = spawnImagePullerPod(m.ctx, m.client, m.kubeClient, imageRequest) - if err != nil { - m.logger.Criticalf( - "handling image pull pod for requesting pod '%s/%s' in topology %q on node %q,"+ - " requesting image %q failed, err: %s", - imageRequest.TopologyNamespace, - imageRequest.RequestingPodName, - imageRequest.TopologyName, - imageRequest.KubernetesNodeName, - imageRequest.RequestedImageName, - err, - ) - } else { - m.logger.Debugf( - "handling image pull pod for requesting pod '%s/%s' in topology %q on node %q,"+ - " requesting image %q completed", - imageRequest.TopologyNamespace, - imageRequest.RequestingPodName, - imageRequest.TopologyName, - imageRequest.KubernetesNodeName, - imageRequest.RequestedImageName, - ) - } - }() - - w.WriteHeader(http.StatusOK) -} - -func processImageRequest( - r *http.Request, -) (*claberneteshttptypes.ImageRequest, error) { - imageRequest := &claberneteshttptypes.ImageRequest{} - - reqBody, err := io.ReadAll(r.Body) - if err != nil { - return nil, err - } - - err = json.Unmarshal(reqBody, imageRequest) - if err != nil { - return nil, err - } - - return imageRequest, nil -} - -func spawnImagePullerPod( - ctx context.Context, - client ctrlruntimeclient.Client, - kubeClient *kubernetes.Clientset, - imageRequest *claberneteshttptypes.ImageRequest, -) (reterr error) { - globalAnnotations, globalLabels := clabernetesconfig.GetManager().GetAllMetadata() - - imageHash := clabernetesutil.HashBytes([]byte(imageRequest.RequestedImageName)) - - podName := clabernetesutilkubernetes.SafeConcatNameKubernetes( - clabernetesconstants.Clabernetes, - puller, - imageRequest.KubernetesNodeName, - imageHash, - ) - - selectorLabels := map[string]string{ - clabernetesconstants.LabelApp: clabernetesconstants.Clabernetes, - clabernetesconstants.LabelName: fmt.Sprintf( - "%s-%s", - clabernetesconstants.Clabernetes, - puller, - ), - clabernetesconstants.LabelTopologyOwner: imageRequest.TopologyName, - clabernetesconstants.LabelTopologyNode: imageRequest.TopologyNodeName, - clabernetesconstants.LabelPullerNodeTarget: imageRequest.TopologyNodeName, - clabernetesconstants.LabelPullerImageHash: imageHash[:13], - } - - labels := make(map[string]string) - - for k, v := range selectorLabels { - labels[k] = v - } - - for k, v := range globalLabels { - labels[k] = v - } - - annotations := map[string]string{ - // image string wont be valid for a label, so we'll put it here - "clabernetes/pullerRequestedImage": imageRequest.RequestedImageName, - } - - for k, v := range globalAnnotations { - annotations[k] = v - } - - requestedPullSecrets := make( - []k8scorev1.LocalObjectReference, - len(imageRequest.ConfiguredPullSecrets), - ) - - for idx, pullSecret := range imageRequest.ConfiguredPullSecrets { - requestedPullSecrets[idx] = k8scorev1.LocalObjectReference{Name: pullSecret} - } - - createCtx, cancel := context.WithTimeout( - ctx, - clabernetesconstants.DefaultClientOperationTimeout, - ) - - pullerPod := &k8scorev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - Namespace: imageRequest.TopologyNamespace, - Annotations: annotations, - Labels: labels, - }, - Spec: k8scorev1.PodSpec{ - Containers: []k8scorev1.Container{ - { - Name: "puller", - Image: imageRequest.RequestedImageName, - // we don't care if it runs, only care if we can pull the image... 
- Command: []string{ - "exit", - "0", - }, - ImagePullPolicy: "IfNotPresent", - }, - }, - NodeName: imageRequest.KubernetesNodeName, - ImagePullSecrets: requestedPullSecrets, - }, - } - - defer func() { - err := client.Delete(ctx, pullerPod) - if !apimachineryerrors.IsNotFound(err) { - reterr = err - } - }() - - err := client.Create( - createCtx, - pullerPod, - ) - - cancel() - - if err != nil { - return err - } - - watchCtx, cancel := context.WithTimeout(ctx, clabernetesconstants.PullerPodTimeout) - defer cancel() - - listOptions := metav1.ListOptions{ - FieldSelector: fmt.Sprintf("metadata.name=%s", podName), - Watch: true, - } - - watch, reterr := kubeClient.CoreV1().Pods(pullerPod.Namespace).Watch(watchCtx, listOptions) - if reterr != nil { - return err - } - - for event := range watch.ResultChan() { - switch event.Type { //nolint:exhaustive - case apimachinerywatch.Added, apimachinerywatch.Modified: - pod, ok := event.Object.(*k8scorev1.Pod) - if !ok { - panic("this is a bug in the puller pod watch, this should not happen") - } - - switch pod.Status.Phase { //nolint:exhaustive - case k8scorev1.PodPending: - continue - case k8scorev1.PodRunning, k8scorev1.PodSucceeded, k8scorev1.PodFailed: - // its running/succeeded/failed any of which means the image has been pulled - watch.Stop() - } - } - } - - return reterr -} diff --git a/http/server.go b/http/server.go index 080f979e..a418584a 100644 --- a/http/server.go +++ b/http/server.go @@ -100,11 +100,6 @@ func (m *manager) Start() { m.aliveHandler, ) - mux.HandleFunc( - imageRoute, - m.imageHandler, - ) - m.server = &http.Server{ BaseContext: func(_ net.Listener) context.Context { return m.ctx diff --git a/http/types/image.go b/http/types/image.go deleted file mode 100644 index 3dd90efd..00000000 --- a/http/types/image.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -// ImageRequest represents a request from a launcher pod to have an image pulled onto the node that -// the launcher is running on. -type ImageRequest struct { - TopologyName string `json:"topologyName"` - TopologyNamespace string `json:"topologyNamespace"` - TopologyNodeName string `json:"topologyNodeName"` - KubernetesNodeName string `json:"nodeName"` - RequestingPodName string `json:"requestingPodName"` - RequestedImageName string `json:"requestedImageName"` - ConfiguredPullSecrets []string `json:"configuredPullSecrets"` -} diff --git a/launcher/clabernetes.go b/launcher/clabernetes.go index 43a09b46..a1dec08b 100644 --- a/launcher/clabernetes.go +++ b/launcher/clabernetes.go @@ -8,6 +8,7 @@ import ( "time" clabernetesapisv1alpha1 "github.com/srl-labs/clabernetes/apis/v1alpha1" + clabernetesgeneratedclientset "github.com/srl-labs/clabernetes/generated/clientset" "sigs.k8s.io/yaml" @@ -21,7 +22,7 @@ const ( containerCheckInterval = 5 * time.Second ) -// StartClabernetes is a function that starts the clabernetes launcher. +// StartClabernetes is a function that starts the clabernetes launcher. It cannot fail, only panic. 
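+// Unrecoverable setup/launch errors are surfaced through the logger's Fatalf below rather than a
+// Criticalf call followed by an explicit clabernetesutil.Panic.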
func StartClabernetes() { if clabernetesInstance != nil { clabernetesutil.Panic("clabernetes instance already created...") @@ -54,8 +55,9 @@ func StartClabernetes() { ctx, cancel := clabernetesutil.SignalHandledContext(clabernetesLogger.Criticalf) clabernetesInstance = &clabernetes{ - ctx: ctx, - cancel: cancel, + ctx: ctx, + cancel: cancel, + kubeClabernetesClient: mustNewKubeClabernetesClient(clabernetesLogger), appName: clabernetesutil.GetEnvStrOrDefault( clabernetesconstants.AppNameEnv, clabernetesconstants.AppNameDefault, @@ -76,6 +78,8 @@ type clabernetes struct { ctx context.Context cancel context.CancelFunc + kubeClabernetesClient *clabernetesgeneratedclientset.Clientset + appName string logger claberneteslogging.Instance @@ -120,9 +124,7 @@ func (c *clabernetes) setup() { err := c.handleInsecureRegistries() if err != nil { - c.logger.Criticalf("failed configuring insecure docker registries, err: %s", err) - - clabernetesutil.Panic(err.Error()) + c.logger.Fatalf("failed configuring insecure docker registries, err: %s", err) } c.logger.Debug("ensuring docker is running...") @@ -136,16 +138,12 @@ func (c *clabernetes) setup() { // see https://github.com/srl-labs/clabernetes/issues/47 err = c.enableLegacyIPTables() if err != nil { - c.logger.Criticalf("failed enabling legacy ip tables, err: %s", err) - - clabernetesutil.Panic(err.Error()) + c.logger.Fatalf("failed enabling legacy ip tables, err: %s", err) } err = c.startDocker() if err != nil { - c.logger.Criticalf("failed ensuring docker is running, err: %s", err) - - clabernetesutil.Panic(err.Error()) + c.logger.Fatalf("failed ensuring docker is running, err: %s", err) } c.logger.Warn("docker started, but using legacy ip tables") @@ -155,9 +153,7 @@ func (c *clabernetes) setup() { err = c.getFilesFromURL() if err != nil { - c.logger.Criticalf("failed getting file(s) from remote url, err: %s", err) - - clabernetesutil.Panic(err.Error()) + c.logger.Fatalf("failed getting file(s) from remote url, err: %s", err) } } @@ -166,9 +162,7 @@ func (c *clabernetes) launch() { err := c.runContainerlab() if err != nil { - c.logger.Criticalf("failed launching containerlab, err: %s", err) - - clabernetesutil.Panic(err.Error()) + c.logger.Fatalf("failed launching containerlab, err: %s", err) } c.containerIDs = c.getContainerIDs() @@ -188,18 +182,14 @@ func (c *clabernetes) launch() { tunnelBytes, err := os.ReadFile("tunnels.yaml") if err != nil { - c.logger.Criticalf("failed loading tunnels yaml file content, err: %s", err) - - clabernetesutil.Panic(err.Error()) + c.logger.Fatalf("failed loading tunnels yaml file content, err: %s", err) } var tunnelObj []*clabernetesapisv1alpha1.Tunnel err = yaml.Unmarshal(tunnelBytes, &tunnelObj) if err != nil { - c.logger.Criticalf("failed unmarshalling tunnels config, err: %s", err) - - clabernetesutil.Panic(err.Error()) + c.logger.Fatalf("failed unmarshalling tunnels config, err: %s", err) } for _, tunnel := range tunnelObj { @@ -210,14 +200,12 @@ func (c *clabernetes) launch() { tunnel.ID, ) if err != nil { - c.logger.Criticalf( + c.logger.Fatalf( "failed setting up tunnel to remote node '%s' for local interface '%s', error: %s", tunnel.RemoteNodeName, tunnel.LocalLinkName, err, ) - - clabernetesutil.Panic(err.Error()) } } } diff --git a/launcher/client.go b/launcher/client.go new file mode 100644 index 00000000..aeea2db7 --- /dev/null +++ b/launcher/client.go @@ -0,0 +1,32 @@ +package launcher + +import ( + "time" + + clabernetesgeneratedclientset 
"github.com/srl-labs/clabernetes/generated/clientset" + claberneteslogging "github.com/srl-labs/clabernetes/logging" + "k8s.io/client-go/rest" +) + +const ( + clientDefaultTimeout = time.Minute +) + +func mustNewKubeClabernetesClient( + logger claberneteslogging.Instance, +) *clabernetesgeneratedclientset.Clientset { + kubeConfig, err := rest.InClusterConfig() + if err != nil { + logger.Fatalf("failed getting in cluster kubeconfig, err: %s", err) + } + + kubeClabernetesClient, err := clabernetesgeneratedclientset.NewForConfig(kubeConfig) + if err != nil { + logger.Fatalf( + "failed creating clabernetes kube client from in cluster kubeconfig, err: %s", + err, + ) + } + + return kubeClabernetesClient +} diff --git a/launcher/image.go b/launcher/image.go index 81b235a3..01d94927 100644 --- a/launcher/image.go +++ b/launcher/image.go @@ -1,20 +1,19 @@ package launcher import ( - "bytes" "context" - "crypto/tls" - "encoding/json" "fmt" - "io" - "net/http" "os" "os/exec" "time" - "gopkg.in/yaml.v3" + clabernetesapisv1alpha1 "github.com/srl-labs/clabernetes/apis/v1alpha1" + clabernetesutilkubernetes "github.com/srl-labs/clabernetes/util/kubernetes" + + apimachineryerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - claberneteshttptypes "github.com/srl-labs/clabernetes/http/types" + "gopkg.in/yaml.v3" claberneteserrors "github.com/srl-labs/clabernetes/errors" @@ -29,6 +28,13 @@ const ( imageCheckLogCounter = 6 ) +func generateImageRequestCRName(nodeName, imageName string) string { + // hash the image name so it doesn't contain invalid chars for k8s name + return clabernetesutilkubernetes.SafeConcatNameKubernetes( + nodeName, clabernetesutil.HashBytes([]byte(imageName)), + ) +} + func (c *clabernetes) image() { abort, imageManager := c.prepareImagePullThrough() if abort { @@ -40,7 +46,7 @@ func (c *clabernetes) image() { c.logger.Warnf("failed image pull through (check), err: %s", err) if c.imagePullThroughMode == clabernetesconstants.ImagePullThroughModeAlways { - clabernetesutil.Panic( + c.logger.Fatal( "image pull through failed and pull through mode is always, cannot continue", ) } @@ -143,14 +149,8 @@ func (c *clabernetes) prepareImagePullThrough() ( c.logger.Warnf("error creating image manager, err: %s", err) if c.imagePullThroughMode == clabernetesconstants.ImagePullThroughModeAlways { - msg := fmt.Sprintf( - "image pull through mode is always, but criKind is unset or unknown," + - " cannot continue...", - ) - - c.logger.Critical(msg) - - clabernetesutil.Panic(msg) + c.logger.Fatal("image pull through mode is always, but criKind is unset or unknown," + + " cannot continue...") } c.logger.Warn( @@ -163,14 +163,10 @@ func (c *clabernetes) prepareImagePullThrough() ( if c.imageName == "" { if c.imagePullThroughMode == clabernetesconstants.ImagePullThroughModeAlways { - msg := fmt.Sprintf( + c.logger.Fatal( "image pull through mode is always, node image is unknown," + " cannot continue...", ) - - c.logger.Critical(msg) - - clabernetesutil.Panic(msg) } c.logger.Warn( @@ -196,96 +192,112 @@ func (c *clabernetes) requestImagePull( imageManager claberneteslauncherimage.Manager, configuredPullSecrets []string, ) error { - err := c.sendImagePullRequest(configuredPullSecrets) - if err != nil { - c.logger.Warnf("failed image pull through (request pull), err: %s", err) + nodeName := os.Getenv(clabernetesconstants.NodeNameEnv) - return err - } + imageRequestCRName := generateImageRequestCRName(nodeName, c.imageName) 
- err = c.waitForImage(imageManager) + err := c.createImageRequestCR(nodeName, imageRequestCRName, configuredPullSecrets) if err != nil { - c.logger.Warnf("failed image pull through (wait), err: %s", err) + c.logger.Warnf("failed image pull through (create request), err: %s", err) return err } - return nil -} + err = c.waitImageRequestCRAccepted(imageRequestCRName) + if err != nil { + c.logger.Warnf("failed image pull through (wait accepted), err: %s", err) -func (c *clabernetes) sendImagePullRequest(configuredPullSecrets []string) error { - imageRequest := claberneteshttptypes.ImageRequest{ - TopologyName: os.Getenv(clabernetesconstants.LauncherTopologyNameEnv), - TopologyNamespace: os.Getenv(clabernetesconstants.PodNamespaceEnv), - TopologyNodeName: os.Getenv(clabernetesconstants.LauncherNodeNameEnv), - KubernetesNodeName: os.Getenv(clabernetesconstants.NodeNameEnv), - RequestingPodName: os.Getenv(clabernetesconstants.PodNameEnv), - RequestedImageName: c.imageName, - ConfiguredPullSecrets: configuredPullSecrets, + return err } - requestJSON, err := json.Marshal(imageRequest) + err = c.waitForImage(imageManager) if err != nil { - c.logger.Criticalf("failed marshaling image pull request, error: %s", err) + c.logger.Warnf("failed image pull through (wait image present), err: %s", err) return err } - body := bytes.NewReader(requestJSON) + return nil +} - ctx, cancel := context.WithTimeout(c.ctx, clabernetesconstants.DefaultClientOperationTimeout) +func (c *clabernetes) createImageRequestCR( + nodeName, imageRequestCRName string, + configuredPullSecrets []string, +) error { + ctx, cancel := context.WithTimeout(c.ctx, clientDefaultTimeout) defer cancel() - request, err := http.NewRequestWithContext( - ctx, - http.MethodPost, - fmt.Sprintf( - "https://%s.%s/image", - fmt.Sprintf("%s-http", os.Getenv(clabernetesconstants.AppNameEnv)), - os.Getenv(clabernetesconstants.ManagerNamespaceEnv), - ), - body, - ) + _, err := c.kubeClabernetesClient.ClabernetesV1alpha1(). + ImageRequests(os.Getenv(clabernetesconstants.PodNamespaceEnv)). 
+ Create( + ctx, + &clabernetesapisv1alpha1.ImageRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: imageRequestCRName, + }, + Spec: clabernetesapisv1alpha1.ImageRequestSpec{ + TopologyName: os.Getenv( + clabernetesconstants.LauncherTopologyNameEnv, + ), + TopologyNodeName: os.Getenv(clabernetesconstants.LauncherNodeNameEnv), + KubernetesNode: nodeName, + RequestedImage: c.imageName, + RequestedImagePullSecrets: configuredPullSecrets, + }, + }, + metav1.CreateOptions{}, + ) if err != nil { - c.logger.Criticalf("failed building image pull request, error: %s", err) + if apimachineryerrors.IsAlreadyExists(err) { + // if it already exists some other launcher has requested this image for this node + return nil + } + // any other error would be a bad bingo return err } - request.Header.Set("Content-Type", "application/json") + return nil +} - client := &http.Client{Transport: &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec - }} +func (c *clabernetes) waitImageRequestCRAccepted(imageRequestCRName string) error { + startTime := time.Now() - response, err := client.Do(request) - if err != nil { - c.logger.Criticalf("failed executing image pull request, error: %s", err) + ticker := time.NewTicker(imageCheckPollInterval) - return err - } + for range ticker.C { + if time.Since(startTime) > clabernetesconstants.PullerPodTimeout { + break + } - responseBody, err := io.ReadAll(response.Body) - if err != nil { - c.logger.Criticalf("failed reading image pull request response, error: %s", err) + ctx, cancel := context.WithTimeout(c.ctx, clientDefaultTimeout) - return err - } + imageRequestCR, err := c.kubeClabernetesClient.ClabernetesV1alpha1(). + ImageRequests(os.Getenv(clabernetesconstants.PodNamespaceEnv)). + Get( + ctx, + imageRequestCRName, + metav1.GetOptions{}, + ) - if response.StatusCode != http.StatusOK { - msg := fmt.Sprintf( - "received non 200 status code from image pull request, response body: %s", - string(responseBody), - ) + cancel() - c.logger.Criticalf(msg) + if err != nil { + return err + } - return fmt.Errorf("%w: %s", claberneteserrors.ErrLaunch, msg) + if imageRequestCR.Status.Accepted { + // cr has been "accepted" meaning controller will handle getting the image pulled on + // our node. 
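+			// note: we intentionally return on accepted rather than complete; the launcher then
+			// confirms the image actually landed on the node via waitForImage against the local
+			// image manager before attempting the pull through.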
+ return nil + } } - _ = response.Body.Close() - - return nil + return fmt.Errorf( + "%w: timed out waiting for image request cr %q to change to accepted state", + claberneteserrors.ErrLaunch, + imageRequestCRName, + ) } func (c *clabernetes) waitForImage( diff --git a/manager/clabernetes.go b/manager/clabernetes.go index 3bfedee5..e98372c2 100644 --- a/manager/clabernetes.go +++ b/manager/clabernetes.go @@ -6,17 +6,19 @@ import ( "os" "time" + "k8s.io/klog/v2" + clabernetesgeneratedclientset "github.com/srl-labs/clabernetes/generated/clientset" claberneteshttp "github.com/srl-labs/clabernetes/http" apimachineryruntime "k8s.io/apimachinery/pkg/runtime" - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" - clabernetesconstants "github.com/srl-labs/clabernetes/constants" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log" claberneteslogging "github.com/srl-labs/clabernetes/logging" clabernetesutil "github.com/srl-labs/clabernetes/util" @@ -52,11 +54,13 @@ func StartClabernetes(initializer bool) { err := createNewKlogLogger(logManager) if err != nil { - clabernetesLogger.Criticalf("failed patching klog, err: %s", err) - - clabernetesutil.Panic(err.Error()) + clabernetesLogger.Fatalf("failed patching klog, err: %s", err) } + // set controller-runtime log too; we just use klogr since we'll patch it to be wrapped in our + // log manager anyway + ctrlruntimelog.SetLogger(klog.NewKlogr()) + ctx, cancel := clabernetesutil.SignalHandledContext(clabernetesLogger.Criticalf) clabernetesInstance = &clabernetes{ @@ -233,9 +237,3 @@ func (c *clabernetes) Exit(exitCode int) { os.Exit(exitCode) } - -func (c *clabernetes) Panic(msg string) { - claberneteslogging.GetManager().Flush() - - clabernetesutil.Panic(msg) -} diff --git a/manager/init.go b/manager/init.go index 17924430..fbfaa6a0 100644 --- a/manager/init.go +++ b/manager/init.go @@ -2,7 +2,6 @@ package manager import ( "context" - "fmt" ) func (c *clabernetes) init(ctx context.Context) { @@ -14,11 +13,7 @@ func (c *clabernetes) init(ctx context.Context) { err := initializeCertificates(c) if err != nil { - msg := fmt.Sprintf("failed initializing certificates, err: %s", err) - - c.logger.Critical(msg) - - c.Panic(err.Error()) + c.logger.Fatalf("failed initializing certificates, err: %s", err) } c.logger.Debug("initializing certificates complete...") @@ -27,11 +22,7 @@ func (c *clabernetes) init(ctx context.Context) { err = initializeCrds(c) if err != nil { - msg := fmt.Sprintf("failed initializing crds, err: %s", err) - - c.logger.Critical(msg) - - c.Panic(err.Error()) + c.logger.Fatalf("failed initializing crds, err: %s", err) } c.logger.Debug("initializing crds complete...") diff --git a/manager/kubernetes.go b/manager/kubernetes.go index f468a0d1..039a817f 100644 --- a/manager/kubernetes.go +++ b/manager/kubernetes.go @@ -2,7 +2,6 @@ package manager import ( clabernetesapisv1alpha1 "github.com/srl-labs/clabernetes/apis/v1alpha1" - "k8s.io/apimachinery/pkg/labels" apimachineryruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" @@ -33,6 +32,7 @@ func newManager(scheme *apimachineryruntime.Scheme, appName string) (ctrlruntime // about anything else (for now -- and we can override it with opts.ByObject // anyway?! and... 
who the hell calls their app "clabernetes" so this should // really limit the cache nicely :) + // currently this matters for launcher service accounts and role bindings "clabernetes/app": appName, }, ) @@ -46,6 +46,14 @@ func newManager(scheme *apimachineryruntime.Scheme, appName string) (ctrlruntime }, }, }, + // we need to cache all our image request crs too of course + &clabernetesapisv1alpha1.ImageRequest{}: { + Namespaces: map[string]ctrlruntimecache.Config{ + ctrlruntimecache.AllNamespaces: { + LabelSelector: labels.Everything(), + }, + }, + }, // watch our config "singleton" too; while this is sorta/basically a "cluster" // CR -- we dont want to have to force users to have cluster wide perms, *and* // we want to be able to set an owner ref to the manager deployment, so the diff --git a/manager/preinit.go b/manager/preinit.go index de2f4f8e..187df89d 100644 --- a/manager/preinit.go +++ b/manager/preinit.go @@ -17,33 +17,25 @@ func (c *clabernetes) preInit() { c.namespace, err = clabernetesutilkubernetes.CurrentNamespace() if err != nil { - c.logger.Criticalf("failed getting current namespace, err: %s", err) - - c.Panic(err.Error()) + c.logger.Fatalf("failed getting current namespace, err: %s", err) } c.kubeConfig, err = rest.InClusterConfig() if err != nil { - c.logger.Criticalf("failed getting in cluster kubeconfig, err: %s", err) - - c.Panic(err.Error()) + c.logger.Fatalf("failed getting in cluster kubeconfig, err: %s", err) } c.kubeClient, err = kubernetes.NewForConfig(c.kubeConfig) if err != nil { - c.logger.Criticalf("failed creating kube client from in cluster kubeconfig, err: %s", err) - - c.Panic(err.Error()) + c.logger.Fatalf("failed creating kube client from in cluster kubeconfig, err: %s", err) } c.kubeClabernetesClient, err = clabernetesgeneratedclientset.NewForConfig(c.kubeConfig) if err != nil { - c.logger.Criticalf( + c.logger.Fatalf( "failed creating clabernetes kube client from in cluster kubeconfig, err: %s", err, ) - - c.Panic(err.Error()) } c.scheme = apimachineryruntime.NewScheme() diff --git a/manager/prepare.go b/manager/prepare.go index bcb78c0a..f10da5e9 100644 --- a/manager/prepare.go +++ b/manager/prepare.go @@ -1,9 +1,5 @@ package manager -import ( - "fmt" -) - // prepare handles preparation tasks that happen before running the clabernetes.start method. 
func (c *clabernetes) prepare() { c.logger.Info("begin prepare...") @@ -12,11 +8,7 @@ func (c *clabernetes) prepare() { err := prepareCertificates(c) if err != nil { - msg := fmt.Sprintf("failed preparing certificates, err: %s", err) - - c.logger.Critical(msg) - - c.Panic(msg) + c.logger.Fatalf("failed preparing certificates, err: %s", err) } c.logger.Debug("preparing certificates complete...") @@ -25,11 +17,7 @@ func (c *clabernetes) prepare() { err = registerToScheme(c) if err != nil { - msg := fmt.Sprintf("failed registering apis to scheme, err: %s", err) - - c.logger.Critical(msg) - - c.Panic(msg) + c.logger.Fatalf("failed registering apis to scheme, err: %s", err) } c.logger.Debug("preparing scheme complete...") diff --git a/manager/prestart.go b/manager/prestart.go index aa6d297b..28e5efe4 100644 --- a/manager/prestart.go +++ b/manager/prestart.go @@ -1,7 +1,6 @@ package manager import ( - "fmt" "strings" clabernetesconfig "github.com/srl-labs/clabernetes/config" @@ -21,11 +20,7 @@ func (c *clabernetes) preStart() { // we *shouldn't* actually ever hit this as the config manager can start and *not* find a // config that it manages just fine, but i guess its possible that something terrible // could happen that would prevent us from continuing. - msg := fmt.Sprintf("failed starting config manager, err: %s", err) - - c.logger.Critical(msg) - - clabernetesutil.Panic(msg) + c.logger.Fatalf("failed starting config manager, err: %s", err) } c.logger.Debug("config manager started...") @@ -34,11 +29,7 @@ func (c *clabernetes) preStart() { nodeCriKind, err := cri(c) if err != nil { - msg := fmt.Sprintf("failed dermining cri sameness, err: %s", err) - - c.logger.Critical(msg) - - clabernetesutil.Panic(msg) + c.logger.Fatalf("failed dermining cri sameness, err: %s", err) } c.criKind = nodeCriKind diff --git a/manager/start.go b/manager/start.go index 1ac0690d..9ae8acb0 100644 --- a/manager/start.go +++ b/manager/start.go @@ -3,6 +3,7 @@ package manager import ( "context" + clabernetescontrollersimagerequest "github.com/srl-labs/clabernetes/controllers/imagerequest" clabernetescontrollerstopology "github.com/srl-labs/clabernetes/controllers/topology" clabernetesconstants "github.com/srl-labs/clabernetes/constants" @@ -42,6 +43,7 @@ func (c *clabernetes) startLeading(ctx context.Context) { controllersToRegisterFuncs := []clabernetescontrollers.NewController{ clabernetescontrollerstopology.NewController, + clabernetescontrollersimagerequest.NewController, } for _, newF := range controllersToRegisterFuncs {
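Taken together, the launcher side of the new flow (replacing the deleted /image HTTP endpoint) boils down to: create an ImageRequest for this node and image, tolerate AlreadyExists, then poll until the controller reports it accepted before checking for the image locally. The sketch below is a compressed illustration using only the generated clientset and the fields added in this patch; the namespace, names, timeouts, and error wrapping are placeholders and differ from the real launcher code:

package launcherexample

import (
	"context"
	"fmt"
	"time"

	clabernetesapisv1alpha1 "github.com/srl-labs/clabernetes/apis/v1alpha1"
	clabernetesgeneratedclientset "github.com/srl-labs/clabernetes/generated/clientset"
	apimachineryerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

// requestImage is a stand-in for the launcher's createImageRequestCR + waitImageRequestCRAccepted
// pair; in the real launcher the topology and node names come from environment variables.
func requestImage(ctx context.Context, namespace, crName, node, image string) error {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return err
	}

	client, err := clabernetesgeneratedclientset.NewForConfig(cfg)
	if err != nil {
		return err
	}

	_, err = client.ClabernetesV1alpha1().ImageRequests(namespace).Create(
		ctx,
		&clabernetesapisv1alpha1.ImageRequest{
			ObjectMeta: metav1.ObjectMeta{Name: crName},
			Spec: clabernetesapisv1alpha1.ImageRequestSpec{
				TopologyName:     "topo1", // hypothetical; really read from the launcher env
				TopologyNodeName: "srl1",  // hypothetical; really read from the launcher env
				KubernetesNode:   node,
				RequestedImage:   image,
			},
		},
		metav1.CreateOptions{},
	)
	if err != nil && !apimachineryerrors.IsAlreadyExists(err) {
		// anything other than "already exists" (another launcher on this node beat us to it)
		// is a real failure
		return err
	}

	// poll until the controller acknowledges the request or we give up
	for start := time.Now(); time.Since(start) < 5*time.Minute; time.Sleep(5 * time.Second) {
		found, err := client.ClabernetesV1alpha1().ImageRequests(namespace).Get(
			ctx, crName, metav1.GetOptions{},
		)
		if err != nil {
			return err
		}

		if found.Status.Accepted {
			return nil
		}
	}

	return fmt.Errorf("timed out waiting for image request %q to be accepted", crName)
}

On the controller side, the imagerequest controller registered above alongside the topology controller is what is expected to flip accepted (and eventually complete) once its pull work on the requested node has been done.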