Merge pull request #747 from SchSeba/device_plugin_redesign
Redesign device plugin reset
SchSeba authored Nov 19, 2024
2 parents 92fee7b + baa41c9 commit 623c339
Showing 14 changed files with 353 additions and 542 deletions.
2 changes: 1 addition & 1 deletion bindata/manifests/plugins/sriov-device-plugin.yaml
@@ -27,7 +27,7 @@ spec:
hostNetwork: true
nodeSelector:
{{- range $key, $value := .NodeSelectorField }}
{{ $key }}: {{ $value }}
{{ $key }}: "{{ $value }}"
{{- end }}
tolerations:
- operator: Exists
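
The quoting added here matters because text/template writes map values verbatim: an empty or boolean-like label value (`""`, `true`) in an unquoted YAML mapping decodes as null or bool, not the string Kubernetes requires for labels. A minimal standalone sketch of the rendering (not the operator's renderer; the template and values are illustrative):

```go
package main

import (
	"os"
	"text/template"
)

// Renders the nodeSelector block the way the patched manifest does: with
// the value quoted. The worker role label has an empty value; unquoted it
// would render as `node-role.kubernetes.io/worker:` and decode as null.
func main() {
	tmpl := template.Must(template.New("sel").Parse(
		"nodeSelector:\n" +
			"{{- range $key, $value := . }}\n" +
			"  {{ $key }}: \"{{ $value }}\"\n" +
			"{{- end }}\n"))
	selector := map[string]string{
		"node-role.kubernetes.io/worker": "",
		"kubernetes.io/os":               "linux",
	}
	if err := tmpl.Execute(os.Stdout, selector); err != nil {
		panic(err)
	}
}
```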
130 changes: 24 additions & 106 deletions controllers/helper.go
@@ -22,7 +22,6 @@ import (
"encoding/json"
"fmt"
"os"
"sort"
"strings"

errs "github.com/pkg/errors"
@@ -51,7 +50,7 @@ import (
)

var (
webhooks = map[string](string){
webhooks = map[string]string{
constants.InjectorWebHookName: constants.InjectorWebHookPath,
constants.OperatorWebHookName: constants.OperatorWebHookPath,
}
@@ -162,29 +161,33 @@ func formatJSON(str string) (string, error) {
return prettyJSON.String(), nil
}

// GetDefaultNodeSelector returns a nodeSelector with the worker role and linux OS labels
func GetDefaultNodeSelector() map[string]string {
return map[string]string{"node-role.kubernetes.io/worker": "",
"kubernetes.io/os": "linux"}
return map[string]string{
"node-role.kubernetes.io/worker": "",
"kubernetes.io/os": "linux",
}
}

// hasNoValidPolicy returns true if no SriovNetworkNodePolicy
// or only the (deprecated) "default" policy is present
func hasNoValidPolicy(pl []sriovnetworkv1.SriovNetworkNodePolicy) bool {
switch len(pl) {
case 0:
return true
case 1:
return pl[0].Name == constants.DefaultPolicyName
default:
return false
// GetNodeSelectorForDevicePlugin returns the nodeSelector for the device plugin:
// the linux OS label plus the SR-IOV device plugin enabled label, or, when set,
// the user's ConfigDaemonNodeSelector with the enabled label merged in
func GetNodeSelectorForDevicePlugin(dc *sriovnetworkv1.SriovOperatorConfig) map[string]string {
if len(dc.Spec.ConfigDaemonNodeSelector) == 0 {
return map[string]string{
"kubernetes.io/os": "linux",
constants.SriovDevicePluginLabel: constants.SriovDevicePluginLabelEnabled,
}
}

tmp := dc.Spec.DeepCopy()
tmp.ConfigDaemonNodeSelector[constants.SriovDevicePluginLabel] = constants.SriovDevicePluginLabelEnabled
return tmp.ConfigDaemonNodeSelector
}
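
The new helper has two paths: with no ConfigDaemonNodeSelector it returns a built-in selector, otherwise it merges the enable label into a deep copy so the CR spec is never mutated. A hypothetical walk-through, written as if inside the controllers package (the api/v1 import path is assumed from the repository layout; the printed label key and value come from constants.SriovDevicePluginLabel and constants.SriovDevicePluginLabelEnabled):

```go
package controllers

import (
	"fmt"

	sriovnetworkv1 "github.com/k8snetworkplumbingwg/sriov-network-operator/api/v1"
)

func ExampleGetNodeSelectorForDevicePlugin() {
	cfg := &sriovnetworkv1.SriovOperatorConfig{}

	// Path 1: no ConfigDaemonNodeSelector set, so the default OS label
	// plus the device plugin enable label is returned.
	fmt.Println(GetNodeSelectorForDevicePlugin(cfg))

	// Path 2: a user-supplied selector; the enable label is merged into a
	// deep copy, so cfg.Spec.ConfigDaemonNodeSelector itself is unchanged.
	cfg.Spec.ConfigDaemonNodeSelector = map[string]string{
		"node-role.kubernetes.io/worker": "",
	}
	fmt.Println(GetNodeSelectorForDevicePlugin(cfg))
}
```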

func syncPluginDaemonObjs(ctx context.Context,
client k8sclient.Client,
scheme *runtime.Scheme,
dc *sriovnetworkv1.SriovOperatorConfig,
pl *sriovnetworkv1.SriovNetworkNodePolicyList) error {
dc *sriovnetworkv1.SriovOperatorConfig) error {
logger := log.Log.WithName("syncPluginDaemonObjs")
logger.V(1).Info("Start to sync sriov daemons objects")

@@ -195,42 +198,17 @@ func syncPluginDaemonObjs(ctx context.Context,
data.Data["ReleaseVersion"] = os.Getenv("RELEASEVERSION")
data.Data["ResourcePrefix"] = vars.ResourcePrefix
data.Data["ImagePullSecrets"] = GetImagePullSecrets()
data.Data["NodeSelectorField"] = GetDefaultNodeSelector()
data.Data["NodeSelectorField"] = GetNodeSelectorForDevicePlugin(dc)
data.Data["UseCDI"] = dc.Spec.UseCDI
objs, err := renderDsForCR(constants.PluginPath, &data)
if err != nil {
logger.Error(err, "Fail to render SR-IoV manifests")
return err
}

if hasNoValidPolicy(pl.Items) {
for _, obj := range objs {
err := deleteK8sResource(ctx, client, obj)
if err != nil {
return err
}
}
return nil
}

// Sync DaemonSets
for _, obj := range objs {
if obj.GetKind() == constants.DaemonSet && len(dc.Spec.ConfigDaemonNodeSelector) > 0 {
scheme := kscheme.Scheme
ds := &appsv1.DaemonSet{}
err = scheme.Convert(obj, ds, nil)
if err != nil {
logger.Error(err, "Fail to convert to DaemonSet")
return err
}
ds.Spec.Template.Spec.NodeSelector = dc.Spec.ConfigDaemonNodeSelector
err = scheme.Convert(ds, obj, nil)
if err != nil {
logger.Error(err, "Fail to convert to Unstructured")
return err
}
}
err = syncDsObject(ctx, client, scheme, dc, pl, obj)
err = syncDsObject(ctx, client, scheme, dc, obj)
if err != nil {
logger.Error(err, "Couldn't sync SR-IoV daemons objects")
return err
@@ -240,14 +218,7 @@ func syncPluginDaemonObjs(ctx context.Context,
return nil
}
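
The redesign drops the SriovNetworkNodePolicyList parameter, and with it both the delete-everything branch (hasNoValidPolicy plus deleteK8sResource) and the per-DaemonSet nodeSelector override. The device plugin DaemonSet now always exists; which nodes run it is decided by a per-node label matched by the selector rendered above. A hypothetical sketch of that label flip (the literal key and value stand in for constants.SriovDevicePluginLabel and constants.SriovDevicePluginLabelEnabled, whose real strings live in the operator's consts package):

```go
package controllers

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// setDevicePluginLabel is illustrative only: with label-driven scheduling,
// enabling or disabling the device plugin on a node is a label patch, not a
// DaemonSet rewrite.
func setDevicePluginLabel(ctx context.Context, c k8sclient.Client, node *corev1.Node, enabled bool) error {
	patch := k8sclient.MergeFrom(node.DeepCopy())
	if node.Labels == nil {
		node.Labels = map[string]string{}
	}
	if enabled {
		node.Labels["sriovnetwork.openshift.io/device-plugin"] = "Enabled"
	} else {
		delete(node.Labels, "sriovnetwork.openshift.io/device-plugin")
	}
	return c.Patch(ctx, node, patch)
}
```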

func deleteK8sResource(ctx context.Context, client k8sclient.Client, in *uns.Unstructured) error {
if err := apply.DeleteObject(ctx, client, in); err != nil {
return fmt.Errorf("failed to delete object %v with err: %v", in, err)
}
return nil
}

func syncDsObject(ctx context.Context, client k8sclient.Client, scheme *runtime.Scheme, dc *sriovnetworkv1.SriovOperatorConfig, pl *sriovnetworkv1.SriovNetworkNodePolicyList, obj *uns.Unstructured) error {
func syncDsObject(ctx context.Context, client k8sclient.Client, scheme *runtime.Scheme, dc *sriovnetworkv1.SriovOperatorConfig, obj *uns.Unstructured) error {
logger := log.Log.WithName("syncDsObject")
kind := obj.GetKind()
logger.V(1).Info("Start to sync Objects", "Kind", kind)
@@ -267,7 +238,7 @@ func syncDsObject(ctx context.Context, client k8sclient.Client, scheme *runtime.
logger.Error(err, "Fail to convert to DaemonSet")
return err
}
err = syncDaemonSet(ctx, client, scheme, dc, pl, ds)
err = syncDaemonSet(ctx, client, scheme, dc, ds)
if err != nil {
logger.Error(err, "Fail to sync DaemonSet", "Namespace", ds.Namespace, "Name", ds.Name)
return err
@@ -276,54 +247,6 @@ func syncDsObject(ctx context.Context, client k8sclient.Client, scheme *runtime.
return nil
}

func setDsNodeAffinity(pl *sriovnetworkv1.SriovNetworkNodePolicyList, ds *appsv1.DaemonSet) error {
terms := nodeSelectorTermsForPolicyList(pl.Items)
if len(terms) > 0 {
ds.Spec.Template.Spec.Affinity = &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
NodeSelectorTerms: terms,
},
},
}
}
return nil
}

func nodeSelectorTermsForPolicyList(policies []sriovnetworkv1.SriovNetworkNodePolicy) []corev1.NodeSelectorTerm {
terms := []corev1.NodeSelectorTerm{}
for _, p := range policies {
// Note(adrianc): default policy is deprecated and ignored.
if p.Name == constants.DefaultPolicyName {
continue
}

if len(p.Spec.NodeSelector) == 0 {
continue
}
expressions := []corev1.NodeSelectorRequirement{}
for k, v := range p.Spec.NodeSelector {
exp := corev1.NodeSelectorRequirement{
Operator: corev1.NodeSelectorOpIn,
Key: k,
Values: []string{v},
}
expressions = append(expressions, exp)
}
// sorting is needed to keep the daemon spec stable.
// the items are popped in a random order from the map
sort.Slice(expressions, func(i, j int) bool {
return expressions[i].Key < expressions[j].Key
})
nodeSelector := corev1.NodeSelectorTerm{
MatchExpressions: expressions,
}
terms = append(terms, nodeSelector)
}

return terms
}

// renderDsForCR renders the manifests at the given path and returns them as unstructured objects
func renderDsForCR(path string, data *render.RenderData) ([]*uns.Unstructured, error) {
logger := log.Log.WithName("renderDsForCR")
@@ -336,16 +259,11 @@ func renderDsForCR(path string, data *render.RenderData) ([]*uns.Unstructured, e
return objs, nil
}

func syncDaemonSet(ctx context.Context, client k8sclient.Client, scheme *runtime.Scheme, dc *sriovnetworkv1.SriovOperatorConfig, pl *sriovnetworkv1.SriovNetworkNodePolicyList, in *appsv1.DaemonSet) error {
func syncDaemonSet(ctx context.Context, client k8sclient.Client, scheme *runtime.Scheme, dc *sriovnetworkv1.SriovOperatorConfig, in *appsv1.DaemonSet) error {
logger := log.Log.WithName("syncDaemonSet")
logger.V(1).Info("Start to sync DaemonSet", "Namespace", in.Namespace, "Name", in.Name)
var err error

if pl != nil {
if err = setDsNodeAffinity(pl, in); err != nil {
return err
}
}
if err = controllerutil.SetControllerReference(dc, in, scheme); err != nil {
return err
}
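
With deleteK8sResource gone, the only remaining deletion path for the plugin DaemonSet is Kubernetes garbage collection through the controller reference set here. A minimal restatement of that ownership contract (the helper name and placement are illustrative):

```go
package controllers

import (
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

	sriovnetworkv1 "github.com/k8snetworkplumbingwg/sriov-network-operator/api/v1"
)

// ownDaemonSet restates the cleanup contract left after this PR: policy
// churn never deletes the DaemonSet; it is garbage-collected only via this
// owner reference when the SriovOperatorConfig itself is deleted.
func ownDaemonSet(dc *sriovnetworkv1.SriovOperatorConfig, ds *appsv1.DaemonSet, scheme *runtime.Scheme) error {
	return controllerutil.SetControllerReference(dc, ds, scheme)
}
```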
