Skip to content

Commit

Permalink
fix: nil check replicas ptr before de-referencing
Browse files Browse the repository at this point in the history
This change does a nil check on the int32 ptr specifying the number of replicas inside the TalosControlPlane spec.

Signed-off-by: Gerard de Leeuw <[email protected]>
Co-authored-by: Artem Chernyshev <[email protected]>
  • Loading branch information
lion7 and Unix4ever committed Apr 17, 2023
1 parent b10e2e7 commit e9b6948
Show file tree
Hide file tree
Showing 4 changed files with 14 additions and 12 deletions.
10 changes: 10 additions & 0 deletions api/v1alpha3/taloscontrolplane_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,16 @@ type TalosControlPlaneSpec struct {
RolloutStrategy *RolloutStrategy `json:"rolloutStrategy,omitempty"`
}

// GetReplicas returns the desired replica count from the spec.
// A nil Replicas pointer is treated as zero, so callers never need
// to nil-check before dereferencing.
func (s *TalosControlPlaneSpec) GetReplicas() int32 {
	var n int32
	if r := s.Replicas; r != nil {
		n = *r
	}
	return n
}

// RolloutStrategy describes how to replace existing machines
// with new ones.
type RolloutStrategy struct {
Expand Down
12 changes: 2 additions & 10 deletions controllers/scale.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,11 +24,7 @@ import (

func (r *TalosControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, cluster *clusterv1.Cluster, tcp *controlplanev1.TalosControlPlane, controlPlane *ControlPlane) (ctrl.Result, error) {
numMachines := len(controlPlane.Machines)
desiredReplicas := 0

if tcp.Spec.Replicas != nil {
desiredReplicas = int(*tcp.Spec.Replicas)
}
desiredReplicas := tcp.Spec.GetReplicas()

conditions.MarkFalse(tcp, controlplanev1.ResizedCondition, controlplanev1.ScalingUpReason, clusterv1.ConditionSeverityWarning,
"Scaling up control plane to %d replicas (actual %d)",
Expand All @@ -48,11 +44,7 @@ func (r *TalosControlPlaneReconciler) scaleDownControlPlane(
machinesRequireUpgrade collections.Machines) (ctrl.Result, error) {

numMachines := len(controlPlane.Machines)
desiredReplicas := 0

if tcp.Spec.Replicas != nil {
desiredReplicas = int(*tcp.Spec.Replicas)
}
desiredReplicas := tcp.Spec.GetReplicas()

conditions.MarkFalse(tcp, controlplanev1.ResizedCondition, controlplanev1.ScalingDownReason, clusterv1.ConditionSeverityWarning,
"Scaling down control plane to %d replicas (actual %d)",
Expand Down
2 changes: 1 addition & 1 deletion controllers/taloscontrolplane_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -726,7 +726,7 @@ func (r *TalosControlPlaneReconciler) reconcileMachines(ctx context.Context, clu

// If we've made it this far, we can assume that all ownedMachines are up to date
numMachines := len(machines.Items)
desiredReplicas := int(*tcp.Spec.Replicas)
desiredReplicas := int(tcp.Spec.GetReplicas())

controlPlane, err := newControlPlane(ctx, r.Client, cluster, tcp, collections.FromMachineList(machines))
if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion controllers/upgrade.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ func (r *TalosControlPlaneReconciler) upgradeControlPlane(
switch tcp.Spec.RolloutStrategy.Type {
case controlplanev1.RollingUpdateStrategyType:
// RolloutStrategy is currently defaulted and validated to be RollingUpdate
maxNodes := *tcp.Spec.Replicas + int32(tcp.Spec.RolloutStrategy.RollingUpdate.MaxSurge.IntValue())
maxNodes := tcp.Spec.GetReplicas() + int32(tcp.Spec.RolloutStrategy.RollingUpdate.MaxSurge.IntValue())
if int32(controlPlane.Machines.Len()) < maxNodes {
// scaleUp ensures that we don't continue scaling up while waiting for Machines to have NodeRefs
return r.scaleUpControlPlane(ctx, cluster, tcp, controlPlane)
Expand Down

0 comments on commit e9b6948

Please sign in to comment.