Explicitly set an exponential backoff rate limiter for controller config (#416)

The rate limiter base delay and max delay values can be configured using
flags. The default values are 500ms for base delay and 15min for max delay.
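For instance, an operator wanting gentler retries could start the provider with something like -rate-limiter-base-delay=1s -rate-limiter-max-delay=5m (flag names are taken from the diff below; the exact invocation depends on how the manager binary is deployed).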
thunderboltsid committed Apr 29, 2024
1 parent 8d5ec97 commit e16f6d1
Showing 3 changed files with 118 additions and 19 deletions.
18 changes: 17 additions & 1 deletion controllers/options.go
@@ -1,10 +1,15 @@
package controllers

import "errors"
import (
    "errors"

    "k8s.io/client-go/util/workqueue"
)

// ControllerConfig is the configuration for cluster and machine controllers
type ControllerConfig struct {
    MaxConcurrentReconciles int
    RateLimiter             workqueue.RateLimiter
}

// ControllerConfigOpts is a function that can be used to configure the controller config
@@ -20,3 +25,14 @@ func WithMaxConcurrentReconciles(max int) ControllerConfigOpts {
        return nil
    }
}

// WithRateLimiter sets the rate limiter for the controller
func WithRateLimiter(rateLimiter workqueue.RateLimiter) ControllerConfigOpts {
    return func(c *ControllerConfig) error {
        if rateLimiter == nil {
            return errors.New("rate limiter cannot be nil")
        }
        c.RateLimiter = rateLimiter
        return nil
    }
}
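A minimal sketch (not part of the commit) of how these error-returning functional options compose, using only the exported names visible in this diff:

package main

import (
    "fmt"
    "time"

    "k8s.io/client-go/util/workqueue"

    "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/controllers"
)

func main() {
    config := &controllers.ControllerConfig{}
    opts := []controllers.ControllerConfigOpts{
        controllers.WithMaxConcurrentReconciles(10),
        controllers.WithRateLimiter(workqueue.NewItemExponentialFailureRateLimiter(500*time.Millisecond, 15*time.Minute)),
    }
    // Apply each option in turn; any validation failure surfaces as an error.
    for _, opt := range opts {
        if err := opt(config); err != nil {
            fmt.Println("invalid controller config:", err)
            return
        }
    }
    fmt.Println("max concurrent reconciles:", config.MaxConcurrentReconciles)
}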
36 changes: 36 additions & 0 deletions controllers/options_test.go
@@ -4,6 +4,7 @@ import (
"testing"

"github.com/stretchr/testify/assert"
"k8s.io/client-go/util/workqueue"
)

func TestWithMaxConcurrentReconciles(t *testing.T) {
@@ -37,3 +38,38 @@ func TestWithMaxConcurrentReconciles(t *testing.T) {
        })
    }
}

func TestWithRateLimiter(t *testing.T) {
    tests := []struct {
        name         string
        rateLimiter  workqueue.RateLimiter
        expectError  bool
        expectedType interface{}
    }{
        {
            name:         "TestWithRateLimiterNil",
            rateLimiter:  nil,
            expectError:  true,
            expectedType: nil,
        },
        {
            name:         "TestWithRateLimiterSet",
            rateLimiter:  workqueue.DefaultControllerRateLimiter(),
            expectError:  false,
            expectedType: &workqueue.MaxOfRateLimiter{},
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            opt := WithRateLimiter(tt.rateLimiter)
            config := &ControllerConfig{}
            err := opt(config)
            if tt.expectError {
                assert.Error(t, err)
            } else {
                assert.NoError(t, err)
                assert.IsType(t, tt.expectedType, config.RateLimiter)
            }
        })
    }
}
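Running go test ./controllers/ -run TestWithRateLimiter should exercise both cases (command illustrative, inferred from the file paths above).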
83 changes: 65 additions & 18 deletions main.go
@@ -17,37 +17,29 @@ limitations under the License.
package main

import (
    "errors"
    "flag"
    "os"
    "time"

    // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
    // to ensure that exec-entrypoint and run can make use of them.

    "go.uber.org/zap/zapcore"
    "golang.org/x/time/rate"
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    _ "k8s.io/client-go/plugin/pkg/client/auth"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/util/workqueue"
    capiv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/healthz"
    "sigs.k8s.io/controller-runtime/pkg/log/zap"
    //+kubebuilder:scaffold:imports

    infrav1alpha4 "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1alpha4"
    infrav1beta1 "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1beta1"
    "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/controllers"
    //+kubebuilder:scaffold:imports
)

var (
@@ -79,6 +71,10 @@ func main() {
        enableLeaderElection    bool
        probeAddr               string
        maxConcurrentReconciles int
        baseDelay               time.Duration
        maxDelay                time.Duration
        bucketSize              int
        qps                     int
    )
    flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
    flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
@@ -90,14 +86,12 @@ func main() {
"max-concurrent-reconciles",
defaultMaxConcurrentReconciles,
"The maximum number of allowed, concurrent reconciles.")
opts := zap.Options{
TimeEncoder: zapcore.RFC3339TimeEncoder,
}
opts.BindFlags(flag.CommandLine)
flag.DurationVar(&baseDelay, "rate-limiter-base-delay", 500*time.Millisecond, "The base delay for the rate limiter.")
flag.DurationVar(&maxDelay, "rate-limiter-max-delay", 15*time.Minute, "The maximum delay for the rate limiter.")
flag.IntVar(&bucketSize, "rate-limiter-bucket-size", 100, "The bucket size for the rate limiter.")
flag.IntVar(&qps, "rate-limiter-qps", 10, "The QPS for the rate limiter.")
flag.Parse()

ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))

mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
@@ -111,6 +105,12 @@ func main() {
        os.Exit(1)
    }

    rateLimiter, err := compositeRateLimiter(baseDelay, maxDelay, bucketSize, qps)
    if err != nil {
        setupLog.Error(err, "unable to create composite rate limiter")
        os.Exit(1)
    }

    // Setup the context that's going to be used in controllers and for the manager.
    ctx := ctrl.SetupSignalHandler()

@@ -137,6 +137,7 @@ func main() {
        configMapInformer,
        mgr.GetScheme(),
        controllers.WithMaxConcurrentReconciles(maxConcurrentReconciles),
        controllers.WithRateLimiter(rateLimiter),
    )
    if err != nil {
        setupLog.Error(err, "unable to create controller", "controller", "NutanixCluster")
@@ -153,6 +154,7 @@ func main() {
        configMapInformer,
        mgr.GetScheme(),
        controllers.WithMaxConcurrentReconciles(maxConcurrentReconciles),
        controllers.WithRateLimiter(rateLimiter),
    )
    if err != nil {
        setupLog.Error(err, "unable to create controller", "controller", "NutanixMachine")
@@ -179,3 +181,48 @@ func main() {
        os.Exit(1)
    }
}

// compositeRateLimiter builds a limiter similar to the one returned by
// workqueue.DefaultControllerRateLimiter, but with custom values.
func compositeRateLimiter(baseDelay, maxDelay time.Duration, bucketSize, qps int) (workqueue.RateLimiter, error) {
    // Validate the rate limiter configuration
    if err := validateRateLimiterConfig(baseDelay, maxDelay, bucketSize, qps); err != nil {
        return nil, err
    }
    exponentialBackoffLimiter := workqueue.NewItemExponentialFailureRateLimiter(baseDelay, maxDelay)
    bucketLimiter := &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(qps), bucketSize)}
    return workqueue.NewMaxOfRateLimiter(exponentialBackoffLimiter, bucketLimiter), nil
}

// validateRateLimiterConfig validates the rate limiter configuration parameters
func validateRateLimiterConfig(baseDelay, maxDelay time.Duration, bucketSize, qps int) error {
    // Check if baseDelay is a non-negative value
    if baseDelay < 0 {
        return errors.New("baseDelay cannot be negative")
    }

    // Check if maxDelay is non-negative and greater than or equal to baseDelay
    if maxDelay < 0 {
        return errors.New("maxDelay cannot be negative")
    }

    if maxDelay < baseDelay {
        return errors.New("maxDelay should be greater than or equal to baseDelay")
    }

    // Check if bucketSize is a positive number
    if bucketSize <= 0 {
        return errors.New("bucketSize must be positive")
    }

    // Check if qps is a positive number
    if qps <= 0 {
        return errors.New("qps must be positive")
    }

    // Check if bucketSize is at least as large as the QPS
    if bucketSize < qps {
        return errors.New("bucketSize must be at least as large as the QPS to handle bursts effectively")
    }

    return nil
}
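To make the composite behavior concrete, here is a small standalone sketch (not part of the commit) that uses the same construction with the default flag values; the per-item exponential backoff dominates until capped at the max delay, while the token bucket bounds overall requeue throughput:

package main

import (
    "fmt"
    "time"

    "golang.org/x/time/rate"
    "k8s.io/client-go/util/workqueue"
)

func main() {
    // Mirrors compositeRateLimiter above with the default values:
    // 500ms base delay, 15m max delay, QPS 10, bucket size 100.
    exp := workqueue.NewItemExponentialFailureRateLimiter(500*time.Millisecond, 15*time.Minute)
    bucket := &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}
    limiter := workqueue.NewMaxOfRateLimiter(exp, bucket)

    // Repeated failures of the same item back off exponentially:
    // 500ms, 1s, 2s, 4s, 8s, ... capped at 15m.
    for i := 0; i < 5; i++ {
        fmt.Println(limiter.When("example-item"))
    }

    // A successful reconcile resets the per-item backoff via Forget.
    limiter.Forget("example-item")
    fmt.Println(limiter.When("example-item")) // back to 500ms
}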
