feat: port status ConfigMap name setting to v1.18 #2

Merged
1 change: 1 addition & 0 deletions cluster-autoscaler/FAQ.md
@@ -663,6 +663,7 @@ The following startup parameters are supported for cluster autoscaler:
| `estimator` | Type of resource estimator to be used in scale up | binpacking
| `expander` | Type of node group expander to be used in scale up. | random
| `write-status-configmap` | Should CA write status information to a configmap | true
+ | `status-config-map-name` | The name of the status ConfigMap that CA writes | cluster-autoscaler-status
| `max-inactivity` | Maximum time from last recorded autoscaler activity before automatic restart | 10 minutes
| `max-failing-time` | Maximum time from last recorded successful autoscaler run before automatic restart | 15 minutes
| `balance-similar-node-groups` | Detect similar node groups and balance the number of nodes between them | false
32 changes: 16 additions & 16 deletions cluster-autoscaler/clusterstate/clusterstate_test.go
@@ -52,7 +52,7 @@ func TestOKWithScaleUp(t *testing.T) {
assert.NotNil(t, provider)

fakeClient := &fake.Clientset{}
- fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+ fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
@@ -94,7 +94,7 @@ func TestEmptyOK(t *testing.T) {
assert.NotNil(t, provider)

fakeClient := &fake.Clientset{}
- fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+ fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
@@ -135,7 +135,7 @@ func TestOKOneUnreadyNode(t *testing.T) {
assert.NotNil(t, provider)

fakeClient := &fake.Clientset{}
- fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+ fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
@@ -173,7 +173,7 @@ func TestNodeWithoutNodeGroupDontCrash(t *testing.T) {
provider.AddNode("no_ng", noNgNode)

fakeClient := &fake.Clientset{}
- fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+ fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
@@ -200,7 +200,7 @@ func TestOKOneUnreadyNodeWithScaleDownCandidate(t *testing.T) {
assert.NotNil(t, provider)

fakeClient := &fake.Clientset{}
- fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+ fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
@@ -265,7 +265,7 @@ func TestMissingNodes(t *testing.T) {
assert.NotNil(t, provider)

fakeClient := &fake.Clientset{}
- fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+ fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
@@ -307,7 +307,7 @@ func TestTooManyUnready(t *testing.T) {

assert.NotNil(t, provider)
fakeClient := &fake.Clientset{}
- fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+ fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
@@ -331,7 +331,7 @@ func TestExpiredScaleUp(t *testing.T) {
assert.NotNil(t, provider)

fakeClient := &fake.Clientset{}
- fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+ fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
@@ -357,7 +357,7 @@ func TestRegisterScaleDown(t *testing.T) {
assert.NotNil(t, provider)

fakeClient := &fake.Clientset{}
- fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+ fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
@@ -410,7 +410,7 @@ func TestUpcomingNodes(t *testing.T) {

assert.NotNil(t, provider)
fakeClient := &fake.Clientset{}
- fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+ fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
@@ -433,7 +433,7 @@ func TestIncorrectSize(t *testing.T) {
provider.AddNode("ng1", ng1_1)
assert.NotNil(t, provider)
fakeClient := &fake.Clientset{}
- fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+ fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
@@ -469,7 +469,7 @@ func TestUnregisteredNodes(t *testing.T) {
provider.AddNode("ng1", ng1_2)

fakeClient := &fake.Clientset{}
- fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+ fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
@@ -621,7 +621,7 @@ func TestScaleUpBackoff(t *testing.T) {
assert.NotNil(t, provider)

fakeClient := &fake.Clientset{}
- fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+ fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
@@ -689,7 +689,7 @@ func TestGetClusterSize(t *testing.T) {
provider.AddNode("notAutoscaledNode", notAutoscaledNode)

fakeClient := &fake.Clientset{}
- fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+ fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
@@ -731,7 +731,7 @@ func TestUpdateScaleUp(t *testing.T) {
provider := testprovider.NewTestCloudProvider(nil, nil)
provider.AddNodeGroup("ng1", 1, 10, 5)
fakeClient := &fake.Clientset{}
- fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+ fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
clusterstate := NewClusterStateRegistry(
provider,
ClusterStateRegistryConfig{
@@ -809,7 +809,7 @@ func TestScaleUpFailures(t *testing.T) {
assert.NotNil(t, provider)

fakeClient := &fake.Clientset{}
- fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false)
+ fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{}, fakeLogRecorder, newBackoff())

clusterstate.RegisterFailedScaleUp(provider.GetNodeGroup("ng1"), metrics.Timeout, now)
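Every test above threads the same five arguments through `utils.NewStatusMapRecorder`. A small helper along these lines (a hypothetical refactor, not part of this diff; it assumes the imports the test file already uses) would confine any future signature change to one place:

```go
// newFakeLogRecorder wraps the recorder construction repeated in these tests.
// Hypothetical helper, relying on the existing test imports
// (fake, kube_record, utils).
func newFakeLogRecorder(t *testing.T) *utils.LogEventRecorder {
	t.Helper()
	fakeClient := &fake.Clientset{}
	recorder, err := utils.NewStatusMapRecorder(fakeClient, "kube-system",
		kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
	if err != nil {
		t.Fatalf("NewStatusMapRecorder: %v", err)
	}
	return recorder
}
```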
16 changes: 7 additions & 9 deletions cluster-autoscaler/clusterstate/utils/status.go
@@ -33,8 +33,6 @@ import (
)

const (
- // StatusConfigMapName is the name of ConfigMap with status.
- StatusConfigMapName = "cluster-autoscaler-status"
// ConfigMapLastUpdatedKey is the name of annotation informing about status ConfigMap last update.
ConfigMapLastUpdatedKey = "cluster-autoscaler.kubernetes.io/last-updated"
// ConfigMapLastUpdateFormat is the timestamp format used for last update annotation in status ConfigMap
@@ -65,11 +63,11 @@ func (ler *LogEventRecorder) Eventf(eventtype, reason, message string, args ...i
// NewStatusMapRecorder creates a LogEventRecorder creating events on status configmap.
// If the configmap doesn't exist it will be created (with 'Initializing' status).
// If active == false the map will not be created and no events will be recorded.
- func NewStatusMapRecorder(kubeClient kube_client.Interface, namespace string, recorder record.EventRecorder, active bool) (*LogEventRecorder, error) {
+ func NewStatusMapRecorder(kubeClient kube_client.Interface, namespace string, recorder record.EventRecorder, active bool, statusConfigMapName string) (*LogEventRecorder, error) {
var mapObj runtime.Object
var err error
if active {
- mapObj, err = WriteStatusConfigMap(kubeClient, namespace, "Initializing", nil)
+ mapObj, err = WriteStatusConfigMap(kubeClient, namespace, "Initializing", nil, statusConfigMapName)
if err != nil {
return nil, errors.New("Failed to init status ConfigMap")
}
@@ -84,14 +82,14 @@ func NewStatusMapRecorder(kubeClient kube_client.Interface, namespace string, re
// WriteStatusConfigMap updates the status ConfigMap with a given message, or creates a new
// ConfigMap if it doesn't exist. If logRecorder is passed and configmap update is successful
// logRecorder's internal reference will be updated.
- func WriteStatusConfigMap(kubeClient kube_client.Interface, namespace string, msg string, logRecorder *LogEventRecorder) (*apiv1.ConfigMap, error) {
+ func WriteStatusConfigMap(kubeClient kube_client.Interface, namespace string, msg string, logRecorder *LogEventRecorder, statusConfigMapName string) (*apiv1.ConfigMap, error) {
statusUpdateTime := time.Now().Format(ConfigMapLastUpdateFormat)
statusMsg := fmt.Sprintf("Cluster-autoscaler status at %s:\n%v", statusUpdateTime, msg)
var configMap *apiv1.ConfigMap
var getStatusError, writeStatusError error
var errMsg string
maps := kubeClient.CoreV1().ConfigMaps(namespace)
- configMap, getStatusError = maps.Get(context.TODO(), StatusConfigMapName, metav1.GetOptions{})
+ configMap, getStatusError = maps.Get(context.TODO(), statusConfigMapName, metav1.GetOptions{})
if getStatusError == nil {
configMap.Data["status"] = statusMsg
if configMap.ObjectMeta.Annotations == nil {
@@ -103,7 +101,7 @@ func WriteStatusConfigMap(kubeClient kube_client.Interface, namespace string, ms
configMap = &apiv1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
- Name: StatusConfigMapName,
+ Name: statusConfigMapName,
Annotations: map[string]string{
ConfigMapLastUpdatedKey: statusUpdateTime,
},
@@ -133,9 +131,9 @@ }
}

// DeleteStatusConfigMap deletes status configmap
- func DeleteStatusConfigMap(kubeClient kube_client.Interface, namespace string) error {
+ func DeleteStatusConfigMap(kubeClient kube_client.Interface, namespace string, statusConfigMapName string) error {
maps := kubeClient.CoreV1().ConfigMaps(namespace)
- err := maps.Delete(context.TODO(), StatusConfigMapName, metav1.DeleteOptions{})
+ err := maps.Delete(context.TODO(), statusConfigMapName, metav1.DeleteOptions{})
if err != nil {
klog.Error("Failed to delete status configmap")
}
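A minimal sketch of the re-threaded name parameter in action, using a fake clientset. The signature matches this diff; the name `my-cool-configmap` is just the illustrative value the PR's tests use:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
)

func main() {
	client := fake.NewSimpleClientset()

	// The trailing argument is the new one: the status ConfigMap is created
	// under the caller-supplied name rather than a package-level constant.
	if _, err := utils.WriteStatusConfigMap(client, "kube-system", "Initializing", nil, "my-cool-configmap"); err != nil {
		panic(err)
	}

	// The map is now retrievable under the custom name.
	cm, err := client.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "my-cool-configmap", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(cm.Data["status"]) // "Cluster-autoscaler status at <timestamp>:\nInitializing"
}
```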
12 changes: 6 additions & 6 deletions cluster-autoscaler/clusterstate/utils/status_test.go
@@ -48,7 +48,7 @@ func setUpTest(t *testing.T) *testInfo {
configMap: &apiv1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
- Name: StatusConfigMapName,
+ Name: "my-cool-configmap",
},
Data: map[string]string{},
},
@@ -61,7 +61,7 @@ func setUpTest(t *testing.T) *testInfo {
result.client.Fake.AddReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
get := action.(core.GetAction)
assert.Equal(result.t, namespace, get.GetNamespace())
- assert.Equal(result.t, StatusConfigMapName, get.GetName())
+ assert.Equal(result.t, "my-cool-configmap", get.GetName())
result.getCalled = true
if result.getError != nil {
return true, nil, result.getError
@@ -78,7 +78,7 @@ func setUpTest(t *testing.T) *testInfo {
create := action.(core.CreateAction)
assert.Equal(result.t, namespace, create.GetNamespace())
configMap := create.GetObject().(*apiv1.ConfigMap)
- assert.Equal(result.t, StatusConfigMapName, configMap.ObjectMeta.Name)
+ assert.Equal(result.t, "my-cool-configmap", configMap.ObjectMeta.Name)
result.createCalled = true
return true, configMap, nil
})
@@ -87,7 +87,7 @@ func setUpTest(t *testing.T) *testInfo {

func TestWriteStatusConfigMapExisting(t *testing.T) {
ti := setUpTest(t)
- result, err := WriteStatusConfigMap(ti.client, ti.namespace, "TEST_MSG", nil)
+ result, err := WriteStatusConfigMap(ti.client, ti.namespace, "TEST_MSG", nil, "my-cool-configmap")
assert.Equal(t, ti.configMap, result)
assert.Contains(t, result.Data["status"], "TEST_MSG")
assert.Contains(t, result.ObjectMeta.Annotations, ConfigMapLastUpdatedKey)
@@ -100,7 +100,7 @@ func TestWriteStatusConfigMapExisting(t *testing.T) {
func TestWriteStatusConfigMapCreate(t *testing.T) {
ti := setUpTest(t)
ti.getError = kube_errors.NewNotFound(apiv1.Resource("configmap"), "nope, not found")
- result, err := WriteStatusConfigMap(ti.client, ti.namespace, "TEST_MSG", nil)
+ result, err := WriteStatusConfigMap(ti.client, ti.namespace, "TEST_MSG", nil, "my-cool-configmap")
assert.Contains(t, result.Data["status"], "TEST_MSG")
assert.Contains(t, result.ObjectMeta.Annotations, ConfigMapLastUpdatedKey)
assert.Nil(t, err)
@@ -112,7 +112,7 @@ func TestWriteStatusConfigMapCreate(t *testing.T) {
func TestWriteStatusConfigMapError(t *testing.T) {
ti := setUpTest(t)
ti.getError = errors.New("stuff bad")
- result, err := WriteStatusConfigMap(ti.client, ti.namespace, "TEST_MSG", nil)
+ result, err := WriteStatusConfigMap(ti.client, ti.namespace, "TEST_MSG", nil, "my-cool-configmap")
assert.NotNil(t, err)
assert.Contains(t, err.Error(), "stuff bad")
assert.Nil(t, result)
2 changes: 2 additions & 0 deletions cluster-autoscaler/config/autoscaling_options.go
@@ -109,6 +109,8 @@ type AutoscalingOptions struct {
NodeDeletionDelayTimeout time.Duration
// WriteStatusConfigMap tells if the status information should be written to a ConfigMap
WriteStatusConfigMap bool
+ // StatusConfigMapName is the name of the status ConfigMap that CA writes
+ StatusConfigMapName string
// BalanceSimilarNodeGroups enables logic that identifies node groups with similar machines and tries to balance node count between them.
BalanceSimilarNodeGroups bool
// ConfigNamespace is the namespace cluster-autoscaler is running in and all related configmaps live in
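The new field pairs with the `status-config-map-name` entry added to the FAQ table above. The flag binding itself is not part of this diff; the sketch below shows how main.go would presumably wire it, with the flag name and default assumed from the FAQ entry:

```go
package main

import (
	flag "github.com/spf13/pflag"

	"k8s.io/autoscaler/cluster-autoscaler/config"
)

// Assumed flag name and default, mirroring the FAQ entry above.
var statusConfigMapName = flag.String("status-config-map-name", "cluster-autoscaler-status",
	"The name of the status ConfigMap that CA writes")

func createAutoscalingOptions() config.AutoscalingOptions {
	return config.AutoscalingOptions{
		// ...other options elided...
		WriteStatusConfigMap: true,
		StatusConfigMapName:  *statusConfigMapName,
	}
}
```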
4 changes: 2 additions & 2 deletions cluster-autoscaler/context/autoscaling_context.go
@@ -111,12 +111,12 @@ func NewAutoscalingKubeClients(opts config.AutoscalingOptions, kubeClient, event
listerRegistryStopChannel := make(chan struct{})
listerRegistry := kube_util.NewListerRegistryWithDefaultListers(kubeClient, listerRegistryStopChannel)
kubeEventRecorder := kube_util.CreateEventRecorder(eventsKubeClient)
- logRecorder, err := utils.NewStatusMapRecorder(kubeClient, opts.ConfigNamespace, kubeEventRecorder, opts.WriteStatusConfigMap)
+ logRecorder, err := utils.NewStatusMapRecorder(kubeClient, opts.ConfigNamespace, kubeEventRecorder, opts.WriteStatusConfigMap, opts.StatusConfigMapName)
if err != nil {
klog.Error("Failed to initialize status configmap, unable to write status events")
// Get a dummy, so we can at least safely call the methods
// TODO(maciekpytel): recover from this after successful status configmap update?
- logRecorder, _ = utils.NewStatusMapRecorder(eventsKubeClient, opts.ConfigNamespace, kubeEventRecorder, false)
+ logRecorder, _ = utils.NewStatusMapRecorder(eventsKubeClient, opts.ConfigNamespace, kubeEventRecorder, false, opts.StatusConfigMapName)
}

return &AutoscalingKubeClients{
2 changes: 1 addition & 1 deletion cluster-autoscaler/core/scale_test_common.go
@@ -149,7 +149,7 @@ func NewScaleTestAutoscalingContext(
// Not enough buffer space causes the test to hang without printing any logs.
// This is not useful.
fakeRecorder := kube_record.NewFakeRecorder(100)
- fakeLogRecorder, err := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false)
+ fakeLogRecorder, err := utils.NewStatusMapRecorder(fakeClient, "kube-system", fakeRecorder, false, "my-cool-configmap")
if err != nil {
return context.AutoscalingContext{}, err
}
6 changes: 3 additions & 3 deletions cluster-autoscaler/core/static_autoscaler.go
@@ -285,7 +285,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError
if autoscalingContext.WriteStatusConfigMap {
status := a.clusterStateRegistry.GetStatus(currentTime)
utils.WriteStatusConfigMap(autoscalingContext.ClientSet, autoscalingContext.ConfigNamespace,
- status.GetReadableString(), a.AutoscalingContext.LogRecorder)
+ status.GetReadableString(), a.AutoscalingContext.LogRecorder, a.AutoscalingContext.StatusConfigMapName)
}

// This deferred processor execution allows the processors to handle a situation when a scale-(up|down)
@@ -682,7 +682,7 @@ func (a *StaticAutoscaler) ExitCleanUp() {
if !a.AutoscalingContext.WriteStatusConfigMap {
return
}
- utils.DeleteStatusConfigMap(a.AutoscalingContext.ClientSet, a.AutoscalingContext.ConfigNamespace)
+ utils.DeleteStatusConfigMap(a.AutoscalingContext.ClientSet, a.AutoscalingContext.ConfigNamespace, a.AutoscalingContext.StatusConfigMapName)

a.clusterStateRegistry.Stop()
}
@@ -747,7 +747,7 @@ func (a *StaticAutoscaler) onEmptyCluster(status string, emitEvent bool) {
metrics.UpdateClusterSafeToAutoscale(false)
metrics.UpdateNodesCount(0, 0, 0, 0, 0)
if a.AutoscalingContext.WriteStatusConfigMap {
- utils.WriteStatusConfigMap(a.AutoscalingContext.ClientSet, a.AutoscalingContext.ConfigNamespace, status, a.AutoscalingContext.LogRecorder)
+ utils.WriteStatusConfigMap(a.AutoscalingContext.ClientSet, a.AutoscalingContext.ConfigNamespace, status, a.AutoscalingContext.LogRecorder, a.AutoscalingContext.StatusConfigMapName)
}
if emitEvent {
a.AutoscalingContext.LogRecorder.Eventf(apiv1.EventTypeWarning, "ClusterUnhealthy", status)
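To close the loop on the lifecycle (write in `RunOnce` and `onEmptyCluster`, delete in `ExitCleanUp`), a hypothetical test of the delete path under a custom name, not part of this diff:

```go
package utils_test

import (
	"context"
	"testing"

	kube_errors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
)

func TestDeleteCustomNamedStatusConfigMap(t *testing.T) {
	client := fake.NewSimpleClientset()

	// Write the status map under a custom name, then delete it the way
	// ExitCleanUp now does, threading the same name through.
	if _, err := utils.WriteStatusConfigMap(client, "kube-system", "Initializing", nil, "my-cool-configmap"); err != nil {
		t.Fatal(err)
	}
	if err := utils.DeleteStatusConfigMap(client, "kube-system", "my-cool-configmap"); err != nil {
		t.Fatal(err)
	}

	// The map should be gone afterwards.
	_, err := client.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "my-cool-configmap", metav1.GetOptions{})
	if !kube_errors.IsNotFound(err) {
		t.Fatalf("expected status ConfigMap to be deleted, got: %v", err)
	}
}
```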