diff --git a/admiral/pkg/apis/admiral/filters/filters.go b/admiral/pkg/apis/admiral/filters/filters.go
index ff6681ba..d37ba5ff 100644
--- a/admiral/pkg/apis/admiral/filters/filters.go
+++ b/admiral/pkg/apis/admiral/filters/filters.go
@@ -28,16 +28,3 @@ func Logger(inner http.Handler, name string) http.Handler {
 		)
 	})
 }
-
-func Auth(inner http.Handler, name string) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-
-		log.Printf(
-			"Auth Logger for endpoint %s", name,
-		)
-		//TODO implement authnz
-
-		inner.ServeHTTP(w, r)
-
-	})
-}
diff --git a/admiral/pkg/clusters/sidecar_handler_test.go b/admiral/pkg/clusters/sidecar_handler_test.go
index 4eaddca0..3fd1c50e 100644
--- a/admiral/pkg/clusters/sidecar_handler_test.go
+++ b/admiral/pkg/clusters/sidecar_handler_test.go
@@ -1 +1,32 @@
 package clusters
+
+import (
+	"context"
+	"github.com/stretchr/testify/assert"
+	"istio.io/client-go/pkg/apis/networking/v1alpha3"
+	"testing"
+)
+
+func TestSidecarHandler_Added(t *testing.T) {
+	var dh SidecarHandler
+	var ctx context.Context
+	var obj *v1alpha3.Sidecar
+	err := dh.Added(ctx, obj)
+	assert.Nil(t, err)
+}
+
+func TestSidecarHandler_Updated(t *testing.T) {
+	var dh SidecarHandler
+	var ctx context.Context
+	var obj *v1alpha3.Sidecar
+	err := dh.Updated(ctx, obj)
+	assert.Nil(t, err)
+}
+
+func TestSidecarHandler_Deleted(t *testing.T) {
+	var dh SidecarHandler
+	var ctx context.Context
+	var obj *v1alpha3.Sidecar
+	err := dh.Deleted(ctx, obj)
+	assert.Nil(t, err)
+}
diff --git a/admiral/pkg/clusters/types_test.go b/admiral/pkg/clusters/types_test.go
index e4274254..2fdca067 100644
--- a/admiral/pkg/clusters/types_test.go
+++ b/admiral/pkg/clusters/types_test.go
@@ -1,7 +1,9 @@
 package clusters
 
 import (
+	"github.com/stretchr/testify/assert"
 	"sync"
+	"testing"
 	"time"
 
 	"github.com/google/go-cmp/cmp/cmpopts"
@@ -42,3 +44,36 @@ func setupForTypeTests() {
 		common.InitializeConfig(admiralParamsForTypesTests())
 	})
 }
+
+func TestRangeRemoteControllers(t *testing.T) {
+	setupForTypeTests()
+
+	rr := &RemoteRegistry{
+		Mutex:             sync.Mutex{},
+		remoteControllers: map[string]*RemoteController{"test": &RemoteController{}},
+	}
+
+	counter := 0
+	// testFunc is a function that tests the RemoteController
+	testFunc := func(k string, v *RemoteController) {
+		counter = counter + 1
+	}
+
+	// TestRangeRemoteControllers is a table-driven test for RangeRemoteControllers
+	tests := []struct {
+		name     string
+		expected int
+	}{
+		{
+			name:     "TestRangeRemoteControllers",
+			expected: 1,
+		},
+	}
+	for _, tt := range tests {
+		counter = 0
+		t.Run(tt.name, func(t *testing.T) {
+			rr.RangeRemoteControllers(testFunc)
+			assert.Equal(t, tt.expected, counter)
+		})
+	}
+}
diff --git a/admiral/pkg/controller/admiral/configmap_test.go b/admiral/pkg/controller/admiral/configmap_test.go
index 46b8da55..292985b7 100644
--- a/admiral/pkg/controller/admiral/configmap_test.go
+++ b/admiral/pkg/controller/admiral/configmap_test.go
@@ -2,6 +2,7 @@ package admiral
 
 import (
 	"context"
+	"github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
 	"testing"
 	"time"
 
@@ -172,3 +173,27 @@ func TestConfigMapController_PutConfigMap(t *testing.T) {
 	}
 
 }
+
+func TestNewConfigMapController(t *testing.T) {
+	clientLoader := &loader.FakeClientLoader{}
+	seIPPrefix := "prefix"
+	controller, err := NewConfigMapController(seIPPrefix, clientLoader)
+	if err != nil {
+		t.Errorf("No error expected. Err: %v", err)
+	}
+
+	if controller.ServiceEntryIPPrefix != seIPPrefix {
+		t.Errorf("Expected %v but got %v", seIPPrefix, controller.ServiceEntryIPPrefix)
+	}
+}
+
+func TestConfigMapController_GetIPPrefixForServiceEntries(t *testing.T) {
+	seIPPrefix := "prefix"
+	controller := ConfigMapController{
+		ServiceEntryIPPrefix: seIPPrefix,
+	}
+
+	if controller.GetIPPrefixForServiceEntries() != seIPPrefix {
+		t.Errorf("Expected %v but got %v", seIPPrefix, controller.GetIPPrefixForServiceEntries())
+	}
+}
diff --git a/admiral/pkg/controller/admiral/controller.go b/admiral/pkg/controller/admiral/controller.go
index 3b5dcd23..9bab25de 100644
--- a/admiral/pkg/controller/admiral/controller.go
+++ b/admiral/pkg/controller/admiral/controller.go
@@ -95,156 +95,9 @@ func NewController(name, clusterEndpoint string, stopCh <-chan struct{}, delegat
 		queue:    workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
 	}
 	controller.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc: func(obj interface{}) {
-			var (
-				txId                    = uuid.NewString()
-				metaName, metaNamespace string
-			)
-			meta, ok := obj.(metav1.Object)
-			if ok && meta != nil && meta.GetResourceVersion() != "" {
-				txId = common.GenerateTxId(meta, controller.name, txId)
-				metaName = meta.GetName()
-				metaNamespace = meta.GetNamespace()
-			}
-			ctxLogger := log.WithFields(log.Fields{
-				"op":         operationInformerEvents,
-				"name":       metaName,
-				"namespace":  metaNamespace,
-				"controller": controller.name,
-				"cluster":    controller.cluster,
-				"txId":       txId,
-			})
-			key, err := cache.MetaNamespaceKeyFunc(obj)
-			if err == nil {
-				ctxLogger.Infof(ControllerLogFormat, taskAddEventToQueue, controller.queue.Len(), Add+" Event")
-				controller.queue.Add(InformerCacheObj{
-					key:       key,
-					eventType: Add,
-					obj:       obj,
-					txId:      txId,
-					ctxLogger: ctxLogger,
-				})
-			}
-		},
-		UpdateFunc: func(oldObj, newObj interface{}) {
-			var (
-				ctx                     = context.Background()
-				txId                    = uuid.NewString()
-				metaName, metaNamespace string
-			)
-			meta, ok := newObj.(metav1.Object)
-			if ok && meta != nil && meta.GetResourceVersion() != "" {
-				txId = common.GenerateTxId(meta, controller.name, txId)
-				metaName = meta.GetName()
-				metaNamespace = meta.GetNamespace()
-			}
-			ctx = context.WithValue(ctx, "txId", txId)
-			ctxLogger := log.WithFields(log.Fields{
-				"op":         operationInformerEvents,
-				"name":       metaName,
-				"namespace":  metaNamespace,
-				"controller": controller.name,
-				"cluster":    controller.cluster,
-				"txId":       txId,
-			})
-
-			key, err := cache.MetaNamespaceKeyFunc(newObj)
-			if err == nil {
-				ctxLogger.Infof(ControllerLogFormat, taskAddEventToQueue, controller.queue.Len(), Update+" Event")
-				// Check if the event has already been processed or the resource version
-				// has changed. If either the event has not been processed yet or the
-				// resource version has changed only then add it to the queue
-
-				status, err := controller.delegator.GetProcessItemStatus(newObj)
-				if err != nil {
-					ctxLogger.Errorf(err.Error())
-				}
-
-				// Check if the generation of the object has changed
-				// if the generation of old and new object is same then we do not process the object
-				doesGenerationMatch, err := controller.delegator.DoesGenerationMatch(ctxLogger, oldObj, newObj)
-				if err != nil {
-					ctxLogger.Errorf(ControllerLogFormat, taskAddEventToQueue, controller.queue.Len(), err.Error())
-				}
-				if status == common.Processed && doesGenerationMatch {
-					ctxLogger.Infof(ControllerLogFormat, taskAddEventToQueue, controller.queue.Len(),
-						fmt.Sprintf("skipped processing event due to status=%s doesGenerationMatch=%v",
-							status, doesGenerationMatch))
-					return
-				}
-
-				// Check if the generation of the object has changed
-				// if the generation of old and new object is same then we do not process the object
-				isOnlyReplicaCountChanged, err := controller.delegator.IsOnlyReplicaCountChanged(ctxLogger, oldObj, newObj)
-				if err != nil {
-					ctxLogger.Errorf(ControllerLogFormat, taskAddEventToQueue, controller.queue.Len(), err.Error())
-				}
-				if status == common.Processed && isOnlyReplicaCountChanged {
-					ctxLogger.Infof(ControllerLogFormat, taskAddEventToQueue, controller.queue.Len(),
-						fmt.Sprintf("skipped processing event due to status=%s isOnlyReplicaCountChanged=%v",
-							status, isOnlyReplicaCountChanged))
-					return
-				}
-
-				controller.delegator.LogValueOfAdmiralIoIgnore(newObj)
-				latestObj, isVersionChanged := checkIfResourceVersionHasIncreased(ctxLogger, ctx, oldObj, newObj, delegator)
-				txId, ctxLogger = updateTxId(ctx, newObj, latestObj, txId, ctxLogger, controller)
-
-				if status == common.NotProcessed || isVersionChanged {
-					ctxLogger.Infof(ControllerLogFormat, taskAddEventToQueue, controller.queue.Len(),
-						fmt.Sprintf("version changed=%v", isVersionChanged))
-					controller.queue.Add(
-						InformerCacheObj{
-							key:       key,
-							eventType: Update,
-							obj:       latestObj,
-							oldObj:    oldObj,
-							txId:      txId,
-							ctxLogger: ctxLogger,
-						})
-					// If the pod is running in Active Mode we update the status to ProcessingInProgress
-					// to prevent any duplicate events that might be added to the queue if there is full
-					// resync that happens and a similar event in the queue is not processed yet
-					if !commonUtil.IsAdmiralReadOnly() {
-						ctxLogger.Infof(ControllerLogFormat, taskAddEventToQueue, controller.queue.Len(),
-							"status=%s", common.ProcessingInProgress)
-						controller.delegator.UpdateProcessItemStatus(latestObj, common.ProcessingInProgress)
-					}
-				}
-			}
-		},
-		DeleteFunc: func(obj interface{}) {
-			var (
-				txId = uuid.NewString()
-			)
-			key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
-			if err == nil {
-				meta, ok := obj.(metav1.Object)
-				var metaName, metaNamespace string
-				if ok && meta != nil && meta.GetResourceVersion() != "" {
-					txId = common.GenerateTxId(meta, controller.name, txId)
-					metaName = meta.GetName()
-					metaNamespace = meta.GetNamespace()
-				}
-				ctxLogger := log.WithFields(log.Fields{
-					"op":         operationInformerEvents,
-					"name":       metaName,
-					"namespace":  metaNamespace,
-					"controller": controller.name,
-					"cluster":    controller.cluster,
-					"txId":       txId,
-				})
-				ctxLogger.Infof(ControllerLogFormat, taskAddEventToQueue, controller.queue.Len(), Delete+" Event")
-				controller.queue.Add(
-					InformerCacheObj{
-						key:       key,
-						eventType: Delete,
-						obj:       obj,
-						txId:      txId,
-						ctxLogger: ctxLogger,
-					})
-			}
-		},
+		AddFunc:    controller.AddFuncImpl,
+		UpdateFunc: controller.UpdateFuncImpl,
+		DeleteFunc: controller.DeleteFuncImpl,
 	})
 	go controller.Run(stopCh)
 	return controller
@@ -272,6 +125,159 @@ func updateTxId(
 	return txId, ctxLogger
 }
 
+func (c *Controller) AddFuncImpl(obj interface{}) {
+	var (
+		txId                    = uuid.NewString()
+		metaName, metaNamespace string
+	)
+	meta, ok := obj.(metav1.Object)
+	if ok && meta != nil && meta.GetResourceVersion() != "" {
+		txId = common.GenerateTxId(meta, c.name, txId)
+		metaName = meta.GetName()
+		metaNamespace = meta.GetNamespace()
+	}
+	ctxLogger := log.WithFields(log.Fields{
+		"op":         operationInformerEvents,
+		"name":       metaName,
+		"namespace":  metaNamespace,
+		"controller": c.name,
+		"cluster":    c.cluster,
+		"txId":       txId,
+	})
+	key, err := cache.MetaNamespaceKeyFunc(obj)
+	if err == nil {
+		ctxLogger.Infof(ControllerLogFormat, taskAddEventToQueue, c.queue.Len(), Add+" Event")
+		c.queue.Add(InformerCacheObj{
+			key:       key,
+			eventType: Add,
+			obj:       obj,
+			txId:      txId,
+			ctxLogger: ctxLogger,
+		})
+	}
+}
+
+func (c *Controller) UpdateFuncImpl(oldObj, newObj interface{}) {
+	var (
+		ctx                     = context.Background()
+		txId                    = uuid.NewString()
+		metaName, metaNamespace string
+	)
+	meta, ok := newObj.(metav1.Object)
+	if ok && meta != nil && meta.GetResourceVersion() != "" {
+		txId = common.GenerateTxId(meta, c.name, txId)
+		metaName = meta.GetName()
+		metaNamespace = meta.GetNamespace()
+	}
+	ctx = context.WithValue(ctx, "txId", txId)
+	ctxLogger := log.WithFields(log.Fields{
+		"op":         operationInformerEvents,
+		"name":       metaName,
+		"namespace":  metaNamespace,
+		"controller": c.name,
+		"cluster":    c.cluster,
+		"txId":       txId,
+	})
+
+	key, err := cache.MetaNamespaceKeyFunc(newObj)
+	if err == nil {
+		ctxLogger.Infof(ControllerLogFormat, taskAddEventToQueue, c.queue.Len(), Update+" Event")
+		// Check if the event has already been processed or the resource version
+		// has changed. If either the event has not been processed yet or the
+		// resource version has changed only then add it to the queue
+
+		status, err := c.delegator.GetProcessItemStatus(newObj)
+		if err != nil {
+			ctxLogger.Errorf(err.Error())
+		}
+
+		// Check if the generation of the object has changed
+		// if the generation of old and new object is same then we do not process the object
+		doesGenerationMatch, err := c.delegator.DoesGenerationMatch(ctxLogger, oldObj, newObj)
+		if err != nil {
+			ctxLogger.Errorf(ControllerLogFormat, taskAddEventToQueue, c.queue.Len(), err.Error())
+		}
+		if status == common.Processed && doesGenerationMatch {
+			ctxLogger.Infof(ControllerLogFormat, taskAddEventToQueue, c.queue.Len(),
+				fmt.Sprintf("skipped processing event due to status=%s doesGenerationMatch=%v",
+					status, doesGenerationMatch))
+			return
+		}
+
+		// Check if the generation of the object has changed
+		// if the generation of old and new object is same then we do not process the object
+		isOnlyReplicaCountChanged, err := c.delegator.IsOnlyReplicaCountChanged(ctxLogger, oldObj, newObj)
+		if err != nil {
+			ctxLogger.Errorf(ControllerLogFormat, taskAddEventToQueue, c.queue.Len(), err.Error())
+		}
+		if status == common.Processed && isOnlyReplicaCountChanged {
+			ctxLogger.Infof(ControllerLogFormat, taskAddEventToQueue, c.queue.Len(),
+				fmt.Sprintf("skipped processing event due to status=%s isOnlyReplicaCountChanged=%v",
+					status, isOnlyReplicaCountChanged))
+			return
+		}
+
+		c.delegator.LogValueOfAdmiralIoIgnore(newObj)
+		latestObj, isVersionChanged := checkIfResourceVersionHasIncreased(ctxLogger, ctx, oldObj, newObj, c.delegator)
+		txId, ctxLogger = updateTxId(ctx, newObj, latestObj, txId, ctxLogger, *c)
+
+		if status == common.NotProcessed || isVersionChanged {
+			ctxLogger.Infof(ControllerLogFormat, taskAddEventToQueue, c.queue.Len(),
+				fmt.Sprintf("version changed=%v", isVersionChanged))
+			c.queue.Add(
+				InformerCacheObj{
+					key:       key,
+					eventType: Update,
+					obj:       latestObj,
+					oldObj:    oldObj,
+					txId:      txId,
+					ctxLogger: ctxLogger,
+				})
+			// If the pod is running in Active Mode we update the status to ProcessingInProgress
+			// to prevent any duplicate events that might be added to the queue if there is full
+			// resync that happens and a similar event in the queue is not processed yet
+			if !commonUtil.IsAdmiralReadOnly() {
+				ctxLogger.Infof(ControllerLogFormat, taskAddEventToQueue, c.queue.Len(),
+					"status=%s", common.ProcessingInProgress)
+				c.delegator.UpdateProcessItemStatus(latestObj, common.ProcessingInProgress)
+			}
+		}
+	}
+}
+
+func (c *Controller) DeleteFuncImpl(obj interface{}) {
+	var (
+		txId = uuid.NewString()
+	)
+	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
+	if err == nil {
+		meta, ok := obj.(metav1.Object)
+		var metaName, metaNamespace string
+		if ok && meta != nil && meta.GetResourceVersion() != "" {
+			txId = common.GenerateTxId(meta, c.name, txId)
+			metaName = meta.GetName()
+			metaNamespace = meta.GetNamespace()
+		}
+		ctxLogger := log.WithFields(log.Fields{
+			"op":         operationInformerEvents,
+			"name":       metaName,
+			"namespace":  metaNamespace,
+			"controller": c.name,
+			"cluster":    c.cluster,
+			"txId":       txId,
+		})
+		ctxLogger.Infof(ControllerLogFormat, taskAddEventToQueue, c.queue.Len(), Delete+" Event")
+		c.queue.Add(
+			InformerCacheObj{
+				key:       key,
+				eventType: Delete,
+				obj:       obj,
+				txId:      txId,
+				ctxLogger: ctxLogger,
+			})
+	}
+}
+
 // Run starts the controller until it receives a message over stopCh
 func (c *Controller) Run(stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
diff --git a/admiral/pkg/controller/admiral/controller_test.go b/admiral/pkg/controller/admiral/controller_test.go
index a7b716e5..a48322d2 100644
--- a/admiral/pkg/controller/admiral/controller_test.go
+++ b/admiral/pkg/controller/admiral/controller_test.go
@@ -3,6 +3,9 @@ package admiral
 import (
 	"context"
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+	"strings"
 	"testing"
 
 	log "github.com/sirupsen/logrus"
@@ -280,3 +283,182 @@ func TestShouldRetry(t *testing.T) {
 		})
 	}
 }
+
+// Successfully creates a new controller with given parameters
+/*func TestNewControllerCreation(t *testing.T) {
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+
+	delegator := new(MockDelegator)
+	informer := new(MockSharedIndexInformer)
+
+	controller := NewController("test-controller", "test-cluster", stopCh, delegator, informer)
+
+	if controller.name != "test-controller" {
+		t.Errorf("expected controller name to be 'test-controller', got %s", controller.name)
+	}
+	if controller.cluster != "test-cluster" {
+		t.Errorf("expected cluster to be 'test-cluster', got %s", controller.cluster)
+	}
+	if controller.delegator != delegator {
+		t.Errorf("expected delegator to be set")
+	}
+	if controller.informer != informer {
+		t.Errorf("expected informer to be set")
+	}
+}*/
+
+// Handles nil newObj or latestObj gracefully
+func TestUpdateTxIdHandlesNilObjectsGracefully(t *testing.T) {
+	ctx := context.Background()
+	var latestObj metav1.Object = nil
+	newObj := &metav1.ObjectMeta{ResourceVersion: "2"}
+	txId := "initialTxId"
+	ctxLogger := log.WithFields(log.Fields{})
+	controller := Controller{name: "testController", cluster: "testCluster"}
+
+	updatedTxId, updatedLogger := updateTxId(ctx, newObj, latestObj, txId, ctxLogger, controller)
+
+	if updatedTxId != txId {
+		t.Errorf("Expected txId to remain unchanged, got %s", updatedTxId)
+	}
+
+	if updatedLogger != ctxLogger {
+		t.Errorf("Expected logger to remain unchanged")
+	}
+}
+
+func TestUpdateTxIdWhenResourceVersionsDiffer(t *testing.T) {
+	ctx := context.Background()
+	latestObj := &metav1.ObjectMeta{ResourceVersion: "1"}
+	newObj := &metav1.ObjectMeta{ResourceVersion: "2"}
+	txId := "initialTxId"
+	ctxLogger := log.WithFields(log.Fields{})
+	controller := Controller{name: "testController", cluster: "testCluster"}
+
+	updatedTxId, updatedLogger := updateTxId(ctx, newObj, latestObj, txId, ctxLogger, controller)
+
+	expectedTxIdPrefix := "1-initialTxId"
+	if !strings.HasPrefix(updatedTxId, expectedTxIdPrefix) {
+		t.Errorf("Expected txId to start with %s, got %s", expectedTxIdPrefix, updatedTxId)
+	}
+
+	if updatedLogger.Data["txId"] != updatedTxId {
+		t.Errorf("Expected logger txId to be %s, got %s", updatedTxId, updatedLogger.Data["txId"])
+	}
+}
+
+// Successfully adds an object to the queue when a valid object is passed
+func TestAddFuncImplAddsValidObjectToQueue(t *testing.T) {
+	// Arrange
+	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
+	informer := cache.NewSharedIndexInformer(nil, &v1.Pod{}, 0, cache.Indexers{})
+	controller := &Controller{
+		name:     "test-controller",
+		cluster:  "test-cluster",
+		queue:    queue,
+		informer: informer,
+	}
+	obj := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            "test-pod",
+			Namespace:       "default",
+			ResourceVersion: "1",
+		},
+	}
+
+	// Act
+	controller.AddFuncImpl(obj)
+
+	// Assert
+	if queue.Len() != 1 {
+		t.Errorf("Expected queue length to be 1, got %d", queue.Len())
+	}
+}
+
+// Processes new object if resource version has changed
+func TestUpdateFuncImplProcessesNewObjectOnResourceVersionChange(t *testing.T) {
+	// Arrange
+	controller := &Controller{
+		name:      "test-controller",
+		cluster:   "test-cluster",
+		delegator: &MockDelegator{},
+		queue:     workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
+	}
+	oldObj := &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "1"}}
+	newObj := &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "2"}}
+
+	// Act
+	controller.UpdateFuncImpl(oldObj, newObj)
+
+	// Assert
+	if controller.queue.Len() != 1 {
+		t.Errorf("Expected queue length to be 1, got %d", controller.queue.Len())
+	}
+}
+
+// Successfully adds a delete event to the queue when a valid object is passed
+func TestDeleteFuncImplAddsDeleteEventToQueue(t *testing.T) {
+
+	// Create a mock object that implements the necessary interface
+	mockObj := &metav1.ObjectMeta{
+		Name:            "test-object",
+		Namespace:       "test-namespace",
+		ResourceVersion: "12345",
+	}
+
+	// Create a mock queue
+	mockQueue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
+
+	// Initialize the Controller with the mock queue
+	controller := &Controller{
+		name:    "test-controller",
+		cluster: "test-cluster",
+		queue:   mockQueue,
+	}
+
+	// Call the DeleteFuncImpl method with the mock object
+	controller.DeleteFuncImpl(mockObj)
+
+	// Check if the event was added to the queue
+	if mockQueue.Len() != 1 {
+		t.Errorf("Expected queue length of 1, but got %d", mockQueue.Len())
+	}
+
+	// Retrieve the item from the queue and verify its contents
+	item, _ := mockQueue.Get()
+	informerCacheObj, ok := item.(InformerCacheObj)
+	if !ok {
+		t.Errorf("Expected item of type InformerCacheObj, but got %T", item)
+	}
+
+	if informerCacheObj.eventType != Delete {
+		t.Errorf("Expected event type 'Delete', but got %s", informerCacheObj.eventType)
+	}
+
+	if informerCacheObj.key == "" {
+		t.Error("Expected a non-empty key")
+	}
+
+	if informerCacheObj.obj != mockObj {
+		t.Error("Expected obj to be the same as the passed object")
+	}
+}
+
+func TestController_ProcessNextItem(t *testing.T) {
+	// Arrange
+	mockQueue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
+	mockDelegator := &MockDelegator{}
+	controller := &Controller{
+		name:      "test-controller",
+		cluster:   "test-cluster",
+		delegator: mockDelegator,
+		queue:     mockQueue,
+	}
+
+	// Add an item to the queue
+	mockQueue.Add("test-key")
+
+	// Act
+	controller.processNextItem()
+}
diff --git a/admiral/pkg/controller/admiral/outlierdetection_test.go b/admiral/pkg/controller/admiral/outlierdetection_test.go
index ca2fd25a..96c47e1e 100644
--- a/admiral/pkg/controller/admiral/outlierdetection_test.go
+++ b/admiral/pkg/controller/admiral/outlierdetection_test.go
@@ -2,6 +2,7 @@ package admiral
 
 import (
 	"context"
+	"github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned"
 	"sync"
 	"testing"
 
@@ -275,3 +276,104 @@ func TestLogValueOfAdmiralIoIgnore(t *testing.T) {
 
 	o.LogValueOfAdmiralIoIgnore(od) // No error should occur
 }
+
+func TestOutlierDetectionController_DoesGenerationMatch(t *testing.T) {
+	// Test case 1: obj1 and obj2 are not OutlierDetection objects
+	o := &OutlierDetectionController{}
+	match, err := o.DoesGenerationMatch(nil, "not an outlier detection", "not an outlier detection")
+	assert.False(t, match)
+	assert.Nil(t, err)
+
+	// Test case 2: obj1 and obj2 are OutlierDetection objects with different generations
+	o = &OutlierDetectionController{}
+	obj1 := &v1.OutlierDetection{ObjectMeta: metaV1.ObjectMeta{Generation: 1}}
+	obj2 := &v1.OutlierDetection{ObjectMeta: metaV1.ObjectMeta{Generation: 2}}
+	match, err = o.DoesGenerationMatch(nil, obj1, obj2)
+	assert.False(t, match)
+	assert.Nil(t, err)
+}
+
+func TestOutlierDetectionController_IsOnlyReplicaCountChanged(t *testing.T) {
+	// Test case 1: obj1 and obj2 are not OutlierDetection objects
+	o := &OutlierDetectionController{}
+	match, err := o.IsOnlyReplicaCountChanged(nil, "not an outlier detection", "not an outlier detection")
+	assert.False(t, match)
+	assert.Nil(t, err)
+
+	// Test case 2: obj1 and obj2 are OutlierDetection objects with different replica counts
+	o = &OutlierDetectionController{}
+	obj1 := &v1.OutlierDetection{ObjectMeta: metaV1.ObjectMeta{Generation: 1}}
+	obj2 := &v1.OutlierDetection{ObjectMeta: metaV1.ObjectMeta{Generation: 1}}
+	match, err = o.IsOnlyReplicaCountChanged(nil, obj1, obj2)
+	assert.False(t, match)
+	assert.Nil(t, err)
+}
+
+func TestOdCache_UpdateODProcessingStatus(t *testing.T) {
+	p := common.AdmiralParams{}
+	p.LabelSet = &common.LabelSet{}
+	common.InitializeConfig(p)
+
+	oc := &odCache{
+		cache: make(map[string]map[string]map[string]*odItems),
+		mutex: &sync.RWMutex{},
+	}
+
+	od1 := &v1.OutlierDetection{
+		ObjectMeta: metaV1.ObjectMeta{
+			Name:      "od1",
+			Namespace: "ns1",
+		},
+		Status: v1.OutlierDetectionStatus{
+			State: common.NotProcessed,
+		},
+		Spec: model.OutlierDetection{
+			Selector: map[string]string{"identity": "payments", "env": "e2e"},
+		},
+	}
+
+	oc.Put(od1)
+	oc.UpdateODProcessingStatus(od1, common.Processed)
+
+	assert.Equal(t, oc.GetODProcessStatus(od1), common.Processed)
+}
+
+func TestOdCache_GetProcessStatus(t *testing.T) {
+	p := common.AdmiralParams{}
+	p.LabelSet = &common.LabelSet{}
+	common.InitializeConfig(p)
+
+	oc1 := &odCache{
+		cache: make(map[string]map[string]map[string]*odItems),
+		mutex: &sync.RWMutex{},
+	}
+
+	od1 := &v1.OutlierDetection{
+		ObjectMeta: metaV1.ObjectMeta{
+			Name:      "od1",
+			Namespace: "ns1",
+		},
+		Status: v1.OutlierDetectionStatus{
+			State: common.NotProcessed,
+		},
+		Spec: model.OutlierDetection{
+			Selector: map[string]string{"identity": "payments", "env": "e2e"},
+		},
+	}
+
+	oc1.Put(od1)
+	assert.Equal(t, oc1.GetProcessStatus(od1), common.ProcessingInProgress)
+}
+
+func TestOutlierDetectionController_Get2(t *testing.T) {
+	p := common.AdmiralParams{}
+	p.LabelSet = &common.LabelSet{}
+	common.InitializeConfig(p)
+
+	odc := &OutlierDetectionController{
+		cache:     &odCache{cache: make(map[string]map[string]map[string]*odItems), mutex: &sync.RWMutex{}},
+		crdclient: &versioned.Clientset{},
+	}
+
+	odc.Get(context.Background(), false, "")
+}
diff --git a/admiral/pkg/controller/admiral/rollouts_test.go b/admiral/pkg/controller/admiral/rollouts_test.go
index 263908db..e4b0df05 100644
--- a/admiral/pkg/controller/admiral/rollouts_test.go
+++ b/admiral/pkg/controller/admiral/rollouts_test.go
@@ -1097,3 +1097,93 @@ func TestRolloutLogValueOfAdmiralIoIgnore(t *testing.T) {
 
 	d.LogValueOfAdmiralIoIgnore(&argo.Rollout{ObjectMeta: metav1.ObjectMeta{Namespace: "test-ns", Annotations: map[string]string{common.AdmiralIgnoreAnnotation: "true"}}}) // No error should occur
 }
+
+func TestRolloutCache_Put(t *testing.T) {
+	rc := &rolloutCache{
+		cache: make(map[string]*RolloutClusterEntry),
+		mutex: &sync.Mutex{},
+	}
+
+	rce := &RolloutClusterEntry{
+		Identity: "test",
+	}
+
+	rc.Put(rce)
+
+	assert.Equal(t, 1, len(rc.cache))
+	assert.Equal(t, rce, rc.cache["test"])
+}
+
+func TestRolloutCache_Get(t *testing.T) {
+	rce := &RolloutClusterEntry{
+		Identity: "test",
+		Rollouts: map[string]*RolloutItem{
+			"dev": {
+				Rollout: &argo.Rollout{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "test",
+					},
+				},
+			},
+		},
+	}
+
+	rc := &rolloutCache{
+		cache: map[string]*RolloutClusterEntry{"rce": rce},
+		mutex: &sync.Mutex{},
+	}
+
+	actual := rc.Get("rce", "dev")
+
+	assert.Equal(t, "test", actual.ObjectMeta.Name)
+	assert.Nil(t, rc.Get("rce", "prd"))
+}
+
+func TestRolloutCache_List(t *testing.T) {
+	rce := &RolloutClusterEntry{
+		Identity: "test",
+		Rollouts: map[string]*RolloutItem{
+			"dev": {
+				Rollout: &argo.Rollout{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "test",
+					},
+				},
+			},
+		},
+	}
+
+	rc := &rolloutCache{
+		cache: map[string]*RolloutClusterEntry{"rce": rce},
+		mutex: &sync.Mutex{},
+	}
+
+	actual := rc.List()
+
+	assert.Equal(t, 1, len(actual))
+}
+
+func TestRolloutCache_Delete(t *testing.T) {
+	rce := &RolloutClusterEntry{
+		Identity: "test",
+		Rollouts: map[string]*RolloutItem{
+			"dev": {
+				Rollout: &argo.Rollout{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "test",
+					},
+				},
+			},
+		},
+	}
+
+	rc := &rolloutCache{
+		cache: map[string]*RolloutClusterEntry{"test": rce},
+		mutex: &sync.Mutex{},
+	}
+
+	rc.Put(rce)
+	rc.Delete(rce)
+
+	assert.Equal(t, 0, len(rc.cache))
+}
diff --git a/admiral/pkg/controller/common/config_test.go b/admiral/pkg/controller/common/config_test.go
index 5b31c701..0b7b2b9c 100644
--- a/admiral/pkg/controller/common/config_test.go
+++ b/admiral/pkg/controller/common/config_test.go
@@ -22,31 +22,64 @@ func setupForConfigTests() {
 			AdmiralCRDIdentityLabel: "identity",
 			IdentityPartitionKey:    "admiral.io/identityPartition",
 		},
-		EnableSAN:                      true,
-		SANPrefix:                      "prefix",
-		HostnameSuffix:                 "mesh",
-		SyncNamespace:                  "admiral-sync",
-		SecretFilterTags:               "admiral/sync",
-		CacheReconcileDuration:         time.Minute,
-		ClusterRegistriesNamespace:     "default",
-		DependenciesNamespace:          "default",
-		Profile:                        "default",
-		WorkloadSidecarName:            "default",
-		WorkloadSidecarUpdate:          "disabled",
-		MetricsEnabled:                 true,
-		DeprecatedEnvoyFilterVersion:   "1.10,1.17",
-		EnvoyFilterVersion:             "1.10,1.13,1.17",
-		CartographerFeatures:           map[string]string{"throttle_filter_gen": "disabled"},
-		DisableIPGeneration:            false,
-		EnableSWAwareNSCaches:          true,
-		ExportToIdentityList:           []string{"*"},
-		ExportToMaxNamespaces:          35,
-		AdmiralOperatorMode:            false,
-		OperatorSyncNamespace:          "admiral-sync",
-		OperatorSecretFilterTags:       "admiral/syncoperator",
-		DiscoveryClustersForNumaflow:   make([]string, 0),
-		ClientDiscoveryClustersForJobs: make([]string, 0),
-		EnableClientDiscovery:          true,
+		EnableSAN:                              true,
+		SANPrefix:                              "prefix",
+		HostnameSuffix:                         "mesh",
+		SyncNamespace:                          "admiral-sync",
+		SecretFilterTags:                       "admiral/sync",
+		CacheReconcileDuration:                 time.Minute,
+		ClusterRegistriesNamespace:             "default",
+		DependenciesNamespace:                  "default",
+		Profile:                                "default",
+		WorkloadSidecarName:                    "default",
+		WorkloadSidecarUpdate:                  "disabled",
+		MetricsEnabled:                         true,
+		DeprecatedEnvoyFilterVersion:           "1.10,1.17",
+		EnvoyFilterVersion:                     "1.10,1.13,1.17",
+		CartographerFeatures:                   map[string]string{"throttle_filter_gen": "disabled"},
+		DisableIPGeneration:                    false,
+		EnableSWAwareNSCaches:                  true,
+		ExportToIdentityList:                   []string{"*"},
+		ExportToMaxNamespaces:                  35,
+		AdmiralOperatorMode:                    false,
+		OperatorSyncNamespace:                  "admiral-sync",
+		OperatorIdentityValue:                  "operator",
+		ShardIdentityValue:                     "shard",
+		OperatorSecretFilterTags:               "admiral/syncoperator",
+		DiscoveryClustersForNumaflow:           make([]string, 0),
+		ClientDiscoveryClustersForJobs:         make([]string, 0),
+		EnableClientDiscovery:                  true,
+		ArgoRolloutsEnabled:                    true,
+		EnvoyFilterAdditionalConfig:            "additional",
+		EnableRoutingPolicy:                    true,
+		HAMode:                                 "true",
+		EnableDiffCheck:                        true,
+		EnableProxyEnvoyFilter:                 true,
+		EnableDependencyProcessing:             true,
+		SeAddressConfigmap:                     "configmap",
+		DeploymentOrRolloutWorkerConcurrency:   10,
+		DependentClusterWorkerConcurrency:      10,
+		DependencyWarmupMultiplier:             10,
+		MaxRequestsPerConnection:               10,
+		EnableClientConnectionConfigProcessing: true,
+		DisableDefaultAutomaticFailover:        true,
+		EnableServiceEntryCache:                true,
+		EnableDestinationRuleCache:             true,
+		AlphaIdentityList:                      []string{"identity1", "identity2"},
+		EnableActivePassive:                    true,
+		ClientInitiatedProcessingEnabled:       true,
+		IngressLBPolicy:                        "policy",
+		IngressVSExportToNamespaces:            []string{"namespace"},
+		VSRoutingGateways:                      []string{"gateway"},
+		EnableGenerationCheck:                  true,
+		EnableIsOnlyReplicaCountChangedCheck:   true,
+		PreventSplitBrain:                      true,
+		AdmiralStateSyncerMode:                 true,
+		DefaultWarmupDurationSecs:              10,
+		AdmiralConfig:                          "someConfig",
+		AdditionalEndpointSuffixes:             []string{"suffix1", "suffix2"},
+		AdditionalEndpointLabelFilters:         []string{"label1", "label2"},
+		EnableWorkloadDataStorage:              true,
 	}
 	ResetSync()
 	initHappened = true
@@ -327,6 +360,134 @@ func TestConfigManagement(t *testing.T) {
 	if len(GetClientDiscoveryClustersForNumaflow()) != 0 {
 		t.Errorf("clusters for numaflow client discovery mismatch, expected 0, got %v", GetClientDiscoveryClustersForNumaflow())
 	}
+
+	if !GetArgoRolloutsEnabled() {
+		t.Errorf("Argo rollouts enabled mismatch, expected true, got %v", GetArgoRolloutsEnabled())
+	}
+
+	if GetEnvoyFilterAdditionalConfig() != "additional" {
+		t.Errorf("Envoy filter additional config mismatch, expected additional, got %v", GetEnvoyFilterAdditionalConfig())
+	}
+
+	if !GetEnableRoutingPolicy() {
+		t.Errorf("Enable routing policy mismatch, expected true, got %v", GetEnableRoutingPolicy())
+	}
+
+	if GetHAMode() != "true" {
+		t.Errorf("HA mode mismatch, expected true, got %v", GetHAMode())
+	}
+
+	if !GetDiffCheckEnabled() {
+		t.Errorf("Diff check enabled mismatch, expected true, got %v", GetDiffCheckEnabled())
+	}
+
+	if !IsProxyEnvoyFilterEnabled() {
+		t.Errorf("Proxy Envoy Filter enabled mismatch, expected true, got %v", IsProxyEnvoyFilterEnabled())
+	}
+
+	if !IsDependencyProcessingEnabled() {
+		t.Errorf("Dependency processing enabled mismatch, expected true, got %v", IsDependencyProcessingEnabled())
+	}
+
+	if GetSeAddressConfigMap() != "configmap" {
+		t.Errorf("SE address config map mismatch, expected configmap, got %v", GetSeAddressConfigMap())
+	}
+
+	if DeploymentOrRolloutWorkerConcurrency() != 10 {
+		t.Errorf("Deployment or rollout worker concurrency mismatch, expected 10, got %v", DeploymentOrRolloutWorkerConcurrency())
+	}
+
+	if DependentClusterWorkerConcurrency() != 10 {
+		t.Errorf("Dependent cluster worker concurrency mismatch, expected 10, got %v", DependentClusterWorkerConcurrency())
+	}
+
+	if DependencyWarmupMultiplier() != 10 {
+		t.Errorf("Dependency warmup multiplier mismatch, expected 10, got %v", DependencyWarmupMultiplier())
+	}
+
+	if MaxRequestsPerConnection() != 10 {
+		t.Errorf("Max requests per connection mismatch, expected 10, got %v", MaxRequestsPerConnection())
+	}
+
+	if !IsClientConnectionConfigProcessingEnabled() {
+		t.Errorf("Client connection config processing enabled mismatch, expected true, got %v", IsClientConnectionConfigProcessingEnabled())
+	}
+
+	if DisableDefaultAutomaticFailover() != true {
+		t.Errorf("Disable default automatic failover mismatch, expected true, got %v", DisableDefaultAutomaticFailover())
+	}
+
+	if !EnableServiceEntryCache() {
+		t.Errorf("Enable service entry cache mismatch, expected true, got %v", EnableServiceEntryCache())
+	}
+
+	if !EnableDestinationRuleCache() {
+		t.Errorf("Enable destination rule cache mismatch, expected true, got %v", EnableDestinationRuleCache())
+	}
+
+	if len(AlphaIdentityList()) != 2 {
+		t.Errorf("Alpha identity list mismatch, expected 2, got %v", len(AlphaIdentityList()))
+	}
+
+	if AlphaIdentityList()[0] != "identity1" && AlphaIdentityList()[1] != "identity2" {
+		t.Errorf("Alpha identity list mismatch, expected identity1 and identity2, got %v and %v", AlphaIdentityList()[0], AlphaIdentityList()[1])
+	}
+
+	if !IsOnlyReplicaCountChanged() {
+		t.Errorf("Is only replica count changed mismatch, expected true, got %v", IsOnlyReplicaCountChanged())
+	}
+
+	if !EnableActivePassive() {
+		t.Errorf("Enable active passive mismatch, expected true, got %v", EnableActivePassive())
+	}
+
+	if PreventSplitBrain() != true {
+		t.Errorf("Prevent split brain mismatch, expected true, got %v", PreventSplitBrain())
+	}
+
+	if IsAdmiralStateSyncerMode() != true {
+		t.Errorf("Admiral state syncer mode mismatch, expected true, got %v", IsAdmiralStateSyncerMode())
+	}
+
+	if GetDefaultWarmupDurationSecs() != int64(10) {
+		t.Errorf("Default warmup duration mismatch, expected 10, got %v", GetDefaultWarmupDurationSecs())
+	}
+
+	if !DoGenerationCheck() {
+		t.Errorf("Do generation check mismatch, expected true, got %v", DoGenerationCheck())
+	}
+
+	if GetVSRoutingGateways()[0] != "gateway" {
+		t.Errorf("VS routing gateways mismatch, expected gateway, got %v", GetVSRoutingGateways())
+	}
+
+	if GetIngressVSExportToNamespace()[0] != "namespace" {
+		t.Errorf("Ingress VS export to namespace mismatch, expected namespace, got %v", GetIngressVSExportToNamespace()[0])
+	}
+
+	if GetIngressLBPolicy() != "policy" {
+		t.Errorf("Ingress LB policy mismatch, expected policy, got %v", GetIngressLBPolicy())
+	}
+
+	if ClientInitiatedProcessingEnabled() != true {
+		t.Errorf("Client initiated processing enabled mismatch, expected true, got %v", ClientInitiatedProcessingEnabled())
+	}
+
+	if GetAdmiralConfigPath() != "someConfig" {
+		t.Errorf("Admiral config path mismatch, expected someConfig, got %v", GetAdmiralConfigPath())
+	}
+
+	if GetAdditionalEndpointSuffixes()[0] != "suffix1" && GetAdditionalEndpointSuffixes()[1] != "suffix2" {
+		t.Errorf("Additional endpoint suffixes mismatch, expected [suffix1, suffix2], got %v", GetAdditionalEndpointSuffixes())
+	}
+
+	if GetAdditionalEndpointLabelFilters()[0] != "label1" && GetAdditionalEndpointLabelFilters()[1] != "label2" {
+		t.Errorf("Additional endpoint label filters mismatch, expected [label1, label2], got %v", GetAdditionalEndpointLabelFilters())
+	}
+
+	if !GetEnableWorkloadDataStorage() {
+		t.Errorf("Enable workload data storage mismatch, expected true, got %v", GetEnableWorkloadDataStorage())
+	}
 }
 
 func TestGetCRDIdentityLabelWithCRDIdentity(t *testing.T) {
@@ -340,6 +501,38 @@ func TestGetCRDIdentityLabelWithCRDIdentity(t *testing.T) {
 	admiralParams.LabelSet.AdmiralCRDIdentityLabel = backOldIdentity
 }
 
+func TestSetArgoRolloutsEnabled(t *testing.T) {
+	p := AdmiralParams{}
+	p.ArgoRolloutsEnabled = true
+	ResetSync()
+	InitializeConfig(p)
+
+	SetArgoRolloutsEnabled(true)
+	assert.Equal(t, true, GetArgoRolloutsEnabled())
+}
+
+func TestSetCartographerFeature(t *testing.T) {
+	p := AdmiralParams{}
+	ResetSync()
+	InitializeConfig(p)
+
+	SetCartographerFeature("feature", "enabled")
+	assert.Equal(t, "enabled", wrapper.params.CartographerFeatures["feature"])
+}
+
+func TestGetResyncIntervals(t *testing.T) {
+	p := AdmiralParams{}
+	p.CacheReconcileDuration = time.Minute
+	p.SeAndDrCacheReconcileDuration = time.Minute
+	ResetSync()
+	InitializeConfig(p)
+
+	actual := GetResyncIntervals()
+
+	assert.Equal(t, time.Minute, actual.SeAndDrReconcileInterval)
+	assert.Equal(t, time.Minute, actual.UniversalReconcileInterval)
+}
+
 //func TestGetCRDIdentityLabelWithLabel(t *testing.T) {
 //
 //	admiralParams := GetAdmiralParams()
diff --git a/admiral/pkg/controller/common/metrics_test.go b/admiral/pkg/controller/common/metrics_test.go
index 18e9b602..56ca2a0b 100644
--- a/admiral/pkg/controller/common/metrics_test.go
+++ b/admiral/pkg/controller/common/metrics_test.go
@@ -38,3 +38,8 @@ func TestNewGaugeFrom(t *testing.T) {
 		})
 	}
 }
+
+func TestNoop_Set(t *testing.T) {
+	n := Noop{}
+	n.Set(1)
+}
diff --git a/admiral/pkg/controller/common/types_test.go b/admiral/pkg/controller/common/types_test.go
index 20bc60b6..b586ed42 100644
--- a/admiral/pkg/controller/common/types_test.go
+++ b/admiral/pkg/controller/common/types_test.go
@@ -279,3 +279,139 @@ func TestSidecarEgressRange(t *testing.T) {
 
 	assert.Equal(t, 3, numOfIter)
 }
+
+func TestMapOfMapOfMapOfMaps(t *testing.T) {
+	mapOfMapOfMapOfMaps := NewMapOfMapOfMapOfMaps()
+	mapOfMapOfMapOfMaps.Put("pkey1", "skey1", "tkey1", "key", "value")
+
+	mapOfMapOfMaps1 := mapOfMapOfMapOfMaps.Get("pkey1")
+	if mapOfMapOfMaps1 == nil || mapOfMapOfMapOfMaps.Len() != 1 || mapOfMapOfMaps1.Get("skey1").Get("tkey1").Get("key") != "value" {
+		t.Fail()
+	}
+}
+
+func TestPutMapOfMapsOfMaps(t *testing.T) {
+	mapOfMapOfMapOfMaps := NewMapOfMapOfMapOfMaps()
+	mapOfMapOfMaps := NewMapOfMapOfMaps()
+	mapOfMapOfMaps.Put("pkey1", "skey1", "key", "value")
+
+	mapOfMapOfMapOfMaps.PutMapofMapsofMaps("tkey1", mapOfMapOfMaps)
+	if mapOfMapOfMaps.Get("pkey1").Get("skey1").Get("key") != "value" {
+		t.Fail()
+	}
+}
+
+func TestMap_CheckIfPresent(t *testing.T) {
+	m := NewMap()
+	m.Put("key1", "value")
+	if !m.CheckIfPresent("key1") {
+		t.Fail()
+	}
+	if m.CheckIfPresent("key2") {
+		t.Fail()
+	}
+}
+
+func TestMap_Len(t *testing.T) {
+	m := NewMap()
+	m.Put("key1", "value")
+	m.Put("key2", "value")
+	if m.Len() != 2 {
+		t.Fail()
+	}
+}
+
+func TestMap_Copy(t *testing.T) {
+	m := NewMap()
+	m.Put("key1", "value1")
+	m.Put("key2", "value2")
+	mCopy := m.Copy()
+	if mCopy["key1"] != "value1" || mCopy["key2"] != "value2" {
+		t.Fail()
+	}
+}
+
+func TestMap_CopyJustValues(t *testing.T) {
+	m := NewMap()
+	m.Put("key1", "value")
+	m.Put("key2", "value")
+	mCopy := m.CopyJustValues()
+	if len(mCopy) != 2 {
+		t.Fail()
+	}
+	if mCopy[0] != "value" || mCopy[1] != "value" {
+		t.Fail()
+	}
+}
+
+func TestMapOfMaps_PutMap(t *testing.T) {
+	m := NewMap()
+	m.Put("key1", "value")
+	mom := NewMapOfMaps()
+	mom.PutMap("pkey", m)
+
+	if mom.Get("pkey").Get("key1") != "value" {
+		t.Fail()
+	}
+}
+
+func TestMapOfMaps_Len(t *testing.T) {
+	mom := NewMapOfMaps()
+	m := NewMap()
+	m.Put("key1", "value")
+	mom.PutMap("pkey", m)
+	if mom.Len() != 1 {
+		t.Fail()
+	}
+}
+
+func TestMapOfMaps_GetKeys(t *testing.T) {
+	mom := NewMapOfMaps()
+	m := NewMap()
+	m.Put("key1", "value")
+	mom.PutMap("pkey", m)
+	keys := mom.GetKeys()
+	if len(keys) != 1 || keys[0] != "pkey" {
+		t.Fail()
+	}
+}
+
+func TestSidecarEgressMap_Delete(t *testing.T) {
+	sem := NewSidecarEgressMap()
+	sem.Put("pkey1", "skey1", "fqdn", map[string]string{"pkey2": "pkey2"})
+	sem.Delete("pkey1")
+	if sem.Get("pkey1") != nil {
+		t.Fail()
+	}
+}
+
+func TestProxyFilterConfig_String(t *testing.T) {
+	pfc := ProxyFilterConfig{
+		ConfigFile: "configFile",
+	}
+	expected := "{ConfigFile: configFile, DNSTimeoutMs:0, DNSRetries: 0, GatewayAssetAlias: , Services: []}"
+	if pfc.String() != expected {
+		t.Errorf("expected=%v, got=%v", expected, pfc.String())
+	}
+}
+
+func TestProxiedServiceInfo_String(t *testing.T) {
+	psi := ProxiedServiceInfo{
+		Identity:   "identity",
+		ProxyAlias: "proxyAlias",
+	}
+	expected := "{Identity:identity, Enviroments: []}"
+	if psi.String() != expected {
+		t.Errorf("expected=%v, got=%v", expected, psi.String())
+	}
+}
+
+func TestProxiedServiceEnvironment_String(t *testing.T) {
+	pse := ProxiedServiceEnvironment{
+		Environment: "environment",
+	}
+	expected := "{Environment:environment, DnsName: , CNames: []}"
+	if pse.String() != expected {
+		t.Errorf("expected=%v, got=%v", expected, pse.String())
+	}
+}
diff --git a/admiral/pkg/controller/secret/secretcontroller_test.go b/admiral/pkg/controller/secret/secretcontroller_test.go
index 2f19cbc5..761af1f5 100644
--- a/admiral/pkg/controller/secret/secretcontroller_test.go
+++ b/admiral/pkg/controller/secret/secretcontroller_test.go
@@ -17,6 +17,7 @@ package secret
 import (
 	"context"
 	"fmt"
+	"k8s.io/client-go/kubernetes"
 	"reflect"
 	"sync"
 	"testing"
@@ -547,3 +548,88 @@ func TestRemoveClusterFromShard(t *testing.T) {
 		})
 	}
 }
+
+func TestNewClustersStore(t *testing.T) {
+	t.Parallel()
+	store := newClustersStore()
+	assert.NotNil(t, store)
+}
+
+// Initializes a new Controller with default secret resolver when admiralProfile is "default"
+func TestNewControllerWithDefaultProfile(t *testing.T) {
+	kubeclientset := &kubernetes.Clientset{}
+	namespace := "test-namespace"
+	cs := &ClusterStore{}
+	addCallback := func(config *rest.Config, dataKey string, resyncPeriod util.ResyncIntervals) error { return nil }
+	updateCallback := func(config *rest.Config, dataKey string, resyncPeriod util.ResyncIntervals) error { return nil }
+	removeCallback := func(dataKey string) error { return nil }
+	admiralProfile := common.AdmiralProfileDefault
+	secretResolverConfig := ""
+
+	controller := NewController(kubeclientset, namespace, cs, addCallback, updateCallback, removeCallback, admiralProfile, secretResolverConfig)
+
+	if controller == nil {
+		t.Fatalf("Expected controller to be initialized, got nil")
+	}
+
+	if controller.secretResolver == nil {
+		t.Fatalf("Expected secret resolver to be initialized, got nil")
+	}
+}
+
+// Handles unrecognized admiralProfile by logging an error and returning nil
+func TestNewControllerWithUnrecognizedProfile(t *testing.T) {
+	kubeclientset := &kubernetes.Clientset{}
+	namespace := "test-namespace"
+	cs := &ClusterStore{}
+	addCallback := func(config *rest.Config, dataKey string, resyncPeriod util.ResyncIntervals) error { return nil }
+	updateCallback := func(config *rest.Config, dataKey string, resyncPeriod util.ResyncIntervals) error { return nil }
+	removeCallback := func(dataKey string) error { return nil }
+	admiralProfile := "unknown-profile"
+	secretResolverConfig := ""
+
+	controller := NewController(kubeclientset, namespace, cs, addCallback, updateCallback, removeCallback, admiralProfile, secretResolverConfig)
+
+	if controller != nil {
+		t.Fatalf("Expected controller to be nil for unrecognized profile, got non-nil")
+	}
+}
+
+// Successfully initializes a new Controller with valid parameters
+func TestStartSecretControllerInitialization(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	mockK8s := &kubernetes.Clientset{}
+	addCallback := func(config *rest.Config, dataKey string, resyncPeriod util.ResyncIntervals) error { return nil }
+	updateCallback := func(config *rest.Config, dataKey string, resyncPeriod util.ResyncIntervals) error { return nil }
+	removeCallback := func(dataKey string) error { return nil }
+	namespace := "default"
+	admiralProfile := common.AdmiralProfileDefault
+	secretResolverConfig := ""
+
+	controller, err := StartSecretController(ctx, mockK8s, addCallback, updateCallback, removeCallback, namespace, admiralProfile, secretResolverConfig)
+
+	if err != nil {
+		t.Fatalf("Expected no error, got %v", err)
+	}
+
+	if controller == nil {
+		t.Fatalf("Expected controller to be initialized, got nil")
+	}
+}
+
+// Returns error when kubeConfig is empty
+func TestCreateRemoteClusterWithEmptyKubeConfig(t *testing.T) {
+	// Arrange
+	controller := &Controller{}
+
+	// Act
+	remoteCluster, restConfig, err := controller.createRemoteCluster([]byte(""), "secretName", "clusterID", "namespace")
+
+	// Assert
+	assert.Nil(t, remoteCluster)
+	assert.Nil(t, restConfig)
+	assert.Error(t, err)
+	assert.Equal(t, "kubeconfig is empty", err.Error())
+}
diff --git a/admiral/pkg/controller/util/util_test.go b/admiral/pkg/controller/util/util_test.go
index 7e9afcd9..28089ed6 100644
--- a/admiral/pkg/controller/util/util_test.go
+++ b/admiral/pkg/controller/util/util_test.go
@@ -2,8 +2,11 @@ package util
 
 import (
 	"bytes"
+	"github.com/sirupsen/logrus/hooks/test"
 	"reflect"
+	"strings"
 	"testing"
+	"time"
 
 	log "github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/assert"
@@ -149,3 +152,49 @@ func TestLogElapsedTime(t *testing.T) {
 	assert.Contains(t, buf.String(), "op=test_op identity=test_identity env=test_env cluster=test_clusterId txTime=")
 	log.SetOutput(oldOut)
 }
+
+// Logs the elapsed time in milliseconds after execution
+func TestLogElapsedTimeController(t *testing.T) {
+	logger := log.NewEntry(log.New())
+	logMessage := "Test operation"
+
+	logFunc := LogElapsedTimeController(logger, logMessage)
+
+	// Simulate some operation
+	time.Sleep(100 * time.Millisecond)
+
+	// Capture the log output
+	hook := test.NewLocal(logger.Logger)
+	logFunc()
+
+	if len(hook.Entries) != 1 {
+		t.Fatalf("Expected one log entry, got %d", len(hook.Entries))
+	}
+
+	entry := hook.LastEntry()
+	if !strings.Contains(entry.Message, logMessage) {
+		t.Errorf("Expected log message to contain %q, got %q", logMessage, entry.Message)
+	}
+
+	if !strings.Contains(entry.Message, "txTime=") {
+		t.Errorf("Expected log message to contain elapsed time, got %q", entry.Message)
+	}
+}
+
+// Function returns a closure that logs elapsed time
+func TestLogElapsedTimeForTaskClosure(t *testing.T) {
+	logger := log.NewEntry(log.New())
+	op := "operation"
+	name := "taskName"
+	namespace := "default"
+	cluster := "cluster1"
+	message := "test message"
+
+	logFunc := LogElapsedTimeForTask(logger, op, name, namespace, cluster, message)
+
+	if logFunc == nil {
+		t.Error("Expected a non-nil closure function")
+	}
+
+	logFunc()
+}
diff --git a/admiral/pkg/registry/clusterIdentity_test.go b/admiral/pkg/registry/clusterIdentity_test.go
index b2a276fb..1db7aa8f 100644
--- a/admiral/pkg/registry/clusterIdentity_test.go
+++ b/admiral/pkg/registry/clusterIdentity_test.go
@@ -1 +1,89 @@
 package registry
+
+import "testing"
+
+// Creates a ClusterIdentity with given name and sourceIdentity values
+func TestNewClusterIdentityWithValidInputs(t *testing.T) {
+	name := "test-identity"
+	sourceIdentity := true
+
+	identity := NewClusterIdentity(name, sourceIdentity)
+
+	if identity.IdentityName != name {
+		t.Errorf("expected IdentityName to be %s, got %s", name, identity.IdentityName)
+	}
+
+	if identity.SourceIdentity != sourceIdentity {
+		t.Errorf("expected SourceIdentity to be %v, got %v", sourceIdentity, identity.SourceIdentity)
+	}
+}
+
+func TestNewClusterStore(t *testing.T) {
+	store := newClusterStore()
+
+	if store.cache == nil {
+		t.Error("expected cache to be initialized")
+	}
+
+	if store.mutex == nil {
+		t.Error("expected mutex to be initialized")
+	}
+}
+
+func TestNewClusterIdentityStoreHandler(t *testing.T) {
+	handler := NewClusterIdentityStoreHandler()
+
+	if handler.store.cache == nil {
+		t.Error("expected cache to be initialized")
+	}
+
+	if handler.store.mutex == nil {
+		t.Error("expected mutex to be initialized")
+	}
+}
+
+func TestAddUpdateIdentityToCluster(t *testing.T) {
+	handler := NewClusterIdentityStoreHandler()
+	identity := NewClusterIdentity("test-identity", true)
+	clusterName := "test-cluster"
+
+	err := handler.AddUpdateIdentityToCluster(identity, clusterName)
+
+	if err != nil {
+		t.Errorf("expected no error, got %v", err)
+	}
+}
+
+func TestRemoveIdentityToCluster(t *testing.T) {
+	handler := NewClusterIdentityStoreHandler()
+	identity := NewClusterIdentity("test-identity", true)
+	clusterName := "test-cluster"
+
+	err := handler.RemoveIdentityToCluster(identity, clusterName)
+
+	if err != nil {
+		t.Errorf("expected no error, got %v", err)
+	}
+}
+
+func TestGetAllIdentitiesForCluster(t *testing.T) {
+	handler := NewClusterIdentityStoreHandler()
+	identity := NewClusterIdentity("test-identity", true)
+	clusterName := "test-cluster"
+
+	err := handler.AddUpdateIdentityToCluster(identity, clusterName)
+
+	if err != nil {
+		t.Errorf("expected no error, got %v", err)
+	}
+
+	cache, err := handler.GetAllIdentitiesForCluster(clusterName)
+
+	if err != nil {
+		t.Errorf("expected no error, got %v", err)
+	}
+
+	if cache.Store == nil {
+		t.Error("expected cache to be initialized")
+	}
+}
diff --git a/admiral/pkg/registry/clusterShard_test.go b/admiral/pkg/registry/clusterShard_test.go
index b2a276fb..880913f9 100644
--- a/admiral/pkg/registry/clusterShard_test.go
+++ b/admiral/pkg/registry/clusterShard_test.go
@@ -1 +1,47 @@
 package registry
+
+import "testing"
+
+func TestNewClusterShardStoreHandler(t *testing.T) {
+	handler := NewClusterShardStoreHandler()
+
+	if handler == nil {
+		t.Error("expected handler to be initialized")
+	}
+}
+
+func TestAddClusterToShard(t *testing.T) {
+	handler := NewClusterShardStoreHandler()
+	cluster := "test-cluster"
+	shard := "test-shard"
+
+	err := handler.AddClusterToShard(cluster, shard)
+
+	if err != nil {
+		t.Errorf("expected no error, got %v", err)
+	}
+}
+
+func TestRemoveClusterFromShard(t *testing.T) {
+	handler := NewClusterShardStoreHandler()
+	cluster := "test-cluster"
+	shard := "test-shard"
+
+	err := handler.RemoveClusterFromShard(cluster, shard)
+
+	if err != nil {
+		t.Errorf("expected no error, got %v", err)
+	}
+}
+
+func TestAddAllClustersToShard(t *testing.T) {
+	handler := NewClusterShardStoreHandler()
+	clusters := []string{"test-cluster1", "test-cluster2"}
+	shard := "test-shard"
+
+	err := handler.AddAllClustersToShard(clusters, shard)
+
+	if err != nil {
+		t.Errorf("expected no error, got %v", err)
+	}
+}
diff --git a/admiral/pkg/registry/rolloutConfig_parser.go b/admiral/pkg/registry/rolloutConfig_parser.go
deleted file mode 100644
index 0a177d51..00000000
--- a/admiral/pkg/registry/rolloutConfig_parser.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package registry
-
-func NewRolloutConfigSyncer(cacheHandler IdentityConfigCache) *rolloutConfigSyncer {
-	return &rolloutConfigSyncer{
-		cacheHandler: cacheHandler,
-	}
-}
-
-type rolloutConfigSyncer struct {
-	cacheHandler IdentityConfigCache
-}
-
-func (syncer *rolloutConfigSyncer) Sync(config interface{}) error {
-	return nil
-}
diff --git a/admiral/pkg/test/eventually.go b/admiral/pkg/test/eventually.go
deleted file mode 100644
index c0ccd9ea..00000000
--- a/admiral/pkg/test/eventually.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package test
-
-import (
-	"testing"
-	"time"
-
-	"github.com/cenkalti/backoff"
-)
-
-// A Condition is a function that returns true when a test condition is satisfied.
-type Condition func() bool
-
-// Eventually polls cond until it completes (returns true) or times out (resulting in a test failure).
-func Eventually(t *testing.T, name string, cond Condition) {
-	t.Helper()
-	EventualOpts{backoff.NewExponentialBackOff()}.Eventually(t, name, cond)
-}
-
-// EventualOpts defines a polling strategy for operations that must eventually succeed. A new EventualOpts must be provided
-// for each invocation of Eventually (or call Reset on a previously completed set of options).
-type EventualOpts struct {
-	strategy *backoff.ExponentialBackOff
-}
-
-// NewEventualOpts constructs an EventualOpts instance with the provided polling interval and deadline. EventualOpts will
-// perform randomized exponential backoff using the starting interval, and will stop polling (and therefore fail) after
-// deadline time as elapsed from calling Eventually.
-//
-// Note: we always backoff with a randomization of 0.5 (50%), a multiplier of 1.5, and a max interval of one minute.
-func NewEventualOpts(interval, deadline time.Duration) *EventualOpts {
-	strategy := backoff.NewExponentialBackOff()
-	strategy.InitialInterval = interval
-	strategy.MaxElapsedTime = deadline
-	return &EventualOpts{strategy}
-}
-
-// Eventually polls cond until it succeeds (returns true) or we exceed the deadline. Eventually performs backoff while
-// polling cond.
-//
-// name is printed as part of the test failure message when we exceed the deadline to help identify the test case failing.
-// cond does not need to be thread-safe: it is only called from the current goroutine. cond itself can also fail the test early using t.Fatal.
-func (e EventualOpts) Eventually(t *testing.T, name string, cond Condition) {
-	t.Helper()
-
-	// Check once before we start polling.
-	if cond() {
-		return
-	}
-
-	// We didn't get a happy fast-path, so set up timers and wait.
-	// The backoff's ticker will close the channel after MaxElapsedTime, so we don't need to worry about a timeout.
-	poll := backoff.NewTicker(e.strategy).C
-	for {
-		_, cont := <-poll
-		if cond() {
-			return
-		} else if !cont {
-			t.Fatalf("timed out waiting for condition %q to complete", name)
-		}
-	}
-}