From f4066c295bec647a94985ec9fc8d32ae799667bd Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 16:30:24 -0700
Subject: [PATCH 001/235] add admiral/crd/clientconnectionconfig.yaml
---
admiral/crd/clientconnectionconfig.yaml | 118 ++++++++++++++++++++++++
1 file changed, 118 insertions(+)
create mode 100644 admiral/crd/clientconnectionconfig.yaml
diff --git a/admiral/crd/clientconnectionconfig.yaml b/admiral/crd/clientconnectionconfig.yaml
new file mode 100644
index 00000000..00966083
--- /dev/null
+++ b/admiral/crd/clientconnectionconfig.yaml
@@ -0,0 +1,118 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: clientconnectionconfigs.admiral.io
+spec:
+ group: admiral.io
+ names:
+ kind: ClientConnectionConfig
+ listKind: ClientConnectionConfigList
+ plural: clientconnectionconfigs
+ shortNames:
+ - ccc
+ singular: clientconnectionconfig
+ scope: Namespaced
+ versions:
+ - name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ connectionPool:
+ properties:
+ http:
+ description: HTTP connection pool settings
+ properties:
+ h2UpgradePolicy:
+ format: int32
+ type: integer
+ http1MaxPendingRequests:
+ description: Maximum number of pending HTTP requests to a
+ destination.
+ format: int32
+ type: integer
+ http2MaxRequests:
+ description: Maximum number of requests to a backend
+ format: int32
+ type: integer
+ idleTimeout:
+ description: The idle timeout for upstream connection
+ type: string
+ maxRequestsPerConnection:
+ description: Maximum number of requests per connection to
+ a backend.
+ format: int32
+ type: integer
+ maxRetries:
+ format: int32
+ type: integer
+ useClientProtocol:
+ type: boolean
+ type: object
+ tcp:
+ properties:
+ connectTimeout:
+ description: TCP connection timeout.
+ type: string
+ maxConnectionDuration:
+ description: The maximum duration of a connection
+ type: string
+ maxConnections:
+ description: Maximum number of HTTP1 /TCP connections to a
+ destination host.
+ format: int32
+ type: integer
+ tcpKeepalive:
+ properties:
+ interval:
+ description: The time duration between keep-alive probes.
+ type: string
+ probes:
+ format: int32
+ type: integer
+ time:
+ type: string
+ type: object
+ type: object
+ type: object
+ tunnel:
+ properties:
+ protocol:
+ type: string
+ targetHost:
+ type: string
+ targetPort:
+ format: int32
+ type: integer
+ type: object
+ type: object
+ status:
+ properties:
+ clustersSynced:
+ format: int32
+ type: integer
+ state:
+ type: string
+ required:
+ - clustersSynced
+ - state
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
\ No newline at end of file
From 017d1901988b4c5d476e569fd9845c90b95416ff Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:49:34 -0700
Subject: [PATCH 002/235] add file .gitignore
---
.gitignore | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/.gitignore b/.gitignore
index 6fc8f208..4e443ccb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,8 +1,11 @@
.idea
.idea/vcs.xml
-out*
+out/*
+out.yaml
*.tar.gz
*.out
+*.prof
istio-*
.DS_Store
+cobertura-coverage.xml
From 209b5e8512fb63ca8afc4e350a2cd86d0991f6a5 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:49:37 -0700
Subject: [PATCH 003/235] add file CONTRIBUTING.md
---
CONTRIBUTING.md | 54 ++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 51 insertions(+), 3 deletions(-)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index e9bd99c4..b4405367 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -21,7 +21,7 @@ export KUBECONFIG=~/.kube/config
```
* Install [Prerequisites](./docs/Examples.md#Prerequisite) and make sure to install istio control plane in cluster. Alternatively, you can use the script to install istio control plane on the cluster created in previous step:
-Mac: `$ADMIRAL_HOME/tests/install_istio.sh 1.7.4 osx`
+Mac: `$ADMIRAL_HOME/tests/install_istio.sh 1.10.4 osx`
Mac (Apple Silicon): `$ADMIRAL_HOME/tests/install_istio.sh 1.7.4 osx-arm64`
@@ -73,7 +73,38 @@ minikube service prometheus -n istio-system --url
```
## Protobuf code generation
-* If you've made changes to protobuf model objects and need to re-generate their clientsets, use `sh hack/update-codegen.sh` and checkin the generated files
+* Required installations and their versions -
+
+### Initial Setup
+```bash
+Install protobuf
+go install sigs.k8s.io/controller-tools@v0.10.0
+go install k8s.io/code-generator v0.24.2
+go install google.golang.org/protobuf@v1.28.1
+make setup
+```
+
+### Generate `*.pb.go` files from `*.proto` files
+```bash
+go generate ./...
+```
+
+### Generate deepcopy functions
+```bash
+make model-gen
+```
+
+* If you've made changes to protobuf model objects and need to re-generate their clientsets, use the following steps and check in the generated files
+### Generate clientsets
+```bash
+sh hack/update-codegen.sh
+```
+
+### Generate CRD
+```bash
+set $GOPATH based on your go setup
+chmod +x $GOPATH/pkg/mod/sigs.k8s.io/controller-tools@v0.10.0/.run-in.sh && sh $GOPATH/pkg/mod/sigs.k8s.io/controller-tools@v0.10.0/.run-controller-gen.sh crd paths=./admiral/pkg/apis/admiral/v1/... output:stdout > admiral/crd/out.yaml
+```
## Integration tests
### Single cluster
@@ -87,4 +118,21 @@ cd $ADMIRAL_HOME/tests
```
TODO
```
-
\ No newline at end of file
+
+## Before PR
+1. Clone repository
+1. Add unit tests and fmea tests (if applicable) along with the checked-in code.
+1. Confirm that the unit test coverage did not drop with your change.
+1. Run regression and make sure it is not failing
+1. Please update any bdd tests in case applicable
+
+## During PR
+1. Create Pull Request from your branch to the master branch.
+1. Make sure the build succeeds
+1. Maintainers on Admiral Repository will review the pull request.
+1. PR will be merged after code is reviewed and all checks are passing
+
+## After PR
+1. When merging the PR, ensure that all commits are squashed into a single commit. (This can be done in advance via interactive rebase or through the github UI)
+1. Once the changes are deployed to qal environment, verify the fix looks good and bdds are successful.
+
From 48427752e1be21154e32fe83cebd9d649ef95a13 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:49:40 -0700
Subject: [PATCH 004/235] add file Makefile
---
Makefile | 49 ++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 46 insertions(+), 3 deletions(-)
diff --git a/Makefile b/Makefile
index d2afaa59..93920526 100644
--- a/Makefile
+++ b/Makefile
@@ -10,10 +10,10 @@ SHELL := /bin/bash
GOCMD?=go
GOBUILD?=$(GOCMD) build
GOCLEAN?=$(GOCMD) clean
-GOTEST?=$(GOCMD) test -race
+GOTEST?=$(GOCMD) test
GOGET?=$(GOCMD) get
-GOBIN?=$(GOPATH)/bin
OUT?=./out/
+GOLINTER_VERSION=v1.58.1
BINARY_NAME?=$(OUT)admiral
BINARY_DARWIN?=$(BINARY_NAME)_darwin
@@ -45,7 +45,9 @@ build-mac:
$(GOBUILD) -o $(BINARY_DARWIN) -v $(MAIN_PATH_ADMIRAL)
test:
- $(GOTEST) -v `go list ./... | grep -v client` -coverprofile=c.out
+ $(GOTEST) -v -failfast -race -timeout 0 `go list ./... | grep -v client | grep -v fmeatests | grep -v tests` -coverprofile=c.out
+ $(GOCMD) install github.com/boumenot/gocover-cobertura@latest
+ $(GOPATH)/bin/gocover-cobertura < c.out > cobertura-coverage.xml
clean:
$(GOCLEAN)
@@ -95,10 +97,16 @@ docker-build: set-tag
#NOTE: Assumes binary has already been built (admiral)
docker build -t $(IMAGE):$(TAG) -f ./admiral/docker/$(DOCKERFILE) .
+podman-build: set-tag
+ #NOTE: Assumes binary has already been built (admiral)
+ podman build --storage-driver=overlay --isolation=chroot --ulimit=nofile=1048576:1048576 --cgroup-manager=cgroupfs --events-backend=file -t $(IMAGE):$(TAG) -f ./admiral/docker/$(DOCKERFILE) .
+
docker-publish:
ifndef DO_NOT_PUBLISH
+ifndef PIPELINE_BUILD
echo "$(DOCKER_PASS)" | docker login -u $(DOCKER_USER) --password-stdin
endif
+endif
ifeq ($(TAG),)
echo "This is not a Tag/Release, skipping docker publish"
else
@@ -115,6 +123,30 @@ else
endif
endif
+podman-publish:
+ifndef DO_NOT_PUBLISH
+ifndef PIPELINE_BUILD
+ echo "$(DOCKER_PASS)" | podman login -u ${DOCKER_USERNAME} --password-stdin --storage-driver=overlay
+endif
+endif
+ifeq ($(TAG),)
+ echo "This is not a Tag/Release, skipping docker publish"
+else
+ifndef DO_NOT_PUBLISH
+ podman push $(IMAGE):$(TAG) --storage-driver=overlay --cgroup-manager=cgroupfs --events-backend=file
+ podman pull $(IMAGE):$(TAG) --storage-driver=overlay --cgroup-manager=cgroupfs --events-backend=file
+endif
+endif
+#no tag set and its master branch, in this case publish `latest` tag
+ifeq ($(TAG),)
+ifeq ($(BRANCH),master)
+ podman push $(IMAGE):latest --storage-driver=overlay --cgroup-manager=cgroupfs --events-backend=file
+ podman pull $(IMAGE):$(TAG) --storage-driver=overlay --cgroup-manager=cgroupfs --events-backend=file
+else
+ echo "This is not master branch, skipping to publish 'latest' tag"
+endif
+endif
+
download-kustomize:
curl -s -O -L https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v${KUSTOMIZE_VERSION}/kustomize_v${KUSTOMIZE_VERSION}_${OPSYS}_amd64.tar.gz
tar xzf ./kustomize_v${KUSTOMIZE_VERSION}_${OPSYS}_amd64.tar.gz
@@ -141,3 +173,14 @@ gen-yaml:
cp ./install/prometheus/prometheus.yaml ./out/yaml/prometheus.yaml
cp ./install/sample/rp.yaml ./out/yaml/rp.yaml
cp ./install/scripts/*.sh ./out/scripts/
+
+install_linter:
+ go install github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLINTER_VERSION}
+
+lint:
+ golangci-lint run --fast -c .golangci.yml
+
+perf:
+ go install github.com/onsi/ginkgo/v2/ginkgo
+ TOTAL_ASSETS=10 API_SERVER_DELAY_MULTIPLIER=1ms ginkgo -v --fail-fast ./tests/perf
+ TOTAL_ASSETS=25 API_SERVER_DELAY_MULTIPLIER=1ms ginkgo -v --fail-fast ./tests/perf
From b47b7a4ab9b59832cdb83a59c7c4e6da31cb7a8e Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:49:43 -0700
Subject: [PATCH 005/235] add file README.md
---
README.md | 66 +++++++++++++++++++++++++++++++++++++------------------
1 file changed, 45 insertions(+), 21 deletions(-)
diff --git a/README.md b/README.md
index 1e06c76f..dda9171d 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,15 @@
-[![CircleCI](https://circleci.com/gh/istio-ecosystem/admiral/tree/master.svg?style=svg)](https://circleci.com/gh/istio-ecosystem/admiral/tree/master) [![codecov](https://codecov.io/gh/istio-ecosystem/admiral/branch/master/graph/badge.svg)](https://codecov.io/gh/istio-ecosystem/admiral)
+[//]: # (Build Status)
+
+[![CircleCI](https://circleci.com/gh/istio-ecosystem/admiral/tree/master.svg?style=svg)](https://circleci.com/gh/istio-ecosystem/admiral/tree/master)
+
+[//]: # (Code Coverage)
+
+[![codecov](https://codecov.io/gh/istio-ecosystem/admiral/branch/master/graph/badge.svg)](https://codecov.io/gh/istio-ecosystem/admiral)
+
+[//]: # (usage)
**Admiral provides automatic configuration and service discovery for multicluster Istio service mesh**
@@ -32,10 +40,17 @@ Organizations below are **officially** using Admiral. Please send a PR with your
* [Scaling Service Mesh to an Enterprise Microservices Ecosystem](https://apiworld2019aidevworld2019.sched.com/event/SLIQ/pro-talk-scaling-service-mesh-to-an-enterprise-microservices-ecosystem)
+* [Admiral – Enabling Multi-Cluster Mesh](https://www.meetup.com/San-Diego-Cloud-Native-Computing-Meetup/events/262826967/)
+
+[//]: # (support)
+
## Collaboration and Communication
[Admiral Slack Channel](https://istio.slack.com/archives/CT3F18T08) - `Note:` This channel is under Istio slack org, please fill out this [form](https://docs.google.com/forms/d/e/1FAIpQLSfdsupDfOWBtNVvVvXED6ULxtR4UIsYGCH_cQcRr0VcG1ZqQQ/viewform) to get access to Istio slack.
+## Local Development
+Refer to [Local Development Setup](./CONTRIBUTING.md#setting-up-for-local-development)
+
## Contributing
Refer to [Contributing doc](./CONTRIBUTING.md)
@@ -43,48 +58,57 @@ Refer to [Contributing doc](./CONTRIBUTING.md)
Details can be found [here](./docs/Processes.md)
-## Admiral sequence diagram
+## Admiral Sequence Diagram
+
+### Legend:
+SE - Istio ServiceEntry
+
+VS - Istio VirtualService
+
+DR - Istio DestinationRule
+
+K8sAPI - Kubernetes API Server
+
+GTP - Admiral GlobalTrafficPolicy
+
```mermaid
sequenceDiagram
autonumber 1
Service/VirtualService Handler->>+Rollout/Deployment Handler: Add/Update/Delete events
- loop
- autonumber 1
- GTP Handler->>GTP Handler: Add/Update/Delete events
- end
autonumber 1
- GTP Handler ->> ServiceEntry Handler: Add/Update
- loop
- autonumber 1
- Rollout/Deployment Handler->>Rollout/Deployment Handler: Add/Delete events of rollout/deployment
- end
+ GTP/OutlierDetection Handler->>Update All Resources: Add/Update
+ autonumber 1
+ DependencyRecord Handler->>Update All Resources: Add/Update
autonumber 1
- Rollout/Deployment Handler->>ServiceEntry Handler: Add/Update
+ Rollout/Deployment Handler->>Update All Resources: Add/Update
+
autonumber 2
- ServiceEntry Handler->>RemoteControllers: Fetch All Cluster Controllers
+ Update All Resources->>RemoteControllers: Fetch All Cluster Controllers
rect rgb(255, 255, 220)
loop
- ServiceEntry Handler->>K8sAPI 1..N: For each cluster, get corresponding service object
- K8sAPI 1..N-->>ServiceEntry Handler: Continue if service does not exist for deployment/rollout
- K8sAPI 1..N-->>ServiceEntry Handler: Build list of source services
+ Update All Resources->>K8sAPI 1..N: For each cluster, get corresponding service object
+ K8sAPI 1..N-->>Update All Resources: Continue if service does not exist for deployment/rollout
+ K8sAPI 1..N-->>Update All Resources: Build list of source services
end
end
rect rgb(255, 255, 220)
loop
- ServiceEntry Handler->>K8sAPI 1..N: Derive SE from each service in the list
- ServiceEntry Handler->>GTP Cache: Derive DR from GTP
+ Update All Resources->>K8sAPI 1..N: Derive SE from each service in the list
+ Update All Resources->>GTP/OutlierDetection Cache: Derive DR from GTP/OutlierDetection
rect rgb(204, 255, 204)
loop
- ServiceEntry Handler->>K8sAPI 1..N: Add/Update SE/DR in source clusters
+ Update All Resources->>K8sAPI 1..N: Add/Update SE/DR/VS in source clusters
+ Update All Resources->>DynamoDB: Add/Update WorkloadData for source clusters
end
end
end
end
- ServiceEntry Handler->>DependencyCache: Fetch dependent clusters
+ Update All Resources->>DependencyCache: Fetch dependent clusters
rect rgb(204, 255, 204)
loop
- ServiceEntry Handler->>K8sAPI 1..N: Add/Update SE/DR in dependent clusters
+ Update All Resources->>K8sAPI 1..N: Add/Update SE/DR/VS in dependent clusters
+ Update All Resources->>DynamoDB: Add/Update WorkloadData for dependent clusters
end
end
```
From ce9a4487de7885384e48ea15875457fc63f7b0d4 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:49:46 -0700
Subject: [PATCH 006/235] add file admiral/apis/v1/types.go
---
admiral/apis/v1/types.go | 31 +++++++++++++++++++++++++++++++
1 file changed, 31 insertions(+)
diff --git a/admiral/apis/v1/types.go b/admiral/apis/v1/types.go
index 235a72f6..f0b16def 100644
--- a/admiral/apis/v1/types.go
+++ b/admiral/apis/v1/types.go
@@ -2,4 +2,35 @@ package v1
const (
Admiral = "Admiral"
+ Intuit = "intuit"
)
+
+type AdmiralConfig struct {
+ IdpsConfig IdpsConfig `yaml:"idps,omitempty"`
+ IgnoreIdentityList IgnoreIdentityList `yaml:"ignoreIdentityList,omitempty"`
+ WorkloadDatabase DynamoDB `yaml:"workloadDynamoDB,omitempty"`
+}
+
+type IgnoreIdentityList struct {
+ StateCheckerPeriodInSeconds int `yaml:"stateCheckerPeriodInSeconds,omitempty"`
+ DynamoDB DynamoDB `yaml:"dynamoDB,omitempty"`
+}
+
+type DynamoDB struct {
+ TableName string `yaml:"tableName,omitempty"`
+ Region string `yaml:"region,omitempty"`
+ Role string `yaml:"role,omitempty"`
+ ClusterEnvironment string `yaml:"clusterEnvironment,omitempty"`
+}
+
+type IdpsConfig struct {
+ ApiKeyId string `yaml:"api-key-id,omitempty"`
+ ApiSecretKey string `yaml:"api-secret-key,omitempty"`
+ ApiEndPoint string `yaml:"api-endpoint,omitempty"`
+ MgmtSecretKey string `yaml:"mgmt-api-secret-key,omitempty"`
+ MgmtEndpoint string `yaml:"mgmt-endpoint,omitempty"`
+ MgmtTempCredExpiry int32 `yaml:"mgmt-temp-cred-expiry,omitempty"`
+ PolicyId string `yaml:"policy-id,omitempty"`
+ ExpiryRequest int32 `yaml:"temporary-credentials-expiry-requested-mins,omitempty"`
+ KubeConfigSecretFolder string `yaml:"kubeconfig-secret-folder,omitempty"`
+}
From 7583b8e41668a77ef65ea709ac86b51b24e11656 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:49:49 -0700
Subject: [PATCH 007/235] add file admiral/cmd/admiral/cmd/root.go
---
admiral/cmd/admiral/cmd/root.go | 108 +++++++++++++++++++++++++++++---
1 file changed, 101 insertions(+), 7 deletions(-)
diff --git a/admiral/cmd/admiral/cmd/root.go b/admiral/cmd/admiral/cmd/root.go
index 6c253070..df08c658 100644
--- a/admiral/cmd/admiral/cmd/root.go
+++ b/admiral/cmd/admiral/cmd/root.go
@@ -12,10 +12,17 @@ import (
"github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/routes"
"github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/server"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
"github.com/istio-ecosystem/admiral/admiral/pkg/clusters"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
+ "gopkg.in/natefinch/lumberjack.v2"
+)
+
+const (
+ deploymentOrRolloutWorkerConcurrency = 5
+ dependentClusterWorkerConcurrency = 5
)
var (
@@ -43,13 +50,42 @@ func GetRootCmd(args []string) *cobra.Command {
},
Run: func(cmd *cobra.Command, args []string) {
log.SetLevel(log.Level(params.LogLevel))
+ if params.LogToFile {
+ // open a file and rotate it at a certain size
+ _, err := os.OpenFile(params.LogFilePath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
+ if err != nil {
+ log.Error("error opening file for logging: " + err.Error() + " switching to stdout")
+ } else {
+ log.SetOutput(&lumberjack.Logger{
+ Filename: params.LogFilePath,
+ MaxSize: params.LogFileSizeInMBs, // megabytes
+ MaxBackups: 10,
+ MaxAge: 28, //days
+ })
+ }
+ }
log.Info("Starting Admiral")
- remoteRegistry, err := clusters.InitAdmiral(ctx, params)
-
+ var (
+ err error
+ remoteRegistry *clusters.RemoteRegistry
+ )
+ if params.HAMode == common.HAController {
+ remoteRegistry, err = clusters.InitAdmiralHA(ctx, params)
+ } else {
+ remoteRegistry, err = clusters.InitAdmiral(ctx, params)
+ }
if err != nil {
log.Fatalf("Error: %v", err)
}
+ // This is required for PERF tests only.
+ // Perf tests requires remote registry object for validations.
+ // There is no way to inject this object
+	// There is no other way to propagate this object to the perf suite
+ if params.KubeconfigPath == loader.FakeKubeconfigPath {
+ cmd.SetContext(context.WithValue(cmd.Context(), "remote-registry", remoteRegistry))
+ }
+
service := server.Service{}
metricsService := server.Service{}
opts.RemoteRegistry = remoteRegistry
@@ -87,24 +123,33 @@ func GetRootCmd(args []string) *cobra.Command {
rootCmd.PersistentFlags().AddGoFlagSet(flag.CommandLine)
rootCmd.PersistentFlags().IntVar(¶ms.LogLevel, "log_level", int(log.InfoLevel),
fmt.Sprintf("Set log verbosity, defaults to 'Info'. Must be between %v and %v", int(log.PanicLevel), int(log.TraceLevel)))
+ rootCmd.PersistentFlags().BoolVar(¶ms.LogToFile, "log_to_file", false,
+ "If enabled, use file to log instead of stdout")
+ rootCmd.PersistentFlags().StringVar(¶ms.LogFilePath, "log_file_path", "/app/logs/admiral.log",
+ "Path to log file. If not specified, defaults to /app/logs/admiral.log")
+ rootCmd.PersistentFlags().IntVar(¶ms.LogFileSizeInMBs, "log_file_size_in_MBs", 200,
+ "Size of the log file in Mbs. If not specified, defaults to 200 Mbs")
rootCmd.PersistentFlags().StringVar(¶ms.KubeconfigPath, "kube_config", "",
"Use a Kubernetes configuration file instead of in-cluster configuration")
rootCmd.PersistentFlags().BoolVar(¶ms.ArgoRolloutsEnabled, "argo_rollouts", false,
"Use argo rollout configurations")
+ rootCmd.PersistentFlags().StringVar(¶ms.SecretFilterTags, "secret_filter_tags", "admiral/sync", "Filter tags for the specific admiral namespace secret to watch")
rootCmd.PersistentFlags().StringVar(¶ms.ClusterRegistriesNamespace, "secret_namespace", "admiral",
"Namespace to monitor for secrets defaults to admiral-secrets")
rootCmd.PersistentFlags().StringVar(¶ms.DependenciesNamespace, "dependency_namespace", "admiral",
"Namespace to monitor for changes to dependency objects")
rootCmd.PersistentFlags().StringVar(¶ms.SyncNamespace, "sync_namespace", "admiral-sync",
"Namespace in which Admiral will put its generated configurations")
- rootCmd.PersistentFlags().DurationVar(¶ms.CacheRefreshDuration, "sync_period", 5*time.Minute,
+ rootCmd.PersistentFlags().DurationVar(¶ms.CacheReconcileDuration, "sync_period", 5*time.Minute,
"Interval for syncing Kubernetes resources, defaults to 5 min")
+ rootCmd.PersistentFlags().DurationVar(¶ms.SeAndDrCacheReconcileDuration, "se_dr_sync_period", 5*time.Minute,
+ "Interval for syncing ServiceEntries and DestinationRules resources, defaults to 5 min")
rootCmd.PersistentFlags().BoolVar(¶ms.EnableSAN, "enable_san", false,
"If SAN should be enabled for created Service Entries")
rootCmd.PersistentFlags().StringVar(¶ms.SANPrefix, "san_prefix", "",
"Prefix to use when creating SAN for Service Entries")
- rootCmd.PersistentFlags().StringVar(¶ms.SecretResolver, "secret_resolver", "",
- "Type of resolver to use to fetch kubeconfig for monitored clusters")
+ rootCmd.PersistentFlags().StringVar(¶ms.Profile, "secret_resolver", common.AdmiralProfileDefault,
+ "Type of resolver. Valid options - default|intuit")
rootCmd.PersistentFlags().StringVar(¶ms.LabelSet.DeploymentAnnotation, "deployment_annotation", "sidecar.istio.io/inject",
"The annotation, on a pod spec in a deployment, which must be set to \"true\" for Admiral to listen on the deployment")
rootCmd.PersistentFlags().StringVar(¶ms.LabelSet.SubsetLabel, "subset_label", "subset",
@@ -121,7 +166,7 @@ func GetRootCmd(args []string) *cobra.Command {
"The hostname suffix to customize the cname generated by admiral. Default suffix value will be \"global\"")
rootCmd.PersistentFlags().StringVar(¶ms.LabelSet.WorkloadIdentityKey, "workload_identity_key", "identity",
"The workload identity key, on deployment which holds identity value used to generate cname by admiral. Default label key will be \"identity\" Admiral will look for a label with this key. If present, that will be used. If not, it will try an annotation (for use cases where an identity is longer than 63 chars)")
- rootCmd.PersistentFlags().StringVar(¶ms.LabelSet.GlobalTrafficDeploymentLabel, "globaltraffic_deployment_label", "identity",
+ rootCmd.PersistentFlags().StringVar(¶ms.LabelSet.AdmiralCRDIdentityLabel, "admiral_crd_identity_label", "identity",
"The label key which will be used to tie globaltrafficpolicy objects to deployments. Configured separately to the workload identity key because this one won't fall back to annotations.")
rootCmd.PersistentFlags().StringVar(¶ms.WorkloadSidecarUpdate, "workload_sidecar_update", "disabled",
"The parameter will be used to decide whether to update workload sidecar resource or not. By default these updates will be disabled.")
@@ -132,22 +177,71 @@ func GetRootCmd(args []string) *cobra.Command {
"The order would be to use annotation specified as `env_key`, followed by label specified as `env_key` and then fallback to the label `env`")
rootCmd.PersistentFlags().StringVar(¶ms.LabelSet.GatewayApp, "gateway_app", "istio-ingressgateway",
"The the value of the `app` label to use to match and find the service that represents the ingress for cross cluster traffic (AUTO_PASSTHROUGH mode)")
+ rootCmd.PersistentFlags().StringVar(¶ms.AdmiralConfig, "secret_resolver_config_path", "/etc/config/resolver_config.yaml",
+ "Path to the secret resolver config")
rootCmd.PersistentFlags().BoolVar(¶ms.MetricsEnabled, "metrics", true, "Enable prometheus metrics collections")
rootCmd.PersistentFlags().StringVar(¶ms.AdmiralStateCheckerName, "admiral_state_checker_name", "NoOPStateChecker", "The value of the admiral_state_checker_name label to configure the DR Strategy for Admiral")
rootCmd.PersistentFlags().StringVar(¶ms.DRStateStoreConfigPath, "dr_state_store_config_path", "", "Location of config file which has details for data store. Ex:- Dynamo DB connection details")
rootCmd.PersistentFlags().StringVar(¶ms.ServiceEntryIPPrefix, "se_ip_prefix", "240.0", "IP prefix for the auto generated IPs for service entries. Only the first two octets: Eg- 240.0")
- rootCmd.PersistentFlags().StringVar(¶ms.EnvoyFilterVersion, "envoy_filter_version", "",
+ rootCmd.PersistentFlags().StringVar(¶ms.EnvoyFilterVersion, "envoy_filter_version", "1.17,1.20",
"The value of envoy filter version is used to match the proxy version for envoy filter created by routing policy")
+ rootCmd.PersistentFlags().StringVar(¶ms.DeprecatedEnvoyFilterVersion, "deprecated_envoy_filter_version", "",
+ "The value of envoy filter version which are deprecated and need to be removed from the clusters")
rootCmd.PersistentFlags().StringVar(¶ms.EnvoyFilterAdditionalConfig, "envoy_filter_additional_config", "",
"The value of envoy filter is to add additional config to the filter config section")
rootCmd.PersistentFlags().BoolVar(¶ms.EnableRoutingPolicy, "enable_routing_policy", false,
"If Routing Policy feature needs to be enabled")
rootCmd.PersistentFlags().StringArrayVar(¶ms.ExcludedIdentityList, "excluded_identity_list", []string{},
"List of identities which should be excluded from getting processed")
+ rootCmd.PersistentFlags().BoolVar(¶ms.EnableDiffCheck, "enable_diff_check", true, "Enable diff check")
rootCmd.PersistentFlags().StringArrayVar(¶ms.AdditionalEndpointSuffixes, "additional_endpoint_suffixes", []string{},
"Suffixes that Admiral should use to generate additional endpoints through VirtualServices")
rootCmd.PersistentFlags().StringArrayVar(¶ms.AdditionalEndpointLabelFilters, "additional_endpoint_label_filters", []string{},
"Labels that admiral will check on deployment/rollout before creating additional endpoints. '*' would indicate generating additional endpoints for all deployment/rollouts")
+ rootCmd.PersistentFlags().BoolVar(¶ms.EnableWorkloadDataStorage, "enable_workload_data_storage", false,
+ "When true, workload data will be stored in a persistent storage")
+ rootCmd.PersistentFlags().BoolVar(¶ms.DisableDefaultAutomaticFailover, "disable_default_automatic_failover", false,
+ "When set to true, automatic failover will only be enabled when there is a OutlierDetection CR or GTP defined with outlier configurations")
+ rootCmd.PersistentFlags().BoolVar(¶ms.DisableIPGeneration, "disable_ip_generation", false, "When set to true, ips will not be generated and written to service entries")
+ rootCmd.PersistentFlags().StringVar(¶ms.LabelSet.IdentityPartitionKey, "identity_partition_key", "admiral.io/identityPartition",
+ "The annotation on a deployment/rollout spec, which will be used to divide an asset based on user-specified partition. Defaults to `admiral.io/identityPartition`.")
+ rootCmd.PersistentFlags().StringArrayVar(¶ms.ExportToIdentityList, "exportto_identity_list", []string{"*"}, "List of identities to write ExportTo field for")
+ rootCmd.PersistentFlags().IntVar(¶ms.ExportToMaxNamespaces, "exportto_max_namespaces", 35, "Max number of namespaces to write in ExportTo field before just replacing with *")
+
+ // Admiral HA flags
+ rootCmd.PersistentFlags().StringVar(¶ms.HAMode, "ha_mode", "",
+ "HA Mode changes the functionality of admiral. Valid options are: "+common.HAController)
+ rootCmd.PersistentFlags().IntVar(¶ms.DNSRetries, "dns_retries", 3, "number of retries for dns resolution")
+ rootCmd.PersistentFlags().IntVar(¶ms.DNSTimeoutMs, "dns_timeout_ms", 1000, "ttl for dns resolution timeout")
+ rootCmd.PersistentFlags().StringVar(¶ms.DnsConfigFile, "dns_config_file", "/etc/resolv.conf", "the dns config file to use")
+ rootCmd.PersistentFlags().BoolVar(¶ms.EnableProxyEnvoyFilter, "enable_proxy_envoy_filter", false,
+ "When true, envoyfilter through dependency proxy will be generated")
+ rootCmd.PersistentFlags().BoolVar(¶ms.EnableDependencyProcessing, "enable_dependency_processing", false,
+ "When true, SE/DR/VS processing flow will be kicked in upon receiving any update event on dependency record")
+ rootCmd.PersistentFlags().StringVar(¶ms.SeAddressConfigmap, "se_address_configmap", "se-address-configmap",
+ "the confimap to use for generating se addresses (will be auto-created if does not exist)")
+ rootCmd.PersistentFlags().BoolVar(¶ms.EnableOutlierDetection, "enable_outlierdetection", false, "Enable/Disable OutlierDetection")
+ rootCmd.PersistentFlags().IntVar(¶ms.DeploymentOrRolloutWorkerConcurrency, "deployment_or_rollout_worker_concurrency", deploymentOrRolloutWorkerConcurrency,
+ "Deployment/Rollout Controller worker concurrency")
+ rootCmd.PersistentFlags().IntVar(¶ms.DependentClusterWorkerConcurrency, "dependent_cluster_worker_concurrency", dependentClusterWorkerConcurrency,
+ "Dependent cluster worker concurrency")
+ rootCmd.PersistentFlags().IntVar(¶ms.DependencyWarmupMultiplier, "dependency_warmup_multiplier", 2,
+ "Dependency warmup multiplier is the time used for dependency proxy warmup time multiplied by cache warmup")
+ rootCmd.PersistentFlags().Int32Var(¶ms.MaxRequestsPerConnection, "max_requests_per_connection", clusters.DefaultMaxRequestsPerConnection,
+ "Maximum number of requests per connection to a backend. Setting this parameter to 1 disables keep alive. Default 100, can go up to 2^29.")
+ rootCmd.PersistentFlags().BoolVar(¶ms.EnableServiceEntryCache, "enable_serviceentry_cache", false, "Enable/Disable Caching serviceentries")
+ rootCmd.PersistentFlags().BoolVar(¶ms.EnableDestinationRuleCache, "enable_destinationrule_cache", false, "Enable/Disable Caching destinationrules")
+ rootCmd.PersistentFlags().BoolVar(¶ms.EnableAbsoluteFQDN, "enable_absolute_fqdn", true, "Enable/Disable Absolute FQDN")
+ rootCmd.PersistentFlags().StringArrayVar(¶ms.AlphaIdentityList, "alpha_identity_list", []string{},
+ "Identities which can be used for testing of alpha features")
+ rootCmd.PersistentFlags().BoolVar(¶ms.EnableAbsoluteFQDNForLocalEndpoints, "enable_absolute_fqdn_for_local_endpoints", false, "Enable/Disable Absolute FQDN for local endpoints")
+ rootCmd.PersistentFlags().BoolVar(¶ms.EnableClientConnectionConfigProcessing, "enable_client_connection_config_processing", false, "Enable/Disable ClientConnectionConfig Processing")
+ rootCmd.PersistentFlags().StringArrayVar(¶ms.GatewayAssetAliases, "gateway_asset_aliases", []string{"Intuit.platform.servicesgateway.servicesgateway"}, "The asset aliases used for API Gateway")
+ rootCmd.PersistentFlags().BoolVar(¶ms.EnableActivePassive, "enable_active_passive", false, "Enable/Disable Active-Passive behavior")
+ rootCmd.PersistentFlags().BoolVar(¶ms.EnableSWAwareNSCaches, "enable_sw_aware_ns_caches", false, "Enable/Disable SW Aware NS Caches")
+ rootCmd.PersistentFlags().BoolVar(¶ms.EnableSyncIstioResourcesToSourceClusters, "enable_sync_istio_resources_to_source_clusters", true, "Enable/Disable Sync of Istio Resources to Source Clusters")
+ rootCmd.PersistentFlags().BoolVar(¶ms.AdmiralStateSyncerMode, "admiral_state_syncer_mode", false, "Enable/Disable admiral to run as state syncer only")
+ rootCmd.PersistentFlags().Int64Var(¶ms.DefaultWarmupDurationSecs, "default_warmup_duration_in_seconds", 45, "The default value for the warmupDurationSecs to be used on Destination Rules created by admiral")
return rootCmd
}
From 7834afdda7ea44e4296f50551e5d2a8eea58ee8d Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:49:52 -0700
Subject: [PATCH 008/235] add file admiral/crd/routingPolicy.yaml
---
admiral/crd/routingPolicy.yaml | 42 ++--------------------------------
1 file changed, 2 insertions(+), 40 deletions(-)
diff --git a/admiral/crd/routingPolicy.yaml b/admiral/crd/routingPolicy.yaml
index 1b918399..1b392644 100644
--- a/admiral/crd/routingPolicy.yaml
+++ b/admiral/crd/routingPolicy.yaml
@@ -4,49 +4,11 @@ metadata:
name: routingpolicies.admiral.io
spec:
group: admiral.io
+ version: v1alpha1
names:
kind: RoutingPolicy
- listKind: RoutingPolicyList
plural: routingpolicies
shortNames:
- rp
- rps
- singular: routingpolicy
- scope: Namespaced
- versions:
- - name: v1alpha1
- schema:
- openAPIV3Schema:
- description: generic cdr object to wrap the GlobalTrafficPolicy api
- properties:
- apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
- type: string
- kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
- type: string
- metadata:
- type: object
- spec:
- properties:
- config:
- additionalProperties:
- type: string
- type: object
- hosts:
- items:
- type: string
- type: array
- plugin:
- type: string
- type: object
- required:
- - metadata
- - spec
- type: object
- served: true
- storage: true
+ scope: Namespaced
\ No newline at end of file
From f3ac303b6ed0519862d2d7f1dab11244daa5069c Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:49:55 -0700
Subject: [PATCH 009/235] add file admiral/pkg/apis/admiral/model/doc.go
---
admiral/pkg/apis/admiral/model/doc.go | 2 ++
1 file changed, 2 insertions(+)
diff --git a/admiral/pkg/apis/admiral/model/doc.go b/admiral/pkg/apis/admiral/model/doc.go
index ca172b45..819babba 100644
--- a/admiral/pkg/apis/admiral/model/doc.go
+++ b/admiral/pkg/apis/admiral/model/doc.go
@@ -4,5 +4,7 @@ package model
//go:generate protoc -I . globalrouting.proto --go_out=plugins=grpc:.
//go:generate protoc -I . routingpolicy.proto --go_out=plugins=grpc:.
//go:generate protoc -I . dependencyproxy.proto --go_out=plugins=grpc:.
+//go:generate protoc -I . outlierdetection.proto --go_out=plugins=grpc:.
+//go:generate protoc -I . clientconnectionconfig.proto --go_out=plugins=grpc:.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:deepcopy-gen=package,register
From bcda56038dd38ce0d9a380aeb9faea9a48fe2256 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:49:58 -0700
Subject: [PATCH 010/235] add file
admiral/pkg/apis/admiral/model/globalrouting.proto
---
.../apis/admiral/model/globalrouting.proto | 21 +++++++++----------
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/admiral/pkg/apis/admiral/model/globalrouting.proto b/admiral/pkg/apis/admiral/model/globalrouting.proto
index e5b07f59..3b23d1be 100644
--- a/admiral/pkg/apis/admiral/model/globalrouting.proto
+++ b/admiral/pkg/apis/admiral/model/globalrouting.proto
@@ -79,18 +79,17 @@ message TrafficPolicy {
//Ex: dnsPrefix = west => generated service name = west.stage.servicename.global
string dnsPrefix = 4;
- message OutlierDetection {
- //REQUIRED: Minimum duration of time in seconds, the endpoint will be ejected
- int64 base_ejection_time = 1;
- //REQUIRED: No. of consecutive failures in specified interval after which the endpoint will be ejected
- uint32 consecutive_gateway_errors = 2;
- //REQUIRED: Time interval between ejection sweep analysis
- int64 interval = 3;
- }
-
- //OPTIONAL: to configure the outlierDetection in DestinationRule
- OutlierDetection outlier_detection = 5;
+ message OutlierDetection {
+ //REQUIRED: Minimum duration of time in seconds, the endpoint will be ejected
+ int64 base_ejection_time = 1;
+ //REQUIRED: No. of consecutive failures in specified interval after which the endpoint will be ejected
+ uint32 consecutive_gateway_errors = 2;
+ //REQUIRED: Time interval between ejection sweep analysis
+ int64 interval = 3;
+ }
+ //OPTIONAL: to configure the outlierDetection in DestinationRule
+ OutlierDetection outlier_detection = 5;
}
message TrafficGroup {
From fa64451350ca95786f8d2df569db61b46ff7bad1 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:50:01 -0700
Subject: [PATCH 011/235] add file
admiral/pkg/apis/admiral/model/zz_generated.deepcopy.go
---
.../admiral/model/zz_generated.deepcopy.go | 240 ++++++++++++++++++
1 file changed, 240 insertions(+)
diff --git a/admiral/pkg/apis/admiral/model/zz_generated.deepcopy.go b/admiral/pkg/apis/admiral/model/zz_generated.deepcopy.go
index 36054065..2a743676 100644
--- a/admiral/pkg/apis/admiral/model/zz_generated.deepcopy.go
+++ b/admiral/pkg/apis/admiral/model/zz_generated.deepcopy.go
@@ -21,6 +21,141 @@ limitations under the License.
package model
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClientConnectionConfig) DeepCopyInto(out *ClientConnectionConfig) {
+ *out = *in
+ if in.ConnectionPool != nil {
+ in, out := &in.ConnectionPool, &out.ConnectionPool
+ *out = new(ConnectionPool)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Tunnel != nil {
+ in, out := &in.Tunnel, &out.Tunnel
+ *out = new(Tunnel)
+ (*in).DeepCopyInto(*out)
+ }
+ out.XXX_NoUnkeyedLiteral = in.XXX_NoUnkeyedLiteral
+ if in.XXX_unrecognized != nil {
+ in, out := &in.XXX_unrecognized, &out.XXX_unrecognized
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionConfig.
+func (in *ClientConnectionConfig) DeepCopy() *ClientConnectionConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ClientConnectionConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConnectionPool) DeepCopyInto(out *ConnectionPool) {
+ *out = *in
+ if in.Tcp != nil {
+ in, out := &in.Tcp, &out.Tcp
+ *out = new(ConnectionPool_TCP)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Http != nil {
+ in, out := &in.Http, &out.Http
+ *out = new(ConnectionPool_HTTP)
+ (*in).DeepCopyInto(*out)
+ }
+ out.XXX_NoUnkeyedLiteral = in.XXX_NoUnkeyedLiteral
+ if in.XXX_unrecognized != nil {
+ in, out := &in.XXX_unrecognized, &out.XXX_unrecognized
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPool.
+func (in *ConnectionPool) DeepCopy() *ConnectionPool {
+ if in == nil {
+ return nil
+ }
+ out := new(ConnectionPool)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConnectionPool_HTTP) DeepCopyInto(out *ConnectionPool_HTTP) {
+ *out = *in
+ out.XXX_NoUnkeyedLiteral = in.XXX_NoUnkeyedLiteral
+ if in.XXX_unrecognized != nil {
+ in, out := &in.XXX_unrecognized, &out.XXX_unrecognized
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPool_HTTP.
+func (in *ConnectionPool_HTTP) DeepCopy() *ConnectionPool_HTTP {
+ if in == nil {
+ return nil
+ }
+ out := new(ConnectionPool_HTTP)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConnectionPool_TCP) DeepCopyInto(out *ConnectionPool_TCP) {
+ *out = *in
+ if in.TcpKeepalive != nil {
+ in, out := &in.TcpKeepalive, &out.TcpKeepalive
+ *out = new(ConnectionPool_TcpKeepalive)
+ (*in).DeepCopyInto(*out)
+ }
+ out.XXX_NoUnkeyedLiteral = in.XXX_NoUnkeyedLiteral
+ if in.XXX_unrecognized != nil {
+ in, out := &in.XXX_unrecognized, &out.XXX_unrecognized
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPool_TCP.
+func (in *ConnectionPool_TCP) DeepCopy() *ConnectionPool_TCP {
+ if in == nil {
+ return nil
+ }
+ out := new(ConnectionPool_TCP)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConnectionPool_TcpKeepalive) DeepCopyInto(out *ConnectionPool_TcpKeepalive) {
+ *out = *in
+ out.XXX_NoUnkeyedLiteral = in.XXX_NoUnkeyedLiteral
+ if in.XXX_unrecognized != nil {
+ in, out := &in.XXX_unrecognized, &out.XXX_unrecognized
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPool_TcpKeepalive.
+func (in *ConnectionPool_TcpKeepalive) DeepCopy() *ConnectionPool_TcpKeepalive {
+ if in == nil {
+ return nil
+ }
+ out := new(ConnectionPool_TcpKeepalive)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Dependency) DeepCopyInto(out *Dependency) {
*out = *in
@@ -147,6 +282,62 @@ func (in *GlobalTrafficPolicy) DeepCopy() *GlobalTrafficPolicy {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OutlierConfig) DeepCopyInto(out *OutlierConfig) {
+ *out = *in
+ out.XXX_NoUnkeyedLiteral = in.XXX_NoUnkeyedLiteral
+ if in.XXX_unrecognized != nil {
+ in, out := &in.XXX_unrecognized, &out.XXX_unrecognized
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutlierConfig.
+func (in *OutlierConfig) DeepCopy() *OutlierConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(OutlierConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OutlierDetection) DeepCopyInto(out *OutlierDetection) {
+ *out = *in
+ if in.OutlierConfig != nil {
+ in, out := &in.OutlierConfig, &out.OutlierConfig
+ *out = new(OutlierConfig)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ out.XXX_NoUnkeyedLiteral = in.XXX_NoUnkeyedLiteral
+ if in.XXX_unrecognized != nil {
+ in, out := &in.XXX_unrecognized, &out.XXX_unrecognized
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutlierDetection.
+func (in *OutlierDetection) DeepCopy() *OutlierDetection {
+ if in == nil {
+ return nil
+ }
+ out := new(OutlierDetection)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Proxy) DeepCopyInto(out *Proxy) {
*out = *in
@@ -239,6 +430,11 @@ func (in *TrafficPolicy) DeepCopyInto(out *TrafficPolicy) {
}
}
}
+ if in.OutlierDetection != nil {
+ in, out := &in.OutlierDetection, &out.OutlierDetection
+ *out = new(TrafficPolicy_OutlierDetection)
+ (*in).DeepCopyInto(*out)
+ }
out.XXX_NoUnkeyedLiteral = in.XXX_NoUnkeyedLiteral
if in.XXX_unrecognized != nil {
in, out := &in.XXX_unrecognized, &out.XXX_unrecognized
@@ -257,3 +453,47 @@ func (in *TrafficPolicy) DeepCopy() *TrafficPolicy {
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TrafficPolicy_OutlierDetection) DeepCopyInto(out *TrafficPolicy_OutlierDetection) {
+ *out = *in
+ out.XXX_NoUnkeyedLiteral = in.XXX_NoUnkeyedLiteral
+ if in.XXX_unrecognized != nil {
+ in, out := &in.XXX_unrecognized, &out.XXX_unrecognized
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficPolicy_OutlierDetection.
+func (in *TrafficPolicy_OutlierDetection) DeepCopy() *TrafficPolicy_OutlierDetection {
+ if in == nil {
+ return nil
+ }
+ out := new(TrafficPolicy_OutlierDetection)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Tunnel) DeepCopyInto(out *Tunnel) {
+ *out = *in
+ out.XXX_NoUnkeyedLiteral = in.XXX_NoUnkeyedLiteral
+ if in.XXX_unrecognized != nil {
+ in, out := &in.XXX_unrecognized, &out.XXX_unrecognized
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tunnel.
+func (in *Tunnel) DeepCopy() *Tunnel {
+ if in == nil {
+ return nil
+ }
+ out := new(Tunnel)
+ in.DeepCopyInto(out)
+ return out
+}
From b8ca8445b3f4a3305d290e87d2173035ef80e0d8 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:50:04 -0700
Subject: [PATCH 012/235] add file
admiral/pkg/apis/admiral/routes/handler_test.go
---
.../pkg/apis/admiral/routes/handler_test.go | 160 +++++++++++++++++-
1 file changed, 152 insertions(+), 8 deletions(-)
diff --git a/admiral/pkg/apis/admiral/routes/handler_test.go b/admiral/pkg/apis/admiral/routes/handler_test.go
index bce660fe..f3831c35 100644
--- a/admiral/pkg/apis/admiral/routes/handler_test.go
+++ b/admiral/pkg/apis/admiral/routes/handler_test.go
@@ -10,6 +10,7 @@ import (
"testing"
"github.com/gorilla/mux"
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
"github.com/istio-ecosystem/admiral/admiral/pkg/clusters"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/istio"
@@ -17,7 +18,7 @@ import (
"github.com/stretchr/testify/assert"
"istio.io/client-go/pkg/apis/networking/v1alpha3"
istiofake "istio.io/client-go/pkg/clientset/versioned/fake"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestReturnSuccessGET(t *testing.T) {
@@ -113,14 +114,14 @@ func TestGetServiceEntriesByCluster(t *testing.T) {
name: "failure with admiral not monitored cluster",
clusterName: "bar",
remoteControllers: nil,
- expectedErr: "Admiral is not monitoring cluster bar\n",
+ expectedErr: "admiral is not monitoring cluster bar\n",
statusCode: 404,
},
{
name: "failure with cluster not provided request",
clusterName: "",
remoteControllers: nil,
- expectedErr: "Cluster name not provided as part of the request\n",
+ expectedErr: "cluster name not provided as part of the request\n",
statusCode: 400,
},
{
@@ -133,7 +134,7 @@ func TestGetServiceEntriesByCluster(t *testing.T) {
},
},
},
- expectedErr: "No service entries configured for cluster - cluster1",
+ expectedErr: "no service entries configured for cluster - cluster1",
statusCode: 200,
},
{
@@ -162,16 +163,19 @@ func TestGetServiceEntriesByCluster(t *testing.T) {
}
opts.RemoteRegistry = rr
if c.name == "success with service entry for cluster" {
- fakeIstioClient.NetworkingV1alpha3().ServiceEntries("admiral-sync").Create(ctx, &v1alpha3.ServiceEntry{}, v1.CreateOptions{})
+ fakeIstioClient.NetworkingV1alpha3().ServiceEntries("admiral-sync").Create(ctx, &v1alpha3.ServiceEntry{}, metaV1.CreateOptions{})
}
opts.GetServiceEntriesByCluster(w, r)
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
- if string(body) != c.expectedErr && c.name != "success with service entry for cluster" {
- t.Errorf("Error mismatch. Got %v, want %v", string(body), c.expectedErr)
+ if c.name != "success with service entry for cluster" {
+ if string(body) != c.expectedErr {
+ t.Errorf("Error mismatch, got: %v, want: %v", string(body), c.expectedErr)
+ }
}
+
if resp.StatusCode != c.statusCode {
- t.Errorf("Status code mismatch. Got %v, want %v", resp.StatusCode, c.statusCode)
+ t.Errorf("Status code mismatch, got: %v, want: %v", resp.StatusCode, c.statusCode)
}
})
}
@@ -229,3 +233,143 @@ func TestGetServiceEntriesByIdentity(t *testing.T) {
})
}
}
+
+func TestGetGlobalTrafficPolicyByIdentityAndEnv(t *testing.T) {
+ globalTrafficCache := &mockGlobalTrafficCache{
+ identityCache: map[string]*v1.GlobalTrafficPolicy{
+ "stage.testID": {
+ ObjectMeta: metaV1.ObjectMeta{
+ Namespace: "stage-testns",
+ Name: "stage-testapp",
+ Labels: map[string]string{"identity": "testID", "admiral.io/env": "stage"},
+ },
+ },
+ "default.testID": {
+ ObjectMeta: metaV1.ObjectMeta{
+ Namespace: "default-testns",
+ Name: "default-testapp",
+ Labels: map[string]string{"identity": "testID", "admiral.io/env": "stage"},
+ },
+ },
+ },
+ }
+ validOpts := RouteOpts{
+ RemoteRegistry: &clusters.RemoteRegistry{
+ AdmiralCache: &clusters.AdmiralCache{
+ SeClusterCache: common.NewMapOfMaps(),
+ GlobalTrafficCache: globalTrafficCache,
+ },
+ },
+ }
+ testCases := []struct {
+ name string
+ identity string
+ env string
+ opts RouteOpts
+ expectedStatus int
+ expectedError string
+ expectedGTPName string
+ }{
+ {
+ name: "nil RemoteRegistry in RouteOpts should result in InternalServerError",
+ identity: "testID",
+ env: "stage",
+ opts: RouteOpts{},
+ expectedStatus: 500,
+ expectedError: "invalid remote registry cache",
+ },
+ {
+ name: "nil RemoteRegistry.AdmiralCache in RouteOpts should result in InternalServerError",
+ identity: "testID",
+ env: "stage",
+ opts: RouteOpts{
+ RemoteRegistry: &clusters.RemoteRegistry{},
+ },
+ expectedStatus: 500,
+ expectedError: "invalid remote registry cache",
+ },
+ {
+ name: "missing identity path param should result in HTTP bad request",
+ identity: "",
+ env: "stage",
+ opts: validOpts,
+ expectedStatus: 400,
+ expectedError: "identity not provided as part of the path param",
+ },
+ {
+ name: "missing env query param should return a valid 200 response with a valid GTP payload",
+ identity: "testID",
+ env: "",
+ opts: validOpts,
+ expectedStatus: 200,
+ expectedGTPName: "default-testapp",
+ },
+ {
+ name: "querying for an invalid gtp should result in a 404",
+ identity: "invalidGTP",
+ env: "stage",
+ opts: validOpts,
+ expectedStatus: 404,
+ expectedError: "globaltraffic policy with identity: invalidGTP and env: stage was not found",
+ },
+ {
+ name: "valid GTP queried should return a valid 200 response with a valid GTP payload",
+ identity: "testID",
+ env: "stage",
+ opts: validOpts,
+ expectedStatus: 200,
+ expectedGTPName: "stage-testapp",
+ },
+ }
+
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ r := httptest.NewRequest("GET", "http://admiral.test.com/identity/{id}/globaltrafficpolicy?env="+c.env, nil)
+ r = mux.SetURLVars(r, map[string]string{"identity": c.identity})
+ w := httptest.NewRecorder()
+ c.opts.GetGlobalTrafficPolicyByIdentityAndEnv(w, r)
+ res := w.Result()
+ data, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Error(err)
+ }
+ if res.StatusCode != c.expectedStatus {
+ t.Errorf("expected http status %d got %d", c.expectedStatus, res.StatusCode)
+ }
+ if c.expectedError != "" {
+ responseJSON := make(map[string]string)
+ json.Unmarshal(data, &responseJSON)
+ if responseJSON["error"] != c.expectedError {
+ t.Errorf("expected error '%s' got '%s'", c.expectedError, responseJSON["error"])
+ }
+ } else {
+ var responseGTP *v1.GlobalTrafficPolicy
+ json.Unmarshal(data, &responseGTP)
+ if responseGTP == nil {
+ t.Error("expected response GTP to be not nil")
+ }
+ if c.expectedGTPName != responseGTP.Name {
+ t.Errorf("expected GTP %s got GTP %s", c.expectedGTPName, responseGTP.Name)
+ }
+ }
+ res.Body.Close()
+ })
+ }
+
+}
+
+type mockGlobalTrafficCache struct {
+ identityCache map[string]*v1.GlobalTrafficPolicy
+}
+
+func (m *mockGlobalTrafficCache) GetFromIdentity(identity string, environment string) (*v1.GlobalTrafficPolicy, error) {
+ return m.identityCache[common.ConstructKeyWithEnvAndIdentity(environment, identity)], nil
+}
+
+func (*mockGlobalTrafficCache) Put(*v1.GlobalTrafficPolicy) error {
+ return nil
+}
+
+func (*mockGlobalTrafficCache) Delete(string, string) error {
+ return nil
+}
From 4bb63da50a4fb0456ecabf05b91a164e7af8702d Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:50:07 -0700
Subject: [PATCH 013/235] add file admiral/pkg/apis/admiral/routes/handlers.go
---
admiral/pkg/apis/admiral/routes/handlers.go | 120 +++++++++++++++-----
1 file changed, 92 insertions(+), 28 deletions(-)
diff --git a/admiral/pkg/apis/admiral/routes/handlers.go b/admiral/pkg/apis/admiral/routes/handlers.go
index 79267333..ee27ab0c 100644
--- a/admiral/pkg/apis/admiral/routes/handlers.go
+++ b/admiral/pkg/apis/admiral/routes/handlers.go
@@ -1,16 +1,19 @@
package routes
import (
+ "context"
"encoding/json"
"fmt"
- "log"
"net/http"
"strconv"
"strings"
+ commonUtil "github.com/istio-ecosystem/admiral/admiral/pkg/util"
+
"github.com/gorilla/mux"
"github.com/istio-ecosystem/admiral/admiral/pkg/clusters"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "github.com/sirupsen/logrus"
"istio.io/client-go/pkg/apis/networking/v1alpha3"
)
@@ -35,34 +38,34 @@ If Running in passive mode, the health check returns 502 which forces DNS looku
*/
func (opts *RouteOpts) ReturnSuccessGET(w http.ResponseWriter, r *http.Request) {
- allQueryParams:= r.URL.Query()
+ allQueryParams := r.URL.Query()
checkIfReadOnlyStringVal := allQueryParams.Get("checkifreadonly")
//Remove all spaces
- checkIfReadOnlyStringVal = strings.ReplaceAll(checkIfReadOnlyStringVal," ","")
+ checkIfReadOnlyStringVal = strings.ReplaceAll(checkIfReadOnlyStringVal, " ", "")
// checkIfReadOnlyStringVal will be empty in case ""checkifreadonly" query param is not sent in the request. checkIfReadOnlyBoolVal will be false
checkIfReadOnlyBoolVal, err := strconv.ParseBool(checkIfReadOnlyStringVal)
var response string
- if len(checkIfReadOnlyStringVal) ==0 || nil==err {
- if checkIfReadOnlyBoolVal{
+ if len(checkIfReadOnlyStringVal) == 0 || nil == err {
+ if checkIfReadOnlyBoolVal {
- if clusters.CurrentAdmiralState.ReadOnly{
+ if commonUtil.IsAdmiralReadOnly() {
//Force fail health check if Admiral is in Readonly mode
w.WriteHeader(503)
- }else {
+ } else {
w.WriteHeader(200)
}
- }else {
+ } else {
w.WriteHeader(200)
}
response = fmt.Sprintf("Heath check method called: %v, URI: %v, Method: %v\n", r.Host, r.RequestURI, r.Method)
- }else {
+ } else {
w.WriteHeader(400)
- response = fmt.Sprintf("Health check method called with bad query param value %v for checkifreadonly",checkIfReadOnlyStringVal)
+ response = fmt.Sprintf("Health check method called with bad query param value %v for checkifreadonly", checkIfReadOnlyStringVal)
}
_, writeErr := w.Write([]byte(response))
if writeErr != nil {
- log.Printf("Error writing body: %v", writeErr)
+ logrus.Printf("Error writing body: %v", writeErr)
http.Error(w, "can't write body", http.StatusInternalServerError)
}
}
@@ -78,12 +81,12 @@ func (opts *RouteOpts) GetClusters(w http.ResponseWriter, r *http.Request) {
out, err := json.Marshal(clusterList)
if err != nil {
- log.Printf("Failed to marshall response for GetClusters call")
+ logrus.Printf("Failed to marshall response for GetClusters call")
http.Error(w, "Failed to marshall response", http.StatusInternalServerError)
} else {
if len(clusterList) == 0 {
message := "No cluster is monitored by admiral"
- log.Println(message)
+ logrus.Println(message)
w.WriteHeader(200)
out, _ = json.Marshal(message)
} else {
@@ -92,14 +95,16 @@ func (opts *RouteOpts) GetClusters(w http.ResponseWriter, r *http.Request) {
}
_, err := w.Write(out)
if err != nil {
- log.Println("Failed to write message: ", err)
+ logrus.Println("Failed to write message: ", err)
}
}
}
func (opts *RouteOpts) GetServiceEntriesByCluster(w http.ResponseWriter, r *http.Request) {
+ ctxLogger := logrus.WithFields(logrus.Fields{
+ "txId": common.FetchTxIdOrGenNew(context.TODO()),
+ })
defer r.Body.Close()
-
params := mux.Vars(r)
clusterName := strings.Trim(params["clustername"], " ")
@@ -109,44 +114,87 @@ func (opts *RouteOpts) GetServiceEntriesByCluster(w http.ResponseWriter, r *http
if clusterName != "" {
- serviceEntriesByCluster, err := clusters.GetServiceEntriesByCluster(ctx, clusterName, opts.RemoteRegistry)
+ serviceEntriesByCluster, err := clusters.GetServiceEntriesByCluster(ctxLogger, ctx, clusterName, opts.RemoteRegistry)
if err != nil {
- log.Printf("API call get service entry by cluster failed for clustername %v with Error: %v", clusterName, err.Error())
- if strings.Contains(err.Error(), "Admiral is not monitoring cluster") {
+ logrus.Printf("API call get service entry by cluster failed for clustername %v with Error: %v", clusterName, err.Error())
+ if strings.Contains(strings.ToLower(err.Error()), strings.ToLower("Admiral is not monitoring cluster")) {
http.Error(w, err.Error(), http.StatusNotFound)
} else {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
} else {
if len(serviceEntriesByCluster) == 0 {
- log.Printf("API call get service entry by cluster failed for clustername %v with Error: %v", clusterName, "No service entries configured for cluster - "+clusterName)
+ logrus.Printf("API call get service entry by cluster failed for clustername %v with Error: %v", clusterName, "no service entries configured for cluster - "+clusterName)
w.WriteHeader(200)
- _, err := w.Write([]byte(fmt.Sprintf("No service entries configured for cluster - %s", clusterName)))
+ _, err := w.Write([]byte(fmt.Sprintf("no service entries configured for cluster - %s", clusterName)))
if err != nil {
- log.Println("Error writing body: ", err)
+ logrus.Println("Error writing body: ", err)
}
} else {
response = serviceEntriesByCluster
out, err := json.Marshal(response)
if err != nil {
- log.Printf("Failed to marshall response for GetServiceEntriesByCluster call")
+ logrus.Printf("Failed to marshall response for GetServiceEntriesByCluster call")
http.Error(w, fmt.Sprintf("Failed to marshall response for getting service entries api for cluster %s", clusterName), http.StatusInternalServerError)
} else {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(200)
_, err := w.Write(out)
if err != nil {
- log.Println("failed to write resp body: ", err)
+ logrus.Println("failed to write resp body: ", err)
}
}
}
}
} else {
- log.Printf("Cluster name not provided as part of the request")
- http.Error(w, "Cluster name not provided as part of the request", http.StatusBadRequest)
+ logrus.Printf("cluster name not provided as part of the request")
+ http.Error(w, "cluster name not provided as part of the request", http.StatusBadRequest)
+ }
+}
+
+// GetGlobalTrafficPolicyByIdentityAndEnv handler returns GlobalTrafficPolicy resource based on
+// the matching env and identity passed as query parameters
+func (opts *RouteOpts) GetGlobalTrafficPolicyByIdentityAndEnv(w http.ResponseWriter, r *http.Request) {
+
+ pathParams := mux.Vars(r)
+ identity, ok := pathParams["identity"]
+ if !ok || identity == "" {
+ generateErrorResponse(w, http.StatusBadRequest, "identity not provided as part of the path param")
+ return
+ }
+
+ env := r.FormValue("env")
+ if env == "" {
+ env = "default"
+ }
+
+ if opts.RemoteRegistry == nil || opts.RemoteRegistry.AdmiralCache == nil {
+ logrus.Warn("invalid remote registry cache")
+ generateErrorResponse(w, http.StatusInternalServerError, "invalid remote registry cache")
+ return
+ }
+
+ gtps := opts.RemoteRegistry.AdmiralCache.GlobalTrafficCache
+
+ if gtps == nil {
+ logrus.Print("globaltrafficcache not initialized")
+ generateErrorResponse(w, http.StatusInternalServerError, "invalid globaltrafficcache")
+ return
}
+
+ gtp, err := gtps.GetFromIdentity(identity, env)
+ if err != nil {
+ logrus.Warn(err)
+ generateErrorResponse(w, http.StatusInternalServerError, err.Error())
+ }
+ if gtp == nil {
+ generateErrorResponse(w, http.StatusNotFound, fmt.Sprintf("globaltraffic policy with identity: %s and env: %s was not found", identity, env))
+ return
+ }
+
+ generateResponseJSON(w, http.StatusOK, gtp)
}
func (opts *RouteOpts) GetServiceEntriesByIdentity(w http.ResponseWriter, r *http.Request) {
@@ -171,20 +219,36 @@ func (opts *RouteOpts) GetServiceEntriesByIdentity(w http.ResponseWriter, r *htt
response = append(response, identityServiceEntry)
}
})
+
out, err := json.Marshal(response)
if err != nil {
- log.Printf("Failed to marshall response GetServiceEntriesByIdentity call")
+ logrus.Printf("Failed to marshall response GetServiceEntriesByIdentity call")
http.Error(w, fmt.Sprintf("Failed to marshall response for getting service entries api for identity %s", identity), http.StatusInternalServerError)
} else {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(200)
_, err := w.Write(out)
if err != nil {
- log.Println("failed to write resp body", err)
+ logrus.Println("failed to write resp body", err)
}
}
} else {
- log.Printf("Identity not provided as part of the request")
+ logrus.Printf("Identity not provided as part of the request")
http.Error(w, "Identity not provided as part of the request", http.StatusBadRequest)
}
}
+
+func generateErrorResponse(w http.ResponseWriter, code int, message string) {
+ generateResponseJSON(w, code, map[string]string{"error": message})
+}
+
+func generateResponseJSON(w http.ResponseWriter, code int, payload interface{}) {
+ response, err := json.Marshal(payload)
+ if err != nil {
+ logrus.Printf("failed to serialize the payload due to %v", err)
+ response = []byte("{\"error\": \"malformed response payload\"}")
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(code)
+ w.Write(response)
+}
From e4a025aadfa308f423202571b722e57cf04d316f Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:50:10 -0700
Subject: [PATCH 014/235] add file admiral/pkg/apis/admiral/routes/routes.go
---
admiral/pkg/apis/admiral/routes/routes.go | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/admiral/pkg/apis/admiral/routes/routes.go b/admiral/pkg/apis/admiral/routes/routes.go
index 660b6d0f..f68c0726 100644
--- a/admiral/pkg/apis/admiral/routes/routes.go
+++ b/admiral/pkg/apis/admiral/routes/routes.go
@@ -1,13 +1,13 @@
package routes
import (
+ "log"
+ "net/http"
+
"github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/filters"
"github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/server"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
"github.com/prometheus/client_golang/prometheus/promhttp"
- "k8s.io/client-go/tools/clientcmd"
- "log"
- "net/http"
)
var Filter = server.Filters{
@@ -16,8 +16,7 @@ var Filter = server.Filters{
func NewAdmiralAPIServer(opts *RouteOpts) server.Routes {
// create the config from the path
- config, err := clientcmd.BuildConfigFromFlags("", opts.KubeconfigPath)
-
+ config, err := opts.RemoteRegistry.ClientLoader.LoadKubeClientFromPath(opts.KubeconfigPath)
if err != nil || config == nil {
log.Printf("could not retrieve kubeconfig: %v", err)
}
@@ -48,6 +47,13 @@ func NewAdmiralAPIServer(opts *RouteOpts) server.Routes {
Pattern: "/identity/{identity}/serviceentries",
HandlerFunc: opts.GetServiceEntriesByIdentity,
},
+ server.Route{
+ Name: "Get the GlobalTrafficPolicy based on the env and identity/asset alias",
+ Method: "GET",
+ Pattern: "/identity/{identity}/globaltrafficpolicy",
+ Query: "env",
+ HandlerFunc: opts.GetGlobalTrafficPolicyByIdentityAndEnv,
+ },
}
}
From bdbdc6f1f600d0e9d9a9be09bf3e6f26c6e842bd Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:50:13 -0700
Subject: [PATCH 015/235] add file admiral/pkg/apis/admiral/server/server.go
---
admiral/pkg/apis/admiral/server/server.go | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/admiral/pkg/apis/admiral/server/server.go b/admiral/pkg/apis/admiral/server/server.go
index 02a07115..7ed4834d 100644
--- a/admiral/pkg/apis/admiral/server/server.go
+++ b/admiral/pkg/apis/admiral/server/server.go
@@ -2,12 +2,15 @@ package server
import (
"context"
- "github.com/gorilla/mux"
- "github.com/istio-ecosystem/admiral/admiral/pkg/clusters"
"log"
"net/http"
"strconv"
"strings"
+
+ "github.com/gorilla/mux"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/clusters"
+
+ _ "net/http/pprof"
)
type Service struct {
@@ -20,7 +23,7 @@ type Service struct {
// filter definition as a func
type FilterHandlerFunc func(inner http.Handler, name string) http.Handler
-//structs used to collect routes and filters
+// structs used to collect routes and filters
type Filter struct {
HandlerFunc FilterHandlerFunc
}
@@ -47,6 +50,9 @@ func (s *Service) Start(ctx context.Context, port int, routes Routes, filter []F
go waitForStop(s)
router := s.newRouter(routes, filter)
+ if port == 8080 {
+ router.PathPrefix("/debug/").Handler(http.DefaultServeMux)
+ }
s.server = http.Server{Addr: ":" + strconv.Itoa(port), Handler: router}
From e02ea212135e057ea9a0fb0c83bd719a36b5e192 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:50:16 -0700
Subject: [PATCH 016/235] add file
admiral/pkg/client/clientset/versioned/clientset.go
---
.../client/clientset/versioned/clientset.go | 19 +++++++++----------
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/admiral/pkg/client/clientset/versioned/clientset.go b/admiral/pkg/client/clientset/versioned/clientset.go
index e41fbf87..e6fc532a 100644
--- a/admiral/pkg/client/clientset/versioned/clientset.go
+++ b/admiral/pkg/client/clientset/versioned/clientset.go
@@ -20,10 +20,9 @@ package versioned
import (
"fmt"
- "log"
"net/http"
- admiralv1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/typed/admiral/v1"
+ admiralv1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
@@ -31,19 +30,19 @@ import (
type Interface interface {
Discovery() discovery.DiscoveryInterface
- AdmiralV1() admiralv1.AdmiralV1Interface
+ AdmiralV1alpha1() admiralv1alpha1.AdmiralV1alpha1Interface
}
// Clientset contains the clients for groups. Each group has exactly one
// version included in a Clientset.
type Clientset struct {
*discovery.DiscoveryClient
- admiralV1 *admiralv1.AdmiralV1Client
+ admiralV1alpha1 *admiralv1alpha1.AdmiralV1alpha1Client
}
-// AdmiralV1 retrieves the AdmiralV1Client
-func (c *Clientset) AdmiralV1() admiralv1.AdmiralV1Interface {
- return c.admiralV1
+// AdmiralV1alpha1 retrieves the AdmiralV1alpha1Client
+func (c *Clientset) AdmiralV1alpha1() admiralv1alpha1.AdmiralV1alpha1Interface {
+ return c.admiralV1alpha1
}
// Discovery retrieves the DiscoveryClient
@@ -90,7 +89,7 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset,
var cs Clientset
var err error
- cs.admiralV1, err = admiralv1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ cs.admiralV1alpha1, err = admiralv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
}
@@ -107,7 +106,7 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset,
func NewForConfigOrDie(c *rest.Config) *Clientset {
cs, err := NewForConfig(c)
if err != nil {
- log.Fatal(err)
+ panic(err)
}
return cs
}
@@ -115,7 +114,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset {
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
var cs Clientset
- cs.admiralV1 = admiralv1.New(c)
+ cs.admiralV1alpha1 = admiralv1alpha1.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
return &cs
From 9b5d34920dc041253acce22bc1b5bafefa51fd03 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:50:19 -0700
Subject: [PATCH 017/235] add file
admiral/pkg/client/clientset/versioned/fake/clientset_generated.go
---
.../clientset/versioned/fake/clientset_generated.go | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/admiral/pkg/client/clientset/versioned/fake/clientset_generated.go b/admiral/pkg/client/clientset/versioned/fake/clientset_generated.go
index 728dbc59..1726cd4e 100644
--- a/admiral/pkg/client/clientset/versioned/fake/clientset_generated.go
+++ b/admiral/pkg/client/clientset/versioned/fake/clientset_generated.go
@@ -20,8 +20,8 @@ package fake
import (
clientset "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned"
- admiralv1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/typed/admiral/v1"
- fakeadmiralv1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake"
+ admiralv1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1"
+ fakeadmiralv1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
@@ -79,7 +79,7 @@ var (
_ testing.FakeClient = &Clientset{}
)
-// AdmiralV1 retrieves the AdmiralV1Client
-func (c *Clientset) AdmiralV1() admiralv1.AdmiralV1Interface {
- return &fakeadmiralv1.FakeAdmiralV1{Fake: &c.Fake}
+// AdmiralV1alpha1 retrieves the AdmiralV1alpha1Client
+func (c *Clientset) AdmiralV1alpha1() admiralv1alpha1.AdmiralV1alpha1Interface {
+ return &fakeadmiralv1alpha1.FakeAdmiralV1alpha1{Fake: &c.Fake}
}
From f679dc7a37efe2cb12f3dcae5ca554c9bced0d82 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:50:22 -0700
Subject: [PATCH 018/235] add file
admiral/pkg/client/clientset/versioned/fake/register.go
---
.../clientset/versioned/fake/register.go | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/admiral/pkg/client/clientset/versioned/fake/register.go b/admiral/pkg/client/clientset/versioned/fake/register.go
index c4b33625..c6f018b2 100644
--- a/admiral/pkg/client/clientset/versioned/fake/register.go
+++ b/admiral/pkg/client/clientset/versioned/fake/register.go
@@ -19,7 +19,7 @@ limitations under the License.
package fake
import (
- admiralv1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
+ admiralv1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -31,20 +31,20 @@ var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
- admiralv1.AddToScheme,
+ admiralv1alpha1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
-// import (
-// "k8s.io/client-go/kubernetes"
-// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
-// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
-// )
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
//
-// kclientset, _ := kubernetes.NewForConfig(c)
-// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
From 929f5415c2818d900d035fc8629c9daf1127ef30 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:50:25 -0700
Subject: [PATCH 019/235] add file
admiral/pkg/client/clientset/versioned/scheme/register.go
---
.../clientset/versioned/scheme/register.go | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/admiral/pkg/client/clientset/versioned/scheme/register.go b/admiral/pkg/client/clientset/versioned/scheme/register.go
index 6d706b8f..ecb7483e 100644
--- a/admiral/pkg/client/clientset/versioned/scheme/register.go
+++ b/admiral/pkg/client/clientset/versioned/scheme/register.go
@@ -19,7 +19,7 @@ limitations under the License.
package scheme
import (
- admiralv1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
+ admiralv1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -31,20 +31,20 @@ var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
- admiralv1.AddToScheme,
+ admiralv1alpha1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
-// import (
-// "k8s.io/client-go/kubernetes"
-// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
-// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
-// )
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
//
-// kclientset, _ := kubernetes.NewForConfig(c)
-// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
From b90a7a6e5f48e5a90ae121a32682d9e977c24b1f Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:50:28 -0700
Subject: [PATCH 020/235] add file
admiral/pkg/client/informers/externalversions/admiral/interface.go
---
.../informers/externalversions/admiral/interface.go | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/admiral/pkg/client/informers/externalversions/admiral/interface.go b/admiral/pkg/client/informers/externalversions/admiral/interface.go
index 5ba8ebcc..3d0542cc 100644
--- a/admiral/pkg/client/informers/externalversions/admiral/interface.go
+++ b/admiral/pkg/client/informers/externalversions/admiral/interface.go
@@ -19,14 +19,14 @@ limitations under the License.
package admiral
import (
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/informers/externalversions/admiral/v1"
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/informers/externalversions/admiral/v1alpha1"
internalinterfaces "github.com/istio-ecosystem/admiral/admiral/pkg/client/informers/externalversions/internalinterfaces"
)
// Interface provides access to each of this group's versions.
type Interface interface {
- // V1 provides access to shared informers for resources in V1.
- V1() v1.Interface
+ // V1alpha1 provides access to shared informers for resources in V1alpha1.
+ V1alpha1() v1alpha1.Interface
}
type group struct {
@@ -40,7 +40,7 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
-// V1 returns a new v1.Interface.
-func (g *group) V1() v1.Interface {
- return v1.New(g.factory, g.namespace, g.tweakListOptions)
+// V1alpha1 returns a new v1alpha1.Interface.
+func (g *group) V1alpha1() v1alpha1.Interface {
+ return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
}
From 1c942914a62d37a59c6c10801331e6b099f4c3c3 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:50:31 -0700
Subject: [PATCH 021/235] add file
admiral/pkg/client/informers/externalversions/generic.go
---
.../informers/externalversions/generic.go | 26 ++++++++++++-------
1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/admiral/pkg/client/informers/externalversions/generic.go b/admiral/pkg/client/informers/externalversions/generic.go
index deb1f07f..1f61c3f1 100644
--- a/admiral/pkg/client/informers/externalversions/generic.go
+++ b/admiral/pkg/client/informers/externalversions/generic.go
@@ -21,7 +21,7 @@ package externalversions
import (
"fmt"
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
@@ -52,15 +52,21 @@ func (f *genericInformer) Lister() cache.GenericLister {
// TODO extend this to unknown resources with a client pool
func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
switch resource {
- // Group=admiral.io, Version=v1
- case v1.SchemeGroupVersion.WithResource("dependencies"):
- return &genericInformer{resource: resource.GroupResource(), informer: f.Admiral().V1().Dependencies().Informer()}, nil
- case v1.SchemeGroupVersion.WithResource("dependencyproxies"):
- return &genericInformer{resource: resource.GroupResource(), informer: f.Admiral().V1().DependencyProxies().Informer()}, nil
- case v1.SchemeGroupVersion.WithResource("globaltrafficpolicies"):
- return &genericInformer{resource: resource.GroupResource(), informer: f.Admiral().V1().GlobalTrafficPolicies().Informer()}, nil
- case v1.SchemeGroupVersion.WithResource("routingpolicies"):
- return &genericInformer{resource: resource.GroupResource(), informer: f.Admiral().V1().RoutingPolicies().Informer()}, nil
+ // Group=admiral.io, Version=v1alpha1
+ case v1alpha1.SchemeGroupVersion.WithResource("clientconnectionconfigs"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Admiral().V1alpha1().ClientConnectionConfigs().Informer()}, nil
+ case v1alpha1.SchemeGroupVersion.WithResource("dependencies"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Admiral().V1alpha1().Dependencies().Informer()}, nil
+ case v1alpha1.SchemeGroupVersion.WithResource("dependencyproxies"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Admiral().V1alpha1().DependencyProxies().Informer()}, nil
+ case v1alpha1.SchemeGroupVersion.WithResource("globaltrafficpolicies"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Admiral().V1alpha1().GlobalTrafficPolicies().Informer()}, nil
+ case v1alpha1.SchemeGroupVersion.WithResource("outlierdetections"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Admiral().V1alpha1().OutlierDetections().Informer()}, nil
+ case v1alpha1.SchemeGroupVersion.WithResource("routingpolicies"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Admiral().V1alpha1().RoutingPolicies().Informer()}, nil
+ case v1alpha1.SchemeGroupVersion.WithResource("trafficconfigs"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Admiral().V1alpha1().TrafficConfigs().Informer()}, nil
}
From 3ad9e25c73e9c939952a192fd129ac2f68f948ab Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:50:34 -0700
Subject: [PATCH 022/235] add file admiral/pkg/clusters/envoyfilter.go
---
admiral/pkg/clusters/envoyfilter.go | 241 ++++++++++++++++------------
1 file changed, 136 insertions(+), 105 deletions(-)
diff --git a/admiral/pkg/clusters/envoyfilter.go b/admiral/pkg/clusters/envoyfilter.go
index eaf5525e..5d6e313a 100644
--- a/admiral/pkg/clusters/envoyfilter.go
+++ b/admiral/pkg/clusters/envoyfilter.go
@@ -6,12 +6,16 @@ import (
"fmt"
"strings"
- structpb "github.com/golang/protobuf/ptypes/struct"
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
+ k8sErrors "k8s.io/apimachinery/pkg/api/errors"
+
+ structPb "github.com/golang/protobuf/ptypes/struct"
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
log "github.com/sirupsen/logrus"
+ "google.golang.org/protobuf/types/known/structpb"
"istio.io/api/networking/v1alpha3"
+ networkingv1alpha3 "istio.io/api/networking/v1alpha3"
networking "istio.io/client-go/pkg/apis/networking/v1alpha3"
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -20,71 +24,110 @@ var (
getSha1 = common.GetSha1
)
-const hostsKey = "hosts: "
-const pluginKey = "plugin: "
+const (
+ envoyFilter = "EnvoyFilter"
+ hostsKey = "hosts: "
+ pluginKey = "plugin: "
+ envoyfilterAssociatedRoutingPolicyNameAnnotation = "associated-routing-policy-name"
+ envoyfilterAssociatedRoutingPolicyIdentityeAnnotation = "associated-routing-policy-identity"
+)
+
+// getEnvoyFilterNamespace returns the user namespace where envoy filter needs to be created.
+func getEnvoyFilterNamespace() string {
+ var namespace string
+ namespace = common.NamespaceIstioSystem
+ return namespace
+}
+func createOrUpdateEnvoyFilter(ctx context.Context, rc *RemoteController, routingPolicy *v1.RoutingPolicy, eventType admiral.EventType, workloadIdentityKey string, admiralCache *AdmiralCache) ([]*networking.EnvoyFilter, error) {
-func createOrUpdateEnvoyFilter(ctx context.Context, rc *RemoteController, routingPolicy *v1.RoutingPolicy, eventType admiral.EventType, workloadIdentityKey string, admiralCache *AdmiralCache, workloadSelectorMap map[string]string) (*networking.EnvoyFilter, error) {
+ var (
+ filterNamespace string
+ err error
+ )
- envoyfilterSpec, err := constructEnvoyFilterStruct(routingPolicy, workloadSelectorMap)
+ filterNamespace = getEnvoyFilterNamespace()
+ routingPolicyNameSha, err := getSha1(routingPolicy.Name + common.GetRoutingPolicyEnv(routingPolicy) + common.GetRoutingPolicyIdentity(routingPolicy))
if err != nil {
- log.Error("error occurred while constructing envoy filter struct")
+ log.Errorf(LogErrFormat, eventType, envoyFilter, routingPolicy.Name, rc.ClusterID, "error occurred while computing routingPolicy name sha1")
return nil, err
}
-
- selectorLabelsSha, err := getSha1(workloadIdentityKey + common.GetRoutingPolicyEnv(routingPolicy))
+ dependentIdentitySha, err := getSha1(workloadIdentityKey)
if err != nil {
- log.Error("error occurred while computing workload labels sha1")
+ log.Errorf(LogErrFormat, eventType, envoyFilter, routingPolicy.Name, rc.ClusterID, "error occurred while computing dependentIdentity sha1")
return nil, err
}
if len(common.GetEnvoyFilterVersion()) == 0 {
- log.Error("envoy filter version not supplied")
+ log.Errorf(LogErrFormat, eventType, envoyFilter, routingPolicy.Name, rc.ClusterID, "envoy filter version not supplied")
return nil, errors.New("envoy filter version not supplied")
}
- envoyFilterName := fmt.Sprintf("%s-dynamicrouting-%s-%s", strings.ToLower(routingPolicy.Spec.Plugin), selectorLabelsSha, common.GetEnvoyFilterVersion())
- envoyfilter := &networking.EnvoyFilter{
- TypeMeta: metaV1.TypeMeta{
- Kind: "EnvoyFilter",
- APIVersion: "networking.istio.io/v1alpha3",
- },
- ObjectMeta: metaV1.ObjectMeta{
- Name: envoyFilterName,
- Namespace: common.NamespaceIstioSystem,
- },
- //nolint
- Spec: *envoyfilterSpec,
- }
- admiralCache.RoutingPolicyFilterCache.Put(workloadIdentityKey+common.GetRoutingPolicyEnv(routingPolicy), rc.ClusterID, envoyFilterName)
- var filter *networking.EnvoyFilter
- //get the envoyfilter if it exists. If it exists, update it. Otherwise create it.
- if eventType == admiral.Add || eventType == admiral.Update {
- // We query the API server instead of getting it from cache because there could be potential condition where the filter exists in the cache but not on the cluster.
- filter, err = rc.RoutingPolicyController.IstioClient.NetworkingV1alpha3().
- EnvoyFilters(common.NamespaceIstioSystem).Get(ctx, envoyFilterName, metaV1.GetOptions{})
- if err != nil {
- log.Infof("msg=%s filtername=%s clustername=%s", "creating the envoy filter", envoyFilterName, rc.ClusterID)
- filter, err = rc.RoutingPolicyController.IstioClient.NetworkingV1alpha3().
- EnvoyFilters(common.NamespaceIstioSystem).Create(ctx, envoyfilter, metaV1.CreateOptions{})
- if err != nil {
- log.Infof("error creating filter: %v", err)
+ var versionsArray = common.GetEnvoyFilterVersion() // e.g. 1.13,1.17
+ env := common.GetRoutingPolicyEnv(routingPolicy)
+ filterList := make([]*networking.EnvoyFilter, 0)
+
+ for _, version := range versionsArray {
+ envoyFilterName := fmt.Sprintf("%s-dr-%s-%s-%s", strings.ToLower(routingPolicy.Spec.Plugin), routingPolicyNameSha, dependentIdentitySha, version)
+ envoyfilterSpec := constructEnvoyFilterStruct(routingPolicy, map[string]string{common.AssetAlias: workloadIdentityKey}, version, envoyFilterName)
+
+ log.Infof(LogFormat, eventType, envoyFilter, envoyFilterName, rc.ClusterID, "version +"+version)
+
+ envoyfilter := &networking.EnvoyFilter{
+ TypeMeta: metaV1.TypeMeta{
+ Kind: "EnvoyFilter",
+ APIVersion: "networking.istio.io/v1alpha3",
+ },
+ ObjectMeta: metaV1.ObjectMeta{
+ Name: envoyFilterName,
+ Namespace: filterNamespace,
+ Annotations: map[string]string{
+ envoyfilterAssociatedRoutingPolicyNameAnnotation: routingPolicy.Name,
+ envoyfilterAssociatedRoutingPolicyIdentityeAnnotation: common.GetRoutingPolicyIdentity(routingPolicy),
+ },
+ },
+ //nolint
+ Spec: *envoyfilterSpec,
+ }
+
+ // To maintain mapping of envoyfilters created for a routing policy, and to facilitate deletion of envoyfilters when routing policy is deleted
+ admiralCache.RoutingPolicyFilterCache.Put(routingPolicy.Name+common.GetRoutingPolicyIdentity(routingPolicy)+env, rc.ClusterID, envoyFilterName, filterNamespace)
+
+ //get the envoyfilter if it exists. If it exists, update it. Otherwise create it.
+ if eventType == admiral.Add || eventType == admiral.Update {
+ // We query the API server instead of getting it from cache because there could be potential condition where the filter exists in the cache but not on the cluster.
+ var err2 error
+ filter, err1 := rc.RoutingPolicyController.IstioClient.NetworkingV1alpha3().
+ EnvoyFilters(filterNamespace).Get(ctx, envoyFilterName, metaV1.GetOptions{})
+
+ if k8sErrors.IsNotFound(err1) {
+ log.Infof(LogFormat, eventType, envoyFilter, envoyFilterName, rc.ClusterID, "creating the envoy filter")
+ filter, err2 = rc.RoutingPolicyController.IstioClient.NetworkingV1alpha3().
+ EnvoyFilters(filterNamespace).Create(ctx, envoyfilter, metaV1.CreateOptions{})
+ } else if err1 == nil {
+ log.Infof(LogFormat, eventType, envoyFilter, envoyFilterName, rc.ClusterID, "updating existing envoy filter")
+ envoyfilter.ResourceVersion = filter.ResourceVersion
+ filter, err2 = rc.RoutingPolicyController.IstioClient.NetworkingV1alpha3().
+ EnvoyFilters(filterNamespace).Update(ctx, envoyfilter, metaV1.UpdateOptions{})
+ } else {
+ err = common.AppendError(err1, err)
+ log.Errorf(LogErrFormat, eventType, envoyFilter, routingPolicy.Name, rc.ClusterID, err1)
+ }
+
+ if err2 == nil {
+ filterList = append(filterList, filter)
+ } else {
+ err = common.AppendError(err2, err)
+ log.Errorf(LogErrFormat, eventType, envoyFilter, routingPolicy.Name, rc.ClusterID, err2)
}
- } else {
- log.Infof("msg=%s filtername=%s clustername=%s", "updating existing envoy filter", envoyFilterName, rc.ClusterID)
- envoyfilter.ResourceVersion = filter.ResourceVersion
- filter, err = rc.RoutingPolicyController.IstioClient.NetworkingV1alpha3().
- EnvoyFilters(common.NamespaceIstioSystem).Update(ctx, envoyfilter, metaV1.UpdateOptions{})
}
}
-
- return filter, err
+ return filterList, err
}
-
-func constructEnvoyFilterStruct(routingPolicy *v1.RoutingPolicy, workloadSelectorLabels map[string]string) (*v1alpha3.EnvoyFilter, error) {
+func constructEnvoyFilterStruct(routingPolicy *v1.RoutingPolicy, workloadSelectorLabels map[string]string, filterVersion string, filterName string) *v1alpha3.EnvoyFilter {
var envoyFilterStringConfig string
var wasmPath string
for key, val := range routingPolicy.Spec.Config {
if key == common.WASMPath {
- wasmPath = val
+ wasmPath = common.WasmPathValue
continue
}
envoyFilterStringConfig += fmt.Sprintf("%s: %s\n", key, val)
@@ -92,43 +135,38 @@ func constructEnvoyFilterStruct(routingPolicy *v1.RoutingPolicy, workloadSelecto
if len(common.GetEnvoyFilterAdditionalConfig()) != 0 {
envoyFilterStringConfig += common.GetEnvoyFilterAdditionalConfig() + "\n"
}
- hosts, err := getHosts(routingPolicy)
- if err != nil {
- return nil, err
- }
- envoyFilterStringConfig += hosts + "\n"
- plugin, err := getPlugin(routingPolicy)
- if err != nil {
- return nil, err
- }
- envoyFilterStringConfig += plugin
+ envoyFilterStringConfig += getHosts(routingPolicy) + "\n"
+ envoyFilterStringConfig += getPlugin(routingPolicy)
- configuration := structpb.Struct{
- Fields: map[string]*structpb.Value{
- "@type": {Kind: &structpb.Value_StringValue{StringValue: "type.googleapis.com/google.protobuf.StringValue"}},
- "value": {Kind: &structpb.Value_StringValue{StringValue: envoyFilterStringConfig}},
+ log.Infof("msg=%s type=routingpolicy name=%s", "adding config", routingPolicy.Name)
+
+ configuration := structPb.Struct{
+ Fields: map[string]*structPb.Value{
+ "@type": {Kind: &structPb.Value_StringValue{StringValue: "type.googleapis.com/google.protobuf.StringValue"}},
+ "value": {Kind: &structPb.Value_StringValue{StringValue: envoyFilterStringConfig}},
},
}
- vmConfig := structpb.Struct{
- Fields: map[string]*structpb.Value{
- "runtime": {Kind: &structpb.Value_StringValue{StringValue: "envoy.wasm.runtime.v8"}},
- "code": {Kind: &structpb.Value_StructValue{StructValue: &structpb.Struct{Fields: map[string]*structpb.Value{
- "local": {Kind: &structpb.Value_StructValue{StructValue: &structpb.Struct{Fields: map[string]*structpb.Value{
- "filename": {Kind: &structpb.Value_StringValue{StringValue: wasmPath}},
+ vmConfig := structPb.Struct{
+ Fields: map[string]*structPb.Value{
+ "runtime": {Kind: &structPb.Value_StringValue{StringValue: "envoy.wasm.runtime.v8"}},
+ "vm_id": {Kind: &structpb.Value_StringValue{StringValue: filterName}},
+ "code": {Kind: &structPb.Value_StructValue{StructValue: &structPb.Struct{Fields: map[string]*structPb.Value{
+ "local": {Kind: &structPb.Value_StructValue{StructValue: &structPb.Struct{Fields: map[string]*structPb.Value{
+ "filename": {Kind: &structPb.Value_StringValue{StringValue: wasmPath}},
}}}},
}}}},
},
}
- typedConfigValue := structpb.Struct{
- Fields: map[string]*structpb.Value{
+ typedConfigValue := structPb.Struct{
+ Fields: map[string]*structPb.Value{
"config": {
- Kind: &structpb.Value_StructValue{
- StructValue: &structpb.Struct{
- Fields: map[string]*structpb.Value{
- "configuration": {Kind: &structpb.Value_StructValue{StructValue: &configuration}},
- "vm_config": {Kind: &structpb.Value_StructValue{StructValue: &vmConfig}},
+ Kind: &structPb.Value_StructValue{
+ StructValue: &structPb.Struct{
+ Fields: map[string]*structPb.Value{
+ "configuration": {Kind: &structPb.Value_StructValue{StructValue: &configuration}},
+ "vm_config": {Kind: &structPb.Value_StructValue{StructValue: &vmConfig}},
},
},
},
@@ -136,19 +174,22 @@ func constructEnvoyFilterStruct(routingPolicy *v1.RoutingPolicy, workloadSelecto
},
}
- typedConfig := &structpb.Struct{
- Fields: map[string]*structpb.Value{
- "@type": {Kind: &structpb.Value_StringValue{StringValue: "type.googleapis.com/udpa.type.v1.TypedStruct"}},
- "type_url": {Kind: &structpb.Value_StringValue{StringValue: "type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm"}},
- "value": {Kind: &structpb.Value_StructValue{StructValue: &typedConfigValue}},
+ typedConfig := &structPb.Struct{
+ Fields: map[string]*structPb.Value{
+ "@type": {Kind: &structPb.Value_StringValue{StringValue: "type.googleapis.com/udpa.type.v1.TypedStruct"}},
+ "type_url": {Kind: &structPb.Value_StringValue{StringValue: "type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm"}},
+ "value": {Kind: &structPb.Value_StructValue{StructValue: &typedConfigValue}},
},
}
- envoyfilterSpec := getEnvoyFilterSpec(workloadSelectorLabels, typedConfig)
- return envoyfilterSpec, nil
+ envoyfilter := getEnvoyFilterSpec(workloadSelectorLabels, "dynamicRoutingFilterPatch", typedConfig, v1alpha3.EnvoyFilter_SIDECAR_OUTBOUND,
+ &v1alpha3.EnvoyFilter_ListenerMatch_SubFilterMatch{Name: "envoy.filters.http.router"}, v1alpha3.EnvoyFilter_Patch_INSERT_BEFORE, filterVersion)
+ return envoyfilter
}
-func getEnvoyFilterSpec(workloadSelectorLabels map[string]string, typedConfig *structpb.Struct) *v1alpha3.EnvoyFilter {
+func getEnvoyFilterSpec(workloadSelectorLabels map[string]string, filterName string, typedConfig *structPb.Struct,
+ filterContext networkingv1alpha3.EnvoyFilter_PatchContext, subfilter *v1alpha3.EnvoyFilter_ListenerMatch_SubFilterMatch,
+ insertPosition networkingv1alpha3.EnvoyFilter_Patch_Operation, filterVersion string) *v1alpha3.EnvoyFilter {
return &v1alpha3.EnvoyFilter{
WorkloadSelector: &v1alpha3.WorkloadSelector{Labels: workloadSelectorLabels},
@@ -156,29 +197,27 @@ func getEnvoyFilterSpec(workloadSelectorLabels map[string]string, typedConfig *s
{
ApplyTo: v1alpha3.EnvoyFilter_HTTP_FILTER,
Match: &v1alpha3.EnvoyFilter_EnvoyConfigObjectMatch{
- Context: v1alpha3.EnvoyFilter_SIDECAR_OUTBOUND,
+ Context: filterContext,
// TODO: Figure out the possibility of using this for istio version upgrades. Can we add multiple filters with different proxy version Match here?
- Proxy: &v1alpha3.EnvoyFilter_ProxyMatch{ProxyVersion: "^" + strings.ReplaceAll(common.GetEnvoyFilterVersion(), ".", "\\.") + ".*"},
+ Proxy: &v1alpha3.EnvoyFilter_ProxyMatch{ProxyVersion: "^" + strings.ReplaceAll(filterVersion, ".", "\\.") + ".*"},
ObjectTypes: &v1alpha3.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &v1alpha3.EnvoyFilter_ListenerMatch{
FilterChain: &v1alpha3.EnvoyFilter_ListenerMatch_FilterChainMatch{
Filter: &v1alpha3.EnvoyFilter_ListenerMatch_FilterMatch{
- Name: "envoy.filters.network.http_connection_manager",
- SubFilter: &v1alpha3.EnvoyFilter_ListenerMatch_SubFilterMatch{
- Name: "envoy.filters.http.router",
- },
+ Name: "envoy.filters.network.http_connection_manager",
+ SubFilter: subfilter,
},
},
},
},
},
Patch: &v1alpha3.EnvoyFilter_Patch{
- Operation: v1alpha3.EnvoyFilter_Patch_INSERT_BEFORE,
- Value: &structpb.Struct{
- Fields: map[string]*structpb.Value{
- "name": {Kind: &structpb.Value_StringValue{StringValue: "dynamicRoutingFilterPatch"}},
+ Operation: insertPosition,
+ Value: &structPb.Struct{
+ Fields: map[string]*structPb.Value{
+ "name": {Kind: &structPb.Value_StringValue{StringValue: filterName}},
"typed_config": {
- Kind: &structpb.Value_StructValue{
+ Kind: &structPb.Value_StructValue{
StructValue: typedConfig,
},
},
@@ -190,24 +229,16 @@ func getEnvoyFilterSpec(workloadSelectorLabels map[string]string, typedConfig *s
}
}
-func getHosts(routingPolicy *v1.RoutingPolicy) (string, error) {
+func getHosts(routingPolicy *v1.RoutingPolicy) string {
hosts := ""
for _, host := range routingPolicy.Spec.Hosts {
hosts += host + ","
}
- if len(hosts) == 0 {
- log.Error("routing policy hosts cannot be empty")
- return "", errors.New("routing policy hosts cannot be empty")
- }
hosts = strings.TrimSuffix(hosts, ",")
- return hostsKey + hosts, nil
+ return hostsKey + hosts
}
-func getPlugin(routingPolicy *v1.RoutingPolicy) (string, error) {
+func getPlugin(routingPolicy *v1.RoutingPolicy) string {
plugin := routingPolicy.Spec.Plugin
- if len(plugin) == 0 {
- log.Error("routing policy plugin cannot be empty")
- return "", errors.New("routing policy plugin cannot be empty")
- }
- return pluginKey + plugin, nil
+ return pluginKey + plugin
}
From 1325a2cb20bdc24433d09d1a2258712072662635 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:50:52 -0700
Subject: [PATCH 023/235] add file .github/CODEOWNERS
---
.github/CODEOWNERS | 10 ++++++++++
1 file changed, 10 insertions(+)
create mode 100644 .github/CODEOWNERS
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 00000000..124a48c3
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,10 @@
+# List of source code paths and code owners
+# For more information on the CODEOWNERS file go to:
+# https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax
+
+# Uncomment line 10 and add the correct owners' usernames.
+# These owners will be the default owners for everything in
+# the repo. Unless a later match takes precedence,
+# @global-owner1 and @global-owner2 will be requested for
+# review when someone opens a pull request.
+* @services-mesh/service-mesh
From 622376b137d63bf57c3bc99e8a50a18e38a24b0b Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:50:55 -0700
Subject: [PATCH 024/235] add file .github/PULL_REQUEST_TEMPLATE.md
---
.github/PULL_REQUEST_TEMPLATE.md | 15 +++++++++++++++
1 file changed, 15 insertions(+)
create mode 100644 .github/PULL_REQUEST_TEMPLATE.md
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000..b6a5000c
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,15 @@
+### Checklist
+🚨 Please review this repository's [contribution guidelines](./CONTRIBUTING.md).
+
+- [ ] I've read and agree to the project's contribution guidelines.
+- [ ] I'm requesting to **pull a topic/feature/bugfix branch**.
+- [ ] I checked that my code additions will pass code linting checks and unit tests.
+- [ ] I updated unit and integration tests (if applicable).
+- [ ] I'm ready to notify the team of this contribution.
+
+### Description
+What does this change do and why?
+
+[Link to related ISSUE]
+
+Thank you!
\ No newline at end of file
From 9b92bd5dd9888077a981b6e0011bc981d62ae743 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:50:58 -0700
Subject: [PATCH 025/235] add file .golangci.yml
---
.golangci.yml | 36 ++++++++++++++++++++++++++++++++++++
1 file changed, 36 insertions(+)
create mode 100644 .golangci.yml
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 00000000..e362df34
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,36 @@
+name: golangci-lint
+on:
+ push:
+ tags:
+ - v*
+ branches:
+ - master
+ - main
+ pull_request:
+jobs:
+ golangci:
+ name: lint
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/setup-go@v3
+ with:
+ go-version: '1.17.7'
+ - name: golangci-lint
+ uses: golangci/golangci-lint-action@v2
+ with:
+ # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
+ version: v1.47.3
+ skip-go-installation: true
+
+ # Optional: working directory, useful for monorepos
+ # working-directory: somedir
+
+ # Optional: golangci-lint command line arguments.
+ args: >-
+ --skip-dirs=admiral/pkg/client/clientset/versioned
+ --tests=false
+ --timeout=5m
+
+ # Optional: show only new issues if it's a pull request. The default value is `false`.
+ # only-new-issues: true
\ No newline at end of file
From cebd999a36206e64f0d79267b209c0fd6f71f863 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:51:01 -0700
Subject: [PATCH 026/235] add file DESIGN.md
---
DESIGN.md | 1 +
1 file changed, 1 insertion(+)
create mode 100644 DESIGN.md
diff --git a/DESIGN.md b/DESIGN.md
new file mode 100644
index 00000000..2df85123
--- /dev/null
+++ b/DESIGN.md
@@ -0,0 +1 @@
+# Admiral
From 5d84abd8bf66e05fa5e8c62c4ba2ccaa91cf79cc Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:51:06 -0700
Subject: [PATCH 027/235] add file admiral/crd/outlierdetection.yaml
---
admiral/crd/outlierdetection.yaml | 79 +++++++++++++++++++++++++++++++
1 file changed, 79 insertions(+)
create mode 100644 admiral/crd/outlierdetection.yaml
diff --git a/admiral/crd/outlierdetection.yaml b/admiral/crd/outlierdetection.yaml
new file mode 100644
index 00000000..5edb4b28
--- /dev/null
+++ b/admiral/crd/outlierdetection.yaml
@@ -0,0 +1,79 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: outlierdetections.admiral.io
+spec:
+ group: admiral.io
+ names:
+ kind: OutlierDetection
+ listKind: OutlierDetectionList
+ plural: outlierdetections
+ singular: outlierdetection
+ shortNames:
+ - od
+ - ods
+ scope: Namespaced
+ versions:
+ - name: v1alpha1
+ schema:
+ openAPIV3Schema:
+      description: generic CRD object to wrap the OutlierDetection api
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ outlier_config:
+ description: 'REQUIRED: base outlier configuration.'
+ properties:
+ base_ejection_time:
+            description: 'REQUIRED: Minimum duration of time in seconds for
+              which the endpoint will be ejected'
+ format: int64
+ type: integer
+ consecutive_gateway_errors:
+ description: 'REQUIRED: No. of consecutive failures in specified
+ interval after which the endpoint will be ejected'
+ format: int32
+ type: integer
+ interval:
+ description: 'REQUIRED: Time interval between ejection sweep analysis'
+ format: int64
+ type: integer
+ type: object
+ selector:
+ additionalProperties:
+ type: string
+          description: 'REQUIRED: One or more labels that indicate a specific
+            set of pods/VMs on which this outlier configuration should be applied.
+            The scope of label search is restricted to namespaces marked for mesh
+            enablement; this will scan all clusters and namespaces'
+ type: object
+ type: object
+ status:
+ properties:
+ clustersSynced:
+ format: int32
+ type: integer
+ state:
+ type: string
+ required:
+ - clustersSynced
+ - state
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
\ No newline at end of file
From b7bf2227bf73c8f194f3a6ff47c31082218ad315 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:51:09 -0700
Subject: [PATCH 028/235] add file admiral/crd/trafficconfig.yaml
---
admiral/crd/trafficconfig.yaml | 291 +++++++++++++++++++++++++++++++++
1 file changed, 291 insertions(+)
create mode 100644 admiral/crd/trafficconfig.yaml
diff --git a/admiral/crd/trafficconfig.yaml b/admiral/crd/trafficconfig.yaml
new file mode 100644
index 00000000..fb2258ca
--- /dev/null
+++ b/admiral/crd/trafficconfig.yaml
@@ -0,0 +1,291 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.10.0
+ creationTimestamp: null
+ name: trafficconfigs.admiral.io
+spec:
+ group: admiral.io
+ names:
+ kind: TrafficConfig
+ listKind: TrafficConfigList
+ plural: trafficconfigs
+ singular: trafficconfig
+ shortNames:
+ - tc
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: TrafficConfig is the Schema for the trafficconfigs API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: QuotaGroupSpec defines the desired state of QuotaGroup specified
+ by the user
+ properties:
+ edgeService:
+ properties:
+ dynamicRouting:
+ items:
+ properties:
+ cacheKeyAlgorithm:
+ type: string
+ local:
+ type: boolean
+ name:
+ type: string
+ ttlSec:
+ type: integer
+ url:
+ type: string
+ type: object
+ type: array
+ filters:
+ items:
+ properties:
+ name:
+ type: string
+ options:
+ items:
+ type: string
+ type: array
+ retries:
+ properties:
+ attempts:
+ type: integer
+ perTryTimeout:
+ type: string
+ type: object
+ type: object
+ type: array
+ routes:
+ items:
+ properties:
+ filterSelector:
+ type: string
+ inbound:
+ type: string
+ name:
+ type: string
+ outbound:
+ type: string
+ timeout:
+ type: integer
+ config:
+ items:
+ properties:
+ targetGroupSelector:
+ type: string
+ targetSelector:
+ type: string
+ type: object
+ type: array
+ workloadEnvSelectors:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ targets:
+ items:
+ properties:
+ name:
+ type: string
+ meshDNS:
+ type: string
+ port:
+ type: integer
+ socketTimeout:
+ type: integer
+ type: object
+ type: array
+ targetGroups:
+ items:
+ properties:
+ name:
+ type: string
+ weights:
+ items:
+ properties:
+ name:
+ type: string
+ weight:
+ type: integer
+ type: object
+ type: array
+ appOverrides:
+ items:
+ properties:
+ assetAlias:
+ type: string
+ assetID:
+ type: string
+ weights:
+ items:
+ properties:
+ name:
+ type: string
+ weight:
+ type: integer
+ type: object
+ type: array
+ type: object
+ type: array
+ type: object
+ type: array
+ type: object
+ quotaGroup:
+ properties:
+ appQuotaGroups:
+ items:
+ properties:
+ associatedApps:
+ items:
+ type: string
+ type: array
+ description:
+ type: string
+ name:
+ type: string
+ quotas:
+ items:
+ properties:
+ algorithm:
+ type: string
+ behaviour:
+ type: string
+ keyType:
+ type: string
+ maxAmount:
+ type: integer
+ method:
+ type: string
+ name:
+ type: string
+ rule:
+ type: string
+ timePeriod:
+ type: string
+ type: object
+ type: array
+ workloadEnvSelectors:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ totalQuotaGroups:
+ items:
+ properties:
+ adaptiveConcurrency:
+ properties:
+ concurrencyUpdateInterval:
+ type: string
+ latencyThreshold:
+ type: string
+ minRTTCalInterval:
+ type: string
+ minRTTCalJitter:
+ type: integer
+ minRTTCalMinConcurrency:
+ type: integer
+ minRTTCalRequestCount:
+ type: integer
+ sampleAggregatePercentile:
+ type: integer
+ skippedURLs:
+ items:
+ type: string
+ type: array
+ type: object
+ cpuLimit:
+ type: integer
+ description:
+ type: string
+ failureModeBehaviour:
+ type: string
+ memoryLimit:
+ type: integer
+ name:
+ type: string
+ podLevelThreshold:
+ type: integer
+ quotas:
+ items:
+ properties:
+ algorithm:
+ type: string
+ behaviour:
+ type: string
+ keyType:
+ type: string
+ maxAmount:
+ type: integer
+ method:
+ type: string
+ name:
+ type: string
+ rule:
+ type: string
+ timePeriod:
+ type: string
+ type: object
+ type: array
+ regionLevelLimit:
+ type: boolean
+ workloadEnvSelectors:
+ items:
+ type: string
+ type: array
+ type: object
+ type: array
+ type: object
+ workloadEnvs:
+ items:
+ type: string
+ type: array
+ type: object
+ status:
+ description: TrafficConfigStatus defines the observed state of QuotaGroup
+ properties:
+ disabled:
+ type: boolean
+ disabledTime:
+ format: date-time
+ type: string
+ lastAppliedConfigVersion:
+ type: string
+ lastUpdateTime:
+ format: date-time
+ type: string
+ message:
+ description: 'INSERT ADDITIONAL STATUS FIELD - define observed state
+ of cluster Important: Run "make" to regenerate code after modifying
+ this file'
+ type: string
+ status:
+ type: boolean
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
\ No newline at end of file
From 8ba3b29e47dd7d9a6b07a0310cbb9c53adb660e4 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:51:12 -0700
Subject: [PATCH 029/235] add file
admiral/pkg/apis/admiral/model/clientconnectionconfig.pb.go
---
.../model/clientconnectionconfig.pb.go | 461 ++++++++++++++++++
1 file changed, 461 insertions(+)
create mode 100644 admiral/pkg/apis/admiral/model/clientconnectionconfig.pb.go
diff --git a/admiral/pkg/apis/admiral/model/clientconnectionconfig.pb.go b/admiral/pkg/apis/admiral/model/clientconnectionconfig.pb.go
new file mode 100644
index 00000000..eff16a3a
--- /dev/null
+++ b/admiral/pkg/apis/admiral/model/clientconnectionconfig.pb.go
@@ -0,0 +1,461 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: clientconnectionconfig.proto
+
+package model
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type ConnectionPool_HTTP_H2UpgradePolicy int32
+
+const (
+ ConnectionPool_HTTP_DEFAULT ConnectionPool_HTTP_H2UpgradePolicy = 0
+ ConnectionPool_HTTP_DO_NOT_UPGRADE ConnectionPool_HTTP_H2UpgradePolicy = 1
+ ConnectionPool_HTTP_UPGRADE ConnectionPool_HTTP_H2UpgradePolicy = 2
+)
+
+var ConnectionPool_HTTP_H2UpgradePolicy_name = map[int32]string{
+ 0: "DEFAULT",
+ 1: "DO_NOT_UPGRADE",
+ 2: "UPGRADE",
+}
+
+var ConnectionPool_HTTP_H2UpgradePolicy_value = map[string]int32{
+ "DEFAULT": 0,
+ "DO_NOT_UPGRADE": 1,
+ "UPGRADE": 2,
+}
+
+func (x ConnectionPool_HTTP_H2UpgradePolicy) String() string {
+ return proto.EnumName(ConnectionPool_HTTP_H2UpgradePolicy_name, int32(x))
+}
+
+func (ConnectionPool_HTTP_H2UpgradePolicy) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_282331a83280fd5c, []int{1, 2, 0}
+}
+
+type ClientConnectionConfig struct {
+ ConnectionPool *ConnectionPool `protobuf:"bytes,1,opt,name=connectionPool,proto3" json:"connectionPool,omitempty"`
+ Tunnel *Tunnel `protobuf:"bytes,2,opt,name=tunnel,proto3" json:"tunnel,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ClientConnectionConfig) Reset() { *m = ClientConnectionConfig{} }
+func (m *ClientConnectionConfig) String() string { return proto.CompactTextString(m) }
+func (*ClientConnectionConfig) ProtoMessage() {}
+func (*ClientConnectionConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_282331a83280fd5c, []int{0}
+}
+
+func (m *ClientConnectionConfig) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ClientConnectionConfig.Unmarshal(m, b)
+}
+func (m *ClientConnectionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ClientConnectionConfig.Marshal(b, m, deterministic)
+}
+func (m *ClientConnectionConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClientConnectionConfig.Merge(m, src)
+}
+func (m *ClientConnectionConfig) XXX_Size() int {
+ return xxx_messageInfo_ClientConnectionConfig.Size(m)
+}
+func (m *ClientConnectionConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClientConnectionConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClientConnectionConfig proto.InternalMessageInfo
+
+func (m *ClientConnectionConfig) GetConnectionPool() *ConnectionPool {
+ if m != nil {
+ return m.ConnectionPool
+ }
+ return nil
+}
+
+func (m *ClientConnectionConfig) GetTunnel() *Tunnel {
+ if m != nil {
+ return m.Tunnel
+ }
+ return nil
+}
+
+type ConnectionPool struct {
+ Tcp *ConnectionPool_TCP `protobuf:"bytes,1,opt,name=tcp,proto3" json:"tcp,omitempty"`
+ Http *ConnectionPool_HTTP `protobuf:"bytes,2,opt,name=http,proto3" json:"http,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ConnectionPool) Reset() { *m = ConnectionPool{} }
+func (m *ConnectionPool) String() string { return proto.CompactTextString(m) }
+func (*ConnectionPool) ProtoMessage() {}
+func (*ConnectionPool) Descriptor() ([]byte, []int) {
+ return fileDescriptor_282331a83280fd5c, []int{1}
+}
+
+func (m *ConnectionPool) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ConnectionPool.Unmarshal(m, b)
+}
+func (m *ConnectionPool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ConnectionPool.Marshal(b, m, deterministic)
+}
+func (m *ConnectionPool) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConnectionPool.Merge(m, src)
+}
+func (m *ConnectionPool) XXX_Size() int {
+ return xxx_messageInfo_ConnectionPool.Size(m)
+}
+func (m *ConnectionPool) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConnectionPool.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConnectionPool proto.InternalMessageInfo
+
+func (m *ConnectionPool) GetTcp() *ConnectionPool_TCP {
+ if m != nil {
+ return m.Tcp
+ }
+ return nil
+}
+
+func (m *ConnectionPool) GetHttp() *ConnectionPool_HTTP {
+ if m != nil {
+ return m.Http
+ }
+ return nil
+}
+
+type ConnectionPool_TcpKeepalive struct {
+ Probes uint32 `protobuf:"varint,1,opt,name=probes,proto3" json:"probes,omitempty"`
+ Time string `protobuf:"bytes,2,opt,name=time,proto3" json:"time,omitempty"`
+ Interval string `protobuf:"bytes,3,opt,name=interval,proto3" json:"interval,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ConnectionPool_TcpKeepalive) Reset() { *m = ConnectionPool_TcpKeepalive{} }
+func (m *ConnectionPool_TcpKeepalive) String() string { return proto.CompactTextString(m) }
+func (*ConnectionPool_TcpKeepalive) ProtoMessage() {}
+func (*ConnectionPool_TcpKeepalive) Descriptor() ([]byte, []int) {
+ return fileDescriptor_282331a83280fd5c, []int{1, 0}
+}
+
+func (m *ConnectionPool_TcpKeepalive) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ConnectionPool_TcpKeepalive.Unmarshal(m, b)
+}
+func (m *ConnectionPool_TcpKeepalive) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ConnectionPool_TcpKeepalive.Marshal(b, m, deterministic)
+}
+func (m *ConnectionPool_TcpKeepalive) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConnectionPool_TcpKeepalive.Merge(m, src)
+}
+func (m *ConnectionPool_TcpKeepalive) XXX_Size() int {
+ return xxx_messageInfo_ConnectionPool_TcpKeepalive.Size(m)
+}
+func (m *ConnectionPool_TcpKeepalive) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConnectionPool_TcpKeepalive.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConnectionPool_TcpKeepalive proto.InternalMessageInfo
+
+func (m *ConnectionPool_TcpKeepalive) GetProbes() uint32 {
+ if m != nil {
+ return m.Probes
+ }
+ return 0
+}
+
+func (m *ConnectionPool_TcpKeepalive) GetTime() string {
+ if m != nil {
+ return m.Time
+ }
+ return ""
+}
+
+func (m *ConnectionPool_TcpKeepalive) GetInterval() string {
+ if m != nil {
+ return m.Interval
+ }
+ return ""
+}
+
+type ConnectionPool_TCP struct {
+ // Maximum number of HTTP1 /TCP connections to a destination host.
+ MaxConnections int32 `protobuf:"varint,1,opt,name=maxConnections,proto3" json:"maxConnections,omitempty"`
+ ConnectTimeout string `protobuf:"bytes,2,opt,name=connectTimeout,proto3" json:"connectTimeout,omitempty"`
+ TcpKeepalive *ConnectionPool_TcpKeepalive `protobuf:"bytes,3,opt,name=tcpKeepalive,proto3" json:"tcpKeepalive,omitempty"`
+ // The maximum duration of a connection
+ MaxConnectionDuration string `protobuf:"bytes,4,opt,name=maxConnectionDuration,proto3" json:"maxConnectionDuration,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ConnectionPool_TCP) Reset() { *m = ConnectionPool_TCP{} }
+func (m *ConnectionPool_TCP) String() string { return proto.CompactTextString(m) }
+func (*ConnectionPool_TCP) ProtoMessage() {}
+func (*ConnectionPool_TCP) Descriptor() ([]byte, []int) {
+ return fileDescriptor_282331a83280fd5c, []int{1, 1}
+}
+
+func (m *ConnectionPool_TCP) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ConnectionPool_TCP.Unmarshal(m, b)
+}
+func (m *ConnectionPool_TCP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ConnectionPool_TCP.Marshal(b, m, deterministic)
+}
+func (m *ConnectionPool_TCP) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConnectionPool_TCP.Merge(m, src)
+}
+func (m *ConnectionPool_TCP) XXX_Size() int {
+ return xxx_messageInfo_ConnectionPool_TCP.Size(m)
+}
+func (m *ConnectionPool_TCP) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConnectionPool_TCP.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConnectionPool_TCP proto.InternalMessageInfo
+
+func (m *ConnectionPool_TCP) GetMaxConnections() int32 {
+ if m != nil {
+ return m.MaxConnections
+ }
+ return 0
+}
+
+func (m *ConnectionPool_TCP) GetConnectTimeout() string {
+ if m != nil {
+ return m.ConnectTimeout
+ }
+ return ""
+}
+
+func (m *ConnectionPool_TCP) GetTcpKeepalive() *ConnectionPool_TcpKeepalive {
+ if m != nil {
+ return m.TcpKeepalive
+ }
+ return nil
+}
+
+func (m *ConnectionPool_TCP) GetMaxConnectionDuration() string {
+ if m != nil {
+ return m.MaxConnectionDuration
+ }
+ return ""
+}
+
+// HTTP connection pool settings
+type ConnectionPool_HTTP struct {
+ // Maximum number of pending HTTP requests to a destination.
+ Http1MaxPendingRequests int32 `protobuf:"varint,1,opt,name=http1MaxPendingRequests,proto3" json:"http1MaxPendingRequests,omitempty"`
+ // Maximum number of requests to a backend
+ Http2MaxRequests int32 `protobuf:"varint,2,opt,name=http2MaxRequests,proto3" json:"http2MaxRequests,omitempty"`
+ // Maximum number of requests per connection to a backend.
+ MaxRequestsPerConnection int32 `protobuf:"varint,3,opt,name=maxRequestsPerConnection,proto3" json:"maxRequestsPerConnection,omitempty"`
+ MaxRetries int32 `protobuf:"varint,4,opt,name=maxRetries,proto3" json:"maxRetries,omitempty"`
+ IdleTimeout string `protobuf:"bytes,5,opt,name=idleTimeout,proto3" json:"idleTimeout,omitempty"`
+ H2UpgradePolicy ConnectionPool_HTTP_H2UpgradePolicy `protobuf:"varint,6,opt,name=h2UpgradePolicy,proto3,enum=admiral.global.v1alpha.ConnectionPool_HTTP_H2UpgradePolicy" json:"h2UpgradePolicy,omitempty"`
+ UseClientProtocol bool `protobuf:"varint,7,opt,name=useClientProtocol,proto3" json:"useClientProtocol,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ConnectionPool_HTTP) Reset() { *m = ConnectionPool_HTTP{} }
+func (m *ConnectionPool_HTTP) String() string { return proto.CompactTextString(m) }
+func (*ConnectionPool_HTTP) ProtoMessage() {}
+func (*ConnectionPool_HTTP) Descriptor() ([]byte, []int) {
+ return fileDescriptor_282331a83280fd5c, []int{1, 2}
+}
+
+func (m *ConnectionPool_HTTP) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ConnectionPool_HTTP.Unmarshal(m, b)
+}
+func (m *ConnectionPool_HTTP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ConnectionPool_HTTP.Marshal(b, m, deterministic)
+}
+func (m *ConnectionPool_HTTP) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConnectionPool_HTTP.Merge(m, src)
+}
+func (m *ConnectionPool_HTTP) XXX_Size() int {
+ return xxx_messageInfo_ConnectionPool_HTTP.Size(m)
+}
+func (m *ConnectionPool_HTTP) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConnectionPool_HTTP.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConnectionPool_HTTP proto.InternalMessageInfo
+
+func (m *ConnectionPool_HTTP) GetHttp1MaxPendingRequests() int32 {
+ if m != nil {
+ return m.Http1MaxPendingRequests
+ }
+ return 0
+}
+
+func (m *ConnectionPool_HTTP) GetHttp2MaxRequests() int32 {
+ if m != nil {
+ return m.Http2MaxRequests
+ }
+ return 0
+}
+
+func (m *ConnectionPool_HTTP) GetMaxRequestsPerConnection() int32 {
+ if m != nil {
+ return m.MaxRequestsPerConnection
+ }
+ return 0
+}
+
+func (m *ConnectionPool_HTTP) GetMaxRetries() int32 {
+ if m != nil {
+ return m.MaxRetries
+ }
+ return 0
+}
+
+func (m *ConnectionPool_HTTP) GetIdleTimeout() string {
+ if m != nil {
+ return m.IdleTimeout
+ }
+ return ""
+}
+
+func (m *ConnectionPool_HTTP) GetH2UpgradePolicy() ConnectionPool_HTTP_H2UpgradePolicy {
+ if m != nil {
+ return m.H2UpgradePolicy
+ }
+ return ConnectionPool_HTTP_DEFAULT
+}
+
+func (m *ConnectionPool_HTTP) GetUseClientProtocol() bool {
+ if m != nil {
+ return m.UseClientProtocol
+ }
+ return false
+}
+
+type Tunnel struct {
+ Protocol string `protobuf:"bytes,1,opt,name=protocol,proto3" json:"protocol,omitempty"`
+ TargetHost string `protobuf:"bytes,2,opt,name=targetHost,proto3" json:"targetHost,omitempty"`
+ TargetPort uint32 `protobuf:"varint,3,opt,name=targetPort,proto3" json:"targetPort,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Tunnel) Reset() { *m = Tunnel{} }
+func (m *Tunnel) String() string { return proto.CompactTextString(m) }
+func (*Tunnel) ProtoMessage() {}
+func (*Tunnel) Descriptor() ([]byte, []int) {
+ return fileDescriptor_282331a83280fd5c, []int{2}
+}
+
+func (m *Tunnel) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Tunnel.Unmarshal(m, b)
+}
+func (m *Tunnel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Tunnel.Marshal(b, m, deterministic)
+}
+func (m *Tunnel) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Tunnel.Merge(m, src)
+}
+func (m *Tunnel) XXX_Size() int {
+ return xxx_messageInfo_Tunnel.Size(m)
+}
+func (m *Tunnel) XXX_DiscardUnknown() {
+ xxx_messageInfo_Tunnel.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Tunnel proto.InternalMessageInfo
+
+func (m *Tunnel) GetProtocol() string {
+ if m != nil {
+ return m.Protocol
+ }
+ return ""
+}
+
+func (m *Tunnel) GetTargetHost() string {
+ if m != nil {
+ return m.TargetHost
+ }
+ return ""
+}
+
+func (m *Tunnel) GetTargetPort() uint32 {
+ if m != nil {
+ return m.TargetPort
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterEnum("admiral.global.v1alpha.ConnectionPool_HTTP_H2UpgradePolicy", ConnectionPool_HTTP_H2UpgradePolicy_name, ConnectionPool_HTTP_H2UpgradePolicy_value)
+ proto.RegisterType((*ClientConnectionConfig)(nil), "admiral.global.v1alpha.ClientConnectionConfig")
+ proto.RegisterType((*ConnectionPool)(nil), "admiral.global.v1alpha.ConnectionPool")
+ proto.RegisterType((*ConnectionPool_TcpKeepalive)(nil), "admiral.global.v1alpha.ConnectionPool.TcpKeepalive")
+ proto.RegisterType((*ConnectionPool_TCP)(nil), "admiral.global.v1alpha.ConnectionPool.TCP")
+ proto.RegisterType((*ConnectionPool_HTTP)(nil), "admiral.global.v1alpha.ConnectionPool.HTTP")
+ proto.RegisterType((*Tunnel)(nil), "admiral.global.v1alpha.Tunnel")
+}
+
+func init() { proto.RegisterFile("clientconnectionconfig.proto", fileDescriptor_282331a83280fd5c) }
+
+var fileDescriptor_282331a83280fd5c = []byte{
+ // 562 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xef, 0x6a, 0x13, 0x4f,
+ 0x14, 0xfd, 0x6d, 0xf3, 0xaf, 0xbf, 0x9b, 0x36, 0x8d, 0x17, 0x8c, 0x4b, 0x90, 0x12, 0xf2, 0xa1,
+ 0x84, 0x2a, 0x0b, 0x4d, 0x45, 0x44, 0x85, 0x52, 0x37, 0xd5, 0x80, 0xb6, 0x5d, 0x86, 0x8d, 0x82,
+ 0x5f, 0xca, 0x64, 0x77, 0x4c, 0x06, 0x66, 0x77, 0xd6, 0xcd, 0x24, 0xc4, 0xc7, 0xf0, 0x0d, 0x7c,
+ 0x14, 0x1f, 0xc6, 0x07, 0x91, 0x9d, 0xec, 0x26, 0x9b, 0xb4, 0x81, 0xf8, 0x29, 0xb9, 0xe7, 0xde,
+ 0x73, 0xee, 0x99, 0xb3, 0x33, 0xf0, 0xd4, 0x13, 0x9c, 0x85, 0xca, 0x93, 0x61, 0xc8, 0x3c, 0xc5,
+ 0x65, 0xe8, 0xc9, 0xf0, 0x1b, 0x1f, 0x59, 0x51, 0x2c, 0x95, 0xc4, 0x06, 0xf5, 0x03, 0x1e, 0x53,
+ 0x61, 0x8d, 0x84, 0x1c, 0x52, 0x61, 0xcd, 0xce, 0xa8, 0x88, 0xc6, 0xb4, 0xfd, 0xcb, 0x80, 0x86,
+ 0xad, 0x89, 0xf6, 0x92, 0x68, 0x6b, 0x22, 0xde, 0x40, 0x6d, 0x25, 0xe6, 0x48, 0x29, 0x4c, 0xa3,
+ 0x65, 0x74, 0xaa, 0xdd, 0x13, 0xeb, 0x61, 0x2d, 0xcb, 0x5e, 0x9b, 0x26, 0x1b, 0x6c, 0x7c, 0x09,
+ 0x65, 0x35, 0x0d, 0x43, 0x26, 0xcc, 0x3d, 0xad, 0x73, 0xbc, 0x4d, 0xc7, 0xd5, 0x53, 0x24, 0x9d,
+ 0x6e, 0xff, 0xac, 0x40, 0x6d, 0x5d, 0x1a, 0xdf, 0x42, 0x41, 0x79, 0x51, 0xea, 0xe7, 0x74, 0x37,
+ 0x3f, 0x96, 0x6b, 0x3b, 0x24, 0xa1, 0xe1, 0x05, 0x14, 0xc7, 0x4a, 0x45, 0xa9, 0x8d, 0x67, 0x3b,
+ 0xd2, 0xfb, 0xae, 0xeb, 0x10, 0x4d, 0x6c, 0x7e, 0x86, 0x03, 0xd7, 0x8b, 0x3e, 0x32, 0x16, 0x51,
+ 0xc1, 0x67, 0x0c, 0x1b, 0x50, 0x8e, 0x62, 0x39, 0x64, 0x13, 0xed, 0xe8, 0x90, 0xa4, 0x15, 0x22,
+ 0x14, 0x15, 0x0f, 0x98, 0x5e, 0xf4, 0x3f, 0xd1, 0xff, 0xb1, 0x09, 0xfb, 0x3c, 0x54, 0x2c, 0x9e,
+ 0x51, 0x61, 0x16, 0x34, 0xbe, 0xac, 0x9b, 0x7f, 0x0c, 0x28, 0xb8, 0xb6, 0x83, 0x27, 0x50, 0x0b,
+ 0xe8, 0x7c, 0xb5, 0x7f, 0xa1, 0x5b, 0x22, 0x1b, 0x68, 0x32, 0x97, 0x66, 0xec, 0xf2, 0x80, 0xc9,
+ 0xa9, 0x4a, 0x37, 0x6d, 0xa0, 0xf8, 0x05, 0x0e, 0x54, 0xce, 0xaf, 0xde, 0x5b, 0xed, 0x9e, 0xef,
+ 0x9a, 0x5b, 0x8e, 0x4a, 0xd6, 0x84, 0xf0, 0x05, 0x3c, 0x5e, 0xb3, 0xd4, 0x9b, 0xc6, 0x34, 0xf9,
+ 0x35, 0x8b, 0xda, 0xc7, 0xc3, 0xcd, 0xe6, 0xef, 0x02, 0x14, 0x93, 0x34, 0xf1, 0x15, 0x3c, 0x49,
+ 0xf2, 0x3c, 0xbb, 0xa6, 0x73, 0x87, 0x85, 0x3e, 0x0f, 0x47, 0x84, 0x7d, 0x9f, 0xb2, 0x89, 0xca,
+ 0x0e, 0xbc, 0xad, 0x8d, 0xa7, 0x50, 0x4f, 0x5a, 0xdd, 0x6b, 0x3a, 0x5f, 0x52, 0xf6, 0x34, 0xe5,
+ 0x1e, 0x8e, 0xaf, 0xc1, 0x0c, 0x56, 0xa5, 0xc3, 0xe2, 0x95, 0x25, 0x9d, 0x44, 0x89, 0x6c, 0xed,
+ 0xe3, 0x31, 0x80, 0xee, 0xa9, 0x98, 0xb3, 0x89, 0x3e, 0x55, 0x89, 0xe4, 0x10, 0x6c, 0x41, 0x95,
+ 0xfb, 0x82, 0x65, 0xf1, 0x97, 0xf4, 0xb1, 0xf3, 0x10, 0x32, 0x38, 0x1a, 0x77, 0x07, 0xd1, 0x28,
+ 0xa6, 0x3e, 0x73, 0xa4, 0xe0, 0xde, 0x0f, 0xb3, 0xdc, 0x32, 0x3a, 0xb5, 0xee, 0x9b, 0x7f, 0xb8,
+ 0x77, 0x56, 0x7f, 0x5d, 0x82, 0x6c, 0x6a, 0xe2, 0x73, 0x78, 0x34, 0x9d, 0xb0, 0xc5, 0x4b, 0x76,
+ 0x92, 0x17, 0xef, 0x49, 0x61, 0x56, 0x5a, 0x46, 0x67, 0x9f, 0xdc, 0x6f, 0xb4, 0x2f, 0xe0, 0x68,
+ 0x43, 0x11, 0xab, 0x50, 0xe9, 0x5d, 0xbd, 0xbf, 0x1c, 0x7c, 0x72, 0xeb, 0xff, 0x21, 0x42, 0xad,
+ 0x77, 0x7b, 0x77, 0x73, 0xeb, 0xde, 0x0d, 0x9c, 0x0f, 0xe4, 0xb2, 0x77, 0x55, 0x37, 0x92, 0x81,
+ 0xac, 0xd8, 0x6b, 0xfb, 0x50, 0x5e, 0xbc, 0xd2, 0xe4, 0x3e, 0x47, 0xd9, 0x3e, 0x63, 0x71, 0x9f,
+ 0xb3, 0x3a, 0x49, 0x4f, 0xd1, 0x78, 0xc4, 0x54, 0x5f, 0x4e, 0xb2, 0xbb, 0x99, 0x43, 0x56, 0x7d,
+ 0x47, 0xc6, 0x4a, 0x7f, 0x8b, 0x43, 0x92, 0x43, 0xde, 0x55, 0xbe, 0x96, 0x02, 0xe9, 0x33, 0x31,
+ 0x2c, 0x6b, 0xc9, 0xf3, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x6c, 0xc5, 0x51, 0x76, 0xe4, 0x04,
+ 0x00, 0x00,
+}
From 52d44d7cbadc318d27be55ca1e0b4c50616a0995 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:51:15 -0700
Subject: [PATCH 030/235] add file
admiral/pkg/apis/admiral/model/clientconnectionconfig.proto
---
.../model/clientconnectionconfig.proto | 78 +++++++++++++++++++
1 file changed, 78 insertions(+)
create mode 100644 admiral/pkg/apis/admiral/model/clientconnectionconfig.proto
diff --git a/admiral/pkg/apis/admiral/model/clientconnectionconfig.proto b/admiral/pkg/apis/admiral/model/clientconnectionconfig.proto
new file mode 100644
index 00000000..a3c43cfa
--- /dev/null
+++ b/admiral/pkg/apis/admiral/model/clientconnectionconfig.proto
@@ -0,0 +1,78 @@
+syntax = "proto3";
+
+package admiral.global.v1alpha;
+
+option go_package = "model";
+
+message ClientConnectionConfig {
+
+ ConnectionPool connectionPool = 1;
+
+ Tunnel tunnel = 2;
+}
+
+message ConnectionPool {
+
+ message TcpKeepalive {
+
+ uint32 probes = 1;
+
+ string time = 2;
+
+ string interval = 3;
+
+ }
+
+ message TCP {
+
+ // Maximum number of HTTP1 /TCP connections to a destination host.
+ int32 maxConnections = 1;
+
+ string connectTimeout = 2;
+
+ TcpKeepalive tcpKeepalive = 3;
+
+ // The maximum duration of a connection
+ string maxConnectionDuration = 4;
+ }
+
+ // HTTP connection pool settings
+ message HTTP {
+
+ // Maximum number of pending HTTP requests to a destination.
+ int32 http1MaxPendingRequests = 1;
+
+ // Maximum number of requests to a backend
+ int32 http2MaxRequests = 2;
+
+ // Maximum number of requests per connection to a backend.
+ int32 maxRequestsPerConnection = 3;
+
+ int32 maxRetries = 4;
+
+ string idleTimeout = 5;
+
+ enum H2UpgradePolicy {
+ DEFAULT = 0;
+ DO_NOT_UPGRADE = 1;
+ UPGRADE = 2;
+ };
+ H2UpgradePolicy h2UpgradePolicy = 6;
+
+ bool useClientProtocol = 7;
+ };
+
+
+ TCP tcp = 1;
+
+ HTTP http = 2;
+
+}
+
+message Tunnel {
+ string protocol = 1;
+
+ string targetHost = 2;
+
+ uint32 targetPort = 3;
+}
\ No newline at end of file
From d21c50e9ac88c3499ec777b065ac750a00be1192 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:51:18 -0700
Subject: [PATCH 031/235] add file
admiral/pkg/apis/admiral/model/outlierdetection.pb.go
---
.../apis/admiral/model/outlierdetection.pb.go | 163 ++++++++++++++++++
1 file changed, 163 insertions(+)
create mode 100644 admiral/pkg/apis/admiral/model/outlierdetection.pb.go
diff --git a/admiral/pkg/apis/admiral/model/outlierdetection.pb.go b/admiral/pkg/apis/admiral/model/outlierdetection.pb.go
new file mode 100644
index 00000000..9fc7ff33
--- /dev/null
+++ b/admiral/pkg/apis/admiral/model/outlierdetection.pb.go
@@ -0,0 +1,163 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: outlierdetection.proto
+
+package model
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type OutlierDetection struct {
+ // REQUIRED: base outlier configuration.
+ OutlierConfig *OutlierConfig `protobuf:"bytes,1,opt,name=outlier_config,json=outlierConfig,proto3" json:"outlier_config,omitempty"`
+ // REQUIRED: One or more labels that indicate a specific set of pods/VMs
+ // on which this outlier configuration should be applied. The scope of
+ // label search is restricted to namespace mark for mesh enablement
+ // this will scan all cluster and namespace
+ Selector map[string]string `protobuf:"bytes,2,rep,name=selector,proto3" json:"selector,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *OutlierDetection) Reset() { *m = OutlierDetection{} }
+func (m *OutlierDetection) String() string { return proto.CompactTextString(m) }
+func (*OutlierDetection) ProtoMessage() {}
+func (*OutlierDetection) Descriptor() ([]byte, []int) {
+ return fileDescriptor_84cca5395405be5d, []int{0}
+}
+
+func (m *OutlierDetection) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_OutlierDetection.Unmarshal(m, b)
+}
+func (m *OutlierDetection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_OutlierDetection.Marshal(b, m, deterministic)
+}
+func (m *OutlierDetection) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OutlierDetection.Merge(m, src)
+}
+func (m *OutlierDetection) XXX_Size() int {
+ return xxx_messageInfo_OutlierDetection.Size(m)
+}
+func (m *OutlierDetection) XXX_DiscardUnknown() {
+ xxx_messageInfo_OutlierDetection.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OutlierDetection proto.InternalMessageInfo
+
+func (m *OutlierDetection) GetOutlierConfig() *OutlierConfig {
+ if m != nil {
+ return m.OutlierConfig
+ }
+ return nil
+}
+
+func (m *OutlierDetection) GetSelector() map[string]string {
+ if m != nil {
+ return m.Selector
+ }
+ return nil
+}
+
+// OutlierConfig describes routing for a endpoint.
+type OutlierConfig struct {
+ //REQUIRED: Minimum duration of time in seconds, the endpoint will be ejected
+ BaseEjectionTime int64 `protobuf:"varint,1,opt,name=base_ejection_time,json=baseEjectionTime,proto3" json:"base_ejection_time,omitempty"`
+ //REQUIRED: No. of consecutive failures in specified interval after which the endpoint will be ejected
+ ConsecutiveGatewayErrors uint32 `protobuf:"varint,2,opt,name=consecutive_gateway_errors,json=consecutiveGatewayErrors,proto3" json:"consecutive_gateway_errors,omitempty"`
+ //REQUIRED: Time interval between ejection sweep analysis
+ Interval int64 `protobuf:"varint,3,opt,name=interval,proto3" json:"interval,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *OutlierConfig) Reset() { *m = OutlierConfig{} }
+func (m *OutlierConfig) String() string { return proto.CompactTextString(m) }
+func (*OutlierConfig) ProtoMessage() {}
+func (*OutlierConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_84cca5395405be5d, []int{1}
+}
+
+func (m *OutlierConfig) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_OutlierConfig.Unmarshal(m, b)
+}
+func (m *OutlierConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_OutlierConfig.Marshal(b, m, deterministic)
+}
+func (m *OutlierConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_OutlierConfig.Merge(m, src)
+}
+func (m *OutlierConfig) XXX_Size() int {
+ return xxx_messageInfo_OutlierConfig.Size(m)
+}
+func (m *OutlierConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_OutlierConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OutlierConfig proto.InternalMessageInfo
+
+func (m *OutlierConfig) GetBaseEjectionTime() int64 {
+ if m != nil {
+ return m.BaseEjectionTime
+ }
+ return 0
+}
+
+func (m *OutlierConfig) GetConsecutiveGatewayErrors() uint32 {
+ if m != nil {
+ return m.ConsecutiveGatewayErrors
+ }
+ return 0
+}
+
+func (m *OutlierConfig) GetInterval() int64 {
+ if m != nil {
+ return m.Interval
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*OutlierDetection)(nil), "admiral.global.v1alpha.OutlierDetection")
+ proto.RegisterMapType((map[string]string)(nil), "admiral.global.v1alpha.OutlierDetection.SelectorEntry")
+ proto.RegisterType((*OutlierConfig)(nil), "admiral.global.v1alpha.OutlierConfig")
+}
+
+func init() { proto.RegisterFile("outlierdetection.proto", fileDescriptor_84cca5395405be5d) }
+
+var fileDescriptor_84cca5395405be5d = []byte{
+ // 296 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x91, 0xc1, 0x4a, 0xc3, 0x40,
+ 0x18, 0x84, 0x49, 0x43, 0xb5, 0x6e, 0x89, 0x84, 0x45, 0x4a, 0xc8, 0xa9, 0x14, 0x84, 0x1e, 0x64,
+ 0xc1, 0x0a, 0x22, 0xea, 0x49, 0x0d, 0x5e, 0x04, 0x61, 0xf5, 0xe4, 0x25, 0x6c, 0x92, 0xdf, 0xb8,
+ 0xba, 0xc9, 0x5f, 0x36, 0x9b, 0x48, 0x9e, 0xc4, 0xe7, 0xf4, 0x0d, 0xa4, 0x9b, 0x18, 0x5a, 0x10,
+ 0xbc, 0xe5, 0xcf, 0xcc, 0x7c, 0x33, 0xb0, 0x64, 0x86, 0xb5, 0x51, 0x12, 0x74, 0x06, 0x06, 0x52,
+ 0x23, 0xb1, 0x64, 0x6b, 0x8d, 0x06, 0xe9, 0x4c, 0x64, 0x85, 0xd4, 0x42, 0xb1, 0x5c, 0x61, 0x22,
+ 0x14, 0x6b, 0x4e, 0x85, 0x5a, 0xbf, 0x89, 0xc5, 0xb7, 0x43, 0xfc, 0xc7, 0x2e, 0x72, 0xf7, 0x1b,
+ 0xa1, 0x0f, 0xe4, 0xb0, 0xc7, 0xc4, 0x29, 0x96, 0xaf, 0x32, 0x0f, 0x9c, 0xb9, 0xb3, 0x9c, 0xae,
+ 0x8e, 0xd9, 0xdf, 0x14, 0xd6, 0x13, 0x6e, 0xad, 0x99, 0x7b, 0xb8, 0x7d, 0x52, 0x4e, 0x26, 0x15,
+ 0x28, 0x48, 0x0d, 0xea, 0x60, 0x34, 0x77, 0x97, 0xd3, 0xd5, 0xf9, 0x3f, 0x9c, 0x61, 0x09, 0x7b,
+ 0xea, 0x83, 0x51, 0x69, 0x74, 0xcb, 0x07, 0x4e, 0x78, 0x45, 0xbc, 0x1d, 0x89, 0xfa, 0xc4, 0xfd,
+ 0x80, 0xd6, 0xee, 0x3c, 0xe0, 0x9b, 0x4f, 0x7a, 0x44, 0xc6, 0x8d, 0x50, 0x35, 0x04, 0x23, 0xfb,
+ 0xaf, 0x3b, 0x2e, 0x47, 0x17, 0xce, 0xe2, 0xcb, 0x21, 0xde, 0xce, 0x62, 0x7a, 0x42, 0x68, 0x22,
+ 0x2a, 0x88, 0xe1, 0xbd, 0xeb, 0x8d, 0x8d, 0x2c, 0xc0, 0xc2, 0x5c, 0xee, 0x6f, 0x94, 0xa8, 0x17,
+ 0x9e, 0x65, 0x01, 0xf4, 0x9a, 0x84, 0x29, 0x96, 0x15, 0xa4, 0xb5, 0x91, 0x0d, 0xc4, 0xb9, 0x30,
+ 0xf0, 0x29, 0xda, 0x18, 0xb4, 0x46, 0x5d, 0xd9, 0x3a, 0x8f, 0x07, 0x5b, 0x8e, 0xfb, 0xce, 0x10,
+ 0x59, 0x9d, 0x86, 0x64, 0x22, 0x4b, 0x03, 0xba, 0x11, 0x2a, 0x70, 0x6d, 0xc3, 0x70, 0xdf, 0xec,
+ 0xbf, 0x8c, 0x0b, 0xcc, 0x40, 0x25, 0x7b, 0xf6, 0xd5, 0xce, 0x7e, 0x02, 0x00, 0x00, 0xff, 0xff,
+ 0x6a, 0x78, 0x4b, 0x26, 0xcf, 0x01, 0x00, 0x00,
+}
From e1ef897031a46f5d9df18a8ee3df591e9419583f Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:51:21 -0700
Subject: [PATCH 032/235] add file
admiral/pkg/apis/admiral/model/outlierdetection.proto
---
.../apis/admiral/model/outlierdetection.proto | 44 +++++++++++++++++++
1 file changed, 44 insertions(+)
create mode 100644 admiral/pkg/apis/admiral/model/outlierdetection.proto
diff --git a/admiral/pkg/apis/admiral/model/outlierdetection.proto b/admiral/pkg/apis/admiral/model/outlierdetection.proto
new file mode 100644
index 00000000..a15f5854
--- /dev/null
+++ b/admiral/pkg/apis/admiral/model/outlierdetection.proto
@@ -0,0 +1,44 @@
+syntax = "proto3";
+
+package admiral.global.v1alpha;
+
+option go_package = "model";
+
+// ```
+// apiVersion: admiral.io/v1alpha1
+// kind: OutlierDetection
+// metadata:
+// name: my-outlier-configuration
+// spec:
+// selector:
+// identity: my-identity
+// env: prd
+// outlier_config:
+// base_ejection_time: 180
+// consecutive_gateway_errors: 100
+// interval: 60
+// ```
+
+message OutlierDetection {
+ // REQUIRED: base outlier configuration.
+ OutlierConfig outlier_config = 1;
+
+ // REQUIRED: One or more labels that indicate a specific set of pods/VMs
+ // on which this outlier configuration should be applied. The scope of
+ // label search is restricted to namespace mark for mesh enablement
+ // this will scan all cluster and namespace
+ map selector = 2;
+}
+
+// OutlierConfig describes routing for a endpoint.
+message OutlierConfig {
+
+ //REQUIRED: Minimum duration of time in seconds, the endpoint will be ejected
+ int64 base_ejection_time = 1;
+
+ //REQUIRED: No. of consecutive failures in specified interval after which the endpoint will be ejected
+ uint32 consecutive_gateway_errors = 2;
+
+ //REQUIRED: Time interval between ejection sweep analysis
+ int64 interval = 3;
+}
\ No newline at end of file
From 33bf644976bcc41c86ad51ec25c225a3a4832c96 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:51:25 -0700
Subject: [PATCH 033/235] add file admiral/pkg/apis/admiral/v1alpha1/
---
admiral/pkg/apis/admiral/v1alpha1/doc.go | 3 +
admiral/pkg/apis/admiral/v1alpha1/register.go | 70 ++
admiral/pkg/apis/admiral/v1alpha1/type.go | 323 +++++
.../admiral/v1alpha1/zz_generated.deepcopy.go | 1086 +++++++++++++++++
4 files changed, 1482 insertions(+)
create mode 100644 admiral/pkg/apis/admiral/v1alpha1/doc.go
create mode 100644 admiral/pkg/apis/admiral/v1alpha1/register.go
create mode 100644 admiral/pkg/apis/admiral/v1alpha1/type.go
create mode 100644 admiral/pkg/apis/admiral/v1alpha1/zz_generated.deepcopy.go
diff --git a/admiral/pkg/apis/admiral/v1alpha1/doc.go b/admiral/pkg/apis/admiral/v1alpha1/doc.go
new file mode 100644
index 00000000..562e6cbb
--- /dev/null
+++ b/admiral/pkg/apis/admiral/v1alpha1/doc.go
@@ -0,0 +1,3 @@
+// +k8s:deepcopy-gen=package
+// +groupName=admiral.io
+package v1alpha1
diff --git a/admiral/pkg/apis/admiral/v1alpha1/register.go b/admiral/pkg/apis/admiral/v1alpha1/register.go
new file mode 100644
index 00000000..232c50ef
--- /dev/null
+++ b/admiral/pkg/apis/admiral/v1alpha1/register.go
@@ -0,0 +1,70 @@
+package v1alpha1
+
+import (
+ "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral"
+
+ meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupVersion is the identifier for the API which includes
+// the name of the group and the version of the API
+var SchemeGroupVersion = schema.GroupVersion{
+ Group: admiral.GroupName,
+ Version: "v1alpha1",
+}
+
+// create a SchemeBuilder which uses functions to add types to
+// the scheme
+var (
+ SchemeBuilder runtime.SchemeBuilder
+ localSchemeBuilder = &SchemeBuilder
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+func init() {
+ // We only register manually written functions here. The registration of the
+ // generated functions takes place in the generated files. The separation
+ // makes the code compile even when the generated files are missing.
+ localSchemeBuilder.Register(addKnownTypes)
+}
+
+// addKnownTypes adds our types to the API scheme by registering
+// MyResource and MyResourceList
+func addKnownTypes(scheme *runtime.Scheme) error {
+ //scheme.AddUnversionedTypes(
+ // SchemeGroupVersion,
+ // &Dependency{},
+ // &DependencyList{},
+ // &GlobalTrafficPolicy{},
+ // &GlobalTrafficPolicyList{},
+ //)
+
+ scheme.AddKnownTypes(
+ SchemeGroupVersion,
+ &ClientConnectionConfig{},
+ &ClientConnectionConfigList{},
+ &Dependency{},
+ &DependencyList{},
+ &DependencyProxy{},
+ &DependencyProxyList{},
+ &GlobalTrafficPolicy{},
+ &GlobalTrafficPolicyList{},
+ &OutlierDetection{},
+ &OutlierDetectionList{},
+ &RoutingPolicy{},
+ &RoutingPolicyList{},
+ &TrafficConfig{},
+ &TrafficConfigList{},
+ )
+
+ // register the type in the scheme
+ meta_v1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/admiral/pkg/apis/admiral/v1alpha1/type.go b/admiral/pkg/apis/admiral/v1alpha1/type.go
new file mode 100644
index 00000000..61e63aa2
--- /dev/null
+++ b/admiral/pkg/apis/admiral/v1alpha1/type.go
@@ -0,0 +1,323 @@
+package v1alpha1
+
+import (
+ "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model"
+ meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// generic cdr object to wrap the dependency api
+type Dependency struct {
+ meta_v1.TypeMeta `json:",inline"`
+ meta_v1.ObjectMeta `json:"metadata"`
+ Spec model.Dependency `json:"spec"`
+ Status DependencyStatus `json:"status"`
+}
+
+// FooStatus is the status for a Foo resource
+type DependencyStatus struct {
+ ClusterSynced int32 `json:"clustersSynced"`
+ State string `json:"state"`
+}
+
+// FooList is a list of Foo resources
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type DependencyList struct {
+ meta_v1.TypeMeta `json:",inline"`
+ meta_v1.ListMeta `json:"metadata"`
+
+ Items []Dependency `json:"items"`
+}
+
+// generic cdr object to wrap the GlobalTrafficPolicy api
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type GlobalTrafficPolicy struct {
+ meta_v1.TypeMeta `json:",inline"`
+ meta_v1.ObjectMeta `json:"metadata"`
+ Spec model.GlobalTrafficPolicy `json:"spec"`
+ Status GlobalTrafficPolicyStatus `json:"status"`
+}
+
+// FooStatus is the status for a Foo resource
+
+type GlobalTrafficPolicyStatus struct {
+ ClusterSynced int32 `json:"clustersSynced"`
+ State string `json:"state"`
+}
+
+// FooList is a list of Foo resources
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type GlobalTrafficPolicyList struct {
+ meta_v1.TypeMeta `json:",inline"`
+ meta_v1.ListMeta `json:"metadata"`
+
+ Items []GlobalTrafficPolicy `json:"items"`
+}
+
+// generic cdr object to wrap the OutlierDetection api
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type OutlierDetection struct {
+ meta_v1.TypeMeta `json:",inline"`
+ meta_v1.ObjectMeta `json:"metadata"`
+ Spec model.OutlierDetection `json:"spec"`
+ Status OutlierDetectionStatus `json:"status"`
+}
+
+// FooStatus is the status for a Foo resource
+
+type OutlierDetectionStatus struct {
+ ClusterSynced int32 `json:"clustersSynced"`
+ State string `json:"state"`
+}
+
+// OutlierDetectionList is a list of OutlierDetection resources
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type OutlierDetectionList struct {
+ meta_v1.TypeMeta `json:",inline"`
+ meta_v1.ListMeta `json:"metadata"`
+
+ Items []OutlierDetection `json:"items"`
+}
+
+// generic cdr object to wrap the RoutingPolicy api
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type RoutingPolicy struct {
+ meta_v1.TypeMeta `json:",inline"`
+ meta_v1.ObjectMeta `json:"metadata"`
+ Spec model.RoutingPolicy `json:"spec"`
+ Status RoutingPolicyStatus `json:"status"`
+}
+
+// FooStatus is the status for a Foo resource
+
+type RoutingPolicyStatus struct {
+ ClusterSynced int32 `json:"clustersSynced"`
+ State string `json:"state"`
+}
+
+// FooList is a list of Foo resources
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type RoutingPolicyList struct {
+ meta_v1.TypeMeta `json:",inline"`
+ meta_v1.ListMeta `json:"metadata"`
+
+ Items []RoutingPolicy `json:"items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=true
+// +kubebuilder:printcolumn:name="Destination",type="string",JSONPath=`.spec.destination.identity`
+// +kubebuilder:printcolumn:name="Proxy",type="string",JSONPath=`.spec.proxy.identity`
+// +kubebuilder:resource:shortName=dp
+type DependencyProxy struct {
+ meta_v1.TypeMeta `json:",inline"`
+ meta_v1.ObjectMeta `json:"metadata"`
+ Spec model.DependencyProxy `json:"spec"`
+ Status DependencyProxyStatus `json:"status"`
+}
+
+// DependencyProxyStatus is the status for a DependencyProxy resource
+type DependencyProxyStatus struct {
+ State string `json:"state"`
+}
+
+// DependencyProxyList is a list of DependencyProxy resources
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type DependencyProxyList struct {
+ meta_v1.TypeMeta `json:",inline"`
+ meta_v1.ListMeta `json:"metadata"`
+
+ Items []DependencyProxy `json:"items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=true
+// +kubebuilder:resource:shortName=cc
+type ClientConnectionConfig struct {
+ meta_v1.TypeMeta `json:",inline"`
+ meta_v1.ObjectMeta `json:"metadata"`
+ Spec ClientConnectionConfigSpec `json:"spec"`
+ Status ClientConnectionConfigStatus `json:"status"`
+}
+
+type ClientConnectionConfigSpec struct {
+ ConnectionPool model.ConnectionPool `json:"connectionPool"`
+ Tunnel model.Tunnel `json:"tunnel"`
+}
+
+// ClientConnectionConfigStatus is the status for a ClientConnectionConfig resource
+type ClientConnectionConfigStatus struct {
+ State string `json:"state"`
+}
+
+// ClientConnectionConfigList is a list of ClientConnectionConfig resources
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type ClientConnectionConfigList struct {
+ meta_v1.TypeMeta `json:",inline"`
+ meta_v1.ListMeta `json:"metadata"`
+
+ Items []ClientConnectionConfig `json:"items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// TrafficConfig is the Schema for the TrafficConfigs API
+type TrafficConfig struct {
+ meta_v1.TypeMeta `json:",inline"`
+ meta_v1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec TrafficConfigSpec `json:"spec,omitempty"`
+ Status TrafficConfigStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// TrafficConfigList contains a list of TrafficConfig
+type TrafficConfigList struct {
+ meta_v1.TypeMeta `json:",inline"`
+ meta_v1.ListMeta `json:"metadata,omitempty"`
+ Items []TrafficConfig `json:"items"`
+}
+
+// QuotaGroupSpec defines the desired state of QuotaGroup specified by the user
+type TrafficConfigSpec struct {
+ WorkloadEnv []string `json:"workloadEnvs"`
+ EdgeService *EdgeService `json:"edgeService"`
+ QuotaGroup *QuotaGroup `json:"quotaGroup"`
+}
+
+type EdgeService struct {
+ DynamicRouting []*DynamicRouting `json:"dynamicRouting"`
+ Filters []*Filter `json:"filters"`
+ Routes []*Route `json:"routes"`
+ Targets []*Target `json:"targets,omitempty"`
+ TargetGroups []*TargetGroup `json:"targetGroups,omitempty"`
+}
+
+type Target struct {
+ Name string `json:"name"`
+ MeshDNS *string `json:"meshDNS,omitempty"`
+ Port int `json:"port"`
+ SocketTimeout int `json:"socketTimeout"`
+}
+
+type TargetGroup struct {
+ Name string `json:"name"`
+ Weights []*Weight `json:"weights"`
+ AppOverrides []*AppOverride `json:"appOverrides,omitempty"`
+}
+
+type AppOverride struct {
+ AssetAlias string `json:"assetAlias"`
+
+ AssetID string `json:"assetID"` // assetID is just a UUID string
+ Weights []*Weight `json:"weights"`
+}
+
+type Weight struct {
+ Name string `json:"name"`
+ Weight int `json:"weight"`
+}
+
+type QuotaGroup struct {
+ TotalQuotaGroup []*TotalQuotaGroup `json:"totalQuotaGroups"`
+ AppQuotaGroups []*AppQuotaGroup `json:"appQuotaGroups,omitempty"`
+}
+
+type Route struct {
+ Name string `json:"name"`
+ Inbound string `json:"inbound"`
+ Outbound string `json:"outbound"`
+ FilterSelector string `json:"filterSelector"`
+ WorkloadEnvSelectors []string `json:"workloadEnvSelectors"`
+ Timeout int `json:"timeout"`
+ Config []*Config `json:"config,omitempty"`
+}
+
+type Config struct {
+ TargetGroupSelector string `json:"targetGroupSelector"`
+ TargetSelector string `json:"targetSelector"`
+}
+
+type Filter struct {
+ Name string `json:"name"`
+ Retries Retry `json:"retries"`
+ Options []string `json:"options"`
+}
+
+type Retry struct {
+ Attempts int `json:"attempts"`
+ PerTryTimeout string `json:"perTryTimeout"`
+}
+
+type DynamicRouting struct {
+ Name string `json:"name"`
+ Url string `json:"url"`
+ CacheKeyAlgorithm string `json:"cacheKeyAlgorithm"`
+ TtlSec int `json:"ttlSec"`
+ Local bool `json:"local"`
+}
+
+type TotalQuotaGroup struct {
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Quotas []*Quota `json:"quotas"`
+ WorkloadEnvSelectors []string `json:"workloadEnvSelectors"`
+ RegionLevelLimit bool `json:"regionLevelLimit"`
+ CPULimit *int `json:"cpuLimit,omitempty"`
+ MemoryLimit *int `json:"memoryLimit,omitempty"`
+ PodLevelThreshold *int `json:"podLevelThreshold"`
+ FailureModeBehaviour string `json:"failureModeBehaviour"`
+ AdaptiveConcurrency *AdaptiveConcurrency `json:"adaptiveConcurrency,omitempty"`
+}
+type AppQuotaGroup struct {
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Quotas []*Quota `json:"quotas"`
+ AssociatedApps []string `json:"associatedApps"`
+ WorkloadEnvSelectors []string `json:"workloadEnvSelectors"`
+}
+
+type AdaptiveConcurrency struct {
+ LatencyThreshold string `json:"latencyThreshold"`
+ SkippedURLs []string `json:"skippedURLs"`
+ SampleAggregatePercentile int `json:"sampleAggregatePercentile"`
+ ConcurrencyUpdateInterval string `json:"concurrencyUpdateInterval"`
+ MinRTTCalInterval string `json:"minRTTCalInterval"`
+ MinRTTCalJitter int `json:"minRTTCalJitter"`
+ MinRTTCalRequestCount int `json:"minRTTCalRequestCount"`
+ MinRTTCalMinConcurrency int `json:"minRTTCalMinConcurrency"`
+ Enabled bool `json:"enabled"`
+}
+
+type Quota struct {
+ Name string `json:"name"`
+ TimePeriod string `json:"timePeriod"`
+ MaxAmount int `json:"maxAmount"`
+ KeyType string `json:"keyType"`
+ Algorithm string `json:"algorithm"`
+ Behaviour string `json:"behaviour"`
+ Rule string `json:"rule"`
+ Path string `json:"path,omitempty"`
+ Methods []string `json:"methods,omitempty"`
+ Headers []*Header `json:"headers,omitempty"`
+}
+
+type Header struct {
+ Name string `json:"name"`
+ Value string `json:"value"`
+ Condition string `json:"condition"` // EQUALS, PREFIX, CONTAINS, REGEX
+}
+
+// TrafficConfigStatus defines the observed state of TrafficConfig
+type TrafficConfigStatus struct {
+ Message string `json:"message"`
+ LastAppliedConfigVersion string `json:"lastAppliedConfigVersion"`
+ LastUpdateTime meta_v1.Time `json:"lastUpdateTime"`
+ Status bool `json:"status"`
+}
diff --git a/admiral/pkg/apis/admiral/v1alpha1/zz_generated.deepcopy.go b/admiral/pkg/apis/admiral/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 00000000..9a91f0b8
--- /dev/null
+++ b/admiral/pkg/apis/admiral/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,1086 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdaptiveConcurrency) DeepCopyInto(out *AdaptiveConcurrency) {
+ *out = *in
+ if in.SkippedURLs != nil {
+ in, out := &in.SkippedURLs, &out.SkippedURLs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdaptiveConcurrency.
+func (in *AdaptiveConcurrency) DeepCopy() *AdaptiveConcurrency {
+ if in == nil {
+ return nil
+ }
+ out := new(AdaptiveConcurrency)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AppOverride) DeepCopyInto(out *AppOverride) {
+ *out = *in
+ if in.Weights != nil {
+ in, out := &in.Weights, &out.Weights
+ *out = make([]*Weight, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(Weight)
+ **out = **in
+ }
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppOverride.
+func (in *AppOverride) DeepCopy() *AppOverride {
+ if in == nil {
+ return nil
+ }
+ out := new(AppOverride)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AppQuotaGroup) DeepCopyInto(out *AppQuotaGroup) {
+ *out = *in
+ if in.Quotas != nil {
+ in, out := &in.Quotas, &out.Quotas
+ *out = make([]*Quota, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(Quota)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ if in.AssociatedApps != nil {
+ in, out := &in.AssociatedApps, &out.AssociatedApps
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.WorkloadEnvSelectors != nil {
+ in, out := &in.WorkloadEnvSelectors, &out.WorkloadEnvSelectors
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppQuotaGroup.
+func (in *AppQuotaGroup) DeepCopy() *AppQuotaGroup {
+ if in == nil {
+ return nil
+ }
+ out := new(AppQuotaGroup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClientConnectionConfig) DeepCopyInto(out *ClientConnectionConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionConfig.
+func (in *ClientConnectionConfig) DeepCopy() *ClientConnectionConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ClientConnectionConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClientConnectionConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClientConnectionConfigList) DeepCopyInto(out *ClientConnectionConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ClientConnectionConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionConfigList.
+func (in *ClientConnectionConfigList) DeepCopy() *ClientConnectionConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(ClientConnectionConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ClientConnectionConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClientConnectionConfigSpec) DeepCopyInto(out *ClientConnectionConfigSpec) {
+ *out = *in
+ in.ConnectionPool.DeepCopyInto(&out.ConnectionPool)
+ in.Tunnel.DeepCopyInto(&out.Tunnel)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionConfigSpec.
+func (in *ClientConnectionConfigSpec) DeepCopy() *ClientConnectionConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ClientConnectionConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClientConnectionConfigStatus) DeepCopyInto(out *ClientConnectionConfigStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionConfigStatus.
+func (in *ClientConnectionConfigStatus) DeepCopy() *ClientConnectionConfigStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ClientConnectionConfigStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Config) DeepCopyInto(out *Config) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
+func (in *Config) DeepCopy() *Config {
+ if in == nil {
+ return nil
+ }
+ out := new(Config)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Dependency) DeepCopyInto(out *Dependency) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dependency.
+func (in *Dependency) DeepCopy() *Dependency {
+ if in == nil {
+ return nil
+ }
+ out := new(Dependency)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Dependency) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DependencyList) DeepCopyInto(out *DependencyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Dependency, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DependencyList.
+func (in *DependencyList) DeepCopy() *DependencyList {
+ if in == nil {
+ return nil
+ }
+ out := new(DependencyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DependencyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DependencyProxy) DeepCopyInto(out *DependencyProxy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DependencyProxy.
+func (in *DependencyProxy) DeepCopy() *DependencyProxy {
+ if in == nil {
+ return nil
+ }
+ out := new(DependencyProxy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DependencyProxy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DependencyProxyList) DeepCopyInto(out *DependencyProxyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]DependencyProxy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DependencyProxyList.
+func (in *DependencyProxyList) DeepCopy() *DependencyProxyList {
+ if in == nil {
+ return nil
+ }
+ out := new(DependencyProxyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DependencyProxyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DependencyProxyStatus) DeepCopyInto(out *DependencyProxyStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DependencyProxyStatus.
+func (in *DependencyProxyStatus) DeepCopy() *DependencyProxyStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DependencyProxyStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DependencyStatus) DeepCopyInto(out *DependencyStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DependencyStatus.
+func (in *DependencyStatus) DeepCopy() *DependencyStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(DependencyStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DynamicRouting) DeepCopyInto(out *DynamicRouting) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicRouting.
+func (in *DynamicRouting) DeepCopy() *DynamicRouting {
+ if in == nil {
+ return nil
+ }
+ out := new(DynamicRouting)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EdgeService) DeepCopyInto(out *EdgeService) {
+ *out = *in
+ if in.DynamicRouting != nil {
+ in, out := &in.DynamicRouting, &out.DynamicRouting
+ *out = make([]*DynamicRouting, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(DynamicRouting)
+ **out = **in
+ }
+ }
+ }
+ if in.Filters != nil {
+ in, out := &in.Filters, &out.Filters
+ *out = make([]*Filter, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(Filter)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ if in.Routes != nil {
+ in, out := &in.Routes, &out.Routes
+ *out = make([]*Route, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(Route)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ if in.Targets != nil {
+ in, out := &in.Targets, &out.Targets
+ *out = make([]*Target, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(Target)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ if in.TargetGroups != nil {
+ in, out := &in.TargetGroups, &out.TargetGroups
+ *out = make([]*TargetGroup, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(TargetGroup)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EdgeService.
+func (in *EdgeService) DeepCopy() *EdgeService {
+ if in == nil {
+ return nil
+ }
+ out := new(EdgeService)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Filter) DeepCopyInto(out *Filter) {
+ *out = *in
+ out.Retries = in.Retries
+ if in.Options != nil {
+ in, out := &in.Options, &out.Options
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter.
+func (in *Filter) DeepCopy() *Filter {
+ if in == nil {
+ return nil
+ }
+ out := new(Filter)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GlobalTrafficPolicy) DeepCopyInto(out *GlobalTrafficPolicy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalTrafficPolicy.
+func (in *GlobalTrafficPolicy) DeepCopy() *GlobalTrafficPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(GlobalTrafficPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GlobalTrafficPolicy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GlobalTrafficPolicyList) DeepCopyInto(out *GlobalTrafficPolicyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]GlobalTrafficPolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalTrafficPolicyList.
+func (in *GlobalTrafficPolicyList) DeepCopy() *GlobalTrafficPolicyList {
+ if in == nil {
+ return nil
+ }
+ out := new(GlobalTrafficPolicyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GlobalTrafficPolicyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GlobalTrafficPolicyStatus) DeepCopyInto(out *GlobalTrafficPolicyStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalTrafficPolicyStatus.
+func (in *GlobalTrafficPolicyStatus) DeepCopy() *GlobalTrafficPolicyStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(GlobalTrafficPolicyStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Header) DeepCopyInto(out *Header) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Header.
+func (in *Header) DeepCopy() *Header {
+ if in == nil {
+ return nil
+ }
+ out := new(Header)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OutlierDetection) DeepCopyInto(out *OutlierDetection) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutlierDetection.
+func (in *OutlierDetection) DeepCopy() *OutlierDetection {
+ if in == nil {
+ return nil
+ }
+ out := new(OutlierDetection)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OutlierDetection) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OutlierDetectionList) DeepCopyInto(out *OutlierDetectionList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]OutlierDetection, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutlierDetectionList.
+func (in *OutlierDetectionList) DeepCopy() *OutlierDetectionList {
+ if in == nil {
+ return nil
+ }
+ out := new(OutlierDetectionList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OutlierDetectionList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OutlierDetectionStatus) DeepCopyInto(out *OutlierDetectionStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutlierDetectionStatus.
+func (in *OutlierDetectionStatus) DeepCopy() *OutlierDetectionStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OutlierDetectionStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Quota) DeepCopyInto(out *Quota) {
+ *out = *in
+ if in.Methods != nil {
+ in, out := &in.Methods, &out.Methods
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Headers != nil {
+ in, out := &in.Headers, &out.Headers
+ *out = make([]*Header, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(Header)
+ **out = **in
+ }
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Quota.
+func (in *Quota) DeepCopy() *Quota {
+ if in == nil {
+ return nil
+ }
+ out := new(Quota)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QuotaGroup) DeepCopyInto(out *QuotaGroup) {
+ *out = *in
+ if in.TotalQuotaGroup != nil {
+ in, out := &in.TotalQuotaGroup, &out.TotalQuotaGroup
+ *out = make([]*TotalQuotaGroup, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(TotalQuotaGroup)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ if in.AppQuotaGroups != nil {
+ in, out := &in.AppQuotaGroups, &out.AppQuotaGroups
+ *out = make([]*AppQuotaGroup, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(AppQuotaGroup)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaGroup.
+func (in *QuotaGroup) DeepCopy() *QuotaGroup {
+ if in == nil {
+ return nil
+ }
+ out := new(QuotaGroup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Retry) DeepCopyInto(out *Retry) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Retry.
+func (in *Retry) DeepCopy() *Retry {
+ if in == nil {
+ return nil
+ }
+ out := new(Retry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Route) DeepCopyInto(out *Route) {
+ *out = *in
+ if in.WorkloadEnvSelectors != nil {
+ in, out := &in.WorkloadEnvSelectors, &out.WorkloadEnvSelectors
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = make([]*Config, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(Config)
+ **out = **in
+ }
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route.
+func (in *Route) DeepCopy() *Route {
+ if in == nil {
+ return nil
+ }
+ out := new(Route)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoutingPolicy) DeepCopyInto(out *RoutingPolicy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingPolicy.
+func (in *RoutingPolicy) DeepCopy() *RoutingPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(RoutingPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RoutingPolicy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoutingPolicyList) DeepCopyInto(out *RoutingPolicyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]RoutingPolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingPolicyList.
+func (in *RoutingPolicyList) DeepCopy() *RoutingPolicyList {
+ if in == nil {
+ return nil
+ }
+ out := new(RoutingPolicyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RoutingPolicyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RoutingPolicyStatus) DeepCopyInto(out *RoutingPolicyStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingPolicyStatus.
+func (in *RoutingPolicyStatus) DeepCopy() *RoutingPolicyStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(RoutingPolicyStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Target) DeepCopyInto(out *Target) {
+ *out = *in
+ if in.MeshDNS != nil {
+ in, out := &in.MeshDNS, &out.MeshDNS
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Target.
+func (in *Target) DeepCopy() *Target {
+ if in == nil {
+ return nil
+ }
+ out := new(Target)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TargetGroup) DeepCopyInto(out *TargetGroup) {
+ *out = *in
+ if in.Weights != nil {
+ in, out := &in.Weights, &out.Weights
+ *out = make([]*Weight, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(Weight)
+ **out = **in
+ }
+ }
+ }
+ if in.AppOverrides != nil {
+ in, out := &in.AppOverrides, &out.AppOverrides
+ *out = make([]*AppOverride, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(AppOverride)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroup.
+func (in *TargetGroup) DeepCopy() *TargetGroup {
+ if in == nil {
+ return nil
+ }
+ out := new(TargetGroup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TotalQuotaGroup) DeepCopyInto(out *TotalQuotaGroup) {
+ *out = *in
+ if in.Quotas != nil {
+ in, out := &in.Quotas, &out.Quotas
+ *out = make([]*Quota, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(Quota)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ if in.WorkloadEnvSelectors != nil {
+ in, out := &in.WorkloadEnvSelectors, &out.WorkloadEnvSelectors
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.CPULimit != nil {
+ in, out := &in.CPULimit, &out.CPULimit
+ *out = new(int)
+ **out = **in
+ }
+ if in.MemoryLimit != nil {
+ in, out := &in.MemoryLimit, &out.MemoryLimit
+ *out = new(int)
+ **out = **in
+ }
+ if in.PodLevelThreshold != nil {
+ in, out := &in.PodLevelThreshold, &out.PodLevelThreshold
+ *out = new(int)
+ **out = **in
+ }
+ if in.AdaptiveConcurrency != nil {
+ in, out := &in.AdaptiveConcurrency, &out.AdaptiveConcurrency
+ *out = new(AdaptiveConcurrency)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TotalQuotaGroup.
+func (in *TotalQuotaGroup) DeepCopy() *TotalQuotaGroup {
+ if in == nil {
+ return nil
+ }
+ out := new(TotalQuotaGroup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TrafficConfig) DeepCopyInto(out *TrafficConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficConfig.
+func (in *TrafficConfig) DeepCopy() *TrafficConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(TrafficConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TrafficConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TrafficConfigList) DeepCopyInto(out *TrafficConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]TrafficConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficConfigList.
+func (in *TrafficConfigList) DeepCopy() *TrafficConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(TrafficConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TrafficConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TrafficConfigSpec) DeepCopyInto(out *TrafficConfigSpec) {
+ *out = *in
+ if in.WorkloadEnv != nil {
+ in, out := &in.WorkloadEnv, &out.WorkloadEnv
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.EdgeService != nil {
+ in, out := &in.EdgeService, &out.EdgeService
+ *out = new(EdgeService)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.QuotaGroup != nil {
+ in, out := &in.QuotaGroup, &out.QuotaGroup
+ *out = new(QuotaGroup)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficConfigSpec.
+func (in *TrafficConfigSpec) DeepCopy() *TrafficConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(TrafficConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TrafficConfigStatus) DeepCopyInto(out *TrafficConfigStatus) {
+ *out = *in
+ in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficConfigStatus.
+func (in *TrafficConfigStatus) DeepCopy() *TrafficConfigStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(TrafficConfigStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Weight) DeepCopyInto(out *Weight) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Weight.
+func (in *Weight) DeepCopy() *Weight {
+ if in == nil {
+ return nil
+ }
+ out := new(Weight)
+ in.DeepCopyInto(out)
+ return out
+}
From e410056ee03438f2f9094b2ef434ffc46a93f181 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:51:29 -0700
Subject: [PATCH 034/235] add file
admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/
---
.../typed/admiral/v1alpha1/admiral_client.go | 137 ++++++++++++
.../v1alpha1/clientconnectionconfig.go | 195 ++++++++++++++++++
.../typed/admiral/v1alpha1/dependency.go | 195 ++++++++++++++++++
.../typed/admiral/v1alpha1/dependencyproxy.go | 195 ++++++++++++++++++
.../versioned/typed/admiral/v1alpha1/doc.go | 20 ++
.../typed/admiral/v1alpha1/fake/doc.go | 20 ++
.../v1alpha1/fake/fake_admiral_client.go | 64 ++++++
.../fake/fake_clientconnectionconfig.go | 142 +++++++++++++
.../admiral/v1alpha1/fake/fake_dependency.go | 142 +++++++++++++
.../v1alpha1/fake/fake_dependencyproxy.go | 142 +++++++++++++
.../v1alpha1/fake/fake_globaltrafficpolicy.go | 142 +++++++++++++
.../v1alpha1/fake/fake_outlierdetection.go | 142 +++++++++++++
.../v1alpha1/fake/fake_routingpolicy.go | 142 +++++++++++++
.../v1alpha1/fake/fake_trafficconfig.go | 142 +++++++++++++
.../admiral/v1alpha1/generated_expansion.go | 33 +++
.../admiral/v1alpha1/globaltrafficpolicy.go | 195 ++++++++++++++++++
.../admiral/v1alpha1/outlierdetection.go | 195 ++++++++++++++++++
.../typed/admiral/v1alpha1/routingpolicy.go | 195 ++++++++++++++++++
.../typed/admiral/v1alpha1/trafficconfig.go | 195 ++++++++++++++++++
19 files changed, 2633 insertions(+)
create mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/admiral_client.go
create mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/clientconnectionconfig.go
create mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/dependency.go
create mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/dependencyproxy.go
create mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/doc.go
create mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/doc.go
create mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_admiral_client.go
create mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_clientconnectionconfig.go
create mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_dependency.go
create mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_dependencyproxy.go
create mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_globaltrafficpolicy.go
create mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_outlierdetection.go
create mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_routingpolicy.go
create mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_trafficconfig.go
create mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/generated_expansion.go
create mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/globaltrafficpolicy.go
create mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/outlierdetection.go
create mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/routingpolicy.go
create mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/trafficconfig.go
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/admiral_client.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/admiral_client.go
new file mode 100644
index 00000000..ae67e5ca
--- /dev/null
+++ b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/admiral_client.go
@@ -0,0 +1,137 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "net/http"
+
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+type AdmiralV1alpha1Interface interface {
+ RESTClient() rest.Interface
+ ClientConnectionConfigsGetter
+ DependenciesGetter
+ DependencyProxiesGetter
+ GlobalTrafficPoliciesGetter
+ OutlierDetectionsGetter
+ RoutingPoliciesGetter
+ TrafficConfigsGetter
+}
+
+// AdmiralV1alpha1Client is used to interact with features provided by the admiral.io group.
+type AdmiralV1alpha1Client struct {
+ restClient rest.Interface
+}
+
+func (c *AdmiralV1alpha1Client) ClientConnectionConfigs(namespace string) ClientConnectionConfigInterface {
+ return newClientConnectionConfigs(c, namespace)
+}
+
+func (c *AdmiralV1alpha1Client) Dependencies(namespace string) DependencyInterface {
+ return newDependencies(c, namespace)
+}
+
+func (c *AdmiralV1alpha1Client) DependencyProxies(namespace string) DependencyProxyInterface {
+ return newDependencyProxies(c, namespace)
+}
+
+func (c *AdmiralV1alpha1Client) GlobalTrafficPolicies(namespace string) GlobalTrafficPolicyInterface {
+ return newGlobalTrafficPolicies(c, namespace)
+}
+
+func (c *AdmiralV1alpha1Client) OutlierDetections(namespace string) OutlierDetectionInterface {
+ return newOutlierDetections(c, namespace)
+}
+
+func (c *AdmiralV1alpha1Client) RoutingPolicies(namespace string) RoutingPolicyInterface {
+ return newRoutingPolicies(c, namespace)
+}
+
+func (c *AdmiralV1alpha1Client) TrafficConfigs(namespace string) TrafficConfigInterface {
+ return newTrafficConfigs(c, namespace)
+}
+
+// NewForConfig creates a new AdmiralV1alpha1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*AdmiralV1alpha1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new AdmiralV1alpha1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AdmiralV1alpha1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &AdmiralV1alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new AdmiralV1alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *AdmiralV1alpha1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new AdmiralV1alpha1Client for the given RESTClient.
+func New(c rest.Interface) *AdmiralV1alpha1Client {
+ return &AdmiralV1alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1alpha1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *AdmiralV1alpha1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/clientconnectionconfig.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/clientconnectionconfig.go
new file mode 100644
index 00000000..5f44403c
--- /dev/null
+++ b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/clientconnectionconfig.go
@@ -0,0 +1,195 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ scheme "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ClientConnectionConfigsGetter has a method to return a ClientConnectionConfigInterface.
+// A group's client should implement this interface.
+type ClientConnectionConfigsGetter interface {
+ ClientConnectionConfigs(namespace string) ClientConnectionConfigInterface
+}
+
+// ClientConnectionConfigInterface has methods to work with ClientConnectionConfig resources.
+type ClientConnectionConfigInterface interface {
+ Create(ctx context.Context, clientConnectionConfig *v1alpha1.ClientConnectionConfig, opts v1.CreateOptions) (*v1alpha1.ClientConnectionConfig, error)
+ Update(ctx context.Context, clientConnectionConfig *v1alpha1.ClientConnectionConfig, opts v1.UpdateOptions) (*v1alpha1.ClientConnectionConfig, error)
+ UpdateStatus(ctx context.Context, clientConnectionConfig *v1alpha1.ClientConnectionConfig, opts v1.UpdateOptions) (*v1alpha1.ClientConnectionConfig, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClientConnectionConfig, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClientConnectionConfigList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClientConnectionConfig, err error)
+ ClientConnectionConfigExpansion
+}
+
+// clientConnectionConfigs implements ClientConnectionConfigInterface
+type clientConnectionConfigs struct {
+ client rest.Interface
+ ns string
+}
+
+// newClientConnectionConfigs returns a ClientConnectionConfigs
+func newClientConnectionConfigs(c *AdmiralV1alpha1Client, namespace string) *clientConnectionConfigs {
+ return &clientConnectionConfigs{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the clientConnectionConfig, and returns the corresponding clientConnectionConfig object, and an error if there is any.
+func (c *clientConnectionConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClientConnectionConfig, err error) {
+ result = &v1alpha1.ClientConnectionConfig{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("clientconnectionconfigs").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ClientConnectionConfigs that match those selectors.
+func (c *clientConnectionConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClientConnectionConfigList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.ClientConnectionConfigList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("clientconnectionconfigs").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested clientConnectionConfigs.
+func (c *clientConnectionConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("clientconnectionconfigs").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a clientConnectionConfig and creates it. Returns the server's representation of the clientConnectionConfig, and an error, if there is any.
+func (c *clientConnectionConfigs) Create(ctx context.Context, clientConnectionConfig *v1alpha1.ClientConnectionConfig, opts v1.CreateOptions) (result *v1alpha1.ClientConnectionConfig, err error) {
+ result = &v1alpha1.ClientConnectionConfig{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("clientconnectionconfigs").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clientConnectionConfig).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a clientConnectionConfig and updates it. Returns the server's representation of the clientConnectionConfig, and an error, if there is any.
+func (c *clientConnectionConfigs) Update(ctx context.Context, clientConnectionConfig *v1alpha1.ClientConnectionConfig, opts v1.UpdateOptions) (result *v1alpha1.ClientConnectionConfig, err error) {
+ result = &v1alpha1.ClientConnectionConfig{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("clientconnectionconfigs").
+ Name(clientConnectionConfig.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clientConnectionConfig).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *clientConnectionConfigs) UpdateStatus(ctx context.Context, clientConnectionConfig *v1alpha1.ClientConnectionConfig, opts v1.UpdateOptions) (result *v1alpha1.ClientConnectionConfig, err error) {
+ result = &v1alpha1.ClientConnectionConfig{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("clientconnectionconfigs").
+ Name(clientConnectionConfig.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(clientConnectionConfig).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the clientConnectionConfig and deletes it. Returns an error if one occurs.
+func (c *clientConnectionConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("clientconnectionconfigs").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *clientConnectionConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("clientconnectionconfigs").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched clientConnectionConfig.
+func (c *clientConnectionConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClientConnectionConfig, err error) {
+ result = &v1alpha1.ClientConnectionConfig{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("clientconnectionconfigs").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/dependency.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/dependency.go
new file mode 100644
index 00000000..5c913b99
--- /dev/null
+++ b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/dependency.go
@@ -0,0 +1,195 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ scheme "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// DependenciesGetter has a method to return a DependencyInterface.
+// A group's client should implement this interface.
+type DependenciesGetter interface {
+ Dependencies(namespace string) DependencyInterface
+}
+
+// DependencyInterface has methods to work with Dependency resources.
+type DependencyInterface interface {
+ Create(ctx context.Context, dependency *v1alpha1.Dependency, opts v1.CreateOptions) (*v1alpha1.Dependency, error)
+ Update(ctx context.Context, dependency *v1alpha1.Dependency, opts v1.UpdateOptions) (*v1alpha1.Dependency, error)
+ UpdateStatus(ctx context.Context, dependency *v1alpha1.Dependency, opts v1.UpdateOptions) (*v1alpha1.Dependency, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Dependency, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.DependencyList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Dependency, err error)
+ DependencyExpansion
+}
+
+// dependencies implements DependencyInterface
+type dependencies struct {
+ client rest.Interface
+ ns string
+}
+
+// newDependencies returns a Dependencies
+func newDependencies(c *AdmiralV1alpha1Client, namespace string) *dependencies {
+ return &dependencies{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the dependency, and returns the corresponding dependency object, and an error if there is any.
+func (c *dependencies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Dependency, err error) {
+ result = &v1alpha1.Dependency{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("dependencies").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Dependencies that match those selectors.
+func (c *dependencies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.DependencyList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.DependencyList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("dependencies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested dependencies.
+func (c *dependencies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("dependencies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a dependency and creates it. Returns the server's representation of the dependency, and an error, if there is any.
+func (c *dependencies) Create(ctx context.Context, dependency *v1alpha1.Dependency, opts v1.CreateOptions) (result *v1alpha1.Dependency, err error) {
+ result = &v1alpha1.Dependency{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("dependencies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(dependency).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a dependency and updates it. Returns the server's representation of the dependency, and an error, if there is any.
+func (c *dependencies) Update(ctx context.Context, dependency *v1alpha1.Dependency, opts v1.UpdateOptions) (result *v1alpha1.Dependency, err error) {
+ result = &v1alpha1.Dependency{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("dependencies").
+ Name(dependency.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(dependency).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *dependencies) UpdateStatus(ctx context.Context, dependency *v1alpha1.Dependency, opts v1.UpdateOptions) (result *v1alpha1.Dependency, err error) {
+ result = &v1alpha1.Dependency{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("dependencies").
+ Name(dependency.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(dependency).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the dependency and deletes it. Returns an error if one occurs.
+func (c *dependencies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("dependencies").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *dependencies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("dependencies").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched dependency.
+func (c *dependencies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Dependency, err error) {
+ result = &v1alpha1.Dependency{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("dependencies").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/dependencyproxy.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/dependencyproxy.go
new file mode 100644
index 00000000..10395cef
--- /dev/null
+++ b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/dependencyproxy.go
@@ -0,0 +1,195 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ scheme "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// DependencyProxiesGetter has a method to return a DependencyProxyInterface.
+// A group's client should implement this interface.
+type DependencyProxiesGetter interface {
+ DependencyProxies(namespace string) DependencyProxyInterface
+}
+
+// DependencyProxyInterface has methods to work with DependencyProxy resources.
+type DependencyProxyInterface interface {
+ Create(ctx context.Context, dependencyProxy *v1alpha1.DependencyProxy, opts v1.CreateOptions) (*v1alpha1.DependencyProxy, error)
+ Update(ctx context.Context, dependencyProxy *v1alpha1.DependencyProxy, opts v1.UpdateOptions) (*v1alpha1.DependencyProxy, error)
+ UpdateStatus(ctx context.Context, dependencyProxy *v1alpha1.DependencyProxy, opts v1.UpdateOptions) (*v1alpha1.DependencyProxy, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.DependencyProxy, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.DependencyProxyList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.DependencyProxy, err error)
+ DependencyProxyExpansion
+}
+
+// dependencyProxies implements DependencyProxyInterface
+type dependencyProxies struct {
+ client rest.Interface
+ ns string
+}
+
+// newDependencyProxies returns a DependencyProxies
+func newDependencyProxies(c *AdmiralV1alpha1Client, namespace string) *dependencyProxies {
+ return &dependencyProxies{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the dependencyProxy, and returns the corresponding dependencyProxy object, and an error if there is any.
+func (c *dependencyProxies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.DependencyProxy, err error) {
+ result = &v1alpha1.DependencyProxy{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("dependencyproxies").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of DependencyProxies that match those selectors.
+func (c *dependencyProxies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.DependencyProxyList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.DependencyProxyList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("dependencyproxies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested dependencyProxies.
+func (c *dependencyProxies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("dependencyproxies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a dependencyProxy and creates it. Returns the server's representation of the dependencyProxy, and an error, if there is any.
+func (c *dependencyProxies) Create(ctx context.Context, dependencyProxy *v1alpha1.DependencyProxy, opts v1.CreateOptions) (result *v1alpha1.DependencyProxy, err error) {
+ result = &v1alpha1.DependencyProxy{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("dependencyproxies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(dependencyProxy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a dependencyProxy and updates it. Returns the server's representation of the dependencyProxy, and an error, if there is any.
+func (c *dependencyProxies) Update(ctx context.Context, dependencyProxy *v1alpha1.DependencyProxy, opts v1.UpdateOptions) (result *v1alpha1.DependencyProxy, err error) {
+ result = &v1alpha1.DependencyProxy{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("dependencyproxies").
+ Name(dependencyProxy.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(dependencyProxy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *dependencyProxies) UpdateStatus(ctx context.Context, dependencyProxy *v1alpha1.DependencyProxy, opts v1.UpdateOptions) (result *v1alpha1.DependencyProxy, err error) {
+ result = &v1alpha1.DependencyProxy{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("dependencyproxies").
+ Name(dependencyProxy.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(dependencyProxy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the dependencyProxy and deletes it. Returns an error if one occurs.
+func (c *dependencyProxies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("dependencyproxies").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *dependencyProxies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("dependencyproxies").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched dependencyProxy.
+func (c *dependencyProxies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.DependencyProxy, err error) {
+ result = &v1alpha1.DependencyProxy{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("dependencyproxies").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/doc.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/doc.go
new file mode 100644
index 00000000..df51baa4
--- /dev/null
+++ b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1alpha1
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/doc.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/doc.go
new file mode 100644
index 00000000..16f44399
--- /dev/null
+++ b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_admiral_client.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_admiral_client.go
new file mode 100644
index 00000000..ee2fbeca
--- /dev/null
+++ b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_admiral_client.go
@@ -0,0 +1,64 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeAdmiralV1alpha1 struct {
+ *testing.Fake
+}
+
+func (c *FakeAdmiralV1alpha1) ClientConnectionConfigs(namespace string) v1alpha1.ClientConnectionConfigInterface {
+ return &FakeClientConnectionConfigs{c, namespace}
+}
+
+func (c *FakeAdmiralV1alpha1) Dependencies(namespace string) v1alpha1.DependencyInterface {
+ return &FakeDependencies{c, namespace}
+}
+
+func (c *FakeAdmiralV1alpha1) DependencyProxies(namespace string) v1alpha1.DependencyProxyInterface {
+ return &FakeDependencyProxies{c, namespace}
+}
+
+func (c *FakeAdmiralV1alpha1) GlobalTrafficPolicies(namespace string) v1alpha1.GlobalTrafficPolicyInterface {
+ return &FakeGlobalTrafficPolicies{c, namespace}
+}
+
+func (c *FakeAdmiralV1alpha1) OutlierDetections(namespace string) v1alpha1.OutlierDetectionInterface {
+ return &FakeOutlierDetections{c, namespace}
+}
+
+func (c *FakeAdmiralV1alpha1) RoutingPolicies(namespace string) v1alpha1.RoutingPolicyInterface {
+ return &FakeRoutingPolicies{c, namespace}
+}
+
+func (c *FakeAdmiralV1alpha1) TrafficConfigs(namespace string) v1alpha1.TrafficConfigInterface {
+ return &FakeTrafficConfigs{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeAdmiralV1alpha1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_clientconnectionconfig.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_clientconnectionconfig.go
new file mode 100644
index 00000000..291da6ae
--- /dev/null
+++ b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_clientconnectionconfig.go
@@ -0,0 +1,142 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeClientConnectionConfigs implements ClientConnectionConfigInterface
+type FakeClientConnectionConfigs struct {
+ Fake *FakeAdmiralV1alpha1
+ ns string
+}
+
+var clientconnectionconfigsResource = schema.GroupVersionResource{Group: "admiral.io", Version: "v1alpha1", Resource: "clientconnectionconfigs"}
+
+var clientconnectionconfigsKind = schema.GroupVersionKind{Group: "admiral.io", Version: "v1alpha1", Kind: "ClientConnectionConfig"}
+
+// Get takes name of the clientConnectionConfig, and returns the corresponding clientConnectionConfig object, and an error if there is any.
+func (c *FakeClientConnectionConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClientConnectionConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(clientconnectionconfigsResource, c.ns, name), &v1alpha1.ClientConnectionConfig{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClientConnectionConfig), err
+}
+
+// List takes label and field selectors, and returns the list of ClientConnectionConfigs that match those selectors.
+func (c *FakeClientConnectionConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClientConnectionConfigList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(clientconnectionconfigsResource, clientconnectionconfigsKind, c.ns, opts), &v1alpha1.ClientConnectionConfigList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.ClientConnectionConfigList{ListMeta: obj.(*v1alpha1.ClientConnectionConfigList).ListMeta}
+ for _, item := range obj.(*v1alpha1.ClientConnectionConfigList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested clientConnectionConfigs.
+func (c *FakeClientConnectionConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(clientconnectionconfigsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a clientConnectionConfig and creates it. Returns the server's representation of the clientConnectionConfig, and an error, if there is any.
+func (c *FakeClientConnectionConfigs) Create(ctx context.Context, clientConnectionConfig *v1alpha1.ClientConnectionConfig, opts v1.CreateOptions) (result *v1alpha1.ClientConnectionConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(clientconnectionconfigsResource, c.ns, clientConnectionConfig), &v1alpha1.ClientConnectionConfig{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClientConnectionConfig), err
+}
+
+// Update takes the representation of a clientConnectionConfig and updates it. Returns the server's representation of the clientConnectionConfig, and an error, if there is any.
+func (c *FakeClientConnectionConfigs) Update(ctx context.Context, clientConnectionConfig *v1alpha1.ClientConnectionConfig, opts v1.UpdateOptions) (result *v1alpha1.ClientConnectionConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(clientconnectionconfigsResource, c.ns, clientConnectionConfig), &v1alpha1.ClientConnectionConfig{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClientConnectionConfig), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeClientConnectionConfigs) UpdateStatus(ctx context.Context, clientConnectionConfig *v1alpha1.ClientConnectionConfig, opts v1.UpdateOptions) (*v1alpha1.ClientConnectionConfig, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(clientconnectionconfigsResource, "status", c.ns, clientConnectionConfig), &v1alpha1.ClientConnectionConfig{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClientConnectionConfig), err
+}
+
+// Delete takes name of the clientConnectionConfig and deletes it. Returns an error if one occurs.
+func (c *FakeClientConnectionConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(clientconnectionconfigsResource, c.ns, name, opts), &v1alpha1.ClientConnectionConfig{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeClientConnectionConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(clientconnectionconfigsResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.ClientConnectionConfigList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched clientConnectionConfig.
+func (c *FakeClientConnectionConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClientConnectionConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(clientconnectionconfigsResource, c.ns, name, pt, data, subresources...), &v1alpha1.ClientConnectionConfig{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.ClientConnectionConfig), err
+}
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_dependency.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_dependency.go
new file mode 100644
index 00000000..3e8280e8
--- /dev/null
+++ b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_dependency.go
@@ -0,0 +1,142 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeDependencies implements DependencyInterface
+type FakeDependencies struct {
+ Fake *FakeAdmiralV1alpha1
+ ns string
+}
+
+var dependenciesResource = schema.GroupVersionResource{Group: "admiral.io", Version: "v1alpha1", Resource: "dependencies"}
+
+var dependenciesKind = schema.GroupVersionKind{Group: "admiral.io", Version: "v1alpha1", Kind: "Dependency"}
+
+// Get takes name of the dependency, and returns the corresponding dependency object, and an error if there is any.
+func (c *FakeDependencies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Dependency, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(dependenciesResource, c.ns, name), &v1alpha1.Dependency{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Dependency), err
+}
+
+// List takes label and field selectors, and returns the list of Dependencies that match those selectors.
+func (c *FakeDependencies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.DependencyList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(dependenciesResource, dependenciesKind, c.ns, opts), &v1alpha1.DependencyList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.DependencyList{ListMeta: obj.(*v1alpha1.DependencyList).ListMeta}
+ for _, item := range obj.(*v1alpha1.DependencyList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested dependencies.
+func (c *FakeDependencies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(dependenciesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a dependency and creates it. Returns the server's representation of the dependency, and an error, if there is any.
+func (c *FakeDependencies) Create(ctx context.Context, dependency *v1alpha1.Dependency, opts v1.CreateOptions) (result *v1alpha1.Dependency, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(dependenciesResource, c.ns, dependency), &v1alpha1.Dependency{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Dependency), err
+}
+
+// Update takes the representation of a dependency and updates it. Returns the server's representation of the dependency, and an error, if there is any.
+func (c *FakeDependencies) Update(ctx context.Context, dependency *v1alpha1.Dependency, opts v1.UpdateOptions) (result *v1alpha1.Dependency, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(dependenciesResource, c.ns, dependency), &v1alpha1.Dependency{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Dependency), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeDependencies) UpdateStatus(ctx context.Context, dependency *v1alpha1.Dependency, opts v1.UpdateOptions) (*v1alpha1.Dependency, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(dependenciesResource, "status", c.ns, dependency), &v1alpha1.Dependency{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Dependency), err
+}
+
+// Delete takes name of the dependency and deletes it. Returns an error if one occurs.
+func (c *FakeDependencies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(dependenciesResource, c.ns, name, opts), &v1alpha1.Dependency{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeDependencies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(dependenciesResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.DependencyList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched dependency.
+func (c *FakeDependencies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Dependency, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(dependenciesResource, c.ns, name, pt, data, subresources...), &v1alpha1.Dependency{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.Dependency), err
+}
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_dependencyproxy.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_dependencyproxy.go
new file mode 100644
index 00000000..d4389c6a
--- /dev/null
+++ b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_dependencyproxy.go
@@ -0,0 +1,142 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeDependencyProxies implements DependencyProxyInterface
+type FakeDependencyProxies struct {
+ Fake *FakeAdmiralV1alpha1
+ ns string
+}
+
+var dependencyproxiesResource = schema.GroupVersionResource{Group: "admiral.io", Version: "v1alpha1", Resource: "dependencyproxies"}
+
+var dependencyproxiesKind = schema.GroupVersionKind{Group: "admiral.io", Version: "v1alpha1", Kind: "DependencyProxy"}
+
+// Get takes name of the dependencyProxy, and returns the corresponding dependencyProxy object, and an error if there is any.
+func (c *FakeDependencyProxies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.DependencyProxy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(dependencyproxiesResource, c.ns, name), &v1alpha1.DependencyProxy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.DependencyProxy), err
+}
+
+// List takes label and field selectors, and returns the list of DependencyProxies that match those selectors.
+func (c *FakeDependencyProxies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.DependencyProxyList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(dependencyproxiesResource, dependencyproxiesKind, c.ns, opts), &v1alpha1.DependencyProxyList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.DependencyProxyList{ListMeta: obj.(*v1alpha1.DependencyProxyList).ListMeta}
+ for _, item := range obj.(*v1alpha1.DependencyProxyList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested dependencyProxies.
+func (c *FakeDependencyProxies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(dependencyproxiesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a dependencyProxy and creates it. Returns the server's representation of the dependencyProxy, and an error, if there is any.
+func (c *FakeDependencyProxies) Create(ctx context.Context, dependencyProxy *v1alpha1.DependencyProxy, opts v1.CreateOptions) (result *v1alpha1.DependencyProxy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(dependencyproxiesResource, c.ns, dependencyProxy), &v1alpha1.DependencyProxy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.DependencyProxy), err
+}
+
+// Update takes the representation of a dependencyProxy and updates it. Returns the server's representation of the dependencyProxy, and an error, if there is any.
+func (c *FakeDependencyProxies) Update(ctx context.Context, dependencyProxy *v1alpha1.DependencyProxy, opts v1.UpdateOptions) (result *v1alpha1.DependencyProxy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(dependencyproxiesResource, c.ns, dependencyProxy), &v1alpha1.DependencyProxy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.DependencyProxy), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeDependencyProxies) UpdateStatus(ctx context.Context, dependencyProxy *v1alpha1.DependencyProxy, opts v1.UpdateOptions) (*v1alpha1.DependencyProxy, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(dependencyproxiesResource, "status", c.ns, dependencyProxy), &v1alpha1.DependencyProxy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.DependencyProxy), err
+}
+
+// Delete takes name of the dependencyProxy and deletes it. Returns an error if one occurs.
+func (c *FakeDependencyProxies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(dependencyproxiesResource, c.ns, name, opts), &v1alpha1.DependencyProxy{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeDependencyProxies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(dependencyproxiesResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.DependencyProxyList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched dependencyProxy.
+func (c *FakeDependencyProxies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.DependencyProxy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(dependencyproxiesResource, c.ns, name, pt, data, subresources...), &v1alpha1.DependencyProxy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.DependencyProxy), err
+}
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_globaltrafficpolicy.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_globaltrafficpolicy.go
new file mode 100644
index 00000000..8a2beb5b
--- /dev/null
+++ b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_globaltrafficpolicy.go
@@ -0,0 +1,142 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeGlobalTrafficPolicies implements GlobalTrafficPolicyInterface
+type FakeGlobalTrafficPolicies struct {
+ Fake *FakeAdmiralV1alpha1
+ ns string
+}
+
+var globaltrafficpoliciesResource = schema.GroupVersionResource{Group: "admiral.io", Version: "v1alpha1", Resource: "globaltrafficpolicies"}
+
+var globaltrafficpoliciesKind = schema.GroupVersionKind{Group: "admiral.io", Version: "v1alpha1", Kind: "GlobalTrafficPolicy"}
+
+// Get takes name of the globalTrafficPolicy, and returns the corresponding globalTrafficPolicy object, and an error if there is any.
+func (c *FakeGlobalTrafficPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.GlobalTrafficPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(globaltrafficpoliciesResource, c.ns, name), &v1alpha1.GlobalTrafficPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.GlobalTrafficPolicy), err
+}
+
+// List takes label and field selectors, and returns the list of GlobalTrafficPolicies that match those selectors.
+func (c *FakeGlobalTrafficPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.GlobalTrafficPolicyList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(globaltrafficpoliciesResource, globaltrafficpoliciesKind, c.ns, opts), &v1alpha1.GlobalTrafficPolicyList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.GlobalTrafficPolicyList{ListMeta: obj.(*v1alpha1.GlobalTrafficPolicyList).ListMeta}
+ for _, item := range obj.(*v1alpha1.GlobalTrafficPolicyList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested globalTrafficPolicies.
+func (c *FakeGlobalTrafficPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(globaltrafficpoliciesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a globalTrafficPolicy and creates it. Returns the server's representation of the globalTrafficPolicy, and an error, if there is any.
+func (c *FakeGlobalTrafficPolicies) Create(ctx context.Context, globalTrafficPolicy *v1alpha1.GlobalTrafficPolicy, opts v1.CreateOptions) (result *v1alpha1.GlobalTrafficPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(globaltrafficpoliciesResource, c.ns, globalTrafficPolicy), &v1alpha1.GlobalTrafficPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.GlobalTrafficPolicy), err
+}
+
+// Update takes the representation of a globalTrafficPolicy and updates it. Returns the server's representation of the globalTrafficPolicy, and an error, if there is any.
+func (c *FakeGlobalTrafficPolicies) Update(ctx context.Context, globalTrafficPolicy *v1alpha1.GlobalTrafficPolicy, opts v1.UpdateOptions) (result *v1alpha1.GlobalTrafficPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(globaltrafficpoliciesResource, c.ns, globalTrafficPolicy), &v1alpha1.GlobalTrafficPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.GlobalTrafficPolicy), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeGlobalTrafficPolicies) UpdateStatus(ctx context.Context, globalTrafficPolicy *v1alpha1.GlobalTrafficPolicy, opts v1.UpdateOptions) (*v1alpha1.GlobalTrafficPolicy, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(globaltrafficpoliciesResource, "status", c.ns, globalTrafficPolicy), &v1alpha1.GlobalTrafficPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.GlobalTrafficPolicy), err
+}
+
+// Delete takes name of the globalTrafficPolicy and deletes it. Returns an error if one occurs.
+func (c *FakeGlobalTrafficPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(globaltrafficpoliciesResource, c.ns, name, opts), &v1alpha1.GlobalTrafficPolicy{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeGlobalTrafficPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(globaltrafficpoliciesResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.GlobalTrafficPolicyList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched globalTrafficPolicy.
+func (c *FakeGlobalTrafficPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.GlobalTrafficPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(globaltrafficpoliciesResource, c.ns, name, pt, data, subresources...), &v1alpha1.GlobalTrafficPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.GlobalTrafficPolicy), err
+}
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_outlierdetection.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_outlierdetection.go
new file mode 100644
index 00000000..fcb03b69
--- /dev/null
+++ b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_outlierdetection.go
@@ -0,0 +1,142 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeOutlierDetections implements OutlierDetectionInterface
+type FakeOutlierDetections struct {
+ Fake *FakeAdmiralV1alpha1
+ ns string
+}
+
+var outlierdetectionsResource = schema.GroupVersionResource{Group: "admiral.io", Version: "v1alpha1", Resource: "outlierdetections"}
+
+var outlierdetectionsKind = schema.GroupVersionKind{Group: "admiral.io", Version: "v1alpha1", Kind: "OutlierDetection"}
+
+// Get takes name of the outlierDetection, and returns the corresponding outlierDetection object, and an error if there is any.
+func (c *FakeOutlierDetections) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.OutlierDetection, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(outlierdetectionsResource, c.ns, name), &v1alpha1.OutlierDetection{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.OutlierDetection), err
+}
+
+// List takes label and field selectors, and returns the list of OutlierDetections that match those selectors.
+func (c *FakeOutlierDetections) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.OutlierDetectionList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(outlierdetectionsResource, outlierdetectionsKind, c.ns, opts), &v1alpha1.OutlierDetectionList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.OutlierDetectionList{ListMeta: obj.(*v1alpha1.OutlierDetectionList).ListMeta}
+ for _, item := range obj.(*v1alpha1.OutlierDetectionList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested outlierDetections.
+func (c *FakeOutlierDetections) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(outlierdetectionsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a outlierDetection and creates it. Returns the server's representation of the outlierDetection, and an error, if there is any.
+func (c *FakeOutlierDetections) Create(ctx context.Context, outlierDetection *v1alpha1.OutlierDetection, opts v1.CreateOptions) (result *v1alpha1.OutlierDetection, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(outlierdetectionsResource, c.ns, outlierDetection), &v1alpha1.OutlierDetection{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.OutlierDetection), err
+}
+
+// Update takes the representation of a outlierDetection and updates it. Returns the server's representation of the outlierDetection, and an error, if there is any.
+func (c *FakeOutlierDetections) Update(ctx context.Context, outlierDetection *v1alpha1.OutlierDetection, opts v1.UpdateOptions) (result *v1alpha1.OutlierDetection, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(outlierdetectionsResource, c.ns, outlierDetection), &v1alpha1.OutlierDetection{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.OutlierDetection), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeOutlierDetections) UpdateStatus(ctx context.Context, outlierDetection *v1alpha1.OutlierDetection, opts v1.UpdateOptions) (*v1alpha1.OutlierDetection, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(outlierdetectionsResource, "status", c.ns, outlierDetection), &v1alpha1.OutlierDetection{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.OutlierDetection), err
+}
+
+// Delete takes name of the outlierDetection and deletes it. Returns an error if one occurs.
+func (c *FakeOutlierDetections) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(outlierdetectionsResource, c.ns, name, opts), &v1alpha1.OutlierDetection{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeOutlierDetections) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(outlierdetectionsResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.OutlierDetectionList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched outlierDetection.
+func (c *FakeOutlierDetections) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.OutlierDetection, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(outlierdetectionsResource, c.ns, name, pt, data, subresources...), &v1alpha1.OutlierDetection{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.OutlierDetection), err
+}
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_routingpolicy.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_routingpolicy.go
new file mode 100644
index 00000000..d1c84a16
--- /dev/null
+++ b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_routingpolicy.go
@@ -0,0 +1,142 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeRoutingPolicies implements RoutingPolicyInterface
+type FakeRoutingPolicies struct {
+ Fake *FakeAdmiralV1alpha1
+ ns string
+}
+
+var routingpoliciesResource = schema.GroupVersionResource{Group: "admiral.io", Version: "v1alpha1", Resource: "routingpolicies"}
+
+var routingpoliciesKind = schema.GroupVersionKind{Group: "admiral.io", Version: "v1alpha1", Kind: "RoutingPolicy"}
+
+// Get takes name of the routingPolicy, and returns the corresponding routingPolicy object, and an error if there is any.
+func (c *FakeRoutingPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RoutingPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(routingpoliciesResource, c.ns, name), &v1alpha1.RoutingPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.RoutingPolicy), err
+}
+
+// List takes label and field selectors, and returns the list of RoutingPolicies that match those selectors.
+func (c *FakeRoutingPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoutingPolicyList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(routingpoliciesResource, routingpoliciesKind, c.ns, opts), &v1alpha1.RoutingPolicyList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.RoutingPolicyList{ListMeta: obj.(*v1alpha1.RoutingPolicyList).ListMeta}
+ for _, item := range obj.(*v1alpha1.RoutingPolicyList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested routingPolicies.
+func (c *FakeRoutingPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(routingpoliciesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a routingPolicy and creates it. Returns the server's representation of the routingPolicy, and an error, if there is any.
+func (c *FakeRoutingPolicies) Create(ctx context.Context, routingPolicy *v1alpha1.RoutingPolicy, opts v1.CreateOptions) (result *v1alpha1.RoutingPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(routingpoliciesResource, c.ns, routingPolicy), &v1alpha1.RoutingPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.RoutingPolicy), err
+}
+
+// Update takes the representation of a routingPolicy and updates it. Returns the server's representation of the routingPolicy, and an error, if there is any.
+func (c *FakeRoutingPolicies) Update(ctx context.Context, routingPolicy *v1alpha1.RoutingPolicy, opts v1.UpdateOptions) (result *v1alpha1.RoutingPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(routingpoliciesResource, c.ns, routingPolicy), &v1alpha1.RoutingPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.RoutingPolicy), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeRoutingPolicies) UpdateStatus(ctx context.Context, routingPolicy *v1alpha1.RoutingPolicy, opts v1.UpdateOptions) (*v1alpha1.RoutingPolicy, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(routingpoliciesResource, "status", c.ns, routingPolicy), &v1alpha1.RoutingPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.RoutingPolicy), err
+}
+
+// Delete takes name of the routingPolicy and deletes it. Returns an error if one occurs.
+func (c *FakeRoutingPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(routingpoliciesResource, c.ns, name, opts), &v1alpha1.RoutingPolicy{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeRoutingPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(routingpoliciesResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.RoutingPolicyList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched routingPolicy.
+func (c *FakeRoutingPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoutingPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(routingpoliciesResource, c.ns, name, pt, data, subresources...), &v1alpha1.RoutingPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.RoutingPolicy), err
+}
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_trafficconfig.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_trafficconfig.go
new file mode 100644
index 00000000..5cb2d83a
--- /dev/null
+++ b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/fake/fake_trafficconfig.go
@@ -0,0 +1,142 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeTrafficConfigs implements TrafficConfigInterface
+type FakeTrafficConfigs struct {
+ Fake *FakeAdmiralV1alpha1
+ ns string
+}
+
+var trafficconfigsResource = schema.GroupVersionResource{Group: "admiral.io", Version: "v1alpha1", Resource: "trafficconfigs"}
+
+var trafficconfigsKind = schema.GroupVersionKind{Group: "admiral.io", Version: "v1alpha1", Kind: "TrafficConfig"}
+
+// Get takes name of the trafficConfig, and returns the corresponding trafficConfig object, and an error if there is any.
+func (c *FakeTrafficConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.TrafficConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(trafficconfigsResource, c.ns, name), &v1alpha1.TrafficConfig{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.TrafficConfig), err
+}
+
+// List takes label and field selectors, and returns the list of TrafficConfigs that match those selectors.
+func (c *FakeTrafficConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.TrafficConfigList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(trafficconfigsResource, trafficconfigsKind, c.ns, opts), &v1alpha1.TrafficConfigList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1alpha1.TrafficConfigList{ListMeta: obj.(*v1alpha1.TrafficConfigList).ListMeta}
+ for _, item := range obj.(*v1alpha1.TrafficConfigList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested trafficConfigs.
+func (c *FakeTrafficConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(trafficconfigsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a trafficConfig and creates it. Returns the server's representation of the trafficConfig, and an error, if there is any.
+func (c *FakeTrafficConfigs) Create(ctx context.Context, trafficConfig *v1alpha1.TrafficConfig, opts v1.CreateOptions) (result *v1alpha1.TrafficConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(trafficconfigsResource, c.ns, trafficConfig), &v1alpha1.TrafficConfig{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.TrafficConfig), err
+}
+
+// Update takes the representation of a trafficConfig and updates it. Returns the server's representation of the trafficConfig, and an error, if there is any.
+func (c *FakeTrafficConfigs) Update(ctx context.Context, trafficConfig *v1alpha1.TrafficConfig, opts v1.UpdateOptions) (result *v1alpha1.TrafficConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(trafficconfigsResource, c.ns, trafficConfig), &v1alpha1.TrafficConfig{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.TrafficConfig), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeTrafficConfigs) UpdateStatus(ctx context.Context, trafficConfig *v1alpha1.TrafficConfig, opts v1.UpdateOptions) (*v1alpha1.TrafficConfig, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(trafficconfigsResource, "status", c.ns, trafficConfig), &v1alpha1.TrafficConfig{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.TrafficConfig), err
+}
+
+// Delete takes name of the trafficConfig and deletes it. Returns an error if one occurs.
+func (c *FakeTrafficConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(trafficconfigsResource, c.ns, name, opts), &v1alpha1.TrafficConfig{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeTrafficConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(trafficconfigsResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1alpha1.TrafficConfigList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched trafficConfig.
+func (c *FakeTrafficConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.TrafficConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(trafficconfigsResource, c.ns, name, pt, data, subresources...), &v1alpha1.TrafficConfig{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1alpha1.TrafficConfig), err
+}
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/generated_expansion.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/generated_expansion.go
new file mode 100644
index 00000000..7c7ff2f5
--- /dev/null
+++ b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/generated_expansion.go
@@ -0,0 +1,33 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+type ClientConnectionConfigExpansion interface{}
+
+type DependencyExpansion interface{}
+
+type DependencyProxyExpansion interface{}
+
+type GlobalTrafficPolicyExpansion interface{}
+
+type OutlierDetectionExpansion interface{}
+
+type RoutingPolicyExpansion interface{}
+
+type TrafficConfigExpansion interface{}
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/globaltrafficpolicy.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/globaltrafficpolicy.go
new file mode 100644
index 00000000..10b9e22f
--- /dev/null
+++ b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/globaltrafficpolicy.go
@@ -0,0 +1,195 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ scheme "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// GlobalTrafficPoliciesGetter has a method to return a GlobalTrafficPolicyInterface.
+// A group's client should implement this interface.
+type GlobalTrafficPoliciesGetter interface {
+ GlobalTrafficPolicies(namespace string) GlobalTrafficPolicyInterface
+}
+
+// GlobalTrafficPolicyInterface has methods to work with GlobalTrafficPolicy resources.
+type GlobalTrafficPolicyInterface interface {
+ Create(ctx context.Context, globalTrafficPolicy *v1alpha1.GlobalTrafficPolicy, opts v1.CreateOptions) (*v1alpha1.GlobalTrafficPolicy, error)
+ Update(ctx context.Context, globalTrafficPolicy *v1alpha1.GlobalTrafficPolicy, opts v1.UpdateOptions) (*v1alpha1.GlobalTrafficPolicy, error)
+ UpdateStatus(ctx context.Context, globalTrafficPolicy *v1alpha1.GlobalTrafficPolicy, opts v1.UpdateOptions) (*v1alpha1.GlobalTrafficPolicy, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.GlobalTrafficPolicy, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.GlobalTrafficPolicyList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.GlobalTrafficPolicy, err error)
+ GlobalTrafficPolicyExpansion
+}
+
+// globalTrafficPolicies implements GlobalTrafficPolicyInterface
+type globalTrafficPolicies struct {
+ client rest.Interface
+ ns string
+}
+
+// newGlobalTrafficPolicies returns a GlobalTrafficPolicies
+func newGlobalTrafficPolicies(c *AdmiralV1alpha1Client, namespace string) *globalTrafficPolicies {
+ return &globalTrafficPolicies{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the globalTrafficPolicy, and returns the corresponding globalTrafficPolicy object, and an error if there is any.
+func (c *globalTrafficPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.GlobalTrafficPolicy, err error) {
+ result = &v1alpha1.GlobalTrafficPolicy{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("globaltrafficpolicies").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of GlobalTrafficPolicies that match those selectors.
+func (c *globalTrafficPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.GlobalTrafficPolicyList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.GlobalTrafficPolicyList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("globaltrafficpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested globalTrafficPolicies.
+func (c *globalTrafficPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("globaltrafficpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a globalTrafficPolicy and creates it. Returns the server's representation of the globalTrafficPolicy, and an error, if there is any.
+func (c *globalTrafficPolicies) Create(ctx context.Context, globalTrafficPolicy *v1alpha1.GlobalTrafficPolicy, opts v1.CreateOptions) (result *v1alpha1.GlobalTrafficPolicy, err error) {
+ result = &v1alpha1.GlobalTrafficPolicy{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("globaltrafficpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(globalTrafficPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a globalTrafficPolicy and updates it. Returns the server's representation of the globalTrafficPolicy, and an error, if there is any.
+func (c *globalTrafficPolicies) Update(ctx context.Context, globalTrafficPolicy *v1alpha1.GlobalTrafficPolicy, opts v1.UpdateOptions) (result *v1alpha1.GlobalTrafficPolicy, err error) {
+ result = &v1alpha1.GlobalTrafficPolicy{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("globaltrafficpolicies").
+ Name(globalTrafficPolicy.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(globalTrafficPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *globalTrafficPolicies) UpdateStatus(ctx context.Context, globalTrafficPolicy *v1alpha1.GlobalTrafficPolicy, opts v1.UpdateOptions) (result *v1alpha1.GlobalTrafficPolicy, err error) {
+ result = &v1alpha1.GlobalTrafficPolicy{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("globaltrafficpolicies").
+ Name(globalTrafficPolicy.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(globalTrafficPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the globalTrafficPolicy and deletes it. Returns an error if one occurs.
+func (c *globalTrafficPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("globaltrafficpolicies").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *globalTrafficPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("globaltrafficpolicies").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched globalTrafficPolicy.
+func (c *globalTrafficPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.GlobalTrafficPolicy, err error) {
+ result = &v1alpha1.GlobalTrafficPolicy{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("globaltrafficpolicies").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/outlierdetection.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/outlierdetection.go
new file mode 100644
index 00000000..ec3816b1
--- /dev/null
+++ b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/outlierdetection.go
@@ -0,0 +1,195 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ scheme "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// OutlierDetectionsGetter has a method to return an OutlierDetectionInterface.
+// A group's client should implement this interface.
+type OutlierDetectionsGetter interface {
+ OutlierDetections(namespace string) OutlierDetectionInterface
+}
+
+// OutlierDetectionInterface has methods to work with OutlierDetection resources.
+type OutlierDetectionInterface interface {
+ Create(ctx context.Context, outlierDetection *v1alpha1.OutlierDetection, opts v1.CreateOptions) (*v1alpha1.OutlierDetection, error)
+ Update(ctx context.Context, outlierDetection *v1alpha1.OutlierDetection, opts v1.UpdateOptions) (*v1alpha1.OutlierDetection, error)
+ UpdateStatus(ctx context.Context, outlierDetection *v1alpha1.OutlierDetection, opts v1.UpdateOptions) (*v1alpha1.OutlierDetection, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.OutlierDetection, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.OutlierDetectionList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.OutlierDetection, err error)
+ OutlierDetectionExpansion
+}
+
+// outlierDetections implements OutlierDetectionInterface
+type outlierDetections struct {
+ client rest.Interface
+ ns string
+}
+
+// newOutlierDetections returns an OutlierDetections
+func newOutlierDetections(c *AdmiralV1alpha1Client, namespace string) *outlierDetections {
+ return &outlierDetections{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the outlierDetection, and returns the corresponding outlierDetection object, and an error if there is any.
+func (c *outlierDetections) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.OutlierDetection, err error) {
+ result = &v1alpha1.OutlierDetection{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("outlierdetections").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of OutlierDetections that match those selectors.
+func (c *outlierDetections) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.OutlierDetectionList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.OutlierDetectionList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("outlierdetections").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested outlierDetections.
+func (c *outlierDetections) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("outlierdetections").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of an outlierDetection and creates it. Returns the server's representation of the outlierDetection, and an error, if there is any.
+func (c *outlierDetections) Create(ctx context.Context, outlierDetection *v1alpha1.OutlierDetection, opts v1.CreateOptions) (result *v1alpha1.OutlierDetection, err error) {
+ result = &v1alpha1.OutlierDetection{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("outlierdetections").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(outlierDetection).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of an outlierDetection and updates it. Returns the server's representation of the outlierDetection, and an error, if there is any.
+func (c *outlierDetections) Update(ctx context.Context, outlierDetection *v1alpha1.OutlierDetection, opts v1.UpdateOptions) (result *v1alpha1.OutlierDetection, err error) {
+ result = &v1alpha1.OutlierDetection{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("outlierdetections").
+ Name(outlierDetection.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(outlierDetection).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *outlierDetections) UpdateStatus(ctx context.Context, outlierDetection *v1alpha1.OutlierDetection, opts v1.UpdateOptions) (result *v1alpha1.OutlierDetection, err error) {
+ result = &v1alpha1.OutlierDetection{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("outlierdetections").
+ Name(outlierDetection.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(outlierDetection).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the outlierDetection and deletes it. Returns an error if one occurs.
+func (c *outlierDetections) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("outlierdetections").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *outlierDetections) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("outlierdetections").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched outlierDetection.
+func (c *outlierDetections) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.OutlierDetection, err error) {
+ result = &v1alpha1.OutlierDetection{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("outlierdetections").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/routingpolicy.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/routingpolicy.go
new file mode 100644
index 00000000..75d65c74
--- /dev/null
+++ b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/routingpolicy.go
@@ -0,0 +1,195 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ scheme "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// RoutingPoliciesGetter has a method to return a RoutingPolicyInterface.
+// A group's client should implement this interface.
+type RoutingPoliciesGetter interface {
+ RoutingPolicies(namespace string) RoutingPolicyInterface
+}
+
+// RoutingPolicyInterface has methods to work with RoutingPolicy resources.
+type RoutingPolicyInterface interface {
+ Create(ctx context.Context, routingPolicy *v1alpha1.RoutingPolicy, opts v1.CreateOptions) (*v1alpha1.RoutingPolicy, error)
+ Update(ctx context.Context, routingPolicy *v1alpha1.RoutingPolicy, opts v1.UpdateOptions) (*v1alpha1.RoutingPolicy, error)
+ UpdateStatus(ctx context.Context, routingPolicy *v1alpha1.RoutingPolicy, opts v1.UpdateOptions) (*v1alpha1.RoutingPolicy, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.RoutingPolicy, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RoutingPolicyList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoutingPolicy, err error)
+ RoutingPolicyExpansion
+}
+
+// routingPolicies implements RoutingPolicyInterface
+type routingPolicies struct {
+ client rest.Interface
+ ns string
+}
+
+// newRoutingPolicies returns a RoutingPolicies
+func newRoutingPolicies(c *AdmiralV1alpha1Client, namespace string) *routingPolicies {
+ return &routingPolicies{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the routingPolicy, and returns the corresponding routingPolicy object, and an error if there is any.
+func (c *routingPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RoutingPolicy, err error) {
+ result = &v1alpha1.RoutingPolicy{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("routingpolicies").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of RoutingPolicies that match those selectors.
+func (c *routingPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoutingPolicyList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.RoutingPolicyList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("routingpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested routingPolicies.
+func (c *routingPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("routingpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a routingPolicy and creates it. Returns the server's representation of the routingPolicy, and an error, if there is any.
+func (c *routingPolicies) Create(ctx context.Context, routingPolicy *v1alpha1.RoutingPolicy, opts v1.CreateOptions) (result *v1alpha1.RoutingPolicy, err error) {
+ result = &v1alpha1.RoutingPolicy{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("routingpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(routingPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a routingPolicy and updates it. Returns the server's representation of the routingPolicy, and an error, if there is any.
+func (c *routingPolicies) Update(ctx context.Context, routingPolicy *v1alpha1.RoutingPolicy, opts v1.UpdateOptions) (result *v1alpha1.RoutingPolicy, err error) {
+ result = &v1alpha1.RoutingPolicy{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("routingpolicies").
+ Name(routingPolicy.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(routingPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *routingPolicies) UpdateStatus(ctx context.Context, routingPolicy *v1alpha1.RoutingPolicy, opts v1.UpdateOptions) (result *v1alpha1.RoutingPolicy, err error) {
+ result = &v1alpha1.RoutingPolicy{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("routingpolicies").
+ Name(routingPolicy.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(routingPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the routingPolicy and deletes it. Returns an error if one occurs.
+func (c *routingPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("routingpolicies").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *routingPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("routingpolicies").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched routingPolicy.
+func (c *routingPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoutingPolicy, err error) {
+ result = &v1alpha1.RoutingPolicy{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("routingpolicies").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/trafficconfig.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/trafficconfig.go
new file mode 100644
index 00000000..3a997e94
--- /dev/null
+++ b/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1/trafficconfig.go
@@ -0,0 +1,195 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ scheme "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// TrafficConfigsGetter has a method to return a TrafficConfigInterface.
+// A group's client should implement this interface.
+type TrafficConfigsGetter interface {
+	TrafficConfigs(namespace string) TrafficConfigInterface
+}
+
+// TrafficConfigInterface has methods to work with TrafficConfig resources.
+// It exposes the standard client-gen verb set (create, update, delete,
+// get, list, watch, patch) plus the status-subresource update.
+type TrafficConfigInterface interface {
+	Create(ctx context.Context, trafficConfig *v1alpha1.TrafficConfig, opts v1.CreateOptions) (*v1alpha1.TrafficConfig, error)
+	Update(ctx context.Context, trafficConfig *v1alpha1.TrafficConfig, opts v1.UpdateOptions) (*v1alpha1.TrafficConfig, error)
+	UpdateStatus(ctx context.Context, trafficConfig *v1alpha1.TrafficConfig, opts v1.UpdateOptions) (*v1alpha1.TrafficConfig, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.TrafficConfig, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.TrafficConfigList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.TrafficConfig, err error)
+	// TrafficConfigExpansion is the hand-written extension point
+	// (presumably declared in generated_expansion.go — empty by default).
+	TrafficConfigExpansion
+}
+
+// trafficConfigs implements TrafficConfigInterface
+type trafficConfigs struct {
+	client rest.Interface // REST client for the admiral.io/v1alpha1 group-version
+	ns     string         // namespace every request is scoped to
+}
+
+// newTrafficConfigs returns a TrafficConfigs
+func newTrafficConfigs(c *AdmiralV1alpha1Client, namespace string) *trafficConfigs {
+	return &trafficConfigs{
+		client: c.RESTClient(), // reuse the group-version client's REST client
+		ns:     namespace,
+	}
+}
+
+// Get takes name of the trafficConfig, and returns the corresponding trafficConfig object, and an error if there is any.
+func (c *trafficConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.TrafficConfig, err error) {
+	result = &v1alpha1.TrafficConfig{}
+	// GET .../namespaces/{ns}/trafficconfigs/{name}; the response body is
+	// decoded into result, returned via the naked return.
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("trafficconfigs").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of TrafficConfigs that match those selectors.
+func (c *trafficConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.TrafficConfigList, err error) {
+	// A non-nil opts.TimeoutSeconds bounds the request duration; zero means
+	// no client-imposed timeout.
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1alpha1.TrafficConfigList{}
+	err = c.client.Get().
+		Namespace(c.ns).
+		Resource("trafficconfigs").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested trafficConfigs.
+func (c *trafficConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	// Forcing opts.Watch switches the list endpoint into streaming-watch mode.
+	opts.Watch = true
+	return c.client.Get().
+		Namespace(c.ns).
+		Resource("trafficconfigs").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch(ctx)
+}
+
+// Create takes the representation of a trafficConfig and creates it. Returns the server's representation of the trafficConfig, and an error, if there is any.
+func (c *trafficConfigs) Create(ctx context.Context, trafficConfig *v1alpha1.TrafficConfig, opts v1.CreateOptions) (result *v1alpha1.TrafficConfig, err error) {
+	result = &v1alpha1.TrafficConfig{}
+	// POST the object to the collection URL; the server-populated object
+	// (UID, resourceVersion, defaults) is decoded into result.
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("trafficconfigs").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(trafficConfig).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Update takes the representation of a trafficConfig and updates it. Returns the server's representation of the trafficConfig, and an error, if there is any.
+func (c *trafficConfigs) Update(ctx context.Context, trafficConfig *v1alpha1.TrafficConfig, opts v1.UpdateOptions) (result *v1alpha1.TrafficConfig, err error) {
+	result = &v1alpha1.TrafficConfig{}
+	// PUT to the named resource URL; the object's own Name selects the target.
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("trafficconfigs").
+		Name(trafficConfig.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(trafficConfig).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *trafficConfigs) UpdateStatus(ctx context.Context, trafficConfig *v1alpha1.TrafficConfig, opts v1.UpdateOptions) (result *v1alpha1.TrafficConfig, err error) {
+	result = &v1alpha1.TrafficConfig{}
+	// PUT to .../trafficconfigs/{name}/status updates only the status
+	// subresource.
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("trafficconfigs").
+		Name(trafficConfig.Name).
+		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(trafficConfig).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Delete takes name of the trafficConfig and deletes it. Returns an error if one occurs.
+func (c *trafficConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+	// Note: DeleteOptions are sent in the request body, not as query params.
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("trafficconfigs").
+		Name(name).
+		Body(&opts).
+		Do(ctx).
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *trafficConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+	// listOpts select the objects to delete (query params) and may carry a
+	// server-side timeout; the DeleteOptions travel in the request body.
+	var timeout time.Duration
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("trafficconfigs").
+		VersionedParams(&listOpts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(&opts).
+		Do(ctx).
+		Error()
+}
+
+// Patch applies the patch and returns the patched trafficConfig.
+func (c *trafficConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.TrafficConfig, err error) {
+	result = &v1alpha1.TrafficConfig{}
+	// pt selects the patch encoding and data is the raw patch body; optional
+	// subresources (e.g. "status") are appended to the request path.
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("trafficconfigs").
+		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(data).
+		Do(ctx).
+		Into(result)
+	return
+}
From e8c40813aee58f868855453d6be836d4ca8ba525 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:51:33 -0700
Subject: [PATCH 035/235] add file
admiral/pkg/client/informers/externalversions/admiral/v1alpha1/
---
.../v1alpha1/clientconnectionconfig.go | 90 +++++++++++++++++++
.../admiral/v1alpha1/dependency.go | 90 +++++++++++++++++++
.../admiral/v1alpha1/dependencyproxy.go | 90 +++++++++++++++++++
.../admiral/v1alpha1/globaltrafficpolicy.go | 90 +++++++++++++++++++
.../admiral/v1alpha1/interface.go | 87 ++++++++++++++++++
.../admiral/v1alpha1/outlierdetection.go | 90 +++++++++++++++++++
.../admiral/v1alpha1/routingpolicy.go | 90 +++++++++++++++++++
.../admiral/v1alpha1/trafficconfig.go | 90 +++++++++++++++++++
8 files changed, 717 insertions(+)
create mode 100644 admiral/pkg/client/informers/externalversions/admiral/v1alpha1/clientconnectionconfig.go
create mode 100644 admiral/pkg/client/informers/externalversions/admiral/v1alpha1/dependency.go
create mode 100644 admiral/pkg/client/informers/externalversions/admiral/v1alpha1/dependencyproxy.go
create mode 100644 admiral/pkg/client/informers/externalversions/admiral/v1alpha1/globaltrafficpolicy.go
create mode 100644 admiral/pkg/client/informers/externalversions/admiral/v1alpha1/interface.go
create mode 100644 admiral/pkg/client/informers/externalversions/admiral/v1alpha1/outlierdetection.go
create mode 100644 admiral/pkg/client/informers/externalversions/admiral/v1alpha1/routingpolicy.go
create mode 100644 admiral/pkg/client/informers/externalversions/admiral/v1alpha1/trafficconfig.go
diff --git a/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/clientconnectionconfig.go b/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/clientconnectionconfig.go
new file mode 100644
index 00000000..d3b3581d
--- /dev/null
+++ b/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/clientconnectionconfig.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ time "time"
+
+ admiralv1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ versioned "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned"
+ internalinterfaces "github.com/istio-ecosystem/admiral/admiral/pkg/client/informers/externalversions/internalinterfaces"
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/listers/admiral/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// ClientConnectionConfigInformer provides access to a shared informer and lister for
+// ClientConnectionConfigs.
+type ClientConnectionConfigInformer interface {
+	Informer() cache.SharedIndexInformer
+	Lister() v1alpha1.ClientConnectionConfigLister
+}
+
+type clientConnectionConfigInformer struct {
+	factory          internalinterfaces.SharedInformerFactory // shared factory that caches one informer per type
+	tweakListOptions internalinterfaces.TweakListOptionsFunc   // optional hook run on ListOptions before each list/watch
+	namespace        string                                    // namespace the informer is scoped to
+}
+
+// NewClientConnectionConfigInformer constructs a new informer for ClientConnectionConfig type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewClientConnectionConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+	return NewFilteredClientConnectionConfigInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredClientConnectionConfigInformer constructs a new informer for ClientConnectionConfig type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredClientConnectionConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+	return cache.NewSharedIndexInformer(
+		&cache.ListWatch{
+			// List/Watch delegate to the typed clientset; tweakListOptions,
+			// when non-nil, mutates the options before every request.
+			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+				if tweakListOptions != nil {
+					tweakListOptions(&options)
+				}
+				return client.AdmiralV1alpha1().ClientConnectionConfigs(namespace).List(context.TODO(), options)
+			},
+			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+				if tweakListOptions != nil {
+					tweakListOptions(&options)
+				}
+				return client.AdmiralV1alpha1().ClientConnectionConfigs(namespace).Watch(context.TODO(), options)
+			},
+		},
+		&admiralv1alpha1.ClientConnectionConfig{},
+		resyncPeriod,
+		indexers,
+	)
+}
+
+// defaultInformer is the constructor the shared factory invokes via
+// InformerFor; it adds the standard namespace index.
+func (f *clientConnectionConfigInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+	return NewFilteredClientConnectionConfigInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+// Informer returns the shared informer, creating it through the factory on
+// first use.
+func (f *clientConnectionConfigInformer) Informer() cache.SharedIndexInformer {
+	return f.factory.InformerFor(&admiralv1alpha1.ClientConnectionConfig{}, f.defaultInformer)
+}
+
+// Lister returns a lister backed by the shared informer's local index.
+func (f *clientConnectionConfigInformer) Lister() v1alpha1.ClientConnectionConfigLister {
+	return v1alpha1.NewClientConnectionConfigLister(f.Informer().GetIndexer())
+}
diff --git a/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/dependency.go b/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/dependency.go
new file mode 100644
index 00000000..5fa99c11
--- /dev/null
+++ b/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/dependency.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ time "time"
+
+ admiralv1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ versioned "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned"
+ internalinterfaces "github.com/istio-ecosystem/admiral/admiral/pkg/client/informers/externalversions/internalinterfaces"
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/listers/admiral/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// DependencyInformer provides access to a shared informer and lister for
+// Dependencies.
+type DependencyInformer interface {
+	Informer() cache.SharedIndexInformer
+	Lister() v1alpha1.DependencyLister
+}
+
+type dependencyInformer struct {
+	factory          internalinterfaces.SharedInformerFactory // shared factory that caches one informer per type
+	tweakListOptions internalinterfaces.TweakListOptionsFunc   // optional hook run on ListOptions before each list/watch
+	namespace        string                                    // namespace the informer is scoped to
+}
+
+// NewDependencyInformer constructs a new informer for Dependency type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewDependencyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+	return NewFilteredDependencyInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredDependencyInformer constructs a new informer for Dependency type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredDependencyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+	return cache.NewSharedIndexInformer(
+		&cache.ListWatch{
+			// List/Watch delegate to the typed clientset; tweakListOptions,
+			// when non-nil, mutates the options before every request.
+			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+				if tweakListOptions != nil {
+					tweakListOptions(&options)
+				}
+				return client.AdmiralV1alpha1().Dependencies(namespace).List(context.TODO(), options)
+			},
+			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+				if tweakListOptions != nil {
+					tweakListOptions(&options)
+				}
+				return client.AdmiralV1alpha1().Dependencies(namespace).Watch(context.TODO(), options)
+			},
+		},
+		&admiralv1alpha1.Dependency{},
+		resyncPeriod,
+		indexers,
+	)
+}
+
+// defaultInformer is the constructor the shared factory invokes via
+// InformerFor; it adds the standard namespace index.
+func (f *dependencyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+	return NewFilteredDependencyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+// Informer returns the shared informer, creating it through the factory on
+// first use.
+func (f *dependencyInformer) Informer() cache.SharedIndexInformer {
+	return f.factory.InformerFor(&admiralv1alpha1.Dependency{}, f.defaultInformer)
+}
+
+// Lister returns a lister backed by the shared informer's local index.
+func (f *dependencyInformer) Lister() v1alpha1.DependencyLister {
+	return v1alpha1.NewDependencyLister(f.Informer().GetIndexer())
+}
diff --git a/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/dependencyproxy.go b/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/dependencyproxy.go
new file mode 100644
index 00000000..43c202c0
--- /dev/null
+++ b/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/dependencyproxy.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ time "time"
+
+ admiralv1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ versioned "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned"
+ internalinterfaces "github.com/istio-ecosystem/admiral/admiral/pkg/client/informers/externalversions/internalinterfaces"
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/listers/admiral/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// DependencyProxyInformer provides access to a shared informer and lister for
+// DependencyProxies.
+type DependencyProxyInformer interface {
+	Informer() cache.SharedIndexInformer
+	Lister() v1alpha1.DependencyProxyLister
+}
+
+type dependencyProxyInformer struct {
+	factory          internalinterfaces.SharedInformerFactory // shared factory that caches one informer per type
+	tweakListOptions internalinterfaces.TweakListOptionsFunc   // optional hook run on ListOptions before each list/watch
+	namespace        string                                    // namespace the informer is scoped to
+}
+
+// NewDependencyProxyInformer constructs a new informer for DependencyProxy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewDependencyProxyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+	return NewFilteredDependencyProxyInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredDependencyProxyInformer constructs a new informer for DependencyProxy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredDependencyProxyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+	return cache.NewSharedIndexInformer(
+		&cache.ListWatch{
+			// List/Watch delegate to the typed clientset; tweakListOptions,
+			// when non-nil, mutates the options before every request.
+			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+				if tweakListOptions != nil {
+					tweakListOptions(&options)
+				}
+				return client.AdmiralV1alpha1().DependencyProxies(namespace).List(context.TODO(), options)
+			},
+			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+				if tweakListOptions != nil {
+					tweakListOptions(&options)
+				}
+				return client.AdmiralV1alpha1().DependencyProxies(namespace).Watch(context.TODO(), options)
+			},
+		},
+		&admiralv1alpha1.DependencyProxy{},
+		resyncPeriod,
+		indexers,
+	)
+}
+
+// defaultInformer is the constructor the shared factory invokes via
+// InformerFor; it adds the standard namespace index.
+func (f *dependencyProxyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+	return NewFilteredDependencyProxyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+// Informer returns the shared informer, creating it through the factory on
+// first use.
+func (f *dependencyProxyInformer) Informer() cache.SharedIndexInformer {
+	return f.factory.InformerFor(&admiralv1alpha1.DependencyProxy{}, f.defaultInformer)
+}
+
+// Lister returns a lister backed by the shared informer's local index.
+func (f *dependencyProxyInformer) Lister() v1alpha1.DependencyProxyLister {
+	return v1alpha1.NewDependencyProxyLister(f.Informer().GetIndexer())
+}
diff --git a/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/globaltrafficpolicy.go b/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/globaltrafficpolicy.go
new file mode 100644
index 00000000..6c64ae30
--- /dev/null
+++ b/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/globaltrafficpolicy.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ time "time"
+
+ admiralv1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ versioned "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned"
+ internalinterfaces "github.com/istio-ecosystem/admiral/admiral/pkg/client/informers/externalversions/internalinterfaces"
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/listers/admiral/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// GlobalTrafficPolicyInformer provides access to a shared informer and lister for
+// GlobalTrafficPolicies.
+type GlobalTrafficPolicyInformer interface {
+	Informer() cache.SharedIndexInformer
+	Lister() v1alpha1.GlobalTrafficPolicyLister
+}
+
+type globalTrafficPolicyInformer struct {
+	factory          internalinterfaces.SharedInformerFactory // shared factory that caches one informer per type
+	tweakListOptions internalinterfaces.TweakListOptionsFunc   // optional hook run on ListOptions before each list/watch
+	namespace        string                                    // namespace the informer is scoped to
+}
+
+// NewGlobalTrafficPolicyInformer constructs a new informer for GlobalTrafficPolicy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewGlobalTrafficPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+	return NewFilteredGlobalTrafficPolicyInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredGlobalTrafficPolicyInformer constructs a new informer for GlobalTrafficPolicy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredGlobalTrafficPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+	return cache.NewSharedIndexInformer(
+		&cache.ListWatch{
+			// List/Watch delegate to the typed clientset; tweakListOptions,
+			// when non-nil, mutates the options before every request.
+			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+				if tweakListOptions != nil {
+					tweakListOptions(&options)
+				}
+				return client.AdmiralV1alpha1().GlobalTrafficPolicies(namespace).List(context.TODO(), options)
+			},
+			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+				if tweakListOptions != nil {
+					tweakListOptions(&options)
+				}
+				return client.AdmiralV1alpha1().GlobalTrafficPolicies(namespace).Watch(context.TODO(), options)
+			},
+		},
+		&admiralv1alpha1.GlobalTrafficPolicy{},
+		resyncPeriod,
+		indexers,
+	)
+}
+
+// defaultInformer is the constructor the shared factory invokes via
+// InformerFor; it adds the standard namespace index.
+func (f *globalTrafficPolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+	return NewFilteredGlobalTrafficPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+// Informer returns the shared informer, creating it through the factory on
+// first use.
+func (f *globalTrafficPolicyInformer) Informer() cache.SharedIndexInformer {
+	return f.factory.InformerFor(&admiralv1alpha1.GlobalTrafficPolicy{}, f.defaultInformer)
+}
+
+// Lister returns a lister backed by the shared informer's local index.
+func (f *globalTrafficPolicyInformer) Lister() v1alpha1.GlobalTrafficPolicyLister {
+	return v1alpha1.NewGlobalTrafficPolicyLister(f.Informer().GetIndexer())
+}
diff --git a/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/interface.go b/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/interface.go
new file mode 100644
index 00000000..89ac02d0
--- /dev/null
+++ b/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/interface.go
@@ -0,0 +1,87 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ internalinterfaces "github.com/istio-ecosystem/admiral/admiral/pkg/client/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+	// ClientConnectionConfigs returns a ClientConnectionConfigInformer.
+	ClientConnectionConfigs() ClientConnectionConfigInformer
+	// Dependencies returns a DependencyInformer.
+	Dependencies() DependencyInformer
+	// DependencyProxies returns a DependencyProxyInformer.
+	DependencyProxies() DependencyProxyInformer
+	// GlobalTrafficPolicies returns a GlobalTrafficPolicyInformer.
+	GlobalTrafficPolicies() GlobalTrafficPolicyInformer
+	// OutlierDetections returns a OutlierDetectionInformer.
+	OutlierDetections() OutlierDetectionInformer
+	// RoutingPolicies returns a RoutingPolicyInformer.
+	RoutingPolicies() RoutingPolicyInformer
+	// TrafficConfigs returns a TrafficConfigInformer.
+	TrafficConfigs() TrafficConfigInformer
+}
+
+// version implements Interface by handing each per-type informer the same
+// factory, namespace, and list-options hook.
+type version struct {
+	factory          internalinterfaces.SharedInformerFactory // shared factory backing every informer
+	namespace        string                                   // namespace all informers are scoped to
+	tweakListOptions internalinterfaces.TweakListOptionsFunc  // optional hook run on ListOptions before each list/watch
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// ClientConnectionConfigs returns a ClientConnectionConfigInformer.
+func (v *version) ClientConnectionConfigs() ClientConnectionConfigInformer {
+	return &clientConnectionConfigInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// Dependencies returns a DependencyInformer.
+func (v *version) Dependencies() DependencyInformer {
+	return &dependencyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// DependencyProxies returns a DependencyProxyInformer.
+func (v *version) DependencyProxies() DependencyProxyInformer {
+	return &dependencyProxyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// GlobalTrafficPolicies returns a GlobalTrafficPolicyInformer.
+func (v *version) GlobalTrafficPolicies() GlobalTrafficPolicyInformer {
+	return &globalTrafficPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// OutlierDetections returns a OutlierDetectionInformer.
+func (v *version) OutlierDetections() OutlierDetectionInformer {
+	return &outlierDetectionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// RoutingPolicies returns a RoutingPolicyInformer.
+func (v *version) RoutingPolicies() RoutingPolicyInformer {
+	return &routingPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// TrafficConfigs returns a TrafficConfigInformer.
+func (v *version) TrafficConfigs() TrafficConfigInformer {
+	return &trafficConfigInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
diff --git a/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/outlierdetection.go b/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/outlierdetection.go
new file mode 100644
index 00000000..a9d1a79a
--- /dev/null
+++ b/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/outlierdetection.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ time "time"
+
+ admiralv1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ versioned "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned"
+ internalinterfaces "github.com/istio-ecosystem/admiral/admiral/pkg/client/informers/externalversions/internalinterfaces"
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/listers/admiral/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// OutlierDetectionInformer provides access to a shared informer and lister for
+// OutlierDetections.
+type OutlierDetectionInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1alpha1.OutlierDetectionLister
+}
+
+type outlierDetectionInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewOutlierDetectionInformer constructs a new informer for OutlierDetection type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewOutlierDetectionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredOutlierDetectionInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredOutlierDetectionInformer constructs a new informer for OutlierDetection type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredOutlierDetectionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.AdmiralV1alpha1().OutlierDetections(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.AdmiralV1alpha1().OutlierDetections(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &admiralv1alpha1.OutlierDetection{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *outlierDetectionInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredOutlierDetectionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *outlierDetectionInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&admiralv1alpha1.OutlierDetection{}, f.defaultInformer)
+}
+
+func (f *outlierDetectionInformer) Lister() v1alpha1.OutlierDetectionLister {
+ return v1alpha1.NewOutlierDetectionLister(f.Informer().GetIndexer())
+}
diff --git a/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/routingpolicy.go b/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/routingpolicy.go
new file mode 100644
index 00000000..880a2c60
--- /dev/null
+++ b/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/routingpolicy.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ time "time"
+
+ admiralv1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ versioned "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned"
+ internalinterfaces "github.com/istio-ecosystem/admiral/admiral/pkg/client/informers/externalversions/internalinterfaces"
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/listers/admiral/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// RoutingPolicyInformer provides access to a shared informer and lister for
+// RoutingPolicies.
+type RoutingPolicyInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1alpha1.RoutingPolicyLister
+}
+
+type routingPolicyInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewRoutingPolicyInformer constructs a new informer for RoutingPolicy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewRoutingPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredRoutingPolicyInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredRoutingPolicyInformer constructs a new informer for RoutingPolicy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredRoutingPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.AdmiralV1alpha1().RoutingPolicies(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.AdmiralV1alpha1().RoutingPolicies(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &admiralv1alpha1.RoutingPolicy{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *routingPolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredRoutingPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *routingPolicyInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&admiralv1alpha1.RoutingPolicy{}, f.defaultInformer)
+}
+
+func (f *routingPolicyInformer) Lister() v1alpha1.RoutingPolicyLister {
+ return v1alpha1.NewRoutingPolicyLister(f.Informer().GetIndexer())
+}
diff --git a/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/trafficconfig.go b/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/trafficconfig.go
new file mode 100644
index 00000000..8131415d
--- /dev/null
+++ b/admiral/pkg/client/informers/externalversions/admiral/v1alpha1/trafficconfig.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ time "time"
+
+ admiralv1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ versioned "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned"
+ internalinterfaces "github.com/istio-ecosystem/admiral/admiral/pkg/client/informers/externalversions/internalinterfaces"
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/listers/admiral/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// TrafficConfigInformer provides access to a shared informer and lister for
+// TrafficConfigs.
+type TrafficConfigInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1alpha1.TrafficConfigLister
+}
+
+type trafficConfigInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewTrafficConfigInformer constructs a new informer for TrafficConfig type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewTrafficConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredTrafficConfigInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredTrafficConfigInformer constructs a new informer for TrafficConfig type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredTrafficConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.AdmiralV1alpha1().TrafficConfigs(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.AdmiralV1alpha1().TrafficConfigs(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &admiralv1alpha1.TrafficConfig{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *trafficConfigInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredTrafficConfigInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *trafficConfigInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&admiralv1alpha1.TrafficConfig{}, f.defaultInformer)
+}
+
+func (f *trafficConfigInformer) Lister() v1alpha1.TrafficConfigLister {
+ return v1alpha1.NewTrafficConfigLister(f.Informer().GetIndexer())
+}
From 5862a707af1b56a5ccdc11c4aeac92ae7e1ad7e9 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:51:36 -0700
Subject: [PATCH 036/235] add file admiral/pkg/client/listers/admiral/v1alpha1/
---
.../v1alpha1/clientconnectionconfig.go | 99 +++++++++++++++++++
.../listers/admiral/v1alpha1/dependency.go | 99 +++++++++++++++++++
.../admiral/v1alpha1/dependencyproxy.go | 99 +++++++++++++++++++
.../admiral/v1alpha1/expansion_generated.go | 75 ++++++++++++++
.../admiral/v1alpha1/globaltrafficpolicy.go | 99 +++++++++++++++++++
.../admiral/v1alpha1/outlierdetection.go | 99 +++++++++++++++++++
.../listers/admiral/v1alpha1/routingpolicy.go | 99 +++++++++++++++++++
.../listers/admiral/v1alpha1/trafficconfig.go | 99 +++++++++++++++++++
8 files changed, 768 insertions(+)
create mode 100644 admiral/pkg/client/listers/admiral/v1alpha1/clientconnectionconfig.go
create mode 100644 admiral/pkg/client/listers/admiral/v1alpha1/dependency.go
create mode 100644 admiral/pkg/client/listers/admiral/v1alpha1/dependencyproxy.go
create mode 100644 admiral/pkg/client/listers/admiral/v1alpha1/expansion_generated.go
create mode 100644 admiral/pkg/client/listers/admiral/v1alpha1/globaltrafficpolicy.go
create mode 100644 admiral/pkg/client/listers/admiral/v1alpha1/outlierdetection.go
create mode 100644 admiral/pkg/client/listers/admiral/v1alpha1/routingpolicy.go
create mode 100644 admiral/pkg/client/listers/admiral/v1alpha1/trafficconfig.go
diff --git a/admiral/pkg/client/listers/admiral/v1alpha1/clientconnectionconfig.go b/admiral/pkg/client/listers/admiral/v1alpha1/clientconnectionconfig.go
new file mode 100644
index 00000000..2e993d56
--- /dev/null
+++ b/admiral/pkg/client/listers/admiral/v1alpha1/clientconnectionconfig.go
@@ -0,0 +1,99 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ClientConnectionConfigLister helps list ClientConnectionConfigs.
+// All objects returned here must be treated as read-only.
+type ClientConnectionConfigLister interface {
+ // List lists all ClientConnectionConfigs in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.ClientConnectionConfig, err error)
+ // ClientConnectionConfigs returns an object that can list and get ClientConnectionConfigs.
+ ClientConnectionConfigs(namespace string) ClientConnectionConfigNamespaceLister
+ ClientConnectionConfigListerExpansion
+}
+
+// clientConnectionConfigLister implements the ClientConnectionConfigLister interface.
+type clientConnectionConfigLister struct {
+ indexer cache.Indexer
+}
+
+// NewClientConnectionConfigLister returns a new ClientConnectionConfigLister.
+func NewClientConnectionConfigLister(indexer cache.Indexer) ClientConnectionConfigLister {
+ return &clientConnectionConfigLister{indexer: indexer}
+}
+
+// List lists all ClientConnectionConfigs in the indexer.
+func (s *clientConnectionConfigLister) List(selector labels.Selector) (ret []*v1alpha1.ClientConnectionConfig, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.ClientConnectionConfig))
+ })
+ return ret, err
+}
+
+// ClientConnectionConfigs returns an object that can list and get ClientConnectionConfigs.
+func (s *clientConnectionConfigLister) ClientConnectionConfigs(namespace string) ClientConnectionConfigNamespaceLister {
+ return clientConnectionConfigNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// ClientConnectionConfigNamespaceLister helps list and get ClientConnectionConfigs.
+// All objects returned here must be treated as read-only.
+type ClientConnectionConfigNamespaceLister interface {
+ // List lists all ClientConnectionConfigs in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.ClientConnectionConfig, err error)
+ // Get retrieves the ClientConnectionConfig from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1alpha1.ClientConnectionConfig, error)
+ ClientConnectionConfigNamespaceListerExpansion
+}
+
+// clientConnectionConfigNamespaceLister implements the ClientConnectionConfigNamespaceLister
+// interface.
+type clientConnectionConfigNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all ClientConnectionConfigs in the indexer for a given namespace.
+func (s clientConnectionConfigNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ClientConnectionConfig, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.ClientConnectionConfig))
+ })
+ return ret, err
+}
+
+// Get retrieves the ClientConnectionConfig from the indexer for a given namespace and name.
+func (s clientConnectionConfigNamespaceLister) Get(name string) (*v1alpha1.ClientConnectionConfig, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1alpha1.Resource("clientconnectionconfig"), name)
+ }
+ return obj.(*v1alpha1.ClientConnectionConfig), nil
+}
diff --git a/admiral/pkg/client/listers/admiral/v1alpha1/dependency.go b/admiral/pkg/client/listers/admiral/v1alpha1/dependency.go
new file mode 100644
index 00000000..d91d20d3
--- /dev/null
+++ b/admiral/pkg/client/listers/admiral/v1alpha1/dependency.go
@@ -0,0 +1,99 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// DependencyLister helps list Dependencies.
+// All objects returned here must be treated as read-only.
+type DependencyLister interface {
+ // List lists all Dependencies in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.Dependency, err error)
+ // Dependencies returns an object that can list and get Dependencies.
+ Dependencies(namespace string) DependencyNamespaceLister
+ DependencyListerExpansion
+}
+
+// dependencyLister implements the DependencyLister interface.
+type dependencyLister struct {
+ indexer cache.Indexer
+}
+
+// NewDependencyLister returns a new DependencyLister.
+func NewDependencyLister(indexer cache.Indexer) DependencyLister {
+ return &dependencyLister{indexer: indexer}
+}
+
+// List lists all Dependencies in the indexer.
+func (s *dependencyLister) List(selector labels.Selector) (ret []*v1alpha1.Dependency, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.Dependency))
+ })
+ return ret, err
+}
+
+// Dependencies returns an object that can list and get Dependencies.
+func (s *dependencyLister) Dependencies(namespace string) DependencyNamespaceLister {
+ return dependencyNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// DependencyNamespaceLister helps list and get Dependencies.
+// All objects returned here must be treated as read-only.
+type DependencyNamespaceLister interface {
+ // List lists all Dependencies in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.Dependency, err error)
+ // Get retrieves the Dependency from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1alpha1.Dependency, error)
+ DependencyNamespaceListerExpansion
+}
+
+// dependencyNamespaceLister implements the DependencyNamespaceLister
+// interface.
+type dependencyNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all Dependencies in the indexer for a given namespace.
+func (s dependencyNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Dependency, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.Dependency))
+ })
+ return ret, err
+}
+
+// Get retrieves the Dependency from the indexer for a given namespace and name.
+func (s dependencyNamespaceLister) Get(name string) (*v1alpha1.Dependency, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1alpha1.Resource("dependency"), name)
+ }
+ return obj.(*v1alpha1.Dependency), nil
+}
diff --git a/admiral/pkg/client/listers/admiral/v1alpha1/dependencyproxy.go b/admiral/pkg/client/listers/admiral/v1alpha1/dependencyproxy.go
new file mode 100644
index 00000000..cf8e8677
--- /dev/null
+++ b/admiral/pkg/client/listers/admiral/v1alpha1/dependencyproxy.go
@@ -0,0 +1,99 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// DependencyProxyLister helps list DependencyProxies.
+// All objects returned here must be treated as read-only.
+type DependencyProxyLister interface {
+ // List lists all DependencyProxies in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.DependencyProxy, err error)
+ // DependencyProxies returns an object that can list and get DependencyProxies.
+ DependencyProxies(namespace string) DependencyProxyNamespaceLister
+ DependencyProxyListerExpansion
+}
+
+// dependencyProxyLister implements the DependencyProxyLister interface.
+type dependencyProxyLister struct {
+ indexer cache.Indexer
+}
+
+// NewDependencyProxyLister returns a new DependencyProxyLister.
+func NewDependencyProxyLister(indexer cache.Indexer) DependencyProxyLister {
+ return &dependencyProxyLister{indexer: indexer}
+}
+
+// List lists all DependencyProxies in the indexer.
+func (s *dependencyProxyLister) List(selector labels.Selector) (ret []*v1alpha1.DependencyProxy, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.DependencyProxy))
+ })
+ return ret, err
+}
+
+// DependencyProxies returns an object that can list and get DependencyProxies.
+func (s *dependencyProxyLister) DependencyProxies(namespace string) DependencyProxyNamespaceLister {
+ return dependencyProxyNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// DependencyProxyNamespaceLister helps list and get DependencyProxies.
+// All objects returned here must be treated as read-only.
+type DependencyProxyNamespaceLister interface {
+ // List lists all DependencyProxies in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.DependencyProxy, err error)
+ // Get retrieves the DependencyProxy from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1alpha1.DependencyProxy, error)
+ DependencyProxyNamespaceListerExpansion
+}
+
+// dependencyProxyNamespaceLister implements the DependencyProxyNamespaceLister
+// interface.
+type dependencyProxyNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all DependencyProxies in the indexer for a given namespace.
+func (s dependencyProxyNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.DependencyProxy, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.DependencyProxy))
+ })
+ return ret, err
+}
+
+// Get retrieves the DependencyProxy from the indexer for a given namespace and name.
+func (s dependencyProxyNamespaceLister) Get(name string) (*v1alpha1.DependencyProxy, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1alpha1.Resource("dependencyproxy"), name)
+ }
+ return obj.(*v1alpha1.DependencyProxy), nil
+}
diff --git a/admiral/pkg/client/listers/admiral/v1alpha1/expansion_generated.go b/admiral/pkg/client/listers/admiral/v1alpha1/expansion_generated.go
new file mode 100644
index 00000000..9bb8bac5
--- /dev/null
+++ b/admiral/pkg/client/listers/admiral/v1alpha1/expansion_generated.go
@@ -0,0 +1,75 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// ClientConnectionConfigListerExpansion allows custom methods to be added to
+// ClientConnectionConfigLister.
+type ClientConnectionConfigListerExpansion interface{}
+
+// ClientConnectionConfigNamespaceListerExpansion allows custom methods to be added to
+// ClientConnectionConfigNamespaceLister.
+type ClientConnectionConfigNamespaceListerExpansion interface{}
+
+// DependencyListerExpansion allows custom methods to be added to
+// DependencyLister.
+type DependencyListerExpansion interface{}
+
+// DependencyNamespaceListerExpansion allows custom methods to be added to
+// DependencyNamespaceLister.
+type DependencyNamespaceListerExpansion interface{}
+
+// DependencyProxyListerExpansion allows custom methods to be added to
+// DependencyProxyLister.
+type DependencyProxyListerExpansion interface{}
+
+// DependencyProxyNamespaceListerExpansion allows custom methods to be added to
+// DependencyProxyNamespaceLister.
+type DependencyProxyNamespaceListerExpansion interface{}
+
+// GlobalTrafficPolicyListerExpansion allows custom methods to be added to
+// GlobalTrafficPolicyLister.
+type GlobalTrafficPolicyListerExpansion interface{}
+
+// GlobalTrafficPolicyNamespaceListerExpansion allows custom methods to be added to
+// GlobalTrafficPolicyNamespaceLister.
+type GlobalTrafficPolicyNamespaceListerExpansion interface{}
+
+// OutlierDetectionListerExpansion allows custom methods to be added to
+// OutlierDetectionLister.
+type OutlierDetectionListerExpansion interface{}
+
+// OutlierDetectionNamespaceListerExpansion allows custom methods to be added to
+// OutlierDetectionNamespaceLister.
+type OutlierDetectionNamespaceListerExpansion interface{}
+
+// RoutingPolicyListerExpansion allows custom methods to be added to
+// RoutingPolicyLister.
+type RoutingPolicyListerExpansion interface{}
+
+// RoutingPolicyNamespaceListerExpansion allows custom methods to be added to
+// RoutingPolicyNamespaceLister.
+type RoutingPolicyNamespaceListerExpansion interface{}
+
+// TrafficConfigListerExpansion allows custom methods to be added to
+// TrafficConfigLister.
+type TrafficConfigListerExpansion interface{}
+
+// TrafficConfigNamespaceListerExpansion allows custom methods to be added to
+// TrafficConfigNamespaceLister.
+type TrafficConfigNamespaceListerExpansion interface{}
diff --git a/admiral/pkg/client/listers/admiral/v1alpha1/globaltrafficpolicy.go b/admiral/pkg/client/listers/admiral/v1alpha1/globaltrafficpolicy.go
new file mode 100644
index 00000000..afd2c987
--- /dev/null
+++ b/admiral/pkg/client/listers/admiral/v1alpha1/globaltrafficpolicy.go
@@ -0,0 +1,99 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// GlobalTrafficPolicyLister helps list GlobalTrafficPolicies.
+// All objects returned here must be treated as read-only.
+type GlobalTrafficPolicyLister interface {
+ // List lists all GlobalTrafficPolicies in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.GlobalTrafficPolicy, err error)
+ // GlobalTrafficPolicies returns an object that can list and get GlobalTrafficPolicies.
+ GlobalTrafficPolicies(namespace string) GlobalTrafficPolicyNamespaceLister
+ GlobalTrafficPolicyListerExpansion
+}
+
+// globalTrafficPolicyLister implements the GlobalTrafficPolicyLister interface.
+type globalTrafficPolicyLister struct {
+ indexer cache.Indexer
+}
+
+// NewGlobalTrafficPolicyLister returns a new GlobalTrafficPolicyLister.
+func NewGlobalTrafficPolicyLister(indexer cache.Indexer) GlobalTrafficPolicyLister {
+ return &globalTrafficPolicyLister{indexer: indexer}
+}
+
+// List lists all GlobalTrafficPolicies in the indexer.
+func (s *globalTrafficPolicyLister) List(selector labels.Selector) (ret []*v1alpha1.GlobalTrafficPolicy, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.GlobalTrafficPolicy))
+ })
+ return ret, err
+}
+
+// GlobalTrafficPolicies returns an object that can list and get GlobalTrafficPolicies.
+func (s *globalTrafficPolicyLister) GlobalTrafficPolicies(namespace string) GlobalTrafficPolicyNamespaceLister {
+ return globalTrafficPolicyNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// GlobalTrafficPolicyNamespaceLister helps list and get GlobalTrafficPolicies.
+// All objects returned here must be treated as read-only.
+type GlobalTrafficPolicyNamespaceLister interface {
+ // List lists all GlobalTrafficPolicies in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.GlobalTrafficPolicy, err error)
+ // Get retrieves the GlobalTrafficPolicy from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1alpha1.GlobalTrafficPolicy, error)
+ GlobalTrafficPolicyNamespaceListerExpansion
+}
+
+// globalTrafficPolicyNamespaceLister implements the GlobalTrafficPolicyNamespaceLister
+// interface.
+type globalTrafficPolicyNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all GlobalTrafficPolicies in the indexer for a given namespace.
+func (s globalTrafficPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.GlobalTrafficPolicy, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.GlobalTrafficPolicy))
+ })
+ return ret, err
+}
+
+// Get retrieves the GlobalTrafficPolicy from the indexer for a given namespace and name.
+func (s globalTrafficPolicyNamespaceLister) Get(name string) (*v1alpha1.GlobalTrafficPolicy, error) {
+	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) // informer cache keys use the "<namespace>/<name>" form
+	if err != nil {
+		return nil, err
+	}
+	if !exists {
+		return nil, errors.NewNotFound(v1alpha1.Resource("globaltrafficpolicy"), name) // standard apimachinery NotFound; callers can test with errors.IsNotFound
+	}
+	return obj.(*v1alpha1.GlobalTrafficPolicy), nil
+}
diff --git a/admiral/pkg/client/listers/admiral/v1alpha1/outlierdetection.go b/admiral/pkg/client/listers/admiral/v1alpha1/outlierdetection.go
new file mode 100644
index 00000000..5277aa9e
--- /dev/null
+++ b/admiral/pkg/client/listers/admiral/v1alpha1/outlierdetection.go
@@ -0,0 +1,99 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// OutlierDetectionLister helps list OutlierDetections.
+// All objects returned here must be treated as read-only.
+type OutlierDetectionLister interface {
+ // List lists all OutlierDetections in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.OutlierDetection, err error)
+ // OutlierDetections returns an object that can list and get OutlierDetections.
+ OutlierDetections(namespace string) OutlierDetectionNamespaceLister
+ OutlierDetectionListerExpansion
+}
+
+// outlierDetectionLister implements the OutlierDetectionLister interface.
+type outlierDetectionLister struct {
+ indexer cache.Indexer
+}
+
+// NewOutlierDetectionLister returns a new OutlierDetectionLister.
+func NewOutlierDetectionLister(indexer cache.Indexer) OutlierDetectionLister {
+ return &outlierDetectionLister{indexer: indexer}
+}
+
+// List lists all OutlierDetections in the indexer.
+func (s *outlierDetectionLister) List(selector labels.Selector) (ret []*v1alpha1.OutlierDetection, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.OutlierDetection))
+ })
+ return ret, err
+}
+
+// OutlierDetections returns an object that can list and get OutlierDetections.
+func (s *outlierDetectionLister) OutlierDetections(namespace string) OutlierDetectionNamespaceLister {
+ return outlierDetectionNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// OutlierDetectionNamespaceLister helps list and get OutlierDetections.
+// All objects returned here must be treated as read-only.
+type OutlierDetectionNamespaceLister interface {
+ // List lists all OutlierDetections in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.OutlierDetection, err error)
+ // Get retrieves the OutlierDetection from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1alpha1.OutlierDetection, error)
+ OutlierDetectionNamespaceListerExpansion
+}
+
+// outlierDetectionNamespaceLister implements the OutlierDetectionNamespaceLister
+// interface.
+type outlierDetectionNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all OutlierDetections in the indexer for a given namespace.
+func (s outlierDetectionNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.OutlierDetection, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.OutlierDetection))
+ })
+ return ret, err
+}
+
+// Get retrieves the OutlierDetection from the indexer for a given namespace and name.
+func (s outlierDetectionNamespaceLister) Get(name string) (*v1alpha1.OutlierDetection, error) {
+	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) // informer cache keys use the "<namespace>/<name>" form
+	if err != nil {
+		return nil, err
+	}
+	if !exists {
+		return nil, errors.NewNotFound(v1alpha1.Resource("outlierdetection"), name) // standard apimachinery NotFound; callers can test with errors.IsNotFound
+	}
+	return obj.(*v1alpha1.OutlierDetection), nil
+}
diff --git a/admiral/pkg/client/listers/admiral/v1alpha1/routingpolicy.go b/admiral/pkg/client/listers/admiral/v1alpha1/routingpolicy.go
new file mode 100644
index 00000000..fefbcd95
--- /dev/null
+++ b/admiral/pkg/client/listers/admiral/v1alpha1/routingpolicy.go
@@ -0,0 +1,99 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// RoutingPolicyLister helps list RoutingPolicies.
+// All objects returned here must be treated as read-only.
+type RoutingPolicyLister interface {
+ // List lists all RoutingPolicies in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.RoutingPolicy, err error)
+ // RoutingPolicies returns an object that can list and get RoutingPolicies.
+ RoutingPolicies(namespace string) RoutingPolicyNamespaceLister
+ RoutingPolicyListerExpansion
+}
+
+// routingPolicyLister implements the RoutingPolicyLister interface.
+type routingPolicyLister struct {
+ indexer cache.Indexer
+}
+
+// NewRoutingPolicyLister returns a new RoutingPolicyLister.
+func NewRoutingPolicyLister(indexer cache.Indexer) RoutingPolicyLister {
+ return &routingPolicyLister{indexer: indexer}
+}
+
+// List lists all RoutingPolicies in the indexer.
+func (s *routingPolicyLister) List(selector labels.Selector) (ret []*v1alpha1.RoutingPolicy, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.RoutingPolicy))
+ })
+ return ret, err
+}
+
+// RoutingPolicies returns an object that can list and get RoutingPolicies.
+func (s *routingPolicyLister) RoutingPolicies(namespace string) RoutingPolicyNamespaceLister {
+ return routingPolicyNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// RoutingPolicyNamespaceLister helps list and get RoutingPolicies.
+// All objects returned here must be treated as read-only.
+type RoutingPolicyNamespaceLister interface {
+ // List lists all RoutingPolicies in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.RoutingPolicy, err error)
+ // Get retrieves the RoutingPolicy from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1alpha1.RoutingPolicy, error)
+ RoutingPolicyNamespaceListerExpansion
+}
+
+// routingPolicyNamespaceLister implements the RoutingPolicyNamespaceLister
+// interface.
+type routingPolicyNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all RoutingPolicies in the indexer for a given namespace.
+func (s routingPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.RoutingPolicy, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.RoutingPolicy))
+ })
+ return ret, err
+}
+
+// Get retrieves the RoutingPolicy from the indexer for a given namespace and name.
+func (s routingPolicyNamespaceLister) Get(name string) (*v1alpha1.RoutingPolicy, error) {
+	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) // informer cache keys use the "<namespace>/<name>" form
+	if err != nil {
+		return nil, err
+	}
+	if !exists {
+		return nil, errors.NewNotFound(v1alpha1.Resource("routingpolicy"), name) // standard apimachinery NotFound; callers can test with errors.IsNotFound
+	}
+	return obj.(*v1alpha1.RoutingPolicy), nil
+}
diff --git a/admiral/pkg/client/listers/admiral/v1alpha1/trafficconfig.go b/admiral/pkg/client/listers/admiral/v1alpha1/trafficconfig.go
new file mode 100644
index 00000000..2c3c2621
--- /dev/null
+++ b/admiral/pkg/client/listers/admiral/v1alpha1/trafficconfig.go
@@ -0,0 +1,99 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1alpha1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// TrafficConfigLister helps list TrafficConfigs.
+// All objects returned here must be treated as read-only.
+type TrafficConfigLister interface {
+ // List lists all TrafficConfigs in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.TrafficConfig, err error)
+ // TrafficConfigs returns an object that can list and get TrafficConfigs.
+ TrafficConfigs(namespace string) TrafficConfigNamespaceLister
+ TrafficConfigListerExpansion
+}
+
+// trafficConfigLister implements the TrafficConfigLister interface.
+type trafficConfigLister struct {
+ indexer cache.Indexer
+}
+
+// NewTrafficConfigLister returns a new TrafficConfigLister.
+func NewTrafficConfigLister(indexer cache.Indexer) TrafficConfigLister {
+ return &trafficConfigLister{indexer: indexer}
+}
+
+// List lists all TrafficConfigs in the indexer.
+func (s *trafficConfigLister) List(selector labels.Selector) (ret []*v1alpha1.TrafficConfig, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.TrafficConfig))
+ })
+ return ret, err
+}
+
+// TrafficConfigs returns an object that can list and get TrafficConfigs.
+func (s *trafficConfigLister) TrafficConfigs(namespace string) TrafficConfigNamespaceLister {
+ return trafficConfigNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// TrafficConfigNamespaceLister helps list and get TrafficConfigs.
+// All objects returned here must be treated as read-only.
+type TrafficConfigNamespaceLister interface {
+ // List lists all TrafficConfigs in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.TrafficConfig, err error)
+ // Get retrieves the TrafficConfig from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1alpha1.TrafficConfig, error)
+ TrafficConfigNamespaceListerExpansion
+}
+
+// trafficConfigNamespaceLister implements the TrafficConfigNamespaceLister
+// interface.
+type trafficConfigNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all TrafficConfigs in the indexer for a given namespace.
+func (s trafficConfigNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.TrafficConfig, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.TrafficConfig))
+ })
+ return ret, err
+}
+
+// Get retrieves the TrafficConfig from the indexer for a given namespace and name.
+func (s trafficConfigNamespaceLister) Get(name string) (*v1alpha1.TrafficConfig, error) {
+	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) // informer cache keys use the "<namespace>/<name>" form
+	if err != nil {
+		return nil, err
+	}
+	if !exists {
+		return nil, errors.NewNotFound(v1alpha1.Resource("trafficconfig"), name) // standard apimachinery NotFound; callers can test with errors.IsNotFound
+	}
+	return obj.(*v1alpha1.TrafficConfig), nil
+}
From 4c7f43cc2aa3d5b51e50df210cf193b77e12ac6a Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:51:39 -0700
Subject: [PATCH 037/235] add file admiral/pkg/client/loader/
---
admiral/pkg/client/loader/client_loader.go | 25 ++++++
admiral/pkg/client/loader/fake_loader.go | 88 ++++++++++++++++++++++
admiral/pkg/client/loader/kube_loader.go | 85 +++++++++++++++++++++
3 files changed, 198 insertions(+)
create mode 100644 admiral/pkg/client/loader/client_loader.go
create mode 100644 admiral/pkg/client/loader/fake_loader.go
create mode 100644 admiral/pkg/client/loader/kube_loader.go
diff --git a/admiral/pkg/client/loader/client_loader.go b/admiral/pkg/client/loader/client_loader.go
new file mode 100644
index 00000000..114b933c
--- /dev/null
+++ b/admiral/pkg/client/loader/client_loader.go
@@ -0,0 +1,25 @@
+package loader
+
+import (
+ argo "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned"
+ admiral "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned"
+ istio "istio.io/client-go/pkg/clientset/versioned"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+)
+
+const FakeKubeconfigPath = "fake.config" // sentinel kubeconfig path used to request the fake client loader — presumably for tests; confirm against callers
+
+type ClientLoader interface {
+	LoadAdmiralClientFromPath(path string) (admiral.Interface, error) // Admiral CRD clientset from a kubeconfig file
+	LoadAdmiralClientFromConfig(config *rest.Config) (admiral.Interface, error)
+
+	LoadIstioClientFromPath(path string) (istio.Interface, error) // Istio networking clientset
+	LoadIstioClientFromConfig(config *rest.Config) (istio.Interface, error)
+
+	LoadArgoClientFromPath(path string) (argo.Interface, error) // Argo Rollouts clientset
+	LoadArgoClientFromConfig(config *rest.Config) (argo.Interface, error)
+
+	LoadKubeClientFromPath(path string) (kubernetes.Interface, error) // core Kubernetes clientset
+	LoadKubeClientFromConfig(config *rest.Config) (kubernetes.Interface, error)
+}
diff --git a/admiral/pkg/client/loader/fake_loader.go b/admiral/pkg/client/loader/fake_loader.go
new file mode 100644
index 00000000..cd390d4b
--- /dev/null
+++ b/admiral/pkg/client/loader/fake_loader.go
@@ -0,0 +1,88 @@
+package loader
+
+import (
+ argo "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned"
+ argofake "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake"
+ admiral "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned"
+ admiralfake "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/fake"
+ istio "istio.io/client-go/pkg/clientset/versioned"
+ istiofake "istio.io/client-go/pkg/clientset/versioned/fake"
+ "k8s.io/client-go/kubernetes"
+ kubefake "k8s.io/client-go/kubernetes/fake"
+ "k8s.io/client-go/rest"
+)
+
+const FakePrefix = "fake" // prefix used to mark fake cluster identifiers
+
+// fake clients for the Admiral cluster (shared package-level singletons, one per client type)
+var FakeAdmiralClient admiral.Interface = admiralfake.NewSimpleClientset()
+var FakeIstioClient istio.Interface = istiofake.NewSimpleClientset()
+var FakeKubeClient kubernetes.Interface = kubefake.NewSimpleClientset()
+var FakeArgoClient argo.Interface = argofake.NewSimpleClientset()
+
+// fake clients for dependent clusters, lazily created and keyed by rest.Config.Host — NOTE(review): maps are not mutex-guarded; assumes single-goroutine test use, confirm
+var FakeAdmiralClientMap map[string]admiral.Interface = make(map[string]admiral.Interface)
+var FakeIstioClientMap map[string]istio.Interface = make(map[string]istio.Interface)
+var FakeKubeClientMap map[string]kubernetes.Interface = make(map[string]kubernetes.Interface)
+var FakeArgoClientMap map[string]argo.Interface = make(map[string]argo.Interface)
+
+type FakeClientLoader struct{} // stateless; all state lives in the package-level fakes above
+
+// Singleton
+var fakeClientLoader = &FakeClientLoader{}
+
+func GetFakeClientLoader() ClientLoader {
+	return fakeClientLoader // always the shared stateless instance
+}
+
+func (loader *FakeClientLoader) LoadAdmiralClientFromPath(path string) (admiral.Interface, error) {
+	return FakeAdmiralClient, nil // path is ignored; always returns the shared fake for the Admiral cluster
+}
+
+// Receiver is named "loader" for consistency with every other FakeClientLoader method.
+func (loader *FakeClientLoader) LoadAdmiralClientFromConfig(config *rest.Config) (admiral.Interface, error) {
+	admiralClient, ok := FakeAdmiralClientMap[config.Host] // one fake clientset per cluster, keyed by host
+	if !ok {
+		admiralClient = admiralfake.NewSimpleClientset()
+		FakeAdmiralClientMap[config.Host] = admiralClient
+	}
+	return admiralClient, nil
+}
+
+func (loader *FakeClientLoader) LoadIstioClientFromPath(path string) (istio.Interface, error) {
+	return FakeIstioClient, nil // path is ignored; always returns the shared fake istio client
+}
+
+func (loader *FakeClientLoader) LoadIstioClientFromConfig(config *rest.Config) (istio.Interface, error) {
+	if client, found := FakeIstioClientMap[config.Host]; found { // reuse the per-cluster fake when one exists
+		return client, nil
+	}
+	client := istiofake.NewSimpleClientset() // first request for this cluster: create and remember a fake
+	FakeIstioClientMap[config.Host] = client
+	return client, nil
+}
+
+func (loader *FakeClientLoader) LoadArgoClientFromPath(path string) (argo.Interface, error) {
+	return FakeArgoClient, nil // path is ignored; always returns the shared fake argo client
+}
+
+func (loader *FakeClientLoader) LoadArgoClientFromConfig(config *rest.Config) (argo.Interface, error) {
+	if client, found := FakeArgoClientMap[config.Host]; found { // reuse the per-cluster fake when one exists
+		return client, nil
+	}
+	client := argofake.NewSimpleClientset() // first request for this cluster: create and remember a fake
+	FakeArgoClientMap[config.Host] = client
+	return client, nil
+}
+
+func (loader *FakeClientLoader) LoadKubeClientFromPath(path string) (kubernetes.Interface, error) {
+	return FakeKubeClient, nil // path is ignored; always returns the shared fake kube client
+}
+
+func (loader *FakeClientLoader) LoadKubeClientFromConfig(config *rest.Config) (kubernetes.Interface, error) {
+	kubeClient, ok := FakeKubeClientMap[config.Host] // lazily create one fake kube clientset per cluster host
+	if !ok {
+		kubeClient = kubefake.NewSimpleClientset()
+		FakeKubeClientMap[config.Host] = kubeClient
+	}
+	return kubeClient, nil
+}
diff --git a/admiral/pkg/client/loader/kube_loader.go b/admiral/pkg/client/loader/kube_loader.go
new file mode 100644
index 00000000..6fe03bf1
--- /dev/null
+++ b/admiral/pkg/client/loader/kube_loader.go
@@ -0,0 +1,85 @@
+package loader
+
+import (
+ "fmt"
+
+ argo "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned"
+ admiral "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned"
+ log "github.com/sirupsen/logrus"
+ istio "istio.io/client-go/pkg/clientset/versioned"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/clientcmd"
+)
+
+type KubeClientLoader struct{} // stateless; builds real clientsets from kubeconfig paths or rest.Configs
+
+// Singleton
+var kubeClientLoader = &KubeClientLoader{}
+
+func GetKubeClientLoader() ClientLoader {
+	return kubeClientLoader // always the shared stateless instance
+}
+
+func (loader *KubeClientLoader) LoadAdmiralClientFromPath(kubeConfigPath string) (admiral.Interface, error) {
+	config, err := getConfig(kubeConfigPath)
+	if err != nil || config == nil { // getConfig errors on nil config, so this cannot return (nil, nil) today — defensive guard only
+		return nil, err
+	}
+
+	return loader.LoadAdmiralClientFromConfig(config)
+}
+
+// Receiver is named "loader" for consistency with every other KubeClientLoader method.
+func (loader *KubeClientLoader) LoadAdmiralClientFromConfig(config *rest.Config) (admiral.Interface, error) {
+	return admiral.NewForConfig(config) // real Admiral CRD clientset for the given cluster config
+}
+
+func (loader *KubeClientLoader) LoadIstioClientFromPath(kubeConfigPath string) (istio.Interface, error) {
+	config, err := getConfig(kubeConfigPath)
+	if err != nil || config == nil { // defensive nil-config guard; getConfig errors on nil config
+		return nil, err
+	}
+
+	return loader.LoadIstioClientFromConfig(config)
+}
+
+func (loader *KubeClientLoader) LoadIstioClientFromConfig(config *rest.Config) (istio.Interface, error) {
+	return istio.NewForConfig(config) // real Istio networking clientset for the given cluster config
+}
+
+func (loader *KubeClientLoader) LoadArgoClientFromPath(kubeConfigPath string) (argo.Interface, error) {
+	config, err := getConfig(kubeConfigPath)
+	if err != nil || config == nil { // defensive nil-config guard; getConfig errors on nil config
+		return nil, err
+	}
+
+	return loader.LoadArgoClientFromConfig(config)
+}
+
+func (loader *KubeClientLoader) LoadArgoClientFromConfig(config *rest.Config) (argo.Interface, error) {
+	return argo.NewForConfig(config) // real Argo Rollouts clientset for the given cluster config
+}
+
+func (loader *KubeClientLoader) LoadKubeClientFromPath(kubeConfigPath string) (kubernetes.Interface, error) {
+	config, err := getConfig(kubeConfigPath)
+	if err != nil || config == nil { // defensive nil-config guard; getConfig errors on nil config
+		return nil, err
+	}
+
+	return loader.LoadKubeClientFromConfig(config)
+}
+
+func (loader *KubeClientLoader) LoadKubeClientFromConfig(config *rest.Config) (kubernetes.Interface, error) {
+	return kubernetes.NewForConfig(config) // real core Kubernetes clientset for the given cluster config
+}
+
+func getConfig(kubeConfigPath string) (*rest.Config, error) {
+	log.Infof("getting kubeconfig from: %#v", kubeConfigPath)
+	// build the rest.Config from the kubeconfig file at the given path
+	config, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
+	// treat a nil config as failure too; wrap with %w so callers can use errors.Is/As on the cause
+	if err != nil || config == nil {
+		return nil, fmt.Errorf("could not retrieve kubeconfig: %w", err)
+	}
+	return config, nil
+}
From 6ca80b302c62b815c13f93fab4d6ac428829e3a7 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:51:42 -0700
Subject: [PATCH 038/235] add file
admiral/pkg/clusters/clientconnectionconfig_handler.go
---
.../clientconnectionconfig_handler.go | 139 ++++++++++++++++++
1 file changed, 139 insertions(+)
create mode 100644 admiral/pkg/clusters/clientconnectionconfig_handler.go
diff --git a/admiral/pkg/clusters/clientconnectionconfig_handler.go b/admiral/pkg/clusters/clientconnectionconfig_handler.go
new file mode 100644
index 00000000..2b2c2f16
--- /dev/null
+++ b/admiral/pkg/clusters/clientconnectionconfig_handler.go
@@ -0,0 +1,139 @@
+package clusters
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ log "github.com/sirupsen/logrus"
+)
+
+type ClientConnectionConfigHandler struct {
+	RemoteRegistry *RemoteRegistry // shared registry used when handling events
+	ClusterID      string          // cluster in which the watched ClientConnectionConfig lives
+}
+
+type ClientConnectionConfigCache interface {
+	GetFromIdentity(identity string, environment string) (*v1.ClientConnectionConfig, error) // lookup by env+identity key
+	Put(clientConnectionSettings *v1.ClientConnectionConfig) error                           // insert/overwrite, keyed by labels on the resource
+	Delete(identity string, environment string) error                                        // remove; errors if the key is absent
+}
+
+type clientConnectionSettingsCache struct {
+	identityCache map[string]*v1.ClientConnectionConfig // keyed by common.ConstructKeyWithEnvAndIdentity(env, identity)
+	mutex         *sync.RWMutex                         // guards identityCache
+}
+
+// NewClientConnectionConfigCache returns an empty, ready-to-use ClientConnectionConfigCache.
+func NewClientConnectionConfigCache() ClientConnectionConfigCache {
+	cache := &clientConnectionSettingsCache{}
+	cache.identityCache = map[string]*v1.ClientConnectionConfig{}
+	cache.mutex = &sync.RWMutex{}
+	return cache
+}
+
+func (c *clientConnectionSettingsCache) GetFromIdentity(identity string,
+	environment string) (*v1.ClientConnectionConfig, error) { // returns (nil, nil) on a cache miss — callers must nil-check the result
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+	return c.identityCache[common.ConstructKeyWithEnvAndIdentity(environment, identity)], nil
+}
+
+func (c *clientConnectionSettingsCache) Put(clientConnectionSettings *v1.ClientConnectionConfig) error {
+ if clientConnectionSettings.Name == "" {
+ return errors.New(
+ "skipped adding to clientConnectionSettingsCache, missing name in clientConnectionSettings")
+ }
+ defer c.mutex.Unlock()
+ c.mutex.Lock()
+ var clientConnectionSettingsIdentity = common.GetClientConnectionConfigIdentity(clientConnectionSettings)
+ var clientConnectionSettingsEnv = common.GetClientConnectionConfigEnv(clientConnectionSettings)
+
+ log.Infof(
+ "adding clientConnectionSettings with name %v to clientConnectionSettingsCache. LabelMatch=%v env=%v",
+ clientConnectionSettings.Name, clientConnectionSettingsIdentity, clientConnectionSettingsEnv)
+
+ key := common.ConstructKeyWithEnvAndIdentity(clientConnectionSettingsEnv, clientConnectionSettingsIdentity)
+ c.identityCache[key] = clientConnectionSettings
+ return nil
+}
+
+// Delete removes the entry for the given identity and environment, erroring when absent.
+func (c *clientConnectionSettingsCache) Delete(identity string, environment string) error {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+	key := common.ConstructKeyWithEnvAndIdentity(environment, identity)
+	if _, found := c.identityCache[key]; !found {
+		return fmt.Errorf("clientConnectionSettings with key %s not found in clientConnectionSettingsCache", key)
+	}
+	log.Infof("deleting clientConnectionSettings with key=%s from clientConnectionSettingsCache", key)
+	delete(c.identityCache, key)
+	return nil
+}
+
+func (c *ClientConnectionConfigHandler) Added(ctx context.Context,
+	clientConnectionSettings *v1.ClientConnectionConfig) error { // informer Add callback: triggers a ServiceEntry recompute
+	log.Infof(
+		LogFormat, common.Add, common.ClientConnectionConfig, clientConnectionSettings.Name, c.ClusterID, "received") // NOTE(review): siblings use common.ReceivedStatus here — confirm values match and align
+	err := HandleEventForClientConnectionConfig(
+		ctx, admiral.Add, clientConnectionSettings, c.RemoteRegistry, c.ClusterID, modifyServiceEntryForNewServiceOrPod)
+	if err != nil {
+		return fmt.Errorf(
+			LogErrFormat, common.Add, common.ClientConnectionConfig, clientConnectionSettings.Name, c.ClusterID, err.Error())
+	}
+	return nil
+}
+
+func (c *ClientConnectionConfigHandler) Updated(
+	ctx context.Context, clientConnectionSettings *v1.ClientConnectionConfig) error { // informer Update callback: triggers a ServiceEntry recompute
+	log.Infof(
+		LogFormat, common.Update, common.ClientConnectionConfig, clientConnectionSettings.Name, c.ClusterID, common.ReceivedStatus)
+	err := HandleEventForClientConnectionConfig(
+		ctx, admiral.Update, clientConnectionSettings, c.RemoteRegistry, c.ClusterID, modifyServiceEntryForNewServiceOrPod)
+	if err != nil {
+		return fmt.Errorf(
+			LogErrFormat, common.Update, common.ClientConnectionConfig, clientConnectionSettings.Name, c.ClusterID, err.Error())
+	}
+	return nil
+}
+
+func (c *ClientConnectionConfigHandler) Deleted(
+	ctx context.Context, clientConnectionSettings *v1.ClientConnectionConfig) error { // informer Delete callback
+	log.Infof(
+		LogFormat, common.Delete, common.ClientConnectionConfig, clientConnectionSettings.Name, c.ClusterID, common.ReceivedStatus)
+	err := HandleEventForClientConnectionConfig(
+		ctx, admiral.Update, clientConnectionSettings, c.RemoteRegistry, c.ClusterID, modifyServiceEntryForNewServiceOrPod) // NOTE(review): passes admiral.Update (not Delete) — presumably to recompute the ServiceEntry after removal; confirm intent
+	if err != nil {
+		return fmt.Errorf(
+			LogErrFormat, common.Delete, common.ClientConnectionConfig, clientConnectionSettings.Name, c.ClusterID, err.Error())
+	}
+	return nil
+}
+
+func HandleEventForClientConnectionConfig(
+	ctx context.Context, event admiral.EventType, clientConnectionSettings *v1.ClientConnectionConfig,
+	registry *RemoteRegistry, clusterName string, modifySE ModifySEFunc) error {
+
+	identity := common.GetClientConnectionConfigIdentity(clientConnectionSettings)
+	if len(identity) <= 0 { // no identity label: nothing to key the ServiceEntry on, so skip (reported as an error)
+		return fmt.Errorf(
+			LogFormat, "Event", common.ClientConnectionConfig, clientConnectionSettings.Name, clusterName,
+			"skipped as label "+common.GetAdmiralCRDIdentityLabel()+" was not found, namespace="+clientConnectionSettings.Namespace)
+	}
+
+	env := common.GetClientConnectionConfigEnv(clientConnectionSettings)
+	if len(env) <= 0 { // same skip-as-error convention when the env label is missing (env is always empty in this message)
+		return fmt.Errorf(
+			LogFormat, "Event", common.ClientConnectionConfig, clientConnectionSettings.Name, clusterName,
+			"skipped as env "+env+" was not found, namespace="+clientConnectionSettings.Namespace)
+	}
+
+	ctx = context.WithValue(ctx, common.ClusterName, clusterName)
+	ctx = context.WithValue(ctx, common.EventResourceType, common.ClientConnectionConfig)
+
+	_, err := modifySE(ctx, admiral.Update, env, identity, registry) // NOTE(review): the incoming event parameter is ignored; Update is always passed — confirm this is intentional
+
+	return err
+}
From 79a47346fad754b800c1aacf80b1ccc802efb9dc Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:51:45 -0700
Subject: [PATCH 039/235] add file
admiral/pkg/clusters/clientconnectionconfig_handler_test.go
---
.../clientconnectionconfig_handler_test.go | 337 ++++++++++++++++++
1 file changed, 337 insertions(+)
create mode 100644 admiral/pkg/clusters/clientconnectionconfig_handler_test.go
diff --git a/admiral/pkg/clusters/clientconnectionconfig_handler_test.go b/admiral/pkg/clusters/clientconnectionconfig_handler_test.go
new file mode 100644
index 00000000..f665d57d
--- /dev/null
+++ b/admiral/pkg/clusters/clientconnectionconfig_handler_test.go
@@ -0,0 +1,337 @@
+package clusters
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "testing"
+
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "github.com/stretchr/testify/assert"
+ networkingAlpha3 "istio.io/api/networking/v1alpha3"
+ apiMachineryMetaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// TestHandleEventForClientConnectionConfig exercises HandleEventForClientConnectionConfig
+// across the missing-identity, happy-path, and modifySE-failure cases using a
+// table of fixtures.
+func TestHandleEventForClientConnectionConfig(t *testing.T) {
+	p := common.AdmiralParams{
+		LabelSet: &common.LabelSet{
+			EnvKey:                  "admiral.io/env",
+			AdmiralCRDIdentityLabel: "identity",
+		},
+	}
+	// ResetSync ensures InitializeConfig re-applies these params even if
+	// another test initialized the (sync.Once-guarded) config first.
+	common.ResetSync()
+	common.InitializeConfig(p)
+
+	testCases := []struct {
+		name                     string
+		ctx                      context.Context
+		clientConnectionSettings *v1.ClientConnectionConfig
+		modifySE                 ModifySEFunc
+		expectedError            error
+	}{
+		{
+			name: "Given valid params to HandleEventForClientConnectionConfig func " +
+				"When identity is not set on the ClientConnectionConfig " +
+				"Then the func should return an error",
+			clientConnectionSettings: &v1.ClientConnectionConfig{
+				ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+					Name:      "ccsName",
+					Namespace: "testns",
+					Labels: map[string]string{
+						"admiral.io/env": "testEnv",
+					},
+				},
+			},
+			expectedError: fmt.Errorf(
+				"op=Event type=ClientConnectionConfig name=ccsName cluster=testCluster message=skipped as label identity was not found, namespace=testns"),
+			ctx:      context.Background(),
+			modifySE: mockModifySE,
+		},
+		{
+			// NOTE(review): the description says admiral.io/env is not set, but
+			// the labels below DO set it — this case actually covers the happy
+			// path and duplicates the last case; confirm intent and fix either
+			// the fixture or the name.
+			name: "Given valid params to HandleEventForClientConnectionConfig func " +
+				"When admiral.io/env is not set on the ClientConnectionConfig " +
+				"Then the func should not return an error",
+			clientConnectionSettings: &v1.ClientConnectionConfig{
+				ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+					Name:      "ccsName",
+					Namespace: "testns",
+					Labels: map[string]string{
+						"admiral.io/env": "testEnv",
+						"identity":       "testId",
+					},
+				},
+			},
+			ctx:           context.Background(),
+			modifySE:      mockModifySE,
+			expectedError: nil,
+		},
+		{
+			name: "Given valid params to HandleEventForClientConnectionConfig func " +
+				"When modifySE func returns an error " +
+				"Then the func should return an error",
+			clientConnectionSettings: &v1.ClientConnectionConfig{
+				ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+					Name:      "ccsName",
+					Namespace: "testns",
+					Labels: map[string]string{
+						"admiral.io/env": "testEnv",
+						"identity":       "testId",
+					},
+				},
+			},
+			// mockModifySE fails with the message stored under "hasErrors"
+			ctx:           context.WithValue(context.Background(), "hasErrors", "modifySE failed"),
+			modifySE:      mockModifySE,
+			expectedError: fmt.Errorf("modifySE failed"),
+		},
+		{
+			name: "Given valid params to HandleEventForClientConnectionConfig func " +
+				"When modifySE func does not return any error " +
+				"Then the func should not return any error either",
+			clientConnectionSettings: &v1.ClientConnectionConfig{
+				ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+					Name:      "ccsName",
+					Namespace: "testns",
+					Labels: map[string]string{
+						"admiral.io/env": "testEnv",
+						"identity":       "testId",
+					},
+				},
+			},
+			ctx:           context.Background(),
+			modifySE:      mockModifySE,
+			expectedError: nil,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+
+			actualError := HandleEventForClientConnectionConfig(tc.ctx, common.UPDATE, tc.clientConnectionSettings, nil, "testCluster", tc.modifySE)
+			if tc.expectedError != nil {
+				if actualError == nil {
+					t.Fatalf("expected error %s but got nil", tc.expectedError.Error())
+				}
+				assert.Equal(t, tc.expectedError.Error(), actualError.Error())
+			} else {
+				if actualError != nil {
+					t.Fatalf("expected error nil but got %s", actualError.Error())
+				}
+			}
+
+		})
+	}
+
+}
+
+// TestDelete verifies clientConnectionSettingsCache.Delete for both a missing
+// and an existing "<env>.<identity>" cache entry.
+// NOTE(review): unlike TestHandleEventForClientConnectionConfig, this test does
+// not call common.ResetSync() before InitializeConfig, so these params only
+// take effect if no other test initialized the config first — confirm intent.
+func TestDelete(t *testing.T) {
+
+	p := common.AdmiralParams{
+		LabelSet: &common.LabelSet{
+			EnvKey:                  "admiral.io/env",
+			AdmiralCRDIdentityLabel: "identity",
+		},
+	}
+	common.InitializeConfig(p)
+
+	testCases := []struct {
+		name                          string
+		env                           string
+		identity                      string
+		clientConnectionSettingsCache *clientConnectionSettingsCache
+		expectedError                 error
+	}{
+		{
+			name: "Given clientConnectionSettingsCache " +
+				"When Delete func is called with clientConnectionSettings " +
+				"And the passed identity and env key is not in the cache " +
+				"Then the func should return an error",
+			env:      "foo",
+			identity: "bar",
+			clientConnectionSettingsCache: &clientConnectionSettingsCache{
+				identityCache: make(map[string]*v1.ClientConnectionConfig),
+				mutex:         &sync.RWMutex{},
+			},
+			expectedError: fmt.Errorf(
+				"clientConnectionSettings with key foo.bar not found in clientConnectionSettingsCache"),
+		},
+		{
+			name: "Given clientConnectionSettingsCache " +
+				"When Delete func is called " +
+				"And the passed identity and env key is in the cache " +
+				"Then the func should not return an error and should successfully delete the entry",
+			env:      "testEnv",
+			identity: "testId",
+			clientConnectionSettingsCache: &clientConnectionSettingsCache{
+				identityCache: map[string]*v1.ClientConnectionConfig{
+					"testEnv.testId": {
+						ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+							Name:      "ccsName",
+							Namespace: "testns",
+							Labels: map[string]string{
+								"admiral.io/env": "testEnv",
+								"identity":       "testId",
+							},
+						},
+					},
+				},
+				mutex: &sync.RWMutex{},
+			},
+			expectedError: nil,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+
+			err := tc.clientConnectionSettingsCache.Delete(tc.identity, tc.env)
+			if tc.expectedError != nil {
+				if err == nil {
+					t.Fatalf("expected error %s but got nil", tc.expectedError.Error())
+				}
+				assert.Equal(t, tc.expectedError.Error(), err.Error())
+			} else {
+				if err != nil {
+					t.Fatalf("expected nil error but got %s error", err.Error())
+				}
+				// successful delete must remove the entry under "<env>.<identity>"
+				assert.Nil(t, tc.clientConnectionSettingsCache.identityCache[tc.env+"."+tc.identity])
+			}
+
+		})
+	}
+
+}
+
+// TestPut verifies that clientConnectionSettingsCache.Put rejects entries
+// without a name and stores well-formed entries under "<env>.<identity>".
+func TestPut(t *testing.T) {
+
+	p := common.AdmiralParams{
+		LabelSet: &common.LabelSet{
+			EnvKey:                  "admiral.io/env",
+			AdmiralCRDIdentityLabel: "identity",
+		},
+	}
+	common.InitializeConfig(p)
+
+	testCases := []struct {
+		name                          string
+		clientConnectionSettings      *v1.ClientConnectionConfig
+		clientConnectionSettingsCache *clientConnectionSettingsCache
+		expectedError                 error
+	}{
+		{
+			name: "Given clientConnectionSettingsCache " +
+				"When Put func is called with clientConnectionSettings " +
+				"And the passed clientConnectionSettings is missing the name " +
+				"Then the func should return an error",
+			clientConnectionSettings: &v1.ClientConnectionConfig{
+				ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+					Namespace: "testns",
+				},
+			},
+			clientConnectionSettingsCache: &clientConnectionSettingsCache{
+				identityCache: make(map[string]*v1.ClientConnectionConfig),
+				mutex:         &sync.RWMutex{},
+			},
+			expectedError: fmt.Errorf(
+				"skipped adding to clientConnectionSettingsCache, missing name in clientConnectionSettings"),
+		},
+		{
+			// description fixed: this case passes a fully-populated resource
+			// (the previous text was a copy-paste of the error case above)
+			name: "Given clientConnectionSettingsCache " +
+				"When Put func is called with clientConnectionSettings " +
+				"And the passed clientConnectionSettings has a valid name " +
+				"Then the func should not return any error and should successfully add the entry",
+			clientConnectionSettings: &v1.ClientConnectionConfig{
+				ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+					Name:      "ccsName",
+					Namespace: "testns",
+					Labels: map[string]string{
+						"admiral.io/env": "testEnv",
+						"identity":       "testId",
+					},
+				},
+			},
+			clientConnectionSettingsCache: &clientConnectionSettingsCache{
+				identityCache: make(map[string]*v1.ClientConnectionConfig),
+				mutex:         &sync.RWMutex{},
+			},
+			expectedError: nil,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+
+			err := tc.clientConnectionSettingsCache.Put(tc.clientConnectionSettings)
+			if tc.expectedError != nil {
+				if err == nil {
+					t.Fatalf("expected error %s but got nil", tc.expectedError.Error())
+				}
+				assert.Equal(t, tc.expectedError.Error(), err.Error())
+			} else {
+				if err != nil {
+					t.Fatalf("expected nil error but got %s error", err.Error())
+				}
+				assert.Equal(t, tc.clientConnectionSettings, tc.clientConnectionSettingsCache.identityCache["testEnv.testId"])
+			}
+
+		})
+	}
+
+}
+
+// TestGetFromIdentity verifies that clientConnectionSettingsCache.GetFromIdentity
+// returns the cached entry stored under the "<env>.<identity>" key.
+func TestGetFromIdentity(t *testing.T) {
+
+	cachedEntry := &v1.ClientConnectionConfig{
+		ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+			Name:      "ccsName",
+			Namespace: "testns",
+			Labels: map[string]string{
+				"admiral.io/env": "testEnv",
+				"identity":       "testId",
+			},
+		},
+	}
+
+	testCases := []struct {
+		name                          string
+		identity                      string
+		env                           string
+		clientConnectionSettingsCache *clientConnectionSettingsCache
+	}{
+		{
+			name: "Given clientConnectionSettingsCache " +
+				"When GetFromIdentity func is called with valid identity and env " +
+				"Then the func should return clientConnectionSettings from cache",
+			identity: "testId",
+			env:      "testEnv",
+			clientConnectionSettingsCache: &clientConnectionSettingsCache{
+				identityCache: map[string]*v1.ClientConnectionConfig{
+					"testEnv.testId": cachedEntry,
+				},
+				mutex: &sync.RWMutex{},
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			got, lookupErr := tc.clientConnectionSettingsCache.GetFromIdentity(tc.identity, tc.env)
+			assert.Nil(t, lookupErr)
+			assert.Equal(t, cachedEntry, got)
+		})
+	}
+
+}
+
+// mockModifySE is a ModifySEFunc stub: it fails with the message stored under
+// the "hasErrors" context key when present, and otherwise succeeds with a nil
+// ServiceEntry map.
+func mockModifySE(ctx context.Context, event admiral.EventType, env string,
+	sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*networkingAlpha3.ServiceEntry, error) {
+
+	injected := ctx.Value("hasErrors")
+	if injected == nil {
+		return nil, nil
+	}
+	return nil, fmt.Errorf(injected.(string))
+}
From 519102e38aae2266e6fd96b3c0529f449bb6a841 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:51:48 -0700
Subject: [PATCH 040/235] add file
admiral/pkg/clusters/clusterIdentitySyncer.go
---
admiral/pkg/clusters/clusterIdentitySyncer.go | 44 +++++++++++++++++++
1 file changed, 44 insertions(+)
create mode 100644 admiral/pkg/clusters/clusterIdentitySyncer.go
diff --git a/admiral/pkg/clusters/clusterIdentitySyncer.go b/admiral/pkg/clusters/clusterIdentitySyncer.go
new file mode 100644
index 00000000..a9beaa62
--- /dev/null
+++ b/admiral/pkg/clusters/clusterIdentitySyncer.go
@@ -0,0 +1,44 @@
+package clusters
+
+import (
+ "fmt"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/registry"
+ log "github.com/sirupsen/logrus"
+)
+
+// updateClusterIdentityCache records, for every source cluster, that
+// `identity` is a source asset on that cluster, and that each of its
+// destination assets (from the SourceToDestinations cache) is a non-source
+// asset on the same cluster.
+//
+// It returns an error when the remote registry or its caches are not
+// initialized, or the first error from the identity store update.
+func updateClusterIdentityCache(
+	remoteRegistry *RemoteRegistry,
+	sourceClusters []string,
+	identity string) error {
+
+	if remoteRegistry == nil {
+		return fmt.Errorf("remote registry is not initialized")
+	}
+	if remoteRegistry.AdmiralCache == nil {
+		return fmt.Errorf("admiral cache is not initialized")
+	}
+	if remoteRegistry.AdmiralCache.SourceToDestinations == nil {
+		return fmt.Errorf("source to destination cache is not populated")
+	}
+	// find assets this identity needs to call
+	destinationAssets := remoteRegistry.AdmiralCache.SourceToDestinations.Get(identity)
+	// the source identity is loop-invariant: build it once, not per cluster
+	sourceClusterIdentity := registry.NewClusterIdentity(identity, true)
+	for _, cluster := range sourceClusters {
+		if err := remoteRegistry.ClusterIdentityStoreHandler.AddUpdateIdentityToCluster(sourceClusterIdentity, cluster); err != nil {
+			return err
+		}
+		for _, destinationAsset := range destinationAssets {
+			destinationClusterIdentity := registry.NewClusterIdentity(destinationAsset, false)
+			if err := remoteRegistry.ClusterIdentityStoreHandler.AddUpdateIdentityToCluster(destinationClusterIdentity, cluster); err != nil {
+				return err
+			}
+		}
+	}
+	log.Infof("source asset=%s is present in clusters=%v, and has destinations=%v",
+		identity, sourceClusters, destinationAssets)
+	return nil
+}
From 17d1fd265a9b17eb5d32ea3c80990be816f8f27f Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:51:51 -0700
Subject: [PATCH 041/235] add file
admiral/pkg/clusters/clusterIdentitySyncer_test.go
---
.../clusters/clusterIdentitySyncer_test.go | 122 ++++++++++++++++++
1 file changed, 122 insertions(+)
create mode 100644 admiral/pkg/clusters/clusterIdentitySyncer_test.go
diff --git a/admiral/pkg/clusters/clusterIdentitySyncer_test.go b/admiral/pkg/clusters/clusterIdentitySyncer_test.go
new file mode 100644
index 00000000..8b54af43
--- /dev/null
+++ b/admiral/pkg/clusters/clusterIdentitySyncer_test.go
@@ -0,0 +1,122 @@
+package clusters
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+ "testing"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/registry"
+)
+
+// TestUpdateClusterIdentityState verifies updateClusterIdentityCache: the
+// nil-guard error paths and the happy path where the source identity and its
+// destination assets are registered against the source cluster.
+func TestUpdateClusterIdentityState(t *testing.T) {
+	var (
+		sourceCluster1     = "cluster1"
+		foobarIdentity     = "intuit.foobar.service"
+		helloWorldIdentity = "intuit.helloworld.service"
+		// shared across cases; the happy-path assertFunc below reads back the
+		// identity store that the function under test mutates
+		remoteRegistryHappyCase = &RemoteRegistry{
+			ClusterIdentityStoreHandler: registry.NewClusterIdentityStoreHandler(),
+			AdmiralCache: &AdmiralCache{
+				SourceToDestinations: &sourceToDestinations{
+					sourceDestinations: map[string][]string{
+						foobarIdentity: {helloWorldIdentity},
+					},
+					mutex: &sync.Mutex{},
+				},
+			},
+		}
+	)
+	cases := []struct {
+		name           string
+		remoteRegistry *RemoteRegistry
+		sourceClusters []string
+		assertFunc     func() error
+		expectedErr    error
+	}{
+		{
+			name: "Given remote registry is empty, " +
+				"When the function is called, " +
+				"It should return an error",
+			expectedErr: fmt.Errorf("remote registry is not initialized"),
+		},
+		{
+			name: "Given remote registry admiral cache is empty, " +
+				"When the function is called, " +
+				"It should return an error",
+			remoteRegistry: &RemoteRegistry{},
+			expectedErr:    fmt.Errorf("admiral cache is not initialized"),
+		},
+		{
+			name: "Given source to destination cache is empty, " +
+				"When the function is called, " +
+				"It should return an error",
+			remoteRegistry: &RemoteRegistry{
+				AdmiralCache: &AdmiralCache{},
+			},
+			expectedErr: fmt.Errorf("source to destination cache is not populated"),
+		},
+		{
+			name: "Given all caches are initialized, " +
+				"When the function is called for an asset '" + foobarIdentity + "', which is present in cluster A, " +
+				"And which has 1 destination asset '" + helloWorldIdentity + "', " +
+				"It should update the cluster identity, such that, " +
+				"cluster A has two assets - '" + foobarIdentity + "' as a source asset, " +
+				"and '" + helloWorldIdentity + "' as a regular asset",
+			sourceClusters: []string{sourceCluster1},
+			remoteRegistry: remoteRegistryHappyCase,
+			// checks the side effect: both identities recorded on the source
+			// cluster with the expected SourceIdentity flags
+			assertFunc: func() error {
+				identityStore, err := remoteRegistryHappyCase.ClusterIdentityStoreHandler.GetAllIdentitiesForCluster(sourceCluster1)
+				if err != nil {
+					return err
+				}
+				if len(identityStore.Store) != 2 {
+					return fmt.Errorf("expected two identities, got=%v", len(identityStore.Store))
+				}
+				var (
+					foundFoobar     bool
+					foundHelloWorld bool
+				)
+				for identity, clusterIdentity := range identityStore.Store {
+					if identity == foobarIdentity {
+						if !clusterIdentity.SourceIdentity {
+							return fmt.Errorf("expected '%s' to be a source identity, but it was not", foobarIdentity)
+						}
+						foundFoobar = true
+					}
+					if identity == helloWorldIdentity {
+						if clusterIdentity.SourceIdentity {
+							return fmt.Errorf("expected '%s' to be a regular identity, but it was a source identity", helloWorldIdentity)
+						}
+						foundHelloWorld = true
+					}
+				}
+				if !foundFoobar {
+					return fmt.Errorf("expected to find 'foobar', but it was not found")
+				}
+				if !foundHelloWorld {
+					return fmt.Errorf("expected to find 'helloWorld', but it was not found")
+				}
+				return nil
+			},
+			expectedErr: nil,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			err := updateClusterIdentityCache(
+				c.remoteRegistry, c.sourceClusters, foobarIdentity,
+			)
+			if !reflect.DeepEqual(err, c.expectedErr) {
+				t.Errorf("got=%v, want=%v", err, c.expectedErr)
+			}
+			if c.expectedErr == nil && c.assertFunc != nil {
+				// validate the configuration got updated
+				err = c.assertFunc()
+				if err != nil {
+					t.Errorf("got=%v, want=nil", err)
+				}
+			}
+		})
+	}
+}
From 4497eec6c56d99798f4b5b3dbdca0778e8218b2d Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:51:54 -0700
Subject: [PATCH 042/235] add file admiral/pkg/clusters/clusters.go
---
admiral/pkg/clusters/clusters.go | 11 +++++++++++
1 file changed, 11 insertions(+)
create mode 100644 admiral/pkg/clusters/clusters.go
diff --git a/admiral/pkg/clusters/clusters.go b/admiral/pkg/clusters/clusters.go
new file mode 100644
index 00000000..1875f325
--- /dev/null
+++ b/admiral/pkg/clusters/clusters.go
@@ -0,0 +1,11 @@
+package clusters
+
+// Package-wide constants for the clusters package.
+const (
+	// ReadWriteEnabled / ReadOnlyEnabled describe the read-only state of this
+	// Admiral instance (true means read-only) — confirm against callers.
+	ReadWriteEnabled = false
+	ReadOnlyEnabled = true
+	// StateNotInitialized / StateInitialized track whether the DR state has
+	// been initialized — presumably used by the state checkers below; confirm.
+	StateNotInitialized = false
+	StateInitialized = true
+	// Registered names of the dynamodb-based ignore-list and DR state checkers.
+	ignoreIdentityChecker = "dynamodbbasedignoreidentitylistchecker"
+	drStateChecker = "dynamodbbasedstatechecker"
+	// AdmiralLeaseTableName is the dynamodb table holding Admiral leases.
+	AdmiralLeaseTableName = "admiral-lease"
+)
From 57901c9cc8839640ba6be12b3607823b3875fb5a Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:51:57 -0700
Subject: [PATCH 043/235] add file admiral/pkg/clusters/dependency_handler.go
---
admiral/pkg/clusters/dependency_handler.go | 235 +++++++++++++++++++++
1 file changed, 235 insertions(+)
create mode 100644 admiral/pkg/clusters/dependency_handler.go
diff --git a/admiral/pkg/clusters/dependency_handler.go b/admiral/pkg/clusters/dependency_handler.go
new file mode 100644
index 00000000..6338e70f
--- /dev/null
+++ b/admiral/pkg/clusters/dependency_handler.go
@@ -0,0 +1,235 @@
+package clusters
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ log "github.com/sirupsen/logrus"
+)
+
+// DestinationServiceProcessor generates/updates service discovery resources
+// (SE/DR/VS) for the destinations of a Dependency record.
+type DestinationServiceProcessor interface {
+	Process(ctx context.Context, dependency *v1.Dependency,
+		remoteRegistry *RemoteRegistry, eventType admiral.EventType,
+		modifySE ModifySEFunc) error
+}
+// ProcessDestinationService is the default DestinationServiceProcessor.
+type ProcessDestinationService struct {
+}
+// DependencyHandler reacts to Dependency CRD events and drives processing of
+// the record's destination services through DestinationServiceProcessor.
+type DependencyHandler struct {
+	RemoteRegistry              *RemoteRegistry
+	DepController               *admiral.DependencyController
+	DestinationServiceProcessor DestinationServiceProcessor
+}
+
+// Added handles creation of a Dependency resource by delegating to
+// HandleDependencyRecord with an Add event.
+func (dh *DependencyHandler) Added(ctx context.Context, obj *v1.Dependency) error {
+	log.Debugf(LogFormat, common.Add, common.DependencyResourceType, obj.Name, "", common.ReceivedStatus)
+	return dh.HandleDependencyRecord(ctx, obj, dh.RemoteRegistry, admiral.Add)
+}
+
+// Updated handles modification of a Dependency resource by delegating to
+// HandleDependencyRecord with an Update event.
+//
+// TODO(review): removals are not handled — an update that drops a destination
+// should first diff against the previous record and clean up the corresponding
+// ServiceEntry, but only after verifying that no other service still depends
+// on that same service in the same cluster.
+func (dh *DependencyHandler) Updated(ctx context.Context, obj *v1.Dependency) error {
+	log.Debugf(LogFormat, common.Update, common.DependencyResourceType, obj.Name, "", common.ReceivedStatus)
+	return dh.HandleDependencyRecord(ctx, obj, dh.RemoteRegistry, admiral.Update)
+}
+
+// Deleted intentionally skips processing: deleting a dependency record would
+// require looping over every SE it contributed to and verifying that no other
+// service relies on the same SE in the same cluster before cleanup, which is
+// not implemented.
+func (dh *DependencyHandler) Deleted(ctx context.Context, obj *v1.Dependency) error {
+	log.Debugf(LogFormat, common.Delete, common.DependencyResourceType, obj.Name, "", "Skipping Delete operation")
+	return nil
+}
+
+func (dh *DependencyHandler) HandleDependencyRecord(ctx context.Context, obj *v1.Dependency,
+ remoteRegistry *RemoteRegistry, eventType admiral.EventType) error {
+ sourceIdentity := obj.Spec.Source
+ if len(sourceIdentity) == 0 {
+ log.Infof(LogFormat, string(eventType), common.DependencyResourceType, obj.Name, "", "No identity found namespace="+obj.Namespace)
+ return nil
+ }
+
+ err := updateIdentityDependencyCache(sourceIdentity, remoteRegistry.AdmiralCache.IdentityDependencyCache, obj)
+ if err != nil {
+ log.Errorf(LogErrFormat, string(eventType), common.DependencyResourceType, obj.Name, "", "error adding into dependency cache ="+err.Error())
+ return err
+ }
+
+ log.Debugf(LogFormat, string(eventType), common.DependencyResourceType, obj.Name, "", fmt.Sprintf("added destinations to admiral sourceToDestinations cache. destinationsLength=%d", len(obj.Spec.Destinations)))
+
+ var handleDepRecordErrors error
+
+ // Generate SE/DR/VS for all newly added destination services in the source's cluster
+ err = dh.DestinationServiceProcessor.Process(ctx,
+ obj,
+ remoteRegistry,
+ eventType,
+ modifyServiceEntryForNewServiceOrPod)
+ if err != nil {
+ log.Errorf(LogErrFormat, string(eventType),
+ common.DependencyResourceType, obj.Name, "", err.Error())
+ handleDepRecordErrors = common.AppendError(handleDepRecordErrors, err)
+ // This will be re-queued and retried
+ return handleDepRecordErrors
+ }
+
+ remoteRegistry.AdmiralCache.SourceToDestinations.put(obj)
+ return handleDepRecordErrors
+}
+
+// isIdentityMeshEnabled reports whether the identity is known to the mesh,
+// i.e. has at least one cluster recorded in IdentityClusterCache.
+func isIdentityMeshEnabled(identity string, remoteRegistry *RemoteRegistry) bool {
+	// return the comparison directly instead of if/return true/return false
+	return remoteRegistry.AdmiralCache.IdentityClusterCache.Get(identity) != nil
+}
+
+// getDestinationsToBeProcessed returns the destinations of the updated
+// dependency record that are not yet present in the SourceToDestinations
+// cache, plus a flag indicating whether any destination in the record is not
+// mesh-enabled (no entry in IdentityClusterCache).
+func getDestinationsToBeProcessed(
+	updatedDependency *v1.Dependency, remoteRegistry *RemoteRegistry) ([]string, bool) {
+	var hasNonMeshDestination bool
+
+	// index the destinations already recorded for this source
+	alreadyKnown := make(map[string]bool)
+	for _, known := range remoteRegistry.AdmiralCache.SourceToDestinations.Get(updatedDependency.Spec.Source) {
+		alreadyKnown[known] = true
+	}
+
+	newDestinations := make([]string, 0)
+	for _, candidate := range updatedDependency.Spec.Destinations {
+		if !isIdentityMeshEnabled(candidate, remoteRegistry) {
+			hasNonMeshDestination = true
+		}
+		if !alreadyKnown[candidate] {
+			newDestinations = append(newDestinations, candidate)
+		}
+	}
+	return newDestinations, hasNonMeshDestination
+}
+
+// Process generates SE/DR/VS for each newly added destination of the given
+// dependency record, in every cluster where the destination identity runs.
+//
+// Processing is skipped entirely during cache warm-up, when dependency
+// processing is disabled, or when the source identity has no known clusters
+// yet (the deployment/rollout controllers will process it later). Per-cluster
+// failures are accumulated into the returned error so that one bad destination
+// does not block the rest.
+func (d *ProcessDestinationService) Process(ctx context.Context, dependency *v1.Dependency,
+	remoteRegistry *RemoteRegistry, eventType admiral.EventType, modifySE ModifySEFunc) error {
+
+	if IsCacheWarmupTimeForDependency(remoteRegistry) {
+		log.Debugf(LogFormat, string(eventType), common.DependencyResourceType, dependency.Name, "", "processing skipped during cache warm up state")
+		return nil
+	}
+
+	if !common.IsDependencyProcessingEnabled() {
+		log.Infof(LogFormat, string(eventType), common.DependencyResourceType, dependency.Name, "", "dependency processing is disabled")
+		return nil
+	}
+
+	destinations, hasNonMeshDestination := getDestinationsToBeProcessed(dependency, remoteRegistry)
+	log.Infof(LogFormat, string(eventType), common.DependencyResourceType, dependency.Name, "", fmt.Sprintf("found %d new destinations: %v", len(destinations), destinations))
+
+	var processingErrors error
+	var message string
+	counter := 1
+	totalDestinations := len(destinations)
+	// find source cluster for source identity
+	sourceClusters := remoteRegistry.AdmiralCache.IdentityClusterCache.Get(dependency.Spec.Source)
+	if sourceClusters == nil {
+		// Identity cluster cache does not have entry for identity because
+		// the rollout/deployment event hasn't gone through yet.
+		// This can be ignored, and not be added back to the dependency controller queue
+		// because it will be processed by the rollout/deployment controller
+		log.Infof(LogFormat, string(eventType), common.DependencyResourceType, dependency.Name, "", fmt.Sprintf("identity: %s, does not have any clusters. Skipping calling modifySE", dependency.Spec.Source))
+		return nil
+	}
+
+	for _, destinationIdentity := range destinations {
+		if strings.Contains(strings.ToLower(destinationIdentity), strings.ToLower(common.ServicesGatewayIdentity)) &&
+			!hasNonMeshDestination {
+			log.Infof(LogFormat, string(eventType), common.DependencyResourceType, dependency.Name, "",
+				fmt.Sprintf("All destinations are MESH enabled. Skipping processing: %v. Destinations: %v", destinationIdentity, dependency.Spec.Destinations))
+			continue
+		}
+
+		// In case of self on-boarding skip the update for the destination as it is the same as the source
+		if strings.EqualFold(dependency.Spec.Source, destinationIdentity) {
+			log.Infof(LogFormat, string(eventType), common.DependencyResourceType, dependency.Name, "",
+				fmt.Sprintf("Destination identity is same as source identity. Skipping processing: %v.", destinationIdentity))
+			continue
+		}
+
+		// A destination without a known cluster is either NON MESH (modifySE
+		// would always fail) or its deployment/rollout event has not populated
+		// IdentityClusterCache yet (that event will trigger modifySE itself) —
+		// skip it here either way.
+		// NOTE(review): a second, identical IdentityClusterCache lookup
+		// (`clusters`) with an unreachable nil-check was removed from here.
+		destinationClusters := remoteRegistry.AdmiralCache.IdentityClusterCache.Get(destinationIdentity)
+		log.Infof(LogFormat, string(eventType), common.DependencyResourceType, dependency.Name, "", fmt.Sprintf("processing destination %d/%d destinationIdentity=%s", counter, totalDestinations, destinationIdentity))
+		if destinationClusters == nil || destinationClusters.Len() == 0 {
+			listOfSourceClusters := strings.Join(sourceClusters.GetKeys(), ",")
+			log.Infof(LogFormat, string(eventType), common.DependencyResourceType, dependency.Name, listOfSourceClusters,
+				fmt.Sprintf("destinationClusters does not have any clusters. Skipping processing: %v.", destinationIdentity))
+			continue
+		}
+
+		for _, destinationClusterID := range destinationClusters.GetKeys() {
+			message = fmt.Sprintf("processing cluster=%s for destinationIdentity=%s", destinationClusterID, destinationIdentity)
+			log.Infof(LogFormat, string(eventType), common.DependencyResourceType, dependency.Name, "", message)
+			rc := remoteRegistry.GetRemoteController(destinationClusterID)
+			if rc == nil {
+				processingErrors = common.AppendError(processingErrors,
+					fmt.Errorf("no remote controller found in cache for cluster %s", destinationClusterID))
+				continue
+			}
+			ctx = context.WithValue(ctx, "clusterName", destinationClusterID)
+
+			if rc.DeploymentController != nil {
+				deploymentEnvMap := rc.DeploymentController.Cache.GetByIdentity(destinationIdentity)
+				if len(deploymentEnvMap) != 0 {
+					ctx = context.WithValue(ctx, "eventResourceType", common.Deployment)
+					ctx = context.WithValue(ctx, common.DependentClusterOverride, sourceClusters)
+					for env := range deploymentEnvMap {
+						message = fmt.Sprintf("calling modifySE for env=%s destinationIdentity=%s", env, destinationIdentity)
+						log.Infof(LogFormat, string(eventType), common.DependencyResourceType, dependency.Name, "", message)
+						_, err := modifySE(ctx, eventType, env, destinationIdentity, remoteRegistry)
+						if err != nil {
+							message = fmt.Sprintf("error occurred in modifySE func for env=%s destinationIdentity=%s", env, destinationIdentity)
+							log.Errorf(LogErrFormat, string(eventType), common.DependencyResourceType, dependency.Name, "", err.Error()+". "+message)
+							processingErrors = common.AppendError(processingErrors, err)
+						}
+					}
+					continue
+				}
+			}
+			if rc.RolloutController != nil {
+				rolloutEnvMap := rc.RolloutController.Cache.GetByIdentity(destinationIdentity)
+				if len(rolloutEnvMap) != 0 {
+					ctx = context.WithValue(ctx, "eventResourceType", common.Rollout)
+					ctx = context.WithValue(ctx, common.DependentClusterOverride, sourceClusters)
+					for env := range rolloutEnvMap {
+						message = fmt.Sprintf("calling modifySE for env=%s destinationIdentity=%s", env, destinationIdentity)
+						log.Infof(LogFormat, string(eventType), common.DependencyResourceType, dependency.Name, "", message)
+						_, err := modifySE(ctx, eventType, env, destinationIdentity, remoteRegistry)
+						if err != nil {
+							message = fmt.Sprintf("error occurred in modifySE func for env=%s destinationIdentity=%s", env, destinationIdentity)
+							log.Errorf(LogErrFormat, string(eventType), common.DependencyResourceType, dependency.Name, "", err.Error()+". "+message)
+							processingErrors = common.AppendError(processingErrors, err)
+						}
+					}
+					continue
+				}
+			}
+			// Reached only when neither controller produced a non-empty env map
+			// (processed clusters `continue` above). Note that `counter` is
+			// incremented per *cluster* on this path, not per destination,
+			// even though the progress log reads "destination %d/%d" —
+			// presumably unintended; confirm before changing.
+			log.Infof(LogFormat, string(eventType), common.DependencyResourceType, dependency.Name, "", fmt.Sprintf("done processing destinationIdentity=%s", destinationIdentity))
+			log.Warnf(LogFormat, string(eventType), common.DependencyResourceType, dependency.Name, "",
+				fmt.Sprintf("neither deployment or rollout controller initialized in cluster %s and destination identity %s", destinationClusterID, destinationIdentity))
+			counter++
+		}
+	}
+	return processingErrors
+}
From f3affd7404cedece064959b94c801a46e14bab30 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:52:00 -0700
Subject: [PATCH 044/235] add file
admiral/pkg/clusters/dependency_handler_test.go
---
.../pkg/clusters/dependency_handler_test.go | 696 ++++++++++++++++++
1 file changed, 696 insertions(+)
create mode 100644 admiral/pkg/clusters/dependency_handler_test.go
diff --git a/admiral/pkg/clusters/dependency_handler_test.go b/admiral/pkg/clusters/dependency_handler_test.go
new file mode 100644
index 00000000..4679115b
--- /dev/null
+++ b/admiral/pkg/clusters/dependency_handler_test.go
@@ -0,0 +1,696 @@
+package clusters
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model"
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "github.com/stretchr/testify/assert"
+ "istio.io/api/networking/v1alpha3"
+ k8sAppsV1 "k8s.io/api/apps/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// MockDestinationServiceProcessor is a test double for the destination service
+// processor; it records how many times Process was invoked.
+type MockDestinationServiceProcessor struct {
+	invocation int // number of times Process has been called
+}
+
+// Process records the invocation and always succeeds without doing any work.
+func (m *MockDestinationServiceProcessor) Process(ctx context.Context, dependency *v1.Dependency,
+	remoteRegistry *RemoteRegistry, eventType admiral.EventType, modifySE ModifySEFunc) error {
+	m.invocation++
+	return nil
+}
+
+// TestProcessDestinationService exercises ProcessDestinationService.Process
+// across cache-warmup, feature-flag, deployment-cache, rollout-cache, and
+// ServicesGatewayIdentity scenarios using a table-driven layout. Each case
+// supplies its own RemoteRegistry state and a modifySE stub that either
+// succeeds, fails, or asserts which identities it must (not) be called for.
+func TestProcessDestinationService(t *testing.T) {
+
+	admiralParams := common.AdmiralParams{
+		CacheReconcileDuration: 10 * time.Minute,
+		LabelSet: &common.LabelSet{
+			EnvKey: "env",
+		},
+	}
+	// identity -> cluster mappings; presence in this cache marks an identity as mesh-enabled
+	identityClusterCache := common.NewMapOfMaps()
+	identityClusterCache.Put("foo", "testCluster", "testCluster")
+	identityClusterCache.Put("bar", "testCluster", "testCluster")
+	identityClusterCache.Put("testSource", "testCluster", "testCluster")
+	identityClusterCache.Put("testSource", "testCluster1", "testCluster1")
+
+	identityClusterCacheWithOnlyTestSource := common.NewMapOfMaps()
+	identityClusterCacheWithOnlyTestSource.Put("testSource", "testCluster", "testCluster")
+
+	// variant that also contains the ServicesGatewayIdentity (both cases) for the gateway tests
+	identityClusterCacheWithServicesGateway := common.NewMapOfMaps()
+	identityClusterCacheWithServicesGateway.Put("foo", "testCluster", "testCluster")
+	identityClusterCacheWithServicesGateway.Put("bar", "testCluster", "testCluster")
+	identityClusterCacheWithServicesGateway.Put("testSource", "testCluster", "testCluster")
+	identityClusterCacheWithServicesGateway.Put(common.ServicesGatewayIdentity, "testCluster", "testCluster")
+	identityClusterCacheWithServicesGateway.Put(strings.ToLower(common.ServicesGatewayIdentity), "testCluster", "testCluster")
+
+	// variant where "bar" is missing, i.e. NOT mesh-enabled
+	identityClusterCacheWithServicesGatewayAndFoo := common.NewMapOfMaps()
+	identityClusterCacheWithServicesGatewayAndFoo.Put("foo", "testCluster", "testCluster")
+	identityClusterCacheWithServicesGatewayAndFoo.Put("testSource", "testCluster", "testCluster")
+	identityClusterCacheWithServicesGatewayAndFoo.Put(common.ServicesGatewayIdentity, "testCluster", "testCluster")
+
+	// pre-populated deployment cache: "foo" and "bar" exist as deployments in env "stage"
+	deploymentCache := admiral.NewDeploymentCache()
+	deploymentCache.UpdateDeploymentToClusterCache("foo", &k8sAppsV1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Annotations: map[string]string{"env": "stage"},
+		},
+	})
+	deploymentCache.UpdateDeploymentToClusterCache("bar", &k8sAppsV1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Annotations: map[string]string{"env": "stage"},
+		},
+	})
+
+	// pre-populated rollout cache mirroring the deployment cache
+	rolloutCache := admiral.NewRolloutCache()
+	rolloutCache.UpdateRolloutToClusterCache("foo", &v1alpha1.Rollout{
+		ObjectMeta: metav1.ObjectMeta{
+			Annotations: map[string]string{"env": "stage"},
+		},
+	})
+	rolloutCache.UpdateRolloutToClusterCache("bar", &v1alpha1.Rollout{
+		ObjectMeta: metav1.ObjectMeta{
+			Annotations: map[string]string{"env": "stage"},
+		},
+	})
+
+	testCases := []struct {
+		name                          string
+		dependency                    *v1.Dependency
+		modifySEFunc                  ModifySEFunc
+		remoteRegistry                *RemoteRegistry
+		isDependencyProcessingEnabled bool
+		expectedError                 error
+	}{
+		{
+			name: "Given valid params " +
+				"When admiral is in cache warmup state " +
+				"Then the func should just return without processing and no errors",
+			remoteRegistry: &RemoteRegistry{
+				StartTime: time.Now(),
+			},
+			dependency: &v1.Dependency{
+				ObjectMeta: metav1.ObjectMeta{Name: "testDepRec"},
+			},
+			modifySEFunc: func(ctx context.Context, event admiral.EventType, env string,
+				sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*v1alpha3.ServiceEntry, error) {
+				return nil, nil
+			},
+			expectedError: nil,
+		},
+		{
+			name: "Given valid params " +
+				"When dependency processing is disabled " +
+				"Then the func should just return without processing and no errors",
+			remoteRegistry: &RemoteRegistry{
+				StartTime: time.Now().Add(-time.Minute * 15),
+			},
+			dependency: &v1.Dependency{
+				ObjectMeta: metav1.ObjectMeta{Name: "testDepRec"},
+			},
+			modifySEFunc: func(ctx context.Context, event admiral.EventType, env string,
+				sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*v1alpha3.ServiceEntry, error) {
+				return nil, nil
+			},
+			isDependencyProcessingEnabled: false,
+			expectedError:                 nil,
+		},
+		{
+			name: "Given valid params " +
+				"When destination identity is not in IdentityClusterCache " +
+				"Then the func should not return an error",
+			remoteRegistry: &RemoteRegistry{
+				StartTime: time.Now().Add(-time.Minute * 30),
+				AdmiralCache: &AdmiralCache{
+					IdentityClusterCache: identityClusterCacheWithOnlyTestSource,
+					SourceToDestinations: &sourceToDestinations{
+						sourceDestinations: map[string][]string{"testSource": {"foo"}},
+						mutex:              &sync.Mutex{},
+					},
+				},
+			},
+			dependency: &v1.Dependency{
+				ObjectMeta: metav1.ObjectMeta{Name: "testDepRec"},
+				Spec: model.Dependency{
+					Source:       "testSource",
+					Destinations: []string{"foo", "bar"},
+				},
+			},
+			modifySEFunc: func(ctx context.Context, event admiral.EventType, env string,
+				sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*v1alpha3.ServiceEntry, error) {
+				return nil, nil
+			},
+			isDependencyProcessingEnabled: true,
+			expectedError:                 nil,
+		},
+		{
+			name: "Given valid params " +
+				"When destination identity's cluster is not in remote controller cache " +
+				"Then the func should NOT return an error",
+			remoteRegistry: &RemoteRegistry{
+				StartTime: time.Now().Add(-time.Minute * 30),
+				AdmiralCache: &AdmiralCache{
+					IdentityClusterCache: identityClusterCache,
+					SourceToDestinations: &sourceToDestinations{
+						sourceDestinations: map[string][]string{"testSource": {"foo", "bar"}},
+						mutex:              &sync.Mutex{},
+					},
+				},
+				remoteControllers: map[string]*RemoteController{},
+			},
+			dependency: &v1.Dependency{
+				ObjectMeta: metav1.ObjectMeta{Name: "testDepRec"},
+				Spec: model.Dependency{
+					Source:       "testSource",
+					Destinations: []string{"foo"},
+				},
+			},
+			modifySEFunc: func(ctx context.Context, event admiral.EventType, env string,
+				sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*v1alpha3.ServiceEntry, error) {
+				return nil, nil
+			},
+			isDependencyProcessingEnabled: true,
+			expectedError:                 nil,
+		},
+		{
+			name: "Given valid params " +
+				"When destination identity is in the deployment controller cache " +
+				"And the modifySE func returns an error " +
+				"Then the func should return an error",
+			remoteRegistry: &RemoteRegistry{
+				StartTime: time.Now().Add(-time.Minute * 30),
+				AdmiralCache: &AdmiralCache{
+					IdentityClusterCache: identityClusterCache,
+					SourceToDestinations: &sourceToDestinations{
+						sourceDestinations: map[string][]string{"testSource": {"foo"}},
+						mutex:              &sync.Mutex{},
+					},
+				},
+				remoteControllers: map[string]*RemoteController{
+					"testCluster": {
+						DeploymentController: &admiral.DeploymentController{
+							Cache: deploymentCache,
+						},
+					},
+				},
+			},
+			dependency: &v1.Dependency{
+				ObjectMeta: metav1.ObjectMeta{Name: "testDepRec"},
+				Spec: model.Dependency{
+					Source:       "testSource",
+					Destinations: []string{"foo", "bar"},
+				},
+			},
+			modifySEFunc: func(ctx context.Context, event admiral.EventType, env string,
+				sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*v1alpha3.ServiceEntry, error) {
+				return nil, fmt.Errorf("error occurred while processing the deployment")
+			},
+			isDependencyProcessingEnabled: true,
+			expectedError:                 fmt.Errorf("error occurred while processing the deployment"),
+		},
+		{
+			name: "Given valid params " +
+				"When destination identity is in the rollout controller cache " +
+				"And the modifySE func returns an error " +
+				"Then the func should return an error",
+			remoteRegistry: &RemoteRegistry{
+				StartTime: time.Now().Add(-time.Minute * 30),
+				AdmiralCache: &AdmiralCache{
+					IdentityClusterCache: identityClusterCache,
+					SourceToDestinations: &sourceToDestinations{
+						sourceDestinations: map[string][]string{"testSource": {"foo"}},
+						mutex:              &sync.Mutex{},
+					},
+				},
+				remoteControllers: map[string]*RemoteController{
+					"testCluster": {
+						DeploymentController: &admiral.DeploymentController{
+							Cache: admiral.NewDeploymentCache(),
+						},
+						RolloutController: &admiral.RolloutController{
+							Cache: rolloutCache,
+						},
+					},
+				},
+			},
+			dependency: &v1.Dependency{
+				ObjectMeta: metav1.ObjectMeta{Name: "testDepRec"},
+				Spec: model.Dependency{
+					Source:       "testSource",
+					Destinations: []string{"foo", "bar"},
+				},
+			},
+			modifySEFunc: func(ctx context.Context, event admiral.EventType, env string,
+				sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*v1alpha3.ServiceEntry, error) {
+				return nil, fmt.Errorf("error occurred while processing the rollout")
+			},
+			isDependencyProcessingEnabled: true,
+			expectedError:                 fmt.Errorf("error occurred while processing the rollout"),
+		},
+		{
+			name: "Given valid params " +
+				"When destination identity is in the rollout controller cache " +
+				"And the modifySE func returns successfully " +
+				"Then the func should not return an error",
+			remoteRegistry: &RemoteRegistry{
+				StartTime: time.Now().Add(-time.Minute * 30),
+				AdmiralCache: &AdmiralCache{
+					IdentityClusterCache: identityClusterCache,
+					SourceToDestinations: &sourceToDestinations{
+						sourceDestinations: map[string][]string{"testSource": {"foo", "bar"}},
+						mutex:              &sync.Mutex{},
+					},
+				},
+				remoteControllers: map[string]*RemoteController{
+					"testCluster": {
+						DeploymentController: &admiral.DeploymentController{
+							Cache: admiral.NewDeploymentCache(),
+						},
+						RolloutController: &admiral.RolloutController{
+							Cache: rolloutCache,
+						},
+					},
+				},
+			},
+			dependency: &v1.Dependency{
+				ObjectMeta: metav1.ObjectMeta{Name: "testSource"},
+				Spec: model.Dependency{
+					Source:       "testSource",
+					Destinations: []string{"foo"},
+				},
+			},
+			modifySEFunc: func(ctx context.Context, event admiral.EventType, env string,
+				sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*v1alpha3.ServiceEntry, error) {
+				return nil, nil
+			},
+			isDependencyProcessingEnabled: true,
+			expectedError:                 nil,
+		},
+		{
+			name: "Given valid params " +
+				"When destination identity is in the deployment controller cache " +
+				"And the modifySE func returns successfully " +
+				"Then the func should not return an error",
+			remoteRegistry: &RemoteRegistry{
+				StartTime: time.Now().Add(-time.Minute * 30),
+				AdmiralCache: &AdmiralCache{
+					IdentityClusterCache: identityClusterCache,
+					SourceToDestinations: &sourceToDestinations{
+						sourceDestinations: map[string][]string{"testSource": {"foo"}},
+						mutex:              &sync.Mutex{},
+					},
+				},
+				remoteControllers: map[string]*RemoteController{
+					"testCluster": {
+						DeploymentController: &admiral.DeploymentController{
+							Cache: deploymentCache,
+						},
+					},
+				},
+			},
+			dependency: &v1.Dependency{
+				ObjectMeta: metav1.ObjectMeta{Name: "testSource"},
+				Spec: model.Dependency{
+					Source:       "testSource",
+					Destinations: []string{"foo", "bar"},
+				},
+			},
+			modifySEFunc: func(ctx context.Context, event admiral.EventType, env string,
+				sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*v1alpha3.ServiceEntry, error) {
+				return nil, nil
+			},
+			isDependencyProcessingEnabled: true,
+			expectedError:                 nil,
+		},
+		{
+			name: "Given valid params " +
+				"When a new destination is in the dependency record " +
+				"Then the func should process only the new destination and not return an error",
+			remoteRegistry: &RemoteRegistry{
+				StartTime: time.Now().Add(-time.Minute * 30),
+				AdmiralCache: &AdmiralCache{
+					SourceToDestinations: &sourceToDestinations{
+						sourceDestinations: map[string][]string{"testSource": {"foo"}},
+						mutex:              &sync.Mutex{},
+					},
+					IdentityClusterCache: identityClusterCache,
+				},
+				remoteControllers: map[string]*RemoteController{
+					"testCluster": {
+						DeploymentController: &admiral.DeploymentController{
+							Cache: deploymentCache,
+						},
+					},
+				},
+			},
+			dependency: &v1.Dependency{
+				ObjectMeta: metav1.ObjectMeta{Name: "testDepRec"},
+				Spec: model.Dependency{
+					Source:       "testSource",
+					Destinations: []string{"foo", "bar"},
+				},
+			},
+			modifySEFunc: func(ctx context.Context, event admiral.EventType, env string,
+				sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*v1alpha3.ServiceEntry, error) {
+				return nil, nil
+			},
+			isDependencyProcessingEnabled: true,
+			expectedError:                 nil,
+		},
+		{
+			name: "Given valid params " +
+				"When a new dependency record event is received, " +
+				"When all destinations are MESH enabled, " +
+				"Then, modifySE is not called for " + common.ServicesGatewayIdentity + " identity",
+			remoteRegistry: &RemoteRegistry{
+				StartTime: time.Now().Add(-time.Minute * 30),
+				AdmiralCache: &AdmiralCache{
+					SourceToDestinations: &sourceToDestinations{
+						sourceDestinations: map[string][]string{},
+						mutex:              &sync.Mutex{},
+					},
+					IdentityClusterCache: identityClusterCacheWithServicesGateway,
+				},
+				remoteControllers: map[string]*RemoteController{
+					"testCluster": {
+						DeploymentController: &admiral.DeploymentController{
+							Cache: deploymentCache,
+						},
+					},
+				},
+			},
+			dependency: &v1.Dependency{
+				ObjectMeta: metav1.ObjectMeta{Name: "testDepRec"},
+				Spec: model.Dependency{
+					Source:       "testSource",
+					Destinations: []string{"foo", "bar", common.ServicesGatewayIdentity},
+				},
+			},
+			// the stub itself asserts the negative: being called for the gateway identity is a failure
+			modifySEFunc: func(ctx context.Context, event admiral.EventType, env string,
+				sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*v1alpha3.ServiceEntry, error) {
+				if sourceIdentity == common.ServicesGatewayIdentity {
+					return nil, fmt.Errorf("did not expect to be called for %s", common.ServicesGatewayIdentity)
+				}
+				return nil, nil
+			},
+			isDependencyProcessingEnabled: true,
+			expectedError:                 nil,
+		},
+		{
+			name: "Given valid params " +
+				"When a new dependency record event is received, " +
+				"When all destinations are MESH enabled, " +
+				"When " + common.ServicesGatewayIdentity + " is in lower case in the dependency record, " +
+				"Then, modifySE is not called for " + strings.ToLower(common.ServicesGatewayIdentity) + " identity",
+			remoteRegistry: &RemoteRegistry{
+				StartTime: time.Now().Add(-time.Minute * 30),
+				AdmiralCache: &AdmiralCache{
+					SourceToDestinations: &sourceToDestinations{
+						sourceDestinations: map[string][]string{},
+						mutex:              &sync.Mutex{},
+					},
+					IdentityClusterCache: identityClusterCacheWithServicesGateway,
+				},
+				remoteControllers: map[string]*RemoteController{
+					"testCluster": {
+						DeploymentController: &admiral.DeploymentController{
+							Cache: deploymentCache,
+						},
+					},
+				},
+			},
+			dependency: &v1.Dependency{
+				ObjectMeta: metav1.ObjectMeta{Name: "testDepRec"},
+				Spec: model.Dependency{
+					Source:       "testSource",
+					Destinations: []string{"foo", "bar", strings.ToLower(common.ServicesGatewayIdentity)},
+				},
+			},
+			modifySEFunc: func(ctx context.Context, event admiral.EventType, env string,
+				sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*v1alpha3.ServiceEntry, error) {
+				if strings.EqualFold(sourceIdentity, common.ServicesGatewayIdentity) {
+					return nil, fmt.Errorf("did not expect to be called for %s", common.ServicesGatewayIdentity)
+				}
+				return nil, nil
+			},
+			isDependencyProcessingEnabled: true,
+			expectedError:                 nil,
+		},
+		{
+			name: "Given valid params " +
+				"When a new dependency record event is received, " +
+				"When one destination is NOT MESH enabled, " +
+				"Then, modifySE is called for " + common.ServicesGatewayIdentity + " identity",
+			remoteRegistry: &RemoteRegistry{
+				StartTime: time.Now().Add(-time.Minute * 30),
+				AdmiralCache: &AdmiralCache{
+					SourceToDestinations: &sourceToDestinations{
+						sourceDestinations: map[string][]string{"testSource": {"foo", "bar"}},
+						mutex:              &sync.Mutex{},
+					},
+					IdentityClusterCache: identityClusterCacheWithServicesGatewayAndFoo,
+				},
+				remoteControllers: map[string]*RemoteController{
+					"testCluster": {
+						DeploymentController: &admiral.DeploymentController{
+							Cache: deploymentCache,
+						},
+					},
+				},
+			},
+			dependency: &v1.Dependency{
+				ObjectMeta: metav1.ObjectMeta{Name: "testDepRec"},
+				Spec: model.Dependency{
+					Source:       "testSource",
+					Destinations: []string{"foo", "bar", common.ServicesGatewayIdentity},
+				},
+			},
+			// inverse assertion: only a call for the gateway identity succeeds
+			modifySEFunc: func(ctx context.Context, event admiral.EventType, env string,
+				sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*v1alpha3.ServiceEntry, error) {
+				if sourceIdentity == common.ServicesGatewayIdentity {
+					return nil, nil
+				}
+				return nil, fmt.Errorf("was not called for %s", common.ServicesGatewayIdentity)
+			},
+			isDependencyProcessingEnabled: true,
+			expectedError:                 nil,
+		},
+		{
+			name: "Given valid params " +
+				"When a new dependency record event is received, " +
+				"When dependency source does not have a cluster in cache, " +
+				"Then, do not return an error",
+			remoteRegistry: &RemoteRegistry{
+				StartTime: time.Now().Add(-time.Minute * 30),
+				AdmiralCache: &AdmiralCache{
+					SourceToDestinations: &sourceToDestinations{
+						sourceDestinations: map[string][]string{},
+						mutex:              &sync.Mutex{},
+					},
+					IdentityClusterCache: identityClusterCache,
+				},
+				remoteControllers: map[string]*RemoteController{
+					"testCluster": {
+						DeploymentController: &admiral.DeploymentController{
+							Cache: deploymentCache,
+						},
+					},
+				},
+			},
+			dependency: &v1.Dependency{
+				ObjectMeta: metav1.ObjectMeta{Name: "testDepRec"},
+				Spec: model.Dependency{
+					Source:       "testSource",
+					Destinations: []string{"foo", "bar", common.ServicesGatewayIdentity},
+				},
+			},
+			modifySEFunc: func(ctx context.Context, event admiral.EventType, env string,
+				sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*v1alpha3.ServiceEntry, error) {
+				return nil, nil
+			},
+			isDependencyProcessingEnabled: true,
+			expectedError:                 nil,
+		},
+		{
+			name: "Given valid params " +
+				"When a new dependency record event is received, " +
+				"When source does not have a cluster in cache, " +
+				"Then, do not return an error, " +
+				"And do not call modifySE",
+			remoteRegistry: &RemoteRegistry{
+				StartTime: time.Now().Add(-time.Minute * 30),
+				AdmiralCache: &AdmiralCache{
+					SourceToDestinations: &sourceToDestinations{
+						sourceDestinations: map[string][]string{},
+						mutex:              &sync.Mutex{},
+					},
+					IdentityClusterCache: common.NewMapOfMaps(),
+				},
+				remoteControllers: map[string]*RemoteController{
+					"testCluster": {
+						DeploymentController: &admiral.DeploymentController{
+							Cache: deploymentCache,
+						},
+					},
+				},
+			},
+			dependency: &v1.Dependency{
+				ObjectMeta: metav1.ObjectMeta{Name: "testDepRec"},
+				Spec: model.Dependency{
+					Source:       "testSource",
+					Destinations: []string{"foo", "bar", common.ServicesGatewayIdentity},
+				},
+			},
+			modifySEFunc: func(ctx context.Context, event admiral.EventType, env string,
+				sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*v1alpha3.ServiceEntry, error) {
+				return nil, fmt.Errorf("this should not be called")
+			},
+			isDependencyProcessingEnabled: true,
+			expectedError:                 nil,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			// re-initialize the global Admiral config per case so the
+			// EnableDependencyProcessing flag takes effect
+			common.ResetSync()
+			admiralParams.EnableDependencyProcessing = tc.isDependencyProcessingEnabled
+			common.InitializeConfig(admiralParams)
+
+			processDestinationService := &ProcessDestinationService{}
+
+			actualErr := processDestinationService.Process(context.TODO(), tc.dependency, tc.remoteRegistry, admiral.Add, tc.modifySEFunc)
+
+			// errors are compared by message, since fmt.Errorf values are never Equal
+			if tc.expectedError != nil {
+				assert.NotNil(t, actualErr)
+				assert.Equal(t, tc.expectedError.Error(), actualErr.Error())
+			} else {
+				assert.Nil(t, actualErr)
+			}
+		})
+	}
+
+}
+
+// TestGetDestinationDiff verifies getDestinationsToBeProcessed: given a
+// dependency record, it should return only destinations not already present in
+// the SourceToDestinations cache, and report true when any destination is
+// missing from the IdentityClusterCache (i.e. not mesh-enabled).
+func TestGetDestinationDiff(t *testing.T) {
+	var (
+		identityClusterCacheWithOnlyFoo        = common.NewMapOfMaps()
+		identityClusterCacheWithAllMeshEnabled = common.NewMapOfMaps()
+	)
+	identityClusterCacheWithOnlyFoo.Put("foo", "cluster1", "cluster1")
+	identityClusterCacheWithAllMeshEnabled.Put("foo", "cluster1", "cluster1")
+	identityClusterCacheWithAllMeshEnabled.Put("bar", "cluster1", "cluster1")
+	testCases := []struct {
+		name                     string
+		remoteRegistry           *RemoteRegistry
+		dependency               *v1.Dependency
+		expectedDestinations     []string
+		expectedIsNonMeshEnabled bool
+	}{
+		{
+			name: "Given valid params " +
+				"When the cache is empty" +
+				"Then the func should return all the destinations as is",
+			remoteRegistry: &RemoteRegistry{
+				AdmiralCache: &AdmiralCache{
+					SourceToDestinations: &sourceToDestinations{
+						sourceDestinations: map[string][]string{},
+						mutex:              &sync.Mutex{},
+					},
+					IdentityClusterCache: identityClusterCacheWithAllMeshEnabled,
+				},
+			},
+			dependency: &v1.Dependency{
+				ObjectMeta: metav1.ObjectMeta{Name: "testDepRec"},
+				Spec: model.Dependency{
+					Source:       "testSource",
+					Destinations: []string{"foo", "bar"},
+				},
+			},
+			expectedDestinations: []string{"foo", "bar"},
+		},
+		{
+			name: "Given valid params" +
+				"When all the destinations are already in the cache" +
+				"Then the func should return an empty list",
+			remoteRegistry: &RemoteRegistry{
+				AdmiralCache: &AdmiralCache{
+					SourceToDestinations: &sourceToDestinations{
+						sourceDestinations: map[string][]string{"testSource": {"foo", "bar"}},
+						mutex:              &sync.Mutex{},
+					},
+					IdentityClusterCache: identityClusterCacheWithAllMeshEnabled,
+				},
+			},
+			dependency: &v1.Dependency{
+				ObjectMeta: metav1.ObjectMeta{Name: "testDepRec"},
+				Spec: model.Dependency{
+					Source:       "testSource",
+					Destinations: []string{"foo", "bar"},
+				},
+			},
+			expectedDestinations: []string{},
+		},
+		{
+			name: "Given valid params" +
+				"When there is an additional destination that is not in the cache" +
+				"Then the func should return only the one that is missing in the cache",
+			remoteRegistry: &RemoteRegistry{
+				AdmiralCache: &AdmiralCache{
+					SourceToDestinations: &sourceToDestinations{
+						sourceDestinations: map[string][]string{"testSource": {"foo"}},
+						mutex:              &sync.Mutex{},
+					},
+					IdentityClusterCache: identityClusterCacheWithAllMeshEnabled,
+				},
+			},
+			dependency: &v1.Dependency{
+				ObjectMeta: metav1.ObjectMeta{Name: "testDepRec"},
+				Spec: model.Dependency{
+					Source:       "testSource",
+					Destinations: []string{"foo", "bar"},
+				},
+			},
+			expectedDestinations: []string{"bar"},
+		},
+		{
+			name: "Given valid params" +
+				"When there is a NON mesh enabled service" +
+				"Then the function should return new services, and true",
+			remoteRegistry: &RemoteRegistry{
+				AdmiralCache: &AdmiralCache{
+					SourceToDestinations: &sourceToDestinations{
+						sourceDestinations: map[string][]string{"testSource": {"foo"}},
+						mutex:              &sync.Mutex{},
+					},
+					// "bar" is absent from this cache, marking it as not mesh-enabled
+					IdentityClusterCache: identityClusterCacheWithOnlyFoo,
+				},
+			},
+			dependency: &v1.Dependency{
+				ObjectMeta: metav1.ObjectMeta{Name: "testDepRec"},
+				Spec: model.Dependency{
+					Source:       "testSource",
+					Destinations: []string{"foo", "bar"},
+				},
+			},
+			expectedDestinations:     []string{"bar"},
+			expectedIsNonMeshEnabled: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			actualDestinations, nonMeshEnabledExists := getDestinationsToBeProcessed(tc.dependency, tc.remoteRegistry)
+			assert.Equal(t, tc.expectedDestinations, actualDestinations)
+			assert.Equal(t, tc.expectedIsNonMeshEnabled, nonMeshEnabledExists)
+		})
+	}
+
+}
From d5b730403727b9df20c3e0ec9f88b459e1abd126 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:52:03 -0700
Subject: [PATCH 045/235] add file admiral/pkg/clusters/deployment_handler.go
---
admiral/pkg/clusters/deployment_handler.go | 77 ++++++++++++++++++++++
1 file changed, 77 insertions(+)
create mode 100644 admiral/pkg/clusters/deployment_handler.go
diff --git a/admiral/pkg/clusters/deployment_handler.go b/admiral/pkg/clusters/deployment_handler.go
new file mode 100644
index 00000000..97014fb3
--- /dev/null
+++ b/admiral/pkg/clusters/deployment_handler.go
@@ -0,0 +1,77 @@
+package clusters
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ log "github.com/sirupsen/logrus"
+ k8sAppsV1 "k8s.io/api/apps/v1"
+)
+
+// DeploymentHandler receives deployment add/delete events from a single remote
+// cluster and forwards them to HandleEventForDeployment.
+type DeploymentHandler struct {
+	RemoteRegistry *RemoteRegistry // shared registry of caches and remote controllers
+	ClusterID string // cluster from which this handler receives events
+}
+
+// Added handles a deployment create/update event by delegating to
+// HandleEventForDeployment with an Add event type; any failure is wrapped
+// with event/resource/cluster context.
+func (pc *DeploymentHandler) Added(ctx context.Context, obj *k8sAppsV1.Deployment) error {
+	err := HandleEventForDeployment(ctx, admiral.Add, obj, pc.RemoteRegistry, pc.ClusterID)
+	if err != nil {
+		return fmt.Errorf(LogErrFormat, common.Add, common.DeploymentResourceType, obj.Name, pc.ClusterID, err)
+	}
+	return nil
+}
+
+// Deleted handles a deployment delete event by delegating to
+// HandleEventForDeployment with a Delete event type; any failure is wrapped
+// with event/resource/cluster context.
+func (pc *DeploymentHandler) Deleted(ctx context.Context, obj *k8sAppsV1.Deployment) error {
+	err := HandleEventForDeployment(ctx, admiral.Delete, obj, pc.RemoteRegistry, pc.ClusterID)
+	if err != nil {
+		return fmt.Errorf(LogErrFormat, common.Delete, common.DeploymentResourceType, obj.Name, pc.ClusterID, err)
+	}
+	return nil
+}
+
+// HandleEventForDeploymentFunc is a handler function for deployment events;
+// it mirrors the signature of HandleEventForDeployment.
+type HandleEventForDeploymentFunc func(
+	ctx context.Context, event admiral.EventType, obj *k8sAppsV1.Deployment,
+	remoteRegistry *RemoteRegistry, clusterName string) error
+
+// HandleEventForDeployment handles add and delete events for DeploymentHandler.
+// It resolves the deployment's global identity, records identity-to-cluster
+// (and, when SW-aware NS caching is on, namespace/partition) mappings in the
+// Admiral cache, and then triggers service entry reconciliation via
+// modifyServiceEntryForNewServiceOrPod.
+func HandleEventForDeployment(ctx context.Context, event admiral.EventType, obj *k8sAppsV1.Deployment,
+	remoteRegistry *RemoteRegistry, clusterName string) error {
+
+	log.Infof(LogFormat, event, common.DeploymentResourceType, obj.Name, clusterName, common.ReceivedStatus)
+	globalIdentifier := common.GetDeploymentGlobalIdentifier(obj)
+	log.Infof(LogFormat, event, common.DeploymentResourceType, obj.Name, clusterName, "globalIdentifier is "+globalIdentifier)
+	originalIdentifier := common.GetDeploymentOriginalIdentifier(obj)
+	log.Infof(LogFormat, event, common.DeploymentResourceType, obj.Name, clusterName, "originalIdentifier is "+originalIdentifier)
+
+	// A deployment without a workload identity label is not managed by Admiral; skip it.
+	if len(globalIdentifier) == 0 {
+		log.Infof(LogFormat, event, common.DeploymentResourceType, obj.Name, clusterName, "Skipped as '"+common.GetWorkloadIdentifier()+" was not found', namespace="+obj.Namespace)
+		return nil
+	}
+
+	env := common.GetEnv(obj)
+
+	// Propagate cluster name and resource type so downstream processing can log/branch on them.
+	ctx = context.WithValue(ctx, common.ClusterName, clusterName)
+	ctx = context.WithValue(ctx, common.EventResourceType, common.Deployment)
+
+	if remoteRegistry.AdmiralCache != nil {
+		if remoteRegistry.AdmiralCache.IdentityClusterCache != nil {
+			remoteRegistry.AdmiralCache.IdentityClusterCache.Put(globalIdentifier, clusterName, clusterName)
+		}
+		if common.EnableSWAwareNSCaches() {
+			if remoteRegistry.AdmiralCache.IdentityClusterNamespaceCache != nil {
+				remoteRegistry.AdmiralCache.IdentityClusterNamespaceCache.Put(globalIdentifier, clusterName, obj.Namespace, obj.Namespace)
+			}
+			// Only record a partition mapping when the deployment actually carries an identity partition.
+			if remoteRegistry.AdmiralCache.PartitionIdentityCache != nil && len(common.GetDeploymentIdentityPartition(obj)) > 0 {
+				remoteRegistry.AdmiralCache.PartitionIdentityCache.Put(globalIdentifier, originalIdentifier)
+				log.Infof(LogFormat, event, common.DeploymentResourceType, obj.Name, clusterName, "PartitionIdentityCachePut "+globalIdentifier+" for "+originalIdentifier)
+			}
+		}
+	}
+
+	// Use the same function as added deployment function to update and put new service entry in place to replace old one
+	_, err := modifyServiceEntryForNewServiceOrPod(ctx, event, env, globalIdentifier, remoteRegistry)
+	return err
+}
From 1ecddca4ae3193d0fe746a926bee7006ba70ba34 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:52:06 -0700
Subject: [PATCH 046/235] add file
admiral/pkg/clusters/deployment_handler_test.go
---
.../pkg/clusters/deployment_handler_test.go | 238 ++++++++++++++++++
1 file changed, 238 insertions(+)
create mode 100644 admiral/pkg/clusters/deployment_handler_test.go
diff --git a/admiral/pkg/clusters/deployment_handler_test.go b/admiral/pkg/clusters/deployment_handler_test.go
new file mode 100644
index 00000000..3172ee31
--- /dev/null
+++ b/admiral/pkg/clusters/deployment_handler_test.go
@@ -0,0 +1,238 @@
+package clusters
+
+import (
+ "context"
+ "sync"
+ "testing"
+ "time"
+
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ admiralFake "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/fake"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+
+ appsV1 "k8s.io/api/apps/v1"
+ coreV1 "k8s.io/api/core/v1"
+ metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var deploymentHandlerTestSingleton sync.Once
+
+func admiralParamsForDeploymentHandlerTests() common.AdmiralParams {
+ return common.AdmiralParams{
+ KubeconfigPath: "testdata/fake.config",
+ LabelSet: &common.LabelSet{
+ WorkloadIdentityKey: "identity",
+ EnvKey: "admiral.io/env",
+ AdmiralCRDIdentityLabel: "identity",
+ PriorityKey: "priority",
+ IdentityPartitionKey: "admiral.io/identityPartition",
+ },
+ EnableSAN: true,
+ SANPrefix: "prefix",
+ HostnameSuffix: "mesh",
+ SyncNamespace: "ns",
+ CacheReconcileDuration: time.Minute,
+ ClusterRegistriesNamespace: "default",
+ DependenciesNamespace: "default",
+ EnableRoutingPolicy: true,
+ EnvoyFilterVersion: "1.13",
+ Profile: common.AdmiralProfileDefault,
+ EnableSWAwareNSCaches: true,
+ ExportToIdentityList: []string{"*"},
+ ExportToMaxNamespaces: 35,
+ }
+}
+
+func setupForDeploymentHandlerTests() {
+ deploymentHandlerTestSingleton.Do(func() {
+ common.ResetSync()
+ common.InitializeConfig(admiralParamsForDeploymentHandlerTests())
+ })
+}
+
+func TestDeploymentHandlerPartitionCache(t *testing.T) {
+ setupForDeploymentHandlerTests()
+ admiralParams := admiralParamsForDeploymentHandlerTests()
+ ctx := context.Background()
+ remoteRegistry, _ := InitAdmiral(ctx, admiralParams)
+ remoteRegistry.AdmiralCache.PartitionIdentityCache = common.NewMap()
+ partitionIdentifier := "admiral.io/identityPartition"
+ clusterName := "test-k8s"
+
+ testCases := []struct {
+ name string
+ deployment appsV1.Deployment
+ expected string
+ }{
+ {
+ name: "Given the deployment has the partition label, " +
+ "Then the PartitionIdentityCache should contain an entry for that deployment",
+ deployment: appsV1.Deployment{Spec: appsV1.DeploymentSpec{Template: coreV1.PodTemplateSpec{ObjectMeta: metaV1.ObjectMeta{Labels: map[string]string{partitionIdentifier: "sw1", "env": "stage", "identity": "services.gateway"}}}}},
+ expected: "services.gateway",
+ },
+ {
+ name: "Given the deployment has the partition annotation, " +
+ "Then the PartitionIdentityCache should contain an entry for that deployment",
+ deployment: appsV1.Deployment{Spec: appsV1.DeploymentSpec{Template: coreV1.PodTemplateSpec{ObjectMeta: metaV1.ObjectMeta{Annotations: map[string]string{partitionIdentifier: "sw1", "env": "stage", "identity": "services.gateway"}}}}},
+ expected: "services.gateway",
+ },
+ {
+ name: "Given the deployment doesn't have the partition label or annotation, " +
+ "Then the PartitionIdentityCache should not contain an entry for that deployment",
+ deployment: appsV1.Deployment{Spec: appsV1.DeploymentSpec{Template: coreV1.PodTemplateSpec{ObjectMeta: metaV1.ObjectMeta{Labels: map[string]string{"identity": "services.gateway"}, Annotations: map[string]string{}}}}},
+ expected: "",
+ },
+ }
+
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ _ = HandleEventForDeployment(ctx, admiral.Add, &c.deployment, remoteRegistry, clusterName)
+ iVal := ""
+ if len(c.expected) > 0 {
+ globalIdentifier := common.GetDeploymentGlobalIdentifier(&c.deployment)
+ iVal = remoteRegistry.AdmiralCache.PartitionIdentityCache.Get(globalIdentifier)
+ }
+ if !(iVal == c.expected) {
+ t.Errorf("Expected cache to contain: %s, got: %s", c.expected, iVal)
+ }
+ })
+ }
+}
+
// TestDeploymentHandler wires a DeploymentHandler with mock remote controllers
// and a fake GTP client, invokes Added/Deleted, and verifies that the
// identity -> cluster -> namespace cache is populated. Due to fake-client
// limitations (see the note before the loop) it mainly asserts that no errors
// are thrown.
func TestDeploymentHandler(t *testing.T) {
	setupForDeploymentHandlerTests()
	ctx := context.Background()

	p := common.AdmiralParams{
		KubeconfigPath: "testdata/fake.config",
	}

	registry, _ := InitAdmiral(context.Background(), p)

	handler := DeploymentHandler{}

	// Fresh, empty global-traffic cache for the handler under test.
	gtpCache := &globalTrafficCache{}
	gtpCache.identityCache = make(map[string]*v1.GlobalTrafficPolicy)
	gtpCache.mutex = &sync.Mutex{}

	fakeCrdClient := admiralFake.NewSimpleClientset()

	gtpController := &admiral.GlobalTrafficController{CrdClient: fakeCrdClient}
	remoteController, _ := createMockRemoteController(func(i interface{}) {

	})
	remoteController.GlobalTraffic = gtpController

	registry.remoteControllers = map[string]*RemoteController{"cluster-1": remoteController}

	registry.AdmiralCache.GlobalTrafficCache = gtpCache
	handler.RemoteRegistry = registry
	handler.ClusterID = "cluster-1"

	// Deployment whose pod template carries the "identity" label ("bar") that
	// the cache assertions below look up.
	deployment := appsV1.Deployment{
		ObjectMeta: metaV1.ObjectMeta{
			Name:      "test",
			Namespace: "namespace",
			Labels:    map[string]string{"identity": "app1"},
		},
		Spec: appsV1.DeploymentSpec{
			Selector: &metaV1.LabelSelector{
				MatchLabels: map[string]string{"identity": "bar"},
			},
			Template: coreV1.PodTemplateSpec{
				ObjectMeta: metaV1.ObjectMeta{
					Labels: map[string]string{"identity": "bar", "istio-injected": "true", "env": "dev"},
				},
			},
		},
	}

	//Struct of test case info. Name is required.
	testCases := []struct {
		name                         string
		addedDeployment              *appsV1.Deployment
		expectedDeploymentCacheKey   string
		expectedIdentityCacheValue   *v1.GlobalTrafficPolicy
		expectedDeploymentCacheValue *appsV1.Deployment
	}{
		{
			name:                         "Shouldn't throw errors when called",
			addedDeployment:              &deployment,
			expectedDeploymentCacheKey:   "myGTP1",
			expectedIdentityCacheValue:   nil,
			expectedDeploymentCacheValue: nil,
		},
	}

	//Rather annoying, but wasn't able to get the autogenerated fake k8s client for GTP objects to allow me to list resources, so this test is only for not throwing errors. I'll be testing the rest of the functionality piecemeal.
	//Side note, if anyone knows how to fix `level=error msg="Failed to list deployments in cluster, error: no kind \"GlobalTrafficPolicyList\" is registered for version \"admiral.io/v1\" in scheme \"pkg/runtime/scheme.go:101\""`, I'd love to hear it!
	//Already tried working through this: https://github.com/camilamacedo86/operator-sdk/blob/e40d7db97f0d132333b1e46ddf7b7f3cab1e379f/doc/user/unit-testing.md with no luck

	//Run the test for every provided case
	for _, c := range testCases {
		t.Run(c.name, func(t *testing.T) {
			// Reset the GTP cache per case so cases stay independent.
			gtpCache = &globalTrafficCache{}
			gtpCache.identityCache = make(map[string]*v1.GlobalTrafficPolicy)
			gtpCache.mutex = &sync.Mutex{}
			handler.RemoteRegistry.AdmiralCache.GlobalTrafficCache = gtpCache

			handler.Added(ctx, &deployment)
			// Added should have recorded identity "bar" on cluster-1 with the
			// deployment's namespace.
			ns := handler.RemoteRegistry.AdmiralCache.IdentityClusterNamespaceCache.Get("bar").Get("cluster-1").GetKeys()[0]
			if ns != "namespace" {
				t.Errorf("expected namespace: %v but got %v", "namespace", ns)
			}
			handler.Deleted(ctx, &deployment)
		})
	}
}
+
// fakeHandleEventForDeployment is a test double for the deployment event
// handler. It records which deployments it was invoked for, keyed first by
// namespace and then by deployment name.
type fakeHandleEventForDeployment struct {
	// handleEventForDeploymentFunc returns the stubbed handler implementation.
	handleEventForDeploymentFunc func() HandleEventForDeploymentFunc
	// calledDeploymentByNamespace records namespace -> deployment name -> invoked.
	calledDeploymentByNamespace map[string]map[string]bool
}
+
+func (f *fakeHandleEventForDeployment) CalledDeploymentForNamespace(name, namespace string) bool {
+ if f.calledDeploymentByNamespace[namespace] != nil {
+ return f.calledDeploymentByNamespace[namespace][name]
+ }
+ return false
+}
+
+func newFakeHandleEventForDeploymentsByError(errByDeployment map[string]map[string]error) *fakeHandleEventForDeployment {
+ f := &fakeHandleEventForDeployment{
+ calledDeploymentByNamespace: make(map[string]map[string]bool, 0),
+ }
+ f.handleEventForDeploymentFunc = func() HandleEventForDeploymentFunc {
+ return func(
+ ctx context.Context,
+ event admiral.EventType,
+ deployment *appsV1.Deployment,
+ remoteRegistry *RemoteRegistry,
+ clusterName string) error {
+ if f.calledDeploymentByNamespace[deployment.Namespace] == nil {
+ f.calledDeploymentByNamespace[deployment.Namespace] = map[string]bool{
+ deployment.Name: true,
+ }
+ } else {
+ f.calledDeploymentByNamespace[deployment.Namespace][deployment.Name] = true
+ }
+ return errByDeployment[deployment.Namespace][deployment.Name]
+ }
+ }
+ return f
+}
+
+func newFakeDeployment(name, namespace string, matchLabels map[string]string) *appsV1.Deployment {
+ return &appsV1.Deployment{
+ ObjectMeta: metaV1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ },
+ Spec: appsV1.DeploymentSpec{
+ Selector: &metaV1.LabelSelector{
+ MatchLabels: matchLabels,
+ },
+ },
+ }
+}
From 9eea075aa671a3221c333167b707b2f4e27f7eb1 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:52:09 -0700
Subject: [PATCH 047/235] add file
admiral/pkg/clusters/destinationrule_handler.go
---
.../pkg/clusters/destinationrule_handler.go | 606 ++++++++++++++++++
1 file changed, 606 insertions(+)
create mode 100644 admiral/pkg/clusters/destinationrule_handler.go
diff --git a/admiral/pkg/clusters/destinationrule_handler.go b/admiral/pkg/clusters/destinationrule_handler.go
new file mode 100644
index 00000000..633ef78b
--- /dev/null
+++ b/admiral/pkg/clusters/destinationrule_handler.go
@@ -0,0 +1,606 @@
+package clusters
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "strings"
+ "time"
+
+ networkingV1Alpha3 "istio.io/api/networking/v1alpha3"
+
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "google.golang.org/protobuf/types/known/durationpb"
+
+ commonUtil "github.com/istio-ecosystem/admiral/admiral/pkg/util"
+
+ "github.com/golang/protobuf/ptypes/duration"
+ "github.com/golang/protobuf/ptypes/wrappers"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/util"
+ "github.com/sirupsen/logrus"
+ log "github.com/sirupsen/logrus"
+ "istio.io/client-go/pkg/apis/networking/v1alpha3"
+ k8sErrors "k8s.io/apimachinery/pkg/api/errors"
+ metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
// DestinationRuleHandler responsible for handling Add/Update/Delete events for
// DestinationRule resources
type DestinationRuleHandler struct {
	// RemoteRegistry holds the Admiral caches and per-cluster remote
	// controllers shared across handlers.
	RemoteRegistry *RemoteRegistry
	// ClusterID identifies the cluster from which this handler receives events.
	ClusterID string
}
+
// getDestinationRule builds the DestinationRule spec for the given service
// entry: mTLS, LEAST_REQUEST load balancing with warmup, optional
// Active-Passive locality distribution, GTP-driven failover settings, outlier
// detection, and client connection-pool overrides.
// NOTE(review): when gtpTrafficPolicy is processed, the whole LoadBalancer
// (including any Active-Passive distribution computed above) is intentionally
// replaced by the GTP-derived settings.
func getDestinationRule(se *networkingV1Alpha3.ServiceEntry, locality string, gtpTrafficPolicy *model.TrafficPolicy,
	outlierDetection *v1.OutlierDetection, clientConnectionSettings *v1.ClientConnectionConfig, currentDR *v1alpha3.DestinationRule, eventResourceType string, ctxLogger *logrus.Entry, event admiral.EventType) *networkingV1Alpha3.DestinationRule {
	var (
		processGtp = true
		dr         = &networkingV1Alpha3.DestinationRule{}
	)

	dr.Host = se.Hosts[0]
	if common.EnableExportTo(dr.Host) {
		dr.ExportTo = se.ExportTo
	}
	// Baseline traffic policy: ISTIO_MUTUAL TLS + LEAST_REQUEST with warmup.
	dr.TrafficPolicy = &networkingV1Alpha3.TrafficPolicy{
		Tls: &networkingV1Alpha3.ClientTLSSettings{
			Mode: networkingV1Alpha3.ClientTLSSettings_ISTIO_MUTUAL,
		},
		LoadBalancer: &networkingV1Alpha3.LoadBalancerSettings{
			LbPolicy: &networkingV1Alpha3.LoadBalancerSettings_Simple{
				Simple: networkingV1Alpha3.LoadBalancerSettings_LEAST_REQUEST,
			},
			WarmupDurationSecs: &duration.Duration{Seconds: common.GetDefaultWarmupDurationSecs()},
		},
	}

	// Active-Passive applies to non-GTP events, and to GTP events other than
	// deletes (a GTP delete falls back to the default behavior).
	if common.EnableActivePassive() &&
		((eventResourceType != common.GTP) || (eventResourceType == common.GTP && event != admiral.Delete)) {
		distribute := calculateDistribution(se, currentDR)

		// Avoid adding a LocalityLbSetting to DRs of applications that do not
		// need one (empty distribution).
		if len(distribute) != 0 {
			dr.TrafficPolicy.LoadBalancer.LocalityLbSetting = &networkingV1Alpha3.LocalityLoadBalancerSetting{
				Distribute: distribute,
			}
		}
	}

	// Without node locality (e.g. minikube) GTP locality rules cannot be applied.
	if len(locality) == 0 {
		log.Warnf(LogErrFormat, "Process", "GlobalTrafficPolicy", dr.Host, "", "Skipping gtp processing, locality of the cluster nodes cannot be determined. Is this minikube?")
		processGtp = false
	}

	if gtpTrafficPolicy != nil && processGtp {
		var loadBalancerSettings = &networkingV1Alpha3.LoadBalancerSettings{
			LbPolicy:           &networkingV1Alpha3.LoadBalancerSettings_Simple{Simple: networkingV1Alpha3.LoadBalancerSettings_LEAST_REQUEST},
			WarmupDurationSecs: &duration.Duration{Seconds: common.GetDefaultWarmupDurationSecs()},
		}

		if len(gtpTrafficPolicy.Target) > 0 {
			var localityLbSettings = &networkingV1Alpha3.LocalityLoadBalancerSetting{}
			if gtpTrafficPolicy.LbType == model.TrafficPolicy_FAILOVER {
				distribute := make([]*networkingV1Alpha3.LocalityLoadBalancerSetting_Distribute, 0)
				targetTrafficMap := make(map[string]uint32)
				for _, tg := range gtpTrafficPolicy.Target {
					//skip 0 values from GTP as that's implicit for locality settings
					if tg.Weight != int32(0) {
						targetTrafficMap[tg.Region] = uint32(tg.Weight)
					}
				}
				distribute = append(distribute, &networkingV1Alpha3.LocalityLoadBalancerSetting_Distribute{
					From: locality + "/*",
					To:   targetTrafficMap,
				})
				localityLbSettings.Distribute = distribute
			}
			// else default behavior
			loadBalancerSettings.LocalityLbSetting = localityLbSettings
		}
		// GTP settings replace the baseline (and any Active-Passive) LB config.
		dr.TrafficPolicy.LoadBalancer = loadBalancerSettings
	}

	// Log whether the final DR runs Active-Passive (has a Distribute) or
	// Active-Active (no locality distribution at all).
	if dr.TrafficPolicy.LoadBalancer.LocalityLbSetting != nil {
		if dr.TrafficPolicy.LoadBalancer.LocalityLbSetting.Distribute != nil {
			ctxLogger.Infof(common.CtxLogFormat,
				"getDestinationRule", dr.Host, "", "", "Running in Active-Passive Mode")
		} else {
			ctxLogger.Infof(common.CtxLogFormat,
				"getDestinationRule", dr.Host, "", "", "Running in Active-Active Mode")
		}
	} else {
		ctxLogger.Infof(common.CtxLogFormat,
			"getDestinationRule", dr.Host, "", "", "Running in Active-Active Mode")
	}

	derivedOutlierDetection := getOutlierDetection(se, locality, gtpTrafficPolicy, outlierDetection, common.DisableDefaultAutomaticFailover())
	if derivedOutlierDetection != nil {
		dr.TrafficPolicy.OutlierDetection = derivedOutlierDetection
	}

	clientConnectionSettingsOverride := getClientConnectionPoolOverrides(clientConnectionSettings)
	if clientConnectionSettingsOverride != nil {
		dr.TrafficPolicy.ConnectionPool = clientConnectionSettingsOverride
	}

	return dr
}
+
+func calculateDistribution(se *networkingV1Alpha3.ServiceEntry, currentDR *v1alpha3.DestinationRule) []*networkingV1Alpha3.LocalityLoadBalancerSetting_Distribute {
+ distribute := make([]*networkingV1Alpha3.LocalityLoadBalancerSetting_Distribute, 0)
+
+ // There are two conditions here:
+ // 1. If there is only one endpoint in the SE it means that the application is only available in one region.
+ // We will configure the traffic to be routed from all the regions to this region if it is a new application
+ // and maintain the same configuration if we have already converted it to an A/P before.
+ // 2. If there are multiple endpoints in the SE it means that the application is available in multiple regions.
+ // We then check the DR cache to check which is the region that is primary at the moment and retain that information.
+ // NOTE: We are ignoring events from the GTP controller as they will be overriden further in the code
+ numOfSEendpoints := len(se.Endpoints)
+ if numOfSEendpoints == 1 {
+ defaultAPDistribution := &networkingV1Alpha3.LocalityLoadBalancerSetting_Distribute{
+ From: "*",
+ To: map[string]uint32{se.Endpoints[0].Locality: 100},
+ }
+
+ if currentDR != nil {
+ if ¤tDR.Spec != (&networkingV1Alpha3.DestinationRule{}) &&
+ currentDR.Spec.TrafficPolicy != nil &&
+ currentDR.Spec.TrafficPolicy.LoadBalancer != nil {
+ if currentDR.Spec.TrafficPolicy.LoadBalancer.LocalityLbSetting == nil {
+ // If the application is Active-Active and only in one region convert to Active-Passive
+ distribute = append(distribute, defaultAPDistribution)
+ }
+
+ if currentDR.Spec.TrafficPolicy.LoadBalancer.LocalityLbSetting != nil &&
+ currentDR.Spec.TrafficPolicy.LoadBalancer.LocalityLbSetting.Distribute != nil &&
+ len(currentDR.Spec.TrafficPolicy.LoadBalancer.LocalityLbSetting.Distribute) == 1 &&
+ currentDR.Spec.TrafficPolicy.LoadBalancer.LocalityLbSetting.Distribute[0].From == "*" {
+ // Maintain the same configuration if we have already converted it to an Active-Passive before
+ distribute = append(distribute, currentDR.Spec.TrafficPolicy.LoadBalancer.LocalityLbSetting.Distribute...)
+ }
+ }
+ } else {
+ // Configure the traffic to be routed from all the regions to this region if it is a new application
+ distribute = append(distribute, defaultAPDistribution)
+ }
+ } else if numOfSEendpoints != 0 {
+ if currentDR != nil {
+ if ¤tDR.Spec != (&networkingV1Alpha3.DestinationRule{}) &&
+ currentDR.Spec.TrafficPolicy != nil &&
+ currentDR.Spec.TrafficPolicy.LoadBalancer != nil &&
+ currentDR.Spec.TrafficPolicy.LoadBalancer.LocalityLbSetting != nil &&
+ currentDR.Spec.TrafficPolicy.LoadBalancer.LocalityLbSetting.Distribute != nil {
+ distribute = append(distribute, currentDR.Spec.TrafficPolicy.LoadBalancer.LocalityLbSetting.Distribute...)
+ }
+ }
+ }
+
+ return distribute
+}
+
+func getClientConnectionPoolOverrides(clientConnectionSettings *v1.ClientConnectionConfig) *networkingV1Alpha3.ConnectionPoolSettings {
+
+ connectionPoolSettings := &networkingV1Alpha3.ConnectionPoolSettings{
+ Http: &networkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{
+ MaxRequestsPerConnection: common.MaxRequestsPerConnection(),
+ },
+ }
+
+ if clientConnectionSettings == nil {
+ return connectionPoolSettings
+ }
+
+ if clientConnectionSettings.Spec.ConnectionPool.Http != nil {
+
+ if clientConnectionSettings.Spec.ConnectionPool.Http.Http2MaxRequests > 0 {
+ connectionPoolSettings.Http.Http2MaxRequests =
+ clientConnectionSettings.Spec.ConnectionPool.Http.Http2MaxRequests
+ }
+
+ if clientConnectionSettings.Spec.ConnectionPool.Http.MaxRequestsPerConnection > 0 {
+ connectionPoolSettings.Http.MaxRequestsPerConnection =
+ clientConnectionSettings.Spec.ConnectionPool.Http.MaxRequestsPerConnection
+ }
+
+ if clientConnectionSettings.Spec.ConnectionPool.Http.IdleTimeout != "" {
+ idleTimeout, err := time.ParseDuration(clientConnectionSettings.Spec.ConnectionPool.Http.IdleTimeout)
+ if err != nil {
+ log.Warnf(
+ LogErrFormat, "ClientConnectionConfigOverride", common.ClientConnectionConfig,
+ clientConnectionSettings.Name, "", "failed parsing IdleTimeout due to error: "+err.Error())
+ } else {
+ connectionPoolSettings.Http.IdleTimeout = durationpb.New(idleTimeout)
+ }
+ }
+ }
+
+ if clientConnectionSettings.Spec.ConnectionPool.Tcp != nil {
+ if clientConnectionSettings.Spec.ConnectionPool.Tcp.MaxConnectionDuration != "" {
+ maxConnectionDuration, err := time.ParseDuration(clientConnectionSettings.Spec.ConnectionPool.Tcp.MaxConnectionDuration)
+ if err != nil {
+ log.Warnf(
+ LogErrFormat, "ClientConnectionConfigOverride", common.ClientConnectionConfig,
+ clientConnectionSettings.Name, "", "failed parsing MaxConnectionDuration due to error: "+err.Error())
+ } else {
+ connectionPoolSettings.Tcp = &networkingV1Alpha3.ConnectionPoolSettings_TCPSettings{
+ MaxConnectionDuration: durationpb.New(maxConnectionDuration),
+ }
+ }
+ }
+ }
+
+ return connectionPoolSettings
+}
+
+func getOutlierDetection(
+ se *networkingV1Alpha3.ServiceEntry,
+ locality string,
+ gtpTrafficPolicy *model.TrafficPolicy,
+ outlierDetectionCrd *v1.OutlierDetection,
+ disableDefaultAutomaticFailover bool) *networkingV1Alpha3.OutlierDetection {
+ if disableDefaultAutomaticFailover {
+ log.Infoln("default automatic failover is disabled. outlier detection " +
+ "will be configured only if OutlierDetection OR GTP resource is present")
+ if (outlierDetectionCrd == nil || (outlierDetectionCrd.Spec.OutlierConfig == nil)) &&
+ (gtpTrafficPolicy == nil || gtpTrafficPolicy.OutlierDetection == nil) {
+ log.Infoln("Neither outlier not GTP configured, will not set outlier configuration")
+ return &networkingV1Alpha3.OutlierDetection{
+ ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: 0},
+ Consecutive_5XxErrors: &wrappers.UInt32Value{Value: 0},
+ }
+ }
+ }
+
+ // When only one endpoint present in the Service Entry:
+ // 1. It points to kubernetes service (ends in svc.cluster.local)
+ // 2. It is an IPv4 address
+ // Then return nil
+ if len(se.Endpoints) == 1 &&
+ (strings.Contains(se.Endpoints[0].Address, common.DotLocalDomainSuffix) ||
+ net.ParseIP(se.Endpoints[0].Address).To4() != nil) {
+ log.Infof("service entry endpoint (%v) contains only one endpoint which "+
+ "is either kubernetes service or ipv4 address. Not setting outlier", se.Endpoints)
+ return nil
+ }
+ outlierDetection := getOutlierDetectionSkeleton(disableDefaultAutomaticFailover)
+ //Give priority to outlier detection crd than GTP. Eventually support for outlier detection via GTP will be stopped.
+ if outlierDetectionCrd != nil && outlierDetectionCrd.Spec.OutlierConfig != nil {
+ log.Infof("Using outlier detection config from Admiral Outlier Detection CRD. Hosts - %s", se.Hosts)
+ outlierDetection.ConsecutiveGatewayErrors = &wrappers.UInt32Value{Value: outlierDetectionCrd.Spec.OutlierConfig.ConsecutiveGatewayErrors}
+ outlierDetection.Interval = &duration.Duration{Seconds: outlierDetectionCrd.Spec.OutlierConfig.Interval}
+ outlierDetection.BaseEjectionTime = &duration.Duration{Seconds: outlierDetectionCrd.Spec.OutlierConfig.BaseEjectionTime}
+ } else if gtpTrafficPolicy != nil && gtpTrafficPolicy.OutlierDetection != nil {
+ log.Infof("Using outlier detection config from Admiral Global Traffic Policy CRD. Hosts - %s", se.Hosts)
+ setDefaultValuesOfOutlierDetection(outlierDetection)
+ if gtpTrafficPolicy.OutlierDetection.BaseEjectionTime > 0 {
+ outlierDetection.BaseEjectionTime = &duration.Duration{
+ Seconds: gtpTrafficPolicy.OutlierDetection.BaseEjectionTime,
+ }
+ }
+ if gtpTrafficPolicy.OutlierDetection.ConsecutiveGatewayErrors > 0 {
+ outlierDetection.ConsecutiveGatewayErrors = &wrappers.UInt32Value{
+ Value: gtpTrafficPolicy.OutlierDetection.ConsecutiveGatewayErrors,
+ }
+ }
+ if gtpTrafficPolicy.OutlierDetection.Interval > 0 {
+ outlierDetection.Interval = &duration.Duration{
+ Seconds: gtpTrafficPolicy.OutlierDetection.Interval,
+ }
+ }
+ }
+
+ if len(se.Endpoints) == 1 {
+ //Scenario 1: Only one endpoint present and is remote - outlier detection with 33% ejection (protection against zone specific issues)
+ //Making the %33 as 34% will eject 2 endpoints, %33 will eject one
+ outlierDetection.MaxEjectionPercent = 33
+ } else {
+ //Scenario 2: Two endpoints present each with different locality and both remote - outlier detection with 100% ejection
+ //Scenario 3: Two endpoints present each with different locality with one local and other remote - outlier detection with 100% ejection
+ //for service entries with more than 2 endpoints eject 100% to failover to other endpoint within or outside the same region
+ outlierDetection.MaxEjectionPercent = 100
+ }
+ return outlierDetection
+}
+
+func getOutlierDetectionSkeleton(disableDefaultAutomaticFailover bool) *networkingV1Alpha3.OutlierDetection {
+ if disableDefaultAutomaticFailover {
+ return &networkingV1Alpha3.OutlierDetection{
+ // The default Consecutive5XXErrors is set to 5 in envoy, setting to 0 disables 5XX error outlier detection so that ConsecutiveGatewayErrors rule can get evaluated
+ Consecutive_5XxErrors: &wrappers.UInt32Value{Value: 0},
+ }
+ }
+ return &networkingV1Alpha3.OutlierDetection{
+ BaseEjectionTime: &duration.Duration{Seconds: DefaultBaseEjectionTime},
+ ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: DefaultConsecutiveGatewayErrors},
+ // The default Consecutive5XXErrors is set to 5 in envoy, setting to 0 disables 5XX error outlier detection so that ConsecutiveGatewayErrors rule can get evaluated
+ Consecutive_5XxErrors: &wrappers.UInt32Value{Value: DefaultConsecutive5xxErrors},
+ Interval: &duration.Duration{Seconds: DefaultInterval},
+ }
+}
+
+func setDefaultValuesOfOutlierDetection(outlierDetection *networkingV1Alpha3.OutlierDetection) {
+ outlierDetection.BaseEjectionTime = &duration.Duration{Seconds: DefaultBaseEjectionTime}
+ outlierDetection.ConsecutiveGatewayErrors = &wrappers.UInt32Value{Value: DefaultConsecutiveGatewayErrors}
+ outlierDetection.Interval = &duration.Duration{Seconds: DefaultInterval}
+}
+
+func (dh *DestinationRuleHandler) Added(ctx context.Context, obj *v1alpha3.DestinationRule) error {
+ if commonUtil.IsAdmiralReadOnly() {
+ log.Infof(LogFormat, "Add", "DestinationRule", obj.Name, dh.ClusterID, "Admiral is in read-only mode. Skipping resource from namespace="+obj.Namespace)
+ return nil
+ }
+ if IgnoreIstioResource(obj.Spec.ExportTo, obj.Annotations, obj.Namespace) {
+ log.Infof(LogFormat, "Add", "DestinationRule", obj.Name, dh.ClusterID, "Skipping resource from namespace="+obj.Namespace)
+ if len(obj.Annotations) > 0 && obj.Annotations[common.AdmiralIgnoreAnnotation] == "true" {
+ log.Infof(LogFormat, "admiralIoIgnoreAnnotationCheck", "DestinationRule", obj.Name, dh.ClusterID, "Value=true namespace="+obj.Namespace)
+ }
+ return nil
+ }
+ txId := common.FetchTxIdOrGenNew(ctx)
+ ctxLogger := log.WithFields(log.Fields{
+ "type": "destinationRule",
+ "txId": txId,
+ "op": "Add",
+ })
+ return handleDestinationRuleEvent(ctxLogger, ctx, obj, dh, common.Add, common.DestinationRuleResourceType)
+}
+
+func (dh *DestinationRuleHandler) Updated(ctx context.Context, obj *v1alpha3.DestinationRule) error {
+ if commonUtil.IsAdmiralReadOnly() {
+ log.Infof(LogFormat, "Update", "DestinationRule", obj.Name, dh.ClusterID, "Admiral is in read-only mode. Skipping resource from namespace="+obj.Namespace)
+ return nil
+ }
+ if IgnoreIstioResource(obj.Spec.ExportTo, obj.Annotations, obj.Namespace) {
+ log.Infof(LogFormat, "Update", "DestinationRule", obj.Name, dh.ClusterID, "Skipping resource from namespace="+obj.Namespace)
+ if len(obj.Annotations) > 0 && obj.Annotations[common.AdmiralIgnoreAnnotation] == "true" {
+ log.Infof(LogFormat, "admiralIoIgnoreAnnotationCheck", "DestinationRule", obj.Name, dh.ClusterID, "Value=true namespace="+obj.Namespace)
+ }
+ return nil
+ }
+ txId := common.FetchTxIdOrGenNew(ctx)
+ ctxLogger := log.WithFields(log.Fields{
+ "type": "destinationRule",
+ "txId": txId,
+ "op": "Update",
+ })
+ return handleDestinationRuleEvent(ctxLogger, ctx, obj, dh, common.Update, common.DestinationRuleResourceType)
+}
+
+func (dh *DestinationRuleHandler) Deleted(ctx context.Context, obj *v1alpha3.DestinationRule) error {
+ if commonUtil.IsAdmiralReadOnly() {
+ log.Infof(LogFormat, "Delete", "DestinationRule", obj.Name, dh.ClusterID, "Admiral is in read-only mode. Skipping resource from namespace="+obj.Namespace)
+ return nil
+ }
+ if IgnoreIstioResource(obj.Spec.ExportTo, obj.Annotations, obj.Namespace) {
+ log.Infof(LogFormat, "Delete", "DestinationRule", obj.Name, dh.ClusterID, "Skipping resource from namespace="+obj.Namespace)
+ if len(obj.Annotations) > 0 && obj.Annotations[common.AdmiralIgnoreAnnotation] == "true" {
+ log.Infof(LogFormat, "admiralIoIgnoreAnnotationCheck", "DestinationRule", obj.Name, dh.ClusterID, "Value=true namespace="+obj.Namespace)
+ }
+ return nil
+ }
+ txId := common.FetchTxIdOrGenNew(ctx)
+ ctxLogger := log.WithFields(log.Fields{
+ "type": "destinationRule",
+ "txId": txId,
+ "op": "Delete",
+ })
+ return handleDestinationRuleEvent(ctxLogger, ctx, obj, dh, common.Delete, common.DestinationRuleResourceType)
+}
+
// handleDestinationRuleEvent propagates a DestinationRule event to the sync
// namespace of other clusters. If the DR's host has dependent clusters in the
// cname cache, the event is replicated to those clusters (the origin cluster
// is included so deletes apply there too, but add/update skips it); otherwise
// the DR is copied `as is` to every other known cluster.
func handleDestinationRuleEvent(ctxLogger *log.Entry, ctx context.Context, obj *v1alpha3.DestinationRule, dh *DestinationRuleHandler, event common.Event, resourceType common.ResourceType) error {
	var (
		//nolint
		destinationRule      = obj.Spec
		clusterId            = dh.ClusterID
		syncNamespace        = common.GetSyncNamespace()
		r                    = dh.RemoteRegistry
		dependentClusters    = r.AdmiralCache.CnameDependentClusterCache.Get(destinationRule.Host).Copy()
		allDependentClusters = make(map[string]string)
	)

	if len(dependentClusters) > 0 {
		log.Infof(LogFormat, "Event", resourceType, obj.Name, clusterId, "Processing")
		util.MapCopy(allDependentClusters, dependentClusters)
		// Include the origin cluster so delete events are applied there as well.
		allDependentClusters[clusterId] = clusterId
		for _, dependentCluster := range allDependentClusters {
			rc := r.GetRemoteController(dependentCluster)
			if rc == nil {
				return fmt.Errorf(LogFormat, "Event", resourceType, obj.Name, dependentCluster, "remote controller not initialized for cluster")
			}
			if rc.DestinationRuleController == nil {
				return fmt.Errorf(LogFormat, "Event", resourceType, obj.Name, dependentCluster, "DestinationRule controller not initialized for cluster")
			}
			if event == common.Delete {
				err := rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(syncNamespace).Delete(ctx, obj.Name, metaV1.DeleteOptions{})
				if err != nil {
					if k8sErrors.IsNotFound(err) {
						// A missing DR on delete is not an error.
						log.Infof(LogFormat, "Delete", resourceType, obj.Name, clusterId, "Either DestinationRule was already deleted, or it never existed")
					} else {
						log.Errorf(LogErrFormat, "Delete", resourceType, obj.Name, clusterId, err)
					}
				} else {
					log.Infof(LogFormat, "Delete", resourceType, obj.Name, clusterId, "Success")
				}
			} else {
				exist, _ := rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(syncNamespace).Get(ctx, obj.Name, metaV1.GetOptions{})
				//copy destination rule only to other clusters
				if dependentCluster != clusterId {
					addUpdateDestinationRule(ctxLogger, ctx, obj, exist, syncNamespace, rc, r)
				}
			}
		}
		return nil
	} else {
		log.Infof(LogFormat, "Event", resourceType, obj.Name, clusterId, "No dependent clusters found")
	}

	//copy the DestinationRule `as is` if they are not generated by Admiral
	remoteClusters := r.GetClusterIds()
	for _, ClusterID := range remoteClusters {
		if ClusterID != clusterId {
			rc := r.GetRemoteController(ClusterID)
			if rc == nil {
				return fmt.Errorf(LogFormat, "Event", resourceType, obj.Name, ClusterID, "remote controller not initialized for cluster")
			}
			if rc.DestinationRuleController == nil {
				return fmt.Errorf(LogFormat, "Event", resourceType, obj.Name, ClusterID, "DestinationRule controller not initialized for cluster")
			}
			if event == common.Delete {
				err := rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(syncNamespace).Delete(ctx, obj.Name, metaV1.DeleteOptions{})
				if err != nil {
					if k8sErrors.IsNotFound(err) {
						log.Infof(LogFormat, "Delete", "DestinationRule", obj.Name, clusterId, "Either DestinationRule was already deleted, or it never existed")
					} else {
						log.Errorf(LogErrFormat, "Delete", "DestinationRule", obj.Name, clusterId, err)
					}
				} else {
					log.Infof(LogFormat, "Delete", "DestinationRule", obj.Name, clusterId, "Success")
				}

			} else {
				exist, _ := rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(syncNamespace).Get(ctx, obj.Name, metaV1.GetOptions{})
				addUpdateDestinationRule(ctxLogger, ctx, obj, exist, syncNamespace, rc, r)
			}
		}
	}
	return nil
}
+
+// addUpdateDestinationRule creates the given DestinationRule in the target
+// namespace, or updates the existing one in place when it is already present.
+// The input is copied before mutation, stamped with a created-by annotation,
+// and — when exportTo is enabled and not already set — given a computed
+// exportTo list. Returns the error from the create/update (after the
+// conflict-retry helper has run), or nil on success.
+func addUpdateDestinationRule(
+	ctxLogger *log.Entry,
+	ctx context.Context,
+	dr *v1alpha3.DestinationRule,
+	exist *v1alpha3.DestinationRule,
+	namespace string,
+	rc *RemoteController, rr *RemoteRegistry) error {
+	var err error
+	var op string
+	var drAlreadyExists bool
+	obj := copyDestinationRule(dr)
+	if obj.Annotations == nil {
+		obj.Annotations = map[string]string{}
+	}
+	obj.Annotations["app.kubernetes.io/created-by"] = "admiral"
+	// At this step we check to make sure the DR does not already have an exportTo value before setting the exportTo value
+	// This is because there are two ways to enter this function
+	// 1. Through modifyse, in which case obj will already have exportTo filled and we don't want to do a repeat call of getSortedDependentNamespaces
+	// 2. Through the flow where we copy customer created DRs to other clusters, in which case it shouldn't have exportTo set and we need to calculate it here.
+	if common.EnableExportTo(obj.Spec.Host) && len(obj.Spec.ExportTo) == 0 {
+		sortedDependentNamespaces := getSortedDependentNamespaces(rr.AdmiralCache, obj.Spec.Host, rc.ClusterID, ctxLogger)
+		obj.Spec.ExportTo = sortedDependentNamespaces
+	}
+	drIsNew := exist == nil || exist.Name == "" || exist.Spec.Host == ""
+	if drIsNew {
+		obj.Namespace = namespace
+		obj.ResourceVersion = ""
+		_, err = rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(namespace).Create(ctx, obj, metaV1.CreateOptions{})
+		if err != nil {
+			if !k8sErrors.IsAlreadyExists(err) {
+				return err
+			}
+			// op=%v name=%v namespace=%s cluster=%s message=%v
+			ctxLogger.Infof(common.CtxLogFormat, "addUpdateDestinationRule", obj.Name, obj.Namespace, rc.ClusterID, "object already exists. Will update instead")
+			drAlreadyExists = true
+			err = nil
+		}
+		// BUG FIX: previously a successful Create returned early through the
+		// non-AlreadyExists branch (IsAlreadyExists(nil) is false), skipping
+		// the "Success" log below. Fall through instead so Add is logged.
+		op = "Add"
+	}
+	if !drIsNew || drAlreadyExists {
+		if drAlreadyExists {
+			exist, err = rc.DestinationRuleController.IstioClient.
+				NetworkingV1alpha3().
+				DestinationRules(namespace).
+				Get(ctx, obj.Name, metav1.GetOptions{})
+			if err != nil {
+				// when there is an error, assign exist to obj,
+				// which will fail in the update operation, but will be retried
+				// in the retry logic
+				exist = obj
+				ctxLogger.Warnf(common.CtxLogFormat, "Update", exist.Name, exist.Namespace, rc.ClusterID, "got error on fetching destinationrule, will retry updating")
+			}
+		}
+		exist.Labels = obj.Labels
+		exist.Annotations = obj.Annotations
+		//nolint
+		exist.Spec = obj.Spec
+		op = "Update"
+		_, err = rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(namespace).Update(ctx, exist, metaV1.UpdateOptions{})
+		if err != nil {
+			err = retryUpdatingDR(ctxLogger, ctx, exist, namespace, rc, err)
+		}
+	}
+
+	if err != nil {
+		ctxLogger.Errorf(LogErrFormat, op, "DestinationRule", obj.Name, rc.ClusterID, err)
+		return err
+	}
+	ctxLogger.Infof(LogFormat, op, "DestinationRule", obj.Name, rc.ClusterID, "Success")
+	return nil
+}
+
+// deleteDestinationRule deletes the given DestinationRule from the cluster
+// managed by rc. A nil input is a no-op. A NotFound response is treated as
+// success (the object was already gone); any other delete error is returned.
+func deleteDestinationRule(ctx context.Context, exist *v1alpha3.DestinationRule, namespace string, rc *RemoteController) error {
+	if exist == nil {
+		return nil
+	}
+	err := rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(namespace).Delete(ctx, exist.Name, metaV1.DeleteOptions{})
+	if err == nil {
+		log.Infof(LogFormat, "Delete", "DestinationRule", exist.Name, rc.ClusterID, "Success")
+		return nil
+	}
+	if k8sErrors.IsNotFound(err) {
+		log.Infof(LogFormat, "Delete", "DestinationRule", exist.Name, rc.ClusterID, "Either DestinationRule was already deleted, or it never existed")
+		return nil
+	}
+	log.Errorf(LogErrFormat, "Delete", "DestinationRule", exist.Name, rc.ClusterID, err)
+	return err
+}
+
+// createDestinationRuleSkeleton wraps the given DestinationRule spec in a
+// client-go object with the supplied name and namespace set on its metadata.
+// nolint
+func createDestinationRuleSkeleton(dr networkingV1Alpha3.DestinationRule, name string, namespace string) *v1alpha3.DestinationRule {
+	skeleton := &v1alpha3.DestinationRule{
+		ObjectMeta: metaV1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+		},
+		Spec: dr,
+	}
+	return skeleton
+}
+
+// retryUpdatingDR retries the update of a DestinationRule after the initial
+// update failed with a resourceVersion conflict. On each attempt it re-fetches
+// the latest copy from the cluster, transplants the desired spec, labels and
+// annotations onto it, and updates that fresh copy. Non-conflict errors are
+// logged and returned without retrying. Returns nil once an update succeeds,
+// otherwise the original error after numRetries attempts.
+func retryUpdatingDR(
+	ctxLogger *log.Entry, ctx context.Context,
+	exist *v1alpha3.DestinationRule, namespace string,
+	rc *RemoteController, err error) error {
+	numRetries := 5
+	if err == nil {
+		return nil
+	}
+	if !k8sErrors.IsConflict(err) {
+		ctxLogger.Errorf(common.CtxLogFormat, "Update", exist.Name, exist.Namespace, rc.ClusterID, "Not retrying error="+err.Error())
+		return err
+	}
+	for i := 1; i <= numRetries; i++ {
+		ctxLogger.Errorf(common.CtxLogFormat, "Update",
+			exist.Name, exist.Namespace, rc.ClusterID, fmt.Sprintf("error=%v retrying=%d/%d", err.Error(), i, numRetries))
+		// fetch the latest copy so the update carries a current resourceVersion
+		updatedDR, getErr := rc.DestinationRuleController.IstioClient.
+			NetworkingV1alpha3().
+			DestinationRules(namespace).
+			Get(ctx, exist.Name, metav1.GetOptions{})
+		if getErr != nil {
+			ctxLogger.Errorf(common.CtxLogFormat, "Update",
+				exist.Name, exist.Namespace, rc.ClusterID, fmt.Sprintf("error=%v", getErr.Error()))
+			continue
+		}
+		ctxLogger.Infof(common.CtxLogFormat, "Update", exist.Name, exist.Namespace, rc.ClusterID,
+			fmt.Sprintf("existingResourceVersion=%s resourceVersionUsedForUpdate=%s", updatedDR.ResourceVersion, exist.ResourceVersion))
+		//nolint
+		updatedDR.Spec = exist.Spec
+		updatedDR.Labels = exist.Labels
+		// BUG FIX: annotations were previously copied from exist.Labels
+		updatedDR.Annotations = exist.Annotations
+		// BUG FIX: update the freshly fetched object. Updating the stale
+		// `exist` object re-sent the old resourceVersion and hit the same
+		// conflict on every retry, making the loop useless.
+		_, updateErr := rc.DestinationRuleController.IstioClient.
+			NetworkingV1alpha3().
+			DestinationRules(namespace).
+			Update(ctx, updatedDR, metaV1.UpdateOptions{})
+		if updateErr == nil {
+			return nil
+		}
+	}
+	return err
+}
From 8b9b5c83f3bd2a62c49d35ea0663bdeb76f035a6 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:52:12 -0700
Subject: [PATCH 048/235] add file
admiral/pkg/clusters/destinationrule_handler_test.go
---
.../clusters/destinationrule_handler_test.go | 1774 +++++++++++++++++
1 file changed, 1774 insertions(+)
create mode 100644 admiral/pkg/clusters/destinationrule_handler_test.go
diff --git a/admiral/pkg/clusters/destinationrule_handler_test.go b/admiral/pkg/clusters/destinationrule_handler_test.go
new file mode 100644
index 00000000..44802048
--- /dev/null
+++ b/admiral/pkg/clusters/destinationrule_handler_test.go
@@ -0,0 +1,1774 @@
+package clusters
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+
+ commonUtil "github.com/istio-ecosystem/admiral/admiral/pkg/util"
+ log "github.com/sirupsen/logrus"
+
+ "github.com/golang/protobuf/ptypes/duration"
+ "github.com/golang/protobuf/ptypes/wrappers"
+ cmp "github.com/google/go-cmp/cmp"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/istio"
+ "github.com/stretchr/testify/assert"
+ "google.golang.org/protobuf/testing/protocmp"
+ "istio.io/api/networking/v1alpha3"
+ networkingV1Alpha3 "istio.io/api/networking/v1alpha3"
+ v1alpha32 "istio.io/client-go/pkg/apis/networking/v1alpha3"
+ istioFake "istio.io/client-go/pkg/clientset/versioned/fake"
+ k8sErrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ k8stesting "k8s.io/client-go/testing"
+
+ fakenetworkingv1alpha3 "istio.io/client-go/pkg/clientset/versioned/typed/networking/v1alpha3/fake"
+
+ metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// TestRetryUpdatingDR verifies retryUpdatingDR against a fake Istio client for
+// three cases: a nil input error (no-op), a conflict error (retried until the
+// update succeeds), and a non-conflict error (returned as-is, no retry).
+func TestRetryUpdatingDR(t *testing.T) {
+	// Create a mock logger
+	logger := log.New()
+	admiralParams := common.AdmiralParams{
+		LabelSet:      &common.LabelSet{},
+		SyncNamespace: "test-sync-ns",
+	}
+	common.ResetSync()
+	common.InitializeConfig(admiralParams)
+	//Create a context with timeout for testing
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	admiralParams = common.GetAdmiralParams()
+	log.Info("admiralSyncNS: " + admiralParams.SyncNamespace)
+	// Create mock objects: a DestinationRule pre-seeded into the fake client
+	// with a known resourceVersion so the assertions below can track it.
+	exist := &v1alpha32.DestinationRule{
+		ObjectMeta: metaV1.ObjectMeta{
+			Namespace: admiralParams.SyncNamespace,
+			Name:      "test-serviceentry-seRetriesTest",
+			Annotations: map[string]string{
+				"admiral.istio.io/ignore": "true",
+			},
+			ResourceVersion: "12345",
+		},
+		Spec: v1alpha3.DestinationRule{
+			Host: "test-host",
+		},
+	}
+	namespace := admiralParams.SyncNamespace
+	rc := &RemoteController{
+		DestinationRuleController: &istio.DestinationRuleController{
+			IstioClient: istioFake.NewSimpleClientset(),
+		},
+	}
+
+	_, err := rc.DestinationRuleController.IstioClient.
+		NetworkingV1alpha3().
+		DestinationRules(namespace).
+		Create(ctx, exist, metaV1.CreateOptions{})
+	if err != nil {
+		t.Error(err)
+	}
+	errConflict := k8sErrors.NewConflict(schema.GroupResource{}, "", nil)
+	errOther := errors.New("Some other error")
+
+	// Test when err is nil: retryUpdatingDR should be a no-op
+	err = retryUpdatingDR(logger.WithField("test", "success"), ctx, exist, namespace, rc, nil)
+	if err != nil {
+		t.Errorf("Expected nil error, got %v", err)
+	}
+
+	// Fetch the DestinationRule (not an SE despite the variable name); a nil
+	// input error means no update was attempted, so it should still have the
+	// original resource version.
+	se, err := rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(namespace).Get(ctx, exist.Name, metaV1.GetOptions{})
+	assert.Nil(t, err)
+	assert.Equal(t, "12345", se.ObjectMeta.ResourceVersion)
+
+	// Test when err is a conflict error: the retry loop should run and succeed
+	err = retryUpdatingDR(logger.WithField("test", "conflict"), ctx, exist, namespace, rc, errConflict)
+	if err != nil {
+		t.Errorf("Expected nil error, got %v", err)
+	}
+
+	// Fetch the DR again; the resourceVersion is expected to still read
+	// "12345" — presumably the fake client does not bump resourceVersion on
+	// update (NOTE(review): confirm against the client-go fake tracker).
+	se, err = rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(admiralParams.SyncNamespace).Get(ctx, exist.Name, metaV1.GetOptions{})
+	assert.Nil(t, err)
+	assert.Equal(t, "12345", se.ObjectMeta.ResourceVersion)
+
+	// Test when err is a non-conflict error: it must be returned unretried
+	err = retryUpdatingDR(logger.WithField("test", "error"), ctx, exist, namespace, rc, errOther)
+	if err == nil {
+		t.Error("Expected non-nil error, got nil")
+	}
+}
+
+// TestGetDestinationRule covers getDestinationRule for the GTP-driven cases:
+// nil GTP, empty locality, a topology policy, and a failover policy. Expected
+// DRs are compared structurally via protocmp.
+func TestGetDestinationRule(t *testing.T) {
+	admiralParams := common.AdmiralParams{
+		LabelSet:                  &common.LabelSet{},
+		SyncNamespace:             "test-sync-ns",
+		DefaultWarmupDurationSecs: 45,
+	}
+	common.ResetSync()
+	common.InitializeConfig(admiralParams)
+	ctxLogger := log.WithFields(log.Fields{
+		"type": "destinationRule",
+	})
+	//Do setup here
+	outlierDetection := &v1alpha3.OutlierDetection{
+		BaseEjectionTime:         &duration.Duration{Seconds: 300},
+		ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: 50},
+		Consecutive_5XxErrors:    &wrappers.UInt32Value{Value: 0},
+		Interval:                 &duration.Duration{Seconds: 60},
+		MaxEjectionPercent:       100,
+	}
+	// baseline traffic policy expected when no GTP applies
+	mTLS := &v1alpha3.TrafficPolicy{
+		Tls: &v1alpha3.ClientTLSSettings{
+			Mode: v1alpha3.ClientTLSSettings_ISTIO_MUTUAL,
+		},
+		OutlierDetection: outlierDetection,
+		ConnectionPool: &v1alpha3.ConnectionPoolSettings{
+			Http: &v1alpha3.ConnectionPoolSettings_HTTPSettings{
+				MaxRequestsPerConnection: common.MaxRequestsPerConnection(),
+			},
+		},
+		LoadBalancer: &v1alpha3.LoadBalancerSettings{
+			LbPolicy: &v1alpha3.LoadBalancerSettings_Simple{
+				Simple: v1alpha3.LoadBalancerSettings_LEAST_REQUEST,
+			},
+			WarmupDurationSecs: &duration.Duration{Seconds: 45},
+		},
+	}
+
+	se := &v1alpha3.ServiceEntry{Hosts: []string{"qa.myservice.global"}, Endpoints: []*v1alpha3.WorkloadEntry{
+		{Address: "east.com", Locality: "us-east-2"}, {Address: "west.com", Locality: "us-west-2"},
+	}}
+	noGtpDr := v1alpha3.DestinationRule{
+		Host:          "qa.myservice.global",
+		TrafficPolicy: mTLS,
+	}
+
+	// expected DR for a topology GTP: locality LB setting present but empty
+	basicGtpDr := v1alpha3.DestinationRule{
+		Host: "qa.myservice.global",
+		TrafficPolicy: &v1alpha3.TrafficPolicy{
+			Tls: &v1alpha3.ClientTLSSettings{Mode: v1alpha3.ClientTLSSettings_ISTIO_MUTUAL},
+			LoadBalancer: &v1alpha3.LoadBalancerSettings{
+				LbPolicy:           &v1alpha3.LoadBalancerSettings_Simple{Simple: v1alpha3.LoadBalancerSettings_LEAST_REQUEST},
+				LocalityLbSetting:  &v1alpha3.LocalityLoadBalancerSetting{},
+				WarmupDurationSecs: &duration.Duration{Seconds: 45},
+			},
+			OutlierDetection: outlierDetection,
+			ConnectionPool: &v1alpha3.ConnectionPoolSettings{
+				Http: &v1alpha3.ConnectionPoolSettings_HTTPSettings{
+					MaxRequestsPerConnection: common.MaxRequestsPerConnection(),
+				},
+			},
+		},
+	}
+
+	// expected DR for a failover GTP: 100% of traffic pinned to us-west-2
+	failoverGtpDr := v1alpha3.DestinationRule{
+		Host: "qa.myservice.global",
+		TrafficPolicy: &v1alpha3.TrafficPolicy{
+			Tls: &v1alpha3.ClientTLSSettings{Mode: v1alpha3.ClientTLSSettings_ISTIO_MUTUAL},
+			LoadBalancer: &v1alpha3.LoadBalancerSettings{
+				LbPolicy: &v1alpha3.LoadBalancerSettings_Simple{Simple: v1alpha3.LoadBalancerSettings_LEAST_REQUEST},
+				LocalityLbSetting: &v1alpha3.LocalityLoadBalancerSetting{
+					Distribute: []*v1alpha3.LocalityLoadBalancerSetting_Distribute{
+						{
+							From: "uswest2/*",
+							To:   map[string]uint32{"us-west-2": 100},
+						},
+					},
+				},
+				WarmupDurationSecs: &duration.Duration{Seconds: 45},
+			},
+			OutlierDetection: outlierDetection,
+			ConnectionPool: &v1alpha3.ConnectionPoolSettings{
+				Http: &v1alpha3.ConnectionPoolSettings_HTTPSettings{
+					MaxRequestsPerConnection: common.MaxRequestsPerConnection(),
+				},
+			},
+		},
+	}
+
+	topologyGTPPolicy := &model.TrafficPolicy{
+		LbType: model.TrafficPolicy_TOPOLOGY,
+		Target: []*model.TrafficGroup{
+			{
+				Region: "us-west-2",
+				Weight: 100,
+			},
+		},
+	}
+
+	failoverGTPPolicy := &model.TrafficPolicy{
+		LbType: model.TrafficPolicy_FAILOVER,
+		Target: []*model.TrafficGroup{
+			{
+				Region: "us-west-2",
+				Weight: 100,
+			},
+			{
+				Region: "us-east-2",
+				Weight: 0,
+			},
+		},
+	}
+
+	//Struct of test case info. Name is required.
+	testCases := []struct {
+		name            string
+		se              *v1alpha3.ServiceEntry
+		locality        string
+		gtpPolicy       *model.TrafficPolicy
+		destinationRule *v1alpha3.DestinationRule
+	}{
+		{
+			name:            "Should handle a nil GTP",
+			se:              se,
+			locality:        "uswest2",
+			gtpPolicy:       nil,
+			destinationRule: &noGtpDr,
+		},
+		{
+			name:            "Should return default DR with empty locality",
+			se:              se,
+			locality:        "",
+			gtpPolicy:       failoverGTPPolicy,
+			destinationRule: &noGtpDr,
+		},
+		{
+			name:            "Should handle a topology GTP",
+			se:              se,
+			locality:        "uswest2",
+			gtpPolicy:       topologyGTPPolicy,
+			destinationRule: &basicGtpDr,
+		},
+		{
+			name:            "Should handle a failover GTP",
+			se:              se,
+			locality:        "uswest2",
+			gtpPolicy:       failoverGTPPolicy,
+			destinationRule: &failoverGtpDr,
+		},
+	}
+
+	//Run the test for every provided case
+	for _, c := range testCases {
+		t.Run(c.name, func(t *testing.T) {
+			result := getDestinationRule(c.se, c.locality, c.gtpPolicy, nil, nil, nil, common.GTP, ctxLogger, admiral.Add)
+			if !cmp.Equal(result, c.destinationRule, protocmp.Transform()) {
+				t.Fatalf("DestinationRule Mismatch. Diff: %v", cmp.Diff(result, c.destinationRule, protocmp.Transform()))
+			}
+		})
+	}
+}
+
+// TestGetDestinationRuleActivePassive exercises getDestinationRule with the
+// Active-Passive feature flag enabled, across single- and multi-region
+// service entries, cached DRs, GTP policies (with and without targets), and
+// Add/Update/Delete events for Deployment and GTP resources.
+func TestGetDestinationRuleActivePassive(t *testing.T) {
+	ctxLogger := log.WithFields(log.Fields{
+		"type": "destinationRule",
+	})
+	// Enable Active-Passive
+	admiralParams := common.AdmiralParams{
+		CacheReconcileDuration: 10 * time.Minute,
+		LabelSet: &common.LabelSet{
+			EnvKey: "env",
+		},
+		DefaultWarmupDurationSecs: 45,
+	}
+	admiralParams.EnableActivePassive = true
+	common.ResetSync()
+	common.InitializeConfig(admiralParams)
+
+	// expected policy when the GTP is deleted: no locality distribution at all
+	mTLSWestNoDistribution := &v1alpha3.TrafficPolicy{
+		Tls: &v1alpha3.ClientTLSSettings{
+			Mode: v1alpha3.ClientTLSSettings_ISTIO_MUTUAL,
+		},
+		ConnectionPool: &v1alpha3.ConnectionPoolSettings{
+			Http: &v1alpha3.ConnectionPoolSettings_HTTPSettings{
+				MaxRequestsPerConnection: common.MaxRequestsPerConnection(),
+			},
+		},
+		LoadBalancer: &v1alpha3.LoadBalancerSettings{
+			LbPolicy: &v1alpha3.LoadBalancerSettings_Simple{
+				Simple: v1alpha3.LoadBalancerSettings_LEAST_REQUEST,
+			},
+			WarmupDurationSecs: &duration.Duration{Seconds: 45},
+		},
+	}
+
+	// Active-Passive policy: all traffic pinned to us-west-2
+	mTLSWest := &v1alpha3.TrafficPolicy{
+		Tls: &v1alpha3.ClientTLSSettings{
+			Mode: v1alpha3.ClientTLSSettings_ISTIO_MUTUAL,
+		},
+		ConnectionPool: &v1alpha3.ConnectionPoolSettings{
+			Http: &v1alpha3.ConnectionPoolSettings_HTTPSettings{
+				MaxRequestsPerConnection: common.MaxRequestsPerConnection(),
+			},
+		},
+		LoadBalancer: &v1alpha3.LoadBalancerSettings{
+			LbPolicy: &v1alpha3.LoadBalancerSettings_Simple{
+				Simple: v1alpha3.LoadBalancerSettings_LEAST_REQUEST,
+			},
+			LocalityLbSetting: &v1alpha3.LocalityLoadBalancerSetting{
+				Distribute: []*v1alpha3.LocalityLoadBalancerSetting_Distribute{
+					{
+						From: "*",
+						To:   map[string]uint32{"us-west-2": 100},
+					},
+				},
+			},
+			WarmupDurationSecs: &duration.Duration{Seconds: 45},
+		},
+	}
+
+	// Active-Active style policy: no locality distribution
+	mTLSAAWest := &v1alpha3.TrafficPolicy{
+		Tls: &v1alpha3.ClientTLSSettings{
+			Mode: v1alpha3.ClientTLSSettings_ISTIO_MUTUAL,
+		},
+		ConnectionPool: &v1alpha3.ConnectionPoolSettings{
+			Http: &v1alpha3.ConnectionPoolSettings_HTTPSettings{
+				MaxRequestsPerConnection: common.MaxRequestsPerConnection(),
+			},
+		},
+		LoadBalancer: &v1alpha3.LoadBalancerSettings{
+			LbPolicy: &v1alpha3.LoadBalancerSettings_Simple{
+				Simple: v1alpha3.LoadBalancerSettings_LEAST_REQUEST,
+			},
+			WarmupDurationSecs: &duration.Duration{Seconds: 45},
+		},
+	}
+
+	// expected policy after a 70/30 failover GTP is applied
+	mTLSWestAfterGTP := &v1alpha3.TrafficPolicy{
+		Tls: &v1alpha3.ClientTLSSettings{
+			Mode: v1alpha3.ClientTLSSettings_ISTIO_MUTUAL,
+		},
+		ConnectionPool: &v1alpha3.ConnectionPoolSettings{
+			Http: &v1alpha3.ConnectionPoolSettings_HTTPSettings{
+				MaxRequestsPerConnection: common.MaxRequestsPerConnection(),
+			},
+		},
+		LoadBalancer: &v1alpha3.LoadBalancerSettings{
+			LbPolicy: &v1alpha3.LoadBalancerSettings_Simple{
+				Simple: v1alpha3.LoadBalancerSettings_LEAST_REQUEST,
+			},
+			LocalityLbSetting: &v1alpha3.LocalityLoadBalancerSetting{
+				Distribute: []*v1alpha3.LocalityLoadBalancerSetting_Distribute{
+					{
+						From: "us-west-2/*",
+						To:   map[string]uint32{"us-west-2": 70, "us-east-2": 30},
+					},
+				},
+			},
+			WarmupDurationSecs: &duration.Duration{Seconds: 45},
+		},
+	}
+
+	mTLSSingleEndpointWest := &v1alpha3.TrafficPolicy{
+		Tls: &v1alpha3.ClientTLSSettings{
+			Mode: v1alpha3.ClientTLSSettings_ISTIO_MUTUAL,
+		},
+		ConnectionPool: &v1alpha3.ConnectionPoolSettings{
+			Http: &v1alpha3.ConnectionPoolSettings_HTTPSettings{
+				MaxRequestsPerConnection: common.MaxRequestsPerConnection(),
+			},
+		},
+		LoadBalancer: &v1alpha3.LoadBalancerSettings{
+			LbPolicy: &v1alpha3.LoadBalancerSettings_Simple{
+				Simple: v1alpha3.LoadBalancerSettings_LEAST_REQUEST,
+			},
+			LocalityLbSetting: &v1alpha3.LocalityLoadBalancerSetting{
+				Distribute: []*v1alpha3.LocalityLoadBalancerSetting_Distribute{
+					{
+						From: "*",
+						To:   map[string]uint32{"us-west-2": 100},
+					},
+				},
+			},
+			WarmupDurationSecs: &duration.Duration{Seconds: 45},
+		},
+	}
+
+	mTLSEast := &v1alpha3.TrafficPolicy{
+		Tls: &v1alpha3.ClientTLSSettings{
+			Mode: v1alpha3.ClientTLSSettings_ISTIO_MUTUAL,
+		},
+		ConnectionPool: &v1alpha3.ConnectionPoolSettings{
+			Http: &v1alpha3.ConnectionPoolSettings_HTTPSettings{
+				MaxRequestsPerConnection: common.MaxRequestsPerConnection(),
+			},
+		},
+		LoadBalancer: &v1alpha3.LoadBalancerSettings{
+			LbPolicy: &v1alpha3.LoadBalancerSettings_Simple{
+				Simple: v1alpha3.LoadBalancerSettings_LEAST_REQUEST,
+			},
+			LocalityLbSetting: &v1alpha3.LocalityLoadBalancerSetting{
+				Distribute: []*v1alpha3.LocalityLoadBalancerSetting_Distribute{
+					{
+						From: "*",
+						To:   map[string]uint32{"us-east-2": 100},
+					},
+				},
+			},
+			WarmupDurationSecs: &duration.Duration{Seconds: 45},
+		},
+	}
+
+	mTLSEastNoLocalityLbSetting := &v1alpha3.TrafficPolicy{
+		Tls: &v1alpha3.ClientTLSSettings{
+			Mode: v1alpha3.ClientTLSSettings_ISTIO_MUTUAL,
+		},
+		ConnectionPool: &v1alpha3.ConnectionPoolSettings{
+			Http: &v1alpha3.ConnectionPoolSettings_HTTPSettings{
+				MaxRequestsPerConnection: common.MaxRequestsPerConnection(),
+			},
+		},
+		LoadBalancer: &v1alpha3.LoadBalancerSettings{
+			LbPolicy: &v1alpha3.LoadBalancerSettings_Simple{
+				Simple: v1alpha3.LoadBalancerSettings_LEAST_REQUEST,
+			},
+			WarmupDurationSecs: &duration.Duration{Seconds: 45},
+		},
+	}
+
+	// SE fixtures: one with a single west endpoint, one spanning both regions
+	seSingleEndpoint := &v1alpha3.ServiceEntry{
+		Hosts: []string{"qa.myservice.global"},
+		Endpoints: []*v1alpha3.WorkloadEntry{
+			{Address: "west.com", Locality: "us-west-2"},
+		}}
+
+	noGtpDrSingleEndpoint := v1alpha3.DestinationRule{
+		Host:          "qa.myservice.global",
+		TrafficPolicy: mTLSSingleEndpointWest,
+	}
+
+	noGtpDrInCacheSingleEndpointWest := v1alpha32.DestinationRule{
+		Spec: v1alpha3.DestinationRule{
+			Host:          "qa.myservice.global",
+			TrafficPolicy: mTLSWest,
+		},
+	}
+
+	noGtpAADrInCacheSingleEndpointWest := v1alpha32.DestinationRule{
+		Spec: v1alpha3.DestinationRule{
+			Host:          "qa.myservice.global",
+			TrafficPolicy: mTLSAAWest,
+		},
+	}
+
+	noGtpDrInCacheSingleEndpointEast := v1alpha32.DestinationRule{
+		Spec: v1alpha3.DestinationRule{
+			Host:          "qa.myservice.global",
+			TrafficPolicy: mTLSEast,
+		},
+	}
+
+	// single-endpoint SEs get a lower max ejection percentage (33)
+	outlierDetectionSingleEndpoint := &v1alpha3.OutlierDetection{
+		BaseEjectionTime:         &duration.Duration{Seconds: 300},
+		ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: 50},
+		Consecutive_5XxErrors:    &wrappers.UInt32Value{Value: 0},
+		Interval:                 &duration.Duration{Seconds: 60},
+		MaxEjectionPercent:       33,
+	}
+
+	noGtpDrSingleEndpoint.TrafficPolicy.OutlierDetection = outlierDetectionSingleEndpoint
+
+	seMultipleEndpoint := &v1alpha3.ServiceEntry{
+		Hosts: []string{"qa.myservice.global"},
+		Endpoints: []*v1alpha3.WorkloadEntry{
+			{Address: "east.com", Locality: "us-east-2"},
+			{Address: "west.com", Locality: "us-west-2"},
+		}}
+
+	noGtpDrMultipleEndpointWest := v1alpha3.DestinationRule{
+		Host:          "qa.myservice.global",
+		TrafficPolicy: mTLSWest,
+	}
+
+	noGtpDrMultipleEndpointDeleteWest := v1alpha3.DestinationRule{
+		Host:          "qa.myservice.global",
+		TrafficPolicy: mTLSWestNoDistribution,
+	}
+
+	noGtpDrMultipleEndpointEast := v1alpha3.DestinationRule{
+		Host:          "qa.myservice.global",
+		TrafficPolicy: mTLSEast,
+	}
+
+	noGtpDrMultipleEndpointEastNoLocalityLbSetting := v1alpha3.DestinationRule{
+		Host:          "qa.myservice.global",
+		TrafficPolicy: mTLSEastNoLocalityLbSetting,
+	}
+
+	DrWithGTPMultipleEndpointWest := v1alpha3.DestinationRule{
+		Host:          "qa.myservice.global",
+		TrafficPolicy: mTLSWestAfterGTP,
+	}
+
+	// multi-endpoint SEs use the full (100) max ejection percentage
+	outlierDetectionMultipleEndpoint := &v1alpha3.OutlierDetection{
+		BaseEjectionTime:         &duration.Duration{Seconds: 300},
+		ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: 50},
+		Consecutive_5XxErrors:    &wrappers.UInt32Value{Value: 0},
+		Interval:                 &duration.Duration{Seconds: 60},
+		MaxEjectionPercent:       100,
+	}
+
+	noGtpDrMultipleEndpointWest.TrafficPolicy.OutlierDetection = outlierDetectionMultipleEndpoint
+	noGtpDrMultipleEndpointEast.TrafficPolicy.OutlierDetection = outlierDetectionMultipleEndpoint
+	DrWithGTPMultipleEndpointWest.TrafficPolicy.OutlierDetection = outlierDetectionMultipleEndpoint
+	noGtpDrMultipleEndpointDeleteWest.TrafficPolicy.OutlierDetection = outlierDetectionMultipleEndpoint
+	noGtpDrMultipleEndpointEastNoLocalityLbSetting.TrafficPolicy.OutlierDetection = outlierDetectionMultipleEndpoint
+
+	GTPPolicy := &model.TrafficPolicy{
+		LbType: model.TrafficPolicy_FAILOVER,
+		Target: []*model.TrafficGroup{
+			{
+				Region: "us-west-2",
+				Weight: 70,
+			},
+			{
+				Region: "us-east-2",
+				Weight: 30,
+			},
+		},
+	}
+
+	GTPPolicyNoTargets := &model.TrafficPolicy{
+		LbType: model.TrafficPolicy_TOPOLOGY,
+	}
+
+	testCases := []struct {
+		name                    string
+		se                      *v1alpha3.ServiceEntry
+		locality                string
+		gtpPolicy               *model.TrafficPolicy
+		destinationRuleInCache  *v1alpha32.DestinationRule
+		eventResourceType       string
+		eventType               admiral.EventType
+		expectedDestinationRule *v1alpha3.DestinationRule
+	}{
+		{
+			name: "Given the application is onboarding for the first time in west" +
+				"And the DR cache does not have this entry" +
+				"And there is no GTP" +
+				"Then the DR should have the traffic distribution set to 100% to west",
+			se:                      seSingleEndpoint,
+			locality:                "us-west-2",
+			gtpPolicy:               nil,
+			destinationRuleInCache:  nil,
+			eventResourceType:       common.Deployment,
+			eventType:               admiral.Add,
+			expectedDestinationRule: &noGtpDrSingleEndpoint,
+		},
+
+		{
+			name: "Given the application is is Active-Passive and only in one region" +
+				"And the DR cache does have this entry" +
+				"And there is no GTP" +
+				"Then the DR should have the traffic distribution as it was before",
+			se:                      seSingleEndpoint,
+			locality:                "us-west-2",
+			gtpPolicy:               nil,
+			destinationRuleInCache:  &noGtpDrInCacheSingleEndpointWest,
+			eventResourceType:       common.Deployment,
+			eventType:               admiral.Add,
+			expectedDestinationRule: &noGtpDrSingleEndpoint,
+		},
+
+		{
+			name: "Given the application is Active-Active and only in west region" +
+				"And the DR cache does have this entry" +
+				"And there is no GTP" +
+				"Then the DR should have the traffic distribution set to 100% to west",
+			se:                      seSingleEndpoint,
+			locality:                "us-west-2",
+			gtpPolicy:               nil,
+			destinationRuleInCache:  &noGtpAADrInCacheSingleEndpointWest,
+			eventResourceType:       common.Deployment,
+			eventType:               admiral.Add,
+			expectedDestinationRule: &noGtpDrSingleEndpoint,
+		},
+		{
+			name: "Given the application is onboarding to east region" +
+				"And was first onboarded to west" +
+				"And the DR cache does have an entry" +
+				"And there is no GTP" +
+				"Then the DR should still have the traffic distribution set to 100% to west",
+			se:                      seMultipleEndpoint,
+			locality:                "us-west-2",
+			gtpPolicy:               nil,
+			destinationRuleInCache:  &noGtpDrInCacheSingleEndpointWest,
+			eventResourceType:       common.Deployment,
+			eventType:               admiral.Add,
+			expectedDestinationRule: &noGtpDrMultipleEndpointWest,
+		},
+		{
+			name: "Given the application is onboarding to west region" +
+				"And was first onboarded to east" +
+				"And the DR cache does have an entry" +
+				"And there is no GTP" +
+				"Then the DR should still have the traffic distribution set to 100% to east",
+			se:                      seMultipleEndpoint,
+			locality:                "us-west-2",
+			gtpPolicy:               nil,
+			destinationRuleInCache:  &noGtpDrInCacheSingleEndpointEast,
+			eventResourceType:       common.Deployment,
+			eventType:               admiral.Add,
+			expectedDestinationRule: &noGtpDrMultipleEndpointEast,
+		},
+		{
+			name: "Given the application is onboarding to west region" +
+				"And was first onboarded to east" +
+				"And the DR cache does have an entry" +
+				"And there is a GTP being applied" +
+				"Then the DR should still have the traffic distribution set to that defined by the GTP",
+			se:                      seMultipleEndpoint,
+			locality:                "us-west-2",
+			gtpPolicy:               GTPPolicy,
+			destinationRuleInCache:  &noGtpDrInCacheSingleEndpointWest,
+			eventResourceType:       common.Deployment,
+			eventType:               admiral.Add,
+			expectedDestinationRule: &DrWithGTPMultipleEndpointWest,
+		},
+		{
+			name: "Given the application is onboarding to west region" +
+				"And was first onboarded to east" +
+				"And the DR cache does have an entry" +
+				"And there is a GTP being applied with no targets" +
+				"Then the DR should change to Active-Active behavior",
+			se:                      seMultipleEndpoint,
+			locality:                "us-west-2",
+			gtpPolicy:               GTPPolicyNoTargets,
+			destinationRuleInCache:  &noGtpDrInCacheSingleEndpointWest,
+			eventResourceType:       common.Deployment,
+			eventType:               admiral.Add,
+			expectedDestinationRule: &noGtpDrMultipleEndpointEastNoLocalityLbSetting,
+		},
+		{
+			name: "Given the application is onboarding to west region" +
+				"And was first onboarded to east" +
+				"And the DR cache does have an entry" +
+				"And there is a GTP being applied with no targets" +
+				"Then the DR should change to Active-Active behavior",
+			se:                      seMultipleEndpoint,
+			locality:                "us-west-2",
+			gtpPolicy:               GTPPolicyNoTargets,
+			destinationRuleInCache:  &noGtpDrInCacheSingleEndpointWest,
+			eventResourceType:       common.GTP,
+			eventType:               admiral.Add,
+			expectedDestinationRule: &noGtpDrMultipleEndpointEastNoLocalityLbSetting,
+		},
+		{
+			name: "Given the application is onboarding to west region" +
+				"And was first onboarded to east" +
+				"And the DR cache does have an entry" +
+				"And there is a GTP being applied with no targets" +
+				"Then the DR should change to Active-Active behavior",
+			se:                      seMultipleEndpoint,
+			locality:                "us-west-2",
+			gtpPolicy:               GTPPolicyNoTargets,
+			destinationRuleInCache:  &noGtpDrInCacheSingleEndpointWest,
+			eventResourceType:       common.GTP,
+			eventType:               admiral.Update,
+			expectedDestinationRule: &noGtpDrMultipleEndpointEastNoLocalityLbSetting,
+		},
+		{
+			name: "Given the application is onboarding to west region" +
+				"And was first onboarded to east" +
+				"And the DR cache does have an entry" +
+				"And the GTP is being deleted" +
+				"Then the DR should not have any traffic distribution set",
+			se:                      seMultipleEndpoint,
+			locality:                "us-west-2",
+			gtpPolicy:               nil,
+			destinationRuleInCache:  &noGtpDrInCacheSingleEndpointWest,
+			eventResourceType:       common.GTP,
+			eventType:               admiral.Delete,
+			expectedDestinationRule: &noGtpDrMultipleEndpointDeleteWest,
+		},
+	}
+
+	for _, c := range testCases {
+		t.Run(c.name, func(t *testing.T) {
+			result := getDestinationRule(c.se, c.locality, c.gtpPolicy, nil, nil, c.destinationRuleInCache, c.eventResourceType, ctxLogger, c.eventType)
+			if !cmp.Equal(result, c.expectedDestinationRule, protocmp.Transform()) {
+				t.Fatalf("DestinationRule Mismatch. Diff: %v", cmp.Diff(result, c.expectedDestinationRule, protocmp.Transform()))
+			}
+		})
+	}
+}
+
+// TestCalculateDistribution verifies calculateDistribution: for a single-region
+// SE the distribution pins 100% to that region; for a multi-region SE it is
+// carried over from the cached DR (or empty when no cache entry / no
+// distribution exists); an SE with no endpoints yields an empty distribution.
+func TestCalculateDistribution(t *testing.T) {
+	// cached policy with an explicit 100%-to-west distribution
+	mTLSWest := &v1alpha3.TrafficPolicy{
+		Tls: &v1alpha3.ClientTLSSettings{
+			Mode: v1alpha3.ClientTLSSettings_ISTIO_MUTUAL,
+		},
+		ConnectionPool: &v1alpha3.ConnectionPoolSettings{
+			Http: &v1alpha3.ConnectionPoolSettings_HTTPSettings{
+				MaxRequestsPerConnection: common.MaxRequestsPerConnection(),
+			},
+		},
+		LoadBalancer: &v1alpha3.LoadBalancerSettings{
+			LbPolicy: &v1alpha3.LoadBalancerSettings_Simple{
+				Simple: v1alpha3.LoadBalancerSettings_LEAST_REQUEST,
+			},
+			LocalityLbSetting: &v1alpha3.LocalityLoadBalancerSetting{
+				Distribute: []*v1alpha3.LocalityLoadBalancerSetting_Distribute{
+					{
+						From: "*",
+						To:   map[string]uint32{"us-west-2": 100},
+					},
+				},
+			},
+			WarmupDurationSecs: &duration.Duration{Seconds: 45},
+		},
+	}
+
+	// cached policy without any locality distribution (Active-Active style)
+	mTLSWestNoDistribution := &v1alpha3.TrafficPolicy{
+		Tls: &v1alpha3.ClientTLSSettings{
+			Mode: v1alpha3.ClientTLSSettings_ISTIO_MUTUAL,
+		},
+		ConnectionPool: &v1alpha3.ConnectionPoolSettings{
+			Http: &v1alpha3.ConnectionPoolSettings_HTTPSettings{
+				MaxRequestsPerConnection: common.MaxRequestsPerConnection(),
+			},
+		},
+		LoadBalancer: &v1alpha3.LoadBalancerSettings{
+			LbPolicy: &v1alpha3.LoadBalancerSettings_Simple{
+				Simple: v1alpha3.LoadBalancerSettings_LEAST_REQUEST,
+			},
+			WarmupDurationSecs: &duration.Duration{Seconds: 45},
+		},
+	}
+
+	dRInCache := v1alpha32.DestinationRule{
+		Spec: v1alpha3.DestinationRule{
+			Host:          "qa.myservice.global",
+			TrafficPolicy: mTLSWest,
+		},
+	}
+
+	dRInCacheNoDistribution := v1alpha32.DestinationRule{
+		Spec: v1alpha3.DestinationRule{
+			Host:          "qa.myservice.global",
+			TrafficPolicy: mTLSWestNoDistribution,
+		},
+	}
+
+	seSingleEndpoint := &v1alpha3.ServiceEntry{
+		Hosts: []string{"qa.myservice.global"},
+		Endpoints: []*v1alpha3.WorkloadEntry{
+			{Address: "west.com", Locality: "us-west-2"},
+		}}
+
+	singleEndpointDistribution := []*v1alpha3.LocalityLoadBalancerSetting_Distribute{
+		{From: "*",
+			To: map[string]uint32{"us-west-2": 100},
+		},
+	}
+
+	seMultipleEndpoint := &v1alpha3.ServiceEntry{
+		Hosts: []string{"qa.myservice.global"},
+		Endpoints: []*v1alpha3.WorkloadEntry{
+			{Address: "east.com", Locality: "us-east-2"},
+			{Address: "west.com", Locality: "us-west-2"},
+		}}
+
+	multipleEndpointDistribution := []*v1alpha3.LocalityLoadBalancerSetting_Distribute{
+		{From: "*",
+			To: map[string]uint32{"us-west-2": 100},
+		},
+	}
+
+	// SE with no endpoints models a deleted application
+	seDeleted := &v1alpha3.ServiceEntry{
+		Hosts: []string{"qa.myservice.global"},
+	}
+
+	testCases := []struct {
+		name                   string
+		se                     *v1alpha3.ServiceEntry
+		destinationRuleInCache *v1alpha32.DestinationRule
+		expectedDistribution   []*v1alpha3.LocalityLoadBalancerSetting_Distribute
+	}{
+		{
+			name: "Given the SE of the application is only present in 1 region" +
+				"And this is a new application" +
+				"And the locality for that west" +
+				"Then the traffic distribution should be set to 100% to west",
+			se:                     seSingleEndpoint,
+			destinationRuleInCache: nil,
+			expectedDistribution:   singleEndpointDistribution,
+		},
+		{
+			name: "Given the SE of the application is only present in 1 region" +
+				"And the locality for that west" +
+				"And is currently Active-Active" +
+				"Then the traffic distribution should be set to 100% to west",
+			se:                     seSingleEndpoint,
+			destinationRuleInCache: &dRInCacheNoDistribution,
+			expectedDistribution:   singleEndpointDistribution,
+		},
+		{
+			name: "Given the SE of the application is only present in 1 region" +
+				"And the locality for that west" +
+				"And is currently Active-Passive" +
+				"Then the traffic distribution should be set to 100% to west",
+			se:                     seSingleEndpoint,
+			destinationRuleInCache: &dRInCache,
+			expectedDistribution:   singleEndpointDistribution,
+		},
+		{
+			name: "Given the SE of the application is present in multiple regions" +
+				"And the DR is present in the cache" +
+				"Then the traffic distribution should be set what is present in the cache",
+			se:                     seMultipleEndpoint,
+			destinationRuleInCache: &dRInCache,
+			expectedDistribution:   multipleEndpointDistribution,
+		},
+		{
+			name: "Given the SE of the application is present in multiple regions" +
+				"And the DR is not present in cache" +
+				"Then the traffic distribution should be set to empty",
+			se:                     seMultipleEndpoint,
+			destinationRuleInCache: nil,
+			expectedDistribution:   make([]*networkingV1Alpha3.LocalityLoadBalancerSetting_Distribute, 0),
+		},
+		{
+			name: "Given the SE of the application is present in multiple regions" +
+				"And the DR is present in the cache but no distribution is set" +
+				"Then the traffic distribution should be set to empty",
+			se:                     seMultipleEndpoint,
+			destinationRuleInCache: &dRInCacheNoDistribution,
+			expectedDistribution:   make([]*networkingV1Alpha3.LocalityLoadBalancerSetting_Distribute, 0),
+		},
+		{
+			name: "Given the application is being deleted" +
+				"Then the traffic distribution should be set to empty",
+			se:                     seDeleted,
+			destinationRuleInCache: &dRInCache,
+			expectedDistribution:   make([]*networkingV1Alpha3.LocalityLoadBalancerSetting_Distribute, 0),
+		},
+	}
+
+	for _, c := range testCases {
+		t.Run(c.name, func(t *testing.T) {
+			result := calculateDistribution(c.se, c.destinationRuleInCache)
+			if !cmp.Equal(result, c.expectedDistribution, protocmp.Transform()) {
+				t.Fatalf("Distribution Mismatch. Diff: %v", cmp.Diff(result, c.expectedDistribution, protocmp.Transform()))
+			}
+		})
+	}
+}
+
+// TestGetOutlierDetection validates getOutlierDetection, which derives an
+// Istio OutlierDetection spec by merging, in order of precedence: the admiral
+// OutlierDetection CRD, the GlobalTrafficPolicy (GTP), and built-in defaults.
+// It also checks that MaxEjectionPercent depends on the ServiceEntry's
+// endpoints (33% when only a single remote-hostname endpoint exists, 100%
+// otherwise) and that outlier detection is omitted entirely when the only
+// endpoint is local or an IP address.
+func TestGetOutlierDetection(t *testing.T) {
+	// Expected result when no CRD/GTP exists and default failover is disabled:
+	// both error counters explicitly zeroed.
+	outlierDetectionDisabledSpec := &v1alpha3.OutlierDetection{
+		ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: 0},
+		Consecutive_5XxErrors:    &wrappers.UInt32Value{Value: 0},
+	}
+	// Expected result when values come from the GTP policy below.
+	outlierDetectionFromGTP := &v1alpha3.OutlierDetection{
+		BaseEjectionTime:         &duration.Duration{Seconds: 100},
+		ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: 100},
+		Consecutive_5XxErrors:    &wrappers.UInt32Value{Value: DefaultConsecutive5xxErrors},
+		Interval:                 &duration.Duration{Seconds: 100},
+		MaxEjectionPercent:       100,
+	}
+
+	// Expected result when values come from the OutlierDetection CRD below;
+	// CRD values (10s) take precedence over the GTP values (100s).
+	outlierDetectionFromOutlierCRD := &v1alpha3.OutlierDetection{
+		BaseEjectionTime:         &duration.Duration{Seconds: 10},
+		ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: 10},
+		Consecutive_5XxErrors:    &wrappers.UInt32Value{Value: DefaultConsecutive5xxErrors},
+		Interval:                 &duration.Duration{Seconds: 10},
+		MaxEjectionPercent:       100,
+	}
+
+	// Same as outlierDetectionFromGTP, but with the reduced 33% ejection cap
+	// used when the SE has a single remote-hostname endpoint.
+	outlierDetectionWithRemoteHostUsingGTP := &v1alpha3.OutlierDetection{
+		BaseEjectionTime:         &duration.Duration{Seconds: 100},
+		ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: 100},
+		Consecutive_5XxErrors:    &wrappers.UInt32Value{Value: DefaultConsecutive5xxErrors},
+		Interval:                 &duration.Duration{Seconds: 100},
+		MaxEjectionPercent:       33,
+	}
+
+	gtpPolicyWithOutlierDetection := &model.TrafficPolicy{
+		OutlierDetection: &model.TrafficPolicy_OutlierDetection{
+			BaseEjectionTime:         100,
+			ConsecutiveGatewayErrors: 100,
+			Interval:                 100,
+		},
+	}
+
+	// SE with endpoints in two regions.
+	se := &v1alpha3.ServiceEntry{Hosts: []string{"qa.myservice.global"}, Endpoints: []*v1alpha3.WorkloadEntry{
+		{Address: "east.com", Locality: "us-east-2"}, {Address: "west.com", Locality: "us-west-2"},
+	}}
+
+	// SE with a single remote-hostname endpoint.
+	seOneHostRemote := &v1alpha3.ServiceEntry{Hosts: []string{"qa.myservice.global"}, Endpoints: []*v1alpha3.WorkloadEntry{
+		{Address: "east.com", Locality: "us-east-2"},
+	}}
+
+	// SE with a single in-cluster (".svc.cluster.local") endpoint.
+	seOneHostLocal := &v1alpha3.ServiceEntry{Hosts: []string{"qa.myservice.global"}, Endpoints: []*v1alpha3.WorkloadEntry{
+		{Address: "hello.ns.svc.cluster.local", Locality: "us-east-2"},
+	}}
+
+	// SE with a single raw-IP endpoint.
+	seOneHostRemoteIp := &v1alpha3.ServiceEntry{Hosts: []string{"qa.myservice.global"}, Endpoints: []*v1alpha3.WorkloadEntry{
+		{Address: "95.45.25.34", Locality: "us-east-2"},
+	}}
+
+	//Struct of test case info. Name is required.
+	testCases := []struct {
+		name                            string
+		se                              *v1alpha3.ServiceEntry
+		locality                        string
+		gtpPolicy                       *model.TrafficPolicy
+		expectedOutlierDetection        *v1alpha3.OutlierDetection
+		admiralOutlierDetectionCRD      *v1.OutlierDetection
+		disableDefaultAutomaticFailover bool
+	}{
+		{
+			name: "Given both outlier detection and global traffic policy exists, " +
+				"When GTP contains configurations for outlier detection, " +
+				"When both specs are passed to the function, " +
+				"Then outlier configurations should be derived from outlier detection, " +
+				"and not from global traffic policy",
+			se:                       se,
+			locality:                 "uswest2",
+			gtpPolicy:                gtpPolicyWithOutlierDetection,
+			expectedOutlierDetection: outlierDetectionFromOutlierCRD,
+			admiralOutlierDetectionCRD: &v1.OutlierDetection{
+				TypeMeta:   metaV1.TypeMeta{},
+				ObjectMeta: metaV1.ObjectMeta{},
+				Spec: model.OutlierDetection{
+					OutlierConfig: &model.OutlierConfig{
+						BaseEjectionTime:         10,
+						ConsecutiveGatewayErrors: 10,
+						Interval:                 10,
+					},
+				},
+				Status: v1.OutlierDetectionStatus{},
+			},
+		},
+		{
+			name: "Given outlier detection policy exists, " +
+				"And there is no GTP policy, " +
+				"Then outlier configurations should be derived from outlier detection, " +
+				"and not from global traffic policy",
+			se:                       se,
+			locality:                 "uswest2",
+			gtpPolicy:                nil,
+			expectedOutlierDetection: outlierDetectionFromOutlierCRD,
+			admiralOutlierDetectionCRD: &v1.OutlierDetection{
+				TypeMeta:   metaV1.TypeMeta{},
+				ObjectMeta: metaV1.ObjectMeta{},
+				Spec: model.OutlierDetection{
+					OutlierConfig: &model.OutlierConfig{
+						BaseEjectionTime:         10,
+						ConsecutiveGatewayErrors: 10,
+						Interval:                 10,
+					},
+				},
+				Status: v1.OutlierDetectionStatus{},
+			},
+		},
+		{
+			name: "Given an asset is deployed only in one region, " +
+				"And, a GTP exists for this asset, " +
+				"And the associated service entry only has the local endpoint, " +
+				"When the function is called, " +
+				"Then, it should not return any outlier configuration",
+			se:                         seOneHostLocal,
+			locality:                   "uswest2",
+			gtpPolicy:                  gtpPolicyWithOutlierDetection,
+			expectedOutlierDetection:   nil,
+			admiralOutlierDetectionCRD: nil,
+		},
+		{
+			name: "Given an asset is deployed only in one region, " +
+				"And, a GTP exists for this asset, " +
+				"And the associated service entry only has the remote IP endpoint, " +
+				"When the function is called, " +
+				"Then, it should not return any outlier configuration",
+			se:                         seOneHostRemoteIp,
+			locality:                   "uswest2",
+			gtpPolicy:                  gtpPolicyWithOutlierDetection,
+			expectedOutlierDetection:   nil,
+			admiralOutlierDetectionCRD: nil,
+		},
+		{
+			name: "Given an asset is deployed only in one region, " +
+				"And the associated service entry has an endpoint, which is neither an IP nor a local endpoint, " +
+				"Then the the max ejection percentage should be set to 33%",
+			se:                         seOneHostRemote,
+			locality:                   "uswest2",
+			gtpPolicy:                  gtpPolicyWithOutlierDetection,
+			expectedOutlierDetection:   outlierDetectionWithRemoteHostUsingGTP,
+			admiralOutlierDetectionCRD: nil,
+		},
+		{
+			name: "Given an asset is deployed in two regions, " +
+				"And the associated service entry has two endpoints, " +
+				"Then the max ejection percentage should be set to 100%",
+			se:                         se,
+			locality:                   "uswest2",
+			gtpPolicy:                  gtpPolicyWithOutlierDetection,
+			expectedOutlierDetection:   outlierDetectionFromGTP,
+			admiralOutlierDetectionCRD: nil,
+		},
+		{
+			name: "Given there is neither outlier custom resource, nor any GTP for a given asset, " +
+				"And default automatic failover is not enabled, " +
+				"Then, the outlier detection property should exist but should be empty",
+			se:                              se,
+			locality:                        "uswest2",
+			gtpPolicy:                       nil,
+			expectedOutlierDetection:        outlierDetectionDisabledSpec,
+			admiralOutlierDetectionCRD:      nil,
+			disableDefaultAutomaticFailover: true,
+		},
+		{
+			name: "Given there is neither outlier custom resource, nor any GTP for a given asset, " +
+				"And default automatic failover is not disabled, " +
+				"Then, the outlier detection should return with default values",
+			se:        se,
+			locality:  "uswest2",
+			gtpPolicy: nil,
+			expectedOutlierDetection: &v1alpha3.OutlierDetection{
+				BaseEjectionTime:         &duration.Duration{Seconds: DefaultBaseEjectionTime},
+				ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: DefaultConsecutiveGatewayErrors},
+				// The default Consecutive5XXErrors is set to 5 in envoy, setting to 0 disables 5XX error outlier detection so that ConsecutiveGatewayErrors rule can get evaluated
+				Consecutive_5XxErrors: &wrappers.UInt32Value{Value: DefaultConsecutive5xxErrors},
+				Interval:              &duration.Duration{Seconds: DefaultInterval},
+				MaxEjectionPercent:    100,
+			},
+			admiralOutlierDetectionCRD:      nil,
+			disableDefaultAutomaticFailover: false,
+		},
+		{
+			name: "Given base ejection is not configured in the Global Traffic Policy, " +
+				"When there is no outlier resource, " +
+				"Then the default value of BaseEjectionTime should be used",
+			se:       se,
+			locality: "uswest2",
+			gtpPolicy: &model.TrafficPolicy{
+				OutlierDetection: &model.TrafficPolicy_OutlierDetection{
+					ConsecutiveGatewayErrors: 10,
+					Interval:                 60,
+				},
+			},
+			expectedOutlierDetection: &v1alpha3.OutlierDetection{
+				BaseEjectionTime:         &duration.Duration{Seconds: DefaultBaseEjectionTime},
+				ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: 10},
+				Consecutive_5XxErrors:    &wrappers.UInt32Value{Value: 0},
+				Interval:                 &duration.Duration{Seconds: 60},
+				MaxEjectionPercent:       100,
+			},
+			admiralOutlierDetectionCRD: nil,
+		},
+		{
+			// NOTE(review): the name mentions BaseEjectionTime, but this case
+			// actually exercises the ConsecutiveGatewayErrors default — the
+			// "Given" clause looks copy-pasted from the previous case.
+			name: "Given base ejection is not configured in the Global Traffic Policy, " +
+				"When there is no outlier resource, " +
+				"Then the default value of ConsecutiveGatewayErrors should be used",
+			se:       se,
+			locality: "uswest2",
+			gtpPolicy: &model.TrafficPolicy{
+				OutlierDetection: &model.TrafficPolicy_OutlierDetection{
+					BaseEjectionTime: 600,
+					Interval:         60,
+				},
+			},
+			expectedOutlierDetection: &v1alpha3.OutlierDetection{
+				BaseEjectionTime:         &duration.Duration{Seconds: 600},
+				ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: DefaultConsecutiveGatewayErrors},
+				Consecutive_5XxErrors:    &wrappers.UInt32Value{Value: DefaultConsecutive5xxErrors},
+				Interval:                 &duration.Duration{Seconds: 60},
+				MaxEjectionPercent:       100,
+			},
+			admiralOutlierDetectionCRD: nil,
+		},
+		{
+			// NOTE(review): likewise, this case exercises the Interval default.
+			name: "Given base ejection is not configured in the Global Traffic Policy, " +
+				"When there is no outlier resource, " +
+				"Then the default value of Interval should be used",
+			se:       se,
+			locality: "uswest2",
+			gtpPolicy: &model.TrafficPolicy{
+				OutlierDetection: &model.TrafficPolicy_OutlierDetection{
+					BaseEjectionTime:         600,
+					ConsecutiveGatewayErrors: 50,
+				},
+			},
+			expectedOutlierDetection: &v1alpha3.OutlierDetection{
+				BaseEjectionTime:         &duration.Duration{Seconds: 600},
+				ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: 50},
+				Consecutive_5XxErrors:    &wrappers.UInt32Value{Value: 0},
+				Interval:                 &duration.Duration{Seconds: DefaultInterval},
+				MaxEjectionPercent:       100,
+			},
+			admiralOutlierDetectionCRD: nil,
+		},
+		{
+			name: "Given there is a GTP for an asset, " +
+				"When the GTP contains overrides for BaseEjectionTime, ConsecutiveGatewayErrors, and Interval, " +
+				"Then the overrides should be used for the outlier detection configuration",
+			se:       se,
+			locality: "uswest2",
+			gtpPolicy: &model.TrafficPolicy{
+				OutlierDetection: &model.TrafficPolicy_OutlierDetection{
+					BaseEjectionTime:         600,
+					ConsecutiveGatewayErrors: 10,
+					Interval:                 60,
+				},
+			},
+			expectedOutlierDetection: &v1alpha3.OutlierDetection{
+				BaseEjectionTime:         &duration.Duration{Seconds: 600},
+				ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: 10},
+				Consecutive_5XxErrors:    &wrappers.UInt32Value{Value: 0},
+				Interval:                 &duration.Duration{Seconds: 60},
+				MaxEjectionPercent:       100,
+			},
+			admiralOutlierDetectionCRD: nil,
+		},
+		{
+			// NOTE(review): identical inputs to the previous case; only the
+			// name differs ("all possible overrides"). Possibly a redundant
+			// case — confirm whether a distinct input set was intended.
+			name: "Given there is a GTP for an asset, " +
+				"When the GTP contains all possible overrides, " +
+				"Then the Consecutive_5XxErrors should be 0",
+			se:       se,
+			locality: "uswest2",
+			gtpPolicy: &model.TrafficPolicy{
+				OutlierDetection: &model.TrafficPolicy_OutlierDetection{
+					BaseEjectionTime:         600,
+					ConsecutiveGatewayErrors: 10,
+					Interval:                 60,
+				},
+			},
+			expectedOutlierDetection: &v1alpha3.OutlierDetection{
+				BaseEjectionTime:         &duration.Duration{Seconds: 600},
+				ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: 10},
+				Consecutive_5XxErrors:    &wrappers.UInt32Value{Value: 0},
+				Interval:                 &duration.Duration{Seconds: 60},
+				MaxEjectionPercent:       100,
+			},
+			admiralOutlierDetectionCRD: nil,
+		},
+		{
+			name: "Given outlier detection policy exists, " +
+				"When outlier contains all possible configurations, " +
+				"Then the Consecutive_5XxErrors should be 0",
+			se:        se,
+			locality:  "uswest2",
+			gtpPolicy: nil,
+			expectedOutlierDetection: &v1alpha3.OutlierDetection{
+				BaseEjectionTime:         &duration.Duration{Seconds: 10},
+				ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: 10},
+				Consecutive_5XxErrors:    &wrappers.UInt32Value{Value: 0},
+				Interval:                 &duration.Duration{Seconds: 10},
+				MaxEjectionPercent:       100,
+			},
+			admiralOutlierDetectionCRD: &v1.OutlierDetection{
+				TypeMeta:   metaV1.TypeMeta{},
+				ObjectMeta: metaV1.ObjectMeta{},
+				Spec: model.OutlierDetection{
+					OutlierConfig: &model.OutlierConfig{
+						BaseEjectionTime:         10,
+						ConsecutiveGatewayErrors: 10,
+						Interval:                 10,
+					},
+				},
+				Status: v1.OutlierDetectionStatus{},
+			},
+		},
+	}
+
+	//Run the test for every provided case
+	for _, c := range testCases {
+		t.Run(c.name, func(t *testing.T) {
+			result := getOutlierDetection(c.se, c.locality, c.gtpPolicy, c.admiralOutlierDetectionCRD, c.disableDefaultAutomaticFailover)
+			// Compare field-by-field when a result is expected so a mismatch
+			// reports the specific offending field; otherwise assert nil.
+			if c.expectedOutlierDetection != nil {
+				assert.Equal(t, result.BaseEjectionTime, c.expectedOutlierDetection.BaseEjectionTime, "BaseEjectionTime for Outlier Detection for "+c.name)
+				assert.Equal(t, result.Interval, c.expectedOutlierDetection.Interval, "Interval for Outlier Detection for "+c.name)
+				assert.Equal(t, result.ConsecutiveGatewayErrors, c.expectedOutlierDetection.ConsecutiveGatewayErrors, "ConsecutiveGatewayErrors for Outlier Detection for "+c.name)
+				assert.Equal(t, result.Consecutive_5XxErrors, c.expectedOutlierDetection.Consecutive_5XxErrors, "Consecutive_5XxErrors for Outlier Detection for "+c.name)
+				assert.Equal(t, result.MaxEjectionPercent, c.expectedOutlierDetection.MaxEjectionPercent, "MaxEjectionPercent for Outlier Detection for "+c.name)
+			} else {
+				assert.Equal(t, result, c.expectedOutlierDetection)
+			}
+		})
+	}
+}
+
+// TestDestRuleHandlerCUDScenarios drives the DestinationRuleHandler's Added,
+// Updated, and Deleted hooks across operating modes (admiral read-only vs
+// read-write, istio-system vs application namespace, dependent clusters
+// present vs absent) and asserts that every invocation completes without
+// error.
+func TestDestRuleHandlerCUDScenarios(t *testing.T) {
+	dr := &v1alpha32.DestinationRule{
+		ObjectMeta: metaV1.ObjectMeta{
+			Name:      "my-dr",
+			Namespace: "test-ns",
+		},
+		Spec: v1alpha3.DestinationRule{
+			Host:          "e2e.blah.global",
+			TrafficPolicy: &v1alpha3.TrafficPolicy{},
+		},
+	}
+
+	admiralParams := common.AdmiralParams{
+		LabelSet:      &common.LabelSet{},
+		SyncNamespace: "test-sync-ns",
+	}
+	common.InitializeConfig(admiralParams)
+
+	var (
+		goodCnameCache      = common.NewMapOfMaps()
+		fullFakeIstioClient = istioFake.NewSimpleClientset()
+	)
+	// Map the DR host to a dependent cluster so the handler has a target.
+	goodCnameCache.Put("e2e.blah.global", "cluster.k8s.global", "cluster.k8s.global")
+
+	ctx := context.Background()
+	r := NewRemoteRegistry(ctx, admiralParams)
+	r.AdmiralCache = &AdmiralCache{
+		CnameDependentClusterCache: goodCnameCache,
+		SeClusterCache:             common.NewMapOfMaps(),
+	}
+
+	r.PutRemoteController("cluster.k8s.global", &RemoteController{
+		DestinationRuleController: &istio.DestinationRuleController{
+			IstioClient: fullFakeIstioClient,
+		},
+	})
+
+	// drHandler's registry knows the cluster mapped in the cname cache.
+	drHandler := &DestinationRuleHandler{
+		ClusterID:      "cluster.k8s.global",
+		RemoteRegistry: r,
+	}
+
+	// rr/drHandler2: registry only knows a different cluster, so the DR host
+	// resolves to no dependent clusters.
+	rr := NewRemoteRegistry(ctx, admiralParams)
+	rr.PutRemoteController("diff.cluster.k8s.global", &RemoteController{
+		DestinationRuleController: &istio.DestinationRuleController{
+			IstioClient: fullFakeIstioClient,
+		},
+	})
+	drHandler2 := &DestinationRuleHandler{
+		ClusterID:      "cluster.k8s.global",
+		RemoteRegistry: rr,
+	}
+
+	testcases := []struct {
+		name             string
+		admiralReadState bool
+		ns               string
+		druleHandler     *DestinationRuleHandler
+	}{
+		{
+			name:             "Encountered non-istio resource in RW state- No dependent clusters case",
+			admiralReadState: false,
+			ns:               "test-ns",
+			druleHandler:     drHandler2,
+		},
+		{
+			name:             "Admiral in read-only state",
+			admiralReadState: true,
+			ns:               "test-ns",
+			druleHandler:     drHandler,
+		},
+		{
+			name:             "Encountered istio resource",
+			admiralReadState: false,
+			ns:               "istio-system",
+			druleHandler:     drHandler,
+		},
+		{
+			name:             "Encountered non-istio resource in RW state",
+			admiralReadState: false,
+			ns:               "test-ns",
+			druleHandler:     drHandler,
+		},
+	}
+
+	for _, tc := range testcases {
+		t.Run(tc.name, func(t *testing.T) {
+			// Toggle global read-only state; handlers consult it to decide
+			// whether to act on the event.
+			commonUtil.CurrentAdmiralState.ReadOnly = tc.admiralReadState
+			dr.ObjectMeta.Namespace = tc.ns
+
+			// Exercise the full Create/Update/Delete lifecycle; all three
+			// should be error-free in every scenario.
+			err := tc.druleHandler.Added(ctx, dr)
+			assert.NoError(t, err)
+
+			dr.ObjectMeta.Namespace = tc.ns
+			err = tc.druleHandler.Updated(ctx, dr)
+			assert.NoError(t, err)
+
+			err = tc.druleHandler.Deleted(ctx, dr)
+			assert.NoError(t, err)
+		})
+	}
+}
+
+// TestDestinationRuleHandlerError asserts that handleDestinationRuleEvent
+// surfaces descriptive errors when the remote controller, or its
+// DestinationRule controller, is missing for a dependent cluster.
+func TestDestinationRuleHandlerError(t *testing.T) {
+	ctxLogger := log.WithFields(log.Fields{
+		"type": "destinationRule",
+	})
+	dr := &v1alpha32.DestinationRule{
+		ObjectMeta: metaV1.ObjectMeta{
+			Name:      "my-dr",
+			Namespace: "test-ns",
+		},
+		Spec: v1alpha3.DestinationRule{
+			Host:          "env.blah.global",
+			TrafficPolicy: &v1alpha3.TrafficPolicy{},
+		},
+	}
+
+	admiralParams := common.AdmiralParams{
+		LabelSet:      &common.LabelSet{},
+		SyncNamespace: "test-sync-ns",
+	}
+
+	common.ResetSync()
+	common.InitializeConfig(admiralParams)
+
+	var (
+		ctx           = context.Background()
+		rr1           = NewRemoteRegistry(ctx, admiralParams)
+		rr2           = NewRemoteRegistry(ctx, admiralParams)
+		rr3           = NewRemoteRegistry(ctx, admiralParams)
+		rr4           = NewRemoteRegistry(ctx, admiralParams)
+		badCnameCache = common.NewMapOfMaps()
+	)
+
+	// The DR host maps to a dependent cluster that is deliberately
+	// mis-configured in each registry below.
+	badCnameCache.Put("env.blah.global", "fakecluster.k8s.global", "fakecluster.k8s.global")
+
+	// rr1: dependent cluster known to the cache but no remote controller at all.
+	rr1.AdmiralCache = &AdmiralCache{
+		CnameDependentClusterCache: badCnameCache,
+		SeClusterCache:             common.NewMapOfMaps(),
+	}
+
+	// rr2: remote controller present, but its DestinationRule controller is nil.
+	rr2.AdmiralCache = &AdmiralCache{
+		CnameDependentClusterCache: badCnameCache,
+		SeClusterCache:             common.NewMapOfMaps(),
+	}
+	rr2.PutRemoteController("fakecluster.k8s.global", &RemoteController{
+		DestinationRuleController: nil,
+	})
+
+	// rr3: explicit nil remote controller entry for the cluster.
+	rr3.PutRemoteController("fakecluster.k8s.global", nil)
+
+	// rr4: same shape as rr2 but used with a handler whose own ClusterID
+	// differs from the dependent cluster.
+	rr4.PutRemoteController("fakecluster.k8s.global", &RemoteController{
+		DestinationRuleController: nil,
+	})
+
+	drHandler1 := &DestinationRuleHandler{
+		ClusterID:      "fakecluster.k8s.global",
+		RemoteRegistry: rr2,
+	}
+
+	drHandler2 := &DestinationRuleHandler{
+		ClusterID:      "fakecluster.k8s.global",
+		RemoteRegistry: rr1,
+	}
+
+	drHandler3 := &DestinationRuleHandler{
+		ClusterID:      "foobar",
+		RemoteRegistry: rr3,
+	}
+
+	drHandler4 := &DestinationRuleHandler{
+		ClusterID:      "foobar",
+		RemoteRegistry: rr4,
+	}
+
+	cases := []struct {
+		name             string
+		admiralReadState bool
+		ns               string
+		druleHandler     *DestinationRuleHandler
+		expectedError    error
+	}{
+		{
+			name:             "Destination controller for a given dependent cluster is not initialized",
+			admiralReadState: false,
+			ns:               "test-ns",
+			druleHandler:     drHandler1,
+			expectedError:    fmt.Errorf("op=Event type=DestinationRule name=my-dr cluster=fakecluster.k8s.global message=DestinationRule controller not initialized for cluster"),
+		},
+		{
+			name:             "Remote controller for a given dependent cluster is not initialized",
+			admiralReadState: false,
+			ns:               "test-ns",
+			druleHandler:     drHandler2,
+			expectedError:    fmt.Errorf("op=Event type=DestinationRule name=my-dr cluster=fakecluster.k8s.global message=remote controller not initialized for cluster"),
+		},
+		{
+			// NOTE(review): the handler's ClusterID is "foobar", yet the
+			// expected message names cluster=fakecluster.k8s.global —
+			// presumably the message refers to the dependent cluster from the
+			// cname cache rather than the handler's own cluster; confirm
+			// against handleDestinationRuleEvent.
+			name:             "Remote controller for a given remote cluster is not initialized",
+			admiralReadState: false,
+			ns:               "test-ns",
+			druleHandler:     drHandler3,
+			expectedError:    fmt.Errorf("op=Event type=DestinationRule name=my-dr cluster=fakecluster.k8s.global message=remote controller not initialized for cluster"),
+		},
+		{
+			name: "Remote controller for a given remote cluster is initialized, " +
+				"And Destination controller for a given dependent cluster is not initialized",
+			admiralReadState: false,
+			ns:               "test-ns",
+			druleHandler:     drHandler4,
+			expectedError:    fmt.Errorf("op=Event type=DestinationRule name=my-dr cluster=fakecluster.k8s.global message=DestinationRule controller not initialized for cluster"),
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			commonUtil.CurrentAdmiralState.ReadOnly = c.admiralReadState
+			dr.ObjectMeta.Namespace = c.ns
+			err := handleDestinationRuleEvent(ctxLogger, ctx, dr, c.druleHandler, common.Add, common.DestinationRuleResourceType)
+			// Compare by message text: fmt.Errorf values are never equal by
+			// identity, so all three nil/non-nil combinations are handled
+			// explicitly.
+			if err != nil && c.expectedError == nil {
+				t.Errorf("expected error to be nil but got %v", err)
+			}
+			if err != nil && c.expectedError != nil {
+				if !(err.Error() == c.expectedError.Error()) {
+					t.Errorf("error mismatch, expected %v but got %v", c.expectedError, err)
+				}
+			}
+			if err == nil && c.expectedError != nil {
+				t.Errorf("expected error %v but got %v", c.expectedError, err)
+			}
+		})
+	}
+}
+
+// TestDeleteDestinationRule verifies deleteDestinationRule behavior:
+// deleting a rule that does not exist is tolerated (not-found is not an
+// error), deleting an existing rule succeeds, and any other client error is
+// propagated to the caller.
+func TestDeleteDestinationRule(t *testing.T) {
+	ctxLogger := log.WithFields(log.Fields{
+		"type": "destinationRule",
+	})
+	dr := &v1alpha32.DestinationRule{
+		ObjectMeta: metaV1.ObjectMeta{
+			Name:      "my-dr",
+			Namespace: "test-ns",
+		},
+		Spec: v1alpha3.DestinationRule{
+			Host:          "e2e.blah.global",
+			TrafficPolicy: &v1alpha3.TrafficPolicy{},
+		},
+	}
+
+	admiralParams := common.AdmiralParams{
+		LabelSet:      &common.LabelSet{},
+		SyncNamespace: "test-sync-ns",
+	}
+	common.InitializeConfig(admiralParams)
+
+	ctx := context.Background()
+
+	rc := &RemoteController{
+		DestinationRuleController: &istio.DestinationRuleController{
+			IstioClient: istioFake.NewSimpleClientset(),
+		},
+	}
+	rr := NewRemoteRegistry(ctx, admiralParams)
+	// Deleting a destination rule that does not exist should not error.
+	err := deleteDestinationRule(ctx, dr, admiralParams.SyncNamespace, rc)
+	assert.Nil(t, err)
+
+	// Create the rule so the next delete exercises the happy path.
+	// Bug fix: the original discarded this call's return value, so the
+	// following assert re-checked the stale err from the previous call.
+	err = addUpdateDestinationRule(ctxLogger, ctx, dr, nil, admiralParams.SyncNamespace, rc, rr)
+	assert.Nil(t, err)
+
+	err = deleteDestinationRule(ctx, dr, admiralParams.SyncNamespace, rc)
+	assert.Nil(t, err)
+
+	// Make the fake client fail deletes with a generic (non not-found) error.
+	rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().(*fakenetworkingv1alpha3.FakeNetworkingV1alpha3).PrependReactor("delete", "destinationrules", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+		return true, &v1alpha32.DestinationRule{}, errors.New("Error deleting destination rule")
+	})
+	err = deleteDestinationRule(ctx, dr, admiralParams.SyncNamespace, rc)
+
+	assert.NotNil(t, err, "should return the error for any error apart from not found")
+}
+
+// TestAddUpdateDestinationRule verifies that addUpdateDestinationRule
+// propagates the error when the underlying create call fails.
+func TestAddUpdateDestinationRule(t *testing.T) {
+	ctxLogger := log.WithFields(log.Fields{
+		"type": "destinationRule",
+	})
+	dr := &v1alpha32.DestinationRule{
+		ObjectMeta: metaV1.ObjectMeta{
+			Name:      "my-dr",
+			Namespace: "test-ns",
+		},
+		Spec: v1alpha3.DestinationRule{
+			Host:          "e2e.blah.global",
+			TrafficPolicy: &v1alpha3.TrafficPolicy{},
+		},
+	}
+
+	admiralParams := common.AdmiralParams{
+		LabelSet:      &common.LabelSet{},
+		SyncNamespace: "test-sync-ns",
+	}
+	common.InitializeConfig(admiralParams)
+
+	ctx := context.Background()
+
+	rc := &RemoteController{
+		DestinationRuleController: &istio.DestinationRuleController{
+			IstioClient: istioFake.NewSimpleClientset(),
+		},
+	}
+	rr := NewRemoteRegistry(ctx, admiralParams)
+	// Make every create on the fake client fail so the error path is hit.
+	rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().(*fakenetworkingv1alpha3.FakeNetworkingV1alpha3).PrependReactor("create", "destinationrules", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
+		return true, &v1alpha32.DestinationRule{}, errors.New("Error creating destination rule")
+	})
+
+	err := addUpdateDestinationRule(ctxLogger, ctx, dr, nil, admiralParams.SyncNamespace, rc, rr)
+	assert.NotNil(t, err, "should return the error if not success")
+}
+
+// TestAddUpdateDestinationRule2 covers the create/update race in
+// addUpdateDestinationRule: the rule is pre-created directly on the fake
+// client while the caller passes existingDR == nil, simulating another
+// thread winning the create. The function is expected to fall back to an
+// update without panicking or returning an error.
+func TestAddUpdateDestinationRule2(t *testing.T) {
+	var (
+		namespace = "test-ns"
+		ctxLogger = log.WithFields(log.Fields{
+			"type": "destinationRule",
+		})
+		dr = &v1alpha32.DestinationRule{
+			ObjectMeta: metaV1.ObjectMeta{
+				Name:      "my-dr",
+				Namespace: "test-ns",
+			},
+			Spec: v1alpha3.DestinationRule{
+				Host:          "e2e.blah.global",
+				TrafficPolicy: &v1alpha3.TrafficPolicy{},
+			},
+		}
+		ctx = context.Background()
+		rc  = &RemoteController{
+			ClusterID: "test-cluster",
+			DestinationRuleController: &istio.DestinationRuleController{
+				IstioClient: istioFake.NewSimpleClientset(),
+			},
+		}
+		// SW-aware namespace caching and exportTo are enabled so the
+		// dependent-namespace cache populated below is consulted.
+		admiralParams = common.AdmiralParams{
+			LabelSet:              &common.LabelSet{},
+			SyncNamespace:         "test-sync-ns",
+			EnableSWAwareNSCaches: true,
+			ExportToIdentityList:  []string{"blah"},
+			ExportToMaxNamespaces: 35,
+		}
+	)
+	common.ResetSync()
+	common.InitializeConfig(admiralParams)
+	rr := NewRemoteRegistry(ctx, admiralParams)
+	rr.AdmiralCache.CnameDependentClusterNamespaceCache.Put(dr.Spec.Host, rc.ClusterID, "dep-ns", "dep-ns")
+	// Pre-create the rule out-of-band, so the function's own create fails
+	// with an already-exists error.
+	_, err := rc.DestinationRuleController.IstioClient.
+		NetworkingV1alpha3().
+		DestinationRules(namespace).
+		Create(ctx, dr, metaV1.CreateOptions{})
+	if err != nil {
+		t.Error(err)
+	}
+
+	cases := []struct {
+		name       string
+		newDR      *v1alpha32.DestinationRule
+		existingDR *v1alpha32.DestinationRule
+		expErr     error
+	}{
+		{
+			name: "Given destinationrule does not exist, " +
+				"And the existing object obtained from Get is nil, " +
+				"When another thread create the destinationrule, " +
+				"When this thread attempts to create destinationrule and fails, " +
+				"Then, then an Update operation should be run, " +
+				"And there should be no panic," +
+				"And no errors should be returned",
+			newDR:      dr,
+			existingDR: nil,
+			expErr:     nil,
+		},
+	}
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			err := addUpdateDestinationRule(ctxLogger, ctx, c.newDR, c.existingDR, namespace, rc, rr)
+			// NOTE(review): both branches run the identical assertion, so the
+			// nil/non-nil split is redundant — a single assert.Equal would do.
+			if c.expErr == nil {
+				assert.Equal(t, c.expErr, err)
+			}
+			if c.expErr != nil {
+				assert.Equal(t, c.expErr, err)
+			}
+		})
+	}
+}
+
+// TestGetClientConnectionPoolOverrides validates getClientConnectionPoolOverrides:
+// with no ClientConnectionConfig (or an empty one) the default
+// MaxRequestsPerConnection is applied, and individual HTTP/TCP connection-pool
+// fields set on the CRD override only the corresponding Istio settings.
+func TestGetClientConnectionPoolOverrides(t *testing.T) {
+
+	admiralParams := common.AdmiralParams{
+		MaxRequestsPerConnection: DefaultMaxRequestsPerConnection,
+	}
+	common.ResetSync()
+	common.InitializeConfig(admiralParams)
+
+	cases := []struct {
+		name             string
+		overrides        *v1.ClientConnectionConfig
+		expectedSettings *v1alpha3.ConnectionPoolSettings
+	}{
+		{
+			name: "Given overrides is nil, " +
+				"When getClientConnectionPoolOverrides is called, " +
+				"Then, the default settings should be returned",
+			overrides: nil,
+			expectedSettings: &v1alpha3.ConnectionPoolSettings{
+				Http: &networkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{
+					MaxRequestsPerConnection: DefaultMaxRequestsPerConnection,
+				},
+			},
+		},
+		{
+			name: "Given overrides is not nil, " +
+				"When getClientConnectionPoolOverrides is called, " +
+				"And the ClientConnectionConfig spec is empty" +
+				"Then, the default overrides should be returned",
+			overrides: &v1.ClientConnectionConfig{
+				Spec: v1.ClientConnectionConfigSpec{},
+			},
+			expectedSettings: &v1alpha3.ConnectionPoolSettings{
+				Http: &networkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{
+					MaxRequestsPerConnection: DefaultMaxRequestsPerConnection,
+				},
+			},
+		},
+		{
+			// NOTE(review): identical inputs to the previous case — the name
+			// suggests empty ConnectionPool settings (non-empty spec with nil
+			// Http/Tcp) were intended; confirm and adjust the fixture.
+			name: "Given overrides is not nil, " +
+				"When getClientConnectionPoolOverrides is called, " +
+				"And the ClientConnectionConfig ConnectionPool settings are empty" +
+				"Then, the default overrides should be returned",
+			overrides: &v1.ClientConnectionConfig{
+				Spec: v1.ClientConnectionConfigSpec{},
+			},
+			expectedSettings: &v1alpha3.ConnectionPoolSettings{
+				Http: &networkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{
+					MaxRequestsPerConnection: DefaultMaxRequestsPerConnection,
+				},
+			},
+		},
+		{
+			name: "Given overrides is not nil, " +
+				"When getClientConnectionPoolOverrides is called, " +
+				"And the ClientConnectionConfig's only ConnectionPool.Http.Http2MaxRequests is being overwritten " +
+				"Then, only the ConnectionPool.Http.Http2MaxRequests should be overwritten",
+			overrides: &v1.ClientConnectionConfig{
+				Spec: v1.ClientConnectionConfigSpec{
+					ConnectionPool: model.ConnectionPool{
+						Http: &model.ConnectionPool_HTTP{
+							Http2MaxRequests: 100,
+						},
+					},
+				},
+			},
+			expectedSettings: &v1alpha3.ConnectionPoolSettings{
+				Http: &networkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{
+					Http2MaxRequests:         100,
+					MaxRequestsPerConnection: DefaultMaxRequestsPerConnection,
+				},
+			},
+		},
+		{
+			name: "Given overrides is not nil, " +
+				"When getClientConnectionPoolOverrides is called, " +
+				"And the ClientConnectionConfig's only ConnectionPool.Http.MaxRequestsPerConnection is being overwritten " +
+				"Then, only the ConnectionPool.Http.MaxRequestsPerConnection should be overwritten",
+			overrides: &v1.ClientConnectionConfig{
+				Spec: v1.ClientConnectionConfigSpec{
+					ConnectionPool: model.ConnectionPool{
+						Http: &model.ConnectionPool_HTTP{
+							MaxRequestsPerConnection: 5,
+						},
+					},
+				},
+			},
+			expectedSettings: &v1alpha3.ConnectionPoolSettings{
+				Http: &networkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{
+					MaxRequestsPerConnection: 5,
+				},
+			},
+		},
+		{
+			// Duration strings from the CRD ("1s") are converted to proto
+			// Duration values in the resulting settings.
+			name: "Given overrides is not nil, " +
+				"When getClientConnectionPoolOverrides is called, " +
+				"And the ClientConnectionConfig's only ConnectionPool.Http.IdleTimeout is being overwritten " +
+				"Then, only the ConnectionPool.Http.IdleTimeout should be overwritten",
+			overrides: &v1.ClientConnectionConfig{
+				Spec: v1.ClientConnectionConfigSpec{
+					ConnectionPool: model.ConnectionPool{
+						Http: &model.ConnectionPool_HTTP{
+							IdleTimeout: "1s",
+						},
+					},
+				},
+			},
+			expectedSettings: &v1alpha3.ConnectionPoolSettings{
+				Http: &networkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{
+					MaxRequestsPerConnection: DefaultMaxRequestsPerConnection,
+					IdleTimeout:              &duration.Duration{Seconds: 1},
+				},
+			},
+		},
+		{
+			name: "Given overrides is not nil, " +
+				"When getClientConnectionPoolOverrides is called, " +
+				"And the ClientConnectionConfig's only ConnectionPool.TCP.MaxConnectionDuration is being overwritten " +
+				"Then, only the ConnectionPool.TCP.MaxConnectionDuration should be overwritten",
+			overrides: &v1.ClientConnectionConfig{
+				Spec: v1.ClientConnectionConfigSpec{
+					ConnectionPool: model.ConnectionPool{
+						Tcp: &model.ConnectionPool_TCP{
+							MaxConnectionDuration: "1s",
+						},
+					},
+				},
+			},
+			expectedSettings: &v1alpha3.ConnectionPoolSettings{
+				Http: &networkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{
+					MaxRequestsPerConnection: DefaultMaxRequestsPerConnection,
+				},
+				Tcp: &networkingV1Alpha3.ConnectionPoolSettings_TCPSettings{
+					MaxConnectionDuration: &duration.Duration{Seconds: 1},
+				},
+			},
+		},
+		{
+			// Explicit zeros: "0s" durations survive as zero-valued Durations,
+			// while a zero MaxRequestsPerConnection still falls back to the
+			// default (zero is indistinguishable from unset for that field).
+			name: "Given overrides is not nil, " +
+				"When getClientConnectionPoolOverrides is called, " +
+				"And the ConnectionPool.TCP.MaxConnectionDuration is set to 0 " +
+				"And the ConnectionPool.Http.Http2MaxRequests is set to 0 " +
+				"And the ConnectionPool.Http.MaxRequestsPerConnection is set to 0 " +
+				"And the ConnectionPool.Http.IdleTimeout is set to 0 " +
+				"Then, all the overrides should be set to 0",
+			overrides: &v1.ClientConnectionConfig{
+				Spec: v1.ClientConnectionConfigSpec{
+					ConnectionPool: model.ConnectionPool{
+						Tcp: &model.ConnectionPool_TCP{
+							MaxConnectionDuration: "0s",
+						},
+						Http: &model.ConnectionPool_HTTP{
+							IdleTimeout:              "0s",
+							MaxRequestsPerConnection: 0,
+							Http2MaxRequests:         0,
+						},
+					},
+				},
+			},
+			expectedSettings: &v1alpha3.ConnectionPoolSettings{
+				Http: &networkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{
+					MaxRequestsPerConnection: DefaultMaxRequestsPerConnection,
+					IdleTimeout:              &duration.Duration{Seconds: 0},
+				},
+				Tcp: &networkingV1Alpha3.ConnectionPoolSettings_TCPSettings{
+					MaxConnectionDuration: &duration.Duration{Seconds: 0},
+				},
+			},
+		},
+	}
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			actual := getClientConnectionPoolOverrides(c.overrides)
+			assert.Equal(t, c.expectedSettings, actual)
+		})
+	}
+}
From 39298053c9de15d88814b86f5beecc16e32dfd90 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:52:27 -0700
Subject: [PATCH 049/235] remove file
admiral/pkg/apis/admiral/model/dependencyproxy.pb.go
---
.../apis/admiral/model/dependencyproxy.pb.go | 237 ------------------
1 file changed, 237 deletions(-)
delete mode 100644 admiral/pkg/apis/admiral/model/dependencyproxy.pb.go
diff --git a/admiral/pkg/apis/admiral/model/dependencyproxy.pb.go b/admiral/pkg/apis/admiral/model/dependencyproxy.pb.go
deleted file mode 100644
index 4268f109..00000000
--- a/admiral/pkg/apis/admiral/model/dependencyproxy.pb.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: dependencyproxy.proto
-
-package model
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// The below example of DependencyProxy
-//```yaml
-// apiVersion: admiral.io/v1alpha1
-// kind: DependencyProxy
-// metadata:
-// name: dependency-proxy-example
-// namespace: admiral
-// annotations:
-// admiral.io/env: stage
-// spec:
-// destination:
-// identity: greeting
-// dns_suffix: "xyz"
-// dns_prefix:
-// - "test0"
-// - "test1"
-// proxy:
-// identity: nginx-gw
-//```
-// The above DependencyProxy will generate the following
-// VirtualService object
-//```yaml
-// apiVersion: networking.istio.io/v1alpha3
-// kind: VirtualService
-// metadata:
-// name: httpbin-vs
-// spec:
-// hosts:
-// - test0.stage.greeting.xyz
-// - test1.stage.greeting.xyz
-// - stage.greeting.xyz
-// http:
-// - route:
-// - destination:
-// host: stage.gateway.global
-// port:
-// number: 80
-//```
-//
-type DependencyProxy struct {
- // Configuration of the destination identity for which the
- // requests should be proxied.
- Destination *Destination `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"`
- // Configuration of the proxy's identity through which the requests
- // to the destination will be proxied through.
- Proxy *Proxy `protobuf:"bytes,2,opt,name=proxy,proto3" json:"proxy,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DependencyProxy) Reset() { *m = DependencyProxy{} }
-func (m *DependencyProxy) String() string { return proto.CompactTextString(m) }
-func (*DependencyProxy) ProtoMessage() {}
-func (*DependencyProxy) Descriptor() ([]byte, []int) {
- return fileDescriptor_edf7120455c08e23, []int{0}
-}
-
-func (m *DependencyProxy) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DependencyProxy.Unmarshal(m, b)
-}
-func (m *DependencyProxy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DependencyProxy.Marshal(b, m, deterministic)
-}
-func (m *DependencyProxy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DependencyProxy.Merge(m, src)
-}
-func (m *DependencyProxy) XXX_Size() int {
- return xxx_messageInfo_DependencyProxy.Size(m)
-}
-func (m *DependencyProxy) XXX_DiscardUnknown() {
- xxx_messageInfo_DependencyProxy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DependencyProxy proto.InternalMessageInfo
-
-func (m *DependencyProxy) GetDestination() *Destination {
- if m != nil {
- return m.Destination
- }
- return nil
-}
-
-func (m *DependencyProxy) GetProxy() *Proxy {
- if m != nil {
- return m.Proxy
- }
- return nil
-}
-
-type Destination struct {
- // Identifier of the destination workload.
- Identity string `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"`
- // An ordered list of all DNS prefixes.
- DnsPrefixes []string `protobuf:"bytes,2,rep,name=dns_prefixes,json=dnsPrefixes,proto3" json:"dns_prefixes,omitempty"`
- // The DNS suffix that should be appended while
- // constructing the endpoint of the destination service.
- DnsSuffix string `protobuf:"bytes,3,opt,name=dns_suffix,json=dnsSuffix,proto3" json:"dns_suffix,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Destination) Reset() { *m = Destination{} }
-func (m *Destination) String() string { return proto.CompactTextString(m) }
-func (*Destination) ProtoMessage() {}
-func (*Destination) Descriptor() ([]byte, []int) {
- return fileDescriptor_edf7120455c08e23, []int{1}
-}
-
-func (m *Destination) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Destination.Unmarshal(m, b)
-}
-func (m *Destination) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Destination.Marshal(b, m, deterministic)
-}
-func (m *Destination) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Destination.Merge(m, src)
-}
-func (m *Destination) XXX_Size() int {
- return xxx_messageInfo_Destination.Size(m)
-}
-func (m *Destination) XXX_DiscardUnknown() {
- xxx_messageInfo_Destination.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Destination proto.InternalMessageInfo
-
-func (m *Destination) GetIdentity() string {
- if m != nil {
- return m.Identity
- }
- return ""
-}
-
-func (m *Destination) GetDnsPrefixes() []string {
- if m != nil {
- return m.DnsPrefixes
- }
- return nil
-}
-
-func (m *Destination) GetDnsSuffix() string {
- if m != nil {
- return m.DnsSuffix
- }
- return ""
-}
-
-type Proxy struct {
- // Identifier of the proxy's workload
- Identity string `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Proxy) Reset() { *m = Proxy{} }
-func (m *Proxy) String() string { return proto.CompactTextString(m) }
-func (*Proxy) ProtoMessage() {}
-func (*Proxy) Descriptor() ([]byte, []int) {
- return fileDescriptor_edf7120455c08e23, []int{2}
-}
-
-func (m *Proxy) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Proxy.Unmarshal(m, b)
-}
-func (m *Proxy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Proxy.Marshal(b, m, deterministic)
-}
-func (m *Proxy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Proxy.Merge(m, src)
-}
-func (m *Proxy) XXX_Size() int {
- return xxx_messageInfo_Proxy.Size(m)
-}
-func (m *Proxy) XXX_DiscardUnknown() {
- xxx_messageInfo_Proxy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Proxy proto.InternalMessageInfo
-
-func (m *Proxy) GetIdentity() string {
- if m != nil {
- return m.Identity
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*DependencyProxy)(nil), "admiral.global.v1alpha.DependencyProxy")
- proto.RegisterType((*Destination)(nil), "admiral.global.v1alpha.Destination")
- proto.RegisterType((*Proxy)(nil), "admiral.global.v1alpha.Proxy")
-}
-
-func init() { proto.RegisterFile("dependencyproxy.proto", fileDescriptor_edf7120455c08e23) }
-
-var fileDescriptor_edf7120455c08e23 = []byte{
- // 233 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x90, 0x41, 0x4b, 0x03, 0x31,
- 0x10, 0x85, 0xd9, 0x96, 0x55, 0x77, 0x56, 0x10, 0x02, 0xca, 0x22, 0x14, 0xea, 0xf6, 0xd2, 0x53,
- 0x40, 0xfb, 0x0f, 0xa4, 0xde, 0x4b, 0xbc, 0x79, 0x91, 0xd4, 0x99, 0xd5, 0x60, 0x3a, 0x09, 0x49,
- 0x94, 0xdd, 0x1f, 0xe1, 0x7f, 0x96, 0xa6, 0x52, 0x7b, 0xd0, 0x1e, 0xf3, 0x78, 0xef, 0x7b, 0x2f,
- 0x03, 0x97, 0x48, 0x9e, 0x18, 0x89, 0x5f, 0x06, 0x1f, 0x5c, 0x3f, 0x48, 0x1f, 0x5c, 0x72, 0xe2,
- 0x4a, 0xe3, 0xc6, 0x04, 0x6d, 0xe5, 0xab, 0x75, 0x6b, 0x6d, 0xe5, 0xe7, 0xad, 0xb6, 0xfe, 0x4d,
- 0xb7, 0x5f, 0x05, 0x5c, 0x2c, 0xf7, 0x89, 0xd5, 0x36, 0x21, 0x1e, 0xa0, 0x46, 0x8a, 0xc9, 0xb0,
- 0x4e, 0xc6, 0x71, 0x53, 0x4c, 0x8b, 0x79, 0x7d, 0x37, 0x93, 0x7f, 0x13, 0xe4, 0xf2, 0xd7, 0xaa,
- 0x0e, 0x73, 0x62, 0x01, 0x65, 0x5e, 0xd0, 0x8c, 0x32, 0x60, 0xf2, 0x1f, 0x20, 0x97, 0xaa, 0x9d,
- 0xb7, 0x7d, 0x87, 0xfa, 0x00, 0x28, 0xae, 0xe1, 0xcc, 0x20, 0x71, 0x32, 0x69, 0xc8, 0x3b, 0x2a,
- 0xb5, 0x7f, 0x8b, 0x1b, 0x38, 0x47, 0x8e, 0xcf, 0x3e, 0x50, 0x67, 0x7a, 0x8a, 0xcd, 0x68, 0x3a,
- 0x9e, 0x57, 0xaa, 0x46, 0x8e, 0xab, 0x1f, 0x49, 0x4c, 0x00, 0xb6, 0x96, 0xf8, 0xd1, 0x75, 0xa6,
- 0x6f, 0xc6, 0x19, 0x50, 0x21, 0xc7, 0xc7, 0x2c, 0xb4, 0x33, 0x28, 0x77, 0x3f, 0x3e, 0x52, 0x73,
- 0x7f, 0xfa, 0x54, 0x6e, 0x1c, 0x92, 0x5d, 0x9f, 0xe4, 0x4b, 0x2e, 0xbe, 0x03, 0x00, 0x00, 0xff,
- 0xff, 0x6d, 0x70, 0x0d, 0x3b, 0x62, 0x01, 0x00, 0x00,
-}
From 9c1ca88877e88c5bb784f8060f3189591186612a Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:52:30 -0700
Subject: [PATCH 050/235] remove file
admiral/pkg/apis/admiral/model/dependencyproxy.proto
---
.../apis/admiral/model/dependencyproxy.proto | 72 -------------------
1 file changed, 72 deletions(-)
delete mode 100644 admiral/pkg/apis/admiral/model/dependencyproxy.proto
diff --git a/admiral/pkg/apis/admiral/model/dependencyproxy.proto b/admiral/pkg/apis/admiral/model/dependencyproxy.proto
deleted file mode 100644
index d6459571..00000000
--- a/admiral/pkg/apis/admiral/model/dependencyproxy.proto
+++ /dev/null
@@ -1,72 +0,0 @@
-syntax = "proto3";
-
-package admiral.global.v1alpha;
-
-option go_package = "model";
-
-// The below example of DependencyProxy
-//```yaml
-// apiVersion: admiral.io/v1alpha1
-// kind: DependencyProxy
-// metadata:
-// name: dependency-proxy-example
-// namespace: admiral
-// annotations:
-// admiral.io/env: stage
-// spec:
-// destination:
-// identity: greeting
-// dns_suffix: "xyz"
-// dns_prefix:
-// - "test0"
-// - "test1"
-// proxy:
-// identity: nginx-gw
-//```
-// The above DependencyProxy will generate the following
-// VirtualService object
-//```yaml
-// apiVersion: networking.istio.io/v1alpha3
-// kind: VirtualService
-// metadata:
-// name: httpbin-vs
-// spec:
-// hosts:
-// - test0.stage.greeting.xyz
-// - test1.stage.greeting.xyz
-// - stage.greeting.xyz
-// http:
-// - route:
-// - destination:
-// host: stage.gateway.global
-// port:
-// number: 80
-//```
-//
-message DependencyProxy {
- // Configuration of the destination identity for which the
- // requests should be proxied.
- Destination destination = 1;
-
- // Configuration of the proxy's identity through which the requests
- // to the destination will be proxied through.
- Proxy proxy = 2;
-
-}
-
-message Destination {
- // Identifier of the destination workload.
- string identity = 1;
-
- // An ordered list of all DNS prefixes.
- repeated string dns_prefixes = 2;
-
- // The DNS suffix that should be appended while
- // constructing the endpoint of the destination service.
- string dns_suffix = 3;
-}
-
-message Proxy {
- // Identifier of the proxy's workload
- string identity = 1;
-}
\ No newline at end of file
From f57028eb8aec5d136bc7c2fe3df4f6cf6800180e Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:52:33 -0700
Subject: [PATCH 051/235] remove file admiral/pkg/apis/admiral/v1/doc.go
---
admiral/pkg/apis/admiral/v1/doc.go | 3 ---
1 file changed, 3 deletions(-)
delete mode 100644 admiral/pkg/apis/admiral/v1/doc.go
diff --git a/admiral/pkg/apis/admiral/v1/doc.go b/admiral/pkg/apis/admiral/v1/doc.go
deleted file mode 100644
index ab460376..00000000
--- a/admiral/pkg/apis/admiral/v1/doc.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// +k8s:deepcopy-gen=package
-// +groupName=admiral.io
-package v1
From 43d39a9266b57d83c57975e584efdcf21c54f0cd Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:52:36 -0700
Subject: [PATCH 052/235] remove file admiral/pkg/apis/admiral/v1/register.go
---
admiral/pkg/apis/admiral/v1/register.go | 64 -------------------------
1 file changed, 64 deletions(-)
delete mode 100644 admiral/pkg/apis/admiral/v1/register.go
diff --git a/admiral/pkg/apis/admiral/v1/register.go b/admiral/pkg/apis/admiral/v1/register.go
deleted file mode 100644
index d08e95b3..00000000
--- a/admiral/pkg/apis/admiral/v1/register.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package v1
-
-import (
- "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral"
-
- meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// GroupVersion is the identifier for the API which includes
-// the name of the group and the version of the API
-var SchemeGroupVersion = schema.GroupVersion{
- Group: admiral.GroupName,
- Version: "v1alpha1",
-}
-
-// create a SchemeBuilder which uses functions to add types to
-// the scheme
-var (
- SchemeBuilder runtime.SchemeBuilder
- localSchemeBuilder = &SchemeBuilder
- AddToScheme = localSchemeBuilder.AddToScheme
-)
-
-func Resource(resource string) schema.GroupResource {
- return SchemeGroupVersion.WithResource(resource).GroupResource()
-}
-
-func init() {
- // We only register manually written functions here. The registration of the
- // generated functions takes place in the generated files. The separation
- // makes the code compile even when the generated files are missing.
- localSchemeBuilder.Register(addKnownTypes)
-}
-
-// addKnownTypes adds our types to the API scheme by registering
-// MyResource and MyResourceList
-func addKnownTypes(scheme *runtime.Scheme) error {
- //scheme.AddUnversionedTypes(
- // SchemeGroupVersion,
- // &Dependency{},
- // &DependencyList{},
- // &GlobalTrafficPolicy{},
- // &GlobalTrafficPolicyList{},
- //)
-
- scheme.AddKnownTypes(
- SchemeGroupVersion,
- &Dependency{},
- &DependencyList{},
- &GlobalTrafficPolicy{},
- &GlobalTrafficPolicyList{},
- &RoutingPolicy{},
- &RoutingPolicyList{},
- &DependencyProxy{},
- &DependencyProxyList{},
- )
-
- // register the type in the scheme
- meta_v1.AddToGroupVersion(scheme, SchemeGroupVersion)
- return nil
-}
From 01d1c54c65bd942dcde4b73564c252fc870439b3 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:52:39 -0700
Subject: [PATCH 053/235] remove file admiral/pkg/apis/admiral/v1/type.go
---
admiral/pkg/apis/admiral/v1/type.go | 110 ----------------------------
1 file changed, 110 deletions(-)
delete mode 100644 admiral/pkg/apis/admiral/v1/type.go
diff --git a/admiral/pkg/apis/admiral/v1/type.go b/admiral/pkg/apis/admiral/v1/type.go
deleted file mode 100644
index c5b841a7..00000000
--- a/admiral/pkg/apis/admiral/v1/type.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package v1
-
-import (
- "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model"
- meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +genclient
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-//generic cdr object to wrap the dependency api
-type Dependency struct {
- meta_v1.TypeMeta `json:",inline"`
- meta_v1.ObjectMeta `json:"metadata"`
- Spec model.Dependency `json:"spec"`
- Status DependencyStatus `json:"status"`
-}
-
-// FooStatus is the status for a Foo resource
-type DependencyStatus struct {
- ClusterSynced int32 `json:"clustersSynced"`
- State string `json:"state"`
-}
-
-// FooList is a list of Foo resources
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-type DependencyList struct {
- meta_v1.TypeMeta `json:",inline"`
- meta_v1.ListMeta `json:"metadata"`
-
- Items []Dependency `json:"items"`
-}
-
-//generic cdr object to wrap the GlobalTrafficPolicy api
-// +genclient
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-type GlobalTrafficPolicy struct {
- meta_v1.TypeMeta `json:",inline"`
- meta_v1.ObjectMeta `json:"metadata"`
- Spec model.GlobalTrafficPolicy `json:"spec"`
- Status GlobalTrafficPolicyStatus `json:"status"`
-}
-
-// FooStatus is the status for a Foo resource
-
-type GlobalTrafficPolicyStatus struct {
- ClusterSynced int32 `json:"clustersSynced"`
- State string `json:"state"`
-}
-
-// FooList is a list of Foo resources
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-type GlobalTrafficPolicyList struct {
- meta_v1.TypeMeta `json:",inline"`
- meta_v1.ListMeta `json:"metadata"`
-
- Items []GlobalTrafficPolicy `json:"items"`
-}
-
-//generic cdr object to wrap the GlobalTrafficPolicy api
-// +genclient
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-type RoutingPolicy struct {
- meta_v1.TypeMeta `json:",inline"`
- meta_v1.ObjectMeta `json:"metadata"`
- Spec model.RoutingPolicy `json:"spec"`
- Status RoutingPolicyStatus `json:"status"`
-}
-
-// FooStatus is the status for a Foo resource
-
-type RoutingPolicyStatus struct {
- ClusterSynced int32 `json:"clustersSynced"`
- State string `json:"state"`
-}
-
-// FooList is a list of Foo resources
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-type RoutingPolicyList struct {
- meta_v1.TypeMeta `json:",inline"`
- meta_v1.ListMeta `json:"metadata"`
-
- Items []RoutingPolicy `json:"items"`
-}
-
-// +genclient
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:openapi-gen=true
-// +kubebuilder:printcolumn:name="Destination",type="string",JSONPath=`.spec.destination.identity`
-// +kubebuilder:printcolumn:name="Proxy",type="string",JSONPath=`.spec.proxy.identity`
-// +kubebuilder:resource:shortName=dp
-type DependencyProxy struct {
- meta_v1.TypeMeta `json:",inline"`
- meta_v1.ObjectMeta `json:"metadata"`
- Spec model.DependencyProxy `json:"spec"`
- Status DependencyProxyStatus `json:"status"`
-}
-
-// DependencyProxyStatus is the status for a DependencyProxy resource
-type DependencyProxyStatus struct {
- State string `json:"state"`
-}
-
-// DependencyProxyList is a list of DependencyProxy resources
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-type DependencyProxyList struct {
- meta_v1.TypeMeta `json:",inline"`
- meta_v1.ListMeta `json:"metadata"`
-
- Items []DependencyProxy `json:"items"`
-}
From 2c2e36503d8211fe4e4266e56166c364e1e27457 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:52:43 -0700
Subject: [PATCH 054/235] remove file
admiral/pkg/apis/admiral/v1/zz_generated.deepcopy.go
---
.../apis/admiral/v1/zz_generated.deepcopy.go | 334 ------------------
1 file changed, 334 deletions(-)
delete mode 100644 admiral/pkg/apis/admiral/v1/zz_generated.deepcopy.go
diff --git a/admiral/pkg/apis/admiral/v1/zz_generated.deepcopy.go b/admiral/pkg/apis/admiral/v1/zz_generated.deepcopy.go
deleted file mode 100644
index f17accd2..00000000
--- a/admiral/pkg/apis/admiral/v1/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,334 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package v1
-
-import (
- runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Dependency) DeepCopyInto(out *Dependency) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dependency.
-func (in *Dependency) DeepCopy() *Dependency {
- if in == nil {
- return nil
- }
- out := new(Dependency)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Dependency) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DependencyList) DeepCopyInto(out *DependencyList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]Dependency, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DependencyList.
-func (in *DependencyList) DeepCopy() *DependencyList {
- if in == nil {
- return nil
- }
- out := new(DependencyList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *DependencyList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DependencyProxy) DeepCopyInto(out *DependencyProxy) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DependencyProxy.
-func (in *DependencyProxy) DeepCopy() *DependencyProxy {
- if in == nil {
- return nil
- }
- out := new(DependencyProxy)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *DependencyProxy) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DependencyProxyList) DeepCopyInto(out *DependencyProxyList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]DependencyProxy, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DependencyProxyList.
-func (in *DependencyProxyList) DeepCopy() *DependencyProxyList {
- if in == nil {
- return nil
- }
- out := new(DependencyProxyList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *DependencyProxyList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DependencyProxyStatus) DeepCopyInto(out *DependencyProxyStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DependencyProxyStatus.
-func (in *DependencyProxyStatus) DeepCopy() *DependencyProxyStatus {
- if in == nil {
- return nil
- }
- out := new(DependencyProxyStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DependencyStatus) DeepCopyInto(out *DependencyStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DependencyStatus.
-func (in *DependencyStatus) DeepCopy() *DependencyStatus {
- if in == nil {
- return nil
- }
- out := new(DependencyStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GlobalTrafficPolicy) DeepCopyInto(out *GlobalTrafficPolicy) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalTrafficPolicy.
-func (in *GlobalTrafficPolicy) DeepCopy() *GlobalTrafficPolicy {
- if in == nil {
- return nil
- }
- out := new(GlobalTrafficPolicy)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *GlobalTrafficPolicy) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GlobalTrafficPolicyList) DeepCopyInto(out *GlobalTrafficPolicyList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]GlobalTrafficPolicy, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalTrafficPolicyList.
-func (in *GlobalTrafficPolicyList) DeepCopy() *GlobalTrafficPolicyList {
- if in == nil {
- return nil
- }
- out := new(GlobalTrafficPolicyList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *GlobalTrafficPolicyList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GlobalTrafficPolicyStatus) DeepCopyInto(out *GlobalTrafficPolicyStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalTrafficPolicyStatus.
-func (in *GlobalTrafficPolicyStatus) DeepCopy() *GlobalTrafficPolicyStatus {
- if in == nil {
- return nil
- }
- out := new(GlobalTrafficPolicyStatus)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RoutingPolicy) DeepCopyInto(out *RoutingPolicy) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingPolicy.
-func (in *RoutingPolicy) DeepCopy() *RoutingPolicy {
- if in == nil {
- return nil
- }
- out := new(RoutingPolicy)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *RoutingPolicy) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RoutingPolicyList) DeepCopyInto(out *RoutingPolicyList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]RoutingPolicy, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingPolicyList.
-func (in *RoutingPolicyList) DeepCopy() *RoutingPolicyList {
- if in == nil {
- return nil
- }
- out := new(RoutingPolicyList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *RoutingPolicyList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RoutingPolicyStatus) DeepCopyInto(out *RoutingPolicyStatus) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingPolicyStatus.
-func (in *RoutingPolicyStatus) DeepCopy() *RoutingPolicyStatus {
- if in == nil {
- return nil
- }
- out := new(RoutingPolicyStatus)
- in.DeepCopyInto(out)
- return out
-}
From 49af37d9cb26f2661b583d99a879767a634e9c16 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:52:46 -0700
Subject: [PATCH 055/235] remove file
admiral/pkg/client/clientset/versioned/typed/admiral/v1/admiral_client.go
---
.../typed/admiral/v1/admiral_client.go | 122 ------------------
1 file changed, 122 deletions(-)
delete mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1/admiral_client.go
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/admiral_client.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1/admiral_client.go
deleted file mode 100644
index 9d79a32b..00000000
--- a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/admiral_client.go
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1
-
-import (
- "net/http"
-
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/scheme"
- rest "k8s.io/client-go/rest"
-)
-
-type AdmiralV1Interface interface {
- RESTClient() rest.Interface
- DependenciesGetter
- DependencyProxiesGetter
- GlobalTrafficPoliciesGetter
- RoutingPoliciesGetter
-}
-
-// AdmiralV1Client is used to interact with features provided by the admiral.io group.
-type AdmiralV1Client struct {
- restClient rest.Interface
-}
-
-func (c *AdmiralV1Client) Dependencies(namespace string) DependencyInterface {
- return newDependencies(c, namespace)
-}
-
-func (c *AdmiralV1Client) DependencyProxies(namespace string) DependencyProxyInterface {
- return newDependencyProxies(c, namespace)
-}
-
-func (c *AdmiralV1Client) GlobalTrafficPolicies(namespace string) GlobalTrafficPolicyInterface {
- return newGlobalTrafficPolicies(c, namespace)
-}
-
-func (c *AdmiralV1Client) RoutingPolicies(namespace string) RoutingPolicyInterface {
- return newRoutingPolicies(c, namespace)
-}
-
-// NewForConfig creates a new AdmiralV1Client for the given config.
-// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
-// where httpClient was generated with rest.HTTPClientFor(c).
-func NewForConfig(c *rest.Config) (*AdmiralV1Client, error) {
- config := *c
- if err := setConfigDefaults(&config); err != nil {
- return nil, err
- }
- httpClient, err := rest.HTTPClientFor(&config)
- if err != nil {
- return nil, err
- }
- return NewForConfigAndClient(&config, httpClient)
-}
-
-// NewForConfigAndClient creates a new AdmiralV1Client for the given config and http client.
-// Note the http client provided takes precedence over the configured transport values.
-func NewForConfigAndClient(c *rest.Config, h *http.Client) (*AdmiralV1Client, error) {
- config := *c
- if err := setConfigDefaults(&config); err != nil {
- return nil, err
- }
- client, err := rest.RESTClientForConfigAndClient(&config, h)
- if err != nil {
- return nil, err
- }
- return &AdmiralV1Client{client}, nil
-}
-
-// NewForConfigOrDie creates a new AdmiralV1Client for the given config and
-// panics if there is an error in the config.
-func NewForConfigOrDie(c *rest.Config) *AdmiralV1Client {
- client, err := NewForConfig(c)
- if err != nil {
- panic(err)
- }
- return client
-}
-
-// New creates a new AdmiralV1Client for the given RESTClient.
-func New(c rest.Interface) *AdmiralV1Client {
- return &AdmiralV1Client{c}
-}
-
-func setConfigDefaults(config *rest.Config) error {
- gv := v1.SchemeGroupVersion
- config.GroupVersion = &gv
- config.APIPath = "/apis"
- config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
-
- if config.UserAgent == "" {
- config.UserAgent = rest.DefaultKubernetesUserAgent()
- }
-
- return nil
-}
-
-// RESTClient returns a RESTClient that is used to communicate
-// with API server by this client implementation.
-func (c *AdmiralV1Client) RESTClient() rest.Interface {
- if c == nil {
- return nil
- }
- return c.restClient
-}
From 94a75020abf6c560dd18fb959b2952e77d7205b2 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:52:49 -0700
Subject: [PATCH 056/235] remove file
admiral/pkg/client/clientset/versioned/typed/admiral/v1/dependency.go
---
.../versioned/typed/admiral/v1/dependency.go | 195 ------------------
1 file changed, 195 deletions(-)
delete mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1/dependency.go
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/dependency.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1/dependency.go
deleted file mode 100644
index 8b533c0b..00000000
--- a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/dependency.go
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1
-
-import (
- "context"
- "time"
-
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- scheme "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/scheme"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- types "k8s.io/apimachinery/pkg/types"
- watch "k8s.io/apimachinery/pkg/watch"
- rest "k8s.io/client-go/rest"
-)
-
-// DependenciesGetter has a method to return a DependencyInterface.
-// A group's client should implement this interface.
-type DependenciesGetter interface {
- Dependencies(namespace string) DependencyInterface
-}
-
-// DependencyInterface has methods to work with Dependency resources.
-type DependencyInterface interface {
- Create(ctx context.Context, dependency *v1.Dependency, opts metav1.CreateOptions) (*v1.Dependency, error)
- Update(ctx context.Context, dependency *v1.Dependency, opts metav1.UpdateOptions) (*v1.Dependency, error)
- UpdateStatus(ctx context.Context, dependency *v1.Dependency, opts metav1.UpdateOptions) (*v1.Dependency, error)
- Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
- DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Dependency, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.DependencyList, error)
- Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Dependency, err error)
- DependencyExpansion
-}
-
-// dependencies implements DependencyInterface
-type dependencies struct {
- client rest.Interface
- ns string
-}
-
-// newDependencies returns a Dependencies
-func newDependencies(c *AdmiralV1Client, namespace string) *dependencies {
- return &dependencies{
- client: c.RESTClient(),
- ns: namespace,
- }
-}
-
-// Get takes name of the dependency, and returns the corresponding dependency object, and an error if there is any.
-func (c *dependencies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Dependency, err error) {
- result = &v1.Dependency{}
- err = c.client.Get().
- Namespace(c.ns).
- Resource("dependencies").
- Name(name).
- VersionedParams(&options, scheme.ParameterCodec).
- Do(ctx).
- Into(result)
- return
-}
-
-// List takes label and field selectors, and returns the list of Dependencies that match those selectors.
-func (c *dependencies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DependencyList, err error) {
- var timeout time.Duration
- if opts.TimeoutSeconds != nil {
- timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
- }
- result = &v1.DependencyList{}
- err = c.client.Get().
- Namespace(c.ns).
- Resource("dependencies").
- VersionedParams(&opts, scheme.ParameterCodec).
- Timeout(timeout).
- Do(ctx).
- Into(result)
- return
-}
-
-// Watch returns a watch.Interface that watches the requested dependencies.
-func (c *dependencies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
- var timeout time.Duration
- if opts.TimeoutSeconds != nil {
- timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
- }
- opts.Watch = true
- return c.client.Get().
- Namespace(c.ns).
- Resource("dependencies").
- VersionedParams(&opts, scheme.ParameterCodec).
- Timeout(timeout).
- Watch(ctx)
-}
-
-// Create takes the representation of a dependency and creates it. Returns the server's representation of the dependency, and an error, if there is any.
-func (c *dependencies) Create(ctx context.Context, dependency *v1.Dependency, opts metav1.CreateOptions) (result *v1.Dependency, err error) {
- result = &v1.Dependency{}
- err = c.client.Post().
- Namespace(c.ns).
- Resource("dependencies").
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(dependency).
- Do(ctx).
- Into(result)
- return
-}
-
-// Update takes the representation of a dependency and updates it. Returns the server's representation of the dependency, and an error, if there is any.
-func (c *dependencies) Update(ctx context.Context, dependency *v1.Dependency, opts metav1.UpdateOptions) (result *v1.Dependency, err error) {
- result = &v1.Dependency{}
- err = c.client.Put().
- Namespace(c.ns).
- Resource("dependencies").
- Name(dependency.Name).
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(dependency).
- Do(ctx).
- Into(result)
- return
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *dependencies) UpdateStatus(ctx context.Context, dependency *v1.Dependency, opts metav1.UpdateOptions) (result *v1.Dependency, err error) {
- result = &v1.Dependency{}
- err = c.client.Put().
- Namespace(c.ns).
- Resource("dependencies").
- Name(dependency.Name).
- SubResource("status").
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(dependency).
- Do(ctx).
- Into(result)
- return
-}
-
-// Delete takes name of the dependency and deletes it. Returns an error if one occurs.
-func (c *dependencies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
- return c.client.Delete().
- Namespace(c.ns).
- Resource("dependencies").
- Name(name).
- Body(&opts).
- Do(ctx).
- Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *dependencies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
- var timeout time.Duration
- if listOpts.TimeoutSeconds != nil {
- timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
- }
- return c.client.Delete().
- Namespace(c.ns).
- Resource("dependencies").
- VersionedParams(&listOpts, scheme.ParameterCodec).
- Timeout(timeout).
- Body(&opts).
- Do(ctx).
- Error()
-}
-
-// Patch applies the patch and returns the patched dependency.
-func (c *dependencies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Dependency, err error) {
- result = &v1.Dependency{}
- err = c.client.Patch(pt).
- Namespace(c.ns).
- Resource("dependencies").
- Name(name).
- SubResource(subresources...).
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(data).
- Do(ctx).
- Into(result)
- return
-}
From f7332c477a312089b6e809e3f80f0b09ee80b490 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:52:52 -0700
Subject: [PATCH 057/235] remove file
admiral/pkg/client/clientset/versioned/typed/admiral/v1/dependencyproxy.go
---
.../typed/admiral/v1/dependencyproxy.go | 195 ------------------
1 file changed, 195 deletions(-)
delete mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1/dependencyproxy.go
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/dependencyproxy.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1/dependencyproxy.go
deleted file mode 100644
index efde18b9..00000000
--- a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/dependencyproxy.go
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1
-
-import (
- "context"
- "time"
-
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- scheme "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/scheme"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- types "k8s.io/apimachinery/pkg/types"
- watch "k8s.io/apimachinery/pkg/watch"
- rest "k8s.io/client-go/rest"
-)
-
-// DependencyProxiesGetter has a method to return a DependencyProxyInterface.
-// A group's client should implement this interface.
-type DependencyProxiesGetter interface {
- DependencyProxies(namespace string) DependencyProxyInterface
-}
-
-// DependencyProxyInterface has methods to work with DependencyProxy resources.
-type DependencyProxyInterface interface {
- Create(ctx context.Context, dependencyProxy *v1.DependencyProxy, opts metav1.CreateOptions) (*v1.DependencyProxy, error)
- Update(ctx context.Context, dependencyProxy *v1.DependencyProxy, opts metav1.UpdateOptions) (*v1.DependencyProxy, error)
- UpdateStatus(ctx context.Context, dependencyProxy *v1.DependencyProxy, opts metav1.UpdateOptions) (*v1.DependencyProxy, error)
- Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
- DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.DependencyProxy, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.DependencyProxyList, error)
- Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DependencyProxy, err error)
- DependencyProxyExpansion
-}
-
-// dependencyProxies implements DependencyProxyInterface
-type dependencyProxies struct {
- client rest.Interface
- ns string
-}
-
-// newDependencyProxies returns a DependencyProxies
-func newDependencyProxies(c *AdmiralV1Client, namespace string) *dependencyProxies {
- return &dependencyProxies{
- client: c.RESTClient(),
- ns: namespace,
- }
-}
-
-// Get takes name of the dependencyProxy, and returns the corresponding dependencyProxy object, and an error if there is any.
-func (c *dependencyProxies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DependencyProxy, err error) {
- result = &v1.DependencyProxy{}
- err = c.client.Get().
- Namespace(c.ns).
- Resource("dependencyproxies").
- Name(name).
- VersionedParams(&options, scheme.ParameterCodec).
- Do(ctx).
- Into(result)
- return
-}
-
-// List takes label and field selectors, and returns the list of DependencyProxies that match those selectors.
-func (c *dependencyProxies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DependencyProxyList, err error) {
- var timeout time.Duration
- if opts.TimeoutSeconds != nil {
- timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
- }
- result = &v1.DependencyProxyList{}
- err = c.client.Get().
- Namespace(c.ns).
- Resource("dependencyproxies").
- VersionedParams(&opts, scheme.ParameterCodec).
- Timeout(timeout).
- Do(ctx).
- Into(result)
- return
-}
-
-// Watch returns a watch.Interface that watches the requested dependencyProxies.
-func (c *dependencyProxies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
- var timeout time.Duration
- if opts.TimeoutSeconds != nil {
- timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
- }
- opts.Watch = true
- return c.client.Get().
- Namespace(c.ns).
- Resource("dependencyproxies").
- VersionedParams(&opts, scheme.ParameterCodec).
- Timeout(timeout).
- Watch(ctx)
-}
-
-// Create takes the representation of a dependencyProxy and creates it. Returns the server's representation of the dependencyProxy, and an error, if there is any.
-func (c *dependencyProxies) Create(ctx context.Context, dependencyProxy *v1.DependencyProxy, opts metav1.CreateOptions) (result *v1.DependencyProxy, err error) {
- result = &v1.DependencyProxy{}
- err = c.client.Post().
- Namespace(c.ns).
- Resource("dependencyproxies").
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(dependencyProxy).
- Do(ctx).
- Into(result)
- return
-}
-
-// Update takes the representation of a dependencyProxy and updates it. Returns the server's representation of the dependencyProxy, and an error, if there is any.
-func (c *dependencyProxies) Update(ctx context.Context, dependencyProxy *v1.DependencyProxy, opts metav1.UpdateOptions) (result *v1.DependencyProxy, err error) {
- result = &v1.DependencyProxy{}
- err = c.client.Put().
- Namespace(c.ns).
- Resource("dependencyproxies").
- Name(dependencyProxy.Name).
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(dependencyProxy).
- Do(ctx).
- Into(result)
- return
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *dependencyProxies) UpdateStatus(ctx context.Context, dependencyProxy *v1.DependencyProxy, opts metav1.UpdateOptions) (result *v1.DependencyProxy, err error) {
- result = &v1.DependencyProxy{}
- err = c.client.Put().
- Namespace(c.ns).
- Resource("dependencyproxies").
- Name(dependencyProxy.Name).
- SubResource("status").
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(dependencyProxy).
- Do(ctx).
- Into(result)
- return
-}
-
-// Delete takes name of the dependencyProxy and deletes it. Returns an error if one occurs.
-func (c *dependencyProxies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
- return c.client.Delete().
- Namespace(c.ns).
- Resource("dependencyproxies").
- Name(name).
- Body(&opts).
- Do(ctx).
- Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *dependencyProxies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
- var timeout time.Duration
- if listOpts.TimeoutSeconds != nil {
- timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
- }
- return c.client.Delete().
- Namespace(c.ns).
- Resource("dependencyproxies").
- VersionedParams(&listOpts, scheme.ParameterCodec).
- Timeout(timeout).
- Body(&opts).
- Do(ctx).
- Error()
-}
-
-// Patch applies the patch and returns the patched dependencyProxy.
-func (c *dependencyProxies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DependencyProxy, err error) {
- result = &v1.DependencyProxy{}
- err = c.client.Patch(pt).
- Namespace(c.ns).
- Resource("dependencyproxies").
- Name(name).
- SubResource(subresources...).
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(data).
- Do(ctx).
- Into(result)
- return
-}
From 167bb2e825c695ea3a7294b9911e8345b10b37bd Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:52:55 -0700
Subject: [PATCH 058/235] remove file
admiral/pkg/client/clientset/versioned/typed/admiral/v1/doc.go
---
.../versioned/typed/admiral/v1/doc.go | 20 -------------------
1 file changed, 20 deletions(-)
delete mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1/doc.go
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/doc.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1/doc.go
deleted file mode 100644
index 3af5d054..00000000
--- a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-// This package has the automatically generated typed clients.
-package v1
From 56df81e1202801750ec3da4ccf794741fb0b4757 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:52:58 -0700
Subject: [PATCH 059/235] remove file
admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/doc.go
---
.../versioned/typed/admiral/v1/fake/doc.go | 20 -------------------
1 file changed, 20 deletions(-)
delete mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/doc.go
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/doc.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/doc.go
deleted file mode 100644
index 16f44399..00000000
--- a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-// Package fake has the automatically generated clients.
-package fake
From f8c77a587df3be8f0d5244c38f50d4d98f3b09b9 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:53:02 -0700
Subject: [PATCH 060/235] remove file
admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_admiral_client.go
---
.../admiral/v1/fake/fake_admiral_client.go | 52 -------------------
1 file changed, 52 deletions(-)
delete mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_admiral_client.go
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_admiral_client.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_admiral_client.go
deleted file mode 100644
index 47e97644..00000000
--- a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_admiral_client.go
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package fake
-
-import (
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/typed/admiral/v1"
- rest "k8s.io/client-go/rest"
- testing "k8s.io/client-go/testing"
-)
-
-type FakeAdmiralV1 struct {
- *testing.Fake
-}
-
-func (c *FakeAdmiralV1) Dependencies(namespace string) v1.DependencyInterface {
- return &FakeDependencies{c, namespace}
-}
-
-func (c *FakeAdmiralV1) DependencyProxies(namespace string) v1.DependencyProxyInterface {
- return &FakeDependencyProxies{c, namespace}
-}
-
-func (c *FakeAdmiralV1) GlobalTrafficPolicies(namespace string) v1.GlobalTrafficPolicyInterface {
- return &FakeGlobalTrafficPolicies{c, namespace}
-}
-
-func (c *FakeAdmiralV1) RoutingPolicies(namespace string) v1.RoutingPolicyInterface {
- return &FakeRoutingPolicies{c, namespace}
-}
-
-// RESTClient returns a RESTClient that is used to communicate
-// with API server by this client implementation.
-func (c *FakeAdmiralV1) RESTClient() rest.Interface {
- var ret *rest.RESTClient
- return ret
-}
From 7f8e764e1cd08435d8d0ee08db9ee9684b143218 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:53:05 -0700
Subject: [PATCH 061/235] remove file
admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_dependency.go
---
.../typed/admiral/v1/fake/fake_dependency.go | 142 ------------------
1 file changed, 142 deletions(-)
delete mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_dependency.go
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_dependency.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_dependency.go
deleted file mode 100644
index 3b9391a8..00000000
--- a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_dependency.go
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package fake
-
-import (
- "context"
-
- admiralv1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- labels "k8s.io/apimachinery/pkg/labels"
- schema "k8s.io/apimachinery/pkg/runtime/schema"
- types "k8s.io/apimachinery/pkg/types"
- watch "k8s.io/apimachinery/pkg/watch"
- testing "k8s.io/client-go/testing"
-)
-
-// FakeDependencies implements DependencyInterface
-type FakeDependencies struct {
- Fake *FakeAdmiralV1
- ns string
-}
-
-var dependenciesResource = schema.GroupVersionResource{Group: "admiral.io", Version: "v1", Resource: "dependencies"}
-
-var dependenciesKind = schema.GroupVersionKind{Group: "admiral.io", Version: "v1", Kind: "Dependency"}
-
-// Get takes name of the dependency, and returns the corresponding dependency object, and an error if there is any.
-func (c *FakeDependencies) Get(ctx context.Context, name string, options v1.GetOptions) (result *admiralv1.Dependency, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewGetAction(dependenciesResource, c.ns, name), &admiralv1.Dependency{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.Dependency), err
-}
-
-// List takes label and field selectors, and returns the list of Dependencies that match those selectors.
-func (c *FakeDependencies) List(ctx context.Context, opts v1.ListOptions) (result *admiralv1.DependencyList, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewListAction(dependenciesResource, dependenciesKind, c.ns, opts), &admiralv1.DependencyList{})
-
- if obj == nil {
- return nil, err
- }
-
- label, _, _ := testing.ExtractFromListOptions(opts)
- if label == nil {
- label = labels.Everything()
- }
- list := &admiralv1.DependencyList{ListMeta: obj.(*admiralv1.DependencyList).ListMeta}
- for _, item := range obj.(*admiralv1.DependencyList).Items {
- if label.Matches(labels.Set(item.Labels)) {
- list.Items = append(list.Items, item)
- }
- }
- return list, err
-}
-
-// Watch returns a watch.Interface that watches the requested dependencies.
-func (c *FakeDependencies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
- return c.Fake.
- InvokesWatch(testing.NewWatchAction(dependenciesResource, c.ns, opts))
-
-}
-
-// Create takes the representation of a dependency and creates it. Returns the server's representation of the dependency, and an error, if there is any.
-func (c *FakeDependencies) Create(ctx context.Context, dependency *admiralv1.Dependency, opts v1.CreateOptions) (result *admiralv1.Dependency, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewCreateAction(dependenciesResource, c.ns, dependency), &admiralv1.Dependency{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.Dependency), err
-}
-
-// Update takes the representation of a dependency and updates it. Returns the server's representation of the dependency, and an error, if there is any.
-func (c *FakeDependencies) Update(ctx context.Context, dependency *admiralv1.Dependency, opts v1.UpdateOptions) (result *admiralv1.Dependency, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewUpdateAction(dependenciesResource, c.ns, dependency), &admiralv1.Dependency{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.Dependency), err
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *FakeDependencies) UpdateStatus(ctx context.Context, dependency *admiralv1.Dependency, opts v1.UpdateOptions) (*admiralv1.Dependency, error) {
- obj, err := c.Fake.
- Invokes(testing.NewUpdateSubresourceAction(dependenciesResource, "status", c.ns, dependency), &admiralv1.Dependency{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.Dependency), err
-}
-
-// Delete takes name of the dependency and deletes it. Returns an error if one occurs.
-func (c *FakeDependencies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
- _, err := c.Fake.
- Invokes(testing.NewDeleteActionWithOptions(dependenciesResource, c.ns, name, opts), &admiralv1.Dependency{})
-
- return err
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *FakeDependencies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
- action := testing.NewDeleteCollectionAction(dependenciesResource, c.ns, listOpts)
-
- _, err := c.Fake.Invokes(action, &admiralv1.DependencyList{})
- return err
-}
-
-// Patch applies the patch and returns the patched dependency.
-func (c *FakeDependencies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admiralv1.Dependency, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewPatchSubresourceAction(dependenciesResource, c.ns, name, pt, data, subresources...), &admiralv1.Dependency{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.Dependency), err
-}
From b60a524357a5bdeaca43285683b735a053ae8196 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:53:11 -0700
Subject: [PATCH 062/235] remove file
admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_dependencyproxy.go
---
.../admiral/v1/fake/fake_dependencyproxy.go | 142 ------------------
1 file changed, 142 deletions(-)
delete mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_dependencyproxy.go
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_dependencyproxy.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_dependencyproxy.go
deleted file mode 100644
index 67c063ed..00000000
--- a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_dependencyproxy.go
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package fake
-
-import (
- "context"
-
- admiralv1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- labels "k8s.io/apimachinery/pkg/labels"
- schema "k8s.io/apimachinery/pkg/runtime/schema"
- types "k8s.io/apimachinery/pkg/types"
- watch "k8s.io/apimachinery/pkg/watch"
- testing "k8s.io/client-go/testing"
-)
-
-// FakeDependencyProxies implements DependencyProxyInterface
-type FakeDependencyProxies struct {
- Fake *FakeAdmiralV1
- ns string
-}
-
-var dependencyproxiesResource = schema.GroupVersionResource{Group: "admiral.io", Version: "v1", Resource: "dependencyproxies"}
-
-var dependencyproxiesKind = schema.GroupVersionKind{Group: "admiral.io", Version: "v1", Kind: "DependencyProxy"}
-
-// Get takes name of the dependencyProxy, and returns the corresponding dependencyProxy object, and an error if there is any.
-func (c *FakeDependencyProxies) Get(ctx context.Context, name string, options v1.GetOptions) (result *admiralv1.DependencyProxy, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewGetAction(dependencyproxiesResource, c.ns, name), &admiralv1.DependencyProxy{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.DependencyProxy), err
-}
-
-// List takes label and field selectors, and returns the list of DependencyProxies that match those selectors.
-func (c *FakeDependencyProxies) List(ctx context.Context, opts v1.ListOptions) (result *admiralv1.DependencyProxyList, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewListAction(dependencyproxiesResource, dependencyproxiesKind, c.ns, opts), &admiralv1.DependencyProxyList{})
-
- if obj == nil {
- return nil, err
- }
-
- label, _, _ := testing.ExtractFromListOptions(opts)
- if label == nil {
- label = labels.Everything()
- }
- list := &admiralv1.DependencyProxyList{ListMeta: obj.(*admiralv1.DependencyProxyList).ListMeta}
- for _, item := range obj.(*admiralv1.DependencyProxyList).Items {
- if label.Matches(labels.Set(item.Labels)) {
- list.Items = append(list.Items, item)
- }
- }
- return list, err
-}
-
-// Watch returns a watch.Interface that watches the requested dependencyProxies.
-func (c *FakeDependencyProxies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
- return c.Fake.
- InvokesWatch(testing.NewWatchAction(dependencyproxiesResource, c.ns, opts))
-
-}
-
-// Create takes the representation of a dependencyProxy and creates it. Returns the server's representation of the dependencyProxy, and an error, if there is any.
-func (c *FakeDependencyProxies) Create(ctx context.Context, dependencyProxy *admiralv1.DependencyProxy, opts v1.CreateOptions) (result *admiralv1.DependencyProxy, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewCreateAction(dependencyproxiesResource, c.ns, dependencyProxy), &admiralv1.DependencyProxy{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.DependencyProxy), err
-}
-
-// Update takes the representation of a dependencyProxy and updates it. Returns the server's representation of the dependencyProxy, and an error, if there is any.
-func (c *FakeDependencyProxies) Update(ctx context.Context, dependencyProxy *admiralv1.DependencyProxy, opts v1.UpdateOptions) (result *admiralv1.DependencyProxy, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewUpdateAction(dependencyproxiesResource, c.ns, dependencyProxy), &admiralv1.DependencyProxy{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.DependencyProxy), err
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *FakeDependencyProxies) UpdateStatus(ctx context.Context, dependencyProxy *admiralv1.DependencyProxy, opts v1.UpdateOptions) (*admiralv1.DependencyProxy, error) {
- obj, err := c.Fake.
- Invokes(testing.NewUpdateSubresourceAction(dependencyproxiesResource, "status", c.ns, dependencyProxy), &admiralv1.DependencyProxy{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.DependencyProxy), err
-}
-
-// Delete takes name of the dependencyProxy and deletes it. Returns an error if one occurs.
-func (c *FakeDependencyProxies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
- _, err := c.Fake.
- Invokes(testing.NewDeleteActionWithOptions(dependencyproxiesResource, c.ns, name, opts), &admiralv1.DependencyProxy{})
-
- return err
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *FakeDependencyProxies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
- action := testing.NewDeleteCollectionAction(dependencyproxiesResource, c.ns, listOpts)
-
- _, err := c.Fake.Invokes(action, &admiralv1.DependencyProxyList{})
- return err
-}
-
-// Patch applies the patch and returns the patched dependencyProxy.
-func (c *FakeDependencyProxies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admiralv1.DependencyProxy, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewPatchSubresourceAction(dependencyproxiesResource, c.ns, name, pt, data, subresources...), &admiralv1.DependencyProxy{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.DependencyProxy), err
-}
From f8b07669eccaae1c084815fae63aade466432fa5 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:53:15 -0700
Subject: [PATCH 063/235] remove file
admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_globaltrafficpolicy.go
---
.../v1/fake/fake_globaltrafficpolicy.go | 142 ------------------
1 file changed, 142 deletions(-)
delete mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_globaltrafficpolicy.go
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_globaltrafficpolicy.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_globaltrafficpolicy.go
deleted file mode 100644
index 1158f48f..00000000
--- a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_globaltrafficpolicy.go
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package fake
-
-import (
- "context"
-
- admiralv1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- labels "k8s.io/apimachinery/pkg/labels"
- schema "k8s.io/apimachinery/pkg/runtime/schema"
- types "k8s.io/apimachinery/pkg/types"
- watch "k8s.io/apimachinery/pkg/watch"
- testing "k8s.io/client-go/testing"
-)
-
-// FakeGlobalTrafficPolicies implements GlobalTrafficPolicyInterface
-type FakeGlobalTrafficPolicies struct {
- Fake *FakeAdmiralV1
- ns string
-}
-
-var globaltrafficpoliciesResource = schema.GroupVersionResource{Group: "admiral.io", Version: "v1", Resource: "globaltrafficpolicies"}
-
-var globaltrafficpoliciesKind = schema.GroupVersionKind{Group: "admiral.io", Version: "v1", Kind: "GlobalTrafficPolicy"}
-
-// Get takes name of the globalTrafficPolicy, and returns the corresponding globalTrafficPolicy object, and an error if there is any.
-func (c *FakeGlobalTrafficPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *admiralv1.GlobalTrafficPolicy, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewGetAction(globaltrafficpoliciesResource, c.ns, name), &admiralv1.GlobalTrafficPolicy{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.GlobalTrafficPolicy), err
-}
-
-// List takes label and field selectors, and returns the list of GlobalTrafficPolicies that match those selectors.
-func (c *FakeGlobalTrafficPolicies) List(ctx context.Context, opts v1.ListOptions) (result *admiralv1.GlobalTrafficPolicyList, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewListAction(globaltrafficpoliciesResource, globaltrafficpoliciesKind, c.ns, opts), &admiralv1.GlobalTrafficPolicyList{})
-
- if obj == nil {
- return nil, err
- }
-
- label, _, _ := testing.ExtractFromListOptions(opts)
- if label == nil {
- label = labels.Everything()
- }
- list := &admiralv1.GlobalTrafficPolicyList{ListMeta: obj.(*admiralv1.GlobalTrafficPolicyList).ListMeta}
- for _, item := range obj.(*admiralv1.GlobalTrafficPolicyList).Items {
- if label.Matches(labels.Set(item.Labels)) {
- list.Items = append(list.Items, item)
- }
- }
- return list, err
-}
-
-// Watch returns a watch.Interface that watches the requested globalTrafficPolicies.
-func (c *FakeGlobalTrafficPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
- return c.Fake.
- InvokesWatch(testing.NewWatchAction(globaltrafficpoliciesResource, c.ns, opts))
-
-}
-
-// Create takes the representation of a globalTrafficPolicy and creates it. Returns the server's representation of the globalTrafficPolicy, and an error, if there is any.
-func (c *FakeGlobalTrafficPolicies) Create(ctx context.Context, globalTrafficPolicy *admiralv1.GlobalTrafficPolicy, opts v1.CreateOptions) (result *admiralv1.GlobalTrafficPolicy, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewCreateAction(globaltrafficpoliciesResource, c.ns, globalTrafficPolicy), &admiralv1.GlobalTrafficPolicy{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.GlobalTrafficPolicy), err
-}
-
-// Update takes the representation of a globalTrafficPolicy and updates it. Returns the server's representation of the globalTrafficPolicy, and an error, if there is any.
-func (c *FakeGlobalTrafficPolicies) Update(ctx context.Context, globalTrafficPolicy *admiralv1.GlobalTrafficPolicy, opts v1.UpdateOptions) (result *admiralv1.GlobalTrafficPolicy, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewUpdateAction(globaltrafficpoliciesResource, c.ns, globalTrafficPolicy), &admiralv1.GlobalTrafficPolicy{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.GlobalTrafficPolicy), err
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *FakeGlobalTrafficPolicies) UpdateStatus(ctx context.Context, globalTrafficPolicy *admiralv1.GlobalTrafficPolicy, opts v1.UpdateOptions) (*admiralv1.GlobalTrafficPolicy, error) {
- obj, err := c.Fake.
- Invokes(testing.NewUpdateSubresourceAction(globaltrafficpoliciesResource, "status", c.ns, globalTrafficPolicy), &admiralv1.GlobalTrafficPolicy{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.GlobalTrafficPolicy), err
-}
-
-// Delete takes name of the globalTrafficPolicy and deletes it. Returns an error if one occurs.
-func (c *FakeGlobalTrafficPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
- _, err := c.Fake.
- Invokes(testing.NewDeleteActionWithOptions(globaltrafficpoliciesResource, c.ns, name, opts), &admiralv1.GlobalTrafficPolicy{})
-
- return err
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *FakeGlobalTrafficPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
- action := testing.NewDeleteCollectionAction(globaltrafficpoliciesResource, c.ns, listOpts)
-
- _, err := c.Fake.Invokes(action, &admiralv1.GlobalTrafficPolicyList{})
- return err
-}
-
-// Patch applies the patch and returns the patched globalTrafficPolicy.
-func (c *FakeGlobalTrafficPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admiralv1.GlobalTrafficPolicy, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewPatchSubresourceAction(globaltrafficpoliciesResource, c.ns, name, pt, data, subresources...), &admiralv1.GlobalTrafficPolicy{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.GlobalTrafficPolicy), err
-}
From bc9ce35a3a280dabcc627f4534126ab9cf30b787 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:53:18 -0700
Subject: [PATCH 064/235] remove file
admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_routingpolicy.go
---
.../admiral/v1/fake/fake_routingpolicy.go | 142 ------------------
1 file changed, 142 deletions(-)
delete mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_routingpolicy.go
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_routingpolicy.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_routingpolicy.go
deleted file mode 100644
index e1230875..00000000
--- a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/fake/fake_routingpolicy.go
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package fake
-
-import (
- "context"
-
- admiralv1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- labels "k8s.io/apimachinery/pkg/labels"
- schema "k8s.io/apimachinery/pkg/runtime/schema"
- types "k8s.io/apimachinery/pkg/types"
- watch "k8s.io/apimachinery/pkg/watch"
- testing "k8s.io/client-go/testing"
-)
-
-// FakeRoutingPolicies implements RoutingPolicyInterface
-type FakeRoutingPolicies struct {
- Fake *FakeAdmiralV1
- ns string
-}
-
-var routingpoliciesResource = schema.GroupVersionResource{Group: "admiral.io", Version: "v1", Resource: "routingpolicies"}
-
-var routingpoliciesKind = schema.GroupVersionKind{Group: "admiral.io", Version: "v1", Kind: "RoutingPolicy"}
-
-// Get takes name of the routingPolicy, and returns the corresponding routingPolicy object, and an error if there is any.
-func (c *FakeRoutingPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *admiralv1.RoutingPolicy, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewGetAction(routingpoliciesResource, c.ns, name), &admiralv1.RoutingPolicy{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.RoutingPolicy), err
-}
-
-// List takes label and field selectors, and returns the list of RoutingPolicies that match those selectors.
-func (c *FakeRoutingPolicies) List(ctx context.Context, opts v1.ListOptions) (result *admiralv1.RoutingPolicyList, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewListAction(routingpoliciesResource, routingpoliciesKind, c.ns, opts), &admiralv1.RoutingPolicyList{})
-
- if obj == nil {
- return nil, err
- }
-
- label, _, _ := testing.ExtractFromListOptions(opts)
- if label == nil {
- label = labels.Everything()
- }
- list := &admiralv1.RoutingPolicyList{ListMeta: obj.(*admiralv1.RoutingPolicyList).ListMeta}
- for _, item := range obj.(*admiralv1.RoutingPolicyList).Items {
- if label.Matches(labels.Set(item.Labels)) {
- list.Items = append(list.Items, item)
- }
- }
- return list, err
-}
-
-// Watch returns a watch.Interface that watches the requested routingPolicies.
-func (c *FakeRoutingPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
- return c.Fake.
- InvokesWatch(testing.NewWatchAction(routingpoliciesResource, c.ns, opts))
-
-}
-
-// Create takes the representation of a routingPolicy and creates it. Returns the server's representation of the routingPolicy, and an error, if there is any.
-func (c *FakeRoutingPolicies) Create(ctx context.Context, routingPolicy *admiralv1.RoutingPolicy, opts v1.CreateOptions) (result *admiralv1.RoutingPolicy, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewCreateAction(routingpoliciesResource, c.ns, routingPolicy), &admiralv1.RoutingPolicy{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.RoutingPolicy), err
-}
-
-// Update takes the representation of a routingPolicy and updates it. Returns the server's representation of the routingPolicy, and an error, if there is any.
-func (c *FakeRoutingPolicies) Update(ctx context.Context, routingPolicy *admiralv1.RoutingPolicy, opts v1.UpdateOptions) (result *admiralv1.RoutingPolicy, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewUpdateAction(routingpoliciesResource, c.ns, routingPolicy), &admiralv1.RoutingPolicy{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.RoutingPolicy), err
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *FakeRoutingPolicies) UpdateStatus(ctx context.Context, routingPolicy *admiralv1.RoutingPolicy, opts v1.UpdateOptions) (*admiralv1.RoutingPolicy, error) {
- obj, err := c.Fake.
- Invokes(testing.NewUpdateSubresourceAction(routingpoliciesResource, "status", c.ns, routingPolicy), &admiralv1.RoutingPolicy{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.RoutingPolicy), err
-}
-
-// Delete takes name of the routingPolicy and deletes it. Returns an error if one occurs.
-func (c *FakeRoutingPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
- _, err := c.Fake.
- Invokes(testing.NewDeleteActionWithOptions(routingpoliciesResource, c.ns, name, opts), &admiralv1.RoutingPolicy{})
-
- return err
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *FakeRoutingPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
- action := testing.NewDeleteCollectionAction(routingpoliciesResource, c.ns, listOpts)
-
- _, err := c.Fake.Invokes(action, &admiralv1.RoutingPolicyList{})
- return err
-}
-
-// Patch applies the patch and returns the patched routingPolicy.
-func (c *FakeRoutingPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *admiralv1.RoutingPolicy, err error) {
- obj, err := c.Fake.
- Invokes(testing.NewPatchSubresourceAction(routingpoliciesResource, c.ns, name, pt, data, subresources...), &admiralv1.RoutingPolicy{})
-
- if obj == nil {
- return nil, err
- }
- return obj.(*admiralv1.RoutingPolicy), err
-}
From 8f4da9d9a53e529aa6e427cbdfa9e7049ced2b75 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:53:22 -0700
Subject: [PATCH 065/235] remove file
admiral/pkg/client/clientset/versioned/typed/admiral/v1/generated_expansion.go
---
.../typed/admiral/v1/generated_expansion.go | 27 -------------------
1 file changed, 27 deletions(-)
delete mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1/generated_expansion.go
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/generated_expansion.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1/generated_expansion.go
deleted file mode 100644
index f3156a4d..00000000
--- a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/generated_expansion.go
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1
-
-type DependencyExpansion interface{}
-
-type DependencyProxyExpansion interface{}
-
-type GlobalTrafficPolicyExpansion interface{}
-
-type RoutingPolicyExpansion interface{}
From 49264a374621c536de62b46fecf70af4467c8ef0 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:53:25 -0700
Subject: [PATCH 066/235] remove file
admiral/pkg/client/clientset/versioned/typed/admiral/v1/globaltrafficpolicy.go
---
.../typed/admiral/v1/globaltrafficpolicy.go | 195 ------------------
1 file changed, 195 deletions(-)
delete mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1/globaltrafficpolicy.go
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/globaltrafficpolicy.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1/globaltrafficpolicy.go
deleted file mode 100644
index b3f430a9..00000000
--- a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/globaltrafficpolicy.go
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1
-
-import (
- "context"
- "time"
-
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- scheme "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/scheme"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- types "k8s.io/apimachinery/pkg/types"
- watch "k8s.io/apimachinery/pkg/watch"
- rest "k8s.io/client-go/rest"
-)
-
-// GlobalTrafficPoliciesGetter has a method to return a GlobalTrafficPolicyInterface.
-// A group's client should implement this interface.
-type GlobalTrafficPoliciesGetter interface {
- GlobalTrafficPolicies(namespace string) GlobalTrafficPolicyInterface
-}
-
-// GlobalTrafficPolicyInterface has methods to work with GlobalTrafficPolicy resources.
-type GlobalTrafficPolicyInterface interface {
- Create(ctx context.Context, globalTrafficPolicy *v1.GlobalTrafficPolicy, opts metav1.CreateOptions) (*v1.GlobalTrafficPolicy, error)
- Update(ctx context.Context, globalTrafficPolicy *v1.GlobalTrafficPolicy, opts metav1.UpdateOptions) (*v1.GlobalTrafficPolicy, error)
- UpdateStatus(ctx context.Context, globalTrafficPolicy *v1.GlobalTrafficPolicy, opts metav1.UpdateOptions) (*v1.GlobalTrafficPolicy, error)
- Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
- DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.GlobalTrafficPolicy, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.GlobalTrafficPolicyList, error)
- Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.GlobalTrafficPolicy, err error)
- GlobalTrafficPolicyExpansion
-}
-
-// globalTrafficPolicies implements GlobalTrafficPolicyInterface
-type globalTrafficPolicies struct {
- client rest.Interface
- ns string
-}
-
-// newGlobalTrafficPolicies returns a GlobalTrafficPolicies
-func newGlobalTrafficPolicies(c *AdmiralV1Client, namespace string) *globalTrafficPolicies {
- return &globalTrafficPolicies{
- client: c.RESTClient(),
- ns: namespace,
- }
-}
-
-// Get takes name of the globalTrafficPolicy, and returns the corresponding globalTrafficPolicy object, and an error if there is any.
-func (c *globalTrafficPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.GlobalTrafficPolicy, err error) {
- result = &v1.GlobalTrafficPolicy{}
- err = c.client.Get().
- Namespace(c.ns).
- Resource("globaltrafficpolicies").
- Name(name).
- VersionedParams(&options, scheme.ParameterCodec).
- Do(ctx).
- Into(result)
- return
-}
-
-// List takes label and field selectors, and returns the list of GlobalTrafficPolicies that match those selectors.
-func (c *globalTrafficPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.GlobalTrafficPolicyList, err error) {
- var timeout time.Duration
- if opts.TimeoutSeconds != nil {
- timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
- }
- result = &v1.GlobalTrafficPolicyList{}
- err = c.client.Get().
- Namespace(c.ns).
- Resource("globaltrafficpolicies").
- VersionedParams(&opts, scheme.ParameterCodec).
- Timeout(timeout).
- Do(ctx).
- Into(result)
- return
-}
-
-// Watch returns a watch.Interface that watches the requested globalTrafficPolicies.
-func (c *globalTrafficPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
- var timeout time.Duration
- if opts.TimeoutSeconds != nil {
- timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
- }
- opts.Watch = true
- return c.client.Get().
- Namespace(c.ns).
- Resource("globaltrafficpolicies").
- VersionedParams(&opts, scheme.ParameterCodec).
- Timeout(timeout).
- Watch(ctx)
-}
-
-// Create takes the representation of a globalTrafficPolicy and creates it. Returns the server's representation of the globalTrafficPolicy, and an error, if there is any.
-func (c *globalTrafficPolicies) Create(ctx context.Context, globalTrafficPolicy *v1.GlobalTrafficPolicy, opts metav1.CreateOptions) (result *v1.GlobalTrafficPolicy, err error) {
- result = &v1.GlobalTrafficPolicy{}
- err = c.client.Post().
- Namespace(c.ns).
- Resource("globaltrafficpolicies").
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(globalTrafficPolicy).
- Do(ctx).
- Into(result)
- return
-}
-
-// Update takes the representation of a globalTrafficPolicy and updates it. Returns the server's representation of the globalTrafficPolicy, and an error, if there is any.
-func (c *globalTrafficPolicies) Update(ctx context.Context, globalTrafficPolicy *v1.GlobalTrafficPolicy, opts metav1.UpdateOptions) (result *v1.GlobalTrafficPolicy, err error) {
- result = &v1.GlobalTrafficPolicy{}
- err = c.client.Put().
- Namespace(c.ns).
- Resource("globaltrafficpolicies").
- Name(globalTrafficPolicy.Name).
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(globalTrafficPolicy).
- Do(ctx).
- Into(result)
- return
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *globalTrafficPolicies) UpdateStatus(ctx context.Context, globalTrafficPolicy *v1.GlobalTrafficPolicy, opts metav1.UpdateOptions) (result *v1.GlobalTrafficPolicy, err error) {
- result = &v1.GlobalTrafficPolicy{}
- err = c.client.Put().
- Namespace(c.ns).
- Resource("globaltrafficpolicies").
- Name(globalTrafficPolicy.Name).
- SubResource("status").
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(globalTrafficPolicy).
- Do(ctx).
- Into(result)
- return
-}
-
-// Delete takes name of the globalTrafficPolicy and deletes it. Returns an error if one occurs.
-func (c *globalTrafficPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
- return c.client.Delete().
- Namespace(c.ns).
- Resource("globaltrafficpolicies").
- Name(name).
- Body(&opts).
- Do(ctx).
- Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *globalTrafficPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
- var timeout time.Duration
- if listOpts.TimeoutSeconds != nil {
- timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
- }
- return c.client.Delete().
- Namespace(c.ns).
- Resource("globaltrafficpolicies").
- VersionedParams(&listOpts, scheme.ParameterCodec).
- Timeout(timeout).
- Body(&opts).
- Do(ctx).
- Error()
-}
-
-// Patch applies the patch and returns the patched globalTrafficPolicy.
-func (c *globalTrafficPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.GlobalTrafficPolicy, err error) {
- result = &v1.GlobalTrafficPolicy{}
- err = c.client.Patch(pt).
- Namespace(c.ns).
- Resource("globaltrafficpolicies").
- Name(name).
- SubResource(subresources...).
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(data).
- Do(ctx).
- Into(result)
- return
-}
From 94d7bdd3c4f6ae0a8dc1f10b85c8c8b8c6b65ead Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:53:28 -0700
Subject: [PATCH 067/235] remove file
admiral/pkg/client/clientset/versioned/typed/admiral/v1/routingpolicy.go
---
.../typed/admiral/v1/routingpolicy.go | 195 ------------------
1 file changed, 195 deletions(-)
delete mode 100644 admiral/pkg/client/clientset/versioned/typed/admiral/v1/routingpolicy.go
diff --git a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/routingpolicy.go b/admiral/pkg/client/clientset/versioned/typed/admiral/v1/routingpolicy.go
deleted file mode 100644
index a84c923e..00000000
--- a/admiral/pkg/client/clientset/versioned/typed/admiral/v1/routingpolicy.go
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1
-
-import (
- "context"
- "time"
-
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- scheme "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/scheme"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- types "k8s.io/apimachinery/pkg/types"
- watch "k8s.io/apimachinery/pkg/watch"
- rest "k8s.io/client-go/rest"
-)
-
-// RoutingPoliciesGetter has a method to return a RoutingPolicyInterface.
-// A group's client should implement this interface.
-type RoutingPoliciesGetter interface {
- RoutingPolicies(namespace string) RoutingPolicyInterface
-}
-
-// RoutingPolicyInterface has methods to work with RoutingPolicy resources.
-type RoutingPolicyInterface interface {
- Create(ctx context.Context, routingPolicy *v1.RoutingPolicy, opts metav1.CreateOptions) (*v1.RoutingPolicy, error)
- Update(ctx context.Context, routingPolicy *v1.RoutingPolicy, opts metav1.UpdateOptions) (*v1.RoutingPolicy, error)
- UpdateStatus(ctx context.Context, routingPolicy *v1.RoutingPolicy, opts metav1.UpdateOptions) (*v1.RoutingPolicy, error)
- Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
- DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
- Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.RoutingPolicy, error)
- List(ctx context.Context, opts metav1.ListOptions) (*v1.RoutingPolicyList, error)
- Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoutingPolicy, err error)
- RoutingPolicyExpansion
-}
-
-// routingPolicies implements RoutingPolicyInterface
-type routingPolicies struct {
- client rest.Interface
- ns string
-}
-
-// newRoutingPolicies returns a RoutingPolicies
-func newRoutingPolicies(c *AdmiralV1Client, namespace string) *routingPolicies {
- return &routingPolicies{
- client: c.RESTClient(),
- ns: namespace,
- }
-}
-
-// Get takes name of the routingPolicy, and returns the corresponding routingPolicy object, and an error if there is any.
-func (c *routingPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RoutingPolicy, err error) {
- result = &v1.RoutingPolicy{}
- err = c.client.Get().
- Namespace(c.ns).
- Resource("routingpolicies").
- Name(name).
- VersionedParams(&options, scheme.ParameterCodec).
- Do(ctx).
- Into(result)
- return
-}
-
-// List takes label and field selectors, and returns the list of RoutingPolicies that match those selectors.
-func (c *routingPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoutingPolicyList, err error) {
- var timeout time.Duration
- if opts.TimeoutSeconds != nil {
- timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
- }
- result = &v1.RoutingPolicyList{}
- err = c.client.Get().
- Namespace(c.ns).
- Resource("routingpolicies").
- VersionedParams(&opts, scheme.ParameterCodec).
- Timeout(timeout).
- Do(ctx).
- Into(result)
- return
-}
-
-// Watch returns a watch.Interface that watches the requested routingPolicies.
-func (c *routingPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
- var timeout time.Duration
- if opts.TimeoutSeconds != nil {
- timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
- }
- opts.Watch = true
- return c.client.Get().
- Namespace(c.ns).
- Resource("routingpolicies").
- VersionedParams(&opts, scheme.ParameterCodec).
- Timeout(timeout).
- Watch(ctx)
-}
-
-// Create takes the representation of a routingPolicy and creates it. Returns the server's representation of the routingPolicy, and an error, if there is any.
-func (c *routingPolicies) Create(ctx context.Context, routingPolicy *v1.RoutingPolicy, opts metav1.CreateOptions) (result *v1.RoutingPolicy, err error) {
- result = &v1.RoutingPolicy{}
- err = c.client.Post().
- Namespace(c.ns).
- Resource("routingpolicies").
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(routingPolicy).
- Do(ctx).
- Into(result)
- return
-}
-
-// Update takes the representation of a routingPolicy and updates it. Returns the server's representation of the routingPolicy, and an error, if there is any.
-func (c *routingPolicies) Update(ctx context.Context, routingPolicy *v1.RoutingPolicy, opts metav1.UpdateOptions) (result *v1.RoutingPolicy, err error) {
- result = &v1.RoutingPolicy{}
- err = c.client.Put().
- Namespace(c.ns).
- Resource("routingpolicies").
- Name(routingPolicy.Name).
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(routingPolicy).
- Do(ctx).
- Into(result)
- return
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *routingPolicies) UpdateStatus(ctx context.Context, routingPolicy *v1.RoutingPolicy, opts metav1.UpdateOptions) (result *v1.RoutingPolicy, err error) {
- result = &v1.RoutingPolicy{}
- err = c.client.Put().
- Namespace(c.ns).
- Resource("routingpolicies").
- Name(routingPolicy.Name).
- SubResource("status").
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(routingPolicy).
- Do(ctx).
- Into(result)
- return
-}
-
-// Delete takes name of the routingPolicy and deletes it. Returns an error if one occurs.
-func (c *routingPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
- return c.client.Delete().
- Namespace(c.ns).
- Resource("routingpolicies").
- Name(name).
- Body(&opts).
- Do(ctx).
- Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *routingPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
- var timeout time.Duration
- if listOpts.TimeoutSeconds != nil {
- timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
- }
- return c.client.Delete().
- Namespace(c.ns).
- Resource("routingpolicies").
- VersionedParams(&listOpts, scheme.ParameterCodec).
- Timeout(timeout).
- Body(&opts).
- Do(ctx).
- Error()
-}
-
-// Patch applies the patch and returns the patched routingPolicy.
-func (c *routingPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoutingPolicy, err error) {
- result = &v1.RoutingPolicy{}
- err = c.client.Patch(pt).
- Namespace(c.ns).
- Resource("routingpolicies").
- Name(name).
- SubResource(subresources...).
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(data).
- Do(ctx).
- Into(result)
- return
-}
From 0721422a72af260f91d7925a13ca4d2100704e55 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:53:31 -0700
Subject: [PATCH 068/235] remove file
admiral/pkg/client/informers/externalversions/admiral/v1/dependency.go
---
.../externalversions/admiral/v1/dependency.go | 90 -------------------
1 file changed, 90 deletions(-)
delete mode 100644 admiral/pkg/client/informers/externalversions/admiral/v1/dependency.go
diff --git a/admiral/pkg/client/informers/externalversions/admiral/v1/dependency.go b/admiral/pkg/client/informers/externalversions/admiral/v1/dependency.go
deleted file mode 100644
index 75a3f41e..00000000
--- a/admiral/pkg/client/informers/externalversions/admiral/v1/dependency.go
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by informer-gen. DO NOT EDIT.
-
-package v1
-
-import (
- "context"
- time "time"
-
- admiralv1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- versioned "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned"
- internalinterfaces "github.com/istio-ecosystem/admiral/admiral/pkg/client/informers/externalversions/internalinterfaces"
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/listers/admiral/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- runtime "k8s.io/apimachinery/pkg/runtime"
- watch "k8s.io/apimachinery/pkg/watch"
- cache "k8s.io/client-go/tools/cache"
-)
-
-// DependencyInformer provides access to a shared informer and lister for
-// Dependencies.
-type DependencyInformer interface {
- Informer() cache.SharedIndexInformer
- Lister() v1.DependencyLister
-}
-
-type dependencyInformer struct {
- factory internalinterfaces.SharedInformerFactory
- tweakListOptions internalinterfaces.TweakListOptionsFunc
- namespace string
-}
-
-// NewDependencyInformer constructs a new informer for Dependency type.
-// Always prefer using an informer factory to get a shared informer instead of getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewDependencyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
- return NewFilteredDependencyInformer(client, namespace, resyncPeriod, indexers, nil)
-}
-
-// NewFilteredDependencyInformer constructs a new informer for Dependency type.
-// Always prefer using an informer factory to get a shared informer instead of getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewFilteredDependencyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
- return cache.NewSharedIndexInformer(
- &cache.ListWatch{
- ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
- if tweakListOptions != nil {
- tweakListOptions(&options)
- }
- return client.AdmiralV1().Dependencies(namespace).List(context.TODO(), options)
- },
- WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
- if tweakListOptions != nil {
- tweakListOptions(&options)
- }
- return client.AdmiralV1().Dependencies(namespace).Watch(context.TODO(), options)
- },
- },
- &admiralv1.Dependency{},
- resyncPeriod,
- indexers,
- )
-}
-
-func (f *dependencyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
- return NewFilteredDependencyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
-}
-
-func (f *dependencyInformer) Informer() cache.SharedIndexInformer {
- return f.factory.InformerFor(&admiralv1.Dependency{}, f.defaultInformer)
-}
-
-func (f *dependencyInformer) Lister() v1.DependencyLister {
- return v1.NewDependencyLister(f.Informer().GetIndexer())
-}
From 0946a7cd44f83a812b8908ac16bda4dff4c456e1 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:53:34 -0700
Subject: [PATCH 069/235] remove file
admiral/pkg/client/informers/externalversions/admiral/v1/dependencyproxy.go
---
.../admiral/v1/dependencyproxy.go | 90 -------------------
1 file changed, 90 deletions(-)
delete mode 100644 admiral/pkg/client/informers/externalversions/admiral/v1/dependencyproxy.go
diff --git a/admiral/pkg/client/informers/externalversions/admiral/v1/dependencyproxy.go b/admiral/pkg/client/informers/externalversions/admiral/v1/dependencyproxy.go
deleted file mode 100644
index 03fc00f5..00000000
--- a/admiral/pkg/client/informers/externalversions/admiral/v1/dependencyproxy.go
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by informer-gen. DO NOT EDIT.
-
-package v1
-
-import (
- "context"
- time "time"
-
- admiralv1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- versioned "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned"
- internalinterfaces "github.com/istio-ecosystem/admiral/admiral/pkg/client/informers/externalversions/internalinterfaces"
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/listers/admiral/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- runtime "k8s.io/apimachinery/pkg/runtime"
- watch "k8s.io/apimachinery/pkg/watch"
- cache "k8s.io/client-go/tools/cache"
-)
-
-// DependencyProxyInformer provides access to a shared informer and lister for
-// DependencyProxies.
-type DependencyProxyInformer interface {
- Informer() cache.SharedIndexInformer
- Lister() v1.DependencyProxyLister
-}
-
-type dependencyProxyInformer struct {
- factory internalinterfaces.SharedInformerFactory
- tweakListOptions internalinterfaces.TweakListOptionsFunc
- namespace string
-}
-
-// NewDependencyProxyInformer constructs a new informer for DependencyProxy type.
-// Always prefer using an informer factory to get a shared informer instead of getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewDependencyProxyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
- return NewFilteredDependencyProxyInformer(client, namespace, resyncPeriod, indexers, nil)
-}
-
-// NewFilteredDependencyProxyInformer constructs a new informer for DependencyProxy type.
-// Always prefer using an informer factory to get a shared informer instead of getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewFilteredDependencyProxyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
- return cache.NewSharedIndexInformer(
- &cache.ListWatch{
- ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
- if tweakListOptions != nil {
- tweakListOptions(&options)
- }
- return client.AdmiralV1().DependencyProxies(namespace).List(context.TODO(), options)
- },
- WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
- if tweakListOptions != nil {
- tweakListOptions(&options)
- }
- return client.AdmiralV1().DependencyProxies(namespace).Watch(context.TODO(), options)
- },
- },
- &admiralv1.DependencyProxy{},
- resyncPeriod,
- indexers,
- )
-}
-
-func (f *dependencyProxyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
- return NewFilteredDependencyProxyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
-}
-
-func (f *dependencyProxyInformer) Informer() cache.SharedIndexInformer {
- return f.factory.InformerFor(&admiralv1.DependencyProxy{}, f.defaultInformer)
-}
-
-func (f *dependencyProxyInformer) Lister() v1.DependencyProxyLister {
- return v1.NewDependencyProxyLister(f.Informer().GetIndexer())
-}
From 9a1b00eb292e2744624fde1e966f7a41b24b1af1 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:53:38 -0700
Subject: [PATCH 070/235] remove file
admiral/pkg/client/informers/externalversions/admiral/v1/globaltrafficpolicy.go
---
.../admiral/v1/globaltrafficpolicy.go | 90 -------------------
1 file changed, 90 deletions(-)
delete mode 100644 admiral/pkg/client/informers/externalversions/admiral/v1/globaltrafficpolicy.go
diff --git a/admiral/pkg/client/informers/externalversions/admiral/v1/globaltrafficpolicy.go b/admiral/pkg/client/informers/externalversions/admiral/v1/globaltrafficpolicy.go
deleted file mode 100644
index 54f8bb6e..00000000
--- a/admiral/pkg/client/informers/externalversions/admiral/v1/globaltrafficpolicy.go
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by informer-gen. DO NOT EDIT.
-
-package v1
-
-import (
- "context"
- time "time"
-
- admiralv1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- versioned "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned"
- internalinterfaces "github.com/istio-ecosystem/admiral/admiral/pkg/client/informers/externalversions/internalinterfaces"
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/listers/admiral/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- runtime "k8s.io/apimachinery/pkg/runtime"
- watch "k8s.io/apimachinery/pkg/watch"
- cache "k8s.io/client-go/tools/cache"
-)
-
-// GlobalTrafficPolicyInformer provides access to a shared informer and lister for
-// GlobalTrafficPolicies.
-type GlobalTrafficPolicyInformer interface {
- Informer() cache.SharedIndexInformer
- Lister() v1.GlobalTrafficPolicyLister
-}
-
-type globalTrafficPolicyInformer struct {
- factory internalinterfaces.SharedInformerFactory
- tweakListOptions internalinterfaces.TweakListOptionsFunc
- namespace string
-}
-
-// NewGlobalTrafficPolicyInformer constructs a new informer for GlobalTrafficPolicy type.
-// Always prefer using an informer factory to get a shared informer instead of getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewGlobalTrafficPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
- return NewFilteredGlobalTrafficPolicyInformer(client, namespace, resyncPeriod, indexers, nil)
-}
-
-// NewFilteredGlobalTrafficPolicyInformer constructs a new informer for GlobalTrafficPolicy type.
-// Always prefer using an informer factory to get a shared informer instead of getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewFilteredGlobalTrafficPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
- return cache.NewSharedIndexInformer(
- &cache.ListWatch{
- ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
- if tweakListOptions != nil {
- tweakListOptions(&options)
- }
- return client.AdmiralV1().GlobalTrafficPolicies(namespace).List(context.TODO(), options)
- },
- WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
- if tweakListOptions != nil {
- tweakListOptions(&options)
- }
- return client.AdmiralV1().GlobalTrafficPolicies(namespace).Watch(context.TODO(), options)
- },
- },
- &admiralv1.GlobalTrafficPolicy{},
- resyncPeriod,
- indexers,
- )
-}
-
-func (f *globalTrafficPolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
- return NewFilteredGlobalTrafficPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
-}
-
-func (f *globalTrafficPolicyInformer) Informer() cache.SharedIndexInformer {
- return f.factory.InformerFor(&admiralv1.GlobalTrafficPolicy{}, f.defaultInformer)
-}
-
-func (f *globalTrafficPolicyInformer) Lister() v1.GlobalTrafficPolicyLister {
- return v1.NewGlobalTrafficPolicyLister(f.Informer().GetIndexer())
-}
From b45314aa1d8b4f02dfa95e300082ebddb751c1f6 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:53:41 -0700
Subject: [PATCH 071/235] remove file
admiral/pkg/client/informers/externalversions/admiral/v1/interface.go
---
.../externalversions/admiral/v1/interface.go | 66 -------------------
1 file changed, 66 deletions(-)
delete mode 100644 admiral/pkg/client/informers/externalversions/admiral/v1/interface.go
diff --git a/admiral/pkg/client/informers/externalversions/admiral/v1/interface.go b/admiral/pkg/client/informers/externalversions/admiral/v1/interface.go
deleted file mode 100644
index 56f9d5aa..00000000
--- a/admiral/pkg/client/informers/externalversions/admiral/v1/interface.go
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by informer-gen. DO NOT EDIT.
-
-package v1
-
-import (
- internalinterfaces "github.com/istio-ecosystem/admiral/admiral/pkg/client/informers/externalversions/internalinterfaces"
-)
-
-// Interface provides access to all the informers in this group version.
-type Interface interface {
- // Dependencies returns a DependencyInformer.
- Dependencies() DependencyInformer
- // DependencyProxies returns a DependencyProxyInformer.
- DependencyProxies() DependencyProxyInformer
- // GlobalTrafficPolicies returns a GlobalTrafficPolicyInformer.
- GlobalTrafficPolicies() GlobalTrafficPolicyInformer
- // RoutingPolicies returns a RoutingPolicyInformer.
- RoutingPolicies() RoutingPolicyInformer
-}
-
-type version struct {
- factory internalinterfaces.SharedInformerFactory
- namespace string
- tweakListOptions internalinterfaces.TweakListOptionsFunc
-}
-
-// New returns a new Interface.
-func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
- return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
-}
-
-// Dependencies returns a DependencyInformer.
-func (v *version) Dependencies() DependencyInformer {
- return &dependencyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
-}
-
-// DependencyProxies returns a DependencyProxyInformer.
-func (v *version) DependencyProxies() DependencyProxyInformer {
- return &dependencyProxyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
-}
-
-// GlobalTrafficPolicies returns a GlobalTrafficPolicyInformer.
-func (v *version) GlobalTrafficPolicies() GlobalTrafficPolicyInformer {
- return &globalTrafficPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
-}
-
-// RoutingPolicies returns a RoutingPolicyInformer.
-func (v *version) RoutingPolicies() RoutingPolicyInformer {
- return &routingPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
-}
From 422cfc4b750ef4948eef8b437a3eb6d62afaeb8b Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:53:44 -0700
Subject: [PATCH 072/235] remove file
admiral/pkg/client/informers/externalversions/admiral/v1/routingpolicy.go
---
.../admiral/v1/routingpolicy.go | 90 -------------------
1 file changed, 90 deletions(-)
delete mode 100644 admiral/pkg/client/informers/externalversions/admiral/v1/routingpolicy.go
diff --git a/admiral/pkg/client/informers/externalversions/admiral/v1/routingpolicy.go b/admiral/pkg/client/informers/externalversions/admiral/v1/routingpolicy.go
deleted file mode 100644
index a7f2fb1b..00000000
--- a/admiral/pkg/client/informers/externalversions/admiral/v1/routingpolicy.go
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by informer-gen. DO NOT EDIT.
-
-package v1
-
-import (
- "context"
- time "time"
-
- admiralv1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- versioned "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned"
- internalinterfaces "github.com/istio-ecosystem/admiral/admiral/pkg/client/informers/externalversions/internalinterfaces"
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/listers/admiral/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- runtime "k8s.io/apimachinery/pkg/runtime"
- watch "k8s.io/apimachinery/pkg/watch"
- cache "k8s.io/client-go/tools/cache"
-)
-
-// RoutingPolicyInformer provides access to a shared informer and lister for
-// RoutingPolicies.
-type RoutingPolicyInformer interface {
- Informer() cache.SharedIndexInformer
- Lister() v1.RoutingPolicyLister
-}
-
-type routingPolicyInformer struct {
- factory internalinterfaces.SharedInformerFactory
- tweakListOptions internalinterfaces.TweakListOptionsFunc
- namespace string
-}
-
-// NewRoutingPolicyInformer constructs a new informer for RoutingPolicy type.
-// Always prefer using an informer factory to get a shared informer instead of getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewRoutingPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
- return NewFilteredRoutingPolicyInformer(client, namespace, resyncPeriod, indexers, nil)
-}
-
-// NewFilteredRoutingPolicyInformer constructs a new informer for RoutingPolicy type.
-// Always prefer using an informer factory to get a shared informer instead of getting an independent
-// one. This reduces memory footprint and number of connections to the server.
-func NewFilteredRoutingPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
- return cache.NewSharedIndexInformer(
- &cache.ListWatch{
- ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
- if tweakListOptions != nil {
- tweakListOptions(&options)
- }
- return client.AdmiralV1().RoutingPolicies(namespace).List(context.TODO(), options)
- },
- WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
- if tweakListOptions != nil {
- tweakListOptions(&options)
- }
- return client.AdmiralV1().RoutingPolicies(namespace).Watch(context.TODO(), options)
- },
- },
- &admiralv1.RoutingPolicy{},
- resyncPeriod,
- indexers,
- )
-}
-
-func (f *routingPolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
- return NewFilteredRoutingPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
-}
-
-func (f *routingPolicyInformer) Informer() cache.SharedIndexInformer {
- return f.factory.InformerFor(&admiralv1.RoutingPolicy{}, f.defaultInformer)
-}
-
-func (f *routingPolicyInformer) Lister() v1.RoutingPolicyLister {
- return v1.NewRoutingPolicyLister(f.Informer().GetIndexer())
-}
From 1699f2970c0595db95e30b22e904814507745a71 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:53:47 -0700
Subject: [PATCH 073/235] remove file
admiral/pkg/client/listers/admiral/v1/dependency.go
---
.../client/listers/admiral/v1/dependency.go | 99 -------------------
1 file changed, 99 deletions(-)
delete mode 100644 admiral/pkg/client/listers/admiral/v1/dependency.go
diff --git a/admiral/pkg/client/listers/admiral/v1/dependency.go b/admiral/pkg/client/listers/admiral/v1/dependency.go
deleted file mode 100644
index 406d0284..00000000
--- a/admiral/pkg/client/listers/admiral/v1/dependency.go
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by lister-gen. DO NOT EDIT.
-
-package v1
-
-import (
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/client-go/tools/cache"
-)
-
-// DependencyLister helps list Dependencies.
-// All objects returned here must be treated as read-only.
-type DependencyLister interface {
- // List lists all Dependencies in the indexer.
- // Objects returned here must be treated as read-only.
- List(selector labels.Selector) (ret []*v1.Dependency, err error)
- // Dependencies returns an object that can list and get Dependencies.
- Dependencies(namespace string) DependencyNamespaceLister
- DependencyListerExpansion
-}
-
-// dependencyLister implements the DependencyLister interface.
-type dependencyLister struct {
- indexer cache.Indexer
-}
-
-// NewDependencyLister returns a new DependencyLister.
-func NewDependencyLister(indexer cache.Indexer) DependencyLister {
- return &dependencyLister{indexer: indexer}
-}
-
-// List lists all Dependencies in the indexer.
-func (s *dependencyLister) List(selector labels.Selector) (ret []*v1.Dependency, err error) {
- err = cache.ListAll(s.indexer, selector, func(m interface{}) {
- ret = append(ret, m.(*v1.Dependency))
- })
- return ret, err
-}
-
-// Dependencies returns an object that can list and get Dependencies.
-func (s *dependencyLister) Dependencies(namespace string) DependencyNamespaceLister {
- return dependencyNamespaceLister{indexer: s.indexer, namespace: namespace}
-}
-
-// DependencyNamespaceLister helps list and get Dependencies.
-// All objects returned here must be treated as read-only.
-type DependencyNamespaceLister interface {
- // List lists all Dependencies in the indexer for a given namespace.
- // Objects returned here must be treated as read-only.
- List(selector labels.Selector) (ret []*v1.Dependency, err error)
- // Get retrieves the Dependency from the indexer for a given namespace and name.
- // Objects returned here must be treated as read-only.
- Get(name string) (*v1.Dependency, error)
- DependencyNamespaceListerExpansion
-}
-
-// dependencyNamespaceLister implements the DependencyNamespaceLister
-// interface.
-type dependencyNamespaceLister struct {
- indexer cache.Indexer
- namespace string
-}
-
-// List lists all Dependencies in the indexer for a given namespace.
-func (s dependencyNamespaceLister) List(selector labels.Selector) (ret []*v1.Dependency, err error) {
- err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
- ret = append(ret, m.(*v1.Dependency))
- })
- return ret, err
-}
-
-// Get retrieves the Dependency from the indexer for a given namespace and name.
-func (s dependencyNamespaceLister) Get(name string) (*v1.Dependency, error) {
- obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
- if err != nil {
- return nil, err
- }
- if !exists {
- return nil, errors.NewNotFound(v1.Resource("dependency"), name)
- }
- return obj.(*v1.Dependency), nil
-}
From 0eec0772bce4fdb233d20284b4206be36bbfc6b0 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:53:50 -0700
Subject: [PATCH 074/235] remove file
admiral/pkg/client/listers/admiral/v1/dependencyproxy.go
---
.../listers/admiral/v1/dependencyproxy.go | 99 -------------------
1 file changed, 99 deletions(-)
delete mode 100644 admiral/pkg/client/listers/admiral/v1/dependencyproxy.go
diff --git a/admiral/pkg/client/listers/admiral/v1/dependencyproxy.go b/admiral/pkg/client/listers/admiral/v1/dependencyproxy.go
deleted file mode 100644
index 7ce8b33c..00000000
--- a/admiral/pkg/client/listers/admiral/v1/dependencyproxy.go
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by lister-gen. DO NOT EDIT.
-
-package v1
-
-import (
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/client-go/tools/cache"
-)
-
-// DependencyProxyLister helps list DependencyProxies.
-// All objects returned here must be treated as read-only.
-type DependencyProxyLister interface {
- // List lists all DependencyProxies in the indexer.
- // Objects returned here must be treated as read-only.
- List(selector labels.Selector) (ret []*v1.DependencyProxy, err error)
- // DependencyProxies returns an object that can list and get DependencyProxies.
- DependencyProxies(namespace string) DependencyProxyNamespaceLister
- DependencyProxyListerExpansion
-}
-
-// dependencyProxyLister implements the DependencyProxyLister interface.
-type dependencyProxyLister struct {
- indexer cache.Indexer
-}
-
-// NewDependencyProxyLister returns a new DependencyProxyLister.
-func NewDependencyProxyLister(indexer cache.Indexer) DependencyProxyLister {
- return &dependencyProxyLister{indexer: indexer}
-}
-
-// List lists all DependencyProxies in the indexer.
-func (s *dependencyProxyLister) List(selector labels.Selector) (ret []*v1.DependencyProxy, err error) {
- err = cache.ListAll(s.indexer, selector, func(m interface{}) {
- ret = append(ret, m.(*v1.DependencyProxy))
- })
- return ret, err
-}
-
-// DependencyProxies returns an object that can list and get DependencyProxies.
-func (s *dependencyProxyLister) DependencyProxies(namespace string) DependencyProxyNamespaceLister {
- return dependencyProxyNamespaceLister{indexer: s.indexer, namespace: namespace}
-}
-
-// DependencyProxyNamespaceLister helps list and get DependencyProxies.
-// All objects returned here must be treated as read-only.
-type DependencyProxyNamespaceLister interface {
- // List lists all DependencyProxies in the indexer for a given namespace.
- // Objects returned here must be treated as read-only.
- List(selector labels.Selector) (ret []*v1.DependencyProxy, err error)
- // Get retrieves the DependencyProxy from the indexer for a given namespace and name.
- // Objects returned here must be treated as read-only.
- Get(name string) (*v1.DependencyProxy, error)
- DependencyProxyNamespaceListerExpansion
-}
-
-// dependencyProxyNamespaceLister implements the DependencyProxyNamespaceLister
-// interface.
-type dependencyProxyNamespaceLister struct {
- indexer cache.Indexer
- namespace string
-}
-
-// List lists all DependencyProxies in the indexer for a given namespace.
-func (s dependencyProxyNamespaceLister) List(selector labels.Selector) (ret []*v1.DependencyProxy, err error) {
- err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
- ret = append(ret, m.(*v1.DependencyProxy))
- })
- return ret, err
-}
-
-// Get retrieves the DependencyProxy from the indexer for a given namespace and name.
-func (s dependencyProxyNamespaceLister) Get(name string) (*v1.DependencyProxy, error) {
- obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
- if err != nil {
- return nil, err
- }
- if !exists {
- return nil, errors.NewNotFound(v1.Resource("dependencyproxy"), name)
- }
- return obj.(*v1.DependencyProxy), nil
-}
From a0b3af3564fd3ca2d786824d6afee0a9a7b5115d Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:53:53 -0700
Subject: [PATCH 075/235] remove file
admiral/pkg/client/listers/admiral/v1/expansion_generated.go
---
.../listers/admiral/v1/expansion_generated.go | 51 -------------------
1 file changed, 51 deletions(-)
delete mode 100644 admiral/pkg/client/listers/admiral/v1/expansion_generated.go
diff --git a/admiral/pkg/client/listers/admiral/v1/expansion_generated.go b/admiral/pkg/client/listers/admiral/v1/expansion_generated.go
deleted file mode 100644
index 5e5a4e15..00000000
--- a/admiral/pkg/client/listers/admiral/v1/expansion_generated.go
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by lister-gen. DO NOT EDIT.
-
-package v1
-
-// DependencyListerExpansion allows custom methods to be added to
-// DependencyLister.
-type DependencyListerExpansion interface{}
-
-// DependencyNamespaceListerExpansion allows custom methods to be added to
-// DependencyNamespaceLister.
-type DependencyNamespaceListerExpansion interface{}
-
-// DependencyProxyListerExpansion allows custom methods to be added to
-// DependencyProxyLister.
-type DependencyProxyListerExpansion interface{}
-
-// DependencyProxyNamespaceListerExpansion allows custom methods to be added to
-// DependencyProxyNamespaceLister.
-type DependencyProxyNamespaceListerExpansion interface{}
-
-// GlobalTrafficPolicyListerExpansion allows custom methods to be added to
-// GlobalTrafficPolicyLister.
-type GlobalTrafficPolicyListerExpansion interface{}
-
-// GlobalTrafficPolicyNamespaceListerExpansion allows custom methods to be added to
-// GlobalTrafficPolicyNamespaceLister.
-type GlobalTrafficPolicyNamespaceListerExpansion interface{}
-
-// RoutingPolicyListerExpansion allows custom methods to be added to
-// RoutingPolicyLister.
-type RoutingPolicyListerExpansion interface{}
-
-// RoutingPolicyNamespaceListerExpansion allows custom methods to be added to
-// RoutingPolicyNamespaceLister.
-type RoutingPolicyNamespaceListerExpansion interface{}
From d65acafbc6b18ac99302839089a775ada69cf797 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:53:56 -0700
Subject: [PATCH 076/235] remove file
admiral/pkg/client/listers/admiral/v1/globaltrafficpolicy.go
---
.../listers/admiral/v1/globaltrafficpolicy.go | 99 -------------------
1 file changed, 99 deletions(-)
delete mode 100644 admiral/pkg/client/listers/admiral/v1/globaltrafficpolicy.go
diff --git a/admiral/pkg/client/listers/admiral/v1/globaltrafficpolicy.go b/admiral/pkg/client/listers/admiral/v1/globaltrafficpolicy.go
deleted file mode 100644
index d982afe4..00000000
--- a/admiral/pkg/client/listers/admiral/v1/globaltrafficpolicy.go
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by lister-gen. DO NOT EDIT.
-
-package v1
-
-import (
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/client-go/tools/cache"
-)
-
-// GlobalTrafficPolicyLister helps list GlobalTrafficPolicies.
-// All objects returned here must be treated as read-only.
-type GlobalTrafficPolicyLister interface {
- // List lists all GlobalTrafficPolicies in the indexer.
- // Objects returned here must be treated as read-only.
- List(selector labels.Selector) (ret []*v1.GlobalTrafficPolicy, err error)
- // GlobalTrafficPolicies returns an object that can list and get GlobalTrafficPolicies.
- GlobalTrafficPolicies(namespace string) GlobalTrafficPolicyNamespaceLister
- GlobalTrafficPolicyListerExpansion
-}
-
-// globalTrafficPolicyLister implements the GlobalTrafficPolicyLister interface.
-type globalTrafficPolicyLister struct {
- indexer cache.Indexer
-}
-
-// NewGlobalTrafficPolicyLister returns a new GlobalTrafficPolicyLister.
-func NewGlobalTrafficPolicyLister(indexer cache.Indexer) GlobalTrafficPolicyLister {
- return &globalTrafficPolicyLister{indexer: indexer}
-}
-
-// List lists all GlobalTrafficPolicies in the indexer.
-func (s *globalTrafficPolicyLister) List(selector labels.Selector) (ret []*v1.GlobalTrafficPolicy, err error) {
- err = cache.ListAll(s.indexer, selector, func(m interface{}) {
- ret = append(ret, m.(*v1.GlobalTrafficPolicy))
- })
- return ret, err
-}
-
-// GlobalTrafficPolicies returns an object that can list and get GlobalTrafficPolicies.
-func (s *globalTrafficPolicyLister) GlobalTrafficPolicies(namespace string) GlobalTrafficPolicyNamespaceLister {
- return globalTrafficPolicyNamespaceLister{indexer: s.indexer, namespace: namespace}
-}
-
-// GlobalTrafficPolicyNamespaceLister helps list and get GlobalTrafficPolicies.
-// All objects returned here must be treated as read-only.
-type GlobalTrafficPolicyNamespaceLister interface {
- // List lists all GlobalTrafficPolicies in the indexer for a given namespace.
- // Objects returned here must be treated as read-only.
- List(selector labels.Selector) (ret []*v1.GlobalTrafficPolicy, err error)
- // Get retrieves the GlobalTrafficPolicy from the indexer for a given namespace and name.
- // Objects returned here must be treated as read-only.
- Get(name string) (*v1.GlobalTrafficPolicy, error)
- GlobalTrafficPolicyNamespaceListerExpansion
-}
-
-// globalTrafficPolicyNamespaceLister implements the GlobalTrafficPolicyNamespaceLister
-// interface.
-type globalTrafficPolicyNamespaceLister struct {
- indexer cache.Indexer
- namespace string
-}
-
-// List lists all GlobalTrafficPolicies in the indexer for a given namespace.
-func (s globalTrafficPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1.GlobalTrafficPolicy, err error) {
- err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
- ret = append(ret, m.(*v1.GlobalTrafficPolicy))
- })
- return ret, err
-}
-
-// Get retrieves the GlobalTrafficPolicy from the indexer for a given namespace and name.
-func (s globalTrafficPolicyNamespaceLister) Get(name string) (*v1.GlobalTrafficPolicy, error) {
- obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
- if err != nil {
- return nil, err
- }
- if !exists {
- return nil, errors.NewNotFound(v1.Resource("globaltrafficpolicy"), name)
- }
- return obj.(*v1.GlobalTrafficPolicy), nil
-}
From f71f00f8a85995674c1039438e7d1439d1cd76c2 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 17:53:59 -0700
Subject: [PATCH 077/235] remove file
admiral/pkg/client/listers/admiral/v1/routingpolicy.go
---
.../listers/admiral/v1/routingpolicy.go | 99 -------------------
1 file changed, 99 deletions(-)
delete mode 100644 admiral/pkg/client/listers/admiral/v1/routingpolicy.go
diff --git a/admiral/pkg/client/listers/admiral/v1/routingpolicy.go b/admiral/pkg/client/listers/admiral/v1/routingpolicy.go
deleted file mode 100644
index 33a066af..00000000
--- a/admiral/pkg/client/listers/admiral/v1/routingpolicy.go
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by lister-gen. DO NOT EDIT.
-
-package v1
-
-import (
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/client-go/tools/cache"
-)
-
-// RoutingPolicyLister helps list RoutingPolicies.
-// All objects returned here must be treated as read-only.
-type RoutingPolicyLister interface {
- // List lists all RoutingPolicies in the indexer.
- // Objects returned here must be treated as read-only.
- List(selector labels.Selector) (ret []*v1.RoutingPolicy, err error)
- // RoutingPolicies returns an object that can list and get RoutingPolicies.
- RoutingPolicies(namespace string) RoutingPolicyNamespaceLister
- RoutingPolicyListerExpansion
-}
-
-// routingPolicyLister implements the RoutingPolicyLister interface.
-type routingPolicyLister struct {
- indexer cache.Indexer
-}
-
-// NewRoutingPolicyLister returns a new RoutingPolicyLister.
-func NewRoutingPolicyLister(indexer cache.Indexer) RoutingPolicyLister {
- return &routingPolicyLister{indexer: indexer}
-}
-
-// List lists all RoutingPolicies in the indexer.
-func (s *routingPolicyLister) List(selector labels.Selector) (ret []*v1.RoutingPolicy, err error) {
- err = cache.ListAll(s.indexer, selector, func(m interface{}) {
- ret = append(ret, m.(*v1.RoutingPolicy))
- })
- return ret, err
-}
-
-// RoutingPolicies returns an object that can list and get RoutingPolicies.
-func (s *routingPolicyLister) RoutingPolicies(namespace string) RoutingPolicyNamespaceLister {
- return routingPolicyNamespaceLister{indexer: s.indexer, namespace: namespace}
-}
-
-// RoutingPolicyNamespaceLister helps list and get RoutingPolicies.
-// All objects returned here must be treated as read-only.
-type RoutingPolicyNamespaceLister interface {
- // List lists all RoutingPolicies in the indexer for a given namespace.
- // Objects returned here must be treated as read-only.
- List(selector labels.Selector) (ret []*v1.RoutingPolicy, err error)
- // Get retrieves the RoutingPolicy from the indexer for a given namespace and name.
- // Objects returned here must be treated as read-only.
- Get(name string) (*v1.RoutingPolicy, error)
- RoutingPolicyNamespaceListerExpansion
-}
-
-// routingPolicyNamespaceLister implements the RoutingPolicyNamespaceLister
-// interface.
-type routingPolicyNamespaceLister struct {
- indexer cache.Indexer
- namespace string
-}
-
-// List lists all RoutingPolicies in the indexer for a given namespace.
-func (s routingPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1.RoutingPolicy, err error) {
- err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
- ret = append(ret, m.(*v1.RoutingPolicy))
- })
- return ret, err
-}
-
-// Get retrieves the RoutingPolicy from the indexer for a given namespace and name.
-func (s routingPolicyNamespaceLister) Get(name string) (*v1.RoutingPolicy, error) {
- obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
- if err != nil {
- return nil, err
- }
- if !exists {
- return nil, errors.NewNotFound(v1.Resource("routingpolicy"), name)
- }
- return obj.(*v1.RoutingPolicy), nil
-}
From 42790c25e955abfe5b354666520a04ef30ccb2f6 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 18:36:53 -0700
Subject: [PATCH 078/235] fix code owners file
---
.github/CODEOWNERS | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 124a48c3..21432be6 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,10 +1,10 @@
-# List of source code paths and code owners
-# For more information on the CODEOWNERS file go to:
-# https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax
-
-# Uncomment line 10 and add the correct owners's usernames.
-# These owners will be the default owners for everything in
-# the repo. Unless a later match takes precedence,
-# @global-owner1 and @global-owner2 will be requested for
-# review when someone opens a pull request.
-* @services-mesh/service-mesh
+(@anil attuluri)[https://github.com/aattuluri]
+(@anubhav aeron)[https://github.com/nirvanagit]
+(@shriram sharma)[https://github.com/nirvanagit]
+(@kartikeya pharasi)[https://github.com/nirvanagit]
+(@vinay gonuguntla)[https://github.com/nirvanagit]
+(@vrushali joshi)[https://github.com/nirvanagit]
+(@viraj kulkarni)[https://github.com/nirvanagit]
+(@ryan tay)[https://github.com/nirvanagit]
+(@punakshi chaand)[https://github.com/nirvanagit]
+(@pankaj sikka)[https://github.com/nirvanagit]
From 61eafd20fdb7a90754fa9481f53546e6acb9e3a6 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 18:43:10 -0700
Subject: [PATCH 079/235] fix codeowners
---
.github/CODEOWNERS | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 21432be6..7ad77b67 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,10 +1,10 @@
-(@anil attuluri)[https://github.com/aattuluri]
-(@anubhav aeron)[https://github.com/nirvanagit]
-(@shriram sharma)[https://github.com/nirvanagit]
-(@kartikeya pharasi)[https://github.com/nirvanagit]
-(@vinay gonuguntla)[https://github.com/nirvanagit]
-(@vrushali joshi)[https://github.com/nirvanagit]
-(@viraj kulkarni)[https://github.com/nirvanagit]
-(@ryan tay)[https://github.com/nirvanagit]
-(@punakshi chaand)[https://github.com/nirvanagit]
-(@pankaj sikka)[https://github.com/nirvanagit]
+@aattuluri
+@nirvanagit
+@shriramsharma
+@kpharasi
+@vinay-g
+@vrushalijoshi
+@virajrk
+@rtay1188
+@Punakshi
+@psikka1
From 25ad78ebb4dc12d49b07f6f5d5c13c53e51dc263 Mon Sep 17 00:00:00 2001
From: nirvanagit
Date: Mon, 22 Jul 2024 18:46:45 -0700
Subject: [PATCH 080/235] update readme
---
README.md | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/README.md b/README.md
index dda9171d..452dfe1f 100644
--- a/README.md
+++ b/README.md
@@ -112,3 +112,15 @@ sequenceDiagram
end
end
```
+
+# Core contributors
+- [Anil Attuluri](https://github.com/aattuluri)
+- [Anubhav Aeron](https://github.com/nirvanagit)
+- [Shriram Sharma](https://github.com/shriramsharma)
+- [Kartikeya Pharasi](https://github.com/kpharasi)
+- [Vinay Gonuguntla](https://github.com/vinay-g)
+- [Vrushali Joshi](https://github.com/vrushalijoshi)
+- [Viraj Kulkarni](https://github.com/virajrk)
+- [Ryan Tay](https://github.com/rtay1188)
+- [Punakshi Chaand](https://github.com/Punakshi)
+- [Pankaj Sikka](https://github.com/psikka1)
From c144d4dc7ff3fe11af1718e8374a81c4c9712794 Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 00:41:01 -0700
Subject: [PATCH 081/235] Updating install_sample_services
Signed-off-by: vinay-g
---
install/scripts/install_sample_services.sh | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/install/scripts/install_sample_services.sh b/install/scripts/install_sample_services.sh
index b5ef05db..807dbbb9 100755
--- a/install/scripts/install_sample_services.sh
+++ b/install/scripts/install_sample_services.sh
@@ -7,8 +7,12 @@ install_dir=$1
#verify KUBECONFIG is set
if [ -z "$KUBECONFIG" ]
then
- echo "\$KUBECONFIG is not set"
- exit 1;
+ echo "\$KUBECONFIG is not set"
+ exit 1
+elif [[ $KUBECONFIG == *"ppd"* || $KUBECONFIG == *"prd"* || $KUBECONFIG == *"prod"* ]]
+then
+ echo "\$KUBECONFIG is not for a dev cluster"
+ exit 1
fi
#Install test services
@@ -103,4 +107,4 @@ for identity in stage.httpbin.foo-vs qal.httpbin.foo-vs; do
then
exit 1
fi
-done
\ No newline at end of file
+done
From e39c0ea80abfab7b5c3076c702efe3115e1b45b7 Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 00:42:26 -0700
Subject: [PATCH 082/235] Updating install_rollouts
Signed-off-by: vinay-g
---
install/scripts/install_rollouts.sh | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/install/scripts/install_rollouts.sh b/install/scripts/install_rollouts.sh
index 27f4c1a1..5bc142b5 100755
--- a/install/scripts/install_rollouts.sh
+++ b/install/scripts/install_rollouts.sh
@@ -1,7 +1,17 @@
#!/bin/bash
+if [ -z "$KUBECONFIG" ]
+then
+ echo "\$KUBECONFIG is not set"
+ exit 1
+elif [[ $KUBECONFIG == *"ppd"* || $KUBECONFIG == *"prd"* || $KUBECONFIG == *"prod"* ]]
+then
+ echo "\$KUBECONFIG is not for the dev cluster"
+ exit 1
+fi
+
echo "**********Installing rollouts *********"
kubectl create namespace argo-rollouts
kubectl apply -n argo-rollouts -f https://raw.githubusercontent.com/argoproj/argo-rollouts/stable/manifests/install.yaml
kubectl rollout status deployment argo-rollouts -n argo-rollouts
-echo "****Rollouts installed*******"
\ No newline at end of file
+echo "****Rollouts installed*******"
From e7cf251806c735a152989ec95d61deba2b0a0bd3 Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 00:42:59 -0700
Subject: [PATCH 083/235] Updating install_prometheus
Signed-off-by: vinay-g
---
install/scripts/install_prometheus.sh | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/install/scripts/install_prometheus.sh b/install/scripts/install_prometheus.sh
index a85bfffe..82b41e0a 100755
--- a/install/scripts/install_prometheus.sh
+++ b/install/scripts/install_prometheus.sh
@@ -7,9 +7,13 @@ install_dir=$1
#verify KUBECONFIG is set
if [ -z "$KUBECONFIG" ]
then
- echo "\$KUBECONFIG is not set"
- exit 1;
+ echo "\$KUBECONFIG is not set"
+ exit 1
+elif [[ $KUBECONFIG == *"ppd"* || $KUBECONFIG == *"prd"* || $KUBECONFIG == *"prod"* ]]
+then
+ echo "\$KUBECONFIG is not for a dev cluster"
+ exit 1
fi
kubectl delete svc prometheus -n istio-system
-kubectl apply -f $install_dir/yaml/prometheus.yaml
\ No newline at end of file
+kubectl apply -f $install_dir/yaml/prometheus.yaml
From 61b9c044f1a22de9992f71abe6a1e570c92dae5a Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 00:43:30 -0700
Subject: [PATCH 084/235] Updating install_admiral
Signed-off-by: vinay-g
---
install/scripts/install_admiral.sh | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/install/scripts/install_admiral.sh b/install/scripts/install_admiral.sh
index 1ff5857a..5a4c2ad0 100755
--- a/install/scripts/install_admiral.sh
+++ b/install/scripts/install_admiral.sh
@@ -7,8 +7,12 @@ install_dir=$1
#verify KUBECONFIG is set
if [ -z "$KUBECONFIG" ]
then
- echo "\$KUBECONFIG is not set"
- exit 1;
+ echo "\$KUBECONFIG is not set"
+ exit 1
+elif [[ $KUBECONFIG == *"ppd"* || $KUBECONFIG == *"prd"* || $KUBECONFIG == *"prod"* ]]
+then
+ echo "\$KUBECONFIG is not for a dev cluster"
+ exit 1
fi
#Install admiral
@@ -19,4 +23,4 @@ kubectl apply -f $install_dir/yaml/remotecluster.yaml
kubectl apply -f $install_dir/yaml/demosinglecluster.yaml
#Verify admiral is running
-kubectl rollout status deployment admiral -n admiral
\ No newline at end of file
+kubectl rollout status deployment admiral -n admiral
From 58873d37c34e147f79d059af16c9426a4c577230 Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 00:44:12 -0700
Subject: [PATCH 085/235] Updating dev_setup
Signed-off-by: vinay-g
---
install/scripts/dev_setup.sh | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/install/scripts/dev_setup.sh b/install/scripts/dev_setup.sh
index f3955c4d..06191068 100755
--- a/install/scripts/dev_setup.sh
+++ b/install/scripts/dev_setup.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/bash +x
if [ -z "$ADMIRAL_HOME" ]
then
@@ -8,8 +8,12 @@ fi
if [ -z "$KUBECONFIG" ]
then
- echo "\$KUBECONFIG is not set"
- exit 1
+ echo "\$KUBECONFIG is not set"
+ exit 1
+elif [[ $KUBECONFIG == *"ppd"* || $KUBECONFIG == *"prd"* || $KUBECONFIG == *"prod"* ]]
+then
+ echo "\$KUBECONFIG is not for a dev cluster"
+ exit 1
fi
cd $ADMIRAL_HOME
@@ -26,4 +30,4 @@ $ADMIRAL_HOME/install/scripts/cluster-secret.sh $KUBECONFIG $KUBECONFIG admiral
kubectl apply -f $ADMIRAL_HOME/out/yaml/sample.yaml
-kubectl apply -f $ADMIRAL_HOME/out/yaml/sample_dep.yaml
\ No newline at end of file
+kubectl apply -f $ADMIRAL_HOME/out/yaml/sample_dep.yaml
From a4a2f180318501b5b0f06f4a0e7adea45a8c1c0c Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 00:44:44 -0700
Subject: [PATCH 086/235] Updating cleanup
Signed-off-by: vinay-g
---
install/scripts/cleanup.sh | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/install/scripts/cleanup.sh b/install/scripts/cleanup.sh
index 083bd4c0..8c58b8cf 100755
--- a/install/scripts/cleanup.sh
+++ b/install/scripts/cleanup.sh
@@ -2,6 +2,12 @@
while true; do
clustername=$(kubectl config current-context)
+
+ if [[ $clustername == *"ppd"* || $clustername == *"prd"* || $clustername == *"prod"* ]]
+ then
+ echo "\$clustername is not a dev cluster"
+ exit 1
+ fi
printf "k8s cluster: %s\n" "$clustername"
printf "Namespaces ['admiral','admiral-sync','sample', 'sample-rollout-canary', 'sample-rollout-bluegreen'] will be deleted.\nDo you wish to proceed?\n"
options="Please enter yes/Y/y or no/N/n"
From 74bc7fc7e85aac7d37be297628125c94b38f9f08 Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 00:45:35 -0700
Subject: [PATCH 087/235] Updating cluster-secret
Signed-off-by: vinay-g
---
install/scripts/cluster-secret.sh | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
diff --git a/install/scripts/cluster-secret.sh b/install/scripts/cluster-secret.sh
index 90cd9237..f3d0bae8 100755
--- a/install/scripts/cluster-secret.sh
+++ b/install/scripts/cluster-secret.sh
@@ -16,6 +16,15 @@ fi
#TBD make sure you have context switched
export KUBECONFIG=$remote_cluster
+if [ -z "$KUBECONFIG" ]
+then
+ echo "\$KUBECONFIG is not set"
+ exit 1
+elif [[ $KUBECONFIG == *"ppd"* || $KUBECONFIG == *"prd"* || $KUBECONFIG == *"prod"* ]]
+then
+ echo "\$KUBECONFIG is not for a dev cluster"
+ exit 1
+fi
#prep for creating kubeconfig of remote cluster
export WORK_DIR=$(pwd)
@@ -68,6 +77,15 @@ source remote_cluster_env_vars
#export KUBECONFIG=~/.kube/config
#kubectx minikube
export KUBECONFIG=$local_cluster
+if [ -z "$KUBECONFIG" ]
+then
+ echo "\$KUBECONFIG is not set"
+ exit 1
+elif [[ $KUBECONFIG == *"ppd"* || $KUBECONFIG == *"prd"* || $KUBECONFIG == *"prod"* ]]
+then
+ echo "\$KUBECONFIG is not for a dev cluster"
+ exit 1
+fi
kubectl delete secret ${CLUSTER_NAME} -n $namespace_secrets
kubectl create secret generic ${CLUSTER_NAME} --from-file ${KUBECFG_FILE} -n $namespace_secrets
From a9be20e3caf8b76c27ef8a858d0e8a0f3ad202b6 Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 00:46:09 -0700
Subject: [PATCH 088/235] Updating greeting.yaml
Signed-off-by: vinay-g
---
install/sample/overlays/rollout-canary/greeting.yaml | 1 -
1 file changed, 1 deletion(-)
diff --git a/install/sample/overlays/rollout-canary/greeting.yaml b/install/sample/overlays/rollout-canary/greeting.yaml
index cf9059fd..ab8987fb 100644
--- a/install/sample/overlays/rollout-canary/greeting.yaml
+++ b/install/sample/overlays/rollout-canary/greeting.yaml
@@ -20,7 +20,6 @@ spec:
spec:
containers:
- image: nginx
- imagePullPolicy: IfNotPresent
name: greeting
ports:
- containerPort: 80
From 89930d9dc6a635461d28a239337ba493d7fe5f77 Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 00:46:36 -0700
Subject: [PATCH 089/235] Updating greeting removing IfNotPresent
Signed-off-by: vinay-g
---
install/sample/overlays/rollout-bluegreen/greeting.yaml | 1 -
1 file changed, 1 deletion(-)
diff --git a/install/sample/overlays/rollout-bluegreen/greeting.yaml b/install/sample/overlays/rollout-bluegreen/greeting.yaml
index 19622bd0..4a340c2b 100644
--- a/install/sample/overlays/rollout-bluegreen/greeting.yaml
+++ b/install/sample/overlays/rollout-bluegreen/greeting.yaml
@@ -21,7 +21,6 @@ spec:
spec:
containers:
- image: nginx
- imagePullPolicy: IfNotPresent
name: greeting
ports:
- containerPort: 80
From 6c97696eb6b33c292416e116272c4357b72886bd Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 00:50:25 -0700
Subject: [PATCH 090/235] Updating grpc-server removing IfNotPresent
Signed-off-by: vinay-g
---
install/sample/overlays/grpc/grpc-server.yaml | 1 -
1 file changed, 1 deletion(-)
diff --git a/install/sample/overlays/grpc/grpc-server.yaml b/install/sample/overlays/grpc/grpc-server.yaml
index cad92273..1a98fef1 100644
--- a/install/sample/overlays/grpc/grpc-server.yaml
+++ b/install/sample/overlays/grpc/grpc-server.yaml
@@ -20,7 +20,6 @@ spec:
containers:
- name: grpc-server
image: buoyantio/strest-grpc:latest
- imagePullPolicy: IfNotPresent
args:
- "server"
- "--address=:11111"
From 46a721d8839df78568f16b89b0dabe8760fe5825 Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 00:50:51 -0700
Subject: [PATCH 091/235] Updating greeting.yaml removing IfNotPresent
Signed-off-by: vinay-g
---
install/sample/overlays/deployment/greeting.yaml | 1 -
1 file changed, 1 deletion(-)
diff --git a/install/sample/overlays/deployment/greeting.yaml b/install/sample/overlays/deployment/greeting.yaml
index 2a146fb1..2c8593d9 100644
--- a/install/sample/overlays/deployment/greeting.yaml
+++ b/install/sample/overlays/deployment/greeting.yaml
@@ -22,7 +22,6 @@ spec:
spec:
containers:
- image: nginx
- imagePullPolicy: IfNotPresent
name: greeting
ports:
- containerPort: 80
From 89bd982b7bd479755ff1a49abb025fe807454902 Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 00:51:52 -0700
Subject: [PATCH 092/235] Adding outlier detection sample
Signed-off-by: vinay-g
---
install/sample/od.yaml | 15 +++++++++++++++
1 file changed, 15 insertions(+)
create mode 100644 install/sample/od.yaml
diff --git a/install/sample/od.yaml b/install/sample/od.yaml
new file mode 100644
index 00000000..1d260f45
--- /dev/null
+++ b/install/sample/od.yaml
@@ -0,0 +1,15 @@
+apiVersion: admiral.io/v1alpha1
+kind: OutlierDetection
+metadata:
+ name: sample-configuration
+ namespace: sample
+ annotations:
+ admiral.io/env: stage
+spec:
+ selector:
+ identity: greeting
+ env: stage
+ outlier_config: #TODO : How to disable it
+ base_ejection_time: 180
+ consecutive_gateway_errors: 10
+ interval: 28
From 3b5b78b3d4ee3dae246ce0f0447c7c45c008f0e0 Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 00:53:52 -0700
Subject: [PATCH 093/235] Adding integration render template
Signed-off-by: vinay-g
---
integration/render_template.go | 47 ++++++++++++++++++++++++++++++++++
1 file changed, 47 insertions(+)
create mode 100644 integration/render_template.go
diff --git a/integration/render_template.go b/integration/render_template.go
new file mode 100644
index 00000000..b225916f
--- /dev/null
+++ b/integration/render_template.go
@@ -0,0 +1,47 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "text/template"
+)
+
+type yamlInputs struct {
+ BuildImage string
+ AdmiralLabel string
+ SecretFilterTag string
+}
+
+func main() {
+
+ yaml := yamlInputs{
+ BuildImage: os.Getenv("ADMIRAL_BUILD_IMAGE"),
+ AdmiralLabel: os.Getenv("ADMIRAL_LABEL"),
+ SecretFilterTag: os.Getenv("SECRET_FILTER_TAG"),
+ }
+
+ fmt.Println("Rendering template with the following inputs:")
+ fmt.Println("BuildImage: ", yaml.BuildImage)
+ fmt.Println("AdmiralLabel: ", yaml.AdmiralLabel)
+ fmt.Println("SecretFilterTag: ", yaml.SecretFilterTag)
+
+ // Create the file
+ f, err := os.OpenFile("admiral_rendered_deployment.yaml", os.O_WRONLY|os.O_CREATE, 0600)
+ if err != nil {
+ panic(err)
+ }
+ defer f.Close()
+
+ // Render the template
+ var tmplFile = "admiral_deployment.tmpl"
+ tmpl, err := template.New(tmplFile).ParseFiles(tmplFile)
+ if err != nil {
+ panic(err)
+ }
+
+ // Execute the template to the file
+ err = tmpl.Execute(f, yaml)
+ if err != nil {
+ panic(err)
+ }
+}
From e6726af26ccbaea982c38e3f7a8fd90524c1613f Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 00:54:23 -0700
Subject: [PATCH 094/235] Adding deployment template to integration
Signed-off-by: vinay-g
---
integration/admiral_deployment.tmpl | 261 ++++++++++++++++++++++++++++
1 file changed, 261 insertions(+)
create mode 100644 integration/admiral_deployment.tmpl
diff --git a/integration/admiral_deployment.tmpl b/integration/admiral_deployment.tmpl
new file mode 100644
index 00000000..0c7803d3
--- /dev/null
+++ b/integration/admiral_deployment.tmpl
@@ -0,0 +1,261 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: pr-build-{{ .AdmiralLabel }}
+ applications.argoproj.io/app-name: services-admiral-usw2-qal
+ assetId: "8287766806579881856"
+ buildType: maven
+ l1: services
+ l2: mesh
+ name: pr-build-{{ .AdmiralLabel }}
+ namespace: services-admiral-usw2-qal
+spec:
+ progressDeadlineSeconds: 600
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ app: pr-build-{{ .AdmiralLabel }}
+ template:
+ metadata:
+ annotations:
+ iam.amazonaws.com/role: arn:aws:iam::857109464775:role/k8s-services-admiral-usw2-qal
+ labels:
+ app: pr-build-{{ .AdmiralLabel }}
+ applications.argoproj.io/app-name: services-admiral-usw2-qal
+ assetId: "8287766806579881856"
+ l1: services
+ l2: mesh
+ splunk-index: iks
+ spec:
+ containers:
+ - args:
+ - --dependency_namespace
+ - services-admiral-usw2-qal
+ - --secret_namespace
+ - services-admiral-usw2-qal
+ - --sync_namespace
+ - admiral-sync
+ - --secret_filter_tags
+ - {{ .SecretFilterTag }}
+ - --san_prefix
+ - pre-prod.api.intuit.com
+ - --secret_resolver
+ - intuit
+ - --secret_resolver_config_path
+ - /etc/admiral/config.yaml
+ - --hostname_suffix
+ - mesh
+ - --workload_identity_key
+ - alpha.istio.io/identity
+ - --admiral_crd_identity_label
+ - assetAlias
+ - --workload_sidecar_update
+ - enabled
+ - --workload_sidecar_name
+ - default
+ - --argo_rollouts=true
+ - --enable_san=true
+ - --sync_period
+ - 60s
+ - --log_level
+ - "5"
+ - --admiral_state_checker_name
+ - dynamodbbasedstatechecker
+ - --dr_state_store_config_path
+ - /etc/admiral/config.yaml
+ - --envoy_filter_version
+ - 1.13,1.17
+ - --enable_routing_policy=true
+ - --envoy_filter_additional_config
+ - 'dynamicRouterHost: internal.intuit.services.mesh.dynamicroutingservice.mesh'
+ - --additional_endpoint_suffixes
+ - intuit
+ - --additional_endpoint_label_filters
+ - iks.intuit.com/express-version
+ - --enable_workload_data_storage
+ - --enable_dependency_processing=true
+ - --se_ip_prefix
+ - "244.0"
+ - --se_address_configmap
+ - se-address-configmap-qal
+ - --max_requests_per_connection=5
+ - --disable_default_automatic_failover=true
+ - --log_to_file=true
+ - --log_file_path=/app/logs/admiral.log
+ - --enable_serviceentry_cache=true
+ - --enable_destinationrule_cache=true
+ - --enable_absolute_fqdn=true
+ - --alpha_identity_list=*
+ - --enable_absolute_fqdn_for_local_endpoints=true
+ - --enable_active_passive=true
+ - --disable_ip_generation=true
+ - --enable_client_connection_config_processing=true
+ - --enable_sw_aware_ns_caches=true
+ image: {{ .BuildImage }}
+ imagePullPolicy: IfNotPresent
+ name: admiral
+ ports:
+ - containerPort: 8082
+ name: debug
+ protocol: TCP
+ - containerPort: 8080
+ name: admiral
+ protocol: TCP
+ - containerPort: 6900
+ name: admiral-metrics
+ protocol: TCP
+ resources:
+ limits:
+ cpu: "4"
+ memory: 12288M
+ requests:
+ cpu: "2"
+ memory: 10240M
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /etc/admiral
+ name: admiral-config-vol
+ - mountPath: /app/logs/
+ name: logdir
+ - env:
+ - name: SPLUNK_SECRET_PATH
+ value: /etc/secrets/splunk
+ - name: SERVICE_LOG_CONTAINER_NAME
+ value: admiral
+ image: docker.intuit.com/cloud/logging/k8ssplunkforwarder/service/base_splunk_forwarder:9.0.16
+ imagePullPolicy: IfNotPresent
+ name: splunk-forwarder
+ resources:
+ requests:
+ cpu: 300m
+ memory: 300M
+ startupProbe:
+ exec:
+ command:
+ - /opt/splunkforwarder/health.sh
+ failureThreshold: 30
+ initialDelaySeconds: 2
+ periodSeconds: 2
+ successThreshold: 1
+ timeoutSeconds: 1
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /app/logs/
+ name: logdir
+ - mountPath: /etc/secrets/splunk
+ name: splunk-secrets
+ - mountPath: /etc/podinfo
+ name: podinfo
+ - mountPath: /etc/splunk
+ name: splunk-indexers-volume
+ - mountPath: /opt/splunkforwarder/etc/system/local/inputs.conf
+ name: splunk-inputs-conf
+ subPath: splunk.conf
+ - args:
+ - /usr/bin/envoy
+ - -c
+ - /etc/envoy/envoy.yaml
+ - --log-level
+ - info
+ command:
+ - sudo
+ image: docker.intuit.com/oicp/standard/envoy/debian11-envoy1:1.0.31
+ imagePullPolicy: IfNotPresent
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /health/ready
+ port: 443
+ scheme: HTTPS
+ initialDelaySeconds: 50
+ periodSeconds: 5
+ successThreshold: 1
+ timeoutSeconds: 1
+ name: envoy
+ ports:
+ - containerPort: 443
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /health/ready
+ port: 443
+ scheme: HTTPS
+ initialDelaySeconds: 50
+ periodSeconds: 5
+ successThreshold: 3
+ timeoutSeconds: 1
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /etc/envoy
+ name: envoy-config-vol
+ - mountPath: /etc/envoy/ssl
+ name: envoy-tls-cert-vol
+ dnsPolicy: ClusterFirst
+ initContainers:
+ - env:
+ - name: MYSTIKO_CONFIG
+ value: /mystiko/config.yaml
+ image: docker.intuit.com/intgctls-platctls/mystiko-cli/service/mystiko:1.4.0
+ imagePullPolicy: IfNotPresent
+ name: mystiko-splunk-certs
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ volumeMounts:
+ - mountPath: /etc/secrets/splunk
+ name: splunk-secrets
+ - mountPath: /mystiko
+ name: mystiko-config
+ nodeSelector:
+ node.kubernetes.io/instancegroup: services-admiral-usw2-qal-default
+ restartPolicy: Always
+ schedulerName: default-scheduler
+ securityContext: {}
+ serviceAccount: admiral
+ serviceAccountName: admiral
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - configMap:
+ defaultMode: 420
+ name: admiral-config-configmap
+ name: admiral-config-vol
+ - configMap:
+ defaultMode: 420
+ name: envoy-config-configmap
+ name: envoy-config-vol
+ - name: envoy-tls-cert-vol
+ secret:
+ defaultMode: 438
+ secretName: admiral-envoy-tls-cert
+ - configMap:
+ defaultMode: 420
+ name: mystiko-config
+ name: mystiko-config
+ - emptyDir: {}
+ name: logdir
+ - configMap:
+ defaultMode: 420
+ name: splunk-inputs-conf
+ name: splunk-inputs-conf
+ - configMap:
+ defaultMode: 420
+ name: splunk-indexers
+ name: splunk-indexers-volume
+ - emptyDir:
+ medium: Memory
+ name: splunk-secrets
+ - downwardAPI:
+ defaultMode: 420
+ items:
+ - fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.labels
+ path: labels
+ name: podinfo
From 2b6417363e5969375f1cb3f0da1228a2ce4e0152 Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 00:54:47 -0700
Subject: [PATCH 095/235] Adding integration go mod
Signed-off-by: vinay-g
---
integration/go.mod | 3 +++
1 file changed, 3 insertions(+)
create mode 100644 integration/go.mod
diff --git a/integration/go.mod b/integration/go.mod
new file mode 100644
index 00000000..20a79f0d
--- /dev/null
+++ b/integration/go.mod
@@ -0,0 +1,3 @@
+module integration
+
+go 1.18
From 8e92f585b7e12760efc3306c035b0b227258d164 Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 00:57:30 -0700
Subject: [PATCH 096/235] Adding traffic config and od
Signed-off-by: vinay-g
---
install/admiralremote/base/remote.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/install/admiralremote/base/remote.yaml b/install/admiralremote/base/remote.yaml
index 94a85223..eb233207 100644
--- a/install/admiralremote/base/remote.yaml
+++ b/install/admiralremote/base/remote.yaml
@@ -24,7 +24,7 @@ rules:
resources: ['virtualservices', 'destinationrules', 'serviceentries', 'envoyfilters' ,'gateways', 'sidecars']
verbs: [ "get", "list", "watch"]
- apiGroups: ["admiral.io"]
- resources: ['globaltrafficpolicies', 'routingpolicies']
+ resources: ['globaltrafficpolicies', 'routingpolicies',"trafficconfigs", "outlierdetections"]
verbs: [ "get", "list", "watch"]
- apiGroups: ["argoproj.io"]
resources: ['rollouts']
From ea6d5ea78ed72b982d0f8ae64c8ef1e8e4911d7b Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 01:04:40 -0700
Subject: [PATCH 097/235] Adding alpha1
Signed-off-by: vinay-g
---
hack/update-codegen.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh
index ec94ca26..db30c408 100755
--- a/hack/update-codegen.sh
+++ b/hack/update-codegen.sh
@@ -30,7 +30,7 @@ chmod +x ${CODEGEN_PKG}/generate-groups.sh ##If you get a `no file or directory`
${CODEGEN_PKG}/generate-groups.sh all \
github.com/istio-ecosystem/admiral/admiral/pkg/client github.com/istio-ecosystem/admiral/admiral/pkg/apis \
- "admiral:v1" \
+ "admiral:v1alpha1" \
--output-base "${TEMP_DIR}" \
--go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt
From 3f14e064fdffd93232af4691ec15b534dbf59ae7 Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 01:11:02 -0700
Subject: [PATCH 098/235] Adding outlier crd
Signed-off-by: vinay-g
---
install/admiralremote/base/crds.yaml | 82 +++++++++++++++++++++++++++-
1 file changed, 81 insertions(+), 1 deletion(-)
diff --git a/install/admiralremote/base/crds.yaml b/install/admiralremote/base/crds.yaml
index a10512d5..fc69b8d7 100644
--- a/install/admiralremote/base/crds.yaml
+++ b/install/admiralremote/base/crds.yaml
@@ -96,4 +96,84 @@ spec:
- spec
type: object
served: true
- storage: true
\ No newline at end of file
+ storage: true
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: outlierdetections.admiral.io
+spec:
+ group: admiral.io
+ names:
+ kind: OutlierDetection
+ listKind: OutlierDetectionList
+ plural: outlierdetections
+ singular: outlierdetection
+ shortNames:
+ - od
+ - ods
+ scope: Namespaced
+ versions:
+ - name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: generic crd object to wrap the OutlierDetection api
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ outlier_config:
+ description: 'REQUIRED: base outlier configuration.'
+ properties:
+ base_ejection_time:
+ description: 'REQUIRED: Minimum duration of time in seconds, the
+ endpoint will be ejected'
+ format: int64
+ type: integer
+ consecutive_gateway_errors:
+ description: 'REQUIRED: No. of consecutive failures in specified
+ interval after which the endpoint will be ejected'
+ format: int32
+ type: integer
+ interval:
+ description: 'REQUIRED: Time interval between ejection sweep analysis'
+ format: int64
+ type: integer
+ type: object
+ selector:
+ additionalProperties:
+ type: string
+ description: 'REQUIRED: One or more labels that indicate a specific
+ set of pods/VMs on which this outlier configuration should be applied.
+ The scope of label search is restricted to namespace mark for mesh
+ enablement this will scan all cluster and namespace'
+ type: object
+ type: object
+ status:
+ properties:
+ clustersSynced:
+ format: int32
+ type: integer
+ state:
+ type: string
+ required:
+ - clustersSynced
+ - state
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
\ No newline at end of file
From 0d16d7bdc16eec4e352a4fddd3fe8c8a7c9069d8 Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 01:13:10 -0700
Subject: [PATCH 099/235] Adding outlier crd
Signed-off-by: vinay-g
---
install/admiral/base/crds.yaml | 82 +++++++++++++++++++++++++++++++++-
1 file changed, 81 insertions(+), 1 deletion(-)
diff --git a/install/admiral/base/crds.yaml b/install/admiral/base/crds.yaml
index 615f53e8..973436c6 100644
--- a/install/admiral/base/crds.yaml
+++ b/install/admiral/base/crds.yaml
@@ -186,4 +186,84 @@ spec:
- spec
type: object
served: true
- storage: true
\ No newline at end of file
+ storage: true
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: outlierdetections.admiral.io
+spec:
+ group: admiral.io
+ names:
+ kind: OutlierDetection
+ listKind: OutlierDetectionList
+ plural: outlierdetections
+ singular: outlierdetection
+ shortNames:
+ - od
+ - ods
+ scope: Namespaced
+ versions:
+ - name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: generic crd object to wrap the OutlierDetection api
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ outlier_config:
+ description: 'REQUIRED: base outlier configuration.'
+ properties:
+ base_ejection_time:
+ description: 'REQUIRED: Minimum duration of time in seconds, the
+ endpoint will be ejected'
+ format: int64
+ type: integer
+ consecutive_gateway_errors:
+ description: 'REQUIRED: No. of consecutive failures in specified
+ interval after which the endpoint will be ejected'
+ format: int32
+ type: integer
+ interval:
+ description: 'REQUIRED: Time interval between ejection sweep analysis'
+ format: int64
+ type: integer
+ type: object
+ selector:
+ additionalProperties:
+ type: string
+ description: 'REQUIRED: One or more labels that indicate a specific
+ set of pods/VMs on which this outlier configuration should be applied.
+ The scope of label search is restricted to namespace mark for mesh
+ enablement this will scan all cluster and namespace'
+ type: object
+ type: object
+ status:
+ properties:
+ clustersSynced:
+ format: int32
+ type: integer
+ state:
+ type: string
+ required:
+ - clustersSynced
+ - state
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
\ No newline at end of file
From a8cedc81a97794fd1646934f1121375bf2fb70d3 Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 01:14:30 -0700
Subject: [PATCH 100/235] Adding dependencyproxies to roles
Signed-off-by: vinay-g
---
install/admiral/base/roles.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/install/admiral/base/roles.yaml b/install/admiral/base/roles.yaml
index 369141e7..1fba38d4 100644
--- a/install/admiral/base/roles.yaml
+++ b/install/admiral/base/roles.yaml
@@ -7,7 +7,7 @@ metadata:
namespace: admiral
rules:
- apiGroups: ["admiral.io"]
- resources: ["dependencies"]
+ resources: ["dependencies","dependencyproxies"]
verbs: ["get", "list", "watch"]
---
From 3248cdf9d380c9f7c5ef0e6b35b986ab34a2c3a1 Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 01:14:58 -0700
Subject: [PATCH 101/235] go.mod
Signed-off-by: vinay-g
---
go.mod | 90 +++++++++++++++++++++++++++++++++++++++++++++-------------
1 file changed, 70 insertions(+), 20 deletions(-)
diff --git a/go.mod b/go.mod
index fff863b3..f2a3a594 100644
--- a/go.mod
+++ b/go.mod
@@ -5,13 +5,13 @@ go 1.21
require (
github.com/argoproj/argo-rollouts v1.2.1
github.com/cenkalti/backoff v2.2.1+incompatible
- github.com/go-openapi/swag v0.19.15 // indirect
+ github.com/go-openapi/swag v0.22.3 // indirect
github.com/golang/protobuf v1.5.3
github.com/google/go-cmp v0.6.0
github.com/gorilla/mux v1.8.0
github.com/imdario/mergo v0.3.12 // indirect
- github.com/mailru/easyjson v0.7.6 // indirect
- github.com/onsi/gomega v1.19.0
+ github.com/mailru/easyjson v0.7.7 // indirect
+ github.com/onsi/gomega v1.30.0
github.com/prometheus/client_golang v1.19.1
github.com/prometheus/client_model v0.6.1
github.com/sirupsen/logrus v1.8.1
@@ -19,74 +19,111 @@ require (
github.com/stretchr/testify v1.9.0
golang.org/x/net v0.20.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
- google.golang.org/genproto v0.0.0-20220531134929-86cf59382f1b // indirect
gopkg.in/yaml.v2 v2.4.0
- istio.io/api v0.0.0-20220621155648-3e39d064ab6b
+ istio.io/api v1.19.6
istio.io/client-go v1.14.0
- k8s.io/api v0.24.2
- k8s.io/apimachinery v0.24.2
+ k8s.io/api v0.28.0
+ k8s.io/apimachinery v0.28.0
k8s.io/client-go v0.24.2
sigs.k8s.io/yaml v1.3.0 // indirect
)
require (
+ github.com/aws/aws-sdk-go v1.44.105
+ github.com/golang/glog v1.1.0
+ github.com/jamiealquiza/tachymeter v2.0.0+incompatible
+ github.com/jedib0t/go-pretty/v6 v6.5.3
github.com/prometheus/common v0.53.0
+ github.intuit.com/idps/idps-go-sdk/v3 v3.9909.0
go.opentelemetry.io/otel v1.27.0
go.opentelemetry.io/otel/exporters/prometheus v0.49.0
go.opentelemetry.io/otel/metric v1.27.0
go.opentelemetry.io/otel/sdk/metric v1.27.0
google.golang.org/protobuf v1.34.1
+ gopkg.in/natefinch/lumberjack.v2 v2.2.1
)
require (
+ cloud.google.com/go/compute/metadata v0.2.3 // indirect
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
+ github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect
+ github.com/google/s2a-go v0.1.4 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
+ github.com/mattn/go-runewidth v0.0.15 // indirect
+ github.com/rivo/uniseg v0.2.0 // indirect
+ github.com/rogpeppe/go-internal v1.12.0 // indirect
go.opentelemetry.io/otel/sdk v1.27.0 // indirect
go.opentelemetry.io/otel/trace v1.27.0 // indirect
+ golang.org/x/tools v0.14.0 // indirect
+ google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c // indirect
)
require (
+ cloud.google.com/go/compute v1.23.0 // indirect
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/emicklei/go-restful/v3 v3.8.0 // indirect
+ github.com/emicklei/go-restful/v3 v3.10.1 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
- github.com/fsnotify/fsnotify v1.5.1 // indirect
+ github.com/fsnotify/fsnotify v1.5.4 // indirect
+ github.com/go-co-op/gocron v1.13.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
- github.com/go-openapi/jsonpointer v0.19.5 // indirect
- github.com/go-openapi/jsonreference v0.20.0 // indirect
+ github.com/go-openapi/jsonpointer v0.19.6 // indirect
+ github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/google/gnostic v0.5.7-v3refs // indirect
- github.com/google/gofuzz v1.1.0 // indirect
- github.com/google/uuid v1.1.2 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/google/gnostic v0.6.9 // indirect
+ github.com/google/gofuzz v1.2.0 // indirect
+ github.com/google/uuid v1.3.0
github.com/inconshreveable/mousetrap v1.0.0 // indirect
+ github.com/intuit/funnel v1.0.0 // indirect
+ github.com/jacobsa/crypto v0.0.0-20190317225127-9f44e2d11115 // indirect
+ github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/matryer/resync v0.0.0-20161211202428-d39c09a11215
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
- github.com/onsi/ginkgo v1.16.5 // indirect
+ github.com/onsi/ginkgo/v2 v2.13.2
+ github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/procfs v0.15.0 // indirect
+ github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
+ github.com/stretchr/objx v0.5.2 // indirect
+ github.com/tevino/abool v1.2.0 // indirect
+ github.com/ugorji/go/codec v1.2.7 // indirect
+ github.intuit.com/idps/device-grant-flow/go/dgfsdk v0.0.0-20220428022612-cf054cda65f7 // indirect
+ github.intuit.com/idps/idps-go-commons/v3 v3.4.4 // indirect
+ github.intuit.com/idps/idps-go-swagger-clients v1.8.1 // indirect
+ go.opencensus.io v0.24.0 // indirect
+ golang.org/x/crypto v0.18.0 // indirect
golang.org/x/oauth2 v0.16.0 // indirect
+ golang.org/x/sync v0.7.0 // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/term v0.16.0 // indirect
golang.org/x/text v0.14.0 // indirect
+ google.golang.org/api v0.126.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
+ google.golang.org/grpc v1.57.0 // indirect
gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/klog/v2 v2.60.1 // indirect
- k8s.io/kube-openapi v0.0.0-20220621154418-c39d0f63fac8 // indirect
- k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
- sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124 // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
+ k8s.io/klog/v2 v2.100.1 // indirect
+ k8s.io/kube-openapi v0.0.0-20230210211930-4b0756abdef5 // indirect
+ k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
+ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
)
replace (
@@ -120,3 +157,16 @@ replace (
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.24.2
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.24.2
)
+
+exclude (
+ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153
+ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633
+ github.com/sassoftware/go-rpmutils v0.0.0-20190420191620-a8f1baeba37b
+ golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3
+ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8
+ golang.org/x/net v0.0.0-20180724234803-3673e40ba225
+ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e
+ golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c
+ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c
+ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
+)
From a679b859cf5531ef9274d404f6936bdea85ec592 Mon Sep 17 00:00:00 2001
From: vinay-g
Date: Sun, 21 Jul 2024 01:15:16 -0700
Subject: [PATCH 102/235] go.sum
Signed-off-by: vinay-g
---
go.sum | 326 +++++++++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 282 insertions(+), 44 deletions(-)
diff --git a/go.sum b/go.sum
index f28809fe..bc4f9607 100644
--- a/go.sum
+++ b/go.sum
@@ -18,12 +18,30 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
+cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
+cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
+cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
+cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
+cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -46,6 +64,7 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
@@ -57,11 +76,16 @@ github.com/argoproj/argo-rollouts v1.2.1 h1:4hSgKEqpQsZreZBv+XcLsB+oBaRGMVW19nMS
github.com/argoproj/argo-rollouts v1.2.1/go.mod h1:ETmWr9Lysxr9SgbqalMMBdytBcDHUt9qulFoKJ9b9ZU=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/aws/aws-sdk-go v1.44.2/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.44.105 h1:UUwoD1PRKIj3ltrDUYTDQj5fOTK3XsnqolLpRTMmSEM=
+github.com/aws/aws-sdk-go v1.44.105/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -75,6 +99,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
@@ -83,27 +109,34 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw=
-github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ=
+github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-co-op/gocron v1.13.0 h1:BjkuNImPy5NuIPEifhWItFG7pYyr27cyjS6BN9w/D4c=
+github.com/go-co-op/gocron v1.13.0/go.mod h1:GD5EIEly1YNW+LovFVx5dzbYVcIc8544K99D8UVRpGo=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -118,24 +151,29 @@ github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ4
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
+github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
-github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
-github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
+github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
+github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
-github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
+github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
+github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -145,6 +183,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -164,11 +203,13 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
+github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0=
+github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -181,14 +222,17 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -200,11 +244,27 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20211214055906-6f57359322fd h1:1FjCyPC+syAzJ5/2S8fqdZK1R22vvA0J7JZKcuOIQ7Y=
+github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
+github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
+github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
+github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4=
+github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
@@ -215,11 +275,32 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/intuit/funnel v1.0.0 h1:DL7tQjXpRXmTb6C/xU2Hn9hcHh7/VnHC0+vep4e3P7E=
+github.com/intuit/funnel v1.0.0/go.mod h1:mDE1DfyEnFN29i8pcDDjNvVRKiZU+/N3YCuEl3CGQEU=
+github.com/jacobsa/crypto v0.0.0-20190317225127-9f44e2d11115 h1:YuDUUFNM21CAbyPOpOP8BicaTD/0klJEKt5p8yuw+uY=
+github.com/jacobsa/crypto v0.0.0-20190317225127-9f44e2d11115/go.mod h1:LadVJg0XuawGk+8L1rYnIED8451UyNxEMdTWCEt5kmU=
+github.com/jacobsa/oglematchers v0.0.0-20150720000706-141901ea67cd h1:9GCSedGjMcLZCrusBZuo4tyKLpKUPenUUqi34AkuFmA=
+github.com/jacobsa/oglematchers v0.0.0-20150720000706-141901ea67cd/go.mod h1:TlmyIZDpGmwRoTWiakdr+HA1Tukze6C6XbRVidYq02M=
+github.com/jacobsa/oglemock v0.0.0-20150831005832-e94d794d06ff h1:2xRHTvkpJ5zJmglXLRqHiZQNjUoOkhUyhTAhEQvPAWw=
+github.com/jacobsa/oglemock v0.0.0-20150831005832-e94d794d06ff/go.mod h1:gJWba/XXGl0UoOmBQKRWCJdHrr3nE0T65t6ioaj3mLI=
+github.com/jacobsa/ogletest v0.0.0-20170503003838-80d50a735a11 h1:BMb8s3ENQLt5ulwVIHVDWFHp8eIXmbfSExkvdn9qMXI=
+github.com/jacobsa/ogletest v0.0.0-20170503003838-80d50a735a11/go.mod h1:+DBdDyfoO2McrOyDemRBq0q9CMEByef7sYl7JH5Q3BI=
+github.com/jacobsa/reqtrace v0.0.0-20150505043853-245c9e0234cb h1:uSWBjJdMf47kQlXMwWEfmc864bA1wAC+Kl3ApryuG9Y=
+github.com/jacobsa/reqtrace v0.0.0-20150505043853-245c9e0234cb/go.mod h1:ivcmUvxXWjb27NsPEaiYK7AidlZXS7oQ5PowUS9z3I4=
+github.com/jamiealquiza/tachymeter v2.0.0+incompatible h1:mGiF1DGo8l6vnGT8FXNNcIXht/YmjzfraiUprXYwJ6g=
+github.com/jamiealquiza/tachymeter v2.0.0+incompatible/go.mod h1:Ayf6zPZKEnLsc3winWEXJRkTBhdHo58HODAu1oFJkYU=
+github.com/jedib0t/go-pretty/v6 v6.5.3 h1:GIXn6Er/anHTkVUoufs7ptEvxdD6KIhR7Axa2wYCPF0=
+github.com/jedib0t/go-pretty/v6 v6.5.3/go.mod h1:5LQIxa52oJ/DlDSLv0HEkWOFMDGoWkJb9ss5KqPpJBg=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
@@ -235,16 +316,22 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matryer/resync v0.0.0-20161211202428-d39c09a11215 h1:hDa3vAq/Zo5gjfJ46XMsGFbH+hTizpR4fUzQCk2nxgk=
github.com/matryer/resync v0.0.0-20161211202428-d39c09a11215/go.mod h1:LH+NgPY9AJpDfqAFtzyer01N9MYNsAKUf3DC9DV1xIY=
+github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
+github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -257,30 +344,29 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
+github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
-github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc=
-github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs=
+github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
-github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
+github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -297,12 +383,19 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek=
github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk=
+github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
+github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU=
github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
@@ -310,19 +403,45 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/tevino/abool v0.0.0-20170917061928-9b9efcf221b5/go.mod h1:f1SCnEOt6sc3fOJfPQDRDzHOtSXuTtnz0ImG9kPRDV0=
+github.com/tevino/abool v1.2.0 h1:heAkClL8H6w+mK5md9dzsuohKeXHUpY7Vw0ZCKW+huA=
+github.com/tevino/abool v1.2.0/go.mod h1:qc66Pna1RiIsPa7O4Egxxs9OqkuxDX55zznh9K07Tzg=
+github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
+github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0=
+github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.intuit.com/idps/device-grant-flow/go/dgfsdk v0.0.0-20220428022612-cf054cda65f7 h1:nSypwHIJ7o0IzWYVfVzmogrF5HIz/HCiSeMo0Mo3ymU=
+github.intuit.com/idps/device-grant-flow/go/dgfsdk v0.0.0-20220428022612-cf054cda65f7/go.mod h1:maAd/rJYgSC2c9PvkGZZD/NrkVyhZL9/jDU75iTzgKE=
+github.intuit.com/idps/idps-go-commons/v3 v3.4.4 h1:DxyPs+Q6wi7doX/2Ers2KnTv5B+vRclKCNVeCgkt01Y=
+github.intuit.com/idps/idps-go-commons/v3 v3.4.4/go.mod h1:NMUz/MLrhUE4/SdxPGGc5KMk3kC9B8UdUAuelSYgA/0=
+github.intuit.com/idps/idps-go-sdk/v3 v3.9909.0 h1:NtujYowO6tlJTmSHS1OoVAJ1ftTMCYWnuQSvVML1agI=
+github.intuit.com/idps/idps-go-sdk/v3 v3.9909.0/go.mod h1:IIy+JIbUnqhjVqB+g6XXK1/Wd1J1Mnd26W1DPELs4Fo=
+github.intuit.com/idps/idps-go-swagger-clients v1.8.1 h1:f7unZbxkR4WQRxHOL5B97HfoAwnkHjfUW1xLvK6GcHg=
+github.intuit.com/idps/idps-go-swagger-clients v1.8.1/go.mod h1:L0XVKcoVv71IoVZBIgmQfJ0ux0E0cguZsxTyos9v6kg=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -330,6 +449,8 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
go.opentelemetry.io/otel/exporters/prometheus v0.49.0 h1:Er5I1g/YhfYv9Affk9nJLfH/+qCCVVg1f2R9AbJfqDQ=
@@ -349,7 +470,12 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
+golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -373,6 +499,7 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
@@ -384,10 +511,11 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
+golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -421,8 +549,15 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -436,7 +571,14 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -450,6 +592,9 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -489,7 +634,6 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -498,17 +642,33 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@@ -517,6 +677,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -570,15 +731,22 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
+golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -600,6 +768,24 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
+google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
+google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
+google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
+google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
+google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.76.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o=
+google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -649,8 +835,45 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20220531134929-86cf59382f1b h1:X+VXcq/YthmZqFvppQm4Wleg4o//OmY2uttDv1vDvRo=
-google.golang.org/genproto v0.0.0-20220531134929-86cf59382f1b/go.mod h1:yKyY4AMRwFiC8yMMNaMi+RkCnjZJt9LoWuvhXjMs+To=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
+google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220426171045-31bebdecfb46/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0=
+google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk=
+google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13 h1:U7+wNaVuSTaUqNvK2+osJ9ejEZxbjHHk8F2b6Hpx0AE=
+google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:RdyHbowztCGQySiCvQPgWQWgWhGnouTdCflKoDBt32U=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c h1:jHkCUWkseRf+W+edG5hMzr/Uh1xkDREY4caybAq4dpY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -670,7 +893,19 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
+google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -692,13 +927,15 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -709,9 +946,7 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -721,8 +956,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-istio.io/api v0.0.0-20220621155648-3e39d064ab6b h1:H/0SpurAugYS4nhEcYx6uEUv1EX2WqL8vejJrRCVQIA=
-istio.io/api v0.0.0-20220621155648-3e39d064ab6b/go.mod h1:SJ6R+VKPZwpWnQsNlQL5cVGjAUNm/alk0D/6P5tV+tM=
+istio.io/api v1.19.6 h1:xG5EKIy66WvlOg+UvfjK9nRiQTeuAm38avzUkvrGep0=
+istio.io/api v1.19.6/go.mod h1:KstZe4bKbXouALUJ5PqpjNEhu5nj90HrDFitZfpNhlU=
istio.io/client-go v1.14.0 h1:KKXMnxXx3U2866OP8FBYlJhjKdI3yIUQnt8L6hSzDHE=
istio.io/client-go v1.14.0/go.mod h1:C7K0CKQlvY84yQKkZhxQbD1riqvnsgXJm3jF5GOmzNg=
k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI=
@@ -734,23 +969,26 @@ k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc=
k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
+k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
-k8s.io/kube-openapi v0.0.0-20220621154418-c39d0f63fac8 h1:30P0UV8MQgg4f1khIUT09xHmpI5B5Wg0Vg6JNkUqsQ0=
-k8s.io/kube-openapi v0.0.0-20220621154418-c39d0f63fac8/go.mod h1:PNbiP2hKArDh8cgJZTDL6Ss/z3wsbga8yjj/7VMB+I4=
+k8s.io/kube-openapi v0.0.0-20230210211930-4b0756abdef5 h1:/zkKSeCtGRHYqRmrpa9uPYDWMpmQ5bZijBSoOpW384c=
+k8s.io/kube-openapi v0.0.0-20230210211930-4b0756abdef5/go.mod h1:/BYxry62FuDzmI+i9B+X2pqfySRmSOW2ARmj5Zbqhj0=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc=
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
+k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
-sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124 h1:2sgAQQcY0dEW2SsQwTXhQV4vO6+rSslYx8K3XmM5hqQ=
-sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
From 93306d43e35669cad8a3810c61bd0a88cc141679 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 15:52:16 -0700
Subject: [PATCH 103/235] copy config.go from main branch
---
admiral/pkg/controller/common/config.go | 396 +++++++++++++++++++++---
1 file changed, 356 insertions(+), 40 deletions(-)
diff --git a/admiral/pkg/controller/common/config.go b/admiral/pkg/controller/common/config.go
index 6cf0c732..7e460dd4 100644
--- a/admiral/pkg/controller/common/config.go
+++ b/admiral/pkg/controller/common/config.go
@@ -1,33 +1,47 @@
package common
import (
- "github.com/istio-ecosystem/admiral/admiral/pkg/monitoring"
+ "strings"
+ "sync"
"time"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/monitoring"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/util"
"github.com/matryer/resync"
log "github.com/sirupsen/logrus"
)
-var admiralParams = AdmiralParams{
- LabelSet: &LabelSet{},
+type admiralParamsWrapper struct {
+ params AdmiralParams
+ sync.RWMutex
+ resync.Once
}
-var once resync.Once
+// Singleton
+var wrapper = admiralParamsWrapper{
+ params: AdmiralParams{
+ LabelSet: &LabelSet{},
+ },
+}
func ResetSync() {
- once.Reset()
+ wrapper.Reset()
}
func InitializeConfig(params AdmiralParams) {
var initHappened = false
- once.Do(func() {
- admiralParams = params
- initHappened = true
- InitializeMetrics()
+ wrapper.Do(func() {
+ wrapper.Lock()
+ defer wrapper.Unlock()
+ wrapper.params = params
+ if wrapper.params.LabelSet == nil {
+ wrapper.params.LabelSet = &LabelSet{}
+ }
err := monitoring.InitializeMonitoring()
if err != nil {
log.Errorf("failed to setup monitoring: %v", err)
}
+ initHappened = true
})
if initHappened {
log.Info("InitializeConfig was called.")
@@ -37,108 +51,410 @@ func InitializeConfig(params AdmiralParams) {
}
func GetAdmiralParams() AdmiralParams {
- return admiralParams
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params
+}
+
+func GetAdmiralProfile() string {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.Profile
}
func GetArgoRolloutsEnabled() bool {
- return admiralParams.ArgoRolloutsEnabled
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.ArgoRolloutsEnabled
+}
+
+func GetSecretFilterTags() string {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.SecretFilterTags
}
func GetKubeconfigPath() string {
- return admiralParams.KubeconfigPath
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.KubeconfigPath
}
func GetCacheRefreshDuration() time.Duration {
- return admiralParams.CacheRefreshDuration
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.CacheReconcileDuration
}
func GetClusterRegistriesNamespace() string {
- return admiralParams.ClusterRegistriesNamespace
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.ClusterRegistriesNamespace
}
func GetDependenciesNamespace() string {
- return admiralParams.DependenciesNamespace
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.DependenciesNamespace
}
func GetSyncNamespace() string {
- return admiralParams.SyncNamespace
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.SyncNamespace
}
func GetEnableSAN() bool {
- return admiralParams.EnableSAN
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.EnableSAN
}
func GetSANPrefix() string {
- return admiralParams.SANPrefix
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.SANPrefix
}
-func GetSecretResolver() string {
- return admiralParams.SecretResolver
+func GetAdmiralConfigPath() string {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.AdmiralConfig
}
func GetLabelSet() *LabelSet {
- return admiralParams.LabelSet
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.LabelSet
}
func GetAdditionalEndpointSuffixes() []string {
- return admiralParams.AdditionalEndpointSuffixes
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.AdditionalEndpointSuffixes
}
func GetAdditionalEndpointLabelFilters() []string {
- return admiralParams.AdditionalEndpointLabelFilters
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.AdditionalEndpointLabelFilters
+}
+
+func GetEnableWorkloadDataStorage() bool {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.EnableWorkloadDataStorage
}
func GetHostnameSuffix() string {
- return admiralParams.HostnameSuffix
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.HostnameSuffix
}
func GetWorkloadIdentifier() string {
- return admiralParams.LabelSet.WorkloadIdentityKey
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.LabelSet.WorkloadIdentityKey
}
-func GetGlobalTrafficDeploymentLabel() string {
- return admiralParams.LabelSet.GlobalTrafficDeploymentLabel
+func GetPartitionIdentifier() string {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.LabelSet.IdentityPartitionKey
+}
+
+func GetTrafficConfigIdentifier() string {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.LabelSet.TrafficConfigIdentityKey
+}
+
+func GetAdmiralCRDIdentityLabel() string {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.LabelSet.AdmiralCRDIdentityLabel
}
func GetRoutingPolicyLabel() string {
- return admiralParams.LabelSet.WorkloadIdentityKey
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.LabelSet.WorkloadIdentityKey
}
func GetWorkloadSidecarUpdate() string {
- return admiralParams.WorkloadSidecarUpdate
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.WorkloadSidecarUpdate
}
-func GetEnvoyFilterVersion() string {
- return admiralParams.EnvoyFilterVersion
+func GetEnvoyFilterVersion() []string {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ if len(strings.TrimSpace(wrapper.params.EnvoyFilterVersion)) == 0 {
+ return []string{}
+ }
+ return strings.Split(wrapper.params.EnvoyFilterVersion, ",")
+}
+
+func GetDeprecatedEnvoyFilterVersion() []string {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ if len(strings.TrimSpace(wrapper.params.DeprecatedEnvoyFilterVersion)) == 0 {
+ return []string{}
+ }
+ return strings.Split(wrapper.params.DeprecatedEnvoyFilterVersion, ",")
}
func GetEnvoyFilterAdditionalConfig() string {
- return admiralParams.EnvoyFilterAdditionalConfig
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.EnvoyFilterAdditionalConfig
}
func GetEnableRoutingPolicy() bool {
- return admiralParams.EnableRoutingPolicy
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.EnableRoutingPolicy
}
func GetWorkloadSidecarName() string {
- return admiralParams.WorkloadSidecarName
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.WorkloadSidecarName
}
func GetEnvKey() string {
- return admiralParams.LabelSet.EnvKey
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.LabelSet.EnvKey
}
func GetMetricsEnabled() bool {
- return admiralParams.MetricsEnabled
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.MetricsEnabled
}
-///Setters - be careful
+func IsPersonaTrafficConfig() bool {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.TrafficConfigPersona
+}
+
+// This function is used to determine if a feature is enabled or not.
+// If the feature is not present in the list, it is assumed to be enabled.
+// Also any value other than "disabled" is assumed to be enabled.
+func IsCartographerFeatureDisabled(featureName string) bool {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+
+ if wrapper.params.CartographerFeatures == nil {
+ return false
+ }
+ // If the feature exists in the list and is set to disabled, return true
+ if val, ok := wrapper.params.CartographerFeatures[featureName]; ok {
+ return val == "disabled"
+ } else {
+ return false
+ }
+}
+
+func IsDefaultPersona() bool {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return !wrapper.params.TrafficConfigPersona
+}
+
+func GetHAMode() string {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.HAMode
+}
+
+func GetDiffCheckEnabled() bool {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.EnableDiffCheck
+}
+
+func IsProxyEnvoyFilterEnabled() bool {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.EnableProxyEnvoyFilter
+}
+
+func IsDependencyProcessingEnabled() bool {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.EnableDependencyProcessing
+}
+
+func GetSeAddressConfigMap() string {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.SeAddressConfigmap
+}
+
+func DeploymentOrRolloutWorkerConcurrency() int {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.DeploymentOrRolloutWorkerConcurrency
+}
+
+func DependentClusterWorkerConcurrency() int {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.DependentClusterWorkerConcurrency
+}
+
+func DependencyWarmupMultiplier() int {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.DependencyWarmupMultiplier
+}
+
+func MaxRequestsPerConnection() int32 {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.MaxRequestsPerConnection
+}
+
+func IsAbsoluteFQDNEnabled() bool {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.EnableAbsoluteFQDN
+}
+
+func IsClientConnectionConfigProcessingEnabled() bool {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.EnableClientConnectionConfigProcessing
+}
+
+func IsAbsoluteFQDNEnabledForLocalEndpoints() bool {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.EnableAbsoluteFQDNForLocalEndpoints
+}
+
+func DisableDefaultAutomaticFailover() bool {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.DisableDefaultAutomaticFailover
+}
+
+func EnableServiceEntryCache() bool {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.EnableServiceEntryCache
+}
+
+func EnableDestinationRuleCache() bool {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.EnableDestinationRuleCache
+}
+
+func AlphaIdentityList() []string {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.AlphaIdentityList
+}
func SetKubeconfigPath(path string) {
- admiralParams.KubeconfigPath = path
+ wrapper.Lock()
+ defer wrapper.Unlock()
+ wrapper.params.KubeconfigPath = path
}
-// for unit test only
func SetEnablePrometheus(value bool) {
- admiralParams.MetricsEnabled = value
+ wrapper.Lock()
+ defer wrapper.Unlock()
+ wrapper.params.MetricsEnabled = value
+}
+
+func SetArgoRolloutsEnabled(value bool) {
+ wrapper.Lock()
+ defer wrapper.Unlock()
+ wrapper.params.ArgoRolloutsEnabled = value
+}
+
+func SetCartographerFeature(featureName string, val string) {
+ wrapper.Lock()
+ defer wrapper.Unlock()
+ if wrapper.params.CartographerFeatures == nil {
+ wrapper.params.CartographerFeatures = make(map[string]string)
+ }
+ wrapper.params.CartographerFeatures[featureName] = val
+}
+
+func GetGatewayAssetAliases() []string {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.GatewayAssetAliases
+}
+
+func DisableIPGeneration() bool {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.DisableIPGeneration
+}
+
+func EnableActivePassive() bool {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.EnableActivePassive
+}
+
+func EnableExportTo(identityOrCname string) bool {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ if wrapper.params.ExportToIdentityList != nil {
+ for _, identity := range wrapper.params.ExportToIdentityList {
+ if identity != "" && (identity == "*" || strings.Contains(strings.ToLower(identityOrCname), strings.ToLower(identity))) && wrapper.params.EnableSWAwareNSCaches {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func EnableSWAwareNSCaches() bool {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.EnableSWAwareNSCaches
+}
+
+func DoSyncIstioResourcesToSourceClusters() bool {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.EnableSyncIstioResourcesToSourceClusters
+}
+
+func GetResyncIntervals() util.ResyncIntervals {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return util.ResyncIntervals{
+ UniversalReconcileInterval: wrapper.params.CacheReconcileDuration,
+ SeAndDrReconcileInterval: wrapper.params.SeAndDrCacheReconcileDuration,
+ }
+}
+
+func GetExportToMaxNamespaces() int {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.ExportToMaxNamespaces
+}
+
+func IsAdmiralStateSyncerMode() bool {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.AdmiralStateSyncerMode
+}
+
+func GetDefaultWarmupDurationSecs() int64 {
+ wrapper.RLock()
+ defer wrapper.RUnlock()
+ return wrapper.params.DefaultWarmupDurationSecs
}
From 3a26a13bfbc2d970ca4000a94e497dcb9e976543 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 15:53:12 -0700
Subject: [PATCH 104/235] copy config_test.go from main branch
---
admiral/pkg/controller/common/config_test.go | 179 ++++++++++++++-----
1 file changed, 139 insertions(+), 40 deletions(-)
diff --git a/admiral/pkg/controller/common/config_test.go b/admiral/pkg/controller/common/config_test.go
index 23a11be2..ce271aaf 100644
--- a/admiral/pkg/controller/common/config_test.go
+++ b/admiral/pkg/controller/common/config_test.go
@@ -1,42 +1,60 @@
package common
import (
+ "sync"
"testing"
"time"
+
+ "github.com/stretchr/testify/assert"
+
+ log "github.com/sirupsen/logrus"
)
-func TestConfigManagement(t *testing.T) {
+var configTestSingleton sync.Once
+
+func setupForConfigTests() {
+ var initHappened bool
+ configTestSingleton.Do(func() {
+ p := AdmiralParams{
+ KubeconfigPath: "testdata/fake.config",
+ LabelSet: &LabelSet{
+ WorkloadIdentityKey: "identity",
+ AdmiralCRDIdentityLabel: "identity",
+ IdentityPartitionKey: "admiral.io/identityPartition",
+ },
+ EnableSAN: true,
+ SANPrefix: "prefix",
+ HostnameSuffix: "mesh",
+ SyncNamespace: "admiral-sync",
+ SecretFilterTags: "admiral/sync",
+ CacheReconcileDuration: time.Minute,
+ ClusterRegistriesNamespace: "default",
+ DependenciesNamespace: "default",
+ Profile: "default",
+ WorkloadSidecarName: "default",
+ WorkloadSidecarUpdate: "disabled",
+ MetricsEnabled: true,
+ DeprecatedEnvoyFilterVersion: "1.10,1.17",
+ EnvoyFilterVersion: "1.10,1.13,1.17",
+ CartographerFeatures: map[string]string{"throttle_filter_gen": "disabled"},
+ DisableIPGeneration: false,
+ EnableSWAwareNSCaches: true,
+ ExportToIdentityList: []string{"*"},
+ ExportToMaxNamespaces: 35,
+ }
+ ResetSync()
+ initHappened = true
+ InitializeConfig(p)
+ })
+ if !initHappened {
+ log.Warn("InitializeConfig was NOT called from setupForConfigTests")
+ } else {
+ log.Info("InitializeConfig was called from setupForConfigTests")
+ }
+}
- //Initial state comes from the init method in configInitializer.go
- //p := AdmiralParams{
- // KubeconfigPath: "testdata/fake.config",
- // LabelSet: &LabelSet{},
- // EnableSAN: true,
- // SANPrefix: "prefix",
- // HostnameSuffix: "mesh",
- // SyncNamespace: "ns",
- //}
- //
- //p.LabelSet.WorkloadIdentityKey="identity"
-
- //trying to initialize again. If the singleton pattern works, none of these will have changed
- p := AdmiralParams{
- KubeconfigPath: "DIFFERENT",
- LabelSet: &LabelSet{},
- EnableSAN: false,
- SANPrefix: "BAD_PREFIX",
- HostnameSuffix: "NOT_MESH",
- SyncNamespace: "NOT_A_NAMESPACE",
- CacheRefreshDuration: time.Hour,
- ClusterRegistriesNamespace: "NOT_DEFAULT",
- DependenciesNamespace: "NOT_DEFAULT",
- SecretResolver: "INSECURE_RESOLVER",
- }
-
- p.LabelSet.WorkloadIdentityKey = "BAD_LABEL"
- p.LabelSet.GlobalTrafficDeploymentLabel = "ANOTHER_BAD_LABEL"
-
- InitializeConfig(p)
+func TestConfigManagement(t *testing.T) {
+ setupForConfigTests()
if GetWorkloadIdentifier() != "identity" {
t.Errorf("Workload identifier mismatch, expected identity, got %v", GetWorkloadIdentifier())
@@ -44,20 +62,23 @@ func TestConfigManagement(t *testing.T) {
if GetKubeconfigPath() != "testdata/fake.config" {
t.Errorf("Kubeconfig path mismatch, expected testdata/fake.config, got %v", GetKubeconfigPath())
}
+ if GetSecretFilterTags() != "admiral/sync" {
+ t.Errorf("Filter tags mismatch, expected admiral/sync, got %v", GetSecretFilterTags())
+ }
if GetSANPrefix() != "prefix" {
t.Errorf("San prefix mismatch, expected prefix, got %v", GetSANPrefix())
}
if GetHostnameSuffix() != "mesh" {
t.Errorf("Hostname suffix mismatch, expected mesh, got %v", GetHostnameSuffix())
}
- if GetSyncNamespace() != "ns" {
+ if GetSyncNamespace() != "admiral-sync" {
t.Errorf("Sync namespace mismatch, expected ns, got %v", GetSyncNamespace())
}
if GetEnableSAN() != true {
t.Errorf("Enable SAN mismatch, expected true, got %v", GetEnableSAN())
}
if GetCacheRefreshDuration() != time.Minute {
- t.Errorf("Cachee refresh duration mismatch, expected %v, got %v", time.Minute, GetCacheRefreshDuration())
+ t.Errorf("Cache refresh duration mismatch, expected %v, got %v", time.Minute, GetCacheRefreshDuration())
}
if GetClusterRegistriesNamespace() != "default" {
t.Errorf("Cluster registry namespace mismatch, expected default, got %v", GetClusterRegistriesNamespace())
@@ -65,14 +86,11 @@ func TestConfigManagement(t *testing.T) {
if GetDependenciesNamespace() != "default" {
t.Errorf("Dependency namespace mismatch, expected default, got %v", GetDependenciesNamespace())
}
- if GetSecretResolver() != "" {
- t.Errorf("Secret resolver mismatch, expected empty string, got %v", GetSecretResolver())
+ if GetAdmiralProfile() != "default" {
+ t.Errorf("Admiral profile mismatch, expected default, got %v", GetAdmiralProfile())
}
- if GetGlobalTrafficDeploymentLabel() != "identity" {
- t.Fatalf("GTP Deployment label mismatch. Expected identity, got %v", GetGlobalTrafficDeploymentLabel())
- }
- if GetGlobalTrafficDeploymentLabel() != "identity" {
- t.Fatalf("GTP Deployment label mismatch. Expected identity, got %v", GetGlobalTrafficDeploymentLabel())
+ if GetAdmiralCRDIdentityLabel() != "identity" {
+ t.Fatalf("Admiral CRD Identity label mismatch. Expected identity, got %v", GetAdmiralCRDIdentityLabel())
}
if GetWorkloadSidecarName() != "default" {
t.Fatalf("Workload Sidecar Name mismatch. Expected default, got %v", GetWorkloadSidecarName())
@@ -91,4 +109,85 @@ func TestConfigManagement(t *testing.T) {
t.Errorf("Enable Prometheus mismatch, expected false, got %v", GetMetricsEnabled())
}
+ if IsPersonaTrafficConfig() != false {
+ t.Errorf("Enable Traffic Persona mismatch, expected false, got %v", IsPersonaTrafficConfig())
+ }
+
+ if IsDefaultPersona() != true {
+ t.Errorf("Enable Default Persona mismatch, expected true, got %v", IsDefaultPersona())
+ }
+
+ if len(GetDeprecatedEnvoyFilterVersion()) != 2 {
+ t.Errorf("Get deprecated envoy filter version by splitting with ',', expected 2, got %v", len(GetDeprecatedEnvoyFilterVersion()))
+ }
+
+ if len(GetEnvoyFilterVersion()) != 3 {
+ t.Errorf("Get envoy filter version by splitting with ',', expected 3, got %v", len(GetEnvoyFilterVersion()))
+ }
+
+ if IsCartographerFeatureDisabled("router_filter_gen") {
+ t.Errorf("A feature absent from the cartographer feature list should be assumed enabled, expected false, got %v", IsCartographerFeatureDisabled("router_filter_gen"))
+ }
+
+ if !IsCartographerFeatureDisabled("throttle_filter_gen") {
+ t.Errorf("A feature present in the cartographer feature list with value disabled should be reported as disabled, expected true, got %v", IsCartographerFeatureDisabled("throttle_filter_gen"))
+ }
+
+ if DisableIPGeneration() {
+ t.Errorf("Disable IP Address Generation mismatch, expected false, got %v", DisableIPGeneration())
+ }
+
+ if GetPartitionIdentifier() != "admiral.io/identityPartition" {
+ t.Errorf("Get identity partition mismatch, expected admiral.io/identityPartition, got %v", GetPartitionIdentifier())
+ }
+
+ if !EnableSWAwareNSCaches() {
+ t.Errorf("enable SW aware namespace caches mismatch, expected true, got %v", EnableSWAwareNSCaches())
+ }
+
+ if !EnableExportTo("fakeIdentity") {
+ t.Errorf("enable exportTo mismatch, expected true, got %v", EnableExportTo("fakeIdentity"))
+ }
+
+ if GetExportToMaxNamespaces() != 35 {
+ t.Errorf("exportTo max namespaces mismatch, expected 35, got %v", GetExportToMaxNamespaces())
+ }
}
+
+func TestGetCRDIdentityLabelWithCRDIdentity(t *testing.T) {
+
+ admiralParams := GetAdmiralParams()
+ backOldIdentity := admiralParams.LabelSet.AdmiralCRDIdentityLabel
+ admiralParams.LabelSet.AdmiralCRDIdentityLabel = "identityOld"
+
+ assert.Equalf(t, "identityOld", GetAdmiralCRDIdentityLabel(), "GetCRDIdentityLabel()")
+
+ admiralParams.LabelSet.AdmiralCRDIdentityLabel = backOldIdentity
+}
+
+//func TestGetCRDIdentityLabelWithLabel(t *testing.T) {
+//
+// admiralParams := GetAdmiralParams()
+// backOldIdentity := admiralParams.LabelSet.AdmiralCRDIdentityLabel
+// backOldGTPLabel := admiralParams.LabelSet.GlobalTrafficDeploymentLabel
+// admiralParams.LabelSet.GlobalTrafficDeploymentLabel = "identityGTP"
+//
+// assert.Equalf(t, "identityGTP", GetAdmiralCRDIdentityLabel(), "GetAdmiralCRDIdentityLabel()")
+//
+// admiralParams.LabelSet.CRDIdentityLabel = backOldIdentity
+// admiralParams.LabelSet.GlobalTrafficDeploymentLabel = backOldGTPLabel
+//}
+
+//func TestGetCRDIdentityLabelWithEmptyLabel(t *testing.T) {
+//
+// admiralParams := GetAdmiralParams()
+// backOldIdentity := admiralParams.LabelSet.CRDIdentityLabel
+// backOldGTPLabel := admiralParams.LabelSet.GlobalTrafficDeploymentLabel
+// admiralParams.LabelSet.GlobalTrafficDeploymentLabel = ""
+//
+// assert.Equalf(t, "", GetCRDIdentityLabel(), "GetCRDIdentityLabel()")
+//
+// admiralParams.LabelSet.GlobalTrafficDeploymentLabel = ""
+// admiralParams.LabelSet.CRDIdentityLabel = backOldIdentity
+// admiralParams.LabelSet.GlobalTrafficDeploymentLabel = backOldGTPLabel
+//}
From 9918428c57aa37c5be0dc3f87cc0cf8054c0154a Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 15:53:59 -0700
Subject: [PATCH 105/235] copy metrics.go from main branch
---
admiral/pkg/controller/common/metrics.go | 90 +++---------------------
1 file changed, 11 insertions(+), 79 deletions(-)
diff --git a/admiral/pkg/controller/common/metrics.go b/admiral/pkg/controller/common/metrics.go
index c022cfcf..6da097c8 100644
--- a/admiral/pkg/controller/common/metrics.go
+++ b/admiral/pkg/controller/common/metrics.go
@@ -1,100 +1,32 @@
package common
-import (
- "github.com/prometheus/client_golang/prometheus"
- "sync"
-)
+import "github.com/prometheus/client_golang/prometheus"
-const (
- ClustersMonitoredMetricName = "clusters_monitored"
- EventsProcessedTotalMetricName = "events_processed_total"
-
- AddEventLabelValue = "add"
- UpdateEventLabelValue = "update"
- DeleteEventLabelValue = "delete"
-)
-
-var (
- metricsOnce sync.Once
- RemoteClustersMetric Gauge
- EventsProcessed Counter
-)
+const ClustersMonitoredMetricName = "clusters_monitored"
+const DependencyProxyServiceCacheSizeMetricName = "dependency_proxy_service_cache_size"
type Gauge interface {
- With(labelValues ...string) Gauge
Set(value float64)
}
-type Counter interface {
- With(labelValues ...string) Counter
- Inc()
-}
-
-/*
-InitializeMetrics depends on AdmiralParams for metrics enablement.
-*/
-func InitializeMetrics() {
- metricsOnce.Do(func() {
- RemoteClustersMetric = NewGaugeFrom(ClustersMonitoredMetricName, "Gauge for the clusters monitored by Admiral", []string{})
- EventsProcessed = NewCounterFrom(EventsProcessedTotalMetricName, "Counter for the events processed by Admiral", []string{"cluster", "object_type", "event_type"})
- })
-}
-
-func NewGaugeFrom(name string, help string, labelNames []string) Gauge {
+func NewGaugeFrom(name string, help string) Gauge {
if !GetMetricsEnabled() {
- return &NoopGauge{}
+ return &Noop{}
}
opts := prometheus.GaugeOpts{Name: name, Help: help}
- g := prometheus.NewGaugeVec(opts, labelNames)
+ g := prometheus.NewGauge(opts)
prometheus.MustRegister(g)
- return &PromGauge{g, labelNames}
+ return &PromGauge{g}
}
-func NewCounterFrom(name string, help string, labelNames []string) Counter {
- if !GetMetricsEnabled() {
- return &NoopCounter{}
- }
- opts := prometheus.CounterOpts{Name: name, Help: help}
- c := prometheus.NewCounterVec(opts, labelNames)
- prometheus.MustRegister(c)
- return &PromCounter{c, labelNames}
-}
-
-type NoopGauge struct{}
-type NoopCounter struct{}
+type Noop struct{}
type PromGauge struct {
- g *prometheus.GaugeVec
- lvs []string
-}
-
-type PromCounter struct {
- c *prometheus.CounterVec
- lvs []string
-}
-
-func (g *PromGauge) With(labelValues ...string) Gauge {
- g.lvs = append([]string{}, labelValues...)
-
- return g
+ g prometheus.Gauge
}
func (g *PromGauge) Set(value float64) {
- g.g.WithLabelValues(g.lvs...).Set(value)
-}
-
-func (c *PromCounter) With(labelValues ...string) Counter {
- c.lvs = append([]string{}, labelValues...)
-
- return c
+ g.g.Set(value)
}
-func (c *PromCounter) Inc() {
- c.c.WithLabelValues(c.lvs...).Inc()
-}
-
-func (g *NoopGauge) Set(float64) {}
-func (g *NoopGauge) With(...string) Gauge { return g }
-
-func (g *NoopCounter) Inc() {}
-func (g *NoopCounter) With(...string) Counter { return g }
+func (g *Noop) Set(value float64) {}
From 39abb3c63da810658177c5a19fa72da70f627a0f Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 15:55:24 -0700
Subject: [PATCH 106/235] copy metrics_test.go from main branch
---
admiral/pkg/controller/common/metrics_test.go | 124 +++---------------
1 file changed, 16 insertions(+), 108 deletions(-)
diff --git a/admiral/pkg/controller/common/metrics_test.go b/admiral/pkg/controller/common/metrics_test.go
index 0f881246..18e9b602 100644
--- a/admiral/pkg/controller/common/metrics_test.go
+++ b/admiral/pkg/controller/common/metrics_test.go
@@ -1,132 +1,40 @@
package common
import (
+ "testing"
+
"github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/stretchr/testify/assert"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "regexp"
- "strconv"
- "testing"
)
func TestNewGaugeFrom(t *testing.T) {
type args struct {
- prom bool
- name string
- help string
- value int64
- labelNames []string
- labelValues []string
- }
- tc := []struct {
- name string
- args args
- wantMetric bool
- wantValue int64
- }{
- {
- name: "Should return a Prometheus gauge",
- args: args{true, "mygauge", "", 10, []string{"l1", "l2"}, []string{"v1", "v2"}},
- wantMetric: true,
- wantValue: 10,
- },
- {
- name: "Should return a Noop gauge",
- args: args{false, "mygauge", "", 10, []string{}, []string{}},
- wantMetric: false,
- },
- }
-
- for _, tt := range tc {
- t.Run(tt.name, func(t *testing.T) {
- SetEnablePrometheus(tt.args.prom)
-
- // exercise metric
- actual := NewGaugeFrom(tt.args.name, tt.args.help, tt.args.labelNames)
- actual.With(tt.args.labelValues...).Set(float64(tt.args.value))
-
- // query metrics endpoint
- s := httptest.NewServer(promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{}))
- defer s.Close()
-
- // parse response
- resp, _ := http.Get(s.URL)
- buf, _ := ioutil.ReadAll(resp.Body)
- actualString := string(buf)
-
- // verify
- if tt.wantMetric {
- pattern := tt.args.name + `{l1="v1",l2="v2"} ([0-9]+)`
- re := regexp.MustCompile(pattern)
- matches := re.FindStringSubmatch(actualString)
- f, _ := strconv.ParseInt(matches[1], 0, 64)
- assert.Equal(t, tt.wantValue, f)
- }
- assert.Equal(t, 200, resp.StatusCode)
- })
- }
-}
-
-func TestNewCounterFrom(t *testing.T) {
- type args struct {
- prom bool
- name string
- help string
- value int64
- labelNames []string
- labelValues []string
+ prom bool
+ Name string
+ Help string
}
tc := []struct {
- name string
- args args
- wantMetric bool
- wantValue int64
+ name string
+ args args
+ want Gauge
}{
{
- name: "Should return a Noop counter",
- args: args{false, "mycounter", "", 10, []string{}, []string{}},
- wantMetric: false,
+ "Should return a Prometheus gauge",
+ args{true, "gauge", ""},
+ &PromGauge{prometheus.NewGauge(prometheus.GaugeOpts{Name: "gauge", Help: ""})},
},
{
- name: "Should return a Prometheus counter",
- args: args{true, "mycounter", "", 1, []string{"l1", "l2"}, []string{"v1", "v2"}},
- wantMetric: true,
- wantValue: 1,
+ "Should return a Noop gauge",
+ args{false, "gauge", ""},
+ &Noop{},
},
}
for _, tt := range tc {
t.Run(tt.name, func(t *testing.T) {
SetEnablePrometheus(tt.args.prom)
-
- // exercise metric
- actual := NewCounterFrom(tt.args.name, tt.args.help, tt.args.labelNames)
- var i int64
- for i = 0; i < tt.args.value; i++ {
- actual.With(tt.args.labelValues...).Inc()
- }
-
- // query metrics endpoint
- s := httptest.NewServer(promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{}))
- defer s.Close()
-
- // parse response
- resp, _ := http.Get(s.URL)
- buf, _ := ioutil.ReadAll(resp.Body)
- actualString := string(buf)
-
- // verify
- if tt.wantMetric {
- pattern := tt.args.name + `{l1="v1",l2="v2"} ([0-9]+)`
- re := regexp.MustCompile(pattern)
- s2 := re.FindStringSubmatch(actualString)[1]
- f, _ := strconv.ParseInt(s2, 0, 64)
- assert.Equal(t, tt.wantValue, f)
- }
- assert.Equal(t, 200, resp.StatusCode)
+ actual := NewGaugeFrom(tt.args.Name, tt.args.Help)
+ assert.Equal(t, tt.want, actual, "want: %#v, got: %#v", tt.want, actual)
})
}
}
From 1988af09c6c7daa7583172d64bf2f40dc5ed9b6d Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 15:56:08 -0700
Subject: [PATCH 107/235] copy rolloutcommon.go from main branch
---
.../pkg/controller/common/rolloutcommon.go | 50 +++++++++++++------
1 file changed, 36 insertions(+), 14 deletions(-)
diff --git a/admiral/pkg/controller/common/rolloutcommon.go b/admiral/pkg/controller/common/rolloutcommon.go
index 5500d03d..a88eb821 100644
--- a/admiral/pkg/controller/common/rolloutcommon.go
+++ b/admiral/pkg/controller/common/rolloutcommon.go
@@ -1,11 +1,12 @@
package common
import (
- argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
- "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- log "github.com/sirupsen/logrus"
"sort"
"strings"
+
+ argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ log "github.com/sirupsen/logrus"
)
// GetCname returns cname in the format ..global, Ex: stage.Admiral.services.registry.global
@@ -13,11 +14,11 @@ func GetCnameForRollout(rollout *argo.Rollout, identifier string, nameSuffix str
var environment = GetEnvForRollout(rollout)
alias := GetValueForKeyFromRollout(identifier, rollout)
if len(alias) == 0 {
- log.Warnf("%v label missing on deployment %v in namespace %v. Falling back to annotation to create cname.", identifier, rollout.Name, rollout.Namespace)
+ log.Warnf("%v label missing on rollout %v in namespace %v. Falling back to annotation to create cname.", identifier, rollout.Name, rollout.Namespace)
alias = rollout.Spec.Template.Annotations[identifier]
}
if len(alias) == 0 {
- log.Errorf("Unable to get cname for deployment with name %v in namespace %v as it doesn't have the %v annotation", rollout.Name, rollout.Namespace, identifier)
+ log.Errorf("Unable to get cname for rollout with name %v in namespace %v as it doesn't have the %v annotation", rollout.Name, rollout.Namespace, identifier)
return ""
}
cname := environment + Sep + alias + Sep + nameSuffix
@@ -45,16 +46,16 @@ func GetSANForRollout(domain string, rollout *argo.Rollout, identifier string) s
func GetValueForKeyFromRollout(key string, rollout *argo.Rollout) string {
value := rollout.Spec.Template.Labels[key]
if len(value) == 0 {
- log.Warnf("%v label missing on deployment %v in namespace %v. Falling back to annotation.", key, rollout.Name, rollout.Namespace)
+ log.Warnf("%v label missing on rollout %v in namespace %v. Falling back to annotation.", key, rollout.Name, rollout.Namespace)
value = rollout.Spec.Template.Annotations[key]
}
return value
}
-//Returns the list of rollouts to which this GTP should apply. It is assumed that all inputs already are an identity match
-//If the GTP has an identity label, it should match all rollouts which share that label
-//If the GTP does not have an identity label, it should return all rollouts without an identity label
-//IMPORTANT: If an environment label is specified on either the GTP or the rollout, the same value must be specified on the other for them to match
+// Returns the list of rollouts to which this GTP should apply. It is assumed that all inputs already are an identity match
+// If the GTP has an identity label, it should match all rollouts which share that label
+// If the GTP does not have an identity label, it should return all rollouts without an identity label
+// IMPORTANT: If an environment label is specified on either the GTP or the rollout, the same value must be specified on the other for them to match
func MatchRolloutsToGTP(gtp *v1.GlobalTrafficPolicy, rollouts []argo.Rollout) []argo.Rollout {
if gtp == nil || gtp.Name == "" {
log.Warn("Nil or empty GlobalTrafficPolicy provided for rollout match. Returning nil.")
@@ -92,15 +93,36 @@ func GetRolloutGlobalIdentifier(rollout *argo.Rollout) string {
//TODO can this be removed now? This was for backward compatibility
identity = rollout.Spec.Template.Annotations[GetWorkloadIdentifier()]
}
+ if EnableSWAwareNSCaches() && len(identity) > 0 && len(GetRolloutIdentityPartition(rollout)) > 0 {
+ identity = GetRolloutIdentityPartition(rollout) + Sep + strings.ToLower(identity)
+ }
return identity
}
-//Find the GTP that best matches the rollout.
-//It's assumed that the set of GTPs passed in has already been matched via the GtprolloutLabel. Now it's our job to choose the best one.
-//In order:
+func GetRolloutOriginalIdentifier(rollout *argo.Rollout) string {
+ identity := rollout.Spec.Template.Labels[GetWorkloadIdentifier()]
+ if len(identity) == 0 {
+ //TODO can this be removed now? This was for backward compatibility
+ identity = rollout.Spec.Template.Annotations[GetWorkloadIdentifier()]
+ }
+ return identity
+}
+
+func GetRolloutIdentityPartition(rollout *argo.Rollout) string {
+ identityPartition := rollout.Spec.Template.Annotations[GetPartitionIdentifier()]
+ if len(identityPartition) == 0 {
+ //In case partition is accidentally applied as Label
+ identityPartition = rollout.Spec.Template.Labels[GetPartitionIdentifier()]
+ }
+ return identityPartition
+}
+
+// Find the GTP that best matches the rollout.
+// It's assumed that the set of GTPs passed in has already been matched via the GtprolloutLabel. Now it's our job to choose the best one.
+// In order:
// - If one and only one GTP matches the env label of the rollout - use that one. Use "default" as the default env label for all GTPs and rollout.
// - If multiple GTPs match the rollout label, use the oldest one (Using an old one has less chance of new behavior which could impact workflows)
-//IMPORTANT: If an environment label is specified on either the GTP or the rollout, the same value must be specified on the other for them to match
+// IMPORTANT: If an environment label is specified on either the GTP or the rollout, the same value must be specified on the other for them to match
func MatchGTPsToRollout(gtpList []v1.GlobalTrafficPolicy, rollout *argo.Rollout) *v1.GlobalTrafficPolicy {
if rollout == nil || rollout.Name == "" {
log.Warn("Nil or empty GlobalTrafficPolicy provided for rollout match. Returning nil.")
From de0e1dda38b58754a6091b2218b3e88be60c7711 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 15:56:39 -0700
Subject: [PATCH 108/235] copy rolloutcommon_test.go from main branch
---
.../controller/common/rolloutcommon_test.go | 134 ++++++++++++++----
1 file changed, 106 insertions(+), 28 deletions(-)
diff --git a/admiral/pkg/controller/common/rolloutcommon_test.go b/admiral/pkg/controller/common/rolloutcommon_test.go
index 0306bba7..482840b3 100644
--- a/admiral/pkg/controller/common/rolloutcommon_test.go
+++ b/admiral/pkg/controller/common/rolloutcommon_test.go
@@ -1,41 +1,59 @@
package common
import (
- argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
- "github.com/google/go-cmp/cmp"
- v12 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/apis/meta/v1"
"reflect"
"strings"
+ "sync"
"testing"
"time"
+
+ argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
+ "github.com/google/go-cmp/cmp"
+ v12 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ log "github.com/sirupsen/logrus"
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-func init() {
- p := AdmiralParams{
- KubeconfigPath: "testdata/fake.config",
- LabelSet: &LabelSet{},
- EnableSAN: true,
- SANPrefix: "prefix",
- HostnameSuffix: "mesh",
- SyncNamespace: "ns",
- CacheRefreshDuration: time.Minute,
- ClusterRegistriesNamespace: "default",
- DependenciesNamespace: "default",
- SecretResolver: "",
- WorkloadSidecarName: "default",
- WorkloadSidecarUpdate: "disabled",
+var rolloutCommonTestSingleton sync.Once
+
+func setupForRolloutCommonTests() {
+ var initHappened bool
+ rolloutCommonTestSingleton.Do(func() {
+ p := AdmiralParams{
+ KubeconfigPath: "testdata/fake.config",
+ LabelSet: &LabelSet{
+ WorkloadIdentityKey: "identity",
+ AdmiralCRDIdentityLabel: "identity",
+ EnvKey: "admiral.io/env",
+ IdentityPartitionKey: "admiral.io/identityPartition",
+ },
+ EnableSAN: true,
+ SANPrefix: "prefix",
+ HostnameSuffix: "mesh",
+ SyncNamespace: "ns",
+ CacheReconcileDuration: time.Minute,
+ ClusterRegistriesNamespace: "default",
+ DependenciesNamespace: "default",
+ WorkloadSidecarName: "default",
+ WorkloadSidecarUpdate: "disabled",
+ EnableSWAwareNSCaches: true,
+ ExportToIdentityList: []string{"*"},
+ }
+
+ ResetSync()
+ initHappened = true
+ InitializeConfig(p)
+ })
+ if !initHappened {
+ log.Warn("InitializeConfig was NOT called from setupForRolloutCommonTests")
+ } else {
+ log.Info("InitializeConfig was called from setupForRolloutCommonTests")
}
-
- p.LabelSet.WorkloadIdentityKey = "identity"
- p.LabelSet.GlobalTrafficDeploymentLabel = "identity"
-
- InitializeConfig(p)
}
func TestGetEnvForRollout(t *testing.T) {
-
+ setupForRolloutCommonTests()
testCases := []struct {
name string
rollout argo.Rollout
@@ -52,8 +70,17 @@ func TestGetEnvForRollout(t *testing.T) {
expected: "stage2",
},
{
- name: "should return valid env from new env annotation",
- rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{"admiral.io/env": "stage1"}, Labels: map[string]string{"env": "stage2"}}}}},
+ name: "should return valid env from new env annotation",
+ rollout: argo.Rollout{
+ Spec: argo.RolloutSpec{
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
+ Annotations: map[string]string{"admiral.io/env": "stage1"},
+ Labels: map[string]string{"env": "stage2"},
+ },
+ },
+ },
+ },
expected: "stage1",
},
{
@@ -232,7 +259,7 @@ func TestMatchGTPsToRollout(t *testing.T) {
}
func TestGetRolloutGlobalIdentifier(t *testing.T) {
-
+ setupForRolloutCommonTests()
identifier := "identity"
identifierVal := "company.platform.server"
@@ -240,21 +267,31 @@ func TestGetRolloutGlobalIdentifier(t *testing.T) {
name string
rollout argo.Rollout
expected string
+ original string
}{
{
name: "should return valid identifier from label",
rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{identifier: identifierVal, "env": "stage"}}}}},
expected: identifierVal,
+ original: identifierVal,
},
{
name: "should return valid identifier from annotations",
rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{identifier: identifierVal, "env": "stage"}}}}},
expected: identifierVal,
+ original: identifierVal,
+ },
+ {
+ name: "should return partitioned identifier",
+ rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{identifier: identifierVal, "env": "stage", "admiral.io/identityPartition": "pid"}}}}},
+ expected: "pid." + identifierVal,
+ original: identifierVal,
},
{
name: "should return empty identifier",
rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{}, Annotations: map[string]string{}}}}},
expected: "",
+ original: "",
},
}
@@ -264,6 +301,47 @@ func TestGetRolloutGlobalIdentifier(t *testing.T) {
if !(iVal == c.expected) {
t.Errorf("Wanted identity value: %s, got: %s", c.expected, iVal)
}
+ oiVal := GetRolloutOriginalIdentifier(&c.rollout)
+ if !(oiVal == c.original) {
+ t.Errorf("Wanted original identity value: %s, got: %s", c.original, oiVal)
+ }
+ })
+ }
+}
+
+func TestGetRolloutIdentityPartition(t *testing.T) {
+ setupForRolloutCommonTests()
+ partitionIdentifier := "admiral.io/identityPartition"
+ identifierVal := "swX"
+
+ testCases := []struct {
+ name string
+ rollout argo.Rollout
+ expected string
+ }{
+ {
+ name: "should return valid identifier from label",
+ rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{partitionIdentifier: identifierVal, "env": "stage"}}}}},
+ expected: identifierVal,
+ },
+ {
+ name: "should return valid identifier from annotations",
+ rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{partitionIdentifier: identifierVal, "env": "stage"}}}}},
+ expected: identifierVal,
+ },
+ {
+ name: "should return empty identifier",
+ rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{}, Annotations: map[string]string{}}}}},
+ expected: "",
+ },
+ }
+
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ iVal := GetRolloutIdentityPartition(&c.rollout)
+ if !(iVal == c.expected) {
+ t.Errorf("Wanted identityPartition value: %s, got: %s", c.expected, iVal)
+ }
})
}
}
From eec84b25db5b74fd8e5ee0582a4cd00c536fdf0a Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 15:57:20 -0700
Subject: [PATCH 109/235] copy types.go from main branch
---
admiral/pkg/controller/common/types.go | 303 +++++++++++++++++++++----
1 file changed, 264 insertions(+), 39 deletions(-)
diff --git a/admiral/pkg/controller/common/types.go b/admiral/pkg/controller/common/types.go
index 210ba4f6..fb624bab 100644
--- a/admiral/pkg/controller/common/types.go
+++ b/admiral/pkg/controller/common/types.go
@@ -1,9 +1,14 @@
package common
import (
+ "context"
"fmt"
"sync"
"time"
+
+ log "github.com/sirupsen/logrus"
+
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
)
type Map struct {
@@ -13,63 +18,122 @@ type Map struct {
type MapOfMaps struct {
cache map[string]*Map
- mutex *sync.Mutex
+ mutex *sync.RWMutex
+}
+
+type MapOfMapOfMaps struct {
+ cache map[string]*MapOfMaps
+ mutex *sync.RWMutex
}
type SidecarEgress struct {
Namespace string
FQDN string
- CNAMEs map[string]string
+ CNAMEs *Map
}
-//maintains a map from workload identity -> map[namespace]SidecarEgress
+// maintains a map from workload identity -> map[namespace]SidecarEgress
type SidecarEgressMap struct {
cache map[string]map[string]SidecarEgress
mutex *sync.Mutex
}
type AdmiralParams struct {
- ArgoRolloutsEnabled bool
- KubeconfigPath string
- CacheRefreshDuration time.Duration
- ClusterRegistriesNamespace string
- DependenciesNamespace string
- SyncNamespace string
- EnableSAN bool
- SANPrefix string
- SecretResolver string
- LabelSet *LabelSet
- LogLevel int
- HostnameSuffix string
- PreviewHostnamePrefix string
- MetricsEnabled bool
- WorkloadSidecarUpdate string
- WorkloadSidecarName string
- AdmiralStateCheckerName string
- DRStateStoreConfigPath string
- ServiceEntryIPPrefix string
- EnvoyFilterVersion string
- EnvoyFilterAdditionalConfig string
- EnableRoutingPolicy bool
- ExcludedIdentityList []string
- AdditionalEndpointSuffixes []string
- AdditionalEndpointLabelFilters []string
+ ArgoRolloutsEnabled bool
+ KubeconfigPath string
+ SecretFilterTags string
+ CacheReconcileDuration time.Duration
+ SeAndDrCacheReconcileDuration time.Duration
+ ClusterRegistriesNamespace string
+ DependenciesNamespace string
+ DnsConfigFile string
+ DNSTimeoutMs int
+ DNSRetries int
+ TrafficConfigNamespace string
+ SyncNamespace string
+ EnableSAN bool
+ SANPrefix string
+ AdmiralConfig string
+ Profile string
+ LabelSet *LabelSet
+ LogLevel int
+ HostnameSuffix string
+ PreviewHostnamePrefix string
+ MetricsEnabled bool
+ ChannelCapacity int
+ WorkloadSidecarUpdate string
+ WorkloadSidecarName string
+ AdmiralStateCheckerName string
+ DRStateStoreConfigPath string
+ ServiceEntryIPPrefix string
+ EnvoyFilterVersion string
+ DeprecatedEnvoyFilterVersion string
+ EnvoyFilterAdditionalConfig string
+ EnableRoutingPolicy bool
+ ExcludedIdentityList []string
+ AdditionalEndpointSuffixes []string
+ AdditionalEndpointLabelFilters []string
+ HAMode string
+ EnableWorkloadDataStorage bool
+ EnableDiffCheck bool
+ EnableProxyEnvoyFilter bool
+ EnableDependencyProcessing bool
+ DeploymentOrRolloutWorkerConcurrency int
+ DependentClusterWorkerConcurrency int
+ SeAddressConfigmap string
+ DependencyWarmupMultiplier int
+ EnableOutlierDetection bool
+ EnableClientConnectionConfigProcessing bool
+ MaxRequestsPerConnection int32
+ EnableAbsoluteFQDN bool
+ EnableAbsoluteFQDNForLocalEndpoints bool
+ DisableDefaultAutomaticFailover bool
+ EnableServiceEntryCache bool
+ AlphaIdentityList []string
+ EnableDestinationRuleCache bool
+ DisableIPGeneration bool
+ EnableActivePassive bool
+ EnableSWAwareNSCaches bool
+ ExportToIdentityList []string
+ ExportToMaxNamespaces int
+ EnableSyncIstioResourcesToSourceClusters bool
+ AdmiralStateSyncerMode bool
+ DefaultWarmupDurationSecs int64
+
+ // Cartographer specific params
+ TrafficConfigPersona bool
+ TrafficConfigIgnoreAssets []string // used to ignore applying of client side envoy filters
+ CartographerFeatures map[string]string
+ TrafficConfigScope string
+ LogToFile bool
+ LogFilePath string
+ LogFileSizeInMBs int
+
+ // Air specific
+ GatewayAssetAliases []string
}
func (b AdmiralParams) String() string {
return fmt.Sprintf("KubeconfigPath=%v ", b.KubeconfigPath) +
- fmt.Sprintf("CacheRefreshDuration=%v ", b.CacheRefreshDuration) +
+ fmt.Sprintf("CacheRefreshDuration=%v ", b.CacheReconcileDuration) +
+ fmt.Sprintf("SEAndDRCacheRefreshDuration=%v ", b.SeAndDrCacheReconcileDuration) +
fmt.Sprintf("ClusterRegistriesNamespace=%v ", b.ClusterRegistriesNamespace) +
fmt.Sprintf("DependenciesNamespace=%v ", b.DependenciesNamespace) +
fmt.Sprintf("EnableSAN=%v ", b.EnableSAN) +
fmt.Sprintf("SANPrefix=%v ", b.SANPrefix) +
fmt.Sprintf("LabelSet=%v ", b.LabelSet) +
- fmt.Sprintf("SecretResolver=%v ", b.SecretResolver) +
- fmt.Sprintf("AdmiralStateCheckername=%v ", b.AdmiralStateCheckerName) +
+ fmt.Sprintf("SecretResolver=%v ", b.Profile) +
+ fmt.Sprintf("Profile=%v ", b.Profile) +
+ fmt.Sprintf("AdmiralStateCheckerName=%v ", b.AdmiralStateCheckerName) +
fmt.Sprintf("DRStateStoreConfigPath=%v ", b.DRStateStoreConfigPath) +
fmt.Sprintf("ServiceEntryIPPrefix=%v ", b.ServiceEntryIPPrefix) +
fmt.Sprintf("EnvoyFilterVersion=%v ", b.EnvoyFilterVersion) +
- fmt.Sprintf("EnableRoutingPolicy=%v ", b.EnableRoutingPolicy)
+ fmt.Sprintf("DeprecatedEnvoyFilterVersion=%v ", b.DeprecatedEnvoyFilterVersion) +
+ fmt.Sprintf("EnableRoutingPolicy=%v ", b.EnableRoutingPolicy) +
+ fmt.Sprintf("TrafficConfigNamespace=%v ", b.TrafficConfigNamespace) +
+ fmt.Sprintf("TrafficConfigPersona=%v ", b.TrafficConfigPersona) +
+ fmt.Sprintf("CartographerFeatures=%v ", b.CartographerFeatures) +
+ fmt.Sprintf("DefaultWarmupDuration=%v ", b.DefaultWarmupDurationSecs)
}
type LabelSet struct {
@@ -80,9 +144,24 @@ type LabelSet struct {
AdmiralIgnoreLabel string
PriorityKey string
WorkloadIdentityKey string //Should always be used for both label and annotation (using label as the primary, and falling back to annotation if the label is not found)
- GlobalTrafficDeploymentLabel string //label used to tie together deployments and globaltrafficpolicy objects. Configured separately from the identity key because this one _must_ be a label
+ TrafficConfigIdentityKey string //Should always be used for both label and annotation (using label as the primary, and falling back to annotation if the label is not found)
EnvKey string //key used to group deployments by env. The order would be to use annotation `EnvKey` and then label `EnvKey` and then fallback to label `env` label
GatewayApp string //the value for `app` key that will be used to fetch the loadblancer for cross cluster calls, also referred to as east west gateway
+ AdmiralCRDIdentityLabel string //Label Used to identify identity label for crd
+ IdentityPartitionKey string //Label used for partitioning assets with same identity into groups
+}
+
+type TrafficObject struct {
+ TrafficConfig *v1.TrafficConfig
+ ClusterID string
+ Ctx *Context
+ Event string
+}
+
+type Context struct {
+ Ctx context.Context
+ Log *log.Entry
+ Property map[string]string
}
func NewSidecarEgressMap() *SidecarEgressMap {
@@ -102,7 +181,14 @@ func NewMap() *Map {
func NewMapOfMaps() *MapOfMaps {
n := new(MapOfMaps)
n.cache = make(map[string]*Map)
- n.mutex = &sync.Mutex{}
+ n.mutex = &sync.RWMutex{}
+ return n
+}
+
+func NewMapOfMapOfMaps() *MapOfMapOfMaps {
+ n := new(MapOfMapOfMaps)
+ n.cache = make(map[string]*MapOfMaps)
+ n.mutex = &sync.RWMutex{}
return n
}
@@ -113,9 +199,26 @@ func (s *Map) Put(key string, value string) {
}
func (s *Map) Get(key string) string {
+ defer s.mutex.Unlock()
+ s.mutex.Lock()
return s.cache[key]
}
+func (s *Map) CheckIfPresent(key string) bool {
+ defer s.mutex.Unlock()
+ s.mutex.Lock()
+ if _, ok := s.cache[key]; ok {
+ return true
+ }
+ return false
+}
+
+func (s *Map) Len() int {
+ defer s.mutex.Unlock()
+ s.mutex.Lock()
+ return len(s.cache)
+}
+
func (s *Map) Delete(key string) {
defer s.mutex.Unlock()
s.mutex.Lock()
@@ -136,6 +239,18 @@ func (s *Map) Copy() map[string]string {
}
}
+func (s *Map) CopyJustValues() []string {
+ var copy []string
+ if s != nil {
+ defer s.mutex.Unlock()
+ s.mutex.Lock()
+ for _, v := range s.cache {
+ copy = append(copy, v)
+ }
+ }
+ return copy
+}
+
func (s *Map) Range(fn func(k string, v string)) {
s.mutex.Lock()
for k, v := range s.cache {
@@ -155,6 +270,17 @@ func (s *MapOfMaps) Put(pkey string, key string, value string) {
s.cache[pkey] = mapVal
}
+func (s *MapOfMaps) DeleteMap(pkey string, key string) {
+ defer s.mutex.Unlock()
+ s.mutex.Lock()
+ var mapVal = s.cache[pkey]
+ if mapVal == nil {
+ return
+ }
+ mapVal.Delete(key)
+ s.cache[pkey] = mapVal
+}
+
func (s *MapOfMaps) PutMap(pkey string, inputMap *Map) {
defer s.mutex.Unlock()
s.mutex.Lock()
@@ -174,10 +300,6 @@ func (s *MapOfMaps) Delete(key string) {
delete(s.cache, key)
}
-func (s *MapOfMaps) Map() map[string]*Map {
- return s.cache
-}
-
func (s *MapOfMaps) Range(fn func(k string, v *Map)) {
s.mutex.Lock()
for k, v := range s.cache {
@@ -186,6 +308,62 @@ func (s *MapOfMaps) Range(fn func(k string, v *Map)) {
s.mutex.Unlock()
}
+func (s *MapOfMaps) Len() int {
+ defer s.mutex.Unlock()
+ s.mutex.Lock()
+ return len(s.cache)
+}
+
+func (s *MapOfMaps) GetKeys() []string {
+ defer s.mutex.RUnlock()
+ s.mutex.RLock()
+ keys := []string{}
+ for k := range s.cache {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+func (s *MapOfMapOfMaps) Put(pkey string, skey string, key, value string) {
+ defer s.mutex.Unlock()
+ s.mutex.Lock()
+ var mapOfMapsVal = s.cache[pkey]
+ if mapOfMapsVal == nil {
+ mapOfMapsVal = NewMapOfMaps()
+ }
+ mapOfMapsVal.Put(skey, key, value)
+ s.cache[pkey] = mapOfMapsVal
+}
+
+func (s *MapOfMapOfMaps) PutMapofMaps(key string, value *MapOfMaps) {
+ defer s.mutex.Unlock()
+ s.mutex.Lock()
+ s.cache[key] = value
+}
+
+func (s *MapOfMapOfMaps) Get(key string) *MapOfMaps {
+ s.mutex.RLock()
+ val := s.cache[key]
+ s.mutex.RUnlock()
+ return val
+}
+
+func (s *MapOfMapOfMaps) Len() int {
+ defer s.mutex.RUnlock()
+ s.mutex.RLock()
+ return len(s.cache)
+}
+
+func (s *Map) GetKeys() []string {
+ defer s.mutex.Unlock()
+ s.mutex.Lock()
+ keys := make([]string, 0)
+ for _, val := range s.cache {
+ keys = append(keys, val)
+ }
+ return keys
+}
+
func (s *SidecarEgressMap) Put(identity string, namespace string, fqdn string, cnames map[string]string) {
defer s.mutex.Unlock()
s.mutex.Lock()
@@ -193,7 +371,11 @@ func (s *SidecarEgressMap) Put(identity string, namespace string, fqdn string, c
if mapVal == nil {
mapVal = make(map[string]SidecarEgress)
}
- mapVal[namespace] = SidecarEgress{Namespace: namespace, FQDN: fqdn, CNAMEs: cnames}
+ cnameMap := NewMap()
+ for k, v := range cnames {
+ cnameMap.Put(k, v)
+ }
+ mapVal[namespace] = SidecarEgress{Namespace: namespace, FQDN: fqdn, CNAMEs: cnameMap}
s.cache[identity] = mapVal
}
@@ -217,3 +399,46 @@ func (s *SidecarEgressMap) Range(fn func(k string, v map[string]SidecarEgress))
fn(k, v)
}
}
+
+type ProxyFilterRequestObject struct {
+ Identity string
+ ProxiedServiceInfo *ProxiedServiceInfo
+ DnsConfigFile string
+ DnsRetries int
+ DnsTimeoutMs int
+ ClusterID string
+ Ctx *Context
+ Event string
+}
+
+type ProxyFilterConfig struct {
+ ConfigFile string `json:"configFile"`
+ DNSTimeoutMs int `json:"dnsTimeoutMs"`
+ DNSRetries int `json:"dnsRetries"`
+ GatewayAssetAlias string `json:"gatewayAssetAlias"`
+ Services []*ProxiedServiceInfo `json:"services"`
+}
+
+type ProxiedServiceInfo struct {
+ Identity string `json:"assetAlias"`
+ ProxyAlias string `json:"-"`
+ Environments []*ProxiedServiceEnvironment `json:"environments"`
+}
+
+type ProxiedServiceEnvironment struct {
+ Environment string `json:"environment"`
+ DnsName string `json:"dnsName"`
+ CNames []string `json:"cNames"`
+}
+
+func (c *ProxyFilterConfig) String() string {
+ return fmt.Sprintf("{ConfigFile: %s, DNSTimeoutMs:%d, DNSRetries: %d, GatewayAssetAlias: %s, Services: %s}", c.ConfigFile, c.DNSTimeoutMs, c.DNSRetries, c.GatewayAssetAlias, c.Services)
+}
+
+func (s *ProxiedServiceInfo) String() string {
+ return fmt.Sprintf("{Identity:%s, Enviroments: %v}", s.Identity, s.Environments)
+}
+
+func (s *ProxiedServiceEnvironment) String() string {
+ return fmt.Sprintf("{Environment:%s, DnsName: %s, CNames: %s}", s.Environment, s.DnsName, s.CNames)
+}
From 23f7b3a8c30f2c462bc4bde95cd55fd86b43c843 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 15:57:50 -0700
Subject: [PATCH 110/235] copy types_test.go from main branch
---
admiral/pkg/controller/common/types_test.go | 68 ++++++++++++++++++++-
1 file changed, 67 insertions(+), 1 deletion(-)
diff --git a/admiral/pkg/controller/common/types_test.go b/admiral/pkg/controller/common/types_test.go
index b56c9a4d..20bc60b6 100644
--- a/admiral/pkg/controller/common/types_test.go
+++ b/admiral/pkg/controller/common/types_test.go
@@ -45,6 +45,72 @@ func TestMapOfMaps(t *testing.T) {
if map3 != nil {
t.Fail()
}
+
+}
+
+func TestDeleteMapOfMaps(t *testing.T) {
+ t.Parallel()
+ mapOfMaps := NewMapOfMaps()
+ mapOfMaps.Put("pkey1", "dev.a.global1", "127.0.10.1")
+ mapOfMaps.Put("pkey1", "dev.a.global2", "127.0.10.2")
+ mapOfMaps.DeleteMap("pkey1", "dev.a.global1")
+
+ mapValue := mapOfMaps.Get("pkey1")
+ if len(mapValue.Get("dev.a.global1")) > 0 {
+ t.Errorf("expected=nil, got=%v", mapValue.Get("dev.a.global1"))
+ }
+ if mapValue.Get("dev.a.global2") != "127.0.10.2" {
+ t.Errorf("expected=%v, got=%v", "127.0.10.2", mapValue.Get("dev.a.global2"))
+ }
+}
+
+func TestMapOfMapOfMaps(t *testing.T) {
+ t.Parallel()
+ mapOfMapOfMaps := NewMapOfMapOfMaps()
+ mapOfMapOfMaps.Put("pkey1", "dev.a.global1", "127.0.10.1", "ns1")
+ mapOfMapOfMaps.Put("pkey1", "dev.a.global2", "127.0.10.2", "ns2")
+ mapOfMapOfMaps.Put("pkey2", "qa.a.global", "127.0.10.1", "ns3")
+ mapOfMapOfMaps.Put("pkey2", "qa.a.global", "127.0.10.2", "ns4")
+
+ mapOfMaps1 := mapOfMapOfMaps.Get("pkey1")
+ if mapOfMaps1 == nil || mapOfMaps1.Get("dev.a.global1").Get("127.0.10.1") != "ns1" {
+ t.Fail()
+ }
+ if mapOfMapOfMaps.Len() != 2 {
+ t.Fail()
+ }
+
+ mapOfMaps1.Delete("dev.a.global2")
+
+ mapOfMaps2 := mapOfMapOfMaps.Get("pkey1")
+ if mapOfMaps2.Get("dev.a.global2") != nil {
+ t.Fail()
+ }
+
+ keyList := mapOfMapOfMaps.Get("pkey2").Get("qa.a.global").GetKeys()
+ if len(keyList) != 2 {
+ t.Fail()
+ }
+
+ mapOfMapOfMaps.Put("pkey3", "prod.a.global", "127.0.10.1", "ns5")
+
+ mapOfMaps3 := mapOfMapOfMaps.Get("pkey3")
+ if mapOfMaps3 == nil || mapOfMaps3.Get("prod.a.global").Get("127.0.10.1") != "ns5" {
+ t.Fail()
+ }
+
+ mapOfMaps4 := mapOfMapOfMaps.Get("pkey4")
+ if mapOfMaps4 != nil {
+ t.Fail()
+ }
+
+ mapOfMaps5 := NewMapOfMaps()
+ mapOfMaps5.Put("dev.b.global", "ns6", "ns6")
+ mapOfMapOfMaps.PutMapofMaps("pkey5", mapOfMaps5)
+ if mapOfMapOfMaps.Get("pkey5") == nil || mapOfMapOfMaps.Get("pkey5").Get("dev.b.global").Get("ns6") != "ns6" {
+ t.Fail()
+ }
+
}
func TestAdmiralParams(t *testing.T) {
@@ -92,7 +158,7 @@ func TestMapOfMapsRange(t *testing.T) {
mapOfMaps.Put("pkey2", "qa.a.global", "127.0.10.1")
mapOfMaps.Put("pkey3", "stage.a.global", "127.0.10.1")
- keys := make(map[string]string, len(mapOfMaps.Map()))
+ keys := make(map[string]string, len(mapOfMaps.cache))
for _, k := range keys {
keys[k] = k
}
From 10fc941c7ce1b43bf31daa8f68410de36ffa7c16 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 15:58:38 -0700
Subject: [PATCH 111/235] copy destinationrule.go from main branch
---
.../pkg/controller/istio/destinationrule.go | 183 ++++++++++++++++--
1 file changed, 167 insertions(+), 16 deletions(-)
diff --git a/admiral/pkg/controller/istio/destinationrule.go b/admiral/pkg/controller/istio/destinationrule.go
index f3a6c53c..35a301cd 100644
--- a/admiral/pkg/controller/istio/destinationrule.go
+++ b/admiral/pkg/controller/istio/destinationrule.go
@@ -3,8 +3,13 @@ package istio
import (
"context"
"fmt"
+ "sync"
"time"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ log "github.com/sirupsen/logrus"
+
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
networking "istio.io/client-go/pkg/apis/networking/v1alpha3"
versioned "istio.io/client-go/pkg/clientset/versioned"
@@ -16,9 +21,9 @@ import (
// Handler interface contains the methods that are required
type DestinationRuleHandler interface {
- Added(ctx context.Context, obj *networking.DestinationRule)
- Updated(ctx context.Context, obj *networking.DestinationRule)
- Deleted(ctx context.Context, obj *networking.DestinationRule)
+ Added(ctx context.Context, obj *networking.DestinationRule) error
+ Updated(ctx context.Context, obj *networking.DestinationRule) error
+ Deleted(ctx context.Context, obj *networking.DestinationRule) error
}
type DestinationRuleEntry struct {
@@ -30,16 +35,118 @@ type DestinationRuleController struct {
IstioClient versioned.Interface
DestinationRuleHandler DestinationRuleHandler
informer cache.SharedIndexInformer
+ Cache *DestinationRuleCache
+ Cluster string
+}
+
+type DestinationRuleItem struct {
+ DestinationRule *networking.DestinationRule
+ Status string
+}
+
+type DestinationRuleCache struct {
+ cache map[string]*DestinationRuleItem
+ mutex *sync.RWMutex
+}
+
+func NewDestinationRuleCache() *DestinationRuleCache {
+ return &DestinationRuleCache{
+ cache: map[string]*DestinationRuleItem{},
+ mutex: &sync.RWMutex{},
+ }
+}
+
+func (d *DestinationRuleCache) getKey(dr *networking.DestinationRule) string {
+ return makeKey(dr.Name, dr.Namespace)
+}
+
+func makeKey(str1, str2 string) string {
+ return str1 + "/" + str2
+}
+
+func (d *DestinationRuleCache) Put(dr *networking.DestinationRule) {
+ defer d.mutex.Unlock()
+ d.mutex.Lock()
+
+ key := d.getKey(dr)
+
+ d.cache[key] = &DestinationRuleItem{
+ DestinationRule: dr,
+ Status: common.ProcessingInProgress,
+ }
+}
+
+func (d *DestinationRuleCache) Get(identity string, namespace string) *networking.DestinationRule {
+ defer d.mutex.Unlock()
+ d.mutex.Lock()
+
+ drItem, ok := d.cache[makeKey(identity, namespace)]
+ if ok {
+ return drItem.DestinationRule
+ }
+
+ log.Infof("no destinationrule found in cache for identity=%s", identity)
+ return nil
+}
+
+func (d *DestinationRuleCache) Delete(dr *networking.DestinationRule) {
+ defer d.mutex.Unlock()
+ d.mutex.Lock()
+
+ key := d.getKey(dr)
+
+ _, ok := d.cache[key]
+ if ok {
+ delete(d.cache, key)
+ }
}
-func NewDestinationRuleController(clusterID string, stopCh <-chan struct{}, handler DestinationRuleHandler, config *rest.Config, resyncPeriod time.Duration) (*DestinationRuleController, error) {
+func (d *DestinationRuleCache) GetDRProcessStatus(dr *networking.DestinationRule) string {
+ defer d.mutex.Unlock()
+ d.mutex.Lock()
+
+ key := d.getKey(dr)
+
+ dc, ok := d.cache[key]
+ if ok {
+ return dc.Status
+ }
+ return common.NotProcessed
+}
+
+func (d *DestinationRuleCache) UpdateDRProcessStatus(dr *networking.DestinationRule, status string) error {
+ defer d.mutex.Unlock()
+ d.mutex.Lock()
+
+ key := d.getKey(dr)
+
+ dc, ok := d.cache[key]
+ if ok {
+
+ dc.Status = status
+ d.cache[key] = dc
+ return nil
+ }
+
+ return fmt.Errorf("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", "Update", "DestinationRule",
+ dr.Name, dr.Namespace, "", "nothing to update, destinationrule not found in cache")
+}
+
+func NewDestinationRuleController(stopCh <-chan struct{}, handler DestinationRuleHandler, clusterID string, config *rest.Config, resyncPeriod time.Duration, clientLoader loader.ClientLoader) (*DestinationRuleController, error) {
drController := DestinationRuleController{}
drController.DestinationRuleHandler = handler
+ drCache := DestinationRuleCache{}
+ drCache.cache = make(map[string]*DestinationRuleItem)
+ drCache.mutex = &sync.RWMutex{}
+ drController.Cache = &drCache
+
+ drController.Cluster = clusterID
+
var err error
- ic, err := versioned.NewForConfig(config)
+ ic, err := clientLoader.LoadIstioClientFromConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to create destination rule controller k8s client: %v", err)
}
@@ -48,24 +155,68 @@ func NewDestinationRuleController(clusterID string, stopCh <-chan struct{}, hand
drController.informer = informers.NewDestinationRuleInformer(ic, k8sV1.NamespaceAll, resyncPeriod, cache.Indexers{})
- mcd := admiral.NewMonitoredDelegator(&drController, clusterID, "destinationrule")
- admiral.NewController("destinationrule-ctrl-"+config.Host, stopCh, mcd, drController.informer)
+ admiral.NewController("destinationrule-ctrl", config.Host, stopCh, &drController, drController.informer)
return &drController, nil
}
-func (sec *DestinationRuleController) Added(ctx context.Context, ojb interface{}) {
- dr := ojb.(*networking.DestinationRule)
- sec.DestinationRuleHandler.Added(ctx, dr)
+func (drc *DestinationRuleController) Added(ctx context.Context, obj interface{}) error {
+ dr, ok := obj.(*networking.DestinationRule)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.DestinationRule", obj)
+ }
+ drc.Cache.Put(dr)
+ return drc.DestinationRuleHandler.Added(ctx, dr)
+}
+
+func (drc *DestinationRuleController) Updated(ctx context.Context, obj interface{}, oldObj interface{}) error {
+ dr, ok := obj.(*networking.DestinationRule)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.DestinationRule", obj)
+ }
+ drc.Cache.Put(dr)
+ return drc.DestinationRuleHandler.Updated(ctx, dr)
+}
+
+func (drc *DestinationRuleController) Deleted(ctx context.Context, obj interface{}) error {
+ dr, ok := obj.(*networking.DestinationRule)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.DestinationRule", obj)
+ }
+ drc.Cache.Delete(dr)
+ return drc.DestinationRuleHandler.Deleted(ctx, dr)
}
-func (sec *DestinationRuleController) Updated(ctx context.Context, ojb interface{}, oldObj interface{}) {
- dr := ojb.(*networking.DestinationRule)
- sec.DestinationRuleHandler.Updated(ctx, dr)
+func (drc *DestinationRuleController) GetProcessItemStatus(obj interface{}) (string, error) {
+ dr, ok := obj.(*networking.DestinationRule)
+ if !ok {
+ return common.NotProcessed, fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.DestinationRule", obj)
+ }
+ return drc.Cache.GetDRProcessStatus(dr), nil
+}
+
+func (drc *DestinationRuleController) UpdateProcessItemStatus(obj interface{}, status string) error {
+ dr, ok := obj.(*networking.DestinationRule)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.DestinationRule", obj)
+ }
+ return drc.Cache.UpdateDRProcessStatus(dr, status)
}
-func (sec *DestinationRuleController) Deleted(ctx context.Context, ojb interface{}) {
- dr := ojb.(*networking.DestinationRule)
- sec.DestinationRuleHandler.Deleted(ctx, dr)
+func (drc *DestinationRuleController) LogValueOfAdmiralIoIgnore(obj interface{}) {
+ dr, ok := obj.(*networking.DestinationRule)
+ if !ok {
+ return
+ }
+ if len(dr.Annotations) > 0 && dr.Annotations[common.AdmiralIgnoreAnnotation] == "true" {
+ log.Infof("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", "admiralIoIgnoreAnnotationCheck", "DestinationRule", dr.Name, dr.Namespace, "", "Value=true")
+ }
+}
+func (drc *DestinationRuleController) Get(ctx context.Context, isRetry bool, obj interface{}) (interface{}, error) {
+ /*dr, ok := obj.(*networking.DestinationRule)
+ if ok && d.IstioClient != nil {
+ return d.IstioClient.NetworkingV1alpha3().DestinationRules(dr.Namespace).Get(ctx, dr.Name, meta_v1.GetOptions{})
+ }*/
+ return nil, fmt.Errorf("istio client is not initialized, txId=%s", ctx.Value("txId"))
}
From fc163ce7643f5634ab39a14818eb80d4ce8811f5 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 15:59:09 -0700
Subject: [PATCH 112/235] copy destinationrule_test.go from main branch
---
.../controller/istio/destinationrule_test.go | 340 +++++++++++++++++-
1 file changed, 339 insertions(+), 1 deletion(-)
diff --git a/admiral/pkg/controller/istio/destinationrule_test.go b/admiral/pkg/controller/istio/destinationrule_test.go
index 5112e0c4..c4ce9106 100644
--- a/admiral/pkg/controller/istio/destinationrule_test.go
+++ b/admiral/pkg/controller/istio/destinationrule_test.go
@@ -2,18 +2,193 @@ package istio
import (
"context"
+ "fmt"
+ "strings"
+ "sync"
"testing"
"time"
+ coreV1 "k8s.io/api/core/v1"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+
"github.com/google/go-cmp/cmp"
"github.com/istio-ecosystem/admiral/admiral/pkg/test"
+ "github.com/stretchr/testify/assert"
"google.golang.org/protobuf/testing/protocmp"
v1alpha32 "istio.io/api/networking/v1alpha3"
"istio.io/client-go/pkg/apis/networking/v1alpha3"
+ networking "istio.io/client-go/pkg/apis/networking/v1alpha3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
)
+func TestDestinationRuleAdded(t *testing.T) {
+
+ mockDestinationRuleHandler := &test.MockDestinationRuleHandler{}
+ ctx := context.Background()
+ destinationRuleController := DestinationRuleController{
+ DestinationRuleHandler: mockDestinationRuleHandler,
+ Cache: NewDestinationRuleCache(),
+ }
+
+ testCases := []struct {
+ name string
+ destinationRule interface{}
+ expectedError error
+ }{
+ {
+ name: "Given context and DestinationRule " +
+ "When DestinationRule param is nil " +
+ "Then func should return an error",
+ destinationRule: nil,
+ expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.DestinationRule"),
+ },
+ {
+ name: "Given context and DestinationRule " +
+ "When DestinationRule param is not of type *v1alpha3.DestinationRule " +
+ "Then func should return an error",
+ destinationRule: struct{}{},
+ expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.DestinationRule"),
+ },
+ {
+ name: "Given context and DestinationRule " +
+ "When DestinationRule param is of type *v1alpha3.DestinationRule " +
+ "Then func should not return an error",
+ destinationRule: &networking.DestinationRule{},
+ expectedError: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ err := destinationRuleController.Added(ctx, tc.destinationRule)
+ if tc.expectedError != nil {
+ assert.NotNil(t, err)
+ assert.Equal(t, tc.expectedError.Error(), err.Error())
+ } else {
+ if err != nil {
+ assert.Fail(t, "expected error to be nil but got %v", err)
+ }
+ }
+
+ })
+ }
+
+}
+
+func TestDestinationRuleUpdated(t *testing.T) {
+
+ mockDestinationRuleHandler := &test.MockDestinationRuleHandler{}
+ ctx := context.Background()
+ destinationRuleController := DestinationRuleController{
+ DestinationRuleHandler: mockDestinationRuleHandler,
+ Cache: NewDestinationRuleCache(),
+ }
+
+ testCases := []struct {
+ name string
+ destinationRule interface{}
+ expectedError error
+ }{
+ {
+ name: "Given context and DestinationRule " +
+ "When DestinationRule param is nil " +
+ "Then func should return an error",
+ destinationRule: nil,
+ expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.DestinationRule"),
+ },
+ {
+ name: "Given context and DestinationRule " +
+ "When DestinationRule param is not of type *v1alpha3.DestinationRule " +
+ "Then func should return an error",
+ destinationRule: struct{}{},
+ expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.DestinationRule"),
+ },
+ {
+ name: "Given context and DestinationRule " +
+ "When DestinationRule param is of type *v1alpha3.DestinationRule " +
+ "Then func should not return an error",
+ destinationRule: &networking.DestinationRule{},
+ expectedError: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ err := destinationRuleController.Updated(ctx, tc.destinationRule, nil)
+ if tc.expectedError != nil {
+ assert.NotNil(t, err)
+ assert.Equal(t, tc.expectedError.Error(), err.Error())
+ } else {
+ if err != nil {
+ assert.Fail(t, "expected error to be nil but got %v", err)
+ }
+ }
+
+ })
+ }
+
+}
+
+func TestDestinationRuleDeleted(t *testing.T) {
+
+ mockDestinationRuleHandler := &test.MockDestinationRuleHandler{}
+ ctx := context.Background()
+ destinationRuleController := DestinationRuleController{
+ DestinationRuleHandler: mockDestinationRuleHandler,
+ Cache: NewDestinationRuleCache(),
+ }
+
+ testCases := []struct {
+ name string
+ destinationRule interface{}
+ expectedError error
+ }{
+ {
+ name: "Given context and DestinationRule " +
+ "When DestinationRule param is nil " +
+ "Then func should return an error",
+ destinationRule: nil,
+ expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.DestinationRule"),
+ },
+ {
+ name: "Given context and DestinationRule " +
+ "When DestinationRule param is not of type *v1alpha3.DestinationRule " +
+ "Then func should return an error",
+ destinationRule: struct{}{},
+ expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.DestinationRule"),
+ },
+ {
+ name: "Given context and DestinationRule " +
+ "When DestinationRule param is of type *v1alpha3.DestinationRule " +
+ "Then func should not return an error",
+ destinationRule: &networking.DestinationRule{},
+ expectedError: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ err := destinationRuleController.Deleted(ctx, tc.destinationRule)
+ if tc.expectedError != nil {
+ assert.NotNil(t, err)
+ assert.Equal(t, tc.expectedError.Error(), err.Error())
+ } else {
+ if err != nil {
+ assert.Fail(t, "expected error to be nil but got %v", err)
+ }
+ }
+
+ })
+ }
+
+}
+
func TestNewDestinationRuleController(t *testing.T) {
config, err := clientcmd.BuildConfigFromFlags("", "../../test/resources/admins@fake-cluster.k8s.local")
if err != nil {
@@ -22,7 +197,7 @@ func TestNewDestinationRuleController(t *testing.T) {
stop := make(chan struct{})
handler := test.MockDestinationRuleHandler{}
- destinationRuleController, err := NewDestinationRuleController("", stop, &handler, config, time.Duration(1000))
+ destinationRuleController, err := NewDestinationRuleController(stop, &handler, "cluster-id1", config, time.Duration(1000), loader.GetFakeClientLoader())
if err != nil {
t.Errorf("Unexpected err %v", err)
@@ -54,3 +229,166 @@ func TestNewDestinationRuleController(t *testing.T) {
t.Errorf("Handler should have no obj")
}
}
+
+// TODO: This is just a placeholder for when we add diff check for other types
+func TestDestinationRuleGetProcessItemStatus(t *testing.T) {
+ destinationRuleController := DestinationRuleController{}
+ testCases := []struct {
+ name string
+ obj interface{}
+ expectedRes string
+ }{
+ {
+ name: "TODO: Currently always returns false",
+ obj: nil,
+ expectedRes: common.NotProcessed,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ res, _ := destinationRuleController.GetProcessItemStatus(tc.obj)
+ assert.Equal(t, tc.expectedRes, res)
+ })
+ }
+}
+
+func TestDestinationRuleUpdateProcessItemStatus(t *testing.T) {
+ var (
+ serviceAccount = &coreV1.ServiceAccount{}
+
+ dr1 = &networking.DestinationRule{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "debug-incache",
+ Namespace: "namespace1",
+ Annotations: map[string]string{"other-annotation": "value"}}}
+
+ dr2 = &networking.DestinationRule{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "debug2-incache",
+ Namespace: "namespace1",
+ Annotations: map[string]string{"other-annotation": "value"}}}
+
+ drNotInCache = &networking.DestinationRule{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "debug",
+ Namespace: "namespace1",
+ Annotations: map[string]string{"other-annotation": "value"}}}
+
+ diffNsDrNotInCache = &networking.DestinationRule{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "debug",
+ Namespace: "namespace2",
+ Annotations: map[string]string{"other-annotation": "value"}}}
+ )
+
+ drCache := &DestinationRuleCache{
+ cache: make(map[string]*DestinationRuleItem),
+ mutex: &sync.RWMutex{},
+ }
+
+ destinationRuleController := &DestinationRuleController{
+ Cache: drCache,
+ }
+
+ drCache.Put(dr1)
+ drCache.Put(dr2)
+
+ cases := []struct {
+ name string
+ obj interface{}
+ statusToSet string
+ expectedErr error
+ expectedStatus string
+ }{
+ {
+ name: "Given dr cache has a valid dr in its cache, " +
+ "Then, the status for the valid dr should be updated to processed",
+ obj: dr1,
+ statusToSet: common.Processed,
+ expectedErr: nil,
+ expectedStatus: common.Processed,
+ },
+ {
+ name: "Given dr cache has a valid dr in its cache, " +
+ "Then, the status for the valid dr should be updated to not processed",
+ obj: dr2,
+ statusToSet: common.NotProcessed,
+ expectedErr: nil,
+ expectedStatus: common.NotProcessed,
+ },
+ {
+ name: "Given dr cache does not has a valid dr in its cache, " +
+ "Then, the status for the valid dr should be not processed, " +
+ "And an error should be returned with the dr not found message",
+ obj: drNotInCache,
+ statusToSet: common.NotProcessed,
+ expectedErr: fmt.Errorf("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", "Update", "DestinationRule",
+ drNotInCache.Name, drNotInCache.Namespace, "", "nothing to update, destinationrule not found in cache"),
+ expectedStatus: common.NotProcessed,
+ },
+ {
+ name: "Given dr cache does not has a valid dr in its cache, " +
+ "And dr is in a different namespace, " +
+ "Then, the status for the valid dr should be not processed, " +
+ "And an error should be returned with the dr not found message",
+ obj: diffNsDrNotInCache,
+ statusToSet: common.NotProcessed,
+ expectedErr: fmt.Errorf("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", "Update", "DestinationRule",
+ diffNsDrNotInCache.Name, diffNsDrNotInCache.Namespace, "", "nothing to update, destinationrule not found in cache"),
+ expectedStatus: common.NotProcessed,
+ },
+ {
+ name: "Given ServiceAccount is passed to the function, " +
+ "Then, the function should not panic, " +
+ "And return an error",
+ obj: serviceAccount,
+ expectedErr: fmt.Errorf("type assertion failed"),
+ expectedStatus: common.NotProcessed,
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ err := destinationRuleController.UpdateProcessItemStatus(c.obj, c.statusToSet)
+ if err != nil && c.expectedErr == nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ if err == nil && c.expectedErr != nil {
+ t.Errorf("expected error: %v", c.expectedErr)
+ }
+ if err != nil && c.expectedErr != nil && !strings.Contains(err.Error(), c.expectedErr.Error()) {
+ t.Errorf("expected: %v, got: %v", c.expectedErr, err)
+ }
+ status, _ := destinationRuleController.GetProcessItemStatus(c.obj)
+ assert.Equal(t, c.expectedStatus, status)
+ })
+ }
+}
+
+func TestLogValueOfAdmiralIoIgnore(t *testing.T) {
+ // Test case 1: obj is not a DestinationRule object
+ sec := &DestinationRuleController{}
+ sec.LogValueOfAdmiralIoIgnore("not a destination rule")
+ // No error should occur
+
+ // Test case 2: DestinationRule has no annotations
+ sec = &DestinationRuleController{}
+ sec.LogValueOfAdmiralIoIgnore(&networking.DestinationRule{})
+ // No error should occur
+
+ // Test case 3: AdmiralIgnoreAnnotation is not set
+ sec = &DestinationRuleController{}
+ dr := &networking.DestinationRule{
+ ObjectMeta: v1.ObjectMeta{
+ Annotations: map[string]string{"other-annotation": "value"}}}
+ sec.LogValueOfAdmiralIoIgnore(dr)
+ // No error should occur
+
+ // Test case 4: AdmiralIgnoreAnnotation is set
+ sec = &DestinationRuleController{}
+ dr = &networking.DestinationRule{ObjectMeta: v1.ObjectMeta{
+ Annotations: map[string]string{common.AdmiralIgnoreAnnotation: "true"}}}
+ sec.LogValueOfAdmiralIoIgnore(dr)
+ // No error should occur
+}
From 1c0d228f0baac860a0c974080674d9fb0d73947b Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 15:59:47 -0700
Subject: [PATCH 113/235] copy serviceentry.go from main branch
---
admiral/pkg/controller/istio/serviceentry.go | 201 +++++++++++++++++--
1 file changed, 180 insertions(+), 21 deletions(-)
diff --git a/admiral/pkg/controller/istio/serviceentry.go b/admiral/pkg/controller/istio/serviceentry.go
index 2fd97375..7c2ab9ef 100644
--- a/admiral/pkg/controller/istio/serviceentry.go
+++ b/admiral/pkg/controller/istio/serviceentry.go
@@ -5,6 +5,12 @@ import (
"fmt"
"time"
+ "sync"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ log "github.com/sirupsen/logrus"
+
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
networking "istio.io/client-go/pkg/apis/networking/v1alpha3"
versioned "istio.io/client-go/pkg/clientset/versioned"
@@ -16,30 +22,137 @@ import (
// Handler interface contains the methods that are required
type ServiceEntryHandler interface {
- Added(obj *networking.ServiceEntry)
- Updated(obj *networking.ServiceEntry)
- Deleted(obj *networking.ServiceEntry)
-}
-
-type ServiceEntryEntry struct {
- Identity string
- ServiceEntry *networking.ServiceEntry
+ Added(obj *networking.ServiceEntry) error
+ Updated(obj *networking.ServiceEntry) error
+ Deleted(obj *networking.ServiceEntry) error
}
type ServiceEntryController struct {
IstioClient versioned.Interface
ServiceEntryHandler ServiceEntryHandler
informer cache.SharedIndexInformer
+ Cache *ServiceEntryCache
+ Cluster string
+}
+
+type ServiceEntryItem struct {
+ ServiceEntry *networking.ServiceEntry
+ Status string
+}
+
+type ServiceEntryCache struct {
+ cache map[string]map[string]*ServiceEntryItem
+ mutex *sync.RWMutex
+}
+
+func NewServiceEntryCache() *ServiceEntryCache {
+ return &ServiceEntryCache{
+ cache: map[string]map[string]*ServiceEntryItem{},
+ mutex: &sync.RWMutex{},
+ }
+}
+
+func (d *ServiceEntryCache) getKey(se *networking.ServiceEntry) string {
+ return se.Name
+}
+
+func (d *ServiceEntryCache) Put(se *networking.ServiceEntry, cluster string) {
+ defer d.mutex.Unlock()
+ d.mutex.Lock()
+ key := d.getKey(se)
+
+ var (
+ seInCluster map[string]*ServiceEntryItem
+ )
+
+ if value, ok := d.cache[cluster]; !ok {
+ seInCluster = make(map[string]*ServiceEntryItem)
+ } else {
+ seInCluster = value
+ }
+
+ seInCluster[key] = &ServiceEntryItem{
+ ServiceEntry: se,
+ Status: common.ProcessingInProgress,
+ }
+
+ d.cache[cluster] = seInCluster
+}
+
+func (d *ServiceEntryCache) Get(identity string, cluster string) *networking.ServiceEntry {
+ defer d.mutex.Unlock()
+ d.mutex.Lock()
+
+ seInCluster, ok := d.cache[cluster]
+ if ok {
+ se, ok := seInCluster[identity]
+ if ok {
+ return se.ServiceEntry
+ }
+ }
+ log.Infof("no service entry found in cache for identity=%s cluster=%s", identity, cluster)
+ return nil
+}
+
+func (d *ServiceEntryCache) Delete(se *networking.ServiceEntry, cluster string) {
+ defer d.mutex.Unlock()
+ d.mutex.Lock()
+
+ seInCluster, ok := d.cache[cluster]
+ if ok {
+ delete(seInCluster, d.getKey(se))
+ }
+}
+
+func (d *ServiceEntryCache) GetSEProcessStatus(se *networking.ServiceEntry, cluster string) string {
+ defer d.mutex.Unlock()
+ d.mutex.Lock()
+
+ seInCluster, ok := d.cache[cluster]
+ if ok {
+ key := d.getKey(se)
+ sec, ok := seInCluster[key]
+ if ok {
+ return sec.Status
+ }
+ }
+
+ return common.NotProcessed
}
-func NewServiceEntryController(clusterID string, stopCh <-chan struct{}, handler ServiceEntryHandler, config *rest.Config, resyncPeriod time.Duration) (*ServiceEntryController, error) {
+func (d *ServiceEntryCache) UpdateSEProcessStatus(se *networking.ServiceEntry, cluster string, status string) error {
+ defer d.mutex.Unlock()
+ d.mutex.Lock()
+
+ seInCluster, ok := d.cache[cluster]
+ if ok {
+ key := d.getKey(se)
+ sec, ok := seInCluster[key]
+ if ok {
+ sec.Status = status
+ seInCluster[key] = sec
+ return nil
+ }
+ }
+
+ return fmt.Errorf("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", "Update", "ServiceEntry",
+ se.Name, se.Namespace, "", "nothing to update, serviceentry not found in cache")
+}
+func NewServiceEntryController(stopCh <-chan struct{}, handler ServiceEntryHandler, clusterID string, config *rest.Config, resyncPeriod time.Duration, clientLoader loader.ClientLoader) (*ServiceEntryController, error) {
seController := ServiceEntryController{}
seController.ServiceEntryHandler = handler
+ seCache := ServiceEntryCache{}
+ seCache.cache = make(map[string]map[string]*ServiceEntryItem)
+ seCache.mutex = &sync.RWMutex{}
+ seController.Cache = &seCache
+
+ seController.Cluster = clusterID
+
var err error
- ic, err := versioned.NewForConfig(config)
+ ic, err := clientLoader.LoadIstioClientFromConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to create service entry k8s client: %v", err)
}
@@ -48,23 +161,69 @@ func NewServiceEntryController(clusterID string, stopCh <-chan struct{}, handler
seController.informer = informers.NewServiceEntryInformer(ic, k8sV1.NamespaceAll, resyncPeriod, cache.Indexers{})
- mcd := admiral.NewMonitoredDelegator(&seController, clusterID, "serviceentry")
- admiral.NewController("serviceentry-ctrl-"+config.Host, stopCh, mcd, seController.informer)
+ admiral.NewController("serviceentry-ctrl", config.Host, stopCh, &seController, seController.informer)
return &seController, nil
}
-func (sec *ServiceEntryController) Added(ctx context.Context, ojb interface{}) {
- se := ojb.(*networking.ServiceEntry)
- sec.ServiceEntryHandler.Added(se)
+func (sec *ServiceEntryController) Added(ctx context.Context, obj interface{}) error {
+ se, ok := obj.(*networking.ServiceEntry)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.ServiceEntry", obj)
+ }
+ sec.Cache.Put(se, sec.Cluster)
+ return sec.ServiceEntryHandler.Added(se)
+}
+
+func (sec *ServiceEntryController) Updated(ctx context.Context, obj interface{}, oldObj interface{}) error {
+ se, ok := obj.(*networking.ServiceEntry)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.ServiceEntry", obj)
+ }
+ sec.Cache.Put(se, sec.Cluster)
+ return sec.ServiceEntryHandler.Updated(se)
+}
+
+func (sec *ServiceEntryController) Deleted(ctx context.Context, obj interface{}) error {
+ se, ok := obj.(*networking.ServiceEntry)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.ServiceEntry", obj)
+ }
+
+ sec.Cache.Delete(se, sec.Cluster)
+ return sec.ServiceEntryHandler.Deleted(se)
+}
+
+func (sec *ServiceEntryController) GetProcessItemStatus(obj interface{}) (string, error) {
+ se, ok := obj.(*networking.ServiceEntry)
+ if !ok {
+ return common.NotProcessed, fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.ServiceEntry", obj)
+ }
+ return sec.Cache.GetSEProcessStatus(se, sec.Cluster), nil
}
-func (sec *ServiceEntryController) Updated(ctx context.Context, ojb interface{}, oldObj interface{}) {
- se := ojb.(*networking.ServiceEntry)
- sec.ServiceEntryHandler.Updated(se)
+func (sec *ServiceEntryController) UpdateProcessItemStatus(obj interface{}, status string) error {
+ se, ok := obj.(*networking.ServiceEntry)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.ServiceEntry", obj)
+ }
+ return sec.Cache.UpdateSEProcessStatus(se, sec.Cluster, status)
+}
+
+func (sec *ServiceEntryController) LogValueOfAdmiralIoIgnore(obj interface{}) {
+ se, ok := obj.(*networking.ServiceEntry)
+ if !ok {
+ return
+ }
+ if len(se.Annotations) > 0 && se.Annotations[common.AdmiralIgnoreAnnotation] == "true" {
+ log.Infof("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", "admiralIoIgnoreAnnotationCheck", "ServiceEntry", se.Name, se.Namespace, "", "Value=true")
+ }
}
-func (sec *ServiceEntryController) Deleted(ctx context.Context, ojb interface{}) {
- se := ojb.(*networking.ServiceEntry)
- sec.ServiceEntryHandler.Deleted(se)
+func (sec *ServiceEntryController) Get(ctx context.Context, isRetry bool, obj interface{}) (interface{}, error) {
+ /*se, ok := obj.(*networking.ServiceEntry)
+ if ok && sec.IstioClient != nil {
+ return sec.IstioClient.NetworkingV1alpha3().ServiceEntries(se.Namespace).Get(ctx, se.Name, meta_v1.GetOptions{})
+ }*/
+ return nil, fmt.Errorf("istio client is not initialized, txId=%s", ctx.Value("txId"))
}
From 2b745255dcf121341c44cf81ace83203ee6ad19d Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:00:15 -0700
Subject: [PATCH 114/235] copy serviceentry_test.go from main branch
---
.../pkg/controller/istio/serviceentry_test.go | 349 +++++++++++++++++-
1 file changed, 348 insertions(+), 1 deletion(-)
diff --git a/admiral/pkg/controller/istio/serviceentry_test.go b/admiral/pkg/controller/istio/serviceentry_test.go
index 31b0ba20..bc56e387 100644
--- a/admiral/pkg/controller/istio/serviceentry_test.go
+++ b/admiral/pkg/controller/istio/serviceentry_test.go
@@ -2,18 +2,204 @@ package istio
import (
"context"
+ "fmt"
+ "strings"
+ "sync"
"testing"
"time"
+ coreV1 "k8s.io/api/core/v1"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+
"github.com/google/go-cmp/cmp"
"github.com/istio-ecosystem/admiral/admiral/pkg/test"
+ "github.com/stretchr/testify/assert"
"google.golang.org/protobuf/testing/protocmp"
"istio.io/api/networking/v1alpha3"
+ networking "istio.io/client-go/pkg/apis/networking/v1alpha3"
v1alpha32 "istio.io/client-go/pkg/apis/networking/v1alpha3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
)
+func TestServiceEntryAdded(t *testing.T) {
+
+ mockServiceEntryHandler := &test.MockServiceEntryHandler{}
+ ctx := context.Background()
+ serviceEntryController := ServiceEntryController{
+ ServiceEntryHandler: mockServiceEntryHandler,
+ Cluster: "testCluster",
+ Cache: &ServiceEntryCache{
+ cache: map[string]map[string]*ServiceEntryItem{},
+ mutex: &sync.RWMutex{},
+ },
+ }
+
+ testCases := []struct {
+ name string
+ serviceEntry interface{}
+ expectedError error
+ }{
+ {
+ name: "Given context and ServiceEntry " +
+ "When ServiceEntry param is nil " +
+ "Then func should return an error",
+ serviceEntry: nil,
+ expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.ServiceEntry"),
+ },
+ {
+ name: "Given context and ServiceEntry " +
+ "When ServiceEntry param is not of type *v1alpha3.ServiceEntry " +
+ "Then func should return an error",
+ serviceEntry: struct{}{},
+ expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.ServiceEntry"),
+ },
+ {
+ name: "Given context and ServiceEntry " +
+ "When ServiceEntry param is of type *v1alpha3.ServiceEntry " +
+ "Then func should not return an error",
+ serviceEntry: &networking.ServiceEntry{},
+ expectedError: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ err := serviceEntryController.Added(ctx, tc.serviceEntry)
+ if tc.expectedError != nil {
+ assert.NotNil(t, err)
+ assert.Equal(t, tc.expectedError.Error(), err.Error())
+ } else {
+ if err != nil {
+ assert.Fail(t, "expected error to be nil but got %v", err)
+ }
+ }
+
+ })
+ }
+
+}
+
+func TestServiceEntryUpdated(t *testing.T) {
+
+ mockServiceEntryHandler := &test.MockServiceEntryHandler{}
+ ctx := context.Background()
+ serviceEntryController := ServiceEntryController{
+ ServiceEntryHandler: mockServiceEntryHandler,
+ Cluster: "testCluster",
+ Cache: &ServiceEntryCache{
+ cache: map[string]map[string]*ServiceEntryItem{},
+ mutex: &sync.RWMutex{},
+ },
+ }
+
+ testCases := []struct {
+ name string
+ serviceEntry interface{}
+ expectedError error
+ }{
+ {
+ name: "Given context and ServiceEntry " +
+ "When ServiceEntry param is nil " +
+ "Then func should return an error",
+ serviceEntry: nil,
+ expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.ServiceEntry"),
+ },
+ {
+ name: "Given context and ServiceEntry " +
+ "When ServiceEntry param is not of type *v1alpha3.ServiceEntry " +
+ "Then func should return an error",
+ serviceEntry: struct{}{},
+ expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.ServiceEntry"),
+ },
+ {
+ name: "Given context and ServiceEntry " +
+ "When ServiceEntry param is of type *v1alpha3.ServiceEntry " +
+ "Then func should not return an error",
+ serviceEntry: &networking.ServiceEntry{},
+ expectedError: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ err := serviceEntryController.Updated(ctx, tc.serviceEntry, nil)
+ if tc.expectedError != nil {
+ assert.NotNil(t, err)
+ assert.Equal(t, tc.expectedError.Error(), err.Error())
+ } else {
+ if err != nil {
+ assert.Fail(t, "expected error to be nil but got %v", err)
+ }
+ }
+
+ })
+ }
+
+}
+
+func TestServiceEntryDeleted(t *testing.T) {
+
+ mockServiceEntryHandler := &test.MockServiceEntryHandler{}
+ ctx := context.Background()
+ serviceEntryController := ServiceEntryController{
+ ServiceEntryHandler: mockServiceEntryHandler,
+ Cluster: "testCluster",
+ Cache: &ServiceEntryCache{
+ cache: map[string]map[string]*ServiceEntryItem{},
+ mutex: &sync.RWMutex{},
+ },
+ }
+
+ testCases := []struct {
+ name string
+ serviceEntry interface{}
+ expectedError error
+ }{
+ {
+ name: "Given context and ServiceEntry " +
+ "When ServiceEntry param is nil " +
+ "Then func should return an error",
+ serviceEntry: nil,
+ expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.ServiceEntry"),
+ },
+ {
+ name: "Given context and ServiceEntry " +
+ "When ServiceEntry param is not of type *v1alpha3.ServiceEntry " +
+ "Then func should return an error",
+ serviceEntry: struct{}{},
+ expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.ServiceEntry"),
+ },
+ {
+ name: "Given context and ServiceEntry " +
+ "When ServiceEntry param is of type *v1alpha3.ServiceEntry " +
+ "Then func should not return an error",
+ serviceEntry: &networking.ServiceEntry{},
+ expectedError: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ err := serviceEntryController.Deleted(ctx, tc.serviceEntry)
+ if tc.expectedError != nil {
+ assert.NotNil(t, err)
+ assert.Equal(t, tc.expectedError.Error(), err.Error())
+ } else {
+ if err != nil {
+ assert.Fail(t, "expected error to be nil but got %v", err)
+ }
+ }
+
+ })
+ }
+
+}
+
func TestNewServiceEntryController(t *testing.T) {
config, err := clientcmd.BuildConfigFromFlags("", "../../test/resources/admins@fake-cluster.k8s.local")
if err != nil {
@@ -22,7 +208,7 @@ func TestNewServiceEntryController(t *testing.T) {
stop := make(chan struct{})
handler := test.MockServiceEntryHandler{}
- serviceEntryController, err := NewServiceEntryController("test", stop, &handler, config, time.Duration(1000))
+ serviceEntryController, err := NewServiceEntryController(stop, &handler, "testCluster", config, time.Duration(1000), loader.GetFakeClientLoader())
if err != nil {
t.Errorf("Unexpected err %v", err)
@@ -55,3 +241,164 @@ func TestNewServiceEntryController(t *testing.T) {
t.Errorf("Handler should have no obj")
}
}
+
+// TODO: This is just a placeholder for when we add diff check for other types
+func TestServiceEntryGetProcessItemStatus(t *testing.T) {
+ serviceEntryController := ServiceEntryController{}
+ testCases := []struct {
+ name string
+ obj interface{}
+ expectedRes string
+ }{
+ {
+ name: "TODO: Currently always returns false",
+ obj: nil,
+ expectedRes: common.NotProcessed,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ res, _ := serviceEntryController.GetProcessItemStatus(tc.obj)
+ assert.Equal(t, tc.expectedRes, res)
+ })
+ }
+}
+
+func TestServiceEntryUpdateProcessItemStatus(t *testing.T) {
+ var (
+ serviceAccount = &coreV1.ServiceAccount{}
+
+ se1 = &networking.ServiceEntry{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "debug-incache",
+ Namespace: "namespace1",
+ Annotations: map[string]string{"other-annotation": "value"}}}
+
+ se2 = &networking.ServiceEntry{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "debug2-incache",
+ Namespace: "namespace1",
+ Annotations: map[string]string{"other-annotation": "value"}}}
+
+ seNotInCache = &networking.ServiceEntry{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "debug",
+ Namespace: "namespace1",
+ Annotations: map[string]string{"other-annotation": "value"}}}
+
+ diffNsSeNotInCache = &networking.ServiceEntry{
+ ObjectMeta: v1.ObjectMeta{
+ Name: "debug",
+ Namespace: "namespace2",
+ Annotations: map[string]string{"other-annotation": "value"}}}
+ )
+
+ seCache := &ServiceEntryCache{
+ cache: make(map[string]map[string]*ServiceEntryItem),
+ mutex: &sync.RWMutex{},
+ }
+
+ serviceentryController := &ServiceEntryController{
+ Cluster: "cluster1",
+ Cache: seCache,
+ }
+
+ seCache.Put(se1, "cluster1")
+ seCache.Put(se2, "cluster1")
+
+ cases := []struct {
+ name string
+ obj interface{}
+ statusToSet string
+ expectedErr error
+ expectedStatus string
+ }{
+ {
+ name: "Given se cache has a valid se in its cache, " +
+ "Then, the status for the valid se should be updated to processed",
+ obj: se1,
+ statusToSet: common.Processed,
+ expectedErr: nil,
+ expectedStatus: common.Processed,
+ },
+ {
+ name: "Given se cache has a valid se in its cache, " +
+ "Then, the status for the valid se should be updated to not processed",
+ obj: se2,
+ statusToSet: common.NotProcessed,
+ expectedErr: nil,
+ expectedStatus: common.NotProcessed,
+ },
+ {
+ name: "Given se cache does not has a valid se in its cache, " +
+ "Then, the status for the valid se should be not processed, " +
+ "And an error should be returned with the se not found message",
+ obj: seNotInCache,
+ statusToSet: common.NotProcessed,
+ expectedErr: fmt.Errorf("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", "Update", "ServiceEntry",
+ seNotInCache.Name, seNotInCache.Namespace, "", "nothing to update, serviceentry not found in cache"),
+ expectedStatus: common.NotProcessed,
+ },
+ {
+ name: "Given se cache does not has a valid se in its cache, " +
+ "And se is in a different namespace, " +
+ "Then, the status for the valid se should be not processed, " +
+ "And an error should be returned with the se not found message",
+ obj: diffNsSeNotInCache,
+ statusToSet: common.NotProcessed,
+ expectedErr: fmt.Errorf("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", "Update", "ServiceEntry",
+ diffNsSeNotInCache.Name, diffNsSeNotInCache.Namespace, "", "nothing to update, serviceentry not found in cache"),
+ expectedStatus: common.NotProcessed,
+ },
+ {
+ name: "Given ServiceAccount is passed to the function, " +
+ "Then, the function should not panic, " +
+ "And return an error",
+ obj: serviceAccount,
+ expectedErr: fmt.Errorf("type assertion failed"),
+ expectedStatus: common.NotProcessed,
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ err := serviceentryController.UpdateProcessItemStatus(c.obj, c.statusToSet)
+ if err != nil && c.expectedErr == nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ if err == nil && c.expectedErr != nil {
+ t.Errorf("expected error: %v", c.expectedErr)
+ }
+ if err != nil && c.expectedErr != nil && !strings.Contains(err.Error(), c.expectedErr.Error()) {
+ t.Errorf("expected: %v, got: %v", c.expectedErr, err)
+ }
+ status, _ := serviceentryController.GetProcessItemStatus(c.obj)
+ assert.Equal(t, c.expectedStatus, status)
+ })
+ }
+}
+
+func TestServiceEntryLogValueOfAdmiralIoIgnore(t *testing.T) {
+ // Test case 1: obj is not a ServiceEntry object
+ sec := &ServiceEntryController{}
+ sec.LogValueOfAdmiralIoIgnore("not a service entry")
+ // No error should occur
+
+ // Test case 2: ServiceEntry has no annotations
+ sec = &ServiceEntryController{}
+ sec.LogValueOfAdmiralIoIgnore(&networking.ServiceEntry{})
+ // No error should occur
+
+ // Test case 3: AdmiralIgnoreAnnotation is not set
+ sec = &ServiceEntryController{}
+ se := &networking.ServiceEntry{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{"other-annotation": "value"}}}
+ sec.LogValueOfAdmiralIoIgnore(se)
+ // No error should occur
+
+ // Test case 4: AdmiralIgnoreAnnotation is set in annotations
+ sec = &ServiceEntryController{}
+ se = &networking.ServiceEntry{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{common.AdmiralIgnoreAnnotation: "true"}}}
+ sec.LogValueOfAdmiralIoIgnore(se)
+ // No error should occur
+}
From 8a13bcc4a907d327f9a5a1092d6b11aa1e6292c3 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:00:55 -0700
Subject: [PATCH 115/235] copy sidecar.go from main branch
---
admiral/pkg/controller/istio/sidecar.go | 61 ++++++++++++++++++-------
1 file changed, 45 insertions(+), 16 deletions(-)
diff --git a/admiral/pkg/controller/istio/sidecar.go b/admiral/pkg/controller/istio/sidecar.go
index 9a59a112..12b8a1eb 100644
--- a/admiral/pkg/controller/istio/sidecar.go
+++ b/admiral/pkg/controller/istio/sidecar.go
@@ -5,6 +5,9 @@ import (
"fmt"
"time"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
networking "istio.io/client-go/pkg/apis/networking/v1alpha3"
versioned "istio.io/client-go/pkg/clientset/versioned"
@@ -16,9 +19,9 @@ import (
// SidecarHandler interface contains the methods that are required
type SidecarHandler interface {
- Added(ctx context.Context, obj *networking.Sidecar)
- Updated(ctx context.Context, obj *networking.Sidecar)
- Deleted(ctx context.Context, obj *networking.Sidecar)
+ Added(ctx context.Context, obj *networking.Sidecar) error
+ Updated(ctx context.Context, obj *networking.Sidecar) error
+ Deleted(ctx context.Context, obj *networking.Sidecar) error
}
type SidecarEntry struct {
@@ -32,14 +35,14 @@ type SidecarController struct {
informer cache.SharedIndexInformer
}
-func NewSidecarController(clusterID string, stopCh <-chan struct{}, handler SidecarHandler, config *rest.Config, resyncPeriod time.Duration) (*SidecarController, error) {
+func NewSidecarController(stopCh <-chan struct{}, handler SidecarHandler, config *rest.Config, resyncPeriod time.Duration, clientLoader loader.ClientLoader) (*SidecarController, error) {
sidecarController := SidecarController{}
sidecarController.SidecarHandler = handler
var err error
- ic, err := versioned.NewForConfig(config)
+ ic, err := clientLoader.LoadIstioClientFromConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to create sidecar controller k8s client: %v", err)
}
@@ -48,24 +51,50 @@ func NewSidecarController(clusterID string, stopCh <-chan struct{}, handler Side
sidecarController.informer = informers.NewSidecarInformer(ic, k8sV1.NamespaceAll, resyncPeriod, cache.Indexers{})
- mcd := admiral.NewMonitoredDelegator(&sidecarController, clusterID, "sidecar")
- admiral.NewController("sidecar-ctrl-"+config.Host, stopCh, mcd, sidecarController.informer)
+ admiral.NewController("sidecar-ctrl", config.Host, stopCh, &sidecarController, sidecarController.informer)
return &sidecarController, nil
}
-func (sec *SidecarController) Added(ctx context.Context, ojb interface{}) {
- sidecar := ojb.(*networking.Sidecar)
- sec.SidecarHandler.Added(ctx, sidecar)
+func (sec *SidecarController) Added(ctx context.Context, obj interface{}) error {
+ sidecar, ok := obj.(*networking.Sidecar)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.Sidecar", obj)
+ }
+ return sec.SidecarHandler.Added(ctx, sidecar)
+}
+
+func (sec *SidecarController) Updated(ctx context.Context, obj interface{}, oldObj interface{}) error {
+ sidecar, ok := obj.(*networking.Sidecar)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.Sidecar", obj)
+ }
+ return sec.SidecarHandler.Updated(ctx, sidecar)
}
-func (sec *SidecarController) Updated(ctx context.Context, ojb interface{}, oldObj interface{}) {
- sidecar := ojb.(*networking.Sidecar)
- sec.SidecarHandler.Updated(ctx, sidecar)
+func (sec *SidecarController) Deleted(ctx context.Context, obj interface{}) error {
+ sidecar, ok := obj.(*networking.Sidecar)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.Sidecar", obj)
+ }
+ return sec.SidecarHandler.Deleted(ctx, sidecar)
}
-func (sec *SidecarController) Deleted(ctx context.Context, ojb interface{}) {
- sidecar := ojb.(*networking.Sidecar)
- sec.SidecarHandler.Deleted(ctx, sidecar)
+func (sec *SidecarController) GetProcessItemStatus(obj interface{}) (string, error) {
+ return common.NotProcessed, nil
+}
+
+func (sec *SidecarController) UpdateProcessItemStatus(obj interface{}, status string) error {
+ return nil
+}
+
+func (sec *SidecarController) LogValueOfAdmiralIoIgnore(obj interface{}) {
+}
+func (sec *SidecarController) Get(ctx context.Context, isRetry bool, obj interface{}) (interface{}, error) {
+ /*sidecar, ok := obj.(*networking.Sidecar)
+ if ok && sec.IstioClient != nil {
+ return sec.IstioClient.NetworkingV1alpha3().Sidecars(sidecar.Namespace).Get(ctx, sidecar.Name, meta_v1.GetOptions{})
+ }*/
+ return nil, fmt.Errorf("istio client is not initialized, txId=%s", ctx.Value("txId"))
}
From 2a79a28b7207144f132886c455d158754614b7f2 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:01:24 -0700
Subject: [PATCH 116/235] copy sidecar_test.go from main branch
---
admiral/pkg/controller/istio/sidecar_test.go | 215 ++++++++++++++++++-
1 file changed, 214 insertions(+), 1 deletion(-)
diff --git a/admiral/pkg/controller/istio/sidecar_test.go b/admiral/pkg/controller/istio/sidecar_test.go
index 9b2a95a3..45ec3f95 100644
--- a/admiral/pkg/controller/istio/sidecar_test.go
+++ b/admiral/pkg/controller/istio/sidecar_test.go
@@ -2,11 +2,16 @@ package istio
import (
"context"
+ "fmt"
"testing"
"time"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+
"github.com/google/go-cmp/cmp"
"github.com/istio-ecosystem/admiral/admiral/pkg/test"
+ "github.com/stretchr/testify/assert"
"google.golang.org/protobuf/testing/protocmp"
v1alpha32 "istio.io/api/networking/v1alpha3"
"istio.io/client-go/pkg/apis/networking/v1alpha3"
@@ -14,6 +19,168 @@ import (
"k8s.io/client-go/tools/clientcmd"
)
+func TestSidecarAdded(t *testing.T) {
+
+ mockSidecarHandler := &test.MockSidecarHandler{}
+ ctx := context.Background()
+ sidecarController := SidecarController{
+ SidecarHandler: mockSidecarHandler,
+ }
+
+ testCases := []struct {
+ name string
+ sidecar interface{}
+ expectedError error
+ }{
+ {
+ name: "Given context and sidecar " +
+ "When sidecar param is nil " +
+ "Then func should return an error",
+ sidecar: nil,
+ expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.Sidecar"),
+ },
+ {
+ name: "Given context and sidecar " +
+ "When sidecar param is not of type *v1alpha3.Sidecar " +
+ "Then func should return an error",
+ sidecar: struct{}{},
+ expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.Sidecar"),
+ },
+ {
+ name: "Given context and Sidecar " +
+ "When Sidecar param is of type *v1alpha3.Sidecar " +
+ "Then func should not return an error",
+ sidecar: &v1alpha3.Sidecar{},
+ expectedError: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ err := sidecarController.Added(ctx, tc.sidecar)
+ if tc.expectedError != nil {
+ assert.NotNil(t, err)
+ assert.Equal(t, tc.expectedError.Error(), err.Error())
+ } else {
+ if err != nil {
+ assert.Fail(t, "expected error to be nil but got %v", err)
+ }
+ }
+
+ })
+ }
+
+}
+
+func TestSidecarUpdated(t *testing.T) {
+
+ mockSidecarHandler := &test.MockSidecarHandler{}
+ ctx := context.Background()
+ sidecarController := SidecarController{
+ SidecarHandler: mockSidecarHandler,
+ }
+
+ testCases := []struct {
+ name string
+ sidecar interface{}
+ expectedError error
+ }{
+ {
+ name: "Given context and sidecar " +
+ "When sidecar param is nil " +
+ "Then func should return an error",
+ sidecar: nil,
+ expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.Sidecar"),
+ },
+ {
+ name: "Given context and sidecar " +
+ "When sidecar param is not of type *v1alpha3.Sidecar " +
+ "Then func should return an error",
+ sidecar: struct{}{},
+ expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.Sidecar"),
+ },
+ {
+ name: "Given context and Sidecar " +
+ "When Sidecar param is of type *v1alpha3.Sidecar " +
+ "Then func should not return an error",
+ sidecar: &v1alpha3.Sidecar{},
+ expectedError: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ err := sidecarController.Updated(ctx, tc.sidecar, nil)
+ if tc.expectedError != nil {
+ assert.NotNil(t, err)
+ assert.Equal(t, tc.expectedError.Error(), err.Error())
+ } else {
+ if err != nil {
+ assert.Fail(t, "expected error to be nil but got %v", err)
+ }
+ }
+
+ })
+ }
+
+}
+
+func TestSidecarDeleted(t *testing.T) {
+
+ mockSidecarHandler := &test.MockSidecarHandler{}
+ ctx := context.Background()
+ sidecarController := SidecarController{
+ SidecarHandler: mockSidecarHandler,
+ }
+
+ testCases := []struct {
+ name string
+ sidecar interface{}
+ expectedError error
+ }{
+ {
+ name: "Given context and sidecar " +
+ "When sidecar param is nil " +
+ "Then func should return an error",
+ sidecar: nil,
+ expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.Sidecar"),
+ },
+ {
+ name: "Given context and sidecar " +
+ "When sidecar param is not of type *v1alpha3.Sidecar " +
+ "Then func should return an error",
+ sidecar: struct{}{},
+ expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.Sidecar"),
+ },
+ {
+ name: "Given context and Sidecar " +
+ "When Sidecar param is of type *v1alpha3.Sidecar " +
+ "Then func should not return an error",
+ sidecar: &v1alpha3.Sidecar{},
+ expectedError: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ err := sidecarController.Deleted(ctx, tc.sidecar)
+ if tc.expectedError != nil {
+ assert.NotNil(t, err)
+ assert.Equal(t, tc.expectedError.Error(), err.Error())
+ } else {
+ if err != nil {
+ assert.Fail(t, "expected error to be nil but got %v", err)
+ }
+ }
+
+ })
+ }
+
+}
+
func TestNewSidecarController(t *testing.T) {
config, err := clientcmd.BuildConfigFromFlags("", "../../test/resources/admins@fake-cluster.k8s.local")
if err != nil {
@@ -22,7 +189,7 @@ func TestNewSidecarController(t *testing.T) {
stop := make(chan struct{})
handler := test.MockSidecarHandler{}
- sidecarController, err := NewSidecarController("", stop, &handler, config, time.Duration(1000))
+ sidecarController, err := NewSidecarController(stop, &handler, config, time.Duration(1000), loader.GetFakeClientLoader())
if err != nil {
t.Errorf("Unexpected err %v", err)
@@ -55,3 +222,49 @@ func TestNewSidecarController(t *testing.T) {
t.Errorf("Handler should have no obj")
}
}
+
+// TODO: This is just a placeholder for when we add diff check for other types
+func TestSideCarGetProcessItemStatus(t *testing.T) {
+ sidecarController := SidecarController{}
+ testCases := []struct {
+ name string
+ obj interface{}
+ expectedRes string
+ }{
+ {
+ name: "TODO: Currently always returns false",
+ obj: nil,
+ expectedRes: common.NotProcessed,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ res, _ := sidecarController.GetProcessItemStatus(tc.obj)
+ assert.Equal(t, tc.expectedRes, res)
+ })
+ }
+}
+
+// TODO: This is just a placeholder for when we add diff check for other types
+func TestSideCarUpdateProcessItemStatus(t *testing.T) {
+ sidecarController := SidecarController{}
+ testCases := []struct {
+ name string
+ obj interface{}
+ expectedErr error
+ }{
+ {
+ name: "TODO: Currently always returns nil",
+ obj: nil,
+ expectedErr: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ err := sidecarController.UpdateProcessItemStatus(tc.obj, common.NotProcessed)
+ assert.Equal(t, tc.expectedErr, err)
+ })
+ }
+}
From efd7bec1f9cfe032035324a607efb41f250a5eaa Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:01:58 -0700
Subject: [PATCH 117/235] copy virtualservice.go from main branch
---
.../pkg/controller/istio/virtualservice.go | 78 ++++++++++++++-----
1 file changed, 57 insertions(+), 21 deletions(-)
diff --git a/admiral/pkg/controller/istio/virtualservice.go b/admiral/pkg/controller/istio/virtualservice.go
index 6914f049..0f98880c 100644
--- a/admiral/pkg/controller/istio/virtualservice.go
+++ b/admiral/pkg/controller/istio/virtualservice.go
@@ -5,6 +5,10 @@ import (
"fmt"
"time"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ log "github.com/sirupsen/logrus"
+
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
networking "istio.io/client-go/pkg/apis/networking/v1alpha3"
"istio.io/client-go/pkg/clientset/versioned"
@@ -16,9 +20,9 @@ import (
// VirtualServiceHandler interface contains the methods that are required
type VirtualServiceHandler interface {
- Added(ctx context.Context, obj *networking.VirtualService)
- Updated(ctx context.Context, obj *networking.VirtualService)
- Deleted(ctx context.Context, obj *networking.VirtualService)
+ Added(ctx context.Context, obj *networking.VirtualService) error
+ Updated(ctx context.Context, obj *networking.VirtualService) error
+ Deleted(ctx context.Context, obj *networking.VirtualService) error
}
type VirtualServiceController struct {
@@ -27,40 +31,72 @@ type VirtualServiceController struct {
informer cache.SharedIndexInformer
}
-func NewVirtualServiceController(clusterID string, stopCh <-chan struct{}, handler VirtualServiceHandler, config *rest.Config, resyncPeriod time.Duration) (*VirtualServiceController, error) {
+func NewVirtualServiceController(stopCh <-chan struct{}, handler VirtualServiceHandler, config *rest.Config, resyncPeriod time.Duration, clientLoader loader.ClientLoader) (*VirtualServiceController, error) {
- drController := VirtualServiceController{}
- drController.VirtualServiceHandler = handler
+ vsController := VirtualServiceController{}
+ vsController.VirtualServiceHandler = handler
var err error
- ic, err := versioned.NewForConfig(config)
+ ic, err := clientLoader.LoadIstioClientFromConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to create virtual service controller k8s client: %v", err)
}
- drController.IstioClient = ic
+ vsController.IstioClient = ic
+ vsController.informer = informers.NewVirtualServiceInformer(ic, k8sV1.NamespaceAll, resyncPeriod, cache.Indexers{})
+
+ admiral.NewController("virtualservice-ctrl", config.Host, stopCh, &vsController, vsController.informer)
+
+ return &vsController, nil
+}
- drController.informer = informers.NewVirtualServiceInformer(ic, k8sV1.NamespaceAll, resyncPeriod, cache.Indexers{})
+func (sec *VirtualServiceController) Added(ctx context.Context, obj interface{}) error {
+ dr, ok := obj.(*networking.VirtualService)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.VirtualService", obj)
+ }
+ return sec.VirtualServiceHandler.Added(ctx, dr)
+}
- mcd := admiral.NewMonitoredDelegator(&drController, clusterID, "virtualservice")
- admiral.NewController("virtualservice-ctrl-"+config.Host, stopCh, mcd, drController.informer)
+func (sec *VirtualServiceController) Updated(ctx context.Context, obj interface{}, oldObj interface{}) error {
+ dr, ok := obj.(*networking.VirtualService)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.VirtualService", obj)
+ }
+ return sec.VirtualServiceHandler.Updated(ctx, dr)
+}
- return &drController, nil
+func (sec *VirtualServiceController) Deleted(ctx context.Context, obj interface{}) error {
+ dr, ok := obj.(*networking.VirtualService)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.VirtualService", obj)
+ }
+ return sec.VirtualServiceHandler.Deleted(ctx, dr)
}
-func (sec *VirtualServiceController) Added(ctx context.Context, ojb interface{}) {
- dr := ojb.(*networking.VirtualService)
- sec.VirtualServiceHandler.Added(ctx, dr)
+func (sec *VirtualServiceController) GetProcessItemStatus(obj interface{}) (string, error) {
+ return common.NotProcessed, nil
}
-func (sec *VirtualServiceController) Updated(ctx context.Context, ojb interface{}, oldObj interface{}) {
- dr := ojb.(*networking.VirtualService)
- sec.VirtualServiceHandler.Updated(ctx, dr)
+func (sec *VirtualServiceController) UpdateProcessItemStatus(obj interface{}, status string) error {
+ return nil
}
-func (sec *VirtualServiceController) Deleted(ctx context.Context, ojb interface{}) {
- dr := ojb.(*networking.VirtualService)
- sec.VirtualServiceHandler.Deleted(ctx, dr)
+func (sec *VirtualServiceController) LogValueOfAdmiralIoIgnore(obj interface{}) {
+ vs, ok := obj.(*networking.VirtualService)
+ if !ok {
+ return
+ }
+ if len(vs.Annotations) > 0 && vs.Annotations[common.AdmiralIgnoreAnnotation] == "true" {
+ log.Infof("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", "admiralIoIgnoreAnnotationCheck", "VirtualService", vs.Name, vs.Namespace, "", "Value=true")
+ }
+}
+func (sec *VirtualServiceController) Get(ctx context.Context, isRetry bool, obj interface{}) (interface{}, error) {
+ /*vs, ok := obj.(*networking.VirtualService)
+ if ok && sec.IstioClient != nil {
+ return sec.IstioClient.NetworkingV1alpha3().VirtualServices(vs.Namespace).Get(ctx, vs.Name, meta_v1.GetOptions{})
+ }*/
+ return nil, fmt.Errorf("istio client is not initialized, txId=%s", ctx.Value("txId"))
}
From a39e2e9b402c048adad2e018eeab5a2a1c0e86cf Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:02:26 -0700
Subject: [PATCH 118/235] copy virtualservice_test.go from main branch
---
.../controller/istio/virtualservice_test.go | 239 +++++++++++++++++-
1 file changed, 238 insertions(+), 1 deletion(-)
diff --git a/admiral/pkg/controller/istio/virtualservice_test.go b/admiral/pkg/controller/istio/virtualservice_test.go
index b905cafa..5a30616e 100644
--- a/admiral/pkg/controller/istio/virtualservice_test.go
+++ b/admiral/pkg/controller/istio/virtualservice_test.go
@@ -2,11 +2,16 @@ package istio
import (
"context"
+ "fmt"
"testing"
"time"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+
"github.com/google/go-cmp/cmp"
"github.com/istio-ecosystem/admiral/admiral/pkg/test"
+ "github.com/stretchr/testify/assert"
"google.golang.org/protobuf/testing/protocmp"
v1alpha32 "istio.io/api/networking/v1alpha3"
"istio.io/client-go/pkg/apis/networking/v1alpha3"
@@ -14,6 +19,168 @@ import (
"k8s.io/client-go/tools/clientcmd"
)
+func TestAdded(t *testing.T) {
+
+ mockVirtualServiceHandler := &test.MockVirtualServiceHandler{}
+ ctx := context.Background()
+ virtualServiceController := VirtualServiceController{
+ VirtualServiceHandler: mockVirtualServiceHandler,
+ }
+
+ testCases := []struct {
+ name string
+ virtualService interface{}
+ expectedError error
+ }{
+ {
+ name: "Given context and virtualService " +
+ "When virtualservice param is nil " +
+ "Then func should return an error",
+ virtualService: nil,
+ expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.VirtualService"),
+ },
+ {
+ name: "Given context and virtualService " +
+ "When virtualservice param is not of type *v1alpha3.VirtualService " +
+ "Then func should return an error",
+ virtualService: struct{}{},
+ expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.VirtualService"),
+ },
+ {
+ name: "Given context and virtualService " +
+ "When virtualservice param is of type *v1alpha3.VirtualService " +
+ "Then func should not return an error",
+ virtualService: &v1alpha3.VirtualService{},
+ expectedError: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ err := virtualServiceController.Added(ctx, tc.virtualService)
+ if tc.expectedError != nil {
+ assert.NotNil(t, err)
+ assert.Equal(t, tc.expectedError.Error(), err.Error())
+ } else {
+ if err != nil {
+ assert.Fail(t, "expected error to be nil but got %v", err)
+ }
+ }
+
+ })
+ }
+
+}
+
+func TestUpdated(t *testing.T) {
+
+ mockVirtualServiceHandler := &test.MockVirtualServiceHandler{}
+ ctx := context.Background()
+ virtualServiceController := VirtualServiceController{
+ VirtualServiceHandler: mockVirtualServiceHandler,
+ }
+
+ testCases := []struct {
+ name string
+ virtualService interface{}
+ expectedError error
+ }{
+ {
+ name: "Given context and virtualService " +
+ "When virtualservice param is nil " +
+ "Then func should return an error",
+ virtualService: nil,
+ expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.VirtualService"),
+ },
+ {
+ name: "Given context and virtualService " +
+ "When virtualservice param is not of type *v1alpha3.VirtualService " +
+ "Then func should return an error",
+ virtualService: struct{}{},
+ expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.VirtualService"),
+ },
+ {
+ name: "Given context and virtualService " +
+ "When virtualservice param is of type *v1alpha3.VirtualService " +
+ "Then func should not return an error",
+ virtualService: &v1alpha3.VirtualService{},
+ expectedError: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ err := virtualServiceController.Updated(ctx, tc.virtualService, nil)
+ if tc.expectedError != nil {
+ assert.NotNil(t, err)
+ assert.Equal(t, tc.expectedError.Error(), err.Error())
+ } else {
+ if err != nil {
+ assert.Fail(t, "expected error to be nil but got %v", err)
+ }
+ }
+
+ })
+ }
+
+}
+
+func TestDeleted(t *testing.T) {
+
+ mockVirtualServiceHandler := &test.MockVirtualServiceHandler{}
+ ctx := context.Background()
+ virtualServiceController := VirtualServiceController{
+ VirtualServiceHandler: mockVirtualServiceHandler,
+ }
+
+ testCases := []struct {
+ name string
+ virtualService interface{}
+ expectedError error
+ }{
+ {
+ name: "Given context and virtualService " +
+ "When virtualservice param is nil " +
+ "Then func should return an error",
+ virtualService: nil,
+ expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.VirtualService"),
+ },
+ {
+ name: "Given context and virtualService " +
+ "When virtualservice param is not of type *v1alpha3.VirtualService " +
+ "Then func should return an error",
+ virtualService: struct{}{},
+ expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.VirtualService"),
+ },
+ {
+ name: "Given context and virtualService " +
+ "When virtualservice param is of type *v1alpha3.VirtualService " +
+ "Then func should not return an error",
+ virtualService: &v1alpha3.VirtualService{},
+ expectedError: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ err := virtualServiceController.Deleted(ctx, tc.virtualService)
+ if tc.expectedError != nil {
+ assert.NotNil(t, err)
+ assert.Equal(t, tc.expectedError.Error(), err.Error())
+ } else {
+ if err != nil {
+ assert.Fail(t, "expected error to be nil but got %v", err)
+ }
+ }
+
+ })
+ }
+
+}
+
func TestNewVirtualServiceController(t *testing.T) {
config, err := clientcmd.BuildConfigFromFlags("", "../../test/resources/admins@fake-cluster.k8s.local")
if err != nil {
@@ -22,7 +189,7 @@ func TestNewVirtualServiceController(t *testing.T) {
stop := make(chan struct{})
handler := test.MockVirtualServiceHandler{}
- virtualServiceController, err := NewVirtualServiceController("", stop, &handler, config, time.Duration(1000))
+ virtualServiceController, err := NewVirtualServiceController(stop, &handler, config, time.Duration(1000), loader.GetFakeClientLoader())
if err != nil {
t.Errorf("Unexpected err %v", err)
@@ -56,3 +223,73 @@ func TestNewVirtualServiceController(t *testing.T) {
t.Errorf("Handler should have no obj")
}
}
+
+// TODO: This is just a placeholder for when we add diff check for other types
+func TestVirtualServiceGetProcessItemStatus(t *testing.T) {
+ virtualServiceController := VirtualServiceController{}
+ testCases := []struct {
+ name string
+ obj interface{}
+ expectedRes string
+ }{
+ {
+ name: "TODO: Currently always returns false",
+ obj: nil,
+ expectedRes: common.NotProcessed,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ res, _ := virtualServiceController.GetProcessItemStatus(tc.obj)
+ assert.Equal(t, tc.expectedRes, res)
+ })
+ }
+}
+
+// TODO: This is just a placeholder for when we add diff check for other types
+func TestVirtualServiceUpdateProcessItemStatus(t *testing.T) {
+ virtualServiceController := VirtualServiceController{}
+ testCases := []struct {
+ name string
+ obj interface{}
+ expectedErr error
+ }{
+ {
+ name: "TODO: Currently always returns nil",
+ obj: nil,
+ expectedErr: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ err := virtualServiceController.UpdateProcessItemStatus(tc.obj, common.NotProcessed)
+ assert.Equal(t, tc.expectedErr, err)
+ })
+ }
+}
+
+func TestVirtualServiceLogValueOfAdmiralIoIgnore(t *testing.T) {
+ // Test case 1: obj is not a VirtualService object
+ sec := &VirtualServiceController{}
+ sec.LogValueOfAdmiralIoIgnore("not a virtual service")
+ // No error should occur
+
+ // Test case 2: VirtualService has no annotations
+ sec = &VirtualServiceController{}
+ sec.LogValueOfAdmiralIoIgnore(&v1alpha3.VirtualService{})
+ // No error should occur
+
+ // Test case 3: AdmiralIgnoreAnnotation is not set
+ sec = &VirtualServiceController{}
+ vs := &v1alpha3.VirtualService{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{"other-annotation": "value"}}}
+ sec.LogValueOfAdmiralIoIgnore(vs)
+ // No error should occur
+
+ // Test case 4: AdmiralIgnoreAnnotation is set in annotations
+ sec = &VirtualServiceController{}
+ vs = &v1alpha3.VirtualService{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{common.AdmiralIgnoreAnnotation: "true"}}}
+ sec.LogValueOfAdmiralIoIgnore(vs)
+ // No error should occur
+}
From 7bcebac10db3d8253b03169401c284f4b53b1fa6 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:03:44 -0700
Subject: [PATCH 119/235] copy defaultresolver_test.go from main branch
---
.../secret/resolver/defaultresolver_test.go | 41 +++++++++++++++++++
1 file changed, 41 insertions(+)
create mode 100644 admiral/pkg/controller/secret/resolver/defaultresolver_test.go
diff --git a/admiral/pkg/controller/secret/resolver/defaultresolver_test.go b/admiral/pkg/controller/secret/resolver/defaultresolver_test.go
new file mode 100644
index 00000000..2a0fdaf7
--- /dev/null
+++ b/admiral/pkg/controller/secret/resolver/defaultresolver_test.go
@@ -0,0 +1,41 @@
+package resolver
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNewDefaultResolver(t *testing.T) {
+ resolver, err := NewDefaultResolver()
+ assert.NotNil(t, resolver, "DefaultResolver should not be nil")
+ assert.Nil(t, err, "Error while new instance creation should be nil")
+}
+
+func TestDefaultResolver_FetchKubeConfig(t *testing.T) {
+ expectedKubeConfig := `
+apiVersion: v1
+clusters:
+- cluster:
+ certificate-authority-data: ca_data
+ server: https://example.com
+ name: example-cluster
+contexts:
+- context:
+ cluster: example-cluster
+ user: example-user
+ name: example-context
+current-context: example-context
+kind: Config
+preferences: {}
+users:
+- name: example-user
+ user:
+ client-certificate-data: cert_data
+ client-key-data: key_data
+`
+ resolver, _ := NewDefaultResolver()
+ kconfig, err := resolver.FetchKubeConfig("", []byte(expectedKubeConfig))
+ assert.Equal(t, []byte(expectedKubeConfig), kconfig)
+ assert.Nil(t, err, "Expected error to be nil")
+}
From e970336159d28b9059012c05867c34c5073a0095 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:04:40 -0700
Subject: [PATCH 120/235] copy secretcontroller.go from main branch
---
.../pkg/controller/secret/secretcontroller.go | 160 +++++++++++++-----
1 file changed, 113 insertions(+), 47 deletions(-)
diff --git a/admiral/pkg/controller/secret/secretcontroller.go b/admiral/pkg/controller/secret/secretcontroller.go
index 5e8d1674..0440609e 100644
--- a/admiral/pkg/controller/secret/secretcontroller.go
+++ b/admiral/pkg/controller/secret/secretcontroller.go
@@ -20,52 +20,59 @@ import (
"fmt"
"time"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/registry"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/util"
+ idps_sdk "github.intuit.com/idps/idps-go-sdk/v3/idps-sdk"
+
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
- "github.com/istio-ecosystem/admiral/admiral/pkg/controller/secret/resolver"
- log "github.com/sirupsen/logrus"
+ "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/cache"
+ "k8s.io/client-go/tools/clientcmd"
+ "k8s.io/client-go/util/workqueue"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/secret/resolver"
+ log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
- "k8s.io/client-go/kubernetes"
- "k8s.io/client-go/tools/cache"
- "k8s.io/client-go/tools/clientcmd"
- "k8s.io/client-go/util/workqueue"
)
const (
- filterLabel = "admiral/sync"
- maxRetries = 5
+ maxRetries = 5
)
// LoadKubeConfig is a unit test override variable for loading the k8s config.
// DO NOT USE - TEST ONLY.
var LoadKubeConfig = clientcmd.Load
+var remoteClustersMetric common.Gauge
+
// addSecretCallback prototype for the add secret callback function.
-type addSecretCallback func(config *rest.Config, dataKey string, resyncPeriod time.Duration) error
+type addSecretCallback func(config *rest.Config, dataKey string, resyncPeriod util.ResyncIntervals) error
// updateSecretCallback prototype for the update secret callback function.
-type updateSecretCallback func(config *rest.Config, dataKey string, resyncPeriod time.Duration) error
+type updateSecretCallback func(config *rest.Config, dataKey string, resyncPeriod util.ResyncIntervals) error
// removeSecretCallback prototype for the remove secret callback function.
type removeSecretCallback func(dataKey string) error
// Controller is the controller implementation for Secret resources
type Controller struct {
- kubeclientset kubernetes.Interface
- namespace string
- Cs *ClusterStore
- queue workqueue.RateLimitingInterface
- informer cache.SharedIndexInformer
- addCallback addSecretCallback
- updateCallback updateSecretCallback
- removeCallback removeSecretCallback
- secretResolver resolver.SecretResolver
+ kubeclientset kubernetes.Interface
+ namespace string
+ Cs *ClusterStore
+ queue workqueue.RateLimitingInterface
+ informer cache.SharedIndexInformer
+ addCallback addSecretCallback
+ updateCallback updateSecretCallback
+ removeCallback removeSecretCallback
+ secretResolver resolver.SecretResolver
+ clusterShardStoreHandler registry.ClusterShardStore
}
// RemoteCluster defines cluster structZZ
@@ -86,6 +93,12 @@ func newClustersStore() *ClusterStore {
}
}
+type IdpsSdkWrapper struct{}
+
+func (c *IdpsSdkWrapper) IdpsClientInstanceFromMap(props map[string]string) (client.IdpsClientInterface, error) {
+ return idps_sdk.IdpsClientInstanceFromMap(props)
+}
+
// NewController returns a new secret controller
func NewController(
kubeclientset kubernetes.Interface,
@@ -94,17 +107,18 @@ func NewController(
addCallback addSecretCallback,
updateCallback updateSecretCallback,
removeCallback removeSecretCallback,
- secretResolverType string) *Controller {
+ admiralProfile string,
+ secretResolverConfig string) *Controller {
ctx := context.Background()
secretsInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(opts meta_v1.ListOptions) (runtime.Object, error) {
- opts.LabelSelector = filterLabel + "=true"
+ opts.LabelSelector = common.GetSecretFilterTags() + "=true"
return kubeclientset.CoreV1().Secrets(namespace).List(ctx, opts)
},
WatchFunc: func(opts meta_v1.ListOptions) (watch.Interface, error) {
- opts.LabelSelector = filterLabel + "=true"
+ opts.LabelSelector = common.GetSecretFilterTags() + "=true"
return kubeclientset.CoreV1().Secrets(namespace).Watch(ctx, opts)
},
},
@@ -115,11 +129,16 @@ func NewController(
var secretResolver resolver.SecretResolver
var err error
- if len(secretResolverType) == 0 {
+
+ if admiralProfile == common.AdmiralProfileIntuit {
+ log.Info("Initializing Intuit secret resolver")
+ idpsClientProviderWrapper := &IdpsSdkWrapper{}
+ secretResolver, err = resolver.NewIDPSResolver(secretResolverConfig, idpsClientProviderWrapper)
+ } else if admiralProfile == common.AdmiralProfileDefault || admiralProfile == common.AdmiralProfilePerf {
log.Info("Initializing default secret resolver")
secretResolver, err = resolver.NewDefaultResolver()
} else {
- err = fmt.Errorf("unrecognized secret resolver type %v specified", secretResolverType)
+ err = fmt.Errorf("unrecognized secret resolver type %v specified", admiralProfile)
}
if err != nil {
@@ -128,15 +147,16 @@ func NewController(
}
controller := &Controller{
- kubeclientset: kubeclientset,
- namespace: namespace,
- Cs: cs,
- informer: secretsInformer,
- queue: queue,
- addCallback: addCallback,
- updateCallback: updateCallback,
- removeCallback: removeCallback,
- secretResolver: secretResolver,
+ kubeclientset: kubeclientset,
+ namespace: namespace,
+ Cs: cs,
+ informer: secretsInformer,
+ queue: queue,
+ addCallback: addCallback,
+ updateCallback: updateCallback,
+ removeCallback: removeCallback,
+ secretResolver: secretResolver,
+ clusterShardStoreHandler: registry.NewClusterShardStoreHandler(),
}
log.Info("Setting up event handlers")
@@ -163,12 +183,17 @@ func NewController(
}
},
})
+
+ remoteClustersMetric = common.NewGaugeFrom(common.ClustersMonitoredMetricName, "Gauge for the clusters monitored by Admiral")
return controller
}
// Run starts the controller until it receives a message over stopCh
func (c *Controller) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
+ if c == nil {
+ return
+ }
defer c.queue.ShutDown()
log.Info("Starting Secrets controller")
@@ -188,16 +213,12 @@ func (c *Controller) Run(stopCh <-chan struct{}) {
// StartSecretController creates the secret controller.
func StartSecretController(
- ctx context.Context,
- k8s kubernetes.Interface,
- addCallback addSecretCallback,
- updateCallback updateSecretCallback,
- removeCallback removeSecretCallback,
- namespace string,
- secretResolverType string) (*Controller, error) {
+ ctx context.Context, k8s kubernetes.Interface, addCallback addSecretCallback,
+ updateCallback updateSecretCallback, removeCallback removeSecretCallback,
+ namespace, admiralProfile, secretResolverConfig string) (*Controller, error) {
clusterStore := newClustersStore()
- controller := NewController(k8s, namespace, clusterStore, addCallback, updateCallback, removeCallback, secretResolverType)
+ controller := NewController(k8s, namespace, clusterStore, addCallback, updateCallback, removeCallback, admiralProfile, secretResolverConfig)
go controller.Run(ctx.Done())
@@ -289,6 +310,10 @@ func (c *Controller) createRemoteCluster(kubeConfig []byte, secretName string, c
}
func (c *Controller) addMemberCluster(secretName string, s *corev1.Secret) {
+ shard, err := getShardNameFromClusterSecret(s)
+ if err != nil {
+ log.Errorf("unable to find shard information from secret")
+ }
for clusterID, kubeConfig := range s.Data {
// clusterID must be unique even across multiple secrets
if prev, ok := c.Cs.RemoteClusters[clusterID]; !ok {
@@ -304,11 +329,15 @@ func (c *Controller) addMemberCluster(secretName string, s *corev1.Secret) {
c.Cs.RemoteClusters[clusterID] = remoteCluster
- if err := c.addCallback(restConfig, clusterID, common.GetAdmiralParams().CacheRefreshDuration); err != nil {
+ if err := c.addCallback(restConfig, clusterID, common.GetResyncIntervals()); err != nil {
log.Errorf("error during secret loading for clusterID: %s %v", clusterID, err)
continue
}
-
+ err = c.addClusterToShard(clusterID, shard)
+ if err != nil {
+ log.Errorf("error adding cluster=%s to shard=%s", clusterID, shard)
+ continue
+ }
log.Infof("Secret loaded for cluster %s in the secret %s in namespace %s.", clusterID, c.Cs.RemoteClusters[clusterID].secretName, s.ObjectMeta.Namespace)
} else {
@@ -328,14 +357,19 @@ func (c *Controller) addMemberCluster(secretName string, s *corev1.Secret) {
}
c.Cs.RemoteClusters[clusterID] = remoteCluster
- if err := c.updateCallback(restConfig, clusterID, common.GetAdmiralParams().CacheRefreshDuration); err != nil {
+ if err := c.updateCallback(restConfig, clusterID, common.GetResyncIntervals()); err != nil {
log.Errorf("Error updating cluster_id from secret=%v: %s %v",
clusterID, secretName, err)
}
+ err = c.addClusterToShard(clusterID, shard)
+ if err != nil {
+ log.Errorf("error adding cluster=%s to shard=%s", clusterID, shard)
+ continue
+ }
}
-
}
- common.RemoteClustersMetric.Set(float64(len(c.Cs.RemoteClusters)))
+
+ remoteClustersMetric.Set(float64(len(c.Cs.RemoteClusters)))
log.Infof("Number of remote clusters: %d", len(c.Cs.RemoteClusters))
}
@@ -350,6 +384,38 @@ func (c *Controller) deleteMemberCluster(secretName string) {
delete(c.Cs.RemoteClusters, clusterID)
}
}
- common.RemoteClustersMetric.Set(float64(len(c.Cs.RemoteClusters)))
+ remoteClustersMetric.Set(float64(len(c.Cs.RemoteClusters)))
log.Infof("Number of remote clusters: %d", len(c.Cs.RemoteClusters))
}
+
+func getShardNameFromClusterSecret(secret *corev1.Secret) (string, error) {
+ if !common.IsAdmiralStateSyncerMode() {
+ return "", nil
+ }
+ if secret == nil {
+ return "", fmt.Errorf("nil secret passed")
+ }
+ annotation := secret.GetAnnotations()
+ if len(annotation) == 0 {
+ return "", fmt.Errorf("no annotations found on secret=%s", secret.GetName())
+ }
+ shard, ok := annotation[util.SecretShardKey]
+ if ok {
+ return shard, nil
+ }
+ return "", fmt.Errorf("shard not found")
+}
+func (c *Controller) addClusterToShard(cluster, shard string) error {
+ if !common.IsAdmiralStateSyncerMode() {
+ return nil
+ }
+ return c.clusterShardStoreHandler.AddClusterToShard(cluster, shard)
+}
+
+// TODO: invoke function in delete workflow
+func (c *Controller) removeClusterFromShard(cluster, shard string) error {
+ if !common.IsAdmiralStateSyncerMode() {
+ return nil
+ }
+ return c.clusterShardStoreHandler.RemoveClusterFromShard(cluster, shard)
+}
From 5544328b0f734395fe65d46896a9fb83cdba9c1e Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:05:10 -0700
Subject: [PATCH 121/235] copy secretcontroller_test.go from main branch
---
.../secret/secretcontroller_test.go | 371 +++++++++++++++---
1 file changed, 310 insertions(+), 61 deletions(-)
diff --git a/admiral/pkg/controller/secret/secretcontroller_test.go b/admiral/pkg/controller/secret/secretcontroller_test.go
index 6e02bddf..d7e131b0 100644
--- a/admiral/pkg/controller/secret/secretcontroller_test.go
+++ b/admiral/pkg/controller/secret/secretcontroller_test.go
@@ -17,37 +17,61 @@ package secret
import (
"context"
"fmt"
+ "reflect"
"sync"
"testing"
"time"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/util"
"github.com/prometheus/client_golang/prometheus"
io_prometheus_client "github.com/prometheus/client_model/go"
+ coreV1 "k8s.io/api/core/v1"
+ corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/rest"
. "github.com/onsi/gomega"
- v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
- pkgtest "github.com/istio-ecosystem/admiral/admiral/pkg/test"
+ registryMocks "github.com/istio-ecosystem/admiral/admiral/pkg/registry/mocks"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
)
-const secretName string = "testSecretName"
-const secretNameSpace string = "istio-system"
+const (
+ secretName string = "testSecretName"
+ secretNameSpace string = "istio-system"
+)
-var testCreateControllerCalled bool
-var testDeleteControllerCalled bool
+var (
+ testCreateControllerCalled bool
+ testDeleteControllerCalled bool
+)
-func makeSecret(secret, clusterID string, kubeconfig []byte) *v1.Secret {
- return &v1.Secret{
+func makeSecret(secret, clusterID string, kubeconfig []byte) *coreV1.Secret {
+ return &coreV1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secret,
Namespace: secretNameSpace,
Labels: map[string]string{
- filterLabel: "true",
+ common.GetSecretFilterTags(): "true",
+ },
+ },
+ Data: map[string][]byte{
+ clusterID: kubeconfig,
+ },
+ }
+}
+
+func makeSecretWithCustomFilterTag(secret, clusterID string, kubeconfig []byte, secretFilterTag string) *coreV1.Secret {
+ return &coreV1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: secret,
+ Namespace: secretNameSpace,
+ Labels: map[string]string{
+ secretFilterTag: "true",
},
},
Data: map[string][]byte{
@@ -63,14 +87,14 @@ var (
deleted string
)
-func addCallback(config *rest.Config, id string, resyncPeriod time.Duration) error {
+func addCallback(config *rest.Config, id string, resyncPeriod util.ResyncIntervals) error {
mu.Lock()
defer mu.Unlock()
added = id
return nil
}
-func updateCallback(config *rest.Config, id string, resyncPeriod time.Duration) error {
+func updateCallback(config *rest.Config, id string, resyncPeriod util.ResyncIntervals) error {
mu.Lock()
defer mu.Unlock()
updated = id
@@ -102,7 +126,7 @@ func testDeleteController(clusterID string) error {
func createMultiClusterSecret(k8s *fake.Clientset) error {
data := map[string][]byte{}
- secret := v1.Secret{
+ secret := coreV1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: secretNameSpace,
@@ -140,61 +164,47 @@ func mockLoadKubeConfig(kubeconfig []byte) (*clientcmdapi.Config, error) {
return config, nil
}
-func verifyControllerDeleted(t *testing.T, timeoutName string) {
- pkgtest.NewEventualOpts(10*time.Millisecond, 5*time.Second).Eventually(t, timeoutName, func() bool {
- return testDeleteControllerCalled == true
- })
-}
-
-func verifyControllerCreated(t *testing.T, timeoutName string) {
- pkgtest.NewEventualOpts(10*time.Millisecond, 5*time.Second).Eventually(t, timeoutName, func() bool {
- return testCreateControllerCalled == true
- })
-}
+func Test_SecretFilterTags(t *testing.T) {
+ g := NewWithT(t)
-/*
-func Test_SecretController(t *testing.T) {
LoadKubeConfig = mockLoadKubeConfig
- clientset := fake.NewSimpleClientset()
+ secretFilterTag := "admiral/test-filter-tag"
- // Start the secret controller and sleep to allow secret process to start.
- err := StartSecretController(
- clientset, testCreateController, testDeleteController, secretNameSpace, context.TODO(), "")
- if err != nil {
- t.Fatalf("Could not start secret controller: %v", err)
+ p := common.AdmiralParams{
+ MetricsEnabled: true,
+ SecretFilterTags: secretFilterTag,
}
- time.Sleep(100 * time.Millisecond)
- // Create the multicluster secret.
- err = createMultiClusterSecret(clientset)
- if err != nil {
- t.Fatalf("Unexpected error on secret create: %v", err)
- }
+ common.InitializeConfig(p)
- verifyControllerCreated(t, "Create remote secret controller")
+ secret := makeSecretWithCustomFilterTag("s0", "c0", []byte("kubeconfig0-0"), secretFilterTag)
- if testDeleteControllerCalled != false {
- t.Fatalf("Test failed on create secret, delete callback function called")
- }
+	g.Expect(common.GetSecretFilterTags()).Should(Equal(secretFilterTag)) // Verify the secret filter tag was set correctly on the config.
+ g.Expect(secret.Labels[common.GetSecretFilterTags()]).Should(Equal("true")) // Check if the secret filter tag matches the one set on the config to watch.
+
+}
- // Reset test variables and delete the multicluster secret.
- testCreateControllerCalled = false
- testDeleteControllerCalled = false
+func Test_SecretFilterTagsMismatch(t *testing.T) {
+ g := NewWithT(t)
- err = deleteMultiClusterSecret(clientset)
- if err != nil {
- t.Fatalf("Unexpected error on secret delete: %v", err)
- }
+ LoadKubeConfig = mockLoadKubeConfig
- // Test - Verify that the remote controller has been removed.
- verifyControllerDeleted(t, "delete remote secret controller")
+ secretFilterTag := "admiral/test-filter-tag"
- // Test
- if testCreateControllerCalled != false {
- t.Fatalf("Test failed on delete secret, create callback function called")
+ p := common.AdmiralParams{
+ MetricsEnabled: true,
+ SecretFilterTags: secretFilterTag,
}
-}*/
+
+ common.InitializeConfig(p)
+
+ secret := makeSecretWithCustomFilterTag("s0", "c0", []byte("kubeconfig0-0"), "admiral/other-filter-tag")
+
+ g.Expect(common.GetSecretFilterTags()).Should(Equal(secretFilterTag)) // Check if the secret filter tag is set correctly on the config
+	g.Expect(secret.Labels[common.GetSecretFilterTags()]).Should(Equal("")) // The secret's filter tag doesn't match the one configured to watch, so the lookup should be empty.
+
+}
func Test_SecretController(t *testing.T) {
g := NewWithT(t)
@@ -203,20 +213,23 @@ func Test_SecretController(t *testing.T) {
clientset := fake.NewSimpleClientset()
+ p := common.AdmiralParams{
+ MetricsEnabled: true,
+ SecretFilterTags: "admiral/sync",
+ }
+ common.InitializeConfig(p)
+
var (
secret0 = makeSecret("s0", "c0", []byte("kubeconfig0-0"))
secret0UpdateKubeconfigChanged = makeSecret("s0", "c0", []byte("kubeconfig0-1"))
secret1 = makeSecret("s1", "c1", []byte("kubeconfig1-0"))
)
- p := common.AdmiralParams{MetricsEnabled: true}
- common.InitializeConfig(p)
-
steps := []struct {
// only set one of these per step. The others should be nil.
- add *v1.Secret
- update *v1.Secret
- delete *v1.Secret
+ add *coreV1.Secret
+ update *coreV1.Secret
+ delete *coreV1.Secret
// only set one of these per step. The others should be empty.
wantAdded string
@@ -237,7 +250,7 @@ func Test_SecretController(t *testing.T) {
// The assertion ShouldNot(BeNil()) make sure that start secret controller return a not nil controller and nil error
registry := prometheus.DefaultGatherer
g.Expect(
- StartSecretController(context.TODO(), clientset, addCallback, updateCallback, deleteCallback, secretNameSpace, "")).
+ StartSecretController(context.TODO(), clientset, addCallback, updateCallback, deleteCallback, secretNameSpace, common.AdmiralProfileDefault, "")).
ShouldNot(BeNil())
ctx := context.Background()
@@ -299,3 +312,239 @@ func Test_SecretController(t *testing.T) {
})
}
}
+
+func TestGetShardNameFromClusterSecret(t *testing.T) {
+ cases := []struct {
+ name string
+ secret *corev1.Secret
+ stateSyncerMode bool
+ want string
+ wantErr error
+ }{
+ {
+ name: "Given secret is empty" +
+ "When function is invoked, " +
+ "It should return an error",
+ stateSyncerMode: true,
+ secret: nil,
+ want: "",
+ wantErr: fmt.Errorf("nil secret passed"),
+ },
+ {
+ name: "Given secret is empty, " +
+ "And, state syncer mode is false, " +
+ "When function is invoked, " +
+ "It should return an error",
+ secret: nil,
+ want: "",
+ wantErr: nil,
+ },
+ {
+ name: "Given secret is valid, but does not have annotations" +
+ "When function is invoked, " +
+ "It should return an error",
+ stateSyncerMode: true,
+ secret: &coreV1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: secretName,
+ Namespace: secretNameSpace,
+ Labels: map[string]string{
+ common.GetSecretFilterTags(): "true",
+ },
+ },
+ },
+ want: "",
+ wantErr: fmt.Errorf("no annotations found on secret=%s", secretName),
+ },
+ {
+ name: "Given secret is valid, and has valid annotations" +
+ "When function is invoked, " +
+ "It should return a valid value, without any error",
+ stateSyncerMode: true,
+ secret: &coreV1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: secretName,
+ Namespace: secretNameSpace,
+ Annotations: map[string]string{
+ util.SecretShardKey: "shard1",
+ },
+ },
+ },
+ want: "shard1",
+ wantErr: nil,
+ },
+ }
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ common.ResetSync()
+ common.InitializeConfig(common.AdmiralParams{
+ AdmiralStateSyncerMode: c.stateSyncerMode,
+ })
+ got, err := getShardNameFromClusterSecret(c.secret)
+ if got != c.want {
+ t.Errorf("want=%s, got=%s", c.want, got)
+ }
+ if !reflect.DeepEqual(err, c.wantErr) {
+ t.Errorf("want=%v, got=%v", c.wantErr, err)
+ }
+ })
+ }
+}
+
+func TestAddClusterToShard(t *testing.T) {
+ var (
+ cluster1 = "cluster1"
+ shard1 = "shard1"
+ err1 = fmt.Errorf("error1")
+ simpleShardMock = ®istryMocks.ClusterShardStore{}
+ )
+ shardMockWithoutErr := ®istryMocks.ClusterShardStore{}
+ shardMockWithoutErr.On(
+ "AddClusterToShard",
+ mock.AnythingOfType("string"),
+ mock.AnythingOfType("string")).Return(nil)
+ shardMockWithErr := ®istryMocks.ClusterShardStore{}
+ shardMockWithErr.On(
+ "AddClusterToShard",
+ mock.AnythingOfType("string"),
+ mock.AnythingOfType("string")).Return(err1)
+ cases := []struct {
+ name string
+ stateSyncerMode bool
+ cluster string
+ shard string
+ clusterShardStoreHandler *registryMocks.ClusterShardStore
+ clusterShardStoreHandlerCalls int
+ wantErr error
+ }{
+ {
+ name: "Given state syncer mode is set to false, " +
+ "When function is invoked, " +
+ "It should not invoke cluster shard store handler, and should return nil",
+ cluster: cluster1,
+ shard: shard1,
+ clusterShardStoreHandler: simpleShardMock,
+ clusterShardStoreHandlerCalls: 0,
+ wantErr: nil,
+ },
+ {
+ name: "Given state syncer mode is set to true, " +
+ "When function is invoked, " +
+ "And AddClusterToShard returns an error, " +
+ "It should return an error",
+ stateSyncerMode: true,
+ cluster: cluster1,
+ shard: shard1,
+ clusterShardStoreHandler: shardMockWithErr,
+ clusterShardStoreHandlerCalls: 1,
+ wantErr: err1,
+ },
+ {
+ name: "Given state syncer mode is set to true, " +
+ "When function is invoked, " +
+ "And AddClusterToShard does not return any error , " +
+ "It should not return any error",
+ stateSyncerMode: true,
+ cluster: cluster1,
+ shard: shard1,
+ clusterShardStoreHandler: shardMockWithoutErr,
+ clusterShardStoreHandlerCalls: 1,
+ wantErr: nil,
+ },
+ }
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ common.ResetSync()
+ common.InitializeConfig(common.AdmiralParams{
+ AdmiralStateSyncerMode: c.stateSyncerMode,
+ })
+ controller := &Controller{
+ clusterShardStoreHandler: c.clusterShardStoreHandler,
+ }
+ err := controller.addClusterToShard(c.cluster, c.shard)
+ if !reflect.DeepEqual(err, c.wantErr) {
+ t.Errorf("want=%v, got=%v", c.wantErr, err)
+ }
+ assert.Equal(t, len(c.clusterShardStoreHandler.ExpectedCalls), c.clusterShardStoreHandlerCalls)
+ })
+ }
+}
+
+func TestRemoveClusterFromShard(t *testing.T) {
+ var (
+ cluster1 = "cluster1"
+ shard1 = "shard1"
+ err1 = fmt.Errorf("error1")
+ simpleShardMock = ®istryMocks.ClusterShardStore{}
+ )
+ shardMockWithoutErr := ®istryMocks.ClusterShardStore{}
+ shardMockWithoutErr.On(
+ "RemoveClusterFromShard",
+ mock.AnythingOfType("string"),
+ mock.AnythingOfType("string")).Return(nil)
+ shardMockWithErr := ®istryMocks.ClusterShardStore{}
+ shardMockWithErr.On(
+ "RemoveClusterFromShard",
+ mock.AnythingOfType("string"),
+ mock.AnythingOfType("string")).Return(err1)
+ cases := []struct {
+ name string
+ stateSyncerMode bool
+ cluster string
+ shard string
+ clusterShardStoreHandler *registryMocks.ClusterShardStore
+ clusterShardStoreHandlerCalls int
+ wantErr error
+ }{
+ {
+ name: "Given state syncer mode is set to false, " +
+ "When function is invoked, " +
+ "It should not invoke cluster shard store handler, and should return nil",
+ cluster: cluster1,
+ shard: shard1,
+ clusterShardStoreHandler: simpleShardMock,
+ clusterShardStoreHandlerCalls: 0,
+ wantErr: nil,
+ },
+ {
+ name: "Given state syncer mode is set to true, " +
+ "When function is invoked, " +
+ "And RemoveClusterFromShard returns an error, " +
+ "It should return an error",
+ stateSyncerMode: true,
+ cluster: cluster1,
+ shard: shard1,
+ clusterShardStoreHandler: shardMockWithErr,
+ clusterShardStoreHandlerCalls: 1,
+ wantErr: err1,
+ },
+ {
+ name: "Given state syncer mode is set to true, " +
+ "When function is invoked, " +
+ "And RemoveClusterFromShard does not return any error , " +
+ "It should not return any error",
+ stateSyncerMode: true,
+ cluster: cluster1,
+ shard: shard1,
+ clusterShardStoreHandler: shardMockWithoutErr,
+ clusterShardStoreHandlerCalls: 1,
+ wantErr: nil,
+ },
+ }
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ common.ResetSync()
+ common.InitializeConfig(common.AdmiralParams{
+ AdmiralStateSyncerMode: c.stateSyncerMode,
+ })
+ controller := &Controller{
+ clusterShardStoreHandler: c.clusterShardStoreHandler,
+ }
+ err := controller.removeClusterFromShard(c.cluster, c.shard)
+ if !reflect.DeepEqual(err, c.wantErr) {
+ t.Errorf("want=%v, got=%v", c.wantErr, err)
+ }
+ assert.Equal(t, len(c.clusterShardStoreHandler.ExpectedCalls), c.clusterShardStoreHandlerCalls)
+ })
+ }
+}
From 5a0c27877bf3973e27b2d2792c7cba1c10ec1dbb Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:06:27 -0700
Subject: [PATCH 122/235] copy migration.go from main branch
---
admiral/pkg/controller/util/migration.go | 59 ++++++++++++++++++++++++
1 file changed, 59 insertions(+)
create mode 100644 admiral/pkg/controller/util/migration.go
diff --git a/admiral/pkg/controller/util/migration.go b/admiral/pkg/controller/util/migration.go
new file mode 100644
index 00000000..0357afeb
--- /dev/null
+++ b/admiral/pkg/controller/util/migration.go
@@ -0,0 +1,59 @@
+package util
+
+import (
+ "fmt"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ networking "istio.io/api/networking/v1alpha3"
+ k8sV1 "k8s.io/api/core/v1"
+)
+
+// UpdateEndpointsForDeployToRolloutMigration creates an SE with the endpoints for both the rollout and deployment
+// This is for Deployment <-> Rollout migration
+func UpdateEndpointsForDeployToRolloutMigration(serviceInstance map[string]*k8sV1.Service,
+ serviceEntry *networking.ServiceEntry, meshPorts map[string]map[string]uint32, clusterIngress string,
+ clusterAppDeleteMap map[string]string, clusterName string,
+ clusterDeployRolloutPresent map[string]map[string]bool) error {
+ if serviceInstance[common.Deployment] == nil || serviceInstance[common.Rollout] == nil {
+ return fmt.Errorf("serviceInstance for Deployment/Rollout is nil as the service cache has not updated yet")
+ }
+
+ deployLocalFqdn := serviceInstance[common.Deployment].Name + common.Sep + serviceInstance[common.Deployment].Namespace + common.GetLocalDomainSuffix()
+ rolloutFqdn := serviceInstance[common.Rollout].Name + common.Sep + serviceInstance[common.Rollout].Namespace + common.GetLocalDomainSuffix()
+
+ var uniqueEndpointsList []*networking.WorkloadEntry
+ for _, ep := range serviceEntry.Endpoints {
+ // only if the ep.Address is equal to clusterIngress do we append the deployment
+ // and rollout endpoint for add and update events.
+ // For delete events we check for which cluster did we get the event for and then
+ // decide which cluster to remove the deployment or rollout endpoint for.
+ if ep.Address == clusterIngress {
+ if clusterAppDeleteMap[clusterName] != common.Deployment && clusterDeployRolloutPresent[clusterName][common.Deployment] {
+ deployEp := &networking.WorkloadEntry{
+ Address: deployLocalFqdn,
+ Locality: ep.Locality,
+ Ports: meshPorts[common.Deployment],
+ Labels: map[string]string{"type": common.Deployment},
+ }
+ uniqueEndpointsList = append(uniqueEndpointsList, deployEp)
+ }
+
+ if clusterAppDeleteMap[clusterName] != common.Rollout && clusterDeployRolloutPresent[clusterName][common.Rollout] {
+ rolloutEp := &networking.WorkloadEntry{
+ Address: rolloutFqdn,
+ Locality: ep.Locality,
+ Ports: meshPorts[common.Rollout],
+ Labels: map[string]string{"type": common.Rollout},
+ }
+ uniqueEndpointsList = append(uniqueEndpointsList, rolloutEp)
+ }
+ } else {
+ ep.Labels = nil
+ uniqueEndpointsList = append(uniqueEndpointsList, ep)
+ }
+ }
+
+ serviceEntry.Endpoints = uniqueEndpointsList
+
+ return nil
+}
From 38921ae0bff85609531c718fabc85fbdd868938f Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:07:08 -0700
Subject: [PATCH 123/235] copy migration_test.go from main branch
---
admiral/pkg/controller/util/migration_test.go | 272 ++++++++++++++++++
1 file changed, 272 insertions(+)
create mode 100644 admiral/pkg/controller/util/migration_test.go
diff --git a/admiral/pkg/controller/util/migration_test.go b/admiral/pkg/controller/util/migration_test.go
new file mode 100644
index 00000000..aee81971
--- /dev/null
+++ b/admiral/pkg/controller/util/migration_test.go
@@ -0,0 +1,272 @@
+package util
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "github.com/stretchr/testify/assert"
+ networking "istio.io/api/networking/v1alpha3"
+ coreV1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestUpdateEndpointsForDeployToRolloutMigration(t *testing.T) {
+ var (
+ foobarMetadataName = "foobar"
+ foobarMetadataNamespace = "foobar-ns"
+ identity = "identity"
+ meshPorts = make(map[string]map[string]uint32)
+ serviceInstanceDeployNil = make(map[string]*coreV1.Service)
+ serviceInstanceRolloutNil = make(map[string]*coreV1.Service)
+ serviceInstance = make(map[string]*coreV1.Service)
+ clusterName = "test-k8s"
+ )
+
+ localAddress := common.LocalAddressPrefix + ".10.1"
+
+ seDeployment := &networking.ServiceEntry{
+ Hosts: []string{"e2e.my-first-service.mesh"},
+ Addresses: []string{localAddress},
+ Ports: []*networking.ServicePort{{Number: uint32(common.DefaultServiceEntryPort),
+ Name: "http", Protocol: "http"}},
+ Location: networking.ServiceEntry_MESH_INTERNAL,
+ Resolution: networking.ServiceEntry_DNS,
+ SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
+ Endpoints: []*networking.WorkloadEntry{
+ {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2", Labels: map[string]string{"type": common.Deployment}},
+ },
+ }
+
+ seRollout := &networking.ServiceEntry{
+ Hosts: []string{"e2e.my-first-service.mesh"},
+ Addresses: []string{localAddress},
+ Ports: []*networking.ServicePort{{Number: uint32(common.DefaultServiceEntryPort),
+ Name: "http", Protocol: "http"}},
+ Location: networking.ServiceEntry_MESH_INTERNAL,
+ Resolution: networking.ServiceEntry_DNS,
+ SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
+ Endpoints: []*networking.WorkloadEntry{
+ {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2", Labels: map[string]string{"type": common.Rollout}},
+ },
+ }
+
+ seDeployAndRolloutSingleCluster := &networking.ServiceEntry{
+ Hosts: []string{"e2e.my-first-service.mesh"},
+ Addresses: []string{localAddress},
+ Ports: []*networking.ServicePort{{Number: uint32(common.DefaultServiceEntryPort),
+ Name: "http", Protocol: "http"}},
+ Location: networking.ServiceEntry_MESH_INTERNAL,
+ Resolution: networking.ServiceEntry_DNS,
+ SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
+ Endpoints: []*networking.WorkloadEntry{
+ {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2", Labels: map[string]string{"type": common.Rollout}},
+ },
+ }
+
+ seDeployAndRolloutMulticluster := &networking.ServiceEntry{
+ Hosts: []string{"e2e.my-first-service.mesh"},
+ Addresses: []string{localAddress},
+ Ports: []*networking.ServicePort{{Number: uint32(common.DefaultServiceEntryPort),
+ Name: "http", Protocol: "http"}},
+ Location: networking.ServiceEntry_MESH_INTERNAL,
+ Resolution: networking.ServiceEntry_DNS,
+ SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
+ Endpoints: []*networking.WorkloadEntry{
+ {Address: "east.elb.aws.com", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2", Labels: map[string]string{"type": common.Deployment}},
+ {Address: "west.elb.aws.com", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2", Labels: map[string]string{"type": common.Rollout}},
+ },
+ }
+
+ seDeployAndRolloutMulticluster1 := seDeployAndRolloutMulticluster.DeepCopy()
+ seDeployAndRolloutMulticluster2 := seDeployAndRolloutMulticluster.DeepCopy()
+
+ service := &coreV1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: foobarMetadataName,
+ Namespace: foobarMetadataNamespace,
+ },
+ Spec: coreV1.ServiceSpec{
+ Selector: map[string]string{"app": identity},
+ Ports: []coreV1.ServicePort{
+ {
+ Name: "http",
+ Port: 8090,
+ },
+ },
+ },
+ }
+
+ meshPorts[common.Deployment] = map[string]uint32{"http": uint32(8090)}
+ meshPorts[common.Rollout] = map[string]uint32{"http": uint32(8090)}
+
+ serviceInstanceDeployNil[common.Deployment] = nil
+ serviceInstanceRolloutNil[common.Rollout] = nil
+ serviceInstance[common.Deployment] = service
+ serviceInstance[common.Rollout] = service
+
+ clusterDeployRolloutPresent := make(map[string]map[string]bool)
+ clusterDeployRolloutPresent[clusterName] = make(map[string]bool)
+ clusterDeployRolloutPresent[clusterName][common.Deployment] = true
+ clusterDeployRolloutPresent[clusterName][common.Rollout] = true
+
+ testCases := []struct {
+ name string
+ serviceInstance map[string]*coreV1.Service
+ serviceEntry *networking.ServiceEntry
+ clusterAppDeleteMap map[string]string
+ clusterIngress string
+ clusterDeployRolloutPresent map[string]map[string]bool
+ expectedSeEndpoints []*networking.WorkloadEntry
+ expectedErr error
+ }{
+ {
+ name: "Given service resource for the deployment type is nil," +
+ "Then there is an error returned",
+ serviceInstance: serviceInstanceDeployNil,
+ serviceEntry: seDeployment,
+ clusterAppDeleteMap: nil,
+ clusterIngress: "dummy.admiral.global",
+ clusterDeployRolloutPresent: nil,
+ expectedSeEndpoints: nil,
+ expectedErr: fmt.Errorf("serviceInstance for Deployment/Rollout is nil as the service cache has not updated yet"),
+ },
+ {
+ name: "Given service resource for the rollout type is nil," +
+ "Then there is an error returned",
+ serviceInstance: serviceInstanceRolloutNil,
+ serviceEntry: seRollout,
+ clusterAppDeleteMap: nil,
+ clusterIngress: "dummy.admiral.global",
+ clusterDeployRolloutPresent: nil,
+ expectedSeEndpoints: nil,
+ expectedErr: fmt.Errorf("serviceInstance for Deployment/Rollout is nil as the service cache has not updated yet"),
+ },
+ {
+ name: "Given all valid parameters," +
+ "And there is a deployment and rollout in a single cluster," +
+ "Then there is no error returned and 2 endpoints for deployment and rollout",
+ serviceInstance: serviceInstance,
+ serviceEntry: seDeployAndRolloutSingleCluster,
+ clusterIngress: "dummy.admiral.global",
+ clusterAppDeleteMap: nil,
+ clusterDeployRolloutPresent: clusterDeployRolloutPresent,
+ expectedSeEndpoints: []*networking.WorkloadEntry{
+ {
+ Address: "foobar.foobar-ns.svc.cluster.local",
+ Locality: "us-west-2",
+ Ports: meshPorts[common.Deployment],
+ Labels: map[string]string{"type": common.Deployment},
+ },
+ {
+ Address: "foobar.foobar-ns.svc.cluster.local",
+ Locality: "us-west-2",
+ Ports: meshPorts[common.Rollout],
+ Labels: map[string]string{"type": common.Rollout},
+ },
+ },
+ expectedErr: nil,
+ },
+ {
+ name: "Given all valid parameters," +
+ "And there is a deployment and rollout in a multi cluster," +
+ "Then there is no error returned and 3 endpoints for deployment, rollout and LB",
+ serviceInstance: serviceInstance,
+ serviceEntry: seDeployAndRolloutMulticluster,
+ clusterIngress: "east.elb.aws.com",
+ clusterAppDeleteMap: nil,
+ clusterDeployRolloutPresent: clusterDeployRolloutPresent,
+ expectedSeEndpoints: []*networking.WorkloadEntry{
+ {
+ Address: "foobar.foobar-ns.svc.cluster.local",
+ Locality: "us-east-2",
+ Ports: meshPorts[common.Deployment],
+ Labels: map[string]string{"type": common.Deployment},
+ },
+ {
+ Address: "foobar.foobar-ns.svc.cluster.local",
+ Locality: "us-east-2",
+ Ports: meshPorts[common.Rollout],
+ Labels: map[string]string{"type": common.Rollout},
+ },
+ {
+ Address: "west.elb.aws.com",
+ Locality: "us-west-2",
+ Ports: map[string]uint32{"http": 0},
+ },
+ },
+ expectedErr: nil,
+ },
+ {
+ name: "Given all valid parameters," +
+ "And there is a deployment and rollout in a multi cluster," +
+ "And there is a delete for a deployment in one of the cluster," +
+ "When we are computing the SE for the source cluster," +
+ "Then there is no error returned and 2 endpoints for rollout and LB in that cluster",
+ serviceInstance: serviceInstance,
+ serviceEntry: seDeployAndRolloutMulticluster2,
+ clusterIngress: "east.elb.aws.com",
+ clusterAppDeleteMap: map[string]string{"test-k8s": common.Deployment},
+ clusterDeployRolloutPresent: clusterDeployRolloutPresent,
+ expectedSeEndpoints: []*networking.WorkloadEntry{
+ {
+ Address: "foobar.foobar-ns.svc.cluster.local",
+ Locality: "us-east-2",
+ Ports: meshPorts[common.Rollout],
+ Labels: map[string]string{"type": common.Rollout},
+ },
+ {
+ Address: "west.elb.aws.com",
+ Locality: "us-west-2",
+ Ports: map[string]uint32{"http": 0},
+ },
+ },
+ expectedErr: nil,
+ },
+ {
+ name: "Given all valid parameters," +
+ "And there is a deployment and rollout in a multi cluster," +
+ "And there is a delete for a deployment in one of the cluster," +
+ "When we are computing the SE for the other cluster," +
+ "Then there is no error returned and still 3 endpoints for deployment, rollout and LB",
+ serviceInstance: serviceInstance,
+ serviceEntry: seDeployAndRolloutMulticluster1,
+ clusterIngress: "east.elb.aws.com",
+ clusterAppDeleteMap: nil,
+ clusterDeployRolloutPresent: clusterDeployRolloutPresent,
+ expectedSeEndpoints: []*networking.WorkloadEntry{
+ {
+ Address: "foobar.foobar-ns.svc.cluster.local",
+ Locality: "us-east-2",
+ Ports: meshPorts[common.Deployment],
+ Labels: map[string]string{"type": common.Deployment},
+ },
+ {
+ Address: "foobar.foobar-ns.svc.cluster.local",
+ Locality: "us-east-2",
+ Ports: meshPorts[common.Rollout],
+ Labels: map[string]string{"type": common.Rollout},
+ },
+ {
+ Address: "west.elb.aws.com",
+ Locality: "us-west-2",
+ Ports: map[string]uint32{"http": 0},
+ },
+ },
+ expectedErr: nil,
+ },
+ }
+
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ err := UpdateEndpointsForDeployToRolloutMigration(c.serviceInstance, c.serviceEntry, meshPorts, c.clusterIngress, c.clusterAppDeleteMap, clusterName, c.clusterDeployRolloutPresent)
+ assert.Equal(t, c.expectedErr, err)
+ if err == nil {
+ if !reflect.DeepEqual(c.expectedSeEndpoints, c.serviceEntry.Endpoints) {
+ t.Errorf("Expected endpoints: %v, got: %v", c.expectedSeEndpoints, c.serviceEntry.Endpoints)
+ }
+ }
+ })
+ }
+}
From b2b1449c684f6b02c13371daa0835d41225a17c3 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:07:45 -0700
Subject: [PATCH 124/235] copy util.go from main branch
---
admiral/pkg/controller/util/util.go | 27 +++++++++++++++++++++++++--
1 file changed, 25 insertions(+), 2 deletions(-)
diff --git a/admiral/pkg/controller/util/util.go b/admiral/pkg/controller/util/util.go
index ff603927..e5ad56ce 100644
--- a/admiral/pkg/controller/util/util.go
+++ b/admiral/pkg/controller/util/util.go
@@ -1,9 +1,11 @@
package util
import (
- log "github.com/sirupsen/logrus"
"reflect"
"time"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ log "github.com/sirupsen/logrus"
)
func MapCopy(dst, src interface{}) {
@@ -46,6 +48,27 @@ func LogElapsedTime(op, identity, env, clusterId string) func() {
}
}
+func LogElapsedTimeController(logger *log.Entry, logMessage string) func() {
+ start := time.Now()
+ return func() {
+ logger.Infof("%s txTime=%v",
+ logMessage,
+ time.Since(start).Milliseconds())
+ }
+}
+
+func LogElapsedTimeForModifySE(logger *log.Entry, op, name, namespace, cluster, message string) func() {
+ start := time.Now()
+ return func() {
+ LogElapsedTimeSinceForModifySE(logger, op, name, namespace, cluster, message, start)
+ }
+}
+
+func LogElapsedTimeSinceForModifySE(logger *log.Entry, op, name, namespace, cluster, message string, start time.Time) {
+ // op=%v name=%v namespace=%s cluster=%s message=%v txId=%v
+ logger.Infof(common.CtxLogFormatWithTime, op, name, namespace, cluster, message, time.Since(start).Milliseconds())
+}
+
func LogElapsedTimeSince(op, identity, env, clusterId string, start time.Time) {
- log.Infof("op=%s identity=%s env=%s cluster=%s time=%v\n", op, identity, env, clusterId, time.Since(start).Milliseconds())
+ log.Infof("op=%s identity=%s env=%s cluster=%s txTime=%v", op, identity, env, clusterId, time.Since(start).Milliseconds())
}
From 5ae6c665f4b4288fd0c0cd545ccfe77bdae9ec12 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:08:11 -0700
Subject: [PATCH 125/235] copy util_test.go from main branch
---
admiral/pkg/controller/util/util_test.go | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/admiral/pkg/controller/util/util_test.go b/admiral/pkg/controller/util/util_test.go
index 5e07eb8f..7e9afcd9 100644
--- a/admiral/pkg/controller/util/util_test.go
+++ b/admiral/pkg/controller/util/util_test.go
@@ -1,8 +1,12 @@
package util
import (
+ "bytes"
"reflect"
"testing"
+
+ log "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
)
func TestCopyMap(t *testing.T) {
@@ -81,6 +85,12 @@ func TestSubset(t *testing.T) {
m2: m2,
result: false,
},
+ {
+ name: "non-empty m1 is not a subset of non-empty m2 due to value mis-match",
+ m1: map[string]string{"env": "e2e", "version": "v1"},
+ m2: map[string]string{"env": "stage", "version": "v1"},
+ result: false,
+ },
}
for _, c := range testCases {
@@ -128,3 +138,14 @@ func TestContains(t *testing.T) {
})
}
}
+
+func TestLogElapsedTime(t *testing.T) {
+ logFunc := LogElapsedTime("test_op", "test_identity", "test_env", "test_clusterId")
+ oldOut := log.StandardLogger().Out
+ buf := bytes.Buffer{}
+ log.SetOutput(&buf)
+ logFunc()
+
+ assert.Contains(t, buf.String(), "op=test_op identity=test_identity env=test_env cluster=test_clusterId txTime=")
+ log.SetOutput(oldOut)
+}
From 4acc5e40dc9fde0f31006428b60004603343a571 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:09:01 -0700
Subject: [PATCH 126/235] copy clusterIdentity.go from main branch
---
admiral/pkg/registry/clusterIdentity.go | 106 ++++++++++++++++++++++++
1 file changed, 106 insertions(+)
create mode 100644 admiral/pkg/registry/clusterIdentity.go
diff --git a/admiral/pkg/registry/clusterIdentity.go b/admiral/pkg/registry/clusterIdentity.go
new file mode 100644
index 00000000..49dea026
--- /dev/null
+++ b/admiral/pkg/registry/clusterIdentity.go
@@ -0,0 +1,106 @@
+package registry
+
+import (
+ "fmt"
+ "sync"
+)
+
// ClusterIdentityStore stores mapping of identity and
// the cluster in which resources for them need to be
// created
type ClusterIdentityStore interface {
	AddUpdateIdentityToCluster(identity ClusterIdentity, clusterName string) error
	RemoveIdentityToCluster(identity ClusterIdentity, clusterName string) error
	GetAllIdentitiesForCluster(clusterName string) (IdentityStore, error)
	AddIdentityConfiguration() error
}

// clusterIdentityStoreHandler is the default mutex-guarded implementation
// of ClusterIdentityStore.
type clusterIdentityStoreHandler struct {
	store clusterStore
}

// ClusterIdentity names an asset and records whether it is a source identity.
type ClusterIdentity struct {
	IdentityName   string
	SourceIdentity bool
}

// NewClusterIdentity constructs a ClusterIdentity value.
func NewClusterIdentity(name string, sourceIdentity bool) ClusterIdentity {
	return ClusterIdentity{
		IdentityName:   name,
		SourceIdentity: sourceIdentity,
	}
}

// IdentityStore holds the identities known for a single cluster,
// keyed by identity name.
type IdentityStore struct {
	Store map[string]ClusterIdentity
}

// clusterStore maps cluster name -> IdentityStore; all access must go through
// mutex because readers and writers run concurrently.
type clusterStore struct {
	cache map[string]IdentityStore
	mutex *sync.RWMutex
}

func newClusterStore() clusterStore {
	return clusterStore{
		cache: make(map[string]IdentityStore),
		mutex: &sync.RWMutex{},
	}
}

// NewClusterIdentityStoreHandler returns an empty, ready-to-use store.
func NewClusterIdentityStoreHandler() *clusterIdentityStoreHandler {
	return &clusterIdentityStoreHandler{
		store: newClusterStore(),
	}
}

// AddUpdateIdentityToCluster records (or overwrites) identity under clusterName.
func (s *clusterIdentityStoreHandler) AddUpdateIdentityToCluster(identity ClusterIdentity, clusterName string) error {
	return s.addUpdateCache(identity, clusterName)
}

// RemoveIdentityToCluster removes identity from clusterName's store; removing
// from an unknown cluster is a no-op.
func (s *clusterIdentityStoreHandler) RemoveIdentityToCluster(identity ClusterIdentity, clusterName string) error {
	return s.deleteCache(identity, clusterName)
}

// GetAllIdentitiesForCluster returns the IdentityStore for clusterName, or an
// error when the cluster name is empty or has no record.
func (s *clusterIdentityStoreHandler) GetAllIdentitiesForCluster(clusterName string) (IdentityStore, error) {
	if clusterName == "" {
		return IdentityStore{}, fmt.Errorf("empty cluster name=''")
	}
	// Hold the read lock: addUpdateCache/deleteCache mutate the cache map
	// under the write lock, so an unguarded read here would be a data race.
	s.store.mutex.RLock()
	defer s.store.mutex.RUnlock()
	cache, ok := s.store.cache[clusterName]
	if !ok {
		return IdentityStore{}, fmt.Errorf("no record for cluster=%s", clusterName)
	}
	return cache, nil
}

// AddIdentityConfiguration is currently a no-op placeholder.
func (s *clusterIdentityStoreHandler) AddIdentityConfiguration() error {
	return nil
}

// addUpdateCache inserts identity into clusterName's IdentityStore, creating
// the per-cluster store on first use.
func (s *clusterIdentityStoreHandler) addUpdateCache(identity ClusterIdentity, clusterName string) error {
	s.store.mutex.Lock()
	defer s.store.mutex.Unlock()
	cache, ok := s.store.cache[clusterName]
	if !ok {
		s.store.cache[clusterName] = IdentityStore{
			Store: map[string]ClusterIdentity{
				identity.IdentityName: identity,
			},
		}
		return nil
	}
	cache.Store[identity.IdentityName] = identity
	return nil
}

// deleteCache removes identity from clusterName's IdentityStore if present.
func (s *clusterIdentityStoreHandler) deleteCache(identity ClusterIdentity, clusterName string) error {
	s.store.mutex.Lock()
	defer s.store.mutex.Unlock()
	cache, ok := s.store.cache[clusterName]
	if !ok {
		return nil
	}
	delete(cache.Store, identity.IdentityName)
	s.store.cache[clusterName] = cache
	return nil
}
From 8aaa1d2f94bb8e53200132d6b1e6726ae68fc667 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:09:38 -0700
Subject: [PATCH 127/235] copy clusterShard.go from main branch
---
admiral/pkg/registry/clusterShard.go | 28 ++++++++++++++++++++++++++++
1 file changed, 28 insertions(+)
create mode 100644 admiral/pkg/registry/clusterShard.go
diff --git a/admiral/pkg/registry/clusterShard.go b/admiral/pkg/registry/clusterShard.go
new file mode 100644
index 00000000..042728ca
--- /dev/null
+++ b/admiral/pkg/registry/clusterShard.go
@@ -0,0 +1,28 @@
+package registry
+
// ClusterShardStore stores mapping of clusters
// and the shard they belong to
type ClusterShardStore interface {
	AddClusterToShard(cluster, shard string) error
	RemoveClusterFromShard(cluster, shard string) error
	AddAllClustersToShard(clusters []string, shard string) error
}

// clusterShardStoreHandler is a stub implementation of ClusterShardStore;
// every operation currently succeeds without doing any work.
type clusterShardStoreHandler struct{}

// NewClusterShardStoreHandler returns a new stub shard-store handler.
func NewClusterShardStoreHandler() *clusterShardStoreHandler {
	return &clusterShardStoreHandler{}
}

// AddClusterToShard is a no-op placeholder.
func (c *clusterShardStoreHandler) AddClusterToShard(cluster, shard string) error { return nil }

// RemoveClusterFromShard is a no-op placeholder.
func (c *clusterShardStoreHandler) RemoveClusterFromShard(cluster, shard string) error { return nil }

// AddAllClustersToShard is a no-op placeholder.
func (c *clusterShardStoreHandler) AddAllClustersToShard(clusters []string, shard string) error {
	return nil
}
From d9c70faea7e53b329ec4c6ef4c81cda3906955b9 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:10:11 -0700
Subject: [PATCH 128/235] copy clusterShard_test.go from main branch
---
admiral/pkg/registry/clusterShard_test.go | 1 +
1 file changed, 1 insertion(+)
create mode 100644 admiral/pkg/registry/clusterShard_test.go
diff --git a/admiral/pkg/registry/clusterShard_test.go b/admiral/pkg/registry/clusterShard_test.go
new file mode 100644
index 00000000..b2a276fb
--- /dev/null
+++ b/admiral/pkg/registry/clusterShard_test.go
@@ -0,0 +1 @@
+package registry
From 578e39aac0e928d95c4b09b86d7a873111ecc893 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:10:42 -0700
Subject: [PATCH 129/235] copy clusterdentity_test.go from main branch
---
admiral/pkg/registry/clusterdentity_test.go | 1 +
1 file changed, 1 insertion(+)
create mode 100644 admiral/pkg/registry/clusterdentity_test.go
diff --git a/admiral/pkg/registry/clusterdentity_test.go b/admiral/pkg/registry/clusterdentity_test.go
new file mode 100644
index 00000000..b2a276fb
--- /dev/null
+++ b/admiral/pkg/registry/clusterdentity_test.go
@@ -0,0 +1 @@
+package registry
From 18c2af77cdf2b1b5f9313e3370fa82e9d8d97fc5 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:11:25 -0700
Subject: [PATCH 130/235] copy configSyncer.go from main branch
---
admiral/pkg/registry/configSyncer.go | 40 ++++++++++++++++++++++++++++
1 file changed, 40 insertions(+)
create mode 100644 admiral/pkg/registry/configSyncer.go
diff --git a/admiral/pkg/registry/configSyncer.go b/admiral/pkg/registry/configSyncer.go
new file mode 100644
index 00000000..df0e03c7
--- /dev/null
+++ b/admiral/pkg/registry/configSyncer.go
@@ -0,0 +1,40 @@
+package registry
+
// ConfigSyncer defines the sync operations for workload resources
// (deployments, services) and admiral/argo custom resources.
type ConfigSyncer interface {
	SyncDeployment() error
	SyncService() error

	// argo custom resources
	SyncArgoRollout() error

	// admiral custom resources
	SyncGlobalTrafficPolicy() error
	SyncClientConnectionConfigurations() error
	SyncOutlierDetectionConfigurations() error
}

// configSync is a stub ConfigSyncer; every sync operation is currently a no-op.
type configSync struct{}

// NewConfigSync returns a new stub ConfigSyncer implementation.
func NewConfigSync() *configSync {
	return &configSync{}
}

// SyncDeployment is a no-op placeholder.
func (c *configSync) SyncDeployment() error { return nil }

// SyncService is a no-op placeholder.
func (c *configSync) SyncService() error { return nil }

// SyncArgoRollout is a no-op placeholder.
func (c *configSync) SyncArgoRollout() error { return nil }

// SyncGlobalTrafficPolicy is a no-op placeholder.
func (c *configSync) SyncGlobalTrafficPolicy() error { return nil }

// SyncClientConnectionConfigurations is a no-op placeholder.
func (c *configSync) SyncClientConnectionConfigurations() error { return nil }

// SyncOutlierDetectionConfigurations is a no-op placeholder.
func (c *configSync) SyncOutlierDetectionConfigurations() error { return nil }
From c54bac50830b12416409f93a9187c866764989a5 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:12:20 -0700
Subject: [PATCH 131/235] copy ClusterShardStore.go from main branch
---
.../pkg/registry/mocks/ClusterShardStore.go | 66 +++++++++++++++++++
1 file changed, 66 insertions(+)
create mode 100644 admiral/pkg/registry/mocks/ClusterShardStore.go
diff --git a/admiral/pkg/registry/mocks/ClusterShardStore.go b/admiral/pkg/registry/mocks/ClusterShardStore.go
new file mode 100644
index 00000000..4f0d5966
--- /dev/null
+++ b/admiral/pkg/registry/mocks/ClusterShardStore.go
@@ -0,0 +1,66 @@
+// Code generated by mockery v2.37.1. DO NOT EDIT.
+
+package mocks
+
+import mock "github.com/stretchr/testify/mock"
+
+// ClusterShardStore is an autogenerated mock type for the ClusterShardStore type
+type ClusterShardStore struct {
+ mock.Mock
+}
+
+// AddAllClustersToShard provides a mock function with given fields: clusters, shard
+func (_m *ClusterShardStore) AddAllClustersToShard(clusters []string, shard string) error {
+ ret := _m.Called(clusters, shard)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func([]string, string) error); ok {
+ r0 = rf(clusters, shard)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// AddClusterToShard provides a mock function with given fields: cluster, shard
+func (_m *ClusterShardStore) AddClusterToShard(cluster string, shard string) error {
+ ret := _m.Called(cluster, shard)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(string, string) error); ok {
+ r0 = rf(cluster, shard)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// RemoveClusterFromShard provides a mock function with given fields: cluster, shard
+func (_m *ClusterShardStore) RemoveClusterFromShard(cluster string, shard string) error {
+ ret := _m.Called(cluster, shard)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(string, string) error); ok {
+ r0 = rf(cluster, shard)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// NewClusterShardStore creates a new instance of ClusterShardStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewClusterShardStore(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *ClusterShardStore {
+ mock := &ClusterShardStore{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
\ No newline at end of file
From 4c56908276e61ddd281a29ddeb5cc2567d34c93d Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:13:03 -0700
Subject: [PATCH 132/235] copy registry.go from main branch
---
admiral/pkg/registry/registry.go | 104 ++++++++++++++++++++++++++++++-
1 file changed, 102 insertions(+), 2 deletions(-)
diff --git a/admiral/pkg/registry/registry.go b/admiral/pkg/registry/registry.go
index 9d11bfb3..67c34583 100644
--- a/admiral/pkg/registry/registry.go
+++ b/admiral/pkg/registry/registry.go
@@ -1,10 +1,110 @@
package registry
+import (
+ "context"
+ "encoding/json"
+ "os"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ networkingV1Alpha3 "istio.io/api/networking/v1alpha3"
+ coreV1 "k8s.io/api/core/v1"
+)
+
// IdentityConfiguration is an interface to fetch configuration from a registry
// backend. The backend can provide an API to give configurations per identity,
// or if given a cluster name, it will provide the configurations for all
// the identities present in that cluster.
type IdentityConfiguration interface {
- GetByIdentityByName(identityAlias string) error
- GetByClusterName(clusterName string) error
+ GetByIdentityName(identityAlias string, ctx context.Context) (IdentityConfig, error)
+ GetByClusterName(clusterName string, ctx context.Context) ([]IdentityConfig, error)
+}
+
// registryClient talks to the registry backend; it is configured through the
// functional options below.
type registryClient struct {
	registryEndpoint string
	operatorCluster  string
}

// NewRegistryClient builds a registryClient, applying each functional option
// in the order given.
func NewRegistryClient(options ...func(client *registryClient)) *registryClient {
	client := &registryClient{}
	for _, option := range options {
		option(client)
	}
	return client
}

// WithRegistryEndpoint sets the registry backend endpoint on the client.
func WithRegistryEndpoint(registryEndpoint string) func(*registryClient) {
	return func(c *registryClient) { c.registryEndpoint = registryEndpoint }
}

// WithOperatorCluster sets the operator cluster name on the client.
func WithOperatorCluster(operatorCluster string) func(*registryClient) {
	return func(c *registryClient) { c.operatorCluster = operatorCluster }
}
+
// IdentityConfig is the registry payload for one asset: its alias and the
// clusters it is deployed to.
type IdentityConfig struct {
	Assetname string                  `json:"assetname"`
	Clusters  []IdentityConfigCluster `json:"clusters"`
}

// IdentityConfigCluster describes one cluster an asset runs in: the ingress
// used to reach it and the environments deployed there.
type IdentityConfigCluster struct {
	Name            string                      `json:"name"`
	Locality        string                      `json:"locality"`
	IngressEndpoint string                      `json:"ingressEndpoint"`
	// IngressPort is carried as a string in the JSON payload and parsed with
	// strconv where a numeric port is needed.
	IngressPort     string                      `json:"ingressPort"`
	IngressPortName string                      `json:"ingressPortName"`
	Environment     []IdentityConfigEnvironment `json:"environment"`
	ClientAssets    []map[string]string         `json:"clientAssets"`
	// Why is clientAssets under cluster? shouldn't it be regardless of cluster??/???
}

// IdentityConfigEnvironment describes one environment (e.g. qal/e2e/prf) of an
// asset on a cluster: its service, selectors, ports, and Istio traffic policy.
type IdentityConfigEnvironment struct {
	Name          string                           `json:"name"`
	Namespace     string                           `json:"namespace"`
	ServiceName   string                           `json:"serviceName"`
	// Type distinguishes the workload kind (e.g. "rollout" in test fixtures).
	Type          string                           `json:"type"`
	Selectors     map[string]string                `json:"selectors"`
	Ports         []coreV1.ServicePort             `json:"ports"`
	TrafficPolicy networkingV1Alpha3.TrafficPolicy `json:"trafficPolicy"`
}
+
// GetByIdentityName calls the registry API to fetch the IdentityConfig for
// the given identityAlias
// NOTE(review): currently reads testdata/<alias>IdentityConfiguration.json
// rather than calling the remote registryEndpoint.
func (c *registryClient) GetByIdentityName(identityAlias string, ctx context.Context) (IdentityConfig, error) {
	//jsonResult = os.request(/asset/identityAlias/configurations)
	ctxLogger := common.GetCtxLogger(ctx, identityAlias, "")
	ctxLogger.Infof(common.CtxLogFormat, "GetByIdentityName", identityAlias, "", c.operatorCluster, "")
	byteValue, err := os.ReadFile("testdata/" + identityAlias + "IdentityConfiguration.json")
	if err != nil {
		// Read errors are logged but deliberately not returned early: the
		// json.Unmarshal of the empty payload below yields the error callers
		// observe (tests match a *json.SyntaxError for a missing file).
		ctxLogger.Infof(common.CtxLogFormat, "GetByIdentityName", identityAlias, "", c.operatorCluster, err)
	}
	var identityConfigUnmarshalResult IdentityConfig
	err = json.Unmarshal(byteValue, &identityConfigUnmarshalResult)
	if err != nil {
		ctxLogger.Infof(common.CtxLogFormat, "GetByIdentityName", identityAlias, "", c.operatorCluster, err)
	}
	return identityConfigUnmarshalResult, err
}
+
// GetByClusterName calls the registry API to fetch the IdentityConfigs for
// every identity on the cluster.
// NOTE(review): the identity list is currently stubbed to just the cluster
// name; the shard-CRD lookup described in the inline comment is not wired up.
func (c *registryClient) GetByClusterName(clusterName string, ctx context.Context) ([]IdentityConfig, error) {
	//jsonResult = os.request(/cluster/{cluster_id}/configurations
	ctxLogger := common.GetCtxLogger(ctx, "", "")
	ctxLogger.Infof(common.CtxLogFormat, "GetByClusterName", "", "", clusterName, "")
	//identities := getIdentitiesForCluster(clusterName) - either queries shard CRD or shard CRD controller calls this func with those as parameters
	identities := []string{clusterName}
	identityConfigs := []IdentityConfig{}
	var err error
	for _, identity := range identities {
		identityConfig, identityErr := c.GetByIdentityName(identity, ctx)
		if identityErr != nil {
			// A failed identity still contributes its zero-valued config below;
			// only the last error encountered is surfaced to the caller.
			err = identityErr
			ctxLogger.Infof(common.CtxLogFormat, "GetByClusterName", "", "", clusterName, identityErr)
		}
		identityConfigs = append(identityConfigs, identityConfig)
	}
	return identityConfigs, err
}
From 7bc5b9c9b032efe84819ce4adc4ec6a3d2de7088 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:13:39 -0700
Subject: [PATCH 133/235] copy registry_test.go from main branch
---
admiral/pkg/registry/registry_test.go | 196 ++++++++++++++++++++++++++
1 file changed, 196 insertions(+)
create mode 100644 admiral/pkg/registry/registry_test.go
diff --git a/admiral/pkg/registry/registry_test.go b/admiral/pkg/registry/registry_test.go
new file mode 100644
index 00000000..7f598c6a
--- /dev/null
+++ b/admiral/pkg/registry/registry_test.go
@@ -0,0 +1,196 @@
+package registry
+
+import (
+ "context"
+ json "encoding/json"
+ "errors"
+ "reflect"
+ "testing"
+
+ "github.com/golang/protobuf/ptypes/duration"
+ "github.com/golang/protobuf/ptypes/wrappers"
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ networkingV1Alpha3 "istio.io/api/networking/v1alpha3"
+ coreV1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+func getSampleIdentityConfigEnvironment(env string, namespace string) IdentityConfigEnvironment {
+ identityConfigEnvironment := IdentityConfigEnvironment{
+ Name: env,
+ Namespace: namespace,
+ ServiceName: "partner-data-to-tax-spk-root-service",
+ Type: "rollout",
+ Selectors: map[string]string{"app": "partner-data-to-tax"},
+ Ports: []coreV1.ServicePort{{Name: "http-service-mesh", Port: int32(8090), Protocol: coreV1.ProtocolTCP, TargetPort: intstr.FromInt(8090)}},
+ TrafficPolicy: networkingV1Alpha3.TrafficPolicy{
+ LoadBalancer: &networkingV1Alpha3.LoadBalancerSettings{
+ LbPolicy: &networkingV1Alpha3.LoadBalancerSettings_Simple{Simple: networkingV1Alpha3.LoadBalancerSettings_LEAST_REQUEST},
+ LocalityLbSetting: &networkingV1Alpha3.LocalityLoadBalancerSetting{
+ Distribute: []*networkingV1Alpha3.LocalityLoadBalancerSetting_Distribute{{
+ From: "*",
+ To: map[string]uint32{"us-west-2": 100},
+ }},
+ },
+ WarmupDurationSecs: &duration.Duration{Seconds: 45},
+ },
+ ConnectionPool: &networkingV1Alpha3.ConnectionPoolSettings{
+ Http: &networkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{
+ Http2MaxRequests: 1000,
+ MaxRequestsPerConnection: 5,
+ },
+ },
+ OutlierDetection: &networkingV1Alpha3.OutlierDetection{
+ ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: 0},
+ Consecutive_5XxErrors: &wrappers.UInt32Value{Value: 0},
+ },
+ },
+ }
+ return identityConfigEnvironment
+}
+
+func getSampleIdentityConfig() IdentityConfig {
+ prfEnv := getSampleIdentityConfigEnvironment("prf", "ctg-taxprep-partnerdatatotax-usw2-prf")
+ e2eEnv := getSampleIdentityConfigEnvironment("e2e", "ctg-taxprep-partnerdatatotax-usw2-e2e")
+ qalEnv := getSampleIdentityConfigEnvironment("qal", "ctg-taxprep-partnerdatatotax-usw2-qal")
+ environments := []IdentityConfigEnvironment{prfEnv, e2eEnv, qalEnv}
+ clientAssets := []map[string]string{{"name": "intuit.cto.dev_portal"}, {"name": "intuit.ctg.tto.browserclient"}, {"name": "intuit.ctg.taxprep.partnerdatatotaxtestclient"}, {"name": "intuit.productmarketing.ipu.pmec"}, {"name": "intuit.tax.taxdev.txo"}, {"name": "intuit.CTO.oauth2"}, {"name": "intuit.platform.servicesgateway.servicesgateway"}, {"name": "intuit.ctg.taxprep.partnerdatatotax"}, {"name": "sample"}}
+ cluster := IdentityConfigCluster{
+ Name: "cg-tax-ppd-usw2-k8s",
+ Locality: "us-west-2",
+ IngressEndpoint: "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.",
+ IngressPort: "15443",
+ IngressPortName: "http",
+ Environment: environments,
+ ClientAssets: clientAssets,
+ }
+ identityConfig := IdentityConfig{
+ Assetname: "Intuit.ctg.taxprep.partnerdatatotax",
+ Clusters: []IdentityConfigCluster{cluster},
+ }
+ return identityConfig
+}
+
+func TestParseIdentityConfigJSON(t *testing.T) {
+ identityConfig := getSampleIdentityConfig()
+ testCases := []struct {
+ name string
+ identityConfig IdentityConfig
+ }{
+ {
+ name: "Given a JSON identity configuration file, " +
+ "When the file is parsed, " +
+ "Then the file should be read into the IdentityConfig struct",
+ identityConfig: identityConfig,
+ },
+ }
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ jsonResult, err := json.MarshalIndent(c.identityConfig, "", " ")
+ if err != nil {
+ t.Errorf("While marshaling IdentityConfig struct into JSON, got error: %s", err)
+ }
+ var identityConfigUnmarshalResult IdentityConfig
+ err = json.Unmarshal(jsonResult, &identityConfigUnmarshalResult)
+ if err != nil {
+ t.Errorf("While unmarshaling JSON into IdentityConfig struct, got error: %s", err)
+ }
+ if !reflect.DeepEqual(identityConfigUnmarshalResult, c.identityConfig) {
+ t.Errorf("Mismatch between original IdentityConfig and unmarshaled IdentityConfig")
+ }
+ })
+ }
+}
+
+func TestGetByIdentityName(t *testing.T) {
+ sampleIdentityConfig := getSampleIdentityConfig()
+ registryClient := NewRegistryClient(WithRegistryEndpoint("endpoint"), WithOperatorCluster("test-k8s"))
+ var jsonErr *json.SyntaxError
+ testCases := []struct {
+ name string
+ expectedIdentityConfig IdentityConfig
+ expectedError any
+ identityAlias string
+ }{
+ {
+ name: "Given an identity, " +
+ "When the identity config JSON is parsed, " +
+ "Then the resulting struct should match the expected config",
+ expectedIdentityConfig: sampleIdentityConfig,
+ expectedError: nil,
+ identityAlias: "sample",
+ },
+ {
+ name: "Given an identity, " +
+ "When the identity config JSON doesn't exist for it, " +
+ "Then there should be a non-nil error",
+ expectedIdentityConfig: IdentityConfig{},
+ expectedError: jsonErr,
+ identityAlias: "failed",
+ },
+ }
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ ctx := context.Background()
+ identityConfig, err := registryClient.GetByIdentityName(c.identityAlias, ctx)
+ if err != nil && c.expectedError == nil {
+ t.Errorf("error while getting identityConfig by name with error: %v", err)
+ } else if err != nil && c.expectedError != nil && !errors.As(err, &c.expectedError) {
+ t.Errorf("failed to get correct error: %v, instead got error: %v", c.expectedError, err)
+ } else {
+ opts := cmpopts.IgnoreUnexported(networkingV1Alpha3.TrafficPolicy{}, networkingV1Alpha3.LoadBalancerSettings{}, networkingV1Alpha3.LocalityLoadBalancerSetting{}, networkingV1Alpha3.LocalityLoadBalancerSetting_Distribute{}, duration.Duration{}, networkingV1Alpha3.ConnectionPoolSettings{}, networkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{}, networkingV1Alpha3.OutlierDetection{}, wrappers.UInt32Value{})
+ if !cmp.Equal(identityConfig, c.expectedIdentityConfig, opts) {
+ t.Errorf("mismatch between parsed JSON file and expected identity config for alias: %s", c.identityAlias)
+ t.Errorf(cmp.Diff(identityConfig, c.expectedIdentityConfig, opts))
+ }
+ }
+ })
+ }
+}
+
+func TestGetByClusterName(t *testing.T) {
+ sampleIdentityConfig := getSampleIdentityConfig()
+ registryClient := NewRegistryClient(WithRegistryEndpoint("endpoint"), WithOperatorCluster("test-k8s"))
+ var jsonErr *json.SyntaxError
+ testCases := []struct {
+ name string
+ expectedIdentityConfig IdentityConfig
+ expectedError any
+ clusterName string
+ }{
+ {
+ name: "Given a cluster name, " +
+ "When all the identity configs for the identities in that cluster are processed, " +
+ "Then the structs returned should match the expected configs",
+ expectedIdentityConfig: sampleIdentityConfig,
+ expectedError: nil,
+ clusterName: "sample",
+ },
+ {
+ name: "Given a cluster name, " +
+ "When there exists no identity config for that cluster, " +
+ "Then there should be a non-nil error",
+ expectedIdentityConfig: IdentityConfig{},
+ expectedError: jsonErr,
+ clusterName: "failed",
+ },
+ }
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ ctx := context.Background()
+ identityConfigs, err := registryClient.GetByClusterName(c.clusterName, ctx)
+ if err != nil && c.expectedError == nil {
+ t.Errorf("error while getting identityConfigs by cluster name with error: %v", err)
+ } else if err != nil && c.expectedError != nil && !errors.As(err, &c.expectedError) {
+ t.Errorf("failed to get correct error: %v, instead got error: %v", c.expectedError, err)
+ } else {
+ opts := cmpopts.IgnoreUnexported(networkingV1Alpha3.TrafficPolicy{}, networkingV1Alpha3.LoadBalancerSettings{}, networkingV1Alpha3.LocalityLoadBalancerSetting{}, networkingV1Alpha3.LocalityLoadBalancerSetting_Distribute{}, duration.Duration{}, networkingV1Alpha3.ConnectionPoolSettings{}, networkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{}, networkingV1Alpha3.OutlierDetection{}, wrappers.UInt32Value{})
+ if !cmp.Equal(identityConfigs[0], c.expectedIdentityConfig, opts) {
+ t.Errorf("mismatch between parsed JSON file and expected identity config for file: %s", c.clusterName)
+ t.Errorf(cmp.Diff(identityConfigs[0], c.expectedIdentityConfig, opts))
+ }
+ }
+ })
+ }
+}
From ca1c853e61f980300e99da14a2f1b86cc49957f5 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:14:23 -0700
Subject: [PATCH 134/235] copy serviceentry.go from main branch
---
admiral/pkg/registry/serviceentry.go | 204 +++++++++++++++++++++++++++
1 file changed, 204 insertions(+)
create mode 100644 admiral/pkg/registry/serviceentry.go
diff --git a/admiral/pkg/registry/serviceentry.go b/admiral/pkg/registry/serviceentry.go
new file mode 100644
index 00000000..d6dc9e79
--- /dev/null
+++ b/admiral/pkg/registry/serviceentry.go
@@ -0,0 +1,204 @@
+package registry
+
+import (
+ "context"
+ "errors"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/util"
+ "github.com/sirupsen/logrus"
+ networkingV1Alpha3 "istio.io/api/networking/v1alpha3"
+)
+
+// IstioSEBuilder is an interface to construct Service Entry objects
+// from IdentityConfig objects. It can construct multiple Service Entries
+// from an IdentityConfig or construct just one given a IdentityConfigEnvironment.
+type IstioSEBuilder interface {
+ BuildServiceEntriesFromIdentityConfig(ctxLogger *logrus.Entry, ctx context.Context, event admiral.EventType, identityConfig IdentityConfig) ([]*networkingV1Alpha3.ServiceEntry, error)
+}
+
+type ServiceEntryBuilder struct {
+ OperatorCluster string
+}
+
// BuildServiceEntriesFromIdentityConfig builds service entries to write to the operator cluster
// by looping through the IdentityConfig clusters and environments to get spec information. It
// builds one SE per environment per cluster the identity is deployed in.
// Only Add and Update events produce SEs; any other event returns an empty slice.
func (b *ServiceEntryBuilder) BuildServiceEntriesFromIdentityConfig(ctxLogger *logrus.Entry, ctx context.Context, event admiral.EventType, identityConfig IdentityConfig) ([]*networkingV1Alpha3.ServiceEntry, error) {
	identity := identityConfig.Assetname
	serviceEntries := []*networkingV1Alpha3.ServiceEntry{}
	var err error
	if event == admiral.Add || event == admiral.Update {
		ctxLogger.Infof(common.CtxLogFormat, "buildServiceEntry", identity, common.GetSyncNamespace(), b.OperatorCluster, "Beginning to build the SE spec")
		ingressEndpoints, ingressErr := getIngressEndpoints(identityConfig.Clusters)
		if ingressErr != nil {
			// Any ingress parse failure is fatal for the whole build; bailing
			// here also keeps ingressEndpoints[i] aligned with Clusters[i] below.
			err = ingressErr
			return serviceEntries, err
		}
		for i, identityConfigCluster := range identityConfig.Clusters {
			sourceCluster := identityConfigCluster.Name
			for _, identityConfigEnvironment := range identityConfigCluster.Environment {
				// ingressEndpoints[i] is the remote (ingress) endpoint of the i-th cluster.
				se, buildErr := buildServiceEntryForClusterByEnv(ctxLogger, ctx, b.OperatorCluster, sourceCluster, identity, identityConfigCluster.ClientAssets, ingressEndpoints, ingressEndpoints[i].Address, identityConfigEnvironment)
				if buildErr != nil {
					// NOTE(review): on buildErr, se is nil and is still appended
					// below; only the last build error is returned. Confirm
					// callers tolerate nil entries in the result slice.
					err = buildErr
				}
				serviceEntries = append(serviceEntries, se)
			}
		}
		return serviceEntries, err
	}
	return serviceEntries, err
}
+
// buildServiceEntryForClusterByEnv builds a service entry based on cluster and IdentityConfigEnvironment information
// to be written to the operator cluster.
// The SE host is <env>.<lowercased identity>.<hostname suffix>; the subject
// alt name is SpiffePrefix + SAN prefix + "/" + identity. Ports, endpoints and
// exportTo namespaces are delegated to the helpers below; any helper error
// aborts the build with a nil SE.
func buildServiceEntryForClusterByEnv(ctxLogger *logrus.Entry, ctx context.Context, operatorCluster string, sourceCluster string, identity string, clientAssets []map[string]string, ingressEndpoints []*networkingV1Alpha3.WorkloadEntry, remoteEndpointAddress string, identityConfigEnvironment IdentityConfigEnvironment) (*networkingV1Alpha3.ServiceEntry, error) {
	ctxLogger.Infof(common.CtxLogFormat, "buildServiceEntry", identity, common.GetSyncNamespace(), operatorCluster, "build the SE spec from IdentityConfigEnvironment")
	env := identityConfigEnvironment.Name
	// Host: env + lowercased identity + configured hostname suffix.
	fqdn := common.GetCnameVal([]string{env, strings.ToLower(identity), common.GetHostnameSuffix()})
	san := common.SpiffePrefix + common.GetSANPrefix() + common.Slash + identity
	ports, err := getServiceEntryPorts(identityConfigEnvironment)
	if err != nil {
		return nil, err
	}
	endpoints, err := getServiceEntryEndpoints(ctxLogger, operatorCluster, sourceCluster, ingressEndpoints, remoteEndpointAddress, identityConfigEnvironment)
	if err != nil {
		return nil, err
	}
	dependentNamespaces, err := getSortedDependentNamespaces(ctxLogger, ctx, operatorCluster, sourceCluster, fqdn, env, clientAssets)
	if err != nil {
		return nil, err
	}
	return &networkingV1Alpha3.ServiceEntry{
		Hosts:           []string{fqdn},
		Ports:           ports,
		Location:        networkingV1Alpha3.ServiceEntry_MESH_INTERNAL,
		Resolution:      networkingV1Alpha3.ServiceEntry_DNS,
		SubjectAltNames: []string{san},
		Endpoints:       endpoints,
		ExportTo:        dependentNamespaces,
	}, err
}
+
// getIngressEndpoint constructs the endpoint of the ingress gateway/remote endpoint for an identity
// by reading the information directly from the IdentityConfigCluster.
// A cluster whose IngressPort fails to parse is skipped and the last parse
// error is returned. The caller (BuildServiceEntriesFromIdentityConfig) treats
// any non-nil error as fatal, so when err == nil the returned slice is
// guaranteed index-aligned with clusters.
func getIngressEndpoints(clusters []IdentityConfigCluster) ([]*networkingV1Alpha3.WorkloadEntry, error) {
	ingressEndpoints := []*networkingV1Alpha3.WorkloadEntry{}
	var err error
	for _, cluster := range clusters {
		// IngressPort arrives as a string in the registry payload.
		portNumber, parseErr := strconv.ParseInt(cluster.IngressPort, 10, 64)
		if parseErr != nil {
			err = parseErr
			continue
		}
		ingressEndpoint := &networkingV1Alpha3.WorkloadEntry{
			Address:  cluster.IngressEndpoint,
			Locality: cluster.Locality,
			Ports:    map[string]uint32{cluster.IngressPortName: uint32(portNumber)},
			Labels:   map[string]string{"security.istio.io/tlsMode": "istio"},
		}
		ingressEndpoints = append(ingressEndpoints, ingressEndpoint)
	}
	return ingressEndpoints, err
}
+
// getServiceEntryPorts constructs the ServicePorts of the service entry that should be built
// for the given identityConfigEnvironment.
// It always returns exactly one port: the default service-entry port, whose
// name/protocol are overridden by the protocol of any port targeting 8090.
// When the environment has no ports at all, the default port is returned
// together with a non-nil error.
func getServiceEntryPorts(identityConfigEnvironment IdentityConfigEnvironment) ([]*networkingV1Alpha3.ServicePort, error) {
	//TODO: Verify this is how ports should be set
	//Find Port with targetPort that matches inbound common.SidecarEnabledPorts
	//Set port name and protocol based on that
	port := &networkingV1Alpha3.ServicePort{Number: uint32(common.DefaultServiceEntryPort), Name: util.Http, Protocol: util.Http}
	var err error
	if len(identityConfigEnvironment.Ports) == 0 {
		err = errors.New("identityConfigEnvironment had no ports for: " + identityConfigEnvironment.Name)
	}
	for _, servicePort := range identityConfigEnvironment.Ports {
		//TODO: 8090 is supposed to be set as the common.SidecarEnabledPorts (includeInboundPorts), and we check that in the rollout, but we don't have that information here
		if servicePort.TargetPort.IntValue() == 8090 {
			protocol := util.GetPortProtocol(servicePort.Name)
			port.Name = protocol
			port.Protocol = protocol
		}
	}
	ports := []*networkingV1Alpha3.ServicePort{port}
	return ports, err
}
+
// getServiceEntryEndpoints constructs the remote or local endpoint of the service entry that
// should be built for the given identityConfigEnvironment.
// Each ingress endpoint is deep-copied before mutation so the caller's shared
// slice is never modified. When writing to the source cluster itself and the
// endpoint is that cluster's remote address, the address is rewritten to the
// in-cluster service DNS name and the mesh port number is substituted in.
func getServiceEntryEndpoints(ctxLogger *logrus.Entry, operatorCluster string, sourceCluster string, ingressEndpoints []*networkingV1Alpha3.WorkloadEntry, remoteEndpointAddress string, identityConfigEnvironment IdentityConfigEnvironment) ([]*networkingV1Alpha3.WorkloadEntry, error) {
	//TODO: Verify Local and Remote Endpoints are constructed correctly
	endpoints := []*networkingV1Alpha3.WorkloadEntry{}
	var err error
	for _, endpoint := range ingressEndpoints {
		tmpEp := endpoint.DeepCopy()
		// Safe to mutate: tmpEp is a deep copy of the shared ingress endpoint.
		tmpEp.Labels["type"] = identityConfigEnvironment.Type
		if operatorCluster == sourceCluster && tmpEp.Address == remoteEndpointAddress {
			//Local Endpoint Address if the identity is deployed on the same cluster as it's client and the endpoint is the remote endpoint for the cluster
			tmpEp.Address = identityConfigEnvironment.ServiceName + common.Sep + identityConfigEnvironment.Namespace + common.GetLocalDomainSuffix()
			for _, servicePort := range identityConfigEnvironment.Ports {
				//There should only be one mesh port here (http-service-mesh), but we are preserving ability to have multiple ports
				protocol := util.GetPortProtocol(servicePort.Name)
				if _, ok := tmpEp.Ports[protocol]; ok {
					tmpEp.Ports[protocol] = uint32(servicePort.Port)
					ctxLogger.Infof(common.CtxLogFormat, "LocalMeshPort", servicePort.Port, "", sourceCluster, "Protocol: "+protocol)
				} else {
					// The copied endpoint's port map is keyed by the ingress
					// port name; a missing key means the protocols disagree.
					err = errors.New("failed to get Port for protocol: " + protocol)
				}
			}
		}
		endpoints = append(endpoints, tmpEp)
	}
	return endpoints, err
}
+
+// getSortedDependentNamespaces constructs a sorted list of unique namespaces for a given cluster, client assets,
+// and cname, where each namespace is where a client asset of the cname is deployed on the cluster. If the cname
+// is also deployed on the cluster then the istio-system namespace is also in the list.
+func getSortedDependentNamespaces(ctxLogger *logrus.Entry, ctx context.Context, operatorCluster string, sourceCluster string, cname string, env string, clientAssets []map[string]string) ([]string, error) {
+ clientNamespaces := []string{}
+ var err error
+ var clientIdentityConfig IdentityConfig
+ for _, clientAsset := range clientAssets {
+ //TODO: Need to do registry client initialization better, maybe pass it in
+ registryClient := NewRegistryClient(WithRegistryEndpoint("endpoint"), WithOperatorCluster(operatorCluster))
+ // For each client asset of cname, we fetch its identityConfig
+ clientIdentityConfig, err = registryClient.GetByIdentityName(clientAsset["name"], ctx)
+ if err != nil {
+ ctxLogger.Infof(common.CtxLogFormat, "buildServiceEntry", cname, common.GetSyncNamespace(), clientAsset["name"], "Failed to fetch IdentityConfig: "+err.Error())
+ continue
+ }
+ for _, clientIdentityConfigCluster := range clientIdentityConfig.Clusters {
+ // For each cluster the client asset is deployed on, we check if that cluster is the operator cluster we are writing to
+ if operatorCluster == clientIdentityConfigCluster.Name {
+ for _, clientIdentityConfigEnvironment := range clientIdentityConfigCluster.Environment {
+ // For each environment of the client asset on the operator cluster, we add the namespace to our list
+ if clientIdentityConfigEnvironment.Name == env {
+ //Do we need to check if ENV matches here for exportTo?
+ clientNamespaces = append(clientNamespaces, clientIdentityConfigEnvironment.Namespace)
+ }
+ }
+ }
+ }
+ }
+ if operatorCluster == sourceCluster {
+ clientNamespaces = append(clientNamespaces, common.NamespaceIstioSystem)
+ }
+ if len(clientNamespaces) > common.GetExportToMaxNamespaces() {
+ clientNamespaces = []string{"*"}
+ ctxLogger.Infof("exceeded max namespaces for cname=%s in cluster=%s", cname, operatorCluster)
+ }
+ sort.Strings(clientNamespaces)
+ var dedupClientNamespaces []string
+ for i := 0; i < len(clientNamespaces); i++ {
+ if i == 0 || clientNamespaces[i] != clientNamespaces[i-1] {
+ dedupClientNamespaces = append(dedupClientNamespaces, clientNamespaces[i])
+ }
+ }
+ return clientNamespaces, err
+}
From 856e045165a53d3a247f9f2d665fdc96a97e6875 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:15:00 -0700
Subject: [PATCH 135/235] copy serviceentry_test.go from main branch
---
admiral/pkg/registry/serviceentry_test.go | 358 ++++++++++++++++++++++
1 file changed, 358 insertions(+)
create mode 100644 admiral/pkg/registry/serviceentry_test.go
diff --git a/admiral/pkg/registry/serviceentry_test.go b/admiral/pkg/registry/serviceentry_test.go
new file mode 100644
index 00000000..92b04969
--- /dev/null
+++ b/admiral/pkg/registry/serviceentry_test.go
@@ -0,0 +1,358 @@
+package registry
+
+import (
+ "context"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/util"
+ networkingV1Alpha3 "istio.io/api/networking/v1alpha3"
+)
+
+func admiralParamsForServiceEntryTests() common.AdmiralParams {
+ return common.AdmiralParams{
+ KubeconfigPath: "testdata/fake.config",
+ LabelSet: &common.LabelSet{
+ GatewayApp: "gatewayapp",
+ WorkloadIdentityKey: "identity",
+ PriorityKey: "priority",
+ EnvKey: "env",
+ AdmiralCRDIdentityLabel: "identity",
+ },
+ EnableSAN: true,
+ SANPrefix: "prefix",
+ HostnameSuffix: "mesh",
+ SyncNamespace: "ns",
+ CacheReconcileDuration: 0,
+ ClusterRegistriesNamespace: "default",
+ DependenciesNamespace: "default",
+ WorkloadSidecarName: "default",
+ Profile: common.AdmiralProfileDefault,
+ DependentClusterWorkerConcurrency: 5,
+ EnableSWAwareNSCaches: true,
+ ExportToIdentityList: []string{"*"},
+ ExportToMaxNamespaces: 35,
+ EnableAbsoluteFQDN: true,
+ EnableAbsoluteFQDNForLocalEndpoints: true,
+ }
+}
+
+func createMockServiceEntry(env string, identity string, endpointAddress string, endpointPort int, exportTo []string) networkingV1Alpha3.ServiceEntry {
+ serviceEntry := networkingV1Alpha3.ServiceEntry{
+ Hosts: []string{env + "." + strings.ToLower(identity) + ".mesh"},
+ Addresses: nil,
+ Ports: []*networkingV1Alpha3.ServicePort{{Number: uint32(common.DefaultServiceEntryPort), Name: util.Http, Protocol: util.Http}},
+ Location: 1,
+ Resolution: 2,
+ Endpoints: []*networkingV1Alpha3.WorkloadEntry{{Address: endpointAddress,
+ Locality: "us-west-2",
+ Ports: map[string]uint32{"http": uint32(endpointPort)},
+ Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "rollout"}}},
+ WorkloadSelector: nil,
+ ExportTo: exportTo,
+ SubjectAltNames: []string{"spiffe://prefix/" + identity},
+ }
+ return serviceEntry
+}
+
+func TestGetIngressEndpoints(t *testing.T) {
+ identityConfig := getSampleIdentityConfig()
+ expectedIngressEndpoints := []*networkingV1Alpha3.WorkloadEntry{{
+ Address: "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.",
+ Locality: "us-west-2",
+ Ports: map[string]uint32{"http": uint32(15443)},
+ Labels: map[string]string{"security.istio.io/tlsMode": "istio"},
+ }}
+ testCases := []struct {
+ name string
+ identityConfigClusters []IdentityConfigCluster
+ expectedIngressEndpoints []*networkingV1Alpha3.WorkloadEntry
+ }{
+ {
+ name: "Given an IdentityConfigCluster, " +
+ "Then the constructed endpoint should be the ingress endpoint",
+ identityConfigClusters: identityConfig.Clusters,
+ expectedIngressEndpoints: expectedIngressEndpoints,
+ },
+ }
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ ingressEndpoints, err := getIngressEndpoints(c.identityConfigClusters)
+ if err != nil {
+ t.Errorf("While constructing ingressEndpoint, got error: %v", err)
+ }
+ if !reflect.DeepEqual(ingressEndpoints, c.expectedIngressEndpoints) {
+ t.Errorf("Mismatch between constructed ingressEndpoint and expected ingressEndpoint")
+ }
+ })
+ }
+}
+
+func TestGetServiceEntryPorts(t *testing.T) {
+ e2eEnv := getSampleIdentityConfigEnvironment("e2e", "ctg-taxprep-partnerdatatotax-usw2-e2e")
+ expectedSEPorts := []*networkingV1Alpha3.ServicePort{{Number: uint32(common.DefaultServiceEntryPort), Name: util.Http, Protocol: util.Http}}
+ testCases := []struct {
+ name string
+ identityConfigEnvironment IdentityConfigEnvironment
+ expectedSEPorts []*networkingV1Alpha3.ServicePort
+ }{
+ {
+ name: "Given an IdentityConfigEnvironment, " +
+ "Then the constructed ServiceEntryPorts should be as expected",
+ identityConfigEnvironment: e2eEnv,
+ expectedSEPorts: expectedSEPorts,
+ },
+ }
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ sePorts, err := getServiceEntryPorts(e2eEnv)
+ if err != nil {
+ t.Errorf("While constructing serviceEntryPorts, got error: %v", err)
+ }
+ if !reflect.DeepEqual(sePorts, c.expectedSEPorts) {
+				t.Errorf("Mismatch between constructed serviceEntryPorts and expected serviceEntryPorts")
+ }
+ })
+ }
+}
+
+func TestGetServiceEntryEndpoints(t *testing.T) {
+ admiralParams := admiralParamsForServiceEntryTests()
+ common.ResetSync()
+ common.InitializeConfig(admiralParams)
+ e2eEnv := getSampleIdentityConfigEnvironment("e2e", "ctg-taxprep-partnerdatatotax-usw2-e2e")
+ ingressEndpoints := []*networkingV1Alpha3.WorkloadEntry{{
+ Address: "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.",
+ Locality: "us-west-2",
+ Ports: map[string]uint32{"http": uint32(15443)},
+ Labels: map[string]string{"security.istio.io/tlsMode": "istio"},
+ }}
+ remoteEndpoint := []*networkingV1Alpha3.WorkloadEntry{{
+ Address: "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.",
+ Locality: "us-west-2",
+ Ports: map[string]uint32{"http": uint32(15443)},
+ Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "rollout"},
+ }}
+ localEndpoint := []*networkingV1Alpha3.WorkloadEntry{{
+ Address: "partner-data-to-tax-spk-root-service.ctg-taxprep-partnerdatatotax-usw2-e2e.svc.cluster.local.",
+ Locality: "us-west-2",
+ Ports: map[string]uint32{"http": uint32(8090)},
+ Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "rollout"},
+ }}
+ ctx := context.Background()
+ ctxLogger := common.GetCtxLogger(ctx, "ctg-taxprep-partnerdatatotax", "")
+ testCases := []struct {
+ name string
+ identityConfigEnvironment IdentityConfigEnvironment
+ ingressEndpoints []*networkingV1Alpha3.WorkloadEntry
+ operatorCluster string
+ sourceCluster string
+ remoteEndpointAddress string
+ expectedSEEndpoints []*networkingV1Alpha3.WorkloadEntry
+ }{
+ {
+ name: "Given an IdentityConfigEnvironment and ingressEndpoint, " +
+ "When the operator cluster is not the same as the source cluster" +
+ "Then the constructed endpoint should be a remote endpoint",
+ identityConfigEnvironment: e2eEnv,
+ ingressEndpoints: ingressEndpoints,
+ operatorCluster: "cg-tax-ppd-usw2-k8s",
+ sourceCluster: "apigw-cx-ppd-usw2-k8s",
+ remoteEndpointAddress: "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.",
+ expectedSEEndpoints: remoteEndpoint,
+ },
+ {
+ name: "Given an IdentityConfigEnvironment and ingressEndpoint, " +
+ "When the operator cluster is the same as the source cluster" +
+ "Then the constructed endpoint should be a local endpoint",
+ identityConfigEnvironment: e2eEnv,
+ ingressEndpoints: ingressEndpoints,
+ operatorCluster: "cg-tax-ppd-usw2-k8s",
+ sourceCluster: "cg-tax-ppd-usw2-k8s",
+ remoteEndpointAddress: "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.",
+ expectedSEEndpoints: localEndpoint,
+ },
+ }
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ seEndpoint, err := getServiceEntryEndpoints(ctxLogger, c.operatorCluster, c.sourceCluster, c.ingressEndpoints, c.remoteEndpointAddress, c.identityConfigEnvironment)
+ if err != nil {
+ t.Errorf("While constructing serviceEntryPortEndpoint, got error: %v", err)
+ }
+ opts := cmpopts.IgnoreUnexported(networkingV1Alpha3.WorkloadEntry{})
+ if !cmp.Equal(seEndpoint, c.expectedSEEndpoints, opts) {
+ t.Errorf("Mismatch between constructed ingressEndpoint and expected ingressEndpoint")
+ t.Errorf(cmp.Diff(seEndpoint, c.expectedSEEndpoints, opts))
+ }
+ })
+ }
+}
+
+func TestGetSortedDependentNamespaces(t *testing.T) {
+ admiralParams := admiralParamsForServiceEntryTests()
+ common.ResetSync()
+ common.InitializeConfig(admiralParams)
+ ctx := context.Background()
+ ctxLogger := common.GetCtxLogger(ctx, "ctg-taxprep-partnerdatatotax", "")
+ testCases := []struct {
+ name string
+ operatorCluster string
+ sourceCluster string
+ cname string
+ env string
+ clientAssets []map[string]string
+ expectedNamespaces []string
+ }{
+ {
+ name: "Given asset info, cluster info, and client info, " +
+ "When the operator cluster is the same as the source cluster" +
+ "Then the constructed dependent namespaces should include istio-system",
+ operatorCluster: "cg-tax-ppd-usw2-k8s",
+ sourceCluster: "cg-tax-ppd-usw2-k8s",
+ cname: "e2e.intuit.ctg.taxprep.partnerdatatotax.mesh",
+ env: "e2e",
+ clientAssets: []map[string]string{{"name": "sample"}},
+ expectedNamespaces: []string{"ctg-taxprep-partnerdatatotax-usw2-e2e", "istio-system"},
+ },
+ {
+ name: "Given asset info, cluster info, and client info, " +
+ "When the operator cluster is not the same as the source cluster" +
+ "Then the constructed dependent namespaces should not include istio-system",
+ operatorCluster: "cg-tax-ppd-usw2-k8s",
+ sourceCluster: "cg-tax-ppd-use2-k8s",
+ cname: "e2e.intuit.ctg.taxprep.partnerdatatotax.mesh",
+ env: "e2e",
+ clientAssets: []map[string]string{{"name": "sample"}},
+ expectedNamespaces: []string{"ctg-taxprep-partnerdatatotax-usw2-e2e"},
+ },
+ }
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+			namespaces, err := getSortedDependentNamespaces(ctxLogger, ctx, c.operatorCluster, c.sourceCluster, c.cname, c.env, c.clientAssets)
+ if err != nil {
+ t.Errorf("While constructing sorted dependent namespaces, got error: %v", err)
+ }
+ if !cmp.Equal(namespaces, c.expectedNamespaces) {
+ t.Errorf("Mismatch between constructed sortedDependentNamespaces and expected sortedDependentNamespaces")
+ t.Errorf(cmp.Diff(namespaces, c.expectedNamespaces))
+ }
+ })
+ }
+}
+
+func TestBuildServiceEntryForClusterByEnv(t *testing.T) {
+ admiralParams := admiralParamsForServiceEntryTests()
+ common.ResetSync()
+ common.InitializeConfig(admiralParams)
+ ctx := context.Background()
+ ctxLogger := common.GetCtxLogger(ctx, "ctg-taxprep-partnerdatatotax", "")
+ expectedLocalServiceEntry := createMockServiceEntry("e2e", "Intuit.ctg.taxprep.partnerdatatotax", "partner-data-to-tax-spk-root-service.ctg-taxprep-partnerdatatotax-usw2-e2e.svc.cluster.local.", 8090, []string{"ctg-taxprep-partnerdatatotax-usw2-e2e", "istio-system"})
+ expectedRemoteServiceEntry := createMockServiceEntry("e2e", "Intuit.ctg.taxprep.partnerdatatotax", "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.", 15443, []string{"ctg-taxprep-partnerdatatotax-usw2-e2e"})
+ e2eEnv := getSampleIdentityConfigEnvironment("e2e", "ctg-taxprep-partnerdatatotax-usw2-e2e")
+ ingressEndpoints := []*networkingV1Alpha3.WorkloadEntry{{
+ Address: "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.",
+ Locality: "us-west-2",
+ Ports: map[string]uint32{"http": uint32(15443)},
+ Labels: map[string]string{"security.istio.io/tlsMode": "istio"},
+ }}
+ testCases := []struct {
+ name string
+ operatorCluster string
+ sourceCluster string
+ identity string
+ clientAssets []map[string]string
+ ingressEndpoints []*networkingV1Alpha3.WorkloadEntry
+ remoteEndpointAddress string
+ identityConfigEnvironment IdentityConfigEnvironment
+ expectedServiceEntry *networkingV1Alpha3.ServiceEntry
+ }{
+ {
+ name: "Given information to build an se, " +
+ "When the operator cluster is not the same as the source cluster" +
+ "Then the constructed se should have remote endpoint and no istio-system in exportTo",
+ operatorCluster: "cg-tax-ppd-usw2-k8s",
+ sourceCluster: "apigw-cx-ppd-usw2-k8s",
+ identity: "Intuit.ctg.taxprep.partnerdatatotax",
+ clientAssets: []map[string]string{{"name": "sample"}},
+ ingressEndpoints: ingressEndpoints,
+ remoteEndpointAddress: "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.",
+ identityConfigEnvironment: e2eEnv,
+ expectedServiceEntry: &expectedRemoteServiceEntry,
+ },
+ {
+ name: "Given information to build an se, " +
+ "When the operator cluster is the same as the source cluster" +
+ "Then the constructed se should have local endpoint and istio-system in exportTo",
+ operatorCluster: "cg-tax-ppd-usw2-k8s",
+ sourceCluster: "cg-tax-ppd-usw2-k8s",
+ identity: "Intuit.ctg.taxprep.partnerdatatotax",
+ clientAssets: []map[string]string{{"name": "sample"}},
+ ingressEndpoints: ingressEndpoints,
+ remoteEndpointAddress: "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.",
+ identityConfigEnvironment: e2eEnv,
+ expectedServiceEntry: &expectedLocalServiceEntry,
+ },
+ }
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ se, err := buildServiceEntryForClusterByEnv(ctxLogger, ctx, c.operatorCluster, c.sourceCluster, c.identity, c.clientAssets, c.ingressEndpoints, c.remoteEndpointAddress, c.identityConfigEnvironment)
+ if err != nil {
+ t.Errorf("While constructing serviceEntry, got error: %v", err)
+ }
+ opts := cmpopts.IgnoreUnexported(networkingV1Alpha3.ServiceEntry{}, networkingV1Alpha3.ServicePort{}, networkingV1Alpha3.WorkloadEntry{})
+ if !cmp.Equal(se, c.expectedServiceEntry, opts) {
+				t.Errorf("Mismatch between constructed serviceEntry and expected serviceEntry")
+ t.Errorf(cmp.Diff(se, c.expectedServiceEntry, opts))
+ }
+ })
+ }
+}
+
+func TestBuildServiceEntriesFromIdentityConfig(t *testing.T) {
+ admiralParams := admiralParamsForServiceEntryTests()
+ common.ResetSync()
+ common.InitializeConfig(admiralParams)
+ ctx := context.Background()
+ ctxLogger := common.GetCtxLogger(ctx, "ctg-taxprep-partnerdatatotax", "")
+ identityConfig := getSampleIdentityConfig()
+ expectedLocalServiceEntryprf := createMockServiceEntry("prf", "Intuit.ctg.taxprep.partnerdatatotax", "partner-data-to-tax-spk-root-service.ctg-taxprep-partnerdatatotax-usw2-prf.svc.cluster.local.", 8090, []string{"ctg-taxprep-partnerdatatotax-usw2-prf", "istio-system"})
+ expectedLocalServiceEntrye2e := createMockServiceEntry("e2e", "Intuit.ctg.taxprep.partnerdatatotax", "partner-data-to-tax-spk-root-service.ctg-taxprep-partnerdatatotax-usw2-e2e.svc.cluster.local.", 8090, []string{"ctg-taxprep-partnerdatatotax-usw2-e2e", "istio-system"})
+ expectedLocalServiceEntryqal := createMockServiceEntry("qal", "Intuit.ctg.taxprep.partnerdatatotax", "partner-data-to-tax-spk-root-service.ctg-taxprep-partnerdatatotax-usw2-qal.svc.cluster.local.", 8090, []string{"ctg-taxprep-partnerdatatotax-usw2-qal", "istio-system"})
+ expectedLocalServiceEntries := []*networkingV1Alpha3.ServiceEntry{&expectedLocalServiceEntryprf, &expectedLocalServiceEntrye2e, &expectedLocalServiceEntryqal}
+ testCases := []struct {
+ name string
+ operatorCluster string
+ event admiral.EventType
+ identityConfig IdentityConfig
+ expectedServiceEntries []*networkingV1Alpha3.ServiceEntry
+ }{
+ {
+ name: "Given information to build an se, " +
+ "When the operator cluster is the same as the source cluster" +
+ "Then the constructed se should have local endpoint and istio-system in exportTo",
+ operatorCluster: "cg-tax-ppd-usw2-k8s",
+ event: admiral.Add,
+ identityConfig: identityConfig,
+ expectedServiceEntries: expectedLocalServiceEntries,
+ },
+ }
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ serviceEntryBuilder := ServiceEntryBuilder{OperatorCluster: c.operatorCluster}
+ serviceEntries, err := serviceEntryBuilder.BuildServiceEntriesFromIdentityConfig(ctxLogger, ctx, c.event, c.identityConfig)
+ if err != nil {
+ t.Errorf("While constructing service entries, got error: %v", err)
+ }
+ opts := cmpopts.IgnoreUnexported(networkingV1Alpha3.ServiceEntry{}, networkingV1Alpha3.ServicePort{}, networkingV1Alpha3.WorkloadEntry{})
+ if !cmp.Equal(serviceEntries, c.expectedServiceEntries, opts) {
+ t.Errorf("Mismatch between constructed sorted entries and expected service entries")
+ t.Errorf(cmp.Diff(serviceEntries, c.expectedServiceEntries, opts))
+ }
+ })
+ }
+}
From 67f18e43374482bb3d1c0d38f200856afe6fc528 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:15:35 -0700
Subject: [PATCH 136/235] copy mock.go from main branch
---
admiral/pkg/test/mock.go | 300 ++++++++++++++++++++++++++++++++++-----
1 file changed, 262 insertions(+), 38 deletions(-)
diff --git a/admiral/pkg/test/mock.go b/admiral/pkg/test/mock.go
index 0e5a380a..2c72a5a0 100644
--- a/admiral/pkg/test/mock.go
+++ b/admiral/pkg/test/mock.go
@@ -2,14 +2,26 @@ package test
import (
"context"
+ "errors"
+
+ argoprojv1alpha1 "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/typed/rollouts/v1alpha1"
+ metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/rest"
argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
+ v1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
+ admiralV1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
v1alpha32 "istio.io/client-go/pkg/apis/networking/v1alpha3"
k8sAppsV1 "k8s.io/api/apps/v1"
k8sCoreV1 "k8s.io/api/core/v1"
)
+var (
+ RolloutNamespace = "test-ns"
+)
+
type MockIstioConfigStore struct {
TestHook func(interface{})
}
@@ -30,42 +42,72 @@ func (m *MockIstioConfigStore) Delete(typ, name, namespace string) error {
type MockDeploymentHandler struct {
}
-func (m *MockDeploymentHandler) Added(ctx context.Context, obj *k8sAppsV1.Deployment) {
-
+func (m *MockDeploymentHandler) Added(ctx context.Context, obj *k8sAppsV1.Deployment) error {
+ return nil
}
-func (m *MockDeploymentHandler) Deleted(ctx context.Context, obj *k8sAppsV1.Deployment) {
+func (m *MockDeploymentHandler) Deleted(ctx context.Context, obj *k8sAppsV1.Deployment) error {
+ return nil
+}
+type MockDeploymentHandlerError struct {
}
-type MockRolloutHandler struct {
+func (m *MockDeploymentHandlerError) Added(ctx context.Context, obj *k8sAppsV1.Deployment) error {
+ return nil
}
-func (m *MockRolloutHandler) Added(ctx context.Context, obj *argo.Rollout) {
+func (m *MockDeploymentHandlerError) Deleted(ctx context.Context, obj *k8sAppsV1.Deployment) error {
+ return errors.New("error while deleting deployment")
+}
+type MockRolloutHandler struct {
+ Obj *argo.Rollout
}
-func (m *MockRolloutHandler) Deleted(ctx context.Context, obj *argo.Rollout) {
+func (m *MockRolloutHandler) Added(ctx context.Context, obj *argo.Rollout) error {
+ m.Obj = obj
+ return nil
+}
+func (m *MockRolloutHandler) Deleted(ctx context.Context, obj *argo.Rollout) error {
+ return nil
}
-func (m *MockRolloutHandler) Updated(ctx context.Context, obj *argo.Rollout) {
+func (m *MockRolloutHandler) Updated(ctx context.Context, obj *argo.Rollout) error {
+ return nil
+}
+type MockRolloutHandlerError struct {
+ Obj *argo.Rollout
}
-type MockServiceHandler struct {
+func (m *MockRolloutHandlerError) Added(ctx context.Context, obj *argo.Rollout) error {
+ m.Obj = obj
+ return nil
}
-func (m *MockServiceHandler) Added(ctx context.Context, obj *k8sCoreV1.Service) {
+func (m *MockRolloutHandlerError) Deleted(ctx context.Context, obj *argo.Rollout) error {
+ return errors.New("error while deleting rollout")
+}
+func (m *MockRolloutHandlerError) Updated(ctx context.Context, obj *argo.Rollout) error {
+ return nil
}
-func (m *MockServiceHandler) Updated(ctx context.Context, obj *k8sCoreV1.Service) {
+type MockServiceHandler struct {
+}
+func (m *MockServiceHandler) Added(ctx context.Context, obj *k8sCoreV1.Service) error {
+ return nil
}
-func (m *MockServiceHandler) Deleted(ctx context.Context, obj *k8sCoreV1.Service) {
+func (m *MockServiceHandler) Updated(ctx context.Context, obj *k8sCoreV1.Service) error {
+ return nil
+}
+func (m *MockServiceHandler) Deleted(ctx context.Context, obj *k8sCoreV1.Service) error {
+ return nil
}
type MockPodHandler struct {
@@ -94,110 +136,292 @@ func (m *MockNodeHandler) Deleted(obj *k8sCoreV1.Node) {
type MockDependencyHandler struct {
}
-func (m *MockDependencyHandler) Added(ctx context.Context, obj *v1.Dependency) {
-
+func (m *MockDependencyHandler) Added(ctx context.Context, obj *admiralV1.Dependency) error {
+ return nil
}
-func (m *MockDependencyHandler) Updated(ctx context.Context, obj *v1.Dependency) {
-
+func (m *MockDependencyHandler) Updated(ctx context.Context, obj *admiralV1.Dependency) error {
+ return nil
}
-func (m *MockDependencyHandler) Deleted(ctx context.Context, obj *v1.Dependency) {
-
+func (m *MockDependencyHandler) Deleted(ctx context.Context, obj *admiralV1.Dependency) error {
+ return nil
}
type MockGlobalTrafficHandler struct {
- Obj *v1.GlobalTrafficPolicy
+ Obj *admiralV1.GlobalTrafficPolicy
}
-func (m *MockGlobalTrafficHandler) Added(ctx context.Context, obj *v1.GlobalTrafficPolicy) {
+func (m *MockGlobalTrafficHandler) Added(ctx context.Context, obj *admiralV1.GlobalTrafficPolicy) error {
m.Obj = obj
+ return nil
}
-func (m *MockGlobalTrafficHandler) Updated(ctx context.Context, obj *v1.GlobalTrafficPolicy) {
+func (m *MockGlobalTrafficHandler) Updated(ctx context.Context, obj *admiralV1.GlobalTrafficPolicy) error {
m.Obj = obj
+ return nil
}
-func (m *MockGlobalTrafficHandler) Deleted(ctx context.Context, obj *v1.GlobalTrafficPolicy) {
+func (m *MockGlobalTrafficHandler) Deleted(ctx context.Context, obj *admiralV1.GlobalTrafficPolicy) error {
m.Obj = nil
+ return nil
}
type MockServiceEntryHandler struct {
Obj *v1alpha32.ServiceEntry
}
-func (m *MockServiceEntryHandler) Added(obj *v1alpha32.ServiceEntry) {
+func (m *MockServiceEntryHandler) Added(obj *v1alpha32.ServiceEntry) error {
m.Obj = obj
+ return nil
}
-func (m *MockServiceEntryHandler) Updated(obj *v1alpha32.ServiceEntry) {
+func (m *MockServiceEntryHandler) Updated(obj *v1alpha32.ServiceEntry) error {
m.Obj = obj
+ return nil
}
-func (m *MockServiceEntryHandler) Deleted(obj *v1alpha32.ServiceEntry) {
+func (m *MockServiceEntryHandler) Deleted(obj *v1alpha32.ServiceEntry) error {
m.Obj = nil
+ return nil
}
type MockVirtualServiceHandler struct {
Obj *v1alpha32.VirtualService
}
-func (m *MockVirtualServiceHandler) Added(ctx context.Context, obj *v1alpha32.VirtualService) {
+func (m *MockVirtualServiceHandler) Added(ctx context.Context, obj *v1alpha32.VirtualService) error {
m.Obj = obj
+ return nil
}
-func (m *MockVirtualServiceHandler) Updated(ctx context.Context, obj *v1alpha32.VirtualService) {
+func (m *MockVirtualServiceHandler) Updated(ctx context.Context, obj *v1alpha32.VirtualService) error {
m.Obj = obj
+ return nil
}
-func (m *MockVirtualServiceHandler) Deleted(ctx context.Context, obj *v1alpha32.VirtualService) {
+func (m *MockVirtualServiceHandler) Deleted(ctx context.Context, obj *v1alpha32.VirtualService) error {
m.Obj = nil
+ return nil
}
type MockDestinationRuleHandler struct {
Obj *v1alpha32.DestinationRule
}
-func (m *MockDestinationRuleHandler) Added(ctx context.Context, obj *v1alpha32.DestinationRule) {
+func (m *MockDestinationRuleHandler) Added(ctx context.Context, obj *v1alpha32.DestinationRule) error {
m.Obj = obj
+ return nil
}
-func (m *MockDestinationRuleHandler) Updated(ctx context.Context, obj *v1alpha32.DestinationRule) {
+func (m *MockDestinationRuleHandler) Updated(ctx context.Context, obj *v1alpha32.DestinationRule) error {
m.Obj = obj
+ return nil
}
-func (m *MockDestinationRuleHandler) Deleted(ctx context.Context, obj *v1alpha32.DestinationRule) {
+func (m *MockDestinationRuleHandler) Deleted(ctx context.Context, obj *v1alpha32.DestinationRule) error {
m.Obj = nil
+ return nil
}
type MockSidecarHandler struct {
Obj *v1alpha32.Sidecar
}
-func (m *MockSidecarHandler) Added(ctx context.Context, obj *v1alpha32.Sidecar) {
+func (m *MockSidecarHandler) Added(ctx context.Context, obj *v1alpha32.Sidecar) error {
m.Obj = obj
+ return nil
}
-func (m *MockSidecarHandler) Updated(ctx context.Context, obj *v1alpha32.Sidecar) {
+func (m *MockSidecarHandler) Updated(ctx context.Context, obj *v1alpha32.Sidecar) error {
m.Obj = obj
+ return nil
}
-func (m *MockSidecarHandler) Deleted(ctx context.Context, obj *v1alpha32.Sidecar) {
+func (m *MockSidecarHandler) Deleted(ctx context.Context, obj *v1alpha32.Sidecar) error {
m.Obj = nil
+ return nil
}
type MockRoutingPolicyHandler struct {
- Obj *v1.RoutingPolicy
+ Obj *admiralV1.RoutingPolicy
+}
+
+func (m *MockRoutingPolicyHandler) Added(ctx context.Context, obj *admiralV1.RoutingPolicy) error {
+ m.Obj = obj
+ return nil
+}
+
+func (m *MockRoutingPolicyHandler) Deleted(ctx context.Context, obj *admiralV1.RoutingPolicy) error {
+ m.Obj = nil
+ return nil
+}
+
+func (m *MockRoutingPolicyHandler) Updated(ctx context.Context, obj *admiralV1.RoutingPolicy) error {
+ m.Obj = obj
+ return nil
+}
+
+type MockTrafficConfigHandler struct {
+ Obj *admiralV1.TrafficConfig
}
-func (m *MockRoutingPolicyHandler) Added(ctx context.Context, obj *v1.RoutingPolicy) {
+func (m *MockTrafficConfigHandler) Added(ctx context.Context, obj *admiralV1.TrafficConfig) {
m.Obj = obj
}
-func (m *MockRoutingPolicyHandler) Deleted(ctx context.Context, obj *v1.RoutingPolicy) {
+func (m *MockTrafficConfigHandler) Deleted(ctx context.Context, obj *admiralV1.TrafficConfig) {
m.Obj = nil
}
-func (m *MockRoutingPolicyHandler) Updated(ctx context.Context, obj *v1.RoutingPolicy) {
+func (m *MockTrafficConfigHandler) Updated(ctx context.Context, obj *admiralV1.TrafficConfig) {
m.Obj = obj
}
+
+type MockEnvoyFilterHandler struct {
+}
+
+func (m *MockEnvoyFilterHandler) Added(context.Context, *v1alpha32.EnvoyFilter) {
+}
+
+func (m *MockEnvoyFilterHandler) Deleted(context.Context, *v1alpha32.EnvoyFilter) {
+}
+
+func (m *MockEnvoyFilterHandler) Updated(context.Context, *v1alpha32.EnvoyFilter) {
+}
+
+type MockDependencyProxyHandler struct {
+}
+
+func (m *MockDependencyProxyHandler) Added(context.Context, *admiralV1.DependencyProxy) error {
+ return nil
+}
+
+func (m *MockDependencyProxyHandler) Deleted(context.Context, *admiralV1.DependencyProxy) error {
+ return nil
+}
+
+func (m *MockDependencyProxyHandler) Updated(context.Context, *admiralV1.DependencyProxy) error {
+ return nil
+}
+
+type MockRolloutsGetter struct{}
+type FakeRolloutsImpl struct{}
+
+func (f FakeRolloutsImpl) Create(ctx context.Context, rollout *v1alpha1.Rollout, opts metaV1.CreateOptions) (*v1alpha1.Rollout, error) {
+ return nil, nil
+}
+
+func (f FakeRolloutsImpl) Update(ctx context.Context, rollout *v1alpha1.Rollout, opts metaV1.UpdateOptions) (*v1alpha1.Rollout, error) {
+ return nil, nil
+}
+
+func (f FakeRolloutsImpl) UpdateStatus(ctx context.Context, rollout *v1alpha1.Rollout, opts metaV1.UpdateOptions) (*v1alpha1.Rollout, error) {
+ return nil, nil
+}
+
+func (f FakeRolloutsImpl) Delete(ctx context.Context, name string, opts metaV1.DeleteOptions) error {
+ return nil
+}
+
+func (f FakeRolloutsImpl) DeleteCollection(ctx context.Context, opts metaV1.DeleteOptions, listOpts metaV1.ListOptions) error {
+ return nil
+}
+
+func (f FakeRolloutsImpl) Get(ctx context.Context, name string, opts metaV1.GetOptions) (*v1alpha1.Rollout, error) {
+ return nil, nil
+}
+
+func (f FakeRolloutsImpl) List(ctx context.Context, opts metaV1.ListOptions) (*v1alpha1.RolloutList, error) {
+ rollout1 := v1alpha1.Rollout{
+ ObjectMeta: metaV1.ObjectMeta{
+ Name: "rollout-name",
+ Namespace: RolloutNamespace,
+ },
+ Spec: v1alpha1.RolloutSpec{
+ Strategy: v1alpha1.RolloutStrategy{
+ Canary: &v1alpha1.CanaryStrategy{
+ TrafficRouting: &v1alpha1.RolloutTrafficRouting{
+ Istio: &v1alpha1.IstioTrafficRouting{
+ VirtualService: &v1alpha1.IstioVirtualService{
+ Name: "virtual-service-1",
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ rollout2 := v1alpha1.Rollout{
+ ObjectMeta: metaV1.ObjectMeta{
+ Name: "rollout-name2",
+ Namespace: RolloutNamespace,
+ },
+ Spec: v1alpha1.RolloutSpec{
+ Strategy: v1alpha1.RolloutStrategy{
+ Canary: &v1alpha1.CanaryStrategy{
+ TrafficRouting: &v1alpha1.RolloutTrafficRouting{
+ Istio: &v1alpha1.IstioTrafficRouting{
+ VirtualService: &v1alpha1.IstioVirtualService{
+ Name: "virtual-service-1",
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ list := &v1alpha1.RolloutList{Items: []v1alpha1.Rollout{rollout1, rollout2}}
+ return list, nil
+}
+
+func (f FakeRolloutsImpl) Watch(ctx context.Context, opts metaV1.ListOptions) (watch.Interface, error) {
+ return nil, nil
+}
+
+func (f FakeRolloutsImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metaV1.PatchOptions, subresources ...string) (result *v1alpha1.Rollout, err error) {
+ return nil, nil
+}
+
+func (m MockRolloutsGetter) RESTClient() rest.Interface {
+ return nil
+}
+
+func (m MockRolloutsGetter) AnalysisRuns(namespace string) argoprojv1alpha1.AnalysisRunInterface {
+ return nil
+}
+
+func (m MockRolloutsGetter) AnalysisTemplates(namespace string) argoprojv1alpha1.AnalysisTemplateInterface {
+ return nil
+}
+
+func (m MockRolloutsGetter) ClusterAnalysisTemplates() argoprojv1alpha1.ClusterAnalysisTemplateInterface {
+ return nil
+}
+
+func (m MockRolloutsGetter) Experiments(namespace string) argoprojv1alpha1.ExperimentInterface {
+ return nil
+}
+
+func (m MockRolloutsGetter) Rollouts(namespace string) argoprojv1alpha1.RolloutInterface {
+ return FakeRolloutsImpl{}
+}
+
+type MockOutlierDetectionHandler struct {
+ Obj *admiralV1.OutlierDetection
+}
+
+func (m *MockOutlierDetectionHandler) Added(ctx context.Context, obj *admiralV1.OutlierDetection) error {
+ m.Obj = obj
+ return nil
+}
+
+func (m *MockOutlierDetectionHandler) Updated(ctx context.Context, obj *admiralV1.OutlierDetection) error {
+ m.Obj = obj
+ return nil
+}
+
+func (m *MockOutlierDetectionHandler) Deleted(ctx context.Context, obj *admiralV1.OutlierDetection) error {
+ m.Obj = nil
+ return nil
+}
From 221250227b859bb85fdeaaba1bd1b669410ff5a2 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:16:08 -0700
Subject: [PATCH 137/235] copy types.go from main branch
---
admiral/pkg/test/types.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/admiral/pkg/test/types.go b/admiral/pkg/test/types.go
index d556ceac..2ab7c65f 100644
--- a/admiral/pkg/test/types.go
+++ b/admiral/pkg/test/types.go
@@ -19,6 +19,6 @@ func (c *FakeConfigMapController) GetConfigMap(ctx context.Context) (*k8sCoreV1.
func (c *FakeConfigMapController) PutConfigMap(ctx context.Context, newMap *k8sCoreV1.ConfigMap) error {
return c.PutError
}
-func (c *FakeConfigMapController)GetIPPrefixForServiceEntries() (seIpPrefix string) {
+func (c *FakeConfigMapController) GetIPPrefixForServiceEntries() (seIpPrefix string) {
return "240.0"
}
From accfdb0c5acd876d403e2ae749bca16adba87701 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:16:47 -0700
Subject: [PATCH 138/235] copy constants.go from main branch
---
admiral/pkg/util/constants.go | 9 +++++++++
1 file changed, 9 insertions(+)
create mode 100644 admiral/pkg/util/constants.go
diff --git a/admiral/pkg/util/constants.go b/admiral/pkg/util/constants.go
new file mode 100644
index 00000000..807a6199
--- /dev/null
+++ b/admiral/pkg/util/constants.go
@@ -0,0 +1,9 @@
+package util
+
+const (
+ Http = "http"
+ Grpc = "grpc"
+ GrpcWeb = "grpc-web"
+ Http2 = "http2"
+ SecretShardKey = "shard"
+)
From 7148c08b9f864863160641e1736906284415cc94 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:17:21 -0700
Subject: [PATCH 139/235] copy util.go from main branch
---
admiral/pkg/util/util.go | 38 ++++++++++++++++++++++++++++++++++++++
1 file changed, 38 insertions(+)
create mode 100644 admiral/pkg/util/util.go
diff --git a/admiral/pkg/util/util.go b/admiral/pkg/util/util.go
new file mode 100644
index 00000000..5af737f2
--- /dev/null
+++ b/admiral/pkg/util/util.go
@@ -0,0 +1,38 @@
+package util
+
+import (
+ "strings"
+ "time"
+)
+
+type AdmiralState struct {
+ ReadOnly bool
+ IsStateInitialized bool
+}
+
+var (
+ CurrentAdmiralState AdmiralState
+)
+
+func IsAdmiralReadOnly() bool {
+ return CurrentAdmiralState.ReadOnly
+}
+
+// ResyncIntervals defines the different reconciliation intervals
+// for kubernetes operators
+type ResyncIntervals struct {
+ UniversalReconcileInterval time.Duration
+ SeAndDrReconcileInterval time.Duration
+}
+
+func GetPortProtocol(name string) string {
+ var protocol = Http
+ if strings.Index(name, GrpcWeb) == 0 {
+ protocol = GrpcWeb
+ } else if strings.Index(name, Grpc) == 0 {
+ protocol = Grpc
+ } else if strings.Index(name, Http2) == 0 {
+ protocol = Http2
+ }
+ return protocol
+}
From 48c16f4c0587b78be8153d6efba23ff737c01cdc Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:17:51 -0700
Subject: [PATCH 140/235] copy util_test.go from main branch
---
admiral/pkg/util/util_test.go | 48 +++++++++++++++++++++++++++++++++++
1 file changed, 48 insertions(+)
create mode 100644 admiral/pkg/util/util_test.go
diff --git a/admiral/pkg/util/util_test.go b/admiral/pkg/util/util_test.go
new file mode 100644
index 00000000..0bdac7db
--- /dev/null
+++ b/admiral/pkg/util/util_test.go
@@ -0,0 +1,48 @@
+package util
+
+import "testing"
+
+func TestGetPortProtocol(t *testing.T) {
+ cases := []struct {
+ name string
+ protocol string
+ expProtocol string
+ }{
+ {
+ name: "Given valid input parameters, " +
+ "When port name is " + Http + ", " +
+ "Then protocol should be " + Http,
+ protocol: Http,
+ expProtocol: Http,
+ },
+ {
+ name: "Given valid input parameters, " +
+ "When port name is " + GrpcWeb + ", " +
+ "Then protocol should be " + GrpcWeb,
+ protocol: GrpcWeb,
+ expProtocol: GrpcWeb,
+ },
+ {
+ name: "Given valid input parameters, " +
+ "When port name is " + Grpc + ", " +
+ "Then protocol should be " + Grpc,
+ protocol: Grpc,
+ expProtocol: Grpc,
+ },
+ {
+ name: "Given valid input parameters, " +
+ "When port name is " + Http2 + ", " +
+ "Then protocol should be " + Http2,
+ protocol: Http2,
+ expProtocol: Http2,
+ },
+ }
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ protocol := GetPortProtocol(c.protocol)
+ if protocol != c.expProtocol {
+ t.Errorf("expected=%v, got=%v", c.expProtocol, protocol)
+ }
+ })
+ }
+}
From 4a7cc155b92e093bf137ef77373bf18f4045a083 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:18:29 -0700
Subject: [PATCH 141/235] copy variables.go from main branch
---
admiral/pkg/util/variables.go | 3 +++
1 file changed, 3 insertions(+)
create mode 100644 admiral/pkg/util/variables.go
diff --git a/admiral/pkg/util/variables.go b/admiral/pkg/util/variables.go
new file mode 100644
index 00000000..78cddff1
--- /dev/null
+++ b/admiral/pkg/util/variables.go
@@ -0,0 +1,3 @@
+package util
+
+var ()
From 34b9e96ffa7865bf297fc5b37601ffbaf87814cd Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:19:29 -0700
Subject: [PATCH 142/235] copy compatibility.md from main branch
---
docs/Compatibility.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/Compatibility.md b/docs/Compatibility.md
index 44d1de57..1bc594ab 100644
--- a/docs/Compatibility.md
+++ b/docs/Compatibility.md
@@ -31,4 +31,4 @@ v1.0 | AWS, GCP, Azure
v1.1 | AWS, GCP, Azure
v1.2 | AWS, GCP, Azure
-`Note`: Please submit a PR if admiral was tested on other cloud vendors
\ No newline at end of file
+`Note`: Please submit a PR if admiral was tested on other cloud vendors
From 2db24525caae5c92f23ff67815f530a2abb46841 Mon Sep 17 00:00:00 2001
From: kpharasi
Date: Tue, 23 Jul 2024 16:20:15 -0700
Subject: [PATCH 143/235] copy examples.md from main branch
---
docs/Examples.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/docs/Examples.md b/docs/Examples.md
index 6558c7d3..a9734d9a 100644
--- a/docs/Examples.md
+++ b/docs/Examples.md
@@ -71,7 +71,7 @@ $ADMIRAL_HOME/scripts/cluster-secret.sh $MAIN_CLUSTER $MAIN_CLUSTER admiral
4\. Install/Run Admiral-Sync in the remote clusters that admiral monitors
```
# Create admiral role and bindings on remote cluster
-kubectl apply --kubeconfig=$REMOTE_CLUSTER -f $ADMIRAL_HOME/yaml/remotecluster.yaml
+kubectl apply --context=$REMOTE_CLUSTER -f $ADMIRAL_HOME/yaml/remotecluster.yaml
```
5\. Add Remote Cluster to Admiral's watcher
```
@@ -357,4 +357,4 @@ Run the following script to cleanup admiral and its associated resources
```bash
$ADMIRAL_HOME/scripts/cleanup.sh
-```
+```
\ No newline at end of file
From 1a47464b8cb77be2b90bab981f2e8fe651212089 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 10:35:01 -0400
Subject: [PATCH 144/235] copied envoyfilter_test changes from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/envoyfilter_test.go | 271 ++++++++++++++++++-----
1 file changed, 211 insertions(+), 60 deletions(-)
diff --git a/admiral/pkg/clusters/envoyfilter_test.go b/admiral/pkg/clusters/envoyfilter_test.go
index e705edb2..8ff44359 100644
--- a/admiral/pkg/clusters/envoyfilter_test.go
+++ b/admiral/pkg/clusters/envoyfilter_test.go
@@ -7,39 +7,27 @@ import (
"testing"
"time"
+ "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
+
+ "github.com/google/go-cmp/cmp"
+ "google.golang.org/protobuf/testing/protocmp"
+ "istio.io/api/networking/v1alpha3"
+ networking "istio.io/client-go/pkg/apis/networking/v1alpha3"
+
"github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model"
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
"github.com/stretchr/testify/assert"
istiofake "istio.io/client-go/pkg/clientset/versioned/fake"
- "istio.io/client-go/pkg/clientset/versioned/typed/networking/v1alpha3/fake"
- time2 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- testing2 "k8s.io/client-go/testing"
+ k8sAppsV1 "k8s.io/api/apps/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ k8sCorev1 "k8s.io/api/core/v1"
)
func TestCreateOrUpdateEnvoyFilter(t *testing.T) {
- p := common.AdmiralParams{
- KubeconfigPath: "testdata/fake.config",
- LabelSet: &common.LabelSet{},
- EnableSAN: true,
- SANPrefix: "prefix",
- HostnameSuffix: "mesh",
- SyncNamespace: "ns",
- CacheRefreshDuration: time.Minute,
- ClusterRegistriesNamespace: "default",
- DependenciesNamespace: "default",
- SecretResolver: "",
- EnvoyFilterVersion: "1.13",
- }
-
- p.LabelSet.WorkloadIdentityKey = "identity"
- p.LabelSet.EnvKey = "admiral.io/env"
- p.LabelSet.GlobalTrafficDeploymentLabel = "identity"
-
- common.ResetSync()
- registry, _ := InitAdmiral(context.Background(), p)
+ registry := getRegistry("1.13,1.17")
handler := RoutingPolicyHandler{}
@@ -52,6 +40,45 @@ func TestCreateOrUpdateEnvoyFilter(t *testing.T) {
})
+ deployment := k8sAppsV1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "test",
+ Labels: map[string]string{"sidecar.istio.io/inject": "true", "identity": "bar", "env": "dev"},
+ },
+ Spec: k8sAppsV1.DeploymentSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{"identity": "bar"},
+ },
+ Template: k8sCorev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{"sidecar.istio.io/inject": "true"},
+ Labels: map[string]string{"identity": "bar", "istio-injected": "true", "env": "dev"},
+ },
+ },
+ },
+ }
+
+ rollout := v1alpha1.Rollout{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "test",
+ Labels: map[string]string{"sidecar.istio.io/inject": "true", "identity": "bar", "env": "dev"},
+ },
+ Spec: v1alpha1.RolloutSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{"identity": "bar"},
+ },
+ Template: k8sCorev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{"sidecar.istio.io/inject": "true"},
+ Labels: map[string]string{"identity": "bar", "istio-injected": "true", "env": "dev"},
+ },
+ },
+ },
+ }
+ ctx := context.Background()
+ remoteController.RolloutController.Added(ctx, &rollout)
remoteController.RoutingPolicyController = routingPolicyController
registry.remoteControllers = map[string]*RemoteController{"cluster-1": remoteController}
@@ -64,11 +91,12 @@ func TestCreateOrUpdateEnvoyFilter(t *testing.T) {
handler.RemoteRegistry = registry
routingPolicyFoo := &v1.RoutingPolicy{
- TypeMeta: time2.TypeMeta{},
- ObjectMeta: time2.ObjectMeta{
+ TypeMeta: metav1.TypeMeta{},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "routingpolicy-foo",
Labels: map[string]string{
"identity": "foo",
- "admiral.io/env": "stage",
+ "admiral.io/env": "dev",
},
},
Spec: model.RoutingPolicy{
@@ -79,41 +107,170 @@ func TestCreateOrUpdateEnvoyFilter(t *testing.T) {
"cachettlSec": "86400",
"routingServiceUrl": "e2e.test.routing.service.mesh",
"pathPrefix": "/sayhello,/v1/company/{id}/",
+ "wasmPath": "dummyPath",
},
},
Status: v1.RoutingPolicyStatus{},
}
- selectors := map[string]string{"one": "test1", "two": "test2"}
+ envoyFilter_113 := &networking.EnvoyFilter{
+ TypeMeta: metav1.TypeMeta{},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-dr-70395ba3470fd8ce6062-f6ce3712830af1b15625-1.13",
+ },
+ Spec: v1alpha3.EnvoyFilter{
+ ConfigPatches: nil,
+ Priority: 0,
+ },
+ }
+ envoyFilter_117 := &networking.EnvoyFilter{
+ TypeMeta: metav1.TypeMeta{},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-dr-70395ba3470fd8ce6062-f6ce3712830af1b15625-1.17",
+ },
+ Spec: v1alpha3.EnvoyFilter{
+ ConfigPatches: nil,
+ Priority: 0,
+ },
+ }
+
+ getSha1 = common.GetSha1
- getSha1 = getSha1Error
+ //Struct of test case info. Name is required.
+ testCases := []struct {
+ name string
+ workloadKey string
+ routingPolicy *v1.RoutingPolicy
+ eventType admiral.EventType
+ expectedEnvoyFilter *networking.EnvoyFilter
+ filterCount int
+ registry *AdmiralCache
+ shaMethod func(interface{}) (string, error)
+ matchingRollout bool
+ }{
+ {
+ name: "Given dynamic routing is enabled in admiral startup params, " +
+ "When an ADD event for routing policy is received but sha1 calculation fails" +
+ "Then 0 envoy filters are created and error is thrown",
+ workloadKey: "bar",
+ routingPolicy: routingPolicyFoo,
+ eventType: admiral.Add,
+ expectedEnvoyFilter: nil,
+ filterCount: 0,
+ registry: registry.AdmiralCache,
+ shaMethod: getSha1Error,
+ },
+ {
+ name: "Given 2 envoy filter versions are specified in Admiral startup params, " +
+ "And there exists a dependent service, which has a deployment, " +
+ "When an ADD event is received for routing policy" +
+ "Then 2 envoy filters are created, one for each version in each dependent cluster's istio-system ns",
+ workloadKey: "bar",
+ routingPolicy: routingPolicyFoo,
+ eventType: admiral.Add,
+ expectedEnvoyFilter: envoyFilter_113,
+ filterCount: 2,
+ registry: registry.AdmiralCache,
+ },
+ {
+ name: "Given 2 envoy filter versions are specified in Admiral startup params, " +
+ "When an UPDATE event is received for routing policy" +
+ "Then 2 envoy filters are created, one for each version in each dependent's ns",
+ workloadKey: "bar",
+ routingPolicy: routingPolicyFoo,
+ eventType: admiral.Update,
+ expectedEnvoyFilter: envoyFilter_113,
+ filterCount: 2,
+ registry: registry.AdmiralCache,
+ },
+ {
+ name: "Given 2 envoy filter versions are specified in Admiral startup params, " +
+ "And there exists a dependent service, which has a rollout, " +
+ "When an ADD event is received for routing policy" +
+ "Then 2 envoy filters are created, one for each version in dependent cluster's istio-system ns",
+ workloadKey: "bar",
+ routingPolicy: routingPolicyFoo,
+ eventType: admiral.Add,
+ expectedEnvoyFilter: envoyFilter_113,
+ filterCount: 2,
+ registry: registry.AdmiralCache,
+ matchingRollout: true,
+ },
+ }
- ctx := context.Background()
+ //Run the test for every provided case
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ if c.shaMethod != nil {
+ getSha1 = c.shaMethod
+ } else {
+ getSha1 = common.GetSha1
+ }
+ if c.matchingRollout {
+ remoteController.DeploymentController.Deleted(ctx, &deployment)
+ } else {
+ remoteController.DeploymentController.Added(ctx, &deployment)
+ }
+ if c.eventType == admiral.Update {
+ remoteController.RoutingPolicyController.IstioClient.NetworkingV1alpha3().
+ EnvoyFilters(common.NamespaceIstioSystem).Create(context.Background(), envoyFilter_113, metav1.CreateOptions{})
+ remoteController.RoutingPolicyController.IstioClient.NetworkingV1alpha3().
+ EnvoyFilters(common.NamespaceIstioSystem).Create(context.Background(), envoyFilter_117, metav1.CreateOptions{})
- envoyfilter, err := createOrUpdateEnvoyFilter(ctx, remoteController, routingPolicyFoo, admiral.Add, "barstage", registry.AdmiralCache, selectors)
+ }
+ envoyfilterList, err := createOrUpdateEnvoyFilter(ctx, remoteController, c.routingPolicy, c.eventType, c.workloadKey, c.registry)
- assert.NotNil(t, err)
- assert.Nil(t, envoyfilter)
+ if err != nil && c.expectedEnvoyFilter != nil {
+ t.Fatalf("EnvoyFilter error: %v", err)
+ }
- getSha1 = common.GetSha1
+ if c.expectedEnvoyFilter != nil && c.filterCount == len(envoyfilterList) && !cmp.Equal(envoyfilterList[0].Name, c.expectedEnvoyFilter.Name, protocmp.Transform()) {
+ t.Fatalf("EnvoyFilter Mismatch. Diff: %v", cmp.Diff(envoyfilterList[0], c.expectedEnvoyFilter, protocmp.Transform()))
+ }
- envoyfilter, err = createOrUpdateEnvoyFilter(ctx, remoteController, routingPolicyFoo, admiral.Add, "bar", registry.AdmiralCache, selectors)
- assert.Equal(t, "test1", envoyfilter.Spec.WorkloadSelector.GetLabels()["one"])
- assert.Equal(t, "test2", envoyfilter.Spec.WorkloadSelector.GetLabels()["two"])
- assert.Equal(t, "test-dynamicrouting-d0fdd-1.13", envoyfilter.Name)
+ for _, ef := range envoyfilterList {
+ assert.Equal(t, "bar", ef.Spec.WorkloadSelector.Labels[common.AssetAlias])
+ assert.Equal(t, c.routingPolicy.Name, ef.Annotations[envoyfilterAssociatedRoutingPolicyNameAnnotation])
+ assert.Equal(t, common.GetRoutingPolicyIdentity(c.routingPolicy), ef.Annotations[envoyfilterAssociatedRoutingPolicyIdentityeAnnotation])
+ assert.Equal(t, "istio-system", ef.ObjectMeta.Namespace)
+ // assert filename in vm_config
+ assert.Contains(t, ef.Spec.ConfigPatches[0].Patch.Value.String(), common.WasmPathValue)
+ }
+ })
+ t.Cleanup(func() {
+ remoteController.RoutingPolicyController.IstioClient.NetworkingV1alpha3().
+ EnvoyFilters(common.NamespaceIstioSystem).Delete(context.Background(), "test-dr-70395ba3470fd8ce6062-f6ce3712830af1b15625-1.13", metav1.DeleteOptions{})
+ remoteController.RoutingPolicyController.IstioClient.NetworkingV1alpha3().
+ EnvoyFilters(common.NamespaceIstioSystem).Delete(context.Background(), "test-dr-70395ba3470fd8ce6062-f6ce3712830af1b15625-1.17", metav1.DeleteOptions{})
- envoyfilter, err = createOrUpdateEnvoyFilter(ctx, remoteController, routingPolicyFoo, admiral.Update, "bar", registry.AdmiralCache, selectors)
- assert.Nil(t, err)
+ })
+ }
+}
- remoteController.RoutingPolicyController.IstioClient.NetworkingV1alpha3().(*fake.FakeNetworkingV1alpha3).PrependReactor("create", "envoyfilters",
- func(action testing2.Action) (handled bool, ret runtime.Object, err error) {
- return true, nil, errors.New("error creating envoyfilter")
+func getRegistry(filterVersion string) *RemoteRegistry {
+ p := common.AdmiralParams{
+ LabelSet: &common.LabelSet{
+ DeploymentAnnotation: "sidecar.istio.io/inject",
},
- )
- envoyfilter3, err := createOrUpdateEnvoyFilter(ctx, remoteController, routingPolicyFoo, admiral.Add, "bar2", registry.AdmiralCache, selectors)
- assert.NotNil(t, err)
- assert.Nil(t, envoyfilter3)
+ KubeconfigPath: "testdata/fake.config",
+ EnableSAN: true,
+ SANPrefix: "prefix",
+ HostnameSuffix: "mesh",
+ SyncNamespace: "ns",
+ CacheReconcileDuration: time.Minute,
+ ClusterRegistriesNamespace: "default",
+ DependenciesNamespace: "default",
+ EnvoyFilterVersion: filterVersion,
+ Profile: common.AdmiralProfileDefault,
+ }
+
+ p.LabelSet.WorkloadIdentityKey = "identity"
+ p.LabelSet.EnvKey = "admiral.io/env"
+ p.LabelSet.AdmiralCRDIdentityLabel = "identity"
+ common.ResetSync()
+ registry, _ := InitAdmiral(context.Background(), p)
+ return registry
}
func getSha1Error(key interface{}) (string, error) {
@@ -122,8 +279,8 @@ func getSha1Error(key interface{}) (string, error) {
func TestGetHosts(t *testing.T) {
routingPolicyFoo := &v1.RoutingPolicy{
- TypeMeta: time2.TypeMeta{},
- ObjectMeta: time2.ObjectMeta{
+ TypeMeta: metav1.TypeMeta{},
+ ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"identity": "foo",
"admiral.io/env": "stage",
@@ -142,17 +299,14 @@ func TestGetHosts(t *testing.T) {
Status: v1.RoutingPolicyStatus{},
}
- hosts, err := getHosts(routingPolicyFoo)
- if err != nil {
- assert.Fail(t, err.Error())
- }
+ hosts := getHosts(routingPolicyFoo)
assert.Equal(t, "hosts: e2e.testservice.mesh,e2e2.testservice.mesh", hosts)
}
func TestGetPlugin(t *testing.T) {
routingPolicyFoo := &v1.RoutingPolicy{
- TypeMeta: time2.TypeMeta{},
- ObjectMeta: time2.ObjectMeta{
+ TypeMeta: metav1.TypeMeta{},
+ ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"identity": "foo",
"admiral.io/env": "stage",
@@ -171,9 +325,6 @@ func TestGetPlugin(t *testing.T) {
Status: v1.RoutingPolicyStatus{},
}
- plugin, err := getPlugin(routingPolicyFoo)
- if err != nil {
- assert.Fail(t, err.Error())
- }
+ plugin := getPlugin(routingPolicyFoo)
assert.Equal(t, "plugin: test", plugin)
}
From 51e82318ab5a1b8a7d976d8d51c7c943841bb1c4 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 10:35:54 -0400
Subject: [PATCH 145/235] copied admiral/pkg/clusters/globaltraffic_handler.go
 changes from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/globaltraffic_handler.go | 115 ++++++++++++++++++
1 file changed, 115 insertions(+)
create mode 100644 admiral/pkg/clusters/globaltraffic_handler.go
diff --git a/admiral/pkg/clusters/globaltraffic_handler.go b/admiral/pkg/clusters/globaltraffic_handler.go
new file mode 100644
index 00000000..6dd367c1
--- /dev/null
+++ b/admiral/pkg/clusters/globaltraffic_handler.go
@@ -0,0 +1,115 @@
+package clusters
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ log "github.com/sirupsen/logrus"
+)
+
+type GlobalTrafficHandler struct {
+ RemoteRegistry *RemoteRegistry
+ ClusterID string
+}
+
+type GlobalTrafficCache interface {
+ GetFromIdentity(identity string, environment string) (*v1.GlobalTrafficPolicy, error)
+ Put(gtp *v1.GlobalTrafficPolicy) error
+ Delete(identity string, environment string) error
+}
+
+type globalTrafficCache struct {
+ //map of global traffic policies key=environment.identity, value:GlobalTrafficCache GlobalTrafficPolicy object
+ identityCache map[string]*v1.GlobalTrafficPolicy
+
+ mutex *sync.Mutex
+}
+
+func (g *globalTrafficCache) GetFromIdentity(identity string, environment string) (*v1.GlobalTrafficPolicy, error) {
+ g.mutex.Lock()
+ defer g.mutex.Unlock()
+ return g.identityCache[common.ConstructKeyWithEnvAndIdentity(environment, identity)], nil
+}
+
+func (g *globalTrafficCache) Put(gtp *v1.GlobalTrafficPolicy) error {
+ if gtp.Name == "" {
+ //no GTP, throw error
+ return errors.New("cannot add an empty globaltrafficpolicy to the cache")
+ }
+ defer g.mutex.Unlock()
+ g.mutex.Lock()
+ var gtpIdentity = common.GetGtpIdentity(gtp)
+ var gtpEnv = common.GetGtpEnv(gtp)
+
+ log.Infof("adding GTP with name %v to GTP cache. LabelMatch=%v env=%v", gtp.Name, gtpIdentity, gtpEnv)
+ key := common.ConstructKeyWithEnvAndIdentity(gtpEnv, gtpIdentity)
+ g.identityCache[key] = gtp
+ return nil
+}
+
+func (g *globalTrafficCache) Delete(identity string, environment string) error {
+ g.mutex.Lock()
+ defer g.mutex.Unlock()
+ key := common.ConstructKeyWithEnvAndIdentity(environment, identity)
+ if _, ok := g.identityCache[key]; ok {
+ log.Infof("deleting gtp with key=%s from global GTP cache", key)
+ delete(g.identityCache, key)
+ return nil
+ }
+ return fmt.Errorf("gtp with key %s not found in cache", key)
+}
+
+func (gtp *GlobalTrafficHandler) Added(ctx context.Context, obj *v1.GlobalTrafficPolicy) error {
+ log.Infof(LogFormat, "Added", "globaltrafficpolicy", obj.Name, gtp.ClusterID, "received")
+ err := HandleEventForGlobalTrafficPolicy(ctx, admiral.Add, obj, gtp.RemoteRegistry, gtp.ClusterID, modifyServiceEntryForNewServiceOrPod)
+ if err != nil {
+ return fmt.Errorf(LogErrFormat, "Added", "globaltrafficpolicy", obj.Name, gtp.ClusterID, err.Error())
+ }
+ return nil
+}
+
+func (gtp *GlobalTrafficHandler) Updated(ctx context.Context, obj *v1.GlobalTrafficPolicy) error {
+ log.Infof(LogFormat, "Updated", "globaltrafficpolicy", obj.Name, gtp.ClusterID, "received")
+ err := HandleEventForGlobalTrafficPolicy(ctx, admiral.Update, obj, gtp.RemoteRegistry, gtp.ClusterID, modifyServiceEntryForNewServiceOrPod)
+ if err != nil {
+ return fmt.Errorf(LogErrFormat, "Updated", "globaltrafficpolicy", obj.Name, gtp.ClusterID, err.Error())
+ }
+ return nil
+}
+
+func (gtp *GlobalTrafficHandler) Deleted(ctx context.Context, obj *v1.GlobalTrafficPolicy) error {
+ log.Infof(LogFormat, "Deleted", "globaltrafficpolicy", obj.Name, gtp.ClusterID, "received")
+ err := HandleEventForGlobalTrafficPolicy(ctx, admiral.Delete, obj, gtp.RemoteRegistry, gtp.ClusterID, modifyServiceEntryForNewServiceOrPod)
+ if err != nil {
+ return fmt.Errorf(LogErrFormat, "Deleted", "globaltrafficpolicy", obj.Name, gtp.ClusterID, err.Error())
+ }
+ return nil
+}
+
+// HandleEventForGlobalTrafficPolicy processes all the events related to GTPs
+func HandleEventForGlobalTrafficPolicy(ctx context.Context, event admiral.EventType, gtp *v1.GlobalTrafficPolicy,
+ remoteRegistry *RemoteRegistry, clusterName string, modifySE ModifySEFunc) error {
+ globalIdentifier := common.GetGtpIdentity(gtp)
+ if len(globalIdentifier) == 0 {
+ return fmt.Errorf(LogFormat, "Event", "globaltrafficpolicy", gtp.Name, clusterName, "Skipped as '"+common.GetWorkloadIdentifier()+" was not found', namespace="+gtp.Namespace)
+ }
+
+ env := common.GetGtpEnv(gtp)
+
+ // For now we're going to force all the events to update only in order to prevent
+ // the endpoints from being deleted.
+ // TODO: Need to come up with a way to prevent deleting default endpoints so that this hack can be removed.
+ // Use the same function as added deployment function to update and put new service entry in place to replace old one
+
+ ctx = context.WithValue(ctx, "clusterName", clusterName)
+ ctx = context.WithValue(ctx, "eventResourceType", common.GTP)
+ ctx = context.WithValue(ctx, common.EventType, event)
+
+ _, err := modifySE(ctx, admiral.Update, env, globalIdentifier, remoteRegistry)
+ return err
+}
From 5ce5f60b15eb599baf7c318ddfa7ac252cd49e7d Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 10:36:38 -0400
Subject: [PATCH 146/235] copied
 admiral/pkg/clusters/globaltraffic_handler_test.go changes from master
Signed-off-by: Shriram Sharma
---
.../clusters/globaltraffic_handler_test.go | 102 ++++++++++++++++++
1 file changed, 102 insertions(+)
create mode 100644 admiral/pkg/clusters/globaltraffic_handler_test.go
diff --git a/admiral/pkg/clusters/globaltraffic_handler_test.go b/admiral/pkg/clusters/globaltraffic_handler_test.go
new file mode 100644
index 00000000..8f7934fc
--- /dev/null
+++ b/admiral/pkg/clusters/globaltraffic_handler_test.go
@@ -0,0 +1,102 @@
+package clusters
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "github.com/stretchr/testify/assert"
+ networkingAlpha3 "istio.io/api/networking/v1alpha3"
+ apiMachineryMetaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func setupForGlobalTrafficHandlerTests() {
+ typeTestSingleton.Do(func() {
+ common.ResetSync()
+ common.InitializeConfig(admiralParamsForTypesTests())
+ })
+}
+
+func TestHandleEventForGlobalTrafficPolicy(t *testing.T) {
+ setupForGlobalTrafficHandlerTests()
+ ctx := context.Background()
+ event := admiral.EventType("Add")
+ p := common.AdmiralParams{
+ KubeconfigPath: "testdata/fake.config",
+ }
+ registry, _ := InitAdmiral(context.Background(), p)
+
+ seFunc := func(ctx context.Context, event admiral.EventType, env string, sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*networkingAlpha3.ServiceEntry, error) {
+ return nil, nil
+ }
+
+ seErrFunc := func(ctx context.Context, event admiral.EventType, env string, sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*networkingAlpha3.ServiceEntry, error) {
+ return nil, fmt.Errorf("Error")
+ }
+ cases := []struct {
+ name string
+ gtp *v1.GlobalTrafficPolicy
+ seFunc ModifySEFunc
+ doesError bool
+ }{
+ {
+ name: "missing identity label in GTP should result in error being returned by the handler",
+ gtp: &v1.GlobalTrafficPolicy{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "testgtp",
+ Annotations: map[string]string{"admiral.io/env": "testenv"},
+ },
+ },
+ seFunc: seFunc,
+ doesError: true,
+ },
+ {
+ name: "empty identity label in GTP should result in error being returned by the handler",
+ gtp: &v1.GlobalTrafficPolicy{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "testgtp",
+ Labels: map[string]string{"identity": ""},
+ Annotations: map[string]string{"admiral.io/env": "testenv"},
+ },
+ },
+ seFunc: seFunc,
+ doesError: true,
+ },
+ {
+ name: "valid GTP config which is expected to pass",
+ gtp: &v1.GlobalTrafficPolicy{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "testgtp",
+ Labels: map[string]string{"identity": "testapp"},
+ Annotations: map[string]string{"admiral.io/env": "testenv"},
+ },
+ },
+ seFunc: seFunc,
+ doesError: false,
+ },
+ {
+ name: "Given a valid GTP config, " +
+ "And modifyServiceEntryForNewServiceOrPod returns an error" +
+ "Then, the function would return an error",
+ gtp: &v1.GlobalTrafficPolicy{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "testgtp",
+ Labels: map[string]string{"identity": "testapp"},
+ Annotations: map[string]string{"admiral.io/env": "testenv"},
+ },
+ },
+ seFunc: seErrFunc,
+ doesError: true,
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ err := HandleEventForGlobalTrafficPolicy(ctx, event, c.gtp, registry, "testcluster", c.seFunc)
+ assert.Equal(t, err != nil, c.doesError)
+ })
+ }
+}
From 325d70df1ff029e028a92ca9d4f03411ac52c1f2 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 10:37:23 -0400
Subject: [PATCH 147/235] copied admiral/pkg/clusters/handler.go changes from
master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/handler.go | 938 +++++---------------------------
1 file changed, 148 insertions(+), 790 deletions(-)
diff --git a/admiral/pkg/clusters/handler.go b/admiral/pkg/clusters/handler.go
index 5cfa6812..10897ae6 100644
--- a/admiral/pkg/clusters/handler.go
+++ b/admiral/pkg/clusters/handler.go
@@ -1,30 +1,20 @@
package clusters
import (
- "bytes"
"context"
"fmt"
- "net"
+ "sort"
"strings"
- "time"
-
- argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
- "github.com/golang/protobuf/ptypes/duration"
- "github.com/golang/protobuf/ptypes/wrappers"
- "github.com/google/go-cmp/cmp"
- "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model"
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+
+ rolloutsV1Alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
+ admiralV1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/util"
log "github.com/sirupsen/logrus"
- "google.golang.org/protobuf/testing/protocmp"
- networkingv1alpha3 "istio.io/api/networking/v1alpha3"
- "istio.io/client-go/pkg/apis/networking/v1alpha3"
- k8sAppsV1 "k8s.io/api/apps/v1"
- k8sV1 "k8s.io/api/core/v1"
- k8sErrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ networkingV1Alpha3 "istio.io/api/networking/v1alpha3"
+ appsV1 "k8s.io/api/apps/v1"
+ coreV1 "k8s.io/api/core/v1"
+ metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
@@ -32,277 +22,30 @@ const (
DefaultConsecutiveGatewayErrors uint32 = 50
DefaultConsecutive5xxErrors uint32 = 0
DefaultInterval int64 = 60
- DefaultHTTP2MaxRequests int32 = 1000
DefaultMaxRequestsPerConnection int32 = 100
)
-// ServiceEntryHandler responsible for handling Add/Update/Delete events for
-// ServiceEntry resources
-type ServiceEntryHandler struct {
- RemoteRegistry *RemoteRegistry
- ClusterID string
-}
-
-// DestinationRuleHandler responsible for handling Add/Update/Delete events for
-// DestinationRule resources
-type DestinationRuleHandler struct {
- RemoteRegistry *RemoteRegistry
- ClusterID string
-}
-
-// VirtualServiceHandler responsible for handling Add/Update/Delete events for
-// VirtualService resources
-type VirtualServiceHandler struct {
- RemoteRegistry *RemoteRegistry
- ClusterID string
-}
-
-// SidecarHandler responsible for handling Add/Update/Delete events for
-// Sidecar resources
-type SidecarHandler struct {
- RemoteRegistry *RemoteRegistry
- ClusterID string
-}
-
// WeightedService utility to store weighted services for argo rollouts
type WeightedService struct {
Weight int32
- Service *k8sV1.Service
+ Service *coreV1.Service
}
-func updateIdentityDependencyCache(sourceIdentity string, identityDependencyCache *common.MapOfMaps, dr *v1.Dependency) {
+func updateIdentityDependencyCache(sourceIdentity string, identityDependencyCache *common.MapOfMaps, dr *admiralV1.Dependency) error {
for _, dIdentity := range dr.Spec.Destinations {
identityDependencyCache.Put(dIdentity, sourceIdentity, sourceIdentity)
}
- log.Infof(LogFormat, "Update", "dependency-cache", dr.Name, "", "Updated=true namespace="+dr.Namespace)
+ log.Debugf(LogFormat, "Update", "dependency-cache", dr.Name, "", "Updated=true namespace="+dr.Namespace)
+ return nil
}
func getIstioResourceName(host string, suffix string) string {
return strings.ToLower(host) + suffix
}
-func getDestinationRule(se *networkingv1alpha3.ServiceEntry, locality string, gtpTrafficPolicy *model.TrafficPolicy) *networkingv1alpha3.DestinationRule {
- var (
- processGtp = true
- dr = &networkingv1alpha3.DestinationRule{}
- )
- dr.Host = se.Hosts[0]
- dr.TrafficPolicy = &networkingv1alpha3.TrafficPolicy{
- Tls: &networkingv1alpha3.ClientTLSSettings{
- Mode: networkingv1alpha3.ClientTLSSettings_ISTIO_MUTUAL,
- },
- ConnectionPool: &networkingv1alpha3.ConnectionPoolSettings{
- Http: &networkingv1alpha3.ConnectionPoolSettings_HTTPSettings{
- Http2MaxRequests: DefaultHTTP2MaxRequests,
- MaxRequestsPerConnection: DefaultMaxRequestsPerConnection,
- },
- },
- LoadBalancer: &networkingv1alpha3.LoadBalancerSettings{
- LbPolicy: &networkingv1alpha3.LoadBalancerSettings_Simple{
- Simple: networkingv1alpha3.LoadBalancerSettings_LEAST_REQUEST,
- },
- },
- }
-
- if len(locality) == 0 {
- log.Warnf(LogErrFormat, "Process", "GlobalTrafficPolicy", dr.Host, "", "Skipping gtp processing, locality of the cluster nodes cannot be determined. Is this minikube?")
- processGtp = false
- }
- if gtpTrafficPolicy != nil && processGtp {
- var loadBalancerSettings = &networkingv1alpha3.LoadBalancerSettings{
- LbPolicy: &networkingv1alpha3.LoadBalancerSettings_Simple{Simple: networkingv1alpha3.LoadBalancerSettings_LEAST_REQUEST},
- }
-
- if len(gtpTrafficPolicy.Target) > 0 {
- var localityLbSettings = &networkingv1alpha3.LocalityLoadBalancerSetting{}
- if gtpTrafficPolicy.LbType == model.TrafficPolicy_FAILOVER {
- distribute := make([]*networkingv1alpha3.LocalityLoadBalancerSetting_Distribute, 0)
- targetTrafficMap := make(map[string]uint32)
- for _, tg := range gtpTrafficPolicy.Target {
- //skip 0 values from GTP as that's implicit for locality settings
- if tg.Weight != int32(0) {
- targetTrafficMap[tg.Region] = uint32(tg.Weight)
- }
- }
- distribute = append(distribute, &networkingv1alpha3.LocalityLoadBalancerSetting_Distribute{
- From: locality + "/*",
- To: targetTrafficMap,
- })
- localityLbSettings.Distribute = distribute
- }
- // else default behavior
- loadBalancerSettings.LocalityLbSetting = localityLbSettings
- dr.TrafficPolicy.LoadBalancer = loadBalancerSettings
- }
- }
- dr.TrafficPolicy.OutlierDetection = getOutlierDetection(se, locality, gtpTrafficPolicy)
- return dr
-}
-
-func getOutlierDetection(se *networkingv1alpha3.ServiceEntry, locality string, gtpTrafficPolicy *model.TrafficPolicy) *networkingv1alpha3.OutlierDetection {
- outlierDetection := &networkingv1alpha3.OutlierDetection{
- BaseEjectionTime: &duration.Duration{Seconds: DefaultBaseEjectionTime},
- ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: DefaultConsecutiveGatewayErrors},
- // The default Consecutive5XXErrors is set to 5 in envoy, setting to 0 disables 5XX error outlier detection so that ConsecutiveGatewayErrors rule can get evaluated
- Consecutive_5XxErrors: &wrappers.UInt32Value{Value: DefaultConsecutive5xxErrors},
- Interval: &duration.Duration{Seconds: DefaultInterval},
- }
-
- if gtpTrafficPolicy != nil && gtpTrafficPolicy.OutlierDetection != nil {
- if gtpTrafficPolicy.OutlierDetection.BaseEjectionTime > 0 {
- outlierDetection.BaseEjectionTime = &duration.Duration{
- Seconds: gtpTrafficPolicy.OutlierDetection.BaseEjectionTime,
- }
- }
- if gtpTrafficPolicy.OutlierDetection.ConsecutiveGatewayErrors > 0 {
- outlierDetection.ConsecutiveGatewayErrors = &wrappers.UInt32Value{
- Value: gtpTrafficPolicy.OutlierDetection.ConsecutiveGatewayErrors,
- }
- }
- if gtpTrafficPolicy.OutlierDetection.Interval > 0 {
- outlierDetection.Interval = &duration.Duration{
- Seconds: gtpTrafficPolicy.OutlierDetection.Interval,
- }
- }
- }
-
- //Scenario 1: Only one endpoint present and is local service (ends in svc.cluster.local) - no outlier detection (optimize this for headless services in future?)
- if len(se.Endpoints) == 1 && (strings.Contains(se.Endpoints[0].Address, common.DotLocalDomainSuffix) || net.ParseIP(se.Endpoints[0].Address).To4() != nil) {
- return nil
- } else if len(se.Endpoints) == 1 {
- //Scenario 2: Only one endpoint present and is remote - outlier detection with 34% ejection (protection against zone specific issues)
- outlierDetection.MaxEjectionPercent = 34
- } else {
- //Scenario 3: Two endpoints present each with different locality and both remote - outlier detection with 100% ejection
- //Scenario 4: Two endpoints present each with different locality with one local and other remote - outlier detection with 100% ejection
- //for service entries with more than 2 endpoints eject 100% to failover to other endpoint within or outside the same region
- outlierDetection.MaxEjectionPercent = 100
- }
- return outlierDetection
-}
-
-func (se *ServiceEntryHandler) Added(obj *v1alpha3.ServiceEntry) {
- if CurrentAdmiralState.ReadOnly {
- log.Infof(LogFormat, "Add", "ServiceEntry", obj.Name, se.ClusterID, "Admiral is in read-only mode. Skipping resource from namespace="+obj.Namespace)
- return
- }
- if IgnoreIstioResource(obj.Spec.ExportTo, obj.Annotations, obj.Namespace) {
- log.Infof(LogFormat, "Add", "ServiceEntry", obj.Name, se.ClusterID, "Skipping resource from namespace="+obj.Namespace)
- return
- }
-}
-
-func (se *ServiceEntryHandler) Updated(obj *v1alpha3.ServiceEntry) {
- if CurrentAdmiralState.ReadOnly {
- log.Infof(LogFormat, "Update", "ServiceEntry", obj.Name, se.ClusterID, "Admiral is in read-only mode. Skipping resource from namespace="+obj.Namespace)
- return
- }
- if IgnoreIstioResource(obj.Spec.ExportTo, obj.Annotations, obj.Namespace) {
- log.Infof(LogFormat, "Update", "ServiceEntry", obj.Name, se.ClusterID, "Skipping resource from namespace="+obj.Namespace)
- return
- }
-}
-
-func (se *ServiceEntryHandler) Deleted(obj *v1alpha3.ServiceEntry) {
- if CurrentAdmiralState.ReadOnly {
- log.Infof(LogFormat, "Delete", "ServiceEntry", obj.Name, se.ClusterID, "Admiral is in read-only mode. Skipping resource from namespace="+obj.Namespace)
- return
- }
- if IgnoreIstioResource(obj.Spec.ExportTo, obj.Annotations, obj.Namespace) {
- log.Infof(LogFormat, "Delete", "ServiceEntry", obj.Name, se.ClusterID, "Skipping resource from namespace="+obj.Namespace)
- return
- }
-}
-
-func (dh *DestinationRuleHandler) Added(ctx context.Context, obj *v1alpha3.DestinationRule) {
- if CurrentAdmiralState.ReadOnly {
- log.Infof(LogFormat, "Add", "DestinationRule", obj.Name, dh.ClusterID, "Admiral is in read-only mode. Skipping resource from namespace="+obj.Namespace)
- return
- }
- if IgnoreIstioResource(obj.Spec.ExportTo, obj.Annotations, obj.Namespace) {
- log.Infof(LogFormat, "Add", "DestinationRule", obj.Name, dh.ClusterID, "Skipping resource from namespace="+obj.Namespace)
- return
- }
- handleDestinationRuleEvent(ctx, obj, dh, common.Add, common.DestinationRuleResourceType)
-}
-
-func (dh *DestinationRuleHandler) Updated(ctx context.Context, obj *v1alpha3.DestinationRule) {
- if CurrentAdmiralState.ReadOnly {
- log.Infof(LogFormat, "Update", "DestinationRule", obj.Name, dh.ClusterID, "Admiral is in read-only mode. Skipping resource from namespace="+obj.Namespace)
- return
- }
- if IgnoreIstioResource(obj.Spec.ExportTo, obj.Annotations, obj.Namespace) {
- log.Infof(LogFormat, "Update", "DestinationRule", obj.Name, dh.ClusterID, "Skipping resource from namespace="+obj.Namespace)
- return
- }
- handleDestinationRuleEvent(ctx, obj, dh, common.Update, common.DestinationRuleResourceType)
-}
-
-func (dh *DestinationRuleHandler) Deleted(ctx context.Context, obj *v1alpha3.DestinationRule) {
- if CurrentAdmiralState.ReadOnly {
- log.Infof(LogFormat, "Delete", "DestinationRule", obj.Name, dh.ClusterID, "Admiral is in read-only mode. Skipping resource from namespace="+obj.Namespace)
- return
- }
- if IgnoreIstioResource(obj.Spec.ExportTo, obj.Annotations, obj.Namespace) {
- log.Infof(LogFormat, "Delete", "DestinationRule", obj.Name, dh.ClusterID, "Skipping resource from namespace="+obj.Namespace)
- return
- }
- handleDestinationRuleEvent(ctx, obj, dh, common.Delete, common.DestinationRuleResourceType)
-}
-
-func (vh *VirtualServiceHandler) Added(ctx context.Context, obj *v1alpha3.VirtualService) {
- if CurrentAdmiralState.ReadOnly {
- log.Infof(LogFormat, "Add", "VirtualService", obj.Name, vh.ClusterID, "Admiral is in read-only mode. Skipping resource from namespace="+obj.Namespace)
- return
- }
- if IgnoreIstioResource(obj.Spec.ExportTo, obj.Annotations, obj.Namespace) {
- log.Infof(LogFormat, "Add", "VirtualService", obj.Name, vh.ClusterID, "Skipping resource from namespace="+obj.Namespace)
- return
- }
- err := handleVirtualServiceEvent(ctx, obj, vh, common.Add, common.VirtualServiceResourceType)
- if err != nil {
- log.Error(err)
- }
-}
-
-func (vh *VirtualServiceHandler) Updated(ctx context.Context, obj *v1alpha3.VirtualService) {
- if CurrentAdmiralState.ReadOnly {
- log.Infof(LogFormat, "Update", "VirtualService", obj.Name, vh.ClusterID, "Admiral is in read-only mode. Skipping resource from namespace="+obj.Namespace)
- return
- }
- if IgnoreIstioResource(obj.Spec.ExportTo, obj.Annotations, obj.Namespace) {
- log.Infof(LogFormat, "Update", "VirtualService", obj.Name, vh.ClusterID, "Skipping resource from namespace="+obj.Namespace)
- return
- }
- err := handleVirtualServiceEvent(ctx, obj, vh, common.Update, common.VirtualServiceResourceType)
- if err != nil {
- log.Error(err)
- }
-}
-
-func (vh *VirtualServiceHandler) Deleted(ctx context.Context, obj *v1alpha3.VirtualService) {
- if CurrentAdmiralState.ReadOnly {
- log.Infof(LogFormat, "Delete", "VirtualService", obj.Name, vh.ClusterID, "Admiral is in read-only mode. Skipping resource from namespace="+obj.Namespace)
- return
- }
- if IgnoreIstioResource(obj.Spec.ExportTo, obj.Annotations, obj.Namespace) {
- log.Infof(LogFormat, "Delete", "VirtualService", obj.Name, vh.ClusterID, "Skipping resource from namespace="+obj.Namespace)
- return
- }
- err := handleVirtualServiceEvent(ctx, obj, vh, common.Delete, common.VirtualServiceResourceType)
- if err != nil {
- log.Error(err)
- }
-}
-
-func (dh *SidecarHandler) Added(ctx context.Context, obj *v1alpha3.Sidecar) {}
-
-func (dh *SidecarHandler) Updated(ctx context.Context, obj *v1alpha3.Sidecar) {}
-
-func (dh *SidecarHandler) Deleted(ctx context.Context, obj *v1alpha3.Sidecar) {}
-
func IgnoreIstioResource(exportTo []string, annotations map[string]string, namespace string) bool {
if len(annotations) > 0 && annotations[common.AdmiralIgnoreAnnotation] == "true" {
+ log.Infof(LogFormat, "admiralIoIgnoreAnnotationCheck", "", "", "", "Value=true namespace="+namespace)
return true
}
@@ -319,500 +62,87 @@ func IgnoreIstioResource(exportTo []string, annotations map[string]string, names
}
}
}
- return true
-}
-
-func handleDestinationRuleEvent(ctx context.Context, obj *v1alpha3.DestinationRule, dh *DestinationRuleHandler, event common.Event, resourceType common.ResourceType) {
- var (
- //nolint
- destinationRule = obj.Spec
- clusterId = dh.ClusterID
- syncNamespace = common.GetSyncNamespace()
- r = dh.RemoteRegistry
- dependentClusters = r.AdmiralCache.CnameDependentClusterCache.Get(destinationRule.Host).Copy()
- allDependentClusters = make(map[string]string)
- )
-
- if len(dependentClusters) > 0 {
- log.Infof(LogFormat, "Event", "DestinationRule", obj.Name, clusterId, "Processing")
- util.MapCopy(allDependentClusters, dependentClusters)
- allDependentClusters[clusterId] = clusterId
- for _, dependentCluster := range allDependentClusters {
- rc := r.GetRemoteController(dependentCluster)
- if event == common.Delete {
- err := rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(syncNamespace).Delete(ctx, obj.Name, metav1.DeleteOptions{})
- if err != nil {
- if k8sErrors.IsNotFound(err) {
- log.Infof(LogFormat, "Delete", "DestinationRule", obj.Name, clusterId, "Either DestinationRule was already deleted, or it never existed")
- } else {
- log.Errorf(LogErrFormat, "Delete", "DestinationRule", obj.Name, clusterId, err)
- }
- } else {
- log.Infof(LogFormat, "Delete", "DestinationRule", obj.Name, clusterId, "Success")
- }
- } else {
- exist, _ := rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(syncNamespace).Get(ctx, obj.Name, metav1.GetOptions{})
- //copy destination rule only to other clusters
- if dependentCluster != clusterId {
- addUpdateDestinationRule(ctx, obj, exist, syncNamespace, rc)
- }
- }
- }
- return
- } else {
- log.Infof(LogFormat, "Event", "DestinationRule", obj.Name, clusterId, "No dependent clusters found")
- }
-
- //copy the DestinationRule `as is` if they are not generated by Admiral
- remoteClusters := r.GetClusterIds()
- for _, ClusterID := range remoteClusters {
- if ClusterID != clusterId {
- rc := r.GetRemoteController(ClusterID)
- if event == common.Delete {
- err := rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(syncNamespace).Delete(ctx, obj.Name, metav1.DeleteOptions{})
- if err != nil {
- if k8sErrors.IsNotFound(err) {
- log.Infof(LogFormat, "Delete", "DestinationRule", obj.Name, clusterId, "Either DestinationRule was already deleted, or it never existed")
- } else {
- log.Errorf(LogErrFormat, "Delete", "DestinationRule", obj.Name, clusterId, err)
- }
- } else {
- log.Infof(LogFormat, "Delete", "DestinationRule", obj.Name, clusterId, "Success")
- }
- } else {
- exist, _ := rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(syncNamespace).Get(ctx, obj.Name, metav1.GetOptions{})
- addUpdateDestinationRule(ctx, obj, exist, syncNamespace, rc)
- }
- }
- }
-}
-
-func handleVirtualServiceEvent(
- ctx context.Context, obj *v1alpha3.VirtualService, vh *VirtualServiceHandler,
- event common.Event, resourceType common.ResourceType) error {
- var (
- //nolint
- virtualService = obj.Spec
- clusterId = vh.ClusterID
- r = vh.RemoteRegistry
- syncNamespace = common.GetSyncNamespace()
- )
- log.Infof(LogFormat, "Event", resourceType, obj.Name, vh.ClusterID, "Received event")
-
- if len(virtualService.Hosts) > 1 {
- log.Errorf(LogFormat, "Event", resourceType, obj.Name, clusterId, "Skipping as multiple hosts not supported for virtual service namespace="+obj.Namespace)
- return nil
- }
-
- // check if this virtual service is used by Argo rollouts for canary strategy, if so, update the corresponding SE with appropriate weights
- if common.GetAdmiralParams().ArgoRolloutsEnabled {
- rollouts, err := vh.RemoteRegistry.GetRemoteController(clusterId).RolloutController.RolloutClient.Rollouts(obj.Namespace).List(ctx, metav1.ListOptions{})
-
- if err != nil {
- log.Errorf(LogErrFormat, "Get", "Rollout", "Error finding rollouts in namespace="+obj.Namespace, clusterId, err)
- } else {
- if len(rollouts.Items) > 0 {
- for _, rollout := range rollouts.Items {
- if rollout.Spec.Strategy.Canary != nil && rollout.Spec.Strategy.Canary.TrafficRouting != nil && rollout.Spec.Strategy.Canary.TrafficRouting.Istio != nil && rollout.Spec.Strategy.Canary.TrafficRouting.Istio.VirtualService.Name == obj.Name {
- HandleEventForRollout(ctx, admiral.Update, &rollout, vh.RemoteRegistry, clusterId)
- }
- }
- }
- }
- }
-
- if len(virtualService.Hosts) != 0 {
- dependentClusters := r.AdmiralCache.CnameDependentClusterCache.Get(virtualService.Hosts[0]).Copy()
- if len(dependentClusters) > 0 {
- for _, dependentCluster := range dependentClusters {
- rc := r.GetRemoteController(dependentCluster)
- if clusterId != dependentCluster {
- log.Infof(LogFormat, "Event", "VirtualService", obj.Name, clusterId, "Processing")
- if event == common.Delete {
- err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Delete(ctx, obj.Name, metav1.DeleteOptions{})
- if err != nil {
- if k8sErrors.IsNotFound(err) {
- log.Infof(LogFormat, "Delete", "VirtualService", obj.Name, clusterId, "Either VirtualService was already deleted, or it never existed")
- } else {
- log.Errorf(LogErrFormat, "Delete", "VirtualService", obj.Name, clusterId, err)
- }
- } else {
- log.Infof(LogFormat, "Delete", "VirtualService", obj.Name, clusterId, "Success")
- }
- } else {
- exist, _ := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, obj.Name, metav1.GetOptions{})
- //change destination host for all http routes .. to same as host on the virtual service
- for _, httpRoute := range virtualService.Http {
- for _, destination := range httpRoute.Route {
- //get at index 0, we do not support wildcards or multiple hosts currently
- if strings.HasSuffix(destination.Destination.Host, common.DotLocalDomainSuffix) {
- destination.Destination.Host = virtualService.Hosts[0]
- }
- }
- }
- for _, tlsRoute := range virtualService.Tls {
- for _, destination := range tlsRoute.Route {
- //get at index 0, we do not support wildcards or multiple hosts currently
- if strings.HasSuffix(destination.Destination.Host, common.DotLocalDomainSuffix) {
- destination.Destination.Host = virtualService.Hosts[0]
- }
- }
- }
- // nolint
- addUpdateVirtualService(ctx, obj, exist, syncNamespace, rc)
- }
- }
- }
- return nil
- } else {
- log.Infof(LogFormat, "Event", "VirtualService", obj.Name, clusterId, "No dependent clusters found")
- }
- }
-
- // copy the VirtualService `as is` if they are not generated by Admiral (not in CnameDependentClusterCache)
- log.Infof(LogFormat, "Event", "VirtualService", obj.Name, clusterId, "Replicating 'as is' to all clusters")
- remoteClusters := r.GetClusterIds()
- for _, ClusterID := range remoteClusters {
- if ClusterID != clusterId {
- rc := r.GetRemoteController(ClusterID)
- if event == common.Delete {
- err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Delete(ctx, obj.Name, metav1.DeleteOptions{})
- if err != nil {
- if k8sErrors.IsNotFound(err) {
- log.Infof(LogFormat, "Delete", "VirtualService", obj.Name, clusterId, "Either VirtualService was already deleted, or it never existed")
- } else {
- log.Errorf(LogErrFormat, "Delete", "VirtualService", obj.Name, clusterId, err)
- }
- } else {
- log.Infof(LogFormat, "Delete", "VirtualService", obj.Name, clusterId, "Success")
- }
- } else {
- exist, _ := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, obj.Name, metav1.GetOptions{})
- // nolint
- addUpdateVirtualService(ctx, obj, exist, syncNamespace, rc)
- }
- }
- }
- return nil
-}
-
-func addUpdateVirtualService(ctx context.Context, obj *v1alpha3.VirtualService, exist *v1alpha3.VirtualService, namespace string, rc *RemoteController) error {
- var (
- err error
- op string
- )
-
- format := "virtualservice %s before: %v, after: %v;"
-
- if obj.Annotations == nil {
- obj.Annotations = map[string]string{}
- }
- obj.Annotations["app.kubernetes.io/created-by"] = "admiral"
- if exist == nil || len(exist.Spec.Hosts) == 0 {
- obj.Namespace = namespace
- obj.ResourceVersion = ""
- _, err = rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(namespace).Create(ctx, obj, metav1.CreateOptions{})
- op = "Add"
- } else {
- op = "Update"
- log.Infof(format, op, exist.Spec.String(), obj.Spec.String())
- exist.Labels = obj.Labels
- exist.Annotations = obj.Annotations
- //nolint
- exist.Spec = obj.Spec
- _, err = rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(namespace).Update(ctx, exist, metav1.UpdateOptions{})
- }
-
- if err != nil {
- log.Errorf(LogErrFormat, op, "VirtualService", obj.Name, rc.ClusterID, err)
- return err
- }
- log.Infof(LogFormat, op, "VirtualService", obj.Name, rc.ClusterID, "Success")
- return nil
-}
-
-func validateAndProcessServiceEntryEndpoints(obj *v1alpha3.ServiceEntry) bool {
- var areEndpointsValid = true
-
- temp := make([]*networkingv1alpha3.WorkloadEntry, 0)
- for _, endpoint := range obj.Spec.Endpoints {
- if endpoint.Address == "dummy.admiral.global" {
- areEndpointsValid = false
- } else {
- temp = append(temp, endpoint)
- }
- }
- obj.Spec.Endpoints = temp
- log.Infof("type=ServiceEntry, name=%s, endpointsValid=%v, numberOfValidEndpoints=%d", obj.Name, areEndpointsValid, len(obj.Spec.Endpoints))
-
- return areEndpointsValid
-}
-
-func addUpdateServiceEntry(ctx context.Context, obj *v1alpha3.ServiceEntry, exist *v1alpha3.ServiceEntry, namespace string, rc *RemoteController) {
- var (
- err error
- op, diff string
- skipUpdate bool
- )
-
- if obj.Annotations == nil {
- obj.Annotations = map[string]string{}
- }
- obj.Annotations["app.kubernetes.io/created-by"] = "admiral"
-
- areEndpointsValid := validateAndProcessServiceEntryEndpoints(obj)
-
- if exist == nil || exist.Spec.Hosts == nil {
- op = "Add"
- //se will be created if endpoints are valid, in case they are not valid se will be created with just valid endpoints
- if len(obj.Spec.Endpoints) > 0 {
- obj.Namespace = namespace
- obj.ResourceVersion = ""
- _, err = rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries(namespace).Create(ctx, obj, metav1.CreateOptions{})
- log.Infof(LogFormat+" SE=%s", op, "ServiceEntry", obj.Name, rc.ClusterID, "New SE", obj.Spec.String())
- } else {
- log.Errorf(LogFormat+" SE=%s", op, "ServiceEntry", obj.Name, rc.ClusterID, "Creation of SE skipped as endpoints are not valid", obj.Spec.String())
- }
- } else {
- op = "Update"
- if areEndpointsValid { //update will happen only when all the endpoints are valid
- exist.Labels = obj.Labels
- exist.Annotations = obj.Annotations
- skipUpdate, diff = skipDestructiveUpdate(rc, obj, exist)
- if diff != "" {
- log.Infof(LogFormat+" diff=%s", op, "ServiceEntry", obj.Name, rc.ClusterID, "Diff in update", diff)
- }
- if skipUpdate {
- log.Infof(LogFormat, op, "ServiceEntry", obj.Name, rc.ClusterID, "Update skipped as it was destructive during Admiral's bootup phase")
- return
- } else {
- //nolint
- exist.Spec = obj.Spec
- _, err = rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries(namespace).Update(ctx, exist, metav1.UpdateOptions{})
- }
- } else {
- log.Infof(LogFormat, op, "ServiceEntry", obj.Name, rc.ClusterID, "SE could not be updated as all the recived endpoints are not valid.")
- }
- }
- if err != nil {
- log.Errorf(LogErrFormat, op, "ServiceEntry", obj.Name, rc.ClusterID, err)
- } else {
- log.Infof(LogFormat, op, "ServiceEntry", obj.Name, rc.ClusterID, "Success")
- }
-}
-
-func skipDestructiveUpdate(rc *RemoteController, new *v1alpha3.ServiceEntry, old *v1alpha3.ServiceEntry) (bool, string) {
- var (
- skipDestructive = false
- destructive, diff = getServiceEntryDiff(new, old)
- )
- //do not update SEs during bootup phase if they are destructive
- if time.Since(rc.StartTime) < (2*common.GetAdmiralParams().CacheRefreshDuration) && destructive {
- skipDestructive = true
- }
- return skipDestructive, diff
-}
-
-// Diffs only endpoints
-func getServiceEntryDiff(new *v1alpha3.ServiceEntry, old *v1alpha3.ServiceEntry) (destructive bool, diff string) {
- //we diff only if both objects exist
- if old == nil || new == nil {
- return false, ""
- }
- destructive = false
- format := "%s %s before: %v, after: %v;"
- var buffer bytes.Buffer
- //nolint
- seNew := new.Spec
- //nolint
- seOld := old.Spec
-
- oldEndpointMap := make(map[string]*networkingv1alpha3.WorkloadEntry)
- found := make(map[string]string)
- for _, oEndpoint := range seOld.Endpoints {
- oldEndpointMap[oEndpoint.Address] = oEndpoint
- }
- for _, nEndpoint := range seNew.Endpoints {
- if val, ok := oldEndpointMap[nEndpoint.Address]; ok {
- found[nEndpoint.Address] = "1"
- if !cmp.Equal(val, nEndpoint, protocmp.Transform()) {
- destructive = true
- buffer.WriteString(fmt.Sprintf(format, "endpoint", "Update", val.String(), nEndpoint.String()))
- }
- } else {
- buffer.WriteString(fmt.Sprintf(format, "endpoint", "Add", "", nEndpoint.String()))
- }
- }
-
- for key := range oldEndpointMap {
- if _, ok := found[key]; !ok {
- destructive = true
- buffer.WriteString(fmt.Sprintf(format, "endpoint", "Delete", oldEndpointMap[key].String(), ""))
- }
- }
-
- diff = buffer.String()
- return destructive, diff
-}
-
-func deleteVirtualService(ctx context.Context, exist *v1alpha3.VirtualService, namespace string, rc *RemoteController) error {
- if exist == nil {
- return fmt.Errorf("the VirtualService passed was nil")
- }
- err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(namespace).Delete(ctx, exist.Name, metav1.DeleteOptions{})
- if err != nil {
- if k8sErrors.IsNotFound(err) {
- return fmt.Errorf("either VirtualService was already deleted, or it never existed")
- }
- return err
+ if common.IsDefaultPersona() && len(annotations) > 0 && annotations[common.CreatedBy] == common.Cartographer {
+ return true
}
- return nil
-}
-func deleteServiceEntry(ctx context.Context, exist *v1alpha3.ServiceEntry, namespace string, rc *RemoteController) {
- if exist != nil {
- err := rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries(namespace).Delete(ctx, exist.Name, metav1.DeleteOptions{})
- if err != nil {
- if k8sErrors.IsNotFound(err) {
- log.Infof(LogFormat, "Delete", "ServiceEntry", exist.Name, rc.ClusterID, "Either ServiceEntry was already deleted, or it never existed")
- } else {
- log.Errorf(LogErrFormat, "Delete", "ServiceEntry", exist.Name, rc.ClusterID, err)
- }
- } else {
- log.Infof(LogFormat, "Delete", "ServiceEntry", exist.Name, rc.ClusterID, "Success")
- }
- }
+ return true
}
-func addUpdateDestinationRule(ctx context.Context, obj *v1alpha3.DestinationRule, exist *v1alpha3.DestinationRule, namespace string, rc *RemoteController) {
- var err error
- var op string
- if obj.Annotations == nil {
- obj.Annotations = map[string]string{}
- }
- obj.Annotations["app.kubernetes.io/created-by"] = "admiral"
- if exist == nil || exist.Name == "" || exist.Spec.Host == "" {
- obj.Namespace = namespace
- obj.ResourceVersion = ""
- _, err = rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(namespace).Create(ctx, obj, metav1.CreateOptions{})
- op = "Add"
- } else {
- exist.Labels = obj.Labels
- exist.Annotations = obj.Annotations
- //nolint
- exist.Spec = obj.Spec
- op = "Update"
- _, err = rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(namespace).Update(ctx, exist, metav1.UpdateOptions{})
- }
-
- if err != nil {
- log.Errorf(LogErrFormat, op, "DestinationRule", obj.Name, rc.ClusterID, err)
- } else {
- log.Infof(LogFormat, op, "DestinationRule", obj.Name, rc.ClusterID, "Success")
+func getServiceForDeployment(rc *RemoteController, deployment *appsV1.Deployment) (*coreV1.Service, error) {
+ if deployment == nil {
+ return nil, fmt.Errorf(LogFormatAdv, "Get", "Service", "", "", rc.ClusterID, "error getting service, deployment is nil.")
}
-}
-func deleteDestinationRule(ctx context.Context, exist *v1alpha3.DestinationRule, namespace string, rc *RemoteController) {
- if exist != nil {
- err := rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(namespace).Delete(ctx, exist.Name, metav1.DeleteOptions{})
- if err != nil {
- if k8sErrors.IsNotFound(err) {
- log.Infof(LogFormat, "Delete", "DestinationRule", exist.Name, rc.ClusterID, "Either DestinationRule was already deleted, or it never existed")
- } else {
- log.Errorf(LogErrFormat, "Delete", "DestinationRule", exist.Name, rc.ClusterID, err)
- }
- } else {
- log.Infof(LogFormat, "Delete", "DestinationRule", exist.Name, rc.ClusterID, "Success")
- }
+ if deployment.Spec.Selector == nil || deployment.Spec.Selector.MatchLabels == nil {
+ return nil, fmt.Errorf(LogFormatAdv, "Get", "Service", deployment.Name, deployment.Namespace, rc.ClusterID, "no selectors found")
}
-}
-// nolint
-func createServiceEntrySkeletion(se networkingv1alpha3.ServiceEntry, name string, namespace string) *v1alpha3.ServiceEntry {
- return &v1alpha3.ServiceEntry{Spec: se, ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}}
-}
-
-// nolint
-func createSidecarSkeleton(sidecar networkingv1alpha3.Sidecar, name string, namespace string) *v1alpha3.Sidecar {
- return &v1alpha3.Sidecar{Spec: sidecar, ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}}
-}
-
-// nolint
-func createDestinationRuleSkeletion(dr networkingv1alpha3.DestinationRule, name string, namespace string) *v1alpha3.DestinationRule {
- return &v1alpha3.DestinationRule{Spec: dr, ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}}
-}
-
-// nolint
-func createVirtualServiceSkeleton(vs networkingv1alpha3.VirtualService, name string, namespace string) *v1alpha3.VirtualService {
- return &v1alpha3.VirtualService{Spec: vs, ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}}
-}
-
-func getServiceForDeployment(rc *RemoteController, deployment *k8sAppsV1.Deployment) *k8sV1.Service {
- if deployment == nil {
- return nil
- }
cachedServices := rc.ServiceController.Cache.Get(deployment.Namespace)
if cachedServices == nil {
- return nil
+ return nil, fmt.Errorf(LogFormatAdv, "Get", "Service", deployment.Name, deployment.Namespace, rc.ClusterID, "no cached services found for deployment.")
}
- var matchedService *k8sV1.Service
+
+ // Sort the cachedServices so that the services are ordered by creation time
+ sort.Slice(cachedServices, func(i, j int) bool {
+ return cachedServices[i].CreationTimestamp.Before(&cachedServices[j].CreationTimestamp)
+ })
+
+ var matchedService *coreV1.Service
for _, service := range cachedServices {
var match = common.IsServiceMatch(service.Spec.Selector, deployment.Spec.Selector)
//make sure the service matches the deployment Selector and also has a mesh port in the port spec
if match {
- ports := GetMeshPortsForDeployment(rc.ClusterID, service, deployment)
+ ports := GetMeshPortsForDeployments(rc.ClusterID, service, deployment)
if len(ports) > 0 {
matchedService = service
break
}
}
}
- return matchedService
-}
-func getDependentClusters(dependents map[string]string, identityClusterCache *common.MapOfMaps, sourceServices map[string]*k8sV1.Service) map[string]string {
- var dependentClusters = make(map[string]string)
- if dependents == nil {
- return dependentClusters
- }
- for depIdentity := range dependents {
- clusters := identityClusterCache.Get(depIdentity)
- if clusters == nil {
- continue
- }
- clusters.Range(func(k string, clusterID string) {
- _, ok := sourceServices[clusterID]
- if !ok {
- dependentClusters[clusterID] = clusterID
- }
- })
+ if matchedService == nil {
+ return nil, fmt.Errorf(LogFormatAdv, "Get", "Service", deployment.Name, deployment.Namespace, rc.ClusterID, "no matching service instances found")
}
- return dependentClusters
+
+ return matchedService, nil
}
-func copyEndpoint(e *networkingv1alpha3.WorkloadEntry) *networkingv1alpha3.WorkloadEntry {
+func copyEndpoint(e *networkingV1Alpha3.WorkloadEntry) *networkingV1Alpha3.WorkloadEntry {
var (
labels = make(map[string]string)
ports = make(map[string]uint32)
)
util.MapCopy(labels, e.Labels)
util.MapCopy(ports, e.Ports)
- return &networkingv1alpha3.WorkloadEntry{Address: e.Address, Ports: ports, Locality: e.Locality, Labels: labels}
+ return &networkingV1Alpha3.WorkloadEntry{Address: e.Address, Ports: ports, Locality: e.Locality, Labels: labels}
}
// A rollout can use one of 2 stratergies :-
// 1. Canary strategy - which can use a virtual service to manage the weights associated with a stable and canary service. Admiral created endpoints in service entries will use the weights assigned in the Virtual Service
// 2. Blue green strategy- this contains 2 service instances in a namespace, an active service and a preview service. Admiral will use repective service to create active and preview endpoints
-func getServiceForRollout(ctx context.Context, rc *RemoteController, rollout *argo.Rollout) map[string]*WeightedService {
+func getServiceForRollout(ctx context.Context, rc *RemoteController, rollout *rolloutsV1Alpha1.Rollout) map[string]*WeightedService {
if rollout == nil {
return nil
}
+
+ if rollout.Spec.Selector == nil || rollout.Spec.Selector.MatchLabels == nil {
+ log.Infof("No selector for rollout=%s in namespace=%s and cluster=%s", rollout.Name, rollout.Namespace, rc.ClusterID)
+ return nil
+ }
+
cachedServices := rc.ServiceController.Cache.Get(rollout.Namespace)
if cachedServices == nil {
return nil
}
+
+ if rollout.Spec.Strategy == (rolloutsV1Alpha1.RolloutStrategy{}) {
+ return nil
+ }
+
rolloutStrategy := rollout.Spec.Strategy
+
if rolloutStrategy.BlueGreen == nil && rolloutStrategy.Canary == nil {
return nil
}
@@ -835,94 +165,122 @@ func getServiceForRollout(ctx context.Context, rc *RemoteController, rollout *ar
blueGreenActiveService = GetServiceWithSuffixMatch(common.RolloutActiveServiceSuffix, cachedServices)
}
} else if rolloutStrategy.Canary != nil {
- canaryService = rolloutStrategy.Canary.CanaryService
- stableService = rolloutStrategy.Canary.StableService
-
- //calculate canary weights if canary strategy is using Istio traffic management
- if len(stableService) > 0 && len(canaryService) > 0 && rolloutStrategy.Canary.TrafficRouting != nil && rolloutStrategy.Canary.TrafficRouting.Istio != nil {
- //pick stable service if specified
- if len(stableService) > 0 {
+ //If istio canary perform below operations
+ if rolloutStrategy.Canary.TrafficRouting != nil && rolloutStrategy.Canary.TrafficRouting.Istio != nil {
+ canaryService = rolloutStrategy.Canary.CanaryService
+ stableService = rolloutStrategy.Canary.StableService
+
+ //calculate canary weights if canary strategy is using Istio traffic management
+ if len(stableService) > 0 && len(canaryService) > 0 {
+ //pick stable service if specified
istioCanaryWeights[stableService] = 1
- } else {
- //pick a service that ends in RolloutStableServiceSuffix if one is available
- sName := GetServiceWithSuffixMatch(common.RolloutStableServiceSuffix, cachedServices)
- if len(sName) > 0 {
- istioCanaryWeights[sName] = 1
- }
- }
- virtualServiceName := rolloutStrategy.Canary.TrafficRouting.Istio.VirtualService.Name
- virtualService, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(rollout.Namespace).Get(ctx, virtualServiceName, metav1.GetOptions{})
+ if rolloutStrategy.Canary.TrafficRouting.Istio.VirtualService != nil && rolloutStrategy.Canary.TrafficRouting.Istio.VirtualService.Name != "" {
+ virtualServiceName := rolloutStrategy.Canary.TrafficRouting.Istio.VirtualService.Name
+ virtualService, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(rollout.Namespace).Get(ctx, virtualServiceName, metaV1.GetOptions{})
- if err != nil {
- log.Warnf("Error fetching VirtualService referenced in rollout canary for rollout with name=%s in namespace=%s and cluster=%s err=%v", rollout.Name, rollout.Namespace, rc.ClusterID, err)
- }
+ if err != nil {
+ log.Warnf("Error fetching VirtualService referenced in rollout canary for rollout with name=%s in namespace=%s and cluster=%s err=%v", rollout.Name, rollout.Namespace, rc.ClusterID, err)
+ }
- if len(rolloutStrategy.Canary.TrafficRouting.Istio.VirtualService.Routes) > 0 {
- virtualServiceRouteName = rolloutStrategy.Canary.TrafficRouting.Istio.VirtualService.Routes[0]
- }
+ if rolloutStrategy.Canary.TrafficRouting.Istio.VirtualService.Routes != nil && len(rolloutStrategy.Canary.TrafficRouting.Istio.VirtualService.Routes) > 0 {
+ virtualServiceRouteName = rolloutStrategy.Canary.TrafficRouting.Istio.VirtualService.Routes[0]
+ }
- if virtualService != nil {
- //nolint
- var vs = virtualService.Spec
- if len(vs.Http) > 0 {
- var httpRoute *networkingv1alpha3.HTTPRoute
- if len(virtualServiceRouteName) > 0 {
- for _, route := range vs.Http {
- if route.Name == virtualServiceRouteName {
- httpRoute = route
- log.Infof("VirtualService route referenced in rollout found, for rollout with name=%s route=%s in namespace=%s and cluster=%s", rollout.Name, virtualServiceRouteName, rollout.Namespace, rc.ClusterID)
- break
+ if virtualService != nil {
+ //nolint
+ var vs = virtualService.Spec
+ if len(vs.Http) > 0 {
+ var httpRoute *networkingV1Alpha3.HTTPRoute
+ if len(virtualServiceRouteName) > 0 {
+ for _, route := range vs.Http {
+ if route.Name == virtualServiceRouteName {
+ httpRoute = route
+ log.Infof("VirtualService route referenced in rollout found, for rollout with name=%s route=%s in namespace=%s and cluster=%s", rollout.Name, virtualServiceRouteName, rollout.Namespace, rc.ClusterID)
+ break
+ } else {
+ log.Debugf("Argo rollout VirtualService route name didn't match with a route, for rollout with name=%s route=%s in namespace=%s and cluster=%s", rollout.Name, route.Name, rollout.Namespace, rc.ClusterID)
+ }
+ }
} else {
- log.Debugf("Argo rollout VirtualService route name didn't match with a route, for rollout with name=%s route=%s in namespace=%s and cluster=%s", rollout.Name, route.Name, rollout.Namespace, rc.ClusterID)
+ if len(vs.Http) == 1 {
+ httpRoute = vs.Http[0]
+ log.Debugf("Using the default and the only route in Virtual Service, for rollout with name=%s route=%s in namespace=%s and cluster=%s", rollout.Name, "", rollout.Namespace, rc.ClusterID)
+ } else {
+ log.Errorf("Skipping VirtualService referenced in rollout as it has MORE THAN ONE route but no name route selector in rollout, for rollout with name=%s in namespace=%s and cluster=%s", rollout.Name, rollout.Namespace, rc.ClusterID)
+ }
+ }
+ if httpRoute != nil {
+ //find the weight associated with the destination (k8s service)
+ for _, destination := range httpRoute.Route {
+ if (destination.Destination.Host == canaryService || destination.Destination.Host == stableService) && destination.Weight > 0 {
+ istioCanaryWeights[destination.Destination.Host] = destination.Weight
+ }
+ }
}
- }
- } else {
- if len(vs.Http) == 1 {
- httpRoute = vs.Http[0]
- log.Debugf("Using the default and the only route in Virtual Service, for rollout with name=%s route=%s in namespace=%s and cluster=%s", rollout.Name, "", rollout.Namespace, rc.ClusterID)
} else {
- log.Errorf("Skipping VirtualService referenced in rollout as it has MORE THAN ONE route but no name route selector in rollout, for rollout with name=%s in namespace=%s and cluster=%s", rollout.Name, rollout.Namespace, rc.ClusterID)
+ log.Warnf("No VirtualService was specified in rollout or the specified VirtualService has NO routes, for rollout with name=%s in namespace=%s and cluster=%s", rollout.Name, rollout.Namespace, rc.ClusterID)
}
}
- if httpRoute != nil {
- //find the weight associated with the destination (k8s service)
- for _, destination := range httpRoute.Route {
- if (destination.Destination.Host == canaryService || destination.Destination.Host == stableService) && destination.Weight > 0 {
- istioCanaryWeights[destination.Destination.Host] = destination.Weight
+ }
+ for _, service := range cachedServices {
+ match := common.IsServiceMatch(service.Spec.Selector, rollout.Spec.Selector)
+ //make sure the service matches the rollout Selector and also has a mesh port in the port spec
+ if match {
+ ports := GetMeshPortsForRollout(rc.ClusterID, service, rollout)
+ if len(ports) > 0 {
+ if val, ok := istioCanaryWeights[service.Name]; ok {
+ matchedServices[service.Name] = &WeightedService{Weight: val, Service: service}
}
}
}
- } else {
- log.Warnf("No VirtualService was specified in rollout or the specified VirtualService has NO routes, for rollout with name=%s in namespace=%s and cluster=%s", rollout.Name, rollout.Namespace, rc.ClusterID)
}
- }
- for _, service := range cachedServices {
- match := common.IsServiceMatch(service.Spec.Selector, rollout.Spec.Selector)
- //make sure the service matches the rollout Selector and also has a mesh port in the port spec
- if match {
- ports := GetMeshPortsForRollout(rc.ClusterID, service, rollout)
- if len(ports) > 0 {
- if val, ok := istioCanaryWeights[service.Name]; ok {
- matchedServices[service.Name] = &WeightedService{Weight: val, Service: service}
+ return matchedServices
+ } else if len(stableService) > 0 {
+ for _, service := range cachedServices {
+ //skip services that are not referenced in the rollout
+ if service.ObjectMeta.Name != stableService {
+ log.Infof("Skipping service=%s for rollout=%s in namespace=%s and cluster=%s", service.Name, rollout.Name, rollout.Namespace, rc.ClusterID)
+ continue
+ }
+ match := common.IsServiceMatch(service.Spec.Selector, rollout.Spec.Selector)
+ //make sure the service matches the rollout Selector and also has a mesh port in the port spec
+ if match {
+ ports := GetMeshPortsForRollout(rc.ClusterID, service, rollout)
+ if len(ports) > 0 {
+ if len(istioCanaryWeights) == 0 {
+ matchedServices[service.Name] = &WeightedService{Weight: 1, Service: service}
+ return matchedServices
+ }
}
}
}
}
- return matchedServices
- } else if len(stableService) > 0 {
- for _, service := range cachedServices {
- //skip services that are not referenced in the rollout
- if service.ObjectMeta.Name != stableService {
- log.Infof("Skipping service=%s for rollout=%s in namespace=%s and cluster=%s", service.Name, rollout.Name, rollout.Namespace, rc.ClusterID)
- continue
- }
- match := common.IsServiceMatch(service.Spec.Selector, rollout.Spec.Selector)
- //make sure the service matches the rollout Selector and also has a mesh port in the port spec
- if match {
- ports := GetMeshPortsForRollout(rc.ClusterID, service, rollout)
- if len(ports) > 0 {
- if len(istioCanaryWeights) == 0 {
+ } else {
+ /*
+ This change is for MESH-2786, where if not istio canary then all traffic will need to go to root service
+ since istio does not know the split info as there is no virtual service
+ */
+
+ sName := GetServiceWithSuffixMatch(common.RolloutRootServiceSuffix, cachedServices)
+ if len(sName) <= 0 {
+ //Fallback if root service not found
+ log.Infof("root service not found, falling back to stable for rollout=%s in namespace=%s and cluster=%s", rollout.Name, rollout.Namespace, rc.ClusterID)
+ sName = GetServiceWithSuffixMatch(common.RolloutStableServiceSuffix, cachedServices)
+ }
+
+ // If root and stable not found, exit canary logic and use generic logic to choose random service
+ if len(sName) != 0 {
+ for _, service := range cachedServices {
+ if sName != service.Name {
+ continue
+ }
+ match := common.IsServiceMatch(service.Spec.Selector, rollout.Spec.Selector)
+ //make sure the service matches the rollout Selector and also has a mesh port in the port spec
+ if match {
+ ports := GetMeshPortsForRollout(rc.ClusterID, service, rollout)
+ if len(ports) > 0 {
+ //Adding 100% traffic to this service
matchedServices[service.Name] = &WeightedService{Weight: 1, Service: service}
return matchedServices
}
@@ -957,7 +315,7 @@ func getServiceForRollout(ctx context.Context, rc *RemoteController, rollout *ar
return matchedServices
}
-func GetServiceWithSuffixMatch(suffix string, services []*k8sV1.Service) string {
+func GetServiceWithSuffixMatch(suffix string, services []*coreV1.Service) string {
for _, service := range services {
if strings.HasSuffix(service.Name, suffix) {
return service.Name
From 7aaf31b47a2a40bb8989c4c148dddf37194233ec Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 10:38:03 -0400
Subject: [PATCH 148/235] copied admiral/pkg/clusters/handler_test.go chages
from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/handler_test.go | 1297 ++++----------------------
1 file changed, 156 insertions(+), 1141 deletions(-)
diff --git a/admiral/pkg/clusters/handler_test.go b/admiral/pkg/clusters/handler_test.go
index d171c21e..475956f0 100644
--- a/admiral/pkg/clusters/handler_test.go
+++ b/admiral/pkg/clusters/handler_test.go
@@ -2,106 +2,47 @@ package clusters
import (
"context"
- "fmt"
- "reflect"
- "strings"
"testing"
"time"
- "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model"
+ argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
+ cmp "github.com/google/go-cmp/cmp"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/istio"
"github.com/istio-ecosystem/admiral/admiral/pkg/test"
- k8sErrors "k8s.io/apimachinery/pkg/api/errors"
-
- argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
- "github.com/golang/protobuf/ptypes/duration"
- "github.com/golang/protobuf/ptypes/wrappers"
- "github.com/google/go-cmp/cmp"
- "github.com/stretchr/testify/assert"
- "google.golang.org/protobuf/testing/protocmp"
"istio.io/api/networking/v1alpha3"
v1alpha32 "istio.io/client-go/pkg/apis/networking/v1alpha3"
istioFake "istio.io/client-go/pkg/clientset/versioned/fake"
+
coreV1 "k8s.io/api/core/v1"
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"
)
-func TestGetDependentClusters(t *testing.T) {
- identityClusterCache := common.NewMapOfMaps()
- identityClusterCache.Put("id1", "dep1", "cl1")
- identityClusterCache.Put("id2", "dep2", "cl2")
- identityClusterCache.Put("id3", "dep3", "cl3")
-
- testCases := []struct {
- name string
- dependents map[string]string
- identityClusterCache *common.MapOfMaps
- sourceServices map[string]*coreV1.Service
- expectedResult map[string]string
- }{
- {
- name: "nil dependents map",
- dependents: nil,
- expectedResult: make(map[string]string),
- },
- {
- name: "empty dependents map",
- dependents: map[string]string{},
- identityClusterCache: identityClusterCache,
- expectedResult: map[string]string{},
- },
- {
- name: "no dependent match",
- dependents: map[string]string{
- "id99": "val1",
- },
- identityClusterCache: identityClusterCache,
- expectedResult: map[string]string{},
- },
- {
- name: "no service for matched dep cluster",
- dependents: map[string]string{
- "id1": "val1",
- },
- identityClusterCache: identityClusterCache,
- sourceServices: map[string]*coreV1.Service{
- "cl1": &coreV1.Service{},
- },
- expectedResult: map[string]string{},
- },
- {
- name: "found service for matched dep cluster",
- dependents: map[string]string{
- "id1": "val1",
- },
- identityClusterCache: identityClusterCache,
- sourceServices: map[string]*coreV1.Service{
- "cl99": &coreV1.Service{
- ObjectMeta: metaV1.ObjectMeta{
- Name: "testservice",
- },
- },
- },
- expectedResult: map[string]string{
- "cl1": "cl1",
- },
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- actualResult := getDependentClusters(tc.dependents, tc.identityClusterCache, tc.sourceServices)
- assert.Equal(t, len(tc.expectedResult), len(actualResult))
- assert.True(t, reflect.DeepEqual(actualResult, tc.expectedResult))
- })
+func admiralParamsForHandlerTests(argoEnabled bool) common.AdmiralParams {
+ return common.AdmiralParams{
+ ArgoRolloutsEnabled: argoEnabled,
+ LabelSet: &common.LabelSet{},
}
+}
+func setupForHandlerTests(argoEnabled bool) {
+ common.ResetSync()
+ common.InitializeConfig(admiralParamsForHandlerTests(argoEnabled))
}
func TestIgnoreIstioResource(t *testing.T) {
+
+ admiralParams := common.AdmiralParams{
+ LabelSet: &common.LabelSet{},
+ TrafficConfigPersona: false,
+ SyncNamespace: "ns",
+ }
+ common.ResetSync()
+ common.InitializeConfig(admiralParams)
+
//Struct of test case info. Name is required.
testCases := []struct {
name string
@@ -159,6 +100,13 @@ func TestIgnoreIstioResource(t *testing.T) {
namespace: "ns",
expectedResult: true,
},
+ {
+ name: "created by cartographer",
+ exportTo: []string{"namespace1", "namespace2"},
+ annotations: map[string]string{common.CreatedBy: common.Cartographer},
+ namespace: "random-namespace",
+ expectedResult: true,
+ },
}
//Run the test for every provided case
@@ -174,551 +122,14 @@ func TestIgnoreIstioResource(t *testing.T) {
}
}
-func TestGetDestinationRule(t *testing.T) {
- //Do setup here
- outlierDetection := &v1alpha3.OutlierDetection{
- BaseEjectionTime: &duration.Duration{Seconds: 300},
- ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: 50},
- Consecutive_5XxErrors: &wrappers.UInt32Value{Value: 0},
- Interval: &duration.Duration{Seconds: 60},
- MaxEjectionPercent: 100,
- }
- mTLS := &v1alpha3.TrafficPolicy{
- Tls: &v1alpha3.ClientTLSSettings{
- Mode: v1alpha3.ClientTLSSettings_ISTIO_MUTUAL,
- },
- OutlierDetection: outlierDetection,
- ConnectionPool: &v1alpha3.ConnectionPoolSettings{
- Http: &v1alpha3.ConnectionPoolSettings_HTTPSettings{
- Http2MaxRequests: DefaultHTTP2MaxRequests,
- MaxRequestsPerConnection: DefaultMaxRequestsPerConnection,
- },
- },
- LoadBalancer: &v1alpha3.LoadBalancerSettings{
- LbPolicy: &v1alpha3.LoadBalancerSettings_Simple{
- Simple: v1alpha3.LoadBalancerSettings_LEAST_REQUEST,
- },
- },
- }
-
- se := &v1alpha3.ServiceEntry{Hosts: []string{"qa.myservice.global"}, Endpoints: []*v1alpha3.WorkloadEntry{
- {Address: "east.com", Locality: "us-east-2"}, {Address: "west.com", Locality: "us-west-2"},
- }}
- noGtpDr := v1alpha3.DestinationRule{
- Host: "qa.myservice.global",
- TrafficPolicy: mTLS,
- }
-
- basicGtpDr := v1alpha3.DestinationRule{
- Host: "qa.myservice.global",
- TrafficPolicy: &v1alpha3.TrafficPolicy{
- Tls: &v1alpha3.ClientTLSSettings{Mode: v1alpha3.ClientTLSSettings_ISTIO_MUTUAL},
- LoadBalancer: &v1alpha3.LoadBalancerSettings{
- LbPolicy: &v1alpha3.LoadBalancerSettings_Simple{Simple: v1alpha3.LoadBalancerSettings_LEAST_REQUEST},
- LocalityLbSetting: &v1alpha3.LocalityLoadBalancerSetting{},
- },
- OutlierDetection: outlierDetection,
- ConnectionPool: &v1alpha3.ConnectionPoolSettings{
- Http: &v1alpha3.ConnectionPoolSettings_HTTPSettings{
- Http2MaxRequests: DefaultHTTP2MaxRequests,
- MaxRequestsPerConnection: DefaultMaxRequestsPerConnection,
- },
- },
- },
- }
-
- failoverGtpDr := v1alpha3.DestinationRule{
- Host: "qa.myservice.global",
- TrafficPolicy: &v1alpha3.TrafficPolicy{
- Tls: &v1alpha3.ClientTLSSettings{Mode: v1alpha3.ClientTLSSettings_ISTIO_MUTUAL},
- LoadBalancer: &v1alpha3.LoadBalancerSettings{
- LbPolicy: &v1alpha3.LoadBalancerSettings_Simple{Simple: v1alpha3.LoadBalancerSettings_LEAST_REQUEST},
- LocalityLbSetting: &v1alpha3.LocalityLoadBalancerSetting{
- Distribute: []*v1alpha3.LocalityLoadBalancerSetting_Distribute{
- {
- From: "uswest2/*",
- To: map[string]uint32{"us-west-2": 100},
- },
- },
- },
- },
- OutlierDetection: outlierDetection,
- ConnectionPool: &v1alpha3.ConnectionPoolSettings{
- Http: &v1alpha3.ConnectionPoolSettings_HTTPSettings{
- Http2MaxRequests: DefaultHTTP2MaxRequests,
- MaxRequestsPerConnection: DefaultMaxRequestsPerConnection,
- },
- },
- },
- }
-
- topologyGTPPolicy := &model.TrafficPolicy{
- LbType: model.TrafficPolicy_TOPOLOGY,
- Target: []*model.TrafficGroup{
- {
- Region: "us-west-2",
- Weight: 100,
- },
- },
- }
-
- failoverGTPPolicy := &model.TrafficPolicy{
- LbType: model.TrafficPolicy_FAILOVER,
- Target: []*model.TrafficGroup{
- {
- Region: "us-west-2",
- Weight: 100,
- },
- {
- Region: "us-east-2",
- Weight: 0,
- },
- },
- }
-
- //Struct of test case info. Name is required.
- testCases := []struct {
- name string
- se *v1alpha3.ServiceEntry
- locality string
- gtpPolicy *model.TrafficPolicy
- destinationRule *v1alpha3.DestinationRule
- }{
- {
- name: "Should handle a nil GTP",
- se: se,
- locality: "uswest2",
- gtpPolicy: nil,
- destinationRule: &noGtpDr,
- },
- {
- name: "Should return default DR with empty locality",
- se: se,
- locality: "",
- gtpPolicy: failoverGTPPolicy,
- destinationRule: &noGtpDr,
- },
- {
- name: "Should handle a topology GTP",
- se: se,
- locality: "uswest2",
- gtpPolicy: topologyGTPPolicy,
- destinationRule: &basicGtpDr,
- },
- {
- name: "Should handle a failover GTP",
- se: se,
- locality: "uswest2",
- gtpPolicy: failoverGTPPolicy,
- destinationRule: &failoverGtpDr,
- },
- }
-
- //Run the test for every provided case
- for _, c := range testCases {
- t.Run(c.name, func(t *testing.T) {
- result := getDestinationRule(c.se, c.locality, c.gtpPolicy)
- if !cmp.Equal(result, c.destinationRule, protocmp.Transform()) {
- t.Fatalf("DestinationRule Mismatch. Diff: %v", cmp.Diff(result, c.destinationRule))
- }
- })
- }
-}
-
-func TestGetOutlierDetection(t *testing.T) {
- //Do setup here
- outlierDetection := &v1alpha3.OutlierDetection{
- BaseEjectionTime: &duration.Duration{Seconds: DefaultBaseEjectionTime},
- ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: DefaultConsecutiveGatewayErrors},
- Consecutive_5XxErrors: &wrappers.UInt32Value{Value: DefaultConsecutive5xxErrors},
- Interval: &duration.Duration{Seconds: DefaultInterval},
- MaxEjectionPercent: 100,
- }
-
- outlierDetectionOneHostRemote := &v1alpha3.OutlierDetection{
- BaseEjectionTime: &duration.Duration{Seconds: DefaultBaseEjectionTime},
- ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: DefaultConsecutiveGatewayErrors},
- Consecutive_5XxErrors: &wrappers.UInt32Value{Value: DefaultConsecutive5xxErrors},
- Interval: &duration.Duration{Seconds: DefaultInterval},
- MaxEjectionPercent: 34,
- }
-
- topologyGTPPolicy := &model.TrafficPolicy{
- LbType: model.TrafficPolicy_TOPOLOGY,
- Target: []*model.TrafficGroup{
- {
- Region: "us-west-2",
- Weight: 100,
- },
- },
- }
-
- se := &v1alpha3.ServiceEntry{Hosts: []string{"qa.myservice.global"}, Endpoints: []*v1alpha3.WorkloadEntry{
- {Address: "east.com", Locality: "us-east-2"}, {Address: "west.com", Locality: "us-west-2"},
- }}
-
- seOneHostRemote := &v1alpha3.ServiceEntry{Hosts: []string{"qa.myservice.global"}, Endpoints: []*v1alpha3.WorkloadEntry{
- {Address: "east.com", Locality: "us-east-2"},
- }}
-
- seOneHostLocal := &v1alpha3.ServiceEntry{Hosts: []string{"qa.myservice.global"}, Endpoints: []*v1alpha3.WorkloadEntry{
- {Address: "hello.ns.svc.cluster.local", Locality: "us-east-2"},
- }}
-
- seOneHostRemoteIp := &v1alpha3.ServiceEntry{Hosts: []string{"qa.myservice.global"}, Endpoints: []*v1alpha3.WorkloadEntry{
- {Address: "95.45.25.34", Locality: "us-east-2"},
- }}
-
- //Struct of test case info. Name is required.
- testCases := []struct {
- name string
- se *v1alpha3.ServiceEntry
- locality string
- gtpPolicy *model.TrafficPolicy
- outlierDetection *v1alpha3.OutlierDetection
- }{
-
- {
- name: "Should return nil for cluster local only endpoint",
- se: seOneHostLocal,
- locality: "uswest2",
- gtpPolicy: topologyGTPPolicy,
- outlierDetection: nil,
- },
- {
- name: "Should return nil for one IP endpoint",
- se: seOneHostRemoteIp,
- locality: "uswest2",
- gtpPolicy: topologyGTPPolicy,
- outlierDetection: nil,
- },
- {
- name: "Should return 34% ejection for remote endpoint with one entry",
- se: seOneHostRemote,
- locality: "uswest2",
- gtpPolicy: topologyGTPPolicy,
- outlierDetection: outlierDetectionOneHostRemote,
- },
- {
- name: "Should return 100% ejection for two remote endpoints",
- se: se,
- locality: "uswest2",
- gtpPolicy: topologyGTPPolicy,
- outlierDetection: outlierDetection,
- },
- {
- name: "Should use the default outlier detection if gtpPolicy is nil",
- se: se,
- locality: "uswest2",
- gtpPolicy: nil,
- outlierDetection: outlierDetection,
- },
- {
- name: "Should use the default outlier detection if OutlierDetection is nil inside gtpPolicy",
- se: se,
- locality: "uswest2",
- gtpPolicy: topologyGTPPolicy,
- outlierDetection: outlierDetection,
- },
- {
- name: "Should apply the default BaseEjectionTime if it is not configured in the outlier detection config",
- se: se,
- locality: "uswest2",
- gtpPolicy: &model.TrafficPolicy{
- LbType: model.TrafficPolicy_TOPOLOGY,
- Target: []*model.TrafficGroup{
- {
- Region: "us-west-2",
- Weight: 100,
- },
- },
- OutlierDetection: &model.TrafficPolicy_OutlierDetection{
- ConsecutiveGatewayErrors: 10,
- Interval: 60,
- },
- },
- outlierDetection: &v1alpha3.OutlierDetection{
- BaseEjectionTime: &duration.Duration{Seconds: DefaultBaseEjectionTime},
- ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: 10},
- Consecutive_5XxErrors: &wrappers.UInt32Value{Value: 0},
- Interval: &duration.Duration{Seconds: 60},
- MaxEjectionPercent: 100,
- },
- },
- {
- name: "Should apply the default ConsecutiveGatewayErrors if it is not configured in the outlier detection config",
- se: se,
- locality: "uswest2",
- gtpPolicy: &model.TrafficPolicy{
- LbType: model.TrafficPolicy_TOPOLOGY,
- Target: []*model.TrafficGroup{
- {
- Region: "us-west-2",
- Weight: 100,
- },
- },
- OutlierDetection: &model.TrafficPolicy_OutlierDetection{
- BaseEjectionTime: 600,
- Interval: 60,
- },
- },
- outlierDetection: &v1alpha3.OutlierDetection{
- BaseEjectionTime: &duration.Duration{Seconds: 600},
- ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: DefaultConsecutiveGatewayErrors},
- Consecutive_5XxErrors: &wrappers.UInt32Value{Value: DefaultConsecutive5xxErrors},
- Interval: &duration.Duration{Seconds: 60},
- MaxEjectionPercent: 100,
- },
- },
- {
- name: "Should apply the default Interval if it is not configured in the outlier detection config",
- se: se,
- locality: "uswest2",
- gtpPolicy: &model.TrafficPolicy{
- LbType: model.TrafficPolicy_TOPOLOGY,
- Target: []*model.TrafficGroup{
- {
- Region: "us-west-2",
- Weight: 100,
- },
- },
- OutlierDetection: &model.TrafficPolicy_OutlierDetection{
- BaseEjectionTime: 600,
- ConsecutiveGatewayErrors: 50,
- },
- },
- outlierDetection: &v1alpha3.OutlierDetection{
- BaseEjectionTime: &duration.Duration{Seconds: 600},
- ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: 50},
- Consecutive_5XxErrors: &wrappers.UInt32Value{Value: 0},
- Interval: &duration.Duration{Seconds: DefaultInterval},
- MaxEjectionPercent: 100,
- },
- },
- {
- name: "Default outlier detection config should be overriden by the outlier detection config specified in the TrafficPolicy",
- se: se,
- locality: "uswest2",
- gtpPolicy: &model.TrafficPolicy{
- LbType: model.TrafficPolicy_TOPOLOGY,
- Target: []*model.TrafficGroup{
- {
- Region: "us-west-2",
- Weight: 100,
- },
- },
- OutlierDetection: &model.TrafficPolicy_OutlierDetection{
- BaseEjectionTime: 600,
- ConsecutiveGatewayErrors: 10,
- Interval: 60,
- },
- },
- outlierDetection: &v1alpha3.OutlierDetection{
- BaseEjectionTime: &duration.Duration{Seconds: 600},
- ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: 10},
- Consecutive_5XxErrors: &wrappers.UInt32Value{Value: 0},
- Interval: &duration.Duration{Seconds: 60},
- MaxEjectionPercent: 100,
- },
- },
- }
-
- //Run the test for every provided case
- for _, c := range testCases {
- t.Run(c.name, func(t *testing.T) {
- result := getOutlierDetection(c.se, c.locality, c.gtpPolicy)
- if !cmp.Equal(result, c.outlierDetection, protocmp.Transform()) {
- t.Fatalf("OutlierDetection Mismatch. Diff: %v", cmp.Diff(result, c.outlierDetection))
- }
- })
- }
-}
-
-func TestHandleVirtualServiceEvent(t *testing.T) {
- var (
- ctx = context.Background()
- cnameCache = common.NewMapOfMaps()
- goodCnameCache = common.NewMapOfMaps()
- rr = NewRemoteRegistry(context.TODO(), common.AdmiralParams{})
- rr1 = NewRemoteRegistry(context.TODO(), common.AdmiralParams{})
- rr2 = NewRemoteRegistry(context.TODO(), common.AdmiralParams{})
- fakeIstioClient = istioFake.NewSimpleClientset()
- fullFakeIstioClient = istioFake.NewSimpleClientset()
-
- tooManyHosts = v1alpha32.VirtualService{
- Spec: v1alpha3.VirtualService{
- Hosts: []string{"qa.blah.global", "e2e.blah.global"},
- },
- ObjectMeta: metaV1.ObjectMeta{
- Name: "too-many-hosts",
- Namespace: "other-ns",
- },
- }
- happyPath = v1alpha32.VirtualService{
- Spec: v1alpha3.VirtualService{
- Hosts: []string{"e2e.blah.global"},
- },
- ObjectMeta: metaV1.ObjectMeta{
- Name: "vs-name",
- Namespace: "other-ns",
- },
- }
- nonExistentVs = v1alpha32.VirtualService{
- Spec: v1alpha3.VirtualService{
- Hosts: []string{"does-not-exist.com"},
- },
- ObjectMeta: metaV1.ObjectMeta{
- Name: "does-not-exist",
- Namespace: "other-ns",
- },
- }
- vsNotGeneratedByAdmiral = v1alpha32.VirtualService{
- Spec: v1alpha3.VirtualService{
- Hosts: []string{"e2e.blah.something"},
- },
- ObjectMeta: metaV1.ObjectMeta{
- Name: "vs-name-other-nss",
- Namespace: "other-ns",
- },
- }
- )
-
- rr.AdmiralCache = &AdmiralCache{
- CnameDependentClusterCache: cnameCache,
- SeClusterCache: common.NewMapOfMaps(),
- }
- noDependentClustersHandler := VirtualServiceHandler{
- RemoteRegistry: rr,
- }
-
- goodCnameCache.Put("e2e.blah.global", "cluster.k8s.global", "cluster.k8s.global")
- rr1.AdmiralCache = &AdmiralCache{
- CnameDependentClusterCache: goodCnameCache,
- SeClusterCache: common.NewMapOfMaps(),
- }
- rr1.PutRemoteController("cluster.k8s.global", &RemoteController{
- VirtualServiceController: &istio.VirtualServiceController{
- IstioClient: fakeIstioClient,
- },
- })
- handlerEmptyClient := VirtualServiceHandler{
- RemoteRegistry: rr1,
- }
- fullFakeIstioClient.NetworkingV1alpha3().VirtualServices("ns").Create(ctx, &v1alpha32.VirtualService{
- ObjectMeta: metaV1.ObjectMeta{
- Name: "vs-name",
- },
- Spec: v1alpha3.VirtualService{
- Hosts: []string{"e2e.blah.global"},
- },
- }, metaV1.CreateOptions{})
- rr2.AdmiralCache = &AdmiralCache{
- CnameDependentClusterCache: goodCnameCache,
- SeClusterCache: common.NewMapOfMaps(),
- }
- rr2.PutRemoteController("cluster.k8s.global", &RemoteController{
- VirtualServiceController: &istio.VirtualServiceController{
- IstioClient: fullFakeIstioClient,
- },
- })
- handlerFullClient := VirtualServiceHandler{
- ClusterID: "cluster2.k8s.global",
- RemoteRegistry: rr2,
- }
-
- //Struct of test case info. Name is required.
- testCases := []struct {
- name string
- vs *v1alpha32.VirtualService
- handler *VirtualServiceHandler
- expectedError error
- event common.Event
- }{
- {
- name: "Virtual Service with multiple hosts",
- vs: &tooManyHosts,
- expectedError: nil,
- handler: &noDependentClustersHandler,
- event: 0,
- },
- {
- name: "No dependent clusters",
- vs: &happyPath,
- expectedError: nil,
- handler: &noDependentClustersHandler,
- event: 0,
- },
- {
- name: "Add event for VS not generated by Admiral",
- vs: &happyPath,
- expectedError: nil,
- handler: &handlerFullClient,
- event: 0,
- },
- {
- name: "Update event for VS not generated by Admiral",
- vs: &vsNotGeneratedByAdmiral,
- expectedError: nil,
- handler: &handlerFullClient,
- event: 1,
- },
- {
- name: "Delete event for VS not generated by Admiral",
- vs: &vsNotGeneratedByAdmiral,
- expectedError: nil,
- handler: &handlerFullClient,
- event: 2,
- },
- {
- name: "New Virtual Service",
- vs: &happyPath,
- expectedError: nil,
- handler: &handlerEmptyClient,
- event: 0,
- },
- {
- name: "Existing Virtual Service",
- vs: &happyPath,
- expectedError: nil,
- handler: &handlerFullClient,
- event: 1,
- },
- {
- name: "Deleted existing Virtual Service, should not return an error",
- vs: &happyPath,
- expectedError: nil,
- handler: &handlerFullClient,
- event: 2,
- },
- {
- name: "Deleting virtual service which does not exist, should not return an error",
- vs: &nonExistentVs,
- expectedError: nil,
- handler: &handlerFullClient,
- event: 2,
- },
- }
-
- //Run the test for every provided case
- for _, c := range testCases {
- t.Run(c.name, func(t *testing.T) {
- err := handleVirtualServiceEvent(ctx, c.vs, c.handler, c.event, common.VirtualServiceResourceType)
- if err != c.expectedError {
- t.Fatalf("Error mismatch, expected %v but got %v", c.expectedError, err)
- }
- })
- }
-}
-
func TestGetServiceForRolloutCanary(t *testing.T) {
- //Struct of test case info. Name is required.
const (
Namespace = "namespace"
ServiceName = "serviceName"
StableServiceName = "stableserviceName"
CanaryServiceName = "canaryserviceName"
GeneratedStableServiceName = "hello-" + common.RolloutStableServiceSuffix
- LatestMatchingService = "hello-root-service"
+ RootService = "hello-root-service"
vsName1 = "virtualservice1"
vsName2 = "virtualservice2"
vsName3 = "virtualservice3"
@@ -736,14 +147,15 @@ func TestGetServiceForRolloutCanary(t *testing.T) {
}
ports = []coreV1.ServicePort{{Port: 8080}, {Port: 8081}}
)
- s, err := admiral.NewServiceController("test", stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300))
+ s, err := admiral.NewServiceController(stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300), loader.GetFakeClientLoader())
if err != nil {
t.Fatalf("failed to initialize service controller, err: %v", err)
}
- r, err := admiral.NewRolloutsController("test", stop, &test.MockRolloutHandler{}, &config, time.Second*time.Duration(300))
+ r, err := admiral.NewRolloutsController(stop, &test.MockRolloutHandler{}, &config, time.Second*time.Duration(300), loader.GetFakeClientLoader())
if err != nil {
t.Fatalf("failed ot initialize rollout controller, err: %v", err)
}
+
v := &istio.VirtualServiceController{
IstioClient: fakeIstioClient,
}
@@ -806,6 +218,20 @@ func TestGetServiceForRolloutCanary(t *testing.T) {
},
}
+ service5 := &coreV1.Service{
+ ObjectMeta: metaV1.ObjectMeta{Name: GeneratedStableServiceName, Namespace: "namespace5", CreationTimestamp: metaV1.NewTime(time.Now())},
+ Spec: coreV1.ServiceSpec{
+ Selector: map[string]string{
+ "app": "test5",
+ },
+ Ports: []coreV1.ServicePort{{
+ Port: 8081,
+ Name: "random5",
+ },
+ },
+ },
+ }
+
// namespace Services
stableService := &coreV1.Service{
ObjectMeta: metaV1.ObjectMeta{Name: StableServiceName, Namespace: Namespace, CreationTimestamp: metaV1.NewTime(time.Now().Add(time.Duration(-15)))},
@@ -831,8 +257,8 @@ func TestGetServiceForRolloutCanary(t *testing.T) {
},
}
- latestMatchingService := &coreV1.Service{
- ObjectMeta: metaV1.ObjectMeta{Name: LatestMatchingService, Namespace: Namespace, CreationTimestamp: metaV1.NewTime(time.Now())},
+ rootService := &coreV1.Service{
+ ObjectMeta: metaV1.ObjectMeta{Name: RootService, Namespace: Namespace, CreationTimestamp: metaV1.NewTime(time.Now())},
Spec: coreV1.ServiceSpec{
Selector: selectorMap,
Ports: ports,
@@ -843,10 +269,11 @@ func TestGetServiceForRolloutCanary(t *testing.T) {
rcTemp.ServiceController.Cache.Put(service1)
rcTemp.ServiceController.Cache.Put(service3)
rcTemp.ServiceController.Cache.Put(service4)
+ rcTemp.ServiceController.Cache.Put(service5)
rcTemp.ServiceController.Cache.Put(stableService)
rcTemp.ServiceController.Cache.Put(canaryService)
rcTemp.ServiceController.Cache.Put(generatedStableService)
- rcTemp.ServiceController.Cache.Put(latestMatchingService)
+ rcTemp.ServiceController.Cache.Put(rootService)
virtualService := &v1alpha32.VirtualService{
ObjectMeta: metaV1.ObjectMeta{Name: vsName1, Namespace: Namespace},
@@ -1007,34 +434,80 @@ func TestGetServiceForRolloutCanary(t *testing.T) {
},
}
- canaryRolloutWithStableService := argo.Rollout{
+ canaryRolloutWithRootService := argo.Rollout{
Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{
ObjectMeta: metaV1.ObjectMeta{Annotations: map[string]string{}},
}}}
- canaryRolloutWithStableService.Spec.Selector = &labelSelector
+ canaryRolloutWithRootService.Spec.Selector = &labelSelector
- canaryRolloutWithStableService.Namespace = Namespace
- canaryRolloutWithStableService.Spec.Strategy = argo.RolloutStrategy{
+ canaryRolloutWithRootService.Namespace = Namespace
+ canaryRolloutWithRootService.Spec.Strategy = argo.RolloutStrategy{
Canary: &argo.CanaryStrategy{
StableService: StableServiceName,
CanaryService: CanaryServiceName,
},
}
- canaryRolloutIstioVsMimatch := argo.Rollout{
+ canaryRolloutWithoutRootService := argo.Rollout{
Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{
ObjectMeta: metaV1.ObjectMeta{Annotations: map[string]string{}},
}}}
- canaryRolloutIstioVsMimatch.Spec.Selector = &labelSelector
+ matchLabel5 := make(map[string]string)
+ matchLabel5["app"] = "test5"
- canaryRolloutIstioVsMimatch.Namespace = Namespace
- canaryRolloutIstioVsMimatch.Spec.Strategy = argo.RolloutStrategy{
- Canary: &argo.CanaryStrategy{
- StableService: StableServiceName,
- CanaryService: CanaryServiceName,
- TrafficRouting: &argo.RolloutTrafficRouting{
- Istio: &argo.IstioTrafficRouting{
- VirtualService: &argo.IstioVirtualService{Name: "random"},
+ labelSelector5 := metaV1.LabelSelector{
+ MatchLabels: matchLabel5,
+ }
+ canaryRolloutWithoutRootService.Spec.Selector = &labelSelector5
+
+ canaryRolloutWithoutRootService.Namespace = "namespace5"
+ canaryRolloutWithoutRootService.Spec.Strategy = argo.RolloutStrategy{
+ Canary: &argo.CanaryStrategy{},
+ }
+
+ canaryRolloutNoStrategy := argo.Rollout{
+ Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{
+ ObjectMeta: metaV1.ObjectMeta{Annotations: map[string]string{}},
+ }}}
+ matchLabel6 := make(map[string]string)
+ matchLabel6["app"] = "test6"
+
+ labelSelector6 := metaV1.LabelSelector{
+ MatchLabels: matchLabel6,
+ }
+ canaryRolloutNoStrategy.Spec.Selector = &labelSelector6
+
+ canaryRolloutNoStrategy.Namespace = "namespace6"
+
+ canaryRolloutWithoutIstioVS := argo.Rollout{
+ ObjectMeta: metaV1.ObjectMeta{Namespace: Namespace},
+ Spec: argo.RolloutSpec{
+ Selector: &labelSelector,
+ Strategy: argo.RolloutStrategy{
+ Canary: &argo.CanaryStrategy{
+ StableService: StableServiceName,
+ CanaryService: CanaryServiceName,
+ TrafficRouting: &argo.RolloutTrafficRouting{
+ Istio: &argo.IstioTrafficRouting{},
+ },
+ },
+ },
+ },
+ }
+
+ canaryRolloutIstioVsMismatch := argo.Rollout{
+ ObjectMeta: metaV1.ObjectMeta{Namespace: Namespace},
+ Spec: argo.RolloutSpec{
+ Selector: &labelSelector,
+ Strategy: argo.RolloutStrategy{
+ Canary: &argo.CanaryStrategy{
+ StableService: StableServiceName,
+ CanaryService: CanaryServiceName,
+ TrafficRouting: &argo.RolloutTrafficRouting{
+ Istio: &argo.IstioTrafficRouting{
+ VirtualService: &argo.IstioVirtualService{Name: "random"},
+ },
+ },
},
},
},
@@ -1056,13 +529,17 @@ func TestGetServiceForRolloutCanary(t *testing.T) {
resultForDummy := map[string]*WeightedService{service3.Name: {Weight: 1, Service: service3}}
- resultForEmptyStableServiceOnRollout := map[string]*WeightedService{LatestMatchingService: {Weight: 1, Service: latestMatchingService}}
+ resultForEmptyStableServiceOnRollout := map[string]*WeightedService{RootService: {Weight: 1, Service: rootService}}
resultForCanaryWithIstio := map[string]*WeightedService{StableServiceName: {Weight: 80, Service: stableService},
CanaryServiceName: {Weight: 20, Service: canaryService}}
+ resultForCanaryWithRootService := map[string]*WeightedService{RootService: {Weight: 1, Service: rootService}}
+
resultForCanaryWithStableService := map[string]*WeightedService{StableServiceName: {Weight: 1, Service: stableService}}
+ resultForCanaryWithoutRootService := map[string]*WeightedService{GeneratedStableServiceName: {Weight: 1, Service: service5}}
+
resultForCanaryWithStableServiceWeight := map[string]*WeightedService{StableServiceName: {Weight: 100, Service: stableService}}
resultRolloutWithOneServiceHavingMeshPort := map[string]*WeightedService{service3.Name: {Weight: 1, Service: service3}}
@@ -1088,9 +565,14 @@ func TestGetServiceForRolloutCanary(t *testing.T) {
rollout: &canaryRollout,
rc: rcTemp,
result: resultForEmptyStableServiceOnRollout,
+ }, {
+ name: "canaryRolloutWithoutIstioVS",
+ rollout: &canaryRolloutWithoutIstioVS,
+ rc: rcTemp,
+ result: resultForCanaryWithStableService,
}, {
name: "canaryRolloutWithIstioVsMimatch",
- rollout: &canaryRolloutIstioVsMimatch,
+ rollout: &canaryRolloutIstioVsMismatch,
rc: rcTemp,
result: resultForCanaryWithStableService,
}, {
@@ -1115,10 +597,10 @@ func TestGetServiceForRolloutCanary(t *testing.T) {
result: resultForCanaryWithStableService,
},
{
- name: "canaryRolloutWithStableServiceName",
- rollout: &canaryRolloutWithStableService,
+ name: "canaryRolloutWithRootServiceName",
+ rollout: &canaryRolloutWithRootService,
rc: rcTemp,
- result: resultForCanaryWithStableService,
+ result: resultForCanaryWithRootService,
},
{
name: "canaryRolloutWithOneServiceHavingMeshPort",
@@ -1126,6 +608,18 @@ func TestGetServiceForRolloutCanary(t *testing.T) {
rc: rcTemp,
result: resultRolloutWithOneServiceHavingMeshPort,
},
+ {
+ name: "canaryRolloutWithRootServiceNameMissing",
+ rollout: &canaryRolloutWithoutRootService,
+ rc: rcTemp,
+ result: resultForCanaryWithoutRootService,
+ },
+ {
+ name: "canaryRolloutEmptyStrategy",
+ rollout: &canaryRolloutNoStrategy,
+ rc: rcTemp,
+ result: nil,
+ },
}
//Run the test for every provided case
@@ -1157,8 +651,9 @@ func TestGetServiceForRolloutCanary(t *testing.T) {
func TestGetServiceForRolloutBlueGreen(t *testing.T) {
//Struct of test case info. Name is required.
const (
- namespace = "namespace"
- serviceName = "serviceNameActive"
+ namespace = "namespace"
+ serviceName = "serviceNameActive"
+
generatedActiveServiceName = "hello-" + common.RolloutActiveServiceSuffix
rolloutPodHashLabel string = "rollouts-pod-template-hash"
)
@@ -1218,16 +713,16 @@ func TestGetServiceForRolloutBlueGreen(t *testing.T) {
},
}
)
- s, err := admiral.NewServiceController("test", stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300))
+ s, err := admiral.NewServiceController(stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300), loader.GetFakeClientLoader())
if err != nil {
t.Fatalf("failed to initialize service controller, err: %v", err)
}
- r, err := admiral.NewRolloutsController("test", stop, &test.MockRolloutHandler{}, &config, time.Second*time.Duration(300))
+ r, err := admiral.NewRolloutsController(stop, &test.MockRolloutHandler{}, &config, time.Second*time.Duration(300), loader.GetFakeClientLoader())
if err != nil {
t.Fatalf("failed to initialize rollout controller, err: %v", err)
}
- emptyCacheService, err := admiral.NewServiceController("test", stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300))
+ emptyCacheService, err := admiral.NewServiceController(stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300), loader.GetFakeClientLoader())
if err != nil {
t.Fatalf("failed to initialize empty service controller, err: %v", err)
}
@@ -1433,504 +928,24 @@ func TestGetServiceForRolloutBlueGreen(t *testing.T) {
}
}
-func TestSkipDestructiveUpdate(t *testing.T) {
- twoEndpointSe := v1alpha3.ServiceEntry{
- Hosts: []string{"e2e.my-first-service.mesh"},
- Addresses: []string{"240.10.1.1"},
- Ports: []*v1alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
- Name: "http", Protocol: "http"}},
- Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
- Resolution: v1alpha3.ServiceEntry_DNS,
- SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
- Endpoints: []*v1alpha3.WorkloadEntry{
- {Address: "dummy.admiral.global-west", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
- {Address: "dummy.admiral.global-east", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2"},
- },
- }
-
- twoEndpointSeUpdated := v1alpha3.ServiceEntry{
- Hosts: []string{"e2e.my-first-service.mesh"},
- Addresses: []string{"240.10.1.1"},
- Ports: []*v1alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
- Name: "http", Protocol: "http"}},
- Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
- Resolution: v1alpha3.ServiceEntry_DNS,
- SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
- Endpoints: []*v1alpha3.WorkloadEntry{
- {Address: "dummy.admiral.global-west", Ports: map[string]uint32{"http": 90}, Locality: "us-west-2"},
- {Address: "dummy.admiral.global-east", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2"},
- },
- }
-
- oneEndpointSe := v1alpha3.ServiceEntry{
- Hosts: []string{"e2e.my-first-service.mesh"},
- Addresses: []string{"240.10.1.1"},
- Ports: []*v1alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
- Name: "http", Protocol: "http"}},
- Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
- Resolution: v1alpha3.ServiceEntry_DNS,
- SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
- Endpoints: []*v1alpha3.WorkloadEntry{
- {Address: "dummy.admiral.global-west", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
- },
- }
-
- newSeTwoEndpoints := &v1alpha32.ServiceEntry{
- ObjectMeta: metaV1.ObjectMeta{Name: "se1", Namespace: "random"},
- //nolint
- Spec: twoEndpointSe,
- }
-
- newSeTwoEndpointsUpdated := &v1alpha32.ServiceEntry{
- ObjectMeta: metaV1.ObjectMeta{Name: "se1", Namespace: "random"},
- //nolint
- Spec: twoEndpointSeUpdated,
- }
-
- newSeOneEndpoint := &v1alpha32.ServiceEntry{
- ObjectMeta: metaV1.ObjectMeta{Name: "se1", Namespace: "random"},
- //nolint
- Spec: oneEndpointSe,
- }
-
- oldSeTwoEndpoints := &v1alpha32.ServiceEntry{
- ObjectMeta: metaV1.ObjectMeta{Name: "se1", Namespace: "random"},
- //nolint
- Spec: twoEndpointSe,
- }
-
- oldSeOneEndpoint := &v1alpha32.ServiceEntry{
- ObjectMeta: metaV1.ObjectMeta{Name: "se1", Namespace: "random"},
- //nolint
- Spec: oneEndpointSe,
- }
-
- rcWarmupPhase := &RemoteController{
- StartTime: time.Now(),
- }
-
- rcNotinWarmupPhase := &RemoteController{
- StartTime: time.Now().Add(time.Duration(-21) * time.Minute),
- }
-
- //Struct of test case info. Name is required.
- testCases := []struct {
- name string
- rc *RemoteController
- newSe *v1alpha32.ServiceEntry
- oldSe *v1alpha32.ServiceEntry
- skipDestructive bool
- diff string
- }{
- {
- name: "Should return false when in warm up phase but not destructive",
- rc: rcWarmupPhase,
- newSe: newSeOneEndpoint,
- oldSe: oldSeOneEndpoint,
- skipDestructive: false,
- diff: "",
- },
- {
- name: "Should return true when in warm up phase but is destructive",
- rc: rcWarmupPhase,
- newSe: newSeOneEndpoint,
- oldSe: oldSeTwoEndpoints,
- skipDestructive: true,
- diff: "Delete",
- },
- {
- name: "Should return false when not in warm up phase but is destructive",
- rc: rcNotinWarmupPhase,
- newSe: newSeOneEndpoint,
- oldSe: oldSeTwoEndpoints,
- skipDestructive: false,
- diff: "Delete",
- },
- {
- name: "Should return false when in warm up phase but is constructive",
- rc: rcWarmupPhase,
- newSe: newSeTwoEndpoints,
- oldSe: oldSeOneEndpoint,
- skipDestructive: false,
- diff: "Add",
- },
- {
- name: "Should return false when not in warm up phase but endpoints updated",
- rc: rcNotinWarmupPhase,
- newSe: newSeTwoEndpointsUpdated,
- oldSe: oldSeTwoEndpoints,
- skipDestructive: false,
- diff: "Update",
- },
- {
- name: "Should return true when in warm up phase but endpoints are updated (destructive)",
- rc: rcWarmupPhase,
- newSe: newSeTwoEndpointsUpdated,
- oldSe: oldSeTwoEndpoints,
- skipDestructive: true,
- diff: "Update",
- },
- }
-
- //Run the test for every provided case
- for _, c := range testCases {
- t.Run(c.name, func(t *testing.T) {
- skipDestructive, diff := skipDestructiveUpdate(c.rc, c.newSe, c.oldSe)
- if skipDestructive == c.skipDestructive {
- //perfect
- } else {
- t.Errorf("Result Failed. Got %v, expected %v", skipDestructive, c.skipDestructive)
- }
- if c.diff == "" || (c.diff != "" && strings.Contains(diff, c.diff)) {
- //perfect
- } else {
- t.Errorf("Diff Failed. Got %v, expected %v", diff, c.diff)
- }
- })
- }
-}
-
-func TestAddUpdateServiceEntry(t *testing.T) {
+func makeRemoteRegistry(
+ clusterNames []string, remoteController *RemoteController, cname string, dependentClusters []string) *RemoteRegistry {
var (
- ctx = context.Background()
- fakeIstioClient = istioFake.NewSimpleClientset()
- seCtrl = &istio.ServiceEntryController{
- IstioClient: fakeIstioClient,
- }
+ cache = common.NewMapOfMaps()
+ rr = NewRemoteRegistry(context.TODO(), common.AdmiralParams{})
)
-
- twoEndpointSe := v1alpha3.ServiceEntry{
- Hosts: []string{"e2e.my-first-service.mesh"},
- Addresses: []string{"240.10.1.1"},
- Ports: []*v1alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
- Name: "http", Protocol: "http"}},
- Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
- Resolution: v1alpha3.ServiceEntry_DNS,
- SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
- Endpoints: []*v1alpha3.WorkloadEntry{
- {Address: "dummy.admiral.global-west", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
- {Address: "dummy.admiral.global-east", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2"},
- },
- }
-
- oneEndpointSe := v1alpha3.ServiceEntry{
- Hosts: []string{"e2e.my-first-service.mesh"},
- Addresses: []string{"240.10.1.1"},
- Ports: []*v1alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
- Name: "http", Protocol: "http"}},
- Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
- Resolution: v1alpha3.ServiceEntry_DNS,
- SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
- Endpoints: []*v1alpha3.WorkloadEntry{
- {Address: "dummy.admiral.global-west", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
- },
- }
-
- invalidEndpoint := v1alpha3.ServiceEntry{
- Hosts: []string{"e2e.test-service.mesh"},
- Addresses: []string{"240.10.1.1"},
- Ports: []*v1alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
- Name: "http", Protocol: "http"}},
- Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
- Resolution: v1alpha3.ServiceEntry_DNS,
- SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
- Endpoints: []*v1alpha3.WorkloadEntry{
- {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
- {Address: "test.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2"},
- },
- }
-
- invalidEndpointSe := &v1alpha32.ServiceEntry{
- ObjectMeta: metaV1.ObjectMeta{Name: "se3", Namespace: "namespace"},
- //nolint
- Spec: invalidEndpoint,
- }
-
- newSeOneEndpoint := &v1alpha32.ServiceEntry{
- ObjectMeta: metaV1.ObjectMeta{Name: "se1", Namespace: "namespace"},
- //nolint
- Spec: oneEndpointSe,
- }
-
- oldSeTwoEndpoints := &v1alpha32.ServiceEntry{
- ObjectMeta: metaV1.ObjectMeta{Name: "se2", Namespace: "namespace"},
- //nolint
- Spec: twoEndpointSe,
- }
-
- _, err := seCtrl.IstioClient.NetworkingV1alpha3().ServiceEntries("namespace").Create(ctx, oldSeTwoEndpoints, metaV1.CreateOptions{})
- if err != nil {
- t.Error(err)
- }
-
- rcWarmupPhase := &RemoteController{
- ServiceEntryController: seCtrl,
- StartTime: time.Now(),
- }
-
- rcNotInWarmupPhase := &RemoteController{
- ServiceEntryController: seCtrl,
- StartTime: time.Now().Add(time.Duration(-21) * time.Minute),
- }
-
- //Struct of test case info. Name is required.
- testCases := []struct {
- name string
- rc *RemoteController
- newSe *v1alpha32.ServiceEntry
- oldSe *v1alpha32.ServiceEntry
- skipDestructive bool
- }{
- {
- name: "Should add a new SE",
- rc: rcWarmupPhase,
- newSe: newSeOneEndpoint,
- oldSe: nil,
- skipDestructive: false,
- },
- {
- name: "Should not update SE when in warm up mode and the update is destructive",
- rc: rcWarmupPhase,
- newSe: newSeOneEndpoint,
- oldSe: oldSeTwoEndpoints,
- skipDestructive: true,
- },
- {
- name: "Should update an SE",
- rc: rcNotInWarmupPhase,
- newSe: newSeOneEndpoint,
- oldSe: oldSeTwoEndpoints,
- skipDestructive: false,
- },
- {
- name: "Should create an SE with one endpoint",
- rc: rcNotInWarmupPhase,
- newSe: invalidEndpointSe,
- oldSe: nil,
- skipDestructive: false,
- },
- }
-
- //Run the test for every provided case
- for _, c := range testCases {
- t.Run(c.name, func(t *testing.T) {
- addUpdateServiceEntry(ctx, c.newSe, c.oldSe, "namespace", c.rc)
- if c.skipDestructive {
- //verify the update did not go through
- se, err := c.rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries("namespace").Get(ctx, c.oldSe.Name, metaV1.GetOptions{})
- if err != nil {
- t.Error(err)
- }
- _, diff := getServiceEntryDiff(c.oldSe, se)
- if diff != "" {
- t.Errorf("Failed. Got %v, expected %v", se.Spec.String(), c.oldSe.Spec.String())
- }
- }
- })
- }
-}
-
-func TestValidateServiceEntryEndpoints(t *testing.T) {
-
- twoValidEndpoints := []*v1alpha3.WorkloadEntry{
- {Address: "valid1.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
- {Address: "valid2.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2"},
- }
-
- oneValidEndpoints := []*v1alpha3.WorkloadEntry{
- {Address: "valid1.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
- }
-
- dummyEndpoints := []*v1alpha3.WorkloadEntry{
- {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
- }
-
- validAndInvalidEndpoints := []*v1alpha3.WorkloadEntry{
- {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
- {Address: "valid2.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2"},
- }
-
- twoValidEndpointsSe := &v1alpha32.ServiceEntry{
- ObjectMeta: metaV1.ObjectMeta{Name: "twoValidEndpointsSe", Namespace: "namespace"},
- Spec: v1alpha3.ServiceEntry{
- Hosts: []string{"e2e.my-first-service.mesh"},
- Addresses: []string{"240.10.1.1"},
- Ports: []*v1alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
- Name: "http", Protocol: "http"}},
- Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
- Resolution: v1alpha3.ServiceEntry_DNS,
- SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
- Endpoints: twoValidEndpoints,
- },
- }
-
- oneValidEndpointsSe := &v1alpha32.ServiceEntry{
- ObjectMeta: metaV1.ObjectMeta{Name: "twoValidEndpointsSe", Namespace: "namespace"},
- Spec: v1alpha3.ServiceEntry{
- Hosts: []string{"e2e.my-first-service.mesh"},
- Addresses: []string{"240.10.1.1"},
- Ports: []*v1alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
- Name: "http", Protocol: "http"}},
- Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
- Resolution: v1alpha3.ServiceEntry_DNS,
- SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
- Endpoints: oneValidEndpoints,
- },
- }
-
- dummyEndpointsSe := &v1alpha32.ServiceEntry{
- ObjectMeta: metaV1.ObjectMeta{Name: "twoValidEndpointsSe", Namespace: "namespace"},
- Spec: v1alpha3.ServiceEntry{
- Hosts: []string{"e2e.my-first-service.mesh"},
- Addresses: []string{"240.10.1.1"},
- Ports: []*v1alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
- Name: "http", Protocol: "http"}},
- Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
- Resolution: v1alpha3.ServiceEntry_DNS,
- SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
- Endpoints: dummyEndpoints,
- },
- }
-
- validAndInvalidEndpointsSe := &v1alpha32.ServiceEntry{
- ObjectMeta: metaV1.ObjectMeta{Name: "twoValidEndpointsSe", Namespace: "namespace"},
- Spec: v1alpha3.ServiceEntry{
- Hosts: []string{"e2e.my-first-service.mesh"},
- Addresses: []string{"240.10.1.1"},
- Ports: []*v1alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
- Name: "http", Protocol: "http"}},
- Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
- Resolution: v1alpha3.ServiceEntry_DNS,
- SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
- Endpoints: validAndInvalidEndpoints,
- },
- }
-
- //Struct of test case info. Name is required.
- testCases := []struct {
- name string
- serviceEntry *v1alpha32.ServiceEntry
- expectedAreEndpointsValid bool
- expectedValidEndpoints []*v1alpha3.WorkloadEntry
- }{
- {
- name: "Validate SE with dummy endpoint",
- serviceEntry: dummyEndpointsSe,
- expectedAreEndpointsValid: false,
- expectedValidEndpoints: []*v1alpha3.WorkloadEntry{},
- },
- {
- name: "Validate SE with valid endpoint",
- serviceEntry: oneValidEndpointsSe,
- expectedAreEndpointsValid: true,
- expectedValidEndpoints: []*v1alpha3.WorkloadEntry{{Address: "valid1.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"}},
- },
- {
- name: "Validate endpoint with multiple valid endpoints",
- serviceEntry: twoValidEndpointsSe,
- expectedAreEndpointsValid: true,
- expectedValidEndpoints: []*v1alpha3.WorkloadEntry{
- {Address: "valid1.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
- {Address: "valid2.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2"}},
- },
- {
- name: "Validate endpoint with mix of valid and dummy endpoints",
- serviceEntry: validAndInvalidEndpointsSe,
- expectedAreEndpointsValid: false,
- expectedValidEndpoints: []*v1alpha3.WorkloadEntry{
- {Address: "valid2.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2"}},
- },
- }
-
- //Run the test for every provided case
- for _, c := range testCases {
- t.Run(c.name, func(t *testing.T) {
- areValidEndpoints := validateAndProcessServiceEntryEndpoints(c.serviceEntry)
-
- if areValidEndpoints != c.expectedAreEndpointsValid {
- t.Errorf("Failed. Got %v, expected %v", areValidEndpoints, c.expectedAreEndpointsValid)
- }
-
- if len(c.serviceEntry.Spec.Endpoints) != len(c.expectedValidEndpoints) {
- t.Errorf("Failed. Got %v, expected %v", len(c.serviceEntry.Spec.Endpoints), len(c.expectedValidEndpoints))
- }
- })
- }
-}
-
-func TestDeleteVirtualService(t *testing.T) {
-
- ctx := context.Background()
- namespace := "testns"
-
- fooVS := &v1alpha32.VirtualService{
- ObjectMeta: metaV1.ObjectMeta{
- Name: "stage.test00.foo-vs",
- },
- Spec: v1alpha3.VirtualService{
- Hosts: []string{"stage.test00.foo", "stage.test00.bar"},
- },
+ rr.AdmiralCache = &AdmiralCache{
+ CnameDependentClusterCache: cache,
}
-
- validIstioClient := istioFake.NewSimpleClientset()
- validIstioClient.NetworkingV1alpha3().VirtualServices(namespace).Create(ctx, fooVS, metaV1.CreateOptions{})
-
- testcases := []struct {
- name string
- virtualService *v1alpha32.VirtualService
- rc *RemoteController
- expectedError error
- expectedDeletedVSName string
- }{
- {
- name: "Given virtualservice to delete, when nil VS is passed, the func should return an error",
- virtualService: nil,
- expectedError: fmt.Errorf("the VirtualService passed was nil"),
- },
- {
- name: "Given virtualservice to delete, when VS passed does not exists, the func should return an error",
- virtualService: &v1alpha32.VirtualService{ObjectMeta: metaV1.ObjectMeta{Name: "vs-does-not-exists"}},
- expectedError: fmt.Errorf("either VirtualService was already deleted, or it never existed"),
- rc: &RemoteController{
- VirtualServiceController: &istio.VirtualServiceController{
- IstioClient: validIstioClient,
- },
- },
- },
- {
- name: "Given virtualservice to delete, when VS exists, the func should delete the VS and not return any error",
- virtualService: fooVS,
- expectedError: nil,
- rc: &RemoteController{
- VirtualServiceController: &istio.VirtualServiceController{
- IstioClient: validIstioClient,
- },
- },
- expectedDeletedVSName: "stage.test00.foo-vs",
- },
+ for _, dependentCluster := range dependentClusters {
+ rr.AdmiralCache.CnameDependentClusterCache.Put(cname, dependentCluster, dependentCluster)
}
-
- for _, tc := range testcases {
- t.Run(tc.name, func(t *testing.T) {
-
- err := deleteVirtualService(ctx, tc.virtualService, namespace, tc.rc)
-
- if err != nil && tc.expectedError != nil {
- if !strings.Contains(err.Error(), tc.expectedError.Error()) {
- t.Errorf("expected %s, got %s", tc.expectedError.Error(), err.Error())
- }
- } else if err != tc.expectedError {
- t.Errorf("expected %v, got %v", tc.expectedError, err)
- }
-
- if err == nil && tc.expectedDeletedVSName != "" {
- _, err := tc.rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(namespace).Get(context.Background(), tc.expectedDeletedVSName, metaV1.GetOptions{})
- if err != nil && !k8sErrors.IsNotFound(err) {
- t.Errorf("test failed as VS should have been deleted. error: %v", err)
- }
- }
-
- })
+ for _, clusterName := range clusterNames {
+ rr.PutRemoteController(
+ clusterName,
+ remoteController,
+ )
}
+ return rr
}
From 2802eeaed3d97b715977aa02711f160e7b2dcd76 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 10:39:36 -0400
Subject: [PATCH 149/235] copied
admiral/pkg/clusters/outlierdetection_handler.go changes from master
Signed-off-by: Shriram Sharma
---
.../pkg/clusters/outlierdetection_handler.go | 122 ++++++++++++++++++
1 file changed, 122 insertions(+)
create mode 100644 admiral/pkg/clusters/outlierdetection_handler.go
diff --git a/admiral/pkg/clusters/outlierdetection_handler.go b/admiral/pkg/clusters/outlierdetection_handler.go
new file mode 100644
index 00000000..395d31d9
--- /dev/null
+++ b/admiral/pkg/clusters/outlierdetection_handler.go
@@ -0,0 +1,122 @@
+package clusters
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ log "github.com/sirupsen/logrus"
+)
+
+type OutlierDetectionHandler struct {
+ RemoteRegistry *RemoteRegistry
+ ClusterID string
+}
+
+type OutlierDetectionCache interface {
+ GetFromIdentity(identity string, environment string) (*v1.OutlierDetection, error)
+ Put(od *v1.OutlierDetection) error
+ Delete(identity string, env string) error
+}
+
+type outlierDetectionCache struct {
+
+ //Map of OutlierDetection key=environment.identity, value:OutlierDetection
+ identityCache map[string]*v1.OutlierDetection
+ mutex *sync.Mutex
+}
+
+func (cache *outlierDetectionCache) GetFromIdentity(identity string, environment string) (*v1.OutlierDetection, error) {
+ cache.mutex.Lock()
+ defer cache.mutex.Unlock()
+ return cache.identityCache[common.ConstructKeyWithEnvAndIdentity(environment, identity)], nil
+}
+
+func (cache *outlierDetectionCache) Put(od *v1.OutlierDetection) error {
+ if od.Name == "" {
+ return errors.New("Cannot add an empty outlierdetection to the cache")
+ }
+
+ defer cache.mutex.Unlock()
+ cache.mutex.Lock()
+
+ identity := common.GetODIdentity(od)
+ env := common.GetODEnv(od)
+
+ log.Infof("Adding OutlierDetection with name=%s to OutlierDetectionCache. LabelMatch=%v env=%v", od.Name, identity, env)
+ key := common.ConstructKeyWithEnvAndIdentity(env, identity)
+ cache.identityCache[key] = od
+ return nil
+}
+
+func (cache *outlierDetectionCache) Delete(identity string, env string) error {
+ cache.mutex.Lock()
+ defer cache.mutex.Unlock()
+ key := common.ConstructKeyWithEnvAndIdentity(env, identity)
+ if _, ok := cache.identityCache[key]; ok {
+ log.Infof("Deleting OutlierDetection with key=%s from OutlierDetection cache.", key)
+ delete(cache.identityCache, key)
+ } else {
+ return fmt.Errorf("OutlierDetection with key %s not found in cache", key)
+ }
+ return nil
+}
+
+func (od OutlierDetectionHandler) Added(ctx context.Context, obj *v1.OutlierDetection) error {
+ log.Infof(LogFormat, common.Add, common.OutlierDetection, obj.Name, od.ClusterID, common.ReceivedStatus)
+ err := HandleEventForOutlierDetection(ctx, admiral.EventType(common.Add), obj, od.RemoteRegistry, od.ClusterID, modifyServiceEntryForNewServiceOrPod)
+ if err != nil {
+ return fmt.Errorf(LogErrFormat, common.Add, common.OutlierDetection, obj.Name, od.ClusterID, err.Error())
+ }
+ return nil
+}
+
+func (od OutlierDetectionHandler) Updated(ctx context.Context, obj *v1.OutlierDetection) error {
+ log.Infof(LogFormat, common.Update, common.OutlierDetection, obj.Name, od.ClusterID, common.ReceivedStatus)
+ err := HandleEventForOutlierDetection(ctx, admiral.Update, obj, od.RemoteRegistry, od.ClusterID, modifyServiceEntryForNewServiceOrPod)
+ if err != nil {
+ return fmt.Errorf(LogErrFormat, common.Update, common.OutlierDetection, obj.Name, od.ClusterID, err.Error())
+ }
+ return nil
+}
+
+func (od OutlierDetectionHandler) Deleted(ctx context.Context, obj *v1.OutlierDetection) error {
+ log.Infof(LogFormat, common.Delete, common.OutlierDetection, obj.Name, od.ClusterID, common.ReceivedStatus)
+ err := HandleEventForOutlierDetection(ctx, admiral.Update, obj, od.RemoteRegistry, od.ClusterID, modifyServiceEntryForNewServiceOrPod)
+ if err != nil {
+ return fmt.Errorf(LogErrFormat, common.Delete, common.OutlierDetection, obj.Name, od.ClusterID, err.Error())
+ }
+ return nil
+}
+
+func HandleEventForOutlierDetection(ctx context.Context, event admiral.EventType, od *v1.OutlierDetection, registry *RemoteRegistry,
+ clusterName string, modifySE ModifySEFunc) error {
+
+ identity := common.GetODIdentity(od)
+ if len(identity) <= 0 {
+ return fmt.Errorf(LogFormat, "Event", common.OutlierDetection, od.Name, clusterName, "Skipped as label "+common.GetAdmiralCRDIdentityLabel()+" was not found, namespace="+od.Namespace)
+ }
+
+ env := common.GetODEnv(od)
+ if len(env) <= 0 {
+ return fmt.Errorf(LogFormat, "Event", common.OutlierDetection, od.Name, clusterName, "Skipped as env "+env+" was not found, namespace="+od.Namespace)
+ }
+
+ ctx = context.WithValue(ctx, common.ClusterName, clusterName)
+ ctx = context.WithValue(ctx, common.EventResourceType, common.OutlierDetection)
+
+ _, err := modifySE(ctx, admiral.Update, env, identity, registry)
+
+ return err
+}
+
+func NewOutlierDetectionCache() *outlierDetectionCache {
+ odCache := &outlierDetectionCache{}
+ odCache.identityCache = make(map[string]*v1.OutlierDetection)
+ odCache.mutex = &sync.Mutex{}
+ return odCache
+}
From 4efa88a493bc8cc7f90a7ff65138b759a66d65e1 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 10:40:55 -0400
Subject: [PATCH 150/235] copied
admiral/pkg/clusters/outlierdetection_handler_test.go changes from master
Signed-off-by: Shriram Sharma
---
.../clusters/outlierdetection_handler_test.go | 111 ++++++++++++++++++
1 file changed, 111 insertions(+)
create mode 100644 admiral/pkg/clusters/outlierdetection_handler_test.go
diff --git a/admiral/pkg/clusters/outlierdetection_handler_test.go b/admiral/pkg/clusters/outlierdetection_handler_test.go
new file mode 100644
index 00000000..e864ad35
--- /dev/null
+++ b/admiral/pkg/clusters/outlierdetection_handler_test.go
@@ -0,0 +1,111 @@
+package clusters
+
+import (
+ "context"
+ "errors"
+ "testing"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model"
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "github.com/stretchr/testify/assert"
+ networkingAlpha3 "istio.io/api/networking/v1alpha3"
+ metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestHandleEventForOutlierDetection(t *testing.T) {
+ ctx := context.Background()
+
+ admiralParamTest := common.AdmiralParams{
+ KubeconfigPath: "testdata/fake.config",
+ LabelSet: &common.LabelSet{
+ AdmiralCRDIdentityLabel: "assetAlias",
+ },
+ }
+
+ common.ResetSync()
+ registryTest, _ := InitAdmiral(ctx, admiralParamTest)
+
+ type args struct {
+ event admiral.EventType
+ od *v1.OutlierDetection
+ clusterName string
+ modifySE ModifySEFunc
+ }
+
+ odConfig := model.OutlierConfig{
+ BaseEjectionTime: 0,
+ ConsecutiveGatewayErrors: 0,
+ Interval: 0,
+ XXX_NoUnkeyedLiteral: struct{}{},
+ XXX_unrecognized: nil,
+ XXX_sizecache: 0,
+ }
+
+ od := v1.OutlierDetection{
+ TypeMeta: metaV1.TypeMeta{},
+ ObjectMeta: metaV1.ObjectMeta{},
+ Spec: model.OutlierDetection{
+ OutlierConfig: &odConfig,
+ Selector: map[string]string{"identity": "payments", "env": "e2e"},
+ XXX_NoUnkeyedLiteral: struct{}{},
+ XXX_unrecognized: nil,
+ XXX_sizecache: 0,
+ },
+ Status: v1.OutlierDetectionStatus{},
+ }
+
+ seFunc := func(ctx context.Context, event admiral.EventType, env string, sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*networkingAlpha3.ServiceEntry, error) {
+ return nil, nil
+ }
+
+ testArg1 := args{
+ event: admiral.Add,
+ od: &v1.OutlierDetection{
+ Spec: od.Spec,
+ ObjectMeta: metaV1.ObjectMeta{Name: "od1", Namespace: "ns1", Labels: map[string]string{"assetAlias": "Intuit.devx.supercar", "identity": "id", "admiral.io/env": "stage"}},
+ TypeMeta: metaV1.TypeMeta{
+ Kind: "admiral.io/v1",
+ APIVersion: common.OutlierDetection,
+ },
+ },
+ clusterName: "test",
+ modifySE: seFunc,
+ }
+
+ testArg2 := args{
+ event: admiral.Add,
+ od: &v1.OutlierDetection{
+ Spec: od.Spec,
+ ObjectMeta: metaV1.ObjectMeta{Name: "od1", Namespace: "ns1", Labels: map[string]string{"foo": "bar"}},
+ TypeMeta: metaV1.TypeMeta{
+ Kind: "admiral.io/v1",
+ APIVersion: common.OutlierDetection,
+ },
+ },
+ clusterName: "test",
+ modifySE: seFunc,
+ }
+
+
+
+ tests := []struct {
+ name string
+ args args
+ expErr error
+ }{
+ {"identity label missing", testArg2, errors.New("Skipped as label assetAlias was not found")},
+ {"happy path", testArg1, nil},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := HandleEventForOutlierDetection(ctx, tt.args.event, tt.args.od, registryTest, tt.args.clusterName, tt.args.modifySE)
+ if tt.expErr != nil {
+ assert.Contains(t, err.Error(), tt.expErr.Error())
+ } else {
+ assert.Nil(t, err, "Not expecting error")
+ }
+ })
+ }
+}
From e9652d6e786c7064ca29f4091ea2ca1e38c3db70 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 10:43:46 -0400
Subject: [PATCH 151/235] copied admiral/pkg/clusters/registry.go changes from
master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/registry.go | 301 ++++++++++++++++++-------------
1 file changed, 171 insertions(+), 130 deletions(-)
diff --git a/admiral/pkg/clusters/registry.go b/admiral/pkg/clusters/registry.go
index 8cc2315b..c57a7bef 100644
--- a/admiral/pkg/clusters/registry.go
+++ b/admiral/pkg/clusters/registry.go
@@ -6,239 +6,280 @@ import (
"os"
"time"
- "github.com/istio-ecosystem/admiral/admiral/pkg/controller/istio"
- "k8s.io/client-go/rest"
-
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/istio"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/secret"
- log "github.com/sirupsen/logrus"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/util"
+ commonUtil "github.com/istio-ecosystem/admiral/admiral/pkg/util"
+ "k8s.io/client-go/rest"
+
+ "github.com/sirupsen/logrus"
)
const (
- LogFormat = "op=%s type=%v name=%v cluster=%s message=%s"
- LogErrFormat = "op=%s type=%v name=%v cluster=%s, e=%v"
+ LogFormat = "op=%v type=%v name=%v cluster=%s message=%v"
+ LogFormatAdv = "op=%v type=%v name=%v namespace=%s cluster=%s message=%v"
+ LogFormatNew = "op=%v type=%v name=%v namespace=%s identity=%s cluster=%s message=%v"
+ LogFormatOperationTime = "op=%v type=%v name=%v namespace=%s cluster=%s message=%v"
+ LogErrFormat = "op=%v type=%v name=%v cluster=%v error=%v"
+ AlertLogMsg = "type assertion failed, %v is not of type string"
+ AssertionLogMsg = "type assertion failed, %v is not of type *RemoteRegistry"
)
func InitAdmiral(ctx context.Context, params common.AdmiralParams) (*RemoteRegistry, error) {
-
- log.Infof("Initializing Admiral with params: %v", params)
-
+ ctxLogger := logrus.WithFields(logrus.Fields{})
+ logrus.Infof("Initializing Admiral with params: %v", params)
common.InitializeConfig(params)
- CurrentAdmiralState = AdmiralState{ReadOnly: ReadOnlyEnabled, IsStateInitialized: StateNotInitialized}
- startAdmiralStateChecker(ctx, params)
- pauseForAdmiralToInitializeState()
-
+ //init admiral state
+ commonUtil.CurrentAdmiralState = commonUtil.AdmiralState{ReadOnly: ReadOnlyEnabled, IsStateInitialized: StateNotInitialized}
+ // start admiral state checker for DR
+ drStateChecker := initAdmiralStateChecker(ctx, params.AdmiralStateCheckerName, params.DRStateStoreConfigPath)
rr := NewRemoteRegistry(ctx, params)
+ ctx = context.WithValue(ctx, "remoteRegistry", rr)
+ RunAdmiralStateCheck(ctx, params.AdmiralStateCheckerName, drStateChecker)
+ pauseForAdmiralToInitializeState()
+ var err error
+ destinationServiceProcessor := &ProcessDestinationService{}
wd := DependencyHandler{
- RemoteRegistry: rr,
+ RemoteRegistry: rr,
+ DestinationServiceProcessor: destinationServiceProcessor,
}
- var err error
- wd.DepController, err = admiral.NewDependencyController(ctx.Done(), &wd, params.KubeconfigPath, params.DependenciesNamespace, params.CacheRefreshDuration)
+ wd.DepController, err = admiral.NewDependencyController(ctx.Done(), &wd, params.KubeconfigPath, params.DependenciesNamespace, 0, rr.ClientLoader)
if err != nil {
return nil, fmt.Errorf("error with dependency controller init: %v", err)
}
- dependencyProxyHandler := DependencyProxyHandler{
- RemoteRegistry: rr,
- dependencyProxyDefaultHostNameGenerator: &dependencyProxyDefaultHostNameGenerator{},
+ if !params.ArgoRolloutsEnabled {
+ logrus.Info("argo rollouts disabled")
}
- dependencyProxyHandler.DepController, err = admiral.NewDependencyProxyController(ctx.Done(), &dependencyProxyHandler, params.KubeconfigPath, params.DependenciesNamespace, params.CacheRefreshDuration)
+ configMapController, err := admiral.NewConfigMapController(params.ServiceEntryIPPrefix, rr.ClientLoader)
if err != nil {
- return nil, fmt.Errorf("error with dependencyproxy controller %w", err)
+ return nil, fmt.Errorf("error with configmap controller init: %v", err)
}
- if !params.ArgoRolloutsEnabled {
- log.Info("argo rollouts disabled")
- }
+ rr.AdmiralCache.ConfigMapController = configMapController
+ loadServiceEntryCacheData(ctxLogger, ctx, rr.AdmiralCache.ConfigMapController, rr.AdmiralCache)
+
+ err = InitAdmiralWithDefaultPersona(ctx, params, rr)
- configMapController, err := admiral.NewConfigMapController(params.ServiceEntryIPPrefix)
if err != nil {
- return nil, fmt.Errorf("error with configmap controller init: %v", err)
+ return nil, err
}
- rr.AdmiralCache.ConfigMapController = configMapController
- loadServiceEntryCacheData(ctx, rr.AdmiralCache.ConfigMapController, rr.AdmiralCache)
- err = createSecretController(ctx, rr)
+ go rr.shutdown()
+
+ return rr, err
+}
+
+func InitAdmiralHA(ctx context.Context, params common.AdmiralParams) (*RemoteRegistry, error) {
+ var (
+ err error
+ rr *RemoteRegistry
+ )
+ logrus.Infof("Initializing Admiral HA with params: %v", params)
+ common.InitializeConfig(params)
+ if common.GetHAMode() == common.HAController {
+ rr = NewRemoteRegistryForHAController(ctx)
+ } else {
+ return nil, fmt.Errorf("admiral HA only supports %s mode", common.HAController)
+ }
+ destinationServiceProcessor := &ProcessDestinationService{}
+ rr.DependencyController, err = admiral.NewDependencyController(
+ ctx.Done(),
+ &DependencyHandler{
+ RemoteRegistry: rr,
+ DestinationServiceProcessor: destinationServiceProcessor,
+ },
+ params.KubeconfigPath,
+ params.DependenciesNamespace,
+ params.CacheReconcileDuration,
+ rr.ClientLoader)
if err != nil {
- return nil, fmt.Errorf("error with secret control init: %v", err)
+ return nil, fmt.Errorf("error with DependencyController initialization: %v", err)
}
+ err = InitAdmiralWithDefaultPersona(ctx, params, rr)
go rr.shutdown()
+ return rr, err
+}
- return rr, nil
+func InitAdmiralWithDefaultPersona(ctx context.Context, params common.AdmiralParams, w *RemoteRegistry) error {
+ logrus.Infof("Initializing Default Persona of Admiral")
+
+ err := createSecretController(ctx, w)
+ if err != nil {
+ return fmt.Errorf("error with secret control init: %v", err)
+ }
+ return nil
}
func pauseForAdmiralToInitializeState() {
// Sleep until Admiral determines state. This is done to make sure events are not skipped during startup while determining READ-WRITE state
start := time.Now()
- log.Info("Pausing thread to let Admiral determine it's READ-WRITE state. This is to let Admiral determine it's state during startup")
+ logrus.Info("Pausing thread to let Admiral determine it's READ-WRITE state. This is to let Admiral determine it's state during startup")
for {
- if CurrentAdmiralState.IsStateInitialized {
- log.Infof("Time taken for Admiral to complete state initialization =%v ms", time.Since(start).Milliseconds())
+ if commonUtil.CurrentAdmiralState.IsStateInitialized {
+ logrus.Infof("Time taken for Admiral to complete state initialization =%v ms", time.Since(start).Milliseconds())
break
}
if time.Since(start).Milliseconds() > 60000 {
- log.Error("Admiral not initialized after 60 seconds. Exiting now!!")
+ logrus.Error("Admiral not initialized after 60 seconds. Exiting now!!")
os.Exit(-1)
}
- log.Debug("Admiral is waiting to determine state before proceeding with boot up")
+ logrus.Debug("Admiral is waiting to determine state before proceeding with boot up")
time.Sleep(100 * time.Millisecond)
}
}
func createSecretController(ctx context.Context, w *RemoteRegistry) error {
- var err error
- var controller *secret.Controller
-
- w.secretClient, err = admiral.K8sClientFromPath(common.GetKubeconfigPath())
+ var (
+ err error
+ controller *secret.Controller
+ )
+ w.secretClient, err = w.ClientLoader.LoadKubeClientFromPath(common.GetKubeconfigPath())
if err != nil {
return fmt.Errorf("could not create K8s client: %v", err)
}
-
- controller, err = secret.StartSecretController(ctx, w.secretClient,
+ controller, err = secret.StartSecretController(
+ ctx,
+ w.secretClient,
w.createCacheController,
w.updateCacheController,
w.deleteCacheController,
common.GetClusterRegistriesNamespace(),
- common.GetSecretResolver())
-
+ common.GetAdmiralProfile(), common.GetAdmiralConfigPath())
if err != nil {
return fmt.Errorf("could not start secret controller: %v", err)
}
-
w.SecretController = controller
-
return nil
}
-func (r *RemoteRegistry) createCacheController(clientConfig *rest.Config, clusterID string, resyncPeriod time.Duration) error {
-
- stop := make(chan struct{})
-
- rc := RemoteController{
- stop: stop,
- ClusterID: clusterID,
- ApiServer: clientConfig.Host,
- StartTime: time.Now(),
- }
-
- var err error
-
- log.Infof("starting service controller clusterID: %v", clusterID)
- rc.ServiceController, err = admiral.NewServiceController(clusterID, stop, &ServiceHandler{RemoteRegistry: r, ClusterID: clusterID}, clientConfig, 0)
-
- if err != nil {
- return fmt.Errorf("error with ServiceController controller init: %v", err)
- }
-
- log.Infof("starting global traffic policy controller custerID: %v", clusterID)
-
- rc.GlobalTraffic, err = admiral.NewGlobalTrafficController(clusterID, stop, &GlobalTrafficHandler{RemoteRegistry: r, ClusterID: clusterID}, clientConfig, 0)
-
- if err != nil {
- return fmt.Errorf("error with GlobalTrafficController controller init: %v", err)
- }
-
- log.Infof("starting node controller clusterID: %v", clusterID)
- rc.NodeController, err = admiral.NewNodeController(clusterID, stop, &NodeHandler{RemoteRegistry: r, ClusterID: clusterID}, clientConfig)
-
- if err != nil {
- return fmt.Errorf("error with NodeController controller init: %v", err)
- }
+func (r *RemoteRegistry) createCacheController(clientConfig *rest.Config, clusterID string, resyncPeriod util.ResyncIntervals) error {
+ var (
+ err error
+ stop = make(chan struct{})
+ rc = RemoteController{
+ stop: stop,
+ ClusterID: clusterID,
+ ApiServer: clientConfig.Host,
+ StartTime: time.Now(),
+ }
+ )
+ if common.GetHAMode() != common.HAController {
+ logrus.Infof("starting ServiceController clusterID: %v", clusterID)
+ rc.ServiceController, err = admiral.NewServiceController(stop, &ServiceHandler{RemoteRegistry: r, ClusterID: clusterID}, clientConfig, 0, r.ClientLoader)
+ if err != nil {
+ return fmt.Errorf("error with ServiceController initialization, err: %v", err)
+ }
- log.Infof("starting service entry controller for custerID: %v", clusterID)
- rc.ServiceEntryController, err = istio.NewServiceEntryController(clusterID, stop, &ServiceEntryHandler{RemoteRegistry: r, ClusterID: clusterID}, clientConfig, 0)
+ if common.IsClientConnectionConfigProcessingEnabled() {
+ logrus.Infof("starting ClientConnectionsConfigController clusterID: %v", clusterID)
+ rc.ClientConnectionConfigController, err = admiral.NewClientConnectionConfigController(
+ stop, &ClientConnectionConfigHandler{RemoteRegistry: r, ClusterID: clusterID}, clientConfig, 0, r.ClientLoader)
+ if err != nil {
+ return fmt.Errorf("error with ClientConnectionsConfigController initialization, err: %v", err)
+ }
+ } else {
+ logrus.Infof("ClientConnectionsConfigController processing is disabled")
+ }
- if err != nil {
- return fmt.Errorf("error with ServiceEntryController init: %v", err)
- }
+ logrus.Infof("starting GlobalTrafficController clusterID: %v", clusterID)
+ rc.GlobalTraffic, err = admiral.NewGlobalTrafficController(stop, &GlobalTrafficHandler{RemoteRegistry: r, ClusterID: clusterID}, clientConfig, 0, r.ClientLoader)
+ if err != nil {
+ return fmt.Errorf("error with GlobalTrafficController initialization, err: %v", err)
+ }
- log.Infof("starting destination rule controller for custerID: %v", clusterID)
- rc.DestinationRuleController, err = istio.NewDestinationRuleController(clusterID, stop, &DestinationRuleHandler{RemoteRegistry: r, ClusterID: clusterID}, clientConfig, 0)
+ logrus.Infof("starting OutlierDetectionController clusterID : %v", clusterID)
+ rc.OutlierDetectionController, err = admiral.NewOutlierDetectionController(stop, &OutlierDetectionHandler{RemoteRegistry: r, ClusterID: clusterID}, clientConfig, 0, r.ClientLoader)
+ if err != nil {
+ return fmt.Errorf("error with OutlierDetectionController initialization, err: %v", err)
+ }
- if err != nil {
- return fmt.Errorf("error with DestinationRuleController init: %v", err)
- }
+ logrus.Infof("starting NodeController clusterID: %v", clusterID)
+ rc.NodeController, err = admiral.NewNodeController(stop, &NodeHandler{RemoteRegistry: r, ClusterID: clusterID}, clientConfig, r.ClientLoader)
+ if err != nil {
+ return fmt.Errorf("error with NodeController controller initialization, err: %v", err)
+ }
+ logrus.Infof("starting ServiceEntryController for clusterID: %v", clusterID)
+ rc.ServiceEntryController, err = istio.NewServiceEntryController(stop, &ServiceEntryHandler{RemoteRegistry: r, ClusterID: clusterID}, clusterID, clientConfig, resyncPeriod.SeAndDrReconcileInterval, r.ClientLoader)
+ if err != nil {
+ return fmt.Errorf("error with ServiceEntryController initialization, err: %v", err)
+ }
- log.Infof("starting virtual service controller for custerID: %v", clusterID)
- rc.VirtualServiceController, err = istio.NewVirtualServiceController(clusterID, stop, &VirtualServiceHandler{RemoteRegistry: r, ClusterID: clusterID}, clientConfig, 0)
+ logrus.Infof("starting DestinationRuleController for clusterID: %v", clusterID)
+ rc.DestinationRuleController, err = istio.NewDestinationRuleController(stop, &DestinationRuleHandler{RemoteRegistry: r, ClusterID: clusterID}, clusterID, clientConfig, resyncPeriod.SeAndDrReconcileInterval, r.ClientLoader)
+ if err != nil {
+ return fmt.Errorf("error with DestinationRuleController initialization, err: %v", err)
+ }
- if err != nil {
- return fmt.Errorf("error with VirtualServiceController init: %v", err)
- }
+ logrus.Infof("starting VirtualServiceController for clusterID: %v", clusterID)
+ virtualServiceHandler, err := NewVirtualServiceHandler(r, clusterID)
+ if err != nil {
+ return fmt.Errorf("error initializing VirtualServiceHandler: %v", err)
+ }
+ rc.VirtualServiceController, err = istio.NewVirtualServiceController(stop, virtualServiceHandler, clientConfig, 0, r.ClientLoader)
+ if err != nil {
+ return fmt.Errorf("error with VirtualServiceController initialization, err: %v", err)
+ }
- rc.SidecarController, err = istio.NewSidecarController(clusterID, stop, &SidecarHandler{RemoteRegistry: r, ClusterID: clusterID}, clientConfig, 0)
+ logrus.Infof("starting SidecarController for clusterID: %v", clusterID)
+ rc.SidecarController, err = istio.NewSidecarController(stop, &SidecarHandler{RemoteRegistry: r, ClusterID: clusterID}, clientConfig, 0, r.ClientLoader)
+ if err != nil {
+ return fmt.Errorf("error with SidecarController initialization, err: %v", err)
+ }
- if err != nil {
- return fmt.Errorf("error with DestinationRuleController init: %v", err)
+ logrus.Infof("starting RoutingPoliciesController for clusterID: %v", clusterID)
+ rc.RoutingPolicyController, err = admiral.NewRoutingPoliciesController(stop, &RoutingPolicyHandler{RemoteRegistry: r, ClusterID: clusterID}, clientConfig, 0, r.ClientLoader)
+ if err != nil {
+ return fmt.Errorf("error with RoutingPoliciesController initialization, err: %v", err)
+ }
}
-
- log.Infof("starting deployment controller clusterID: %v", clusterID)
- rc.DeploymentController, err = admiral.NewDeploymentController(clusterID, stop, &DeploymentHandler{RemoteRegistry: r, ClusterID: clusterID}, clientConfig, resyncPeriod)
-
+ logrus.Infof("starting DeploymentController for clusterID: %v", clusterID)
+ rc.DeploymentController, err = admiral.NewDeploymentController(stop, &DeploymentHandler{RemoteRegistry: r, ClusterID: clusterID}, clientConfig, resyncPeriod.UniversalReconcileInterval, r.ClientLoader)
if err != nil {
- return fmt.Errorf("error with DeploymentController controller init: %v", err)
+ return fmt.Errorf("error with DeploymentController initialization, err: %v", err)
}
-
+ logrus.Infof("starting RolloutController clusterID: %v", clusterID)
if r.AdmiralCache == nil {
- log.Warn("admiral cache was nil!")
+ logrus.Warn("admiral cache was nil!")
} else if r.AdmiralCache.argoRolloutsEnabled {
- log.Infof("starting rollout controller clusterID: %v", clusterID)
- rc.RolloutController, err = admiral.NewRolloutsController(clusterID, stop, &RolloutHandler{RemoteRegistry: r, ClusterID: clusterID}, clientConfig, resyncPeriod)
-
+ rc.RolloutController, err = admiral.NewRolloutsController(stop, &RolloutHandler{RemoteRegistry: r, ClusterID: clusterID}, clientConfig, resyncPeriod.UniversalReconcileInterval, r.ClientLoader)
if err != nil {
- return fmt.Errorf("error with Rollout controller init: %v", err)
+ return fmt.Errorf("error with RolloutController initialization, err: %v", err)
}
}
-
- log.Infof("starting Routing Policies controller for custerID: %v", clusterID)
- rc.RoutingPolicyController, err = admiral.NewRoutingPoliciesController(stop, &RoutingPolicyHandler{RemoteRegistry: r, ClusterID: clusterID}, clientConfig, 1*time.Minute)
-
- if err != nil {
- return fmt.Errorf("error with virtualServiceController init: %v", err)
- }
-
r.PutRemoteController(clusterID, &rc)
-
- log.Infof("Create Controller %s", clusterID)
-
return nil
}
-func (r *RemoteRegistry) updateCacheController(clientConfig *rest.Config, clusterID string, resyncPeriod time.Duration) error {
+func (r *RemoteRegistry) updateCacheController(clientConfig *rest.Config, clusterID string, resyncPeriod util.ResyncIntervals) error {
//We want to refresh the cache controllers. But the current approach is parking the goroutines used in the previous set of controllers, leading to a rather large memory leak.
//This is a temporary fix to only do the controller refresh if the API Server of the remote cluster has changed
//The refresh will still park goroutines and still increase memory usage. But it will be a *much* slower leak. Filed https://github.com/istio-ecosystem/admiral/issues/122 for that.
controller := r.GetRemoteController(clusterID)
-
if clientConfig.Host != controller.ApiServer {
- log.Infof("Client mismatch, recreating cache controllers for cluster=%v", clusterID)
-
+ logrus.Infof("Client mismatch, recreating cache controllers for cluster=%v", clusterID)
if err := r.deleteCacheController(clusterID); err != nil {
return err
}
return r.createCacheController(clientConfig, clusterID, resyncPeriod)
-
}
return nil
}
func (r *RemoteRegistry) deleteCacheController(clusterID string) error {
-
controller := r.GetRemoteController(clusterID)
-
if controller != nil {
close(controller.stop)
}
-
r.DeleteRemoteController(clusterID)
-
- log.Infof(LogFormat, "Delete", "remote-controller", clusterID, clusterID, "success")
+ logrus.Infof(LogFormat, "Delete", "remote-controller", clusterID, clusterID, "success")
return nil
}
From 54055d305f2474283f31f54d79fbafa366a03421 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 10:45:24 -0400
Subject: [PATCH 152/235] copied admiral/pkg/clusters/registry_test.go changes
from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/registry_test.go | 164 ++++++++++++++++++--------
1 file changed, 116 insertions(+), 48 deletions(-)
diff --git a/admiral/pkg/clusters/registry_test.go b/admiral/pkg/clusters/registry_test.go
index 92f50d53..823946d5 100644
--- a/admiral/pkg/clusters/registry_test.go
+++ b/admiral/pkg/clusters/registry_test.go
@@ -2,6 +2,7 @@ package clusters
import (
"context"
+ "fmt"
"strings"
"sync"
"testing"
@@ -9,10 +10,12 @@ import (
"github.com/google/go-cmp/cmp"
depModel "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model"
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
"github.com/istio-ecosystem/admiral/admiral/pkg/test"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/util"
"github.com/sirupsen/logrus"
logTest "github.com/sirupsen/logrus/hooks/test"
networking "istio.io/api/networking/v1alpha3"
@@ -29,24 +32,25 @@ var registryTestSingleton sync.Once
func admiralParamsForRegistryTests() common.AdmiralParams {
return common.AdmiralParams{
LabelSet: &common.LabelSet{
- WorkloadIdentityKey: "identity",
- GlobalTrafficDeploymentLabel: "identity",
- PriorityKey: "priority",
- EnvKey: "admiral.io/env",
+ WorkloadIdentityKey: "identity",
+ AdmiralCRDIdentityLabel: "identity",
+ PriorityKey: "priority",
+ EnvKey: "admiral.io/env",
},
- KubeconfigPath: "testdata/fake.config",
- EnableSAN: true,
- SANPrefix: "prefix",
- HostnameSuffix: "mesh",
- SyncNamespace: "ns",
- CacheRefreshDuration: time.Minute,
- ClusterRegistriesNamespace: "default",
- DependenciesNamespace: "default",
- SecretResolver: "",
- WorkloadSidecarUpdate: "enabled",
- WorkloadSidecarName: "default",
- EnableRoutingPolicy: true,
- EnvoyFilterVersion: "1.13",
+ KubeconfigPath: "testdata/fake.config",
+ EnableSAN: true,
+ SANPrefix: "prefix",
+ HostnameSuffix: "mesh",
+ SyncNamespace: "ns",
+ CacheReconcileDuration: 1 * time.Minute,
+ SeAndDrCacheReconcileDuration: 1 * time.Minute,
+ ClusterRegistriesNamespace: "default",
+ DependenciesNamespace: "default",
+ WorkloadSidecarUpdate: "enabled",
+ WorkloadSidecarName: "default",
+ EnableRoutingPolicy: true,
+ EnvoyFilterVersion: "1.13",
+ Profile: common.AdmiralProfileDefault,
}
}
@@ -68,12 +72,12 @@ func TestDeleteCacheControllerThatDoesntExist(t *testing.T) {
func TestDeleteCacheController(t *testing.T) {
setupForRegistryTests()
- w := NewRemoteRegistry(nil, common.AdmiralParams{})
+ w := NewRemoteRegistry(context.TODO(), common.AdmiralParams{})
r := rest.Config{
Host: "test.com",
}
cluster := "test.cluster"
- w.createCacheController(&r, cluster, time.Second*time.Duration(300))
+ w.createCacheController(&r, cluster, util.ResyncIntervals{UniversalReconcileInterval: 300 * time.Second, SeAndDrReconcileInterval: 300 * time.Second})
rc := w.GetRemoteController(cluster)
if rc == nil {
@@ -142,23 +146,23 @@ func createMockRemoteController(f func(interface{})) (*RemoteController, error)
Host: "localhost",
}
stop := make(chan struct{})
- d, err := admiral.NewDeploymentController("", stop, &test.MockDeploymentHandler{}, &config, time.Second*time.Duration(300))
+ d, err := admiral.NewDeploymentController(stop, &test.MockDeploymentHandler{}, &config, time.Second*time.Duration(300), loader.GetFakeClientLoader())
if err != nil {
return nil, err
}
- s, err := admiral.NewServiceController("test", stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300))
+ s, err := admiral.NewServiceController(stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300), loader.GetFakeClientLoader())
if err != nil {
return nil, err
}
- n, err := admiral.NewNodeController("", stop, &test.MockNodeHandler{}, &config)
+ n, err := admiral.NewNodeController(stop, &test.MockNodeHandler{}, &config, loader.GetFakeClientLoader())
if err != nil {
return nil, err
}
- r, err := admiral.NewRolloutsController("test", stop, &test.MockRolloutHandler{}, &config, time.Second*time.Duration(300))
+ r, err := admiral.NewRolloutsController(stop, &test.MockRolloutHandler{}, &config, time.Second*time.Duration(300), loader.GetFakeClientLoader())
if err != nil {
return nil, err
}
- rpc, err := admiral.NewRoutingPoliciesController(stop, &test.MockRoutingPolicyHandler{}, &config, time.Second*time.Duration(300))
+ rpc, err := admiral.NewRoutingPoliciesController(stop, &test.MockRoutingPolicyHandler{}, &config, time.Second*time.Duration(300), loader.GetFakeClientLoader())
if err != nil {
return nil, err
}
@@ -167,6 +171,7 @@ func createMockRemoteController(f func(interface{})) (*RemoteController, error)
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "test",
+ Labels: map[string]string{"sidecar.istio.io/inject": "true", "identity": "bar", "env": "dev"},
},
Spec: k8sAppsV1.DeploymentSpec{
Selector: &metav1.LabelSelector{
@@ -174,7 +179,8 @@ func createMockRemoteController(f func(interface{})) (*RemoteController, error)
},
Template: k8sCoreV1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{"identity": "bar", "istio-injected": "true", "env": "dev"},
+ Annotations: map[string]string{"sidecar.istio.io/inject": "true"},
+ Labels: map[string]string{"identity": "bar", "istio-injected": "true", "env": "dev"},
},
},
},
@@ -215,7 +221,7 @@ func TestCreateSecretController(t *testing.T) {
common.SetKubeconfigPath("fail")
- err = createSecretController(context.Background(), NewRemoteRegistry(nil, common.AdmiralParams{}))
+ err = createSecretController(context.Background(), NewRemoteRegistry(context.TODO(), common.AdmiralParams{}))
common.SetKubeconfigPath("testdata/fake.config")
@@ -257,15 +263,16 @@ func TestAdded(t *testing.T) {
t.Fail()
})
rr.PutRemoteController("test.cluster", rc)
- d, e := admiral.NewDependencyController(make(chan struct{}), &test.MockDependencyHandler{}, p.KubeconfigPath, "dep-ns", time.Second*time.Duration(300))
+ d, e := admiral.NewDependencyController(make(chan struct{}), &test.MockDependencyHandler{}, p.KubeconfigPath, "dep-ns", time.Second*time.Duration(300), loader.GetFakeClientLoader())
if e != nil {
t.Fail()
}
dh := DependencyHandler{
- RemoteRegistry: rr,
- DepController: d,
+ RemoteRegistry: rr,
+ DepController: d,
+ DestinationServiceProcessor: &MockDestinationServiceProcessor{},
}
depData := v1.Dependency{
@@ -284,23 +291,9 @@ func TestAdded(t *testing.T) {
func TestGetServiceForDeployment(t *testing.T) {
setupForRegistryTests()
baseRc, _ := createMockRemoteController(func(i interface{}) {
- //res := i.(istio.Config)
- //se, ok := res.Spec.(*v1alpha3.ServiceEntry)
- //if ok {
- // if se.Hosts[0] != "dev.bar.global" {
- // t.Errorf("Host mismatch. Expected dev.bar.global, got %v", se.Hosts[0])
- // }
- //}
})
rcWithService, _ := createMockRemoteController(func(i interface{}) {
- //res := i.(istio.Config)
- //se, ok := res.Spec.(*networking.ServiceEntry)
- //if ok {
- // if se.Hosts[0] != "dev.bar.global" {
- // t.Errorf("Host mismatch. Expected dev.bar.global, got %v", se.Hosts[0])
- // }
- //}
})
service := k8sCoreV1.Service{}
@@ -354,7 +347,7 @@ func TestGetServiceForDeployment(t *testing.T) {
//Run the test for every provided case
for _, c := range testCases {
t.Run(c.name, func(t *testing.T) {
- resultingService := getServiceForDeployment(c.controller, c.deployment)
+ resultingService, _ := getServiceForDeployment(c.controller, c.deployment)
if resultingService == nil && c.expectedService == nil {
//perfect
} else {
@@ -370,9 +363,14 @@ func TestGetServiceForDeployment(t *testing.T) {
func TestUpdateCacheController(t *testing.T) {
setupForRegistryTests()
p := common.AdmiralParams{
- KubeconfigPath: "testdata/fake.config",
+ KubeconfigPath: "testdata/fake.config",
+ CacheReconcileDuration: 300 * time.Second,
+ SeAndDrCacheReconcileDuration: 150 * time.Second,
}
originalConfig, err := clientcmd.BuildConfigFromFlags("", "testdata/fake.config")
+ if err != nil {
+ t.Fatalf("unexpected error when building client with testdata/fake.config, err: %v", err)
+ }
changedConfig, err := clientcmd.BuildConfigFromFlags("", "testdata/fake_2.config")
if err != nil {
t.Fatalf("Unexpected error getting client %v", err)
@@ -415,13 +413,13 @@ func TestUpdateCacheController(t *testing.T) {
t.Run(c.name, func(t *testing.T) {
hook := logTest.NewGlobal()
rr.GetRemoteController(c.clusterId).ApiServer = c.oldConfig.Host
- d, err := admiral.NewDeploymentController("", make(chan struct{}), &test.MockDeploymentHandler{}, c.oldConfig, time.Second*time.Duration(300))
+ d, err := admiral.NewDeploymentController(make(chan struct{}), &test.MockDeploymentHandler{}, c.oldConfig, time.Second*time.Duration(300), loader.GetFakeClientLoader())
if err != nil {
t.Fatalf("Unexpected error creating controller %v", err)
}
rc.DeploymentController = d
- err = rr.updateCacheController(c.newConfig, c.clusterId, time.Second*time.Duration(300))
+ err = rr.updateCacheController(c.newConfig, c.clusterId, common.GetResyncIntervals())
if err != nil {
t.Fatalf("Unexpected error doing update %v", err)
}
@@ -447,3 +445,73 @@ func checkIfLogged(entries []*logrus.Entry, phrase string) bool {
}
return false
}
+
+func TestInitAdmiralHA(t *testing.T) {
+ var (
+ ctx = context.TODO()
+ dummyKubeConfig = "./testdata/fake.config"
+ dependencyNamespace = "dependency-ns"
+ )
+ testCases := []struct {
+ name string
+ params common.AdmiralParams
+ assertFunc func(rr *RemoteRegistry, t *testing.T)
+ expectedErr error
+ }{
+ {
+ name: "Given Admiral is running in HA mode for database builder, " +
+ "When InitAdmiralHA is invoked with correct parameters, " +
+ "Then, it should return RemoteRegistry with 3 controllers - DependencyController, " +
+ "DeploymentController, and RolloutController",
+ params: common.AdmiralParams{
+ HAMode: common.HAController,
+ KubeconfigPath: dummyKubeConfig,
+ DependenciesNamespace: dependencyNamespace,
+ },
+ assertFunc: func(rr *RemoteRegistry, t *testing.T) {
+ if rr == nil {
+ t.Error("expected RemoteRegistry to be initialized, but got nil")
+ }
+ // check if it has DependencyController initialized
+ if rr != nil && rr.DependencyController == nil {
+ t.Error("expected DependencyController to be initialized, but it was not")
+ }
+ },
+ expectedErr: nil,
+ },
+ {
+ name: "Given Admiral is running in HA mode for database builder, " +
+ "When InitAdmiralHA is invoked with invalid HAMode parameter, " +
+ "Then InitAdmiralHA should return an expected error",
+ params: common.AdmiralParams{
+ KubeconfigPath: dummyKubeConfig,
+ DependenciesNamespace: dependencyNamespace,
+ },
+ assertFunc: func(rr *RemoteRegistry, t *testing.T) {
+ if rr != nil {
+ t.Error("expected RemoteRegistry to be uninitialized")
+ }
+ },
+ expectedErr: fmt.Errorf("admiral HA only supports %s mode", common.HAController),
+ },
+ }
+
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ common.ResetSync()
+ rr, err := InitAdmiralHA(ctx, c.params)
+ if c.expectedErr == nil && err != nil {
+ t.Errorf("expected: nil, got: %v", err)
+ }
+ if c.expectedErr != nil {
+ if err == nil {
+ t.Errorf("expected: %v, got: %v", c.expectedErr, err)
+ }
+ if err != nil && c.expectedErr.Error() != err.Error() {
+ t.Errorf("expected: %v, got: %v", c.expectedErr, err)
+ }
+ }
+ c.assertFunc(rr, t)
+ })
+ }
+}
From e92990f5a595e389b293dea2654473b28bde48c2 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 10:47:04 -0400
Subject: [PATCH 153/235] copied admiral/pkg/clusters/rollout_handler.go
changes from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/rollout_handler.go | 74 +++++++++++++++++++++++++
1 file changed, 74 insertions(+)
create mode 100644 admiral/pkg/clusters/rollout_handler.go
diff --git a/admiral/pkg/clusters/rollout_handler.go b/admiral/pkg/clusters/rollout_handler.go
new file mode 100644
index 00000000..13adf06d
--- /dev/null
+++ b/admiral/pkg/clusters/rollout_handler.go
@@ -0,0 +1,74 @@
+package clusters
+
+import (
+ "context"
+ "fmt"
+
+ argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ log "github.com/sirupsen/logrus"
+)
+
+type RolloutHandler struct {
+ RemoteRegistry *RemoteRegistry
+ ClusterID string
+}
+
+func (rh *RolloutHandler) Added(ctx context.Context, obj *argo.Rollout) error {
+ err := HandleEventForRollout(ctx, admiral.Add, obj, rh.RemoteRegistry, rh.ClusterID)
+ if err != nil {
+ return fmt.Errorf(LogErrFormat, common.Add, common.RolloutResourceType, obj.Name, rh.ClusterID, err)
+ }
+ return err
+}
+
+func (rh *RolloutHandler) Updated(ctx context.Context, obj *argo.Rollout) error {
+ log.Infof(LogFormat, common.Update, common.RolloutResourceType, obj.Name, rh.ClusterID, common.ReceivedStatus)
+ return nil
+}
+
+func (rh *RolloutHandler) Deleted(ctx context.Context, obj *argo.Rollout) error {
+ err := HandleEventForRollout(ctx, admiral.Delete, obj, rh.RemoteRegistry, rh.ClusterID)
+ if err != nil {
+ return fmt.Errorf(LogErrFormat, common.Delete, common.RolloutResourceType, obj.Name, rh.ClusterID, err)
+ }
+ return err
+}
+
+type HandleEventForRolloutFunc func(ctx context.Context, event admiral.EventType, obj *argo.Rollout,
+ remoteRegistry *RemoteRegistry, clusterName string) error
+
+// HandleEventForRollout helper function to handle add and delete for RolloutHandler
+func HandleEventForRollout(ctx context.Context, event admiral.EventType, obj *argo.Rollout,
+ remoteRegistry *RemoteRegistry, clusterName string) error {
+ log.Infof(LogFormat, event, common.RolloutResourceType, obj.Name, clusterName, common.ReceivedStatus)
+ globalIdentifier := common.GetRolloutGlobalIdentifier(obj)
+ originalIdentifier := common.GetRolloutOriginalIdentifier(obj)
+ if len(globalIdentifier) == 0 {
+ log.Infof(LogFormat, event, common.RolloutResourceType, obj.Name, clusterName, "Skipped as '"+common.GetWorkloadIdentifier()+" was not found', namespace="+obj.Namespace)
+ return nil
+ }
+ env := common.GetEnvForRollout(obj)
+
+ ctx = context.WithValue(ctx, "clusterName", clusterName)
+ ctx = context.WithValue(ctx, "eventResourceType", common.Rollout)
+
+ if remoteRegistry.AdmiralCache != nil {
+ if remoteRegistry.AdmiralCache.IdentityClusterCache != nil {
+ remoteRegistry.AdmiralCache.IdentityClusterCache.Put(globalIdentifier, clusterName, clusterName)
+ }
+ if common.EnableSWAwareNSCaches() {
+ if remoteRegistry.AdmiralCache.IdentityClusterNamespaceCache != nil {
+ remoteRegistry.AdmiralCache.IdentityClusterNamespaceCache.Put(globalIdentifier, clusterName, obj.Namespace, obj.Namespace)
+ }
+ if remoteRegistry.AdmiralCache.PartitionIdentityCache != nil && len(common.GetRolloutIdentityPartition(obj)) > 0 {
+ remoteRegistry.AdmiralCache.PartitionIdentityCache.Put(globalIdentifier, originalIdentifier)
+ }
+ }
+ }
+
+ // Use the same function as the deployment Added handler to update and put a new service entry in place, replacing the old one
+ _, err := modifyServiceEntryForNewServiceOrPod(ctx, event, env, globalIdentifier, remoteRegistry)
+ return err
+}
From e0b6740c747ea2340d5311fa88e7606aa9fbda21 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:16:38 -0400
Subject: [PATCH 154/235] copied admiral/pkg/clusters/rollout_handler_test.go
changes from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/rollout_handler_test.go | 238 +++++++++++++++++++
1 file changed, 238 insertions(+)
create mode 100644 admiral/pkg/clusters/rollout_handler_test.go
diff --git a/admiral/pkg/clusters/rollout_handler_test.go b/admiral/pkg/clusters/rollout_handler_test.go
new file mode 100644
index 00000000..14a626b7
--- /dev/null
+++ b/admiral/pkg/clusters/rollout_handler_test.go
@@ -0,0 +1,238 @@
+package clusters
+
+import (
+ "context"
+ "sync"
+ "testing"
+ "time"
+
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ admiralFake "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/fake"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+
+ argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
+ coreV1 "k8s.io/api/core/v1"
+ metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var rolloutHandlerTestSingleton sync.Once
+
+func admiralParamsForRolloutHandlerTests() common.AdmiralParams {
+ return common.AdmiralParams{
+ KubeconfigPath: "testdata/fake.config",
+ LabelSet: &common.LabelSet{
+ WorkloadIdentityKey: "identity",
+ EnvKey: "admiral.io/env",
+ AdmiralCRDIdentityLabel: "identity",
+ PriorityKey: "priority",
+ IdentityPartitionKey: "admiral.io/identityPartition",
+ },
+ EnableSAN: true,
+ SANPrefix: "prefix",
+ HostnameSuffix: "mesh",
+ SyncNamespace: "ns",
+ CacheReconcileDuration: time.Minute,
+ ClusterRegistriesNamespace: "default",
+ DependenciesNamespace: "default",
+ EnableRoutingPolicy: true,
+ EnvoyFilterVersion: "1.13",
+ Profile: common.AdmiralProfileDefault,
+ EnableSWAwareNSCaches: true,
+ ExportToIdentityList: []string{"*"},
+ ExportToMaxNamespaces: 35,
+ }
+}
+
+func setupForRolloutHandlerTests() {
+ rolloutHandlerTestSingleton.Do(func() {
+ common.ResetSync()
+ common.InitializeConfig(admiralParamsForRolloutHandlerTests())
+ })
+}
+
+func TestRolloutHandlerPartitionCache(t *testing.T) {
+ setupForRolloutHandlerTests()
+ admiralParams := admiralParamsForRolloutHandlerTests()
+ ctx := context.Background()
+ remoteRegistry, _ := InitAdmiral(ctx, admiralParams)
+ remoteRegistry.AdmiralCache.PartitionIdentityCache = common.NewMap()
+ partitionIdentifier := "admiral.io/identityPartition"
+ clusterName := "test-k8s"
+
+ testCases := []struct {
+ name string
+ rollout argo.Rollout
+ expected string
+ }{
+ {
+ name: "Given the rollout has the partition label, " +
+ "Then the PartitionIdentityCache should contain an entry for that rollout",
+ rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{ObjectMeta: metaV1.ObjectMeta{Labels: map[string]string{partitionIdentifier: "sw1", "env": "stage", "identity": "services.gateway"}}}}},
+ expected: "services.gateway",
+ },
+ {
+ name: "Given the rollout has the partition annotation, " +
+ "Then the PartitionIdentityCache should contain an entry for that rollout",
+ rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{ObjectMeta: metaV1.ObjectMeta{Annotations: map[string]string{partitionIdentifier: "sw2", "env": "stage", "identity": "services.gateway"}}}}},
+ expected: "services.gateway",
+ },
+ {
+ name: "Given the rollout doesn't have the partition label or annotation, " +
+ "Then the PartitionIdentityCache should not contain an entry for that rollout",
+ rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{ObjectMeta: metaV1.ObjectMeta{Labels: map[string]string{"identity": "services.gateway"}, Annotations: map[string]string{}}}}},
+ expected: "",
+ },
+ }
+
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ _ = HandleEventForRollout(ctx, admiral.Add, &c.rollout, remoteRegistry, clusterName)
+ iVal := ""
+ if len(c.expected) > 0 {
+ globalIdentifier := common.GetRolloutGlobalIdentifier(&c.rollout)
+ iVal = remoteRegistry.AdmiralCache.PartitionIdentityCache.Get(globalIdentifier)
+ }
+ if !(iVal == c.expected) {
+ t.Errorf("Expected cache to contain: %s, got: %s", c.expected, iVal)
+ }
+ })
+ }
+}
+
+func TestRolloutHandler(t *testing.T) {
+ setupForRolloutHandlerTests()
+ ctx := context.Background()
+ p := common.AdmiralParams{
+ KubeconfigPath: "testdata/fake.config",
+ }
+
+ gtpCache := &globalTrafficCache{}
+ gtpCache.identityCache = make(map[string]*v1.GlobalTrafficPolicy)
+ gtpCache.mutex = &sync.Mutex{}
+
+ fakeCrdClient := admiralFake.NewSimpleClientset()
+ gtpController := &admiral.GlobalTrafficController{CrdClient: fakeCrdClient}
+
+ remoteController, _ := createMockRemoteController(func(i interface{}) {
+ })
+ remoteController.GlobalTraffic = gtpController
+ registry, _ := InitAdmiral(context.Background(), p)
+ registry.remoteControllers = map[string]*RemoteController{"cluster-1": remoteController}
+ registry.AdmiralCache.GlobalTrafficCache = gtpCache
+
+ handler := RolloutHandler{}
+ handler.RemoteRegistry = registry
+ handler.ClusterID = "cluster-1"
+
+ rollout := argo.Rollout{
+ ObjectMeta: metaV1.ObjectMeta{
+ Name: "test",
+ Namespace: "namespace",
+ Labels: map[string]string{"identity": "app1"},
+ },
+ Spec: argo.RolloutSpec{
+ Selector: &metaV1.LabelSelector{
+ MatchLabels: map[string]string{"identity": "bar"},
+ },
+ Template: coreV1.PodTemplateSpec{
+ ObjectMeta: metaV1.ObjectMeta{
+ Labels: map[string]string{"identity": "bar", "istio-injected": "true", "env": "dev"},
+ },
+ },
+ },
+ }
+
+ testCases := []struct {
+ name string
+ addedRollout *argo.Rollout
+ expectedRolloutCacheKey string
+ expectedIdentityCacheValue *v1.GlobalTrafficPolicy
+ expectedRolloutCacheValue *argo.Rollout
+ }{{
+ name: "Shouldn't throw errors when called",
+ addedRollout: &rollout,
+ expectedRolloutCacheKey: "myGTP1",
+ expectedIdentityCacheValue: nil,
+ expectedRolloutCacheValue: nil,
+ }, {
+ name: "Shouldn't throw errors when called-no identity",
+ addedRollout: &argo.Rollout{},
+ expectedRolloutCacheKey: "myGTP1",
+ expectedIdentityCacheValue: nil,
+ expectedRolloutCacheValue: nil,
+ },
+ }
+
+ //Rather annoying, but wasn't able to get the autogenerated fake k8s client for GTP objects to allow me to list resources, so this test is only for not throwing errors. I'll be testing the rest of the functionality piecemeal.
+ //Side note, if anyone knows how to fix `level=error msg="Failed to list rollouts in cluster, error: no kind \"GlobalTrafficPolicyList\" is registered for version \"admiral.io/v1\" in scheme \"pkg/runtime/scheme.go:101\""`, I'd love to hear it!
+ //Already tried working through this: https://github.com/camilamacedo86/operator-sdk/blob/e40d7db97f0d132333b1e46ddf7b7f3cab1e379f/doc/user/unit-testing.md with no luck
+
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ gtpCache = &globalTrafficCache{}
+ gtpCache.identityCache = make(map[string]*v1.GlobalTrafficPolicy)
+ gtpCache.mutex = &sync.Mutex{}
+ handler.RemoteRegistry.AdmiralCache.GlobalTrafficCache = gtpCache
+ handler.Added(ctx, c.addedRollout)
+ ns := handler.RemoteRegistry.AdmiralCache.IdentityClusterNamespaceCache.Get("bar").Get("cluster-1").GetKeys()[0]
+ if ns != "namespace" {
+ t.Errorf("expected namespace: %v but got %v", "namespace", ns)
+ }
+ handler.Deleted(ctx, c.addedRollout)
+ handler.Updated(ctx, c.addedRollout)
+ })
+ }
+}
+
+func newFakeRollout(name, namespace string, matchLabels map[string]string) *argo.Rollout {
+ return &argo.Rollout{
+ ObjectMeta: metaV1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ },
+ Spec: argo.RolloutSpec{
+ Selector: &metaV1.LabelSelector{
+ MatchLabels: matchLabels,
+ },
+ },
+ }
+}
+
+type fakeHandleEventForRollout struct {
+ handleEventForRolloutFunc func() HandleEventForRolloutFunc
+ calledByRolloutName map[string]bool
+ calledRolloutByNamespace map[string]map[string]bool
+}
+
+func (f *fakeHandleEventForRollout) CalledRolloutForNamespace(name, namespace string) bool {
+ if f.calledRolloutByNamespace[namespace] != nil {
+ return f.calledRolloutByNamespace[namespace][name]
+ }
+ return false
+}
+
+func newFakeHandleEventForRolloutsByError(errByRollout map[string]map[string]error) *fakeHandleEventForRollout {
+ f := &fakeHandleEventForRollout{
+ calledRolloutByNamespace: make(map[string]map[string]bool, 0),
+ }
+ f.handleEventForRolloutFunc = func() HandleEventForRolloutFunc {
+ return func(
+ ctx context.Context,
+ event admiral.EventType,
+ rollout *argo.Rollout,
+ remoteRegistry *RemoteRegistry,
+ clusterName string) error {
+ if f.calledRolloutByNamespace[rollout.Namespace] == nil {
+ f.calledRolloutByNamespace[rollout.Namespace] = map[string]bool{
+ rollout.Name: true,
+ }
+ } else {
+ f.calledRolloutByNamespace[rollout.Namespace][rollout.Name] = true
+ }
+
+ return errByRollout[rollout.Namespace][rollout.Name]
+ }
+ }
+ return f
+}
From a77f838b04b780702afca2af3df4736506e6fd21 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:17:18 -0400
Subject: [PATCH 155/235] copied admiral/pkg/clusters/routingpolicy_handler.go
changes from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/routingpolicy_handler.go | 241 ++++++++++++++++++
1 file changed, 241 insertions(+)
create mode 100644 admiral/pkg/clusters/routingpolicy_handler.go
diff --git a/admiral/pkg/clusters/routingpolicy_handler.go b/admiral/pkg/clusters/routingpolicy_handler.go
new file mode 100644
index 00000000..4b285126
--- /dev/null
+++ b/admiral/pkg/clusters/routingpolicy_handler.go
@@ -0,0 +1,241 @@
+package clusters
+
+import (
+ "context"
+ "errors"
+ "sync"
+
+ commonUtil "github.com/istio-ecosystem/admiral/admiral/pkg/util"
+
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ log "github.com/sirupsen/logrus"
+ metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+type RoutingPolicyHandler struct {
+ RemoteRegistry *RemoteRegistry
+ ClusterID string
+}
+
+type routingPolicyCache struct {
+ // map of routing policies key=environment.identity, value: RoutingPolicy object
+ // only one routing policy per identity + env is allowed
+ identityCache map[string]*v1.RoutingPolicy
+ mutex *sync.Mutex
+}
+
+func (r *routingPolicyCache) Delete(identity string, environment string) {
+ defer r.mutex.Unlock()
+ r.mutex.Lock()
+ key := common.ConstructRoutingPolicyKey(environment, identity)
+ if _, ok := r.identityCache[key]; ok {
+ log.Infof("deleting RoutingPolicy with key=%s from global RoutingPolicy cache", key)
+ delete(r.identityCache, key)
+ }
+}
+
+func (r *routingPolicyCache) GetFromIdentity(identity string, environment string) *v1.RoutingPolicy {
+ defer r.mutex.Unlock()
+ r.mutex.Lock()
+ return r.identityCache[common.ConstructRoutingPolicyKey(environment, identity)]
+}
+
+func (r *routingPolicyCache) Put(rp *v1.RoutingPolicy) error {
+ if rp == nil || rp.Name == "" {
+ // no RoutingPolicy, throw error
+ return errors.New("cannot add an empty RoutingPolicy to the cache")
+ }
+ if rp.Labels == nil {
+ return errors.New("labels empty in RoutingPolicy")
+ }
+ defer r.mutex.Unlock()
+ r.mutex.Lock()
+ var rpIdentity = rp.Labels[common.GetRoutingPolicyLabel()]
+ var rpEnv = common.GetRoutingPolicyEnv(rp)
+
+ log.Infof("Adding RoutingPolicy with name %v to RoutingPolicy cache. LabelMatch=%v env=%v", rp.Name, rpIdentity, rpEnv)
+ key := common.ConstructRoutingPolicyKey(rpEnv, rpIdentity)
+ r.identityCache[key] = rp
+
+ return nil
+}
+
+type routingPolicyFilterCache struct {
+ // map of envoyFilters key=routingpolicyName+identity+environment of the routingPolicy, value is a map [clusterId -> map [filterName -> filterNameSpace]]
+ filterCache map[string]map[string]map[string]string
+ mutex *sync.Mutex
+}
+
+/*
+Get - returns the envoyFilters for a given identity(rpName+identity)+env key
+*/
+func (r *routingPolicyFilterCache) Get(identityEnvKey string) (filters map[string]map[string]string) {
+ defer r.mutex.Unlock()
+ r.mutex.Lock()
+ return r.filterCache[identityEnvKey]
+}
+
+/*
+Put - updates the cache for filters, where it uses identityEnvKey, clusterID, and filterName as the key, and filterNamespace as the value
+*/
+func (r *routingPolicyFilterCache) Put(identityEnvKey string, clusterId string, filterName string, filterNamespace string) {
+ defer r.mutex.Unlock()
+ r.mutex.Lock()
+ if r.filterCache[identityEnvKey] == nil {
+ r.filterCache[identityEnvKey] = make(map[string]map[string]string)
+ }
+
+ if r.filterCache[identityEnvKey][clusterId] == nil {
+ r.filterCache[identityEnvKey][clusterId] = make(map[string]string)
+ }
+ r.filterCache[identityEnvKey][clusterId][filterName] = filterNamespace
+}
+
+func (r *routingPolicyFilterCache) Delete(identityEnvKey string) {
+ if commonUtil.IsAdmiralReadOnly() {
+ log.Infof(LogFormat, admiral.Delete, "routingpolicy", identityEnvKey, "", "skipping read-only mode")
+ return
+ }
+ if common.GetEnableRoutingPolicy() {
+ defer r.mutex.Unlock()
+ r.mutex.Lock()
+ // delete all envoyFilters for a given identity+env key
+ delete(r.filterCache, identityEnvKey)
+ } else {
+ log.Infof(LogFormat, admiral.Delete, "routingpolicy", identityEnvKey, "", "routingpolicy disabled")
+ }
+}
+func (r RoutingPolicyHandler) Added(ctx context.Context, obj *v1.RoutingPolicy) error {
+ if commonUtil.IsAdmiralReadOnly() {
+ log.Infof(LogFormat, admiral.Add, "routingpolicy", "", "", "skipping read-only mode")
+ return nil
+ }
+ if common.GetEnableRoutingPolicy() {
+ if common.ShouldIgnoreResource(obj.ObjectMeta) {
+ log.Infof("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", "admiralIoIgnoreAnnotationCheck", common.RoutingPolicyResourceType,
+ obj.Name, obj.Namespace, "", "Value=true")
+ log.Infof(LogFormat, "success", "routingpolicy", obj.Name, "", "Ignored the RoutingPolicy because of the annotation")
+ return nil
+ }
+ dependents := getDependents(obj, r)
+ if len(dependents) == 0 {
+ log.Info("No dependents found for Routing Policy - ", obj.Name)
+ return nil
+ }
+ err := r.processroutingPolicy(ctx, dependents, obj, admiral.Add)
+ if err != nil {
+ log.Errorf(LogErrFormat, admiral.Update, "routingpolicy", obj.Name, "", "failed to process routing policy")
+ return err
+ }
+ log.Infof(LogFormat, admiral.Add, "routingpolicy", obj.Name, "", "finished processing routing policy")
+ } else {
+ log.Infof(LogFormat, admiral.Add, "routingpolicy", obj.Name, "", "routingpolicy disabled")
+ }
+ return nil
+}
+
+func (r RoutingPolicyHandler) processroutingPolicy(ctx context.Context, dependents map[string]string, routingPolicy *v1.RoutingPolicy, eventType admiral.EventType) error {
+ var err error
+ for _, remoteController := range r.RemoteRegistry.remoteControllers {
+ for _, dependent := range dependents {
+ // Check if the dependent exists in this remoteCluster. If so, we create an envoyFilter with dependent identity as workload selector
+ if _, ok := r.RemoteRegistry.AdmiralCache.IdentityClusterCache.Get(dependent).Copy()[remoteController.ClusterID]; ok {
+ _, err1 := createOrUpdateEnvoyFilter(ctx, remoteController, routingPolicy, eventType, dependent, r.RemoteRegistry.AdmiralCache)
+ if err1 != nil {
+ log.Errorf(LogErrFormat, eventType, "routingpolicy", routingPolicy.Name, remoteController.ClusterID, err)
+ err = common.AppendError(err, err1)
+ } else {
+ log.Infof(LogFormat, eventType, "routingpolicy ", routingPolicy.Name, remoteController.ClusterID, "created envoyfilters")
+ }
+ }
+ }
+ }
+ return err
+}
+
+func (r RoutingPolicyHandler) Updated(ctx context.Context, obj *v1.RoutingPolicy) error {
+ if commonUtil.IsAdmiralReadOnly() {
+ log.Infof(LogFormat, admiral.Update, "routingpolicy", "", "", "skipping read-only mode")
+ return nil
+ }
+ if common.GetEnableRoutingPolicy() {
+ if common.ShouldIgnoreResource(obj.ObjectMeta) {
+ log.Infof("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", "admiralIoIgnoreAnnotationCheck", common.RoutingPolicyResourceType,
+ obj.Name, obj.Namespace, "", "Value=true")
+ log.Infof(LogFormat, admiral.Update, "routingpolicy", obj.Name, "", "Ignored the RoutingPolicy because of the annotation")
+ // We need to process this as a delete event.
+ r.Deleted(ctx, obj)
+ return nil
+ }
+ dependents := getDependents(obj, r)
+ if len(dependents) == 0 {
+ return nil
+ }
+ err := r.processroutingPolicy(ctx, dependents, obj, admiral.Update)
+ if err != nil {
+ log.Errorf(LogErrFormat, admiral.Update, "routingpolicy", obj.Name, "", "failed to process routing policy")
+ return err
+ }
+ log.Infof(LogFormat, admiral.Update, "routingpolicy", obj.Name, "", "updated routing policy")
+ } else {
+ log.Infof(LogFormat, admiral.Update, "routingpolicy", obj.Name, "", "routingpolicy disabled")
+ }
+ return nil
+}
+
+// getDependents - Returns the client dependents for the destination service with routing policy
+// Returns a list of asset IDs of the client services or nil if no dependents are found
+func getDependents(obj *v1.RoutingPolicy, r RoutingPolicyHandler) map[string]string {
+ sourceIdentity := common.GetRoutingPolicyIdentity(obj)
+ if len(sourceIdentity) == 0 {
+ err := errors.New("identity label is missing")
+ log.Warnf(LogErrFormat, "add", "RoutingPolicy", obj.Name, r.ClusterID, err)
+ return nil
+ }
+
+ dependents := r.RemoteRegistry.AdmiralCache.IdentityDependencyCache.Get(sourceIdentity).Copy()
+ return dependents
+}
+
+/*
+Deleted - deletes the envoyFilters for the routingPolicy when delete event received for routing policy
+*/
+func (r RoutingPolicyHandler) Deleted(ctx context.Context, obj *v1.RoutingPolicy) error {
+ err := r.deleteEnvoyFilters(ctx, obj, admiral.Delete)
+ if err != nil {
+ log.Infof(LogFormat, admiral.Delete, "routingpolicy", obj.Name, "", "deleted envoy filter for routing policy")
+ }
+ return err
+}
+
+func (r RoutingPolicyHandler) deleteEnvoyFilters(ctx context.Context, obj *v1.RoutingPolicy, eventType admiral.EventType) error {
+ key := obj.Name + common.GetRoutingPolicyIdentity(obj) + common.GetRoutingPolicyEnv(obj)
+ if r.RemoteRegistry == nil || r.RemoteRegistry.AdmiralCache == nil || r.RemoteRegistry.AdmiralCache.RoutingPolicyFilterCache == nil {
+ log.Infof(LogFormat, eventType, "routingpolicy", obj.Name, "", "skipping delete event as cache is nil")
+ return nil
+ }
+ clusterIdFilterMap := r.RemoteRegistry.AdmiralCache.RoutingPolicyFilterCache.Get(key) // RoutingPolicyFilterCache key=rpname+rpidentity+environment of the routingPolicy, value is a map [clusterId -> map [filterName -> filterNameSpace]]
+ var err error
+ for _, rc := range r.RemoteRegistry.remoteControllers {
+ if rc != nil {
+ if filterMap, ok := clusterIdFilterMap[rc.ClusterID]; ok {
+ for filter, filterNs := range filterMap {
+ log.Infof(LogFormat, eventType, "envoyfilter", filter, rc.ClusterID, "deleting")
+ err1 := rc.RoutingPolicyController.IstioClient.NetworkingV1alpha3().EnvoyFilters(filterNs).Delete(ctx, filter, metaV1.DeleteOptions{})
+ if err1 != nil {
+ log.Errorf(LogErrFormat, eventType, "envoyfilter", filter, rc.ClusterID, err1)
+ err = common.AppendError(err, err1)
+ } else {
+ log.Infof(LogFormat, eventType, "envoyfilter", filter, rc.ClusterID, "deleting from cache")
+ }
+ }
+ }
+ }
+ }
+ if err == nil {
+ r.RemoteRegistry.AdmiralCache.RoutingPolicyFilterCache.Delete(key)
+ }
+ return err
+}
From 0cf10e46cc7f14875391bec92298bf28e97d3f84 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:17:37 -0400
Subject: [PATCH 156/235] copied
admiral/pkg/clusters/routingpolicy_handler_test.go changes from master
Signed-off-by: Shriram Sharma
---
.../clusters/routingpolicy_handler_test.go | 286 ++++++++++++++++++
1 file changed, 286 insertions(+)
create mode 100644 admiral/pkg/clusters/routingpolicy_handler_test.go
diff --git a/admiral/pkg/clusters/routingpolicy_handler_test.go b/admiral/pkg/clusters/routingpolicy_handler_test.go
new file mode 100644
index 00000000..6e40d353
--- /dev/null
+++ b/admiral/pkg/clusters/routingpolicy_handler_test.go
@@ -0,0 +1,286 @@
+package clusters
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "os"
+ "reflect"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ commonUtil "github.com/istio-ecosystem/admiral/admiral/pkg/util"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model"
+ istiofake "istio.io/client-go/pkg/clientset/versioned/fake"
+
+ admiralV1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ log "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+ metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestRoutingPolicyHandler(t *testing.T) {
+ common.ResetSync()
+ p := common.AdmiralParams{
+ KubeconfigPath: "testdata/fake.config",
+ LabelSet: &common.LabelSet{
+ DeploymentAnnotation: "sidecar.istio.io/inject",
+ },
+ EnableSAN: true,
+ SANPrefix: "prefix",
+ HostnameSuffix: "mesh",
+ SyncNamespace: "ns",
+ CacheReconcileDuration: time.Minute,
+ ClusterRegistriesNamespace: "default",
+ DependenciesNamespace: "default",
+ EnableRoutingPolicy: true,
+ EnvoyFilterVersion: "1.13",
+ Profile: common.AdmiralProfileDefault,
+ }
+
+ p.LabelSet.WorkloadIdentityKey = "identity"
+ p.LabelSet.EnvKey = "admiral.io/env"
+ p.LabelSet.AdmiralCRDIdentityLabel = "identity"
+
+ registry, _ := InitAdmiral(context.Background(), p)
+
+ handler := RoutingPolicyHandler{}
+
+ rpFilterCache := &routingPolicyFilterCache{}
+ rpFilterCache.filterCache = make(map[string]map[string]map[string]string)
+ rpFilterCache.mutex = &sync.Mutex{}
+
+ routingPolicyController := &admiral.RoutingPolicyController{IstioClient: istiofake.NewSimpleClientset()}
+ remoteController, _ := createMockRemoteController(func(i interface{}) {
+
+ })
+
+ remoteController.RoutingPolicyController = routingPolicyController
+
+ registry.remoteControllers = map[string]*RemoteController{"cluster-1": remoteController}
+ registry.AdmiralCache.RoutingPolicyFilterCache = rpFilterCache
+
+ // foo is dependent upon bar and bar has a deployment in the same cluster.
+ registry.AdmiralCache.IdentityDependencyCache.Put("foo", "bar", "bar")
+ registry.AdmiralCache.IdentityClusterCache.Put("bar", remoteController.ClusterID, remoteController.ClusterID)
+
+ // foo is also dependent upon bar2 but bar2 is in a different cluster, so this cluster should not have the envoyfilter created
+ registry.AdmiralCache.IdentityDependencyCache.Put("foo2", "bar2", "bar2")
+ registry.AdmiralCache.IdentityClusterCache.Put("bar2", "differentCluster", "differentCluster")
+
+ // foo1 is dependent upon bar1 but bar1 does not have a deployment so it is missing from identityClusterCache
+ registry.AdmiralCache.IdentityDependencyCache.Put("foo1", "bar1", "bar1")
+
+ handler.RemoteRegistry = registry
+
+ routingPolicyFoo := &admiralV1.RoutingPolicy{
+ TypeMeta: metaV1.TypeMeta{},
+ ObjectMeta: metaV1.ObjectMeta{
+ Name: "rpfoo",
+ Labels: map[string]string{
+ "identity": "foo",
+ "admiral.io/env": "dev",
+ },
+ },
+ Spec: model.RoutingPolicy{
+ Plugin: "test",
+ Hosts: []string{"e2e.testservice.mesh"},
+ Config: map[string]string{
+ "cachePrefix": "cache-v1",
+ "cachettlSec": "86400",
+ "routingServiceUrl": "e2e.test.routing.service.mesh",
+ "pathPrefix": "/sayhello,/v1/company/{id}/",
+ },
+ },
+ Status: admiralV1.RoutingPolicyStatus{},
+ }
+
+ routingPolicyFooTest := &admiralV1.RoutingPolicy{
+ TypeMeta: metaV1.TypeMeta{},
+ ObjectMeta: metaV1.ObjectMeta{
+ Name: "rpfoo",
+ Labels: map[string]string{
+ "identity": "foo",
+ "admiral.io/env": "dev",
+ },
+ },
+ Spec: model.RoutingPolicy{
+ Plugin: "test",
+ Hosts: []string{"e2e.testservice.mesh"},
+ Config: map[string]string{
+ "routingServiceUrl": "e2e.test.routing.service.mesh",
+ },
+ },
+ Status: admiralV1.RoutingPolicyStatus{},
+ }
+
+ routingPolicyFoo1 := routingPolicyFoo.DeepCopy()
+ routingPolicyFoo1.Labels[common.GetWorkloadIdentifier()] = "foo1"
+
+ routingPolicyFoo2 := routingPolicyFoo.DeepCopy()
+ routingPolicyFoo2.Labels[common.GetWorkloadIdentifier()] = "foo2"
+
+ testCases := []struct {
+ name string
+ routingPolicy *admiralV1.RoutingPolicy
+ expectedFilterCacheKey string
+ expectedFilterCount int
+ expectedEnvoyFilterConfigPatchVal map[string]interface{}
+ }{
+ {
+ name: "If dependent deployment exists, should fetch filter from cache",
+ routingPolicy: routingPolicyFooTest,
+ expectedFilterCacheKey: "rpfoofoodev",
+ expectedFilterCount: 1,
+ expectedEnvoyFilterConfigPatchVal: map[string]interface{}{"name": "dynamicRoutingFilterPatch", "typed_config": map[string]interface{}{
+ "@type": "type.googleapis.com/udpa.type.v1.TypedStruct", "type_url": "type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm",
+ "value": map[string]interface{}{
+ "config": map[string]interface{}{
+ "configuration": map[string]interface{}{
+ "@type": "type.googleapis.com/google.protobuf.StringValue",
+ "value": "routingServiceUrl: e2e.test.routing.service.mesh\nhosts: e2e.testservice.mesh\nplugin: test"},
+ "vm_config": map[string]interface{}{"code": map[string]interface{}{"local": map[string]interface{}{"filename": ""}}, "runtime": "envoy.wasm.runtime.v8", "vm_id": "test-dr-532221909d5db54fe5f5-f6ce3712830af1b15625-1.13"}}}}},
+ },
+ {
+ name: "If dependent deployment does not exist, the filter should not be created ",
+ routingPolicy: routingPolicyFoo1,
+ expectedFilterCacheKey: "rpfoofoodev",
+ expectedFilterCount: 0,
+ },
+ {
+ name: "If dependent deployment exists in a different cluster, the filter should not be created in cluster where dependency isnt there",
+ routingPolicy: routingPolicyFoo2,
+ expectedFilterCacheKey: "rpfoofoodev",
+ expectedFilterCount: 0,
+ },
+ }
+
+ ctx := context.Background()
+
+ time.Sleep(time.Second * 30)
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ handler.Added(ctx, c.routingPolicy)
+ if c.expectedFilterCount > 0 {
+ filterCacheValue := registry.AdmiralCache.RoutingPolicyFilterCache.Get(c.expectedFilterCacheKey)
+ assert.NotNil(t, filterCacheValue)
+ routingPolicyNameSha, _ := getSha1(c.routingPolicy.Name + common.GetRoutingPolicyEnv(c.routingPolicy) + common.GetRoutingPolicyIdentity(c.routingPolicy))
+ dependentIdentitySha, _ := getSha1("bar")
+ envoyFilterName := fmt.Sprintf("%s-dr-%s-%s-%s", strings.ToLower(c.routingPolicy.Spec.Plugin), routingPolicyNameSha, dependentIdentitySha, "1.13")
+
+ filterMap := filterCacheValue[remoteController.ClusterID]
+ assert.NotNil(t, filterMap)
+ assert.NotNil(t, filterMap[envoyFilterName])
+
+ filter, err := remoteController.RoutingPolicyController.IstioClient.NetworkingV1alpha3().
+ EnvoyFilters("istio-system").Get(ctx, envoyFilterName, metaV1.GetOptions{})
+ assert.Nil(t, err)
+ assert.NotNil(t, filter)
+ }
+ //get envoyfilters from all namespaces
+ list1, _ := remoteController.RoutingPolicyController.IstioClient.NetworkingV1alpha3().EnvoyFilters("istio-system").List(ctx, metaV1.ListOptions{})
+ assert.Equal(t, c.expectedFilterCount, len(list1.Items))
+ if c.expectedFilterCount > 0 {
+ receivedEnvoyFilter, _ := remoteController.RoutingPolicyController.IstioClient.NetworkingV1alpha3().EnvoyFilters("istio-system").Get(ctx, "test-dr-532221909d5db54fe5f5-f6ce3712830af1b15625-1.13", metaV1.GetOptions{})
+ eq := reflect.DeepEqual(c.expectedEnvoyFilterConfigPatchVal, receivedEnvoyFilter.Spec.ConfigPatches[0].Patch.Value.AsMap())
+ assert.True(t, eq)
+ }
+
+ // once the routing policy is deleted, the corresponding filter should also be deleted
+ handler.Deleted(ctx, c.routingPolicy)
+ assert.Nil(t, registry.AdmiralCache.RoutingPolicyFilterCache.Get(c.expectedFilterCacheKey))
+ })
+ }
+
+ // ignore the routing policy
+ annotations := routingPolicyFoo.GetAnnotations()
+ if annotations == nil {
+ annotations = make(map[string]string)
+ }
+ annotations[common.AdmiralIgnoreAnnotation] = "true"
+ routingPolicyFoo.SetAnnotations(annotations)
+
+ handler.Updated(ctx, routingPolicyFoo)
+ assert.Nil(t, registry.AdmiralCache.RoutingPolicyFilterCache.Get("rpfoofoodev"))
+}
+
+func TestRoutingPolicyReadOnly(t *testing.T) {
+ p := common.AdmiralParams{
+ KubeconfigPath: "testdata/fake.config",
+ LabelSet: &common.LabelSet{},
+ EnableSAN: true,
+ SANPrefix: "prefix",
+ HostnameSuffix: "mesh",
+ SyncNamespace: "ns",
+ CacheReconcileDuration: time.Minute,
+ ClusterRegistriesNamespace: "default",
+ DependenciesNamespace: "default",
+ EnableRoutingPolicy: true,
+ EnvoyFilterVersion: "1.13",
+ }
+
+ p.LabelSet.WorkloadIdentityKey = "identity"
+ p.LabelSet.EnvKey = "admiral.io/env"
+ p.LabelSet.AdmiralCRDIdentityLabel = "identity"
+
+ handler := RoutingPolicyHandler{}
+
+ testcases := []struct {
+ name string
+ rp *admiralV1.RoutingPolicy
+ readOnly bool
+ doesError bool
+ }{
+ {
+ name: "Readonly test - Routing Policy",
+ rp: &admiralV1.RoutingPolicy{},
+ readOnly: true,
+ doesError: true,
+ },
+ {
+ name: "Readonly false test - Routing Policy",
+ rp: &admiralV1.RoutingPolicy{},
+ readOnly: false,
+ doesError: false,
+ },
+ }
+
+ ctx := context.Background()
+
+ for _, c := range testcases {
+ t.Run(c.name, func(t *testing.T) {
+ if c.readOnly {
+ commonUtil.CurrentAdmiralState.ReadOnly = true
+ } else {
+ commonUtil.CurrentAdmiralState.ReadOnly = false
+ }
+ var buf bytes.Buffer
+ log.SetOutput(&buf)
+ defer func() {
+ log.SetOutput(os.Stderr)
+ }()
+ // Add routing policy test
+ handler.Added(ctx, c.rp)
+ t.Log(buf.String())
+ val := strings.Contains(buf.String(), "skipping read-only mode")
+ assert.Equal(t, c.doesError, val)
+
+ // Update routing policy test
+ handler.Updated(ctx, c.rp)
+ t.Log(buf.String())
+ val = strings.Contains(buf.String(), "skipping read-only mode")
+ assert.Equal(t, c.doesError, val)
+
+ // Delete routing policy test
+ handler.Deleted(ctx, c.rp)
+ t.Log(buf.String())
+ val = strings.Contains(buf.String(), "skipping read-only mode")
+ assert.Equal(t, c.doesError, val)
+ })
+ }
+}
From 06ac66140e2d14d1955be63e65f8a48bf5caaa5e Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:19:49 -0400
Subject: [PATCH 157/235] copied admiral/pkg/clusters/serviceEntrySuspender.go
changes from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/serviceEntrySuspender.go | 31 ++++++++++++++-----
1 file changed, 23 insertions(+), 8 deletions(-)
diff --git a/admiral/pkg/clusters/serviceEntrySuspender.go b/admiral/pkg/clusters/serviceEntrySuspender.go
index 19b4b237..c83f8301 100644
--- a/admiral/pkg/clusters/serviceEntrySuspender.go
+++ b/admiral/pkg/clusters/serviceEntrySuspender.go
@@ -1,26 +1,39 @@
package clusters
import (
+ "context"
"sync"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
log "github.com/sirupsen/logrus"
)
const (
+ serviceEntrySuspenderLogPrefix = "op=serviceEntrySuspender message="
// Alert logs
- alertMsgSuspensionEnabled = "op=dynamicEndpointSuspension message=endpoint generation suspension is enabled." +
- "this does not mean that endpoint generation will be suspended. " +
+ alertMsgSuspensionEnabled = serviceEntrySuspenderLogPrefix + "service entry update suspension is enabled. " +
+ "this does not mean that service entry updates will not happen. " +
"it will depend on the suspension list, which can include all identities " +
"for all environments, OR certain identities for all or certain environments"
- alertMsgSuspensionForAll = "op=dynamicEndpointSuspension message=endpoint generation suspended for all"
- alertMsgSuspensionForIdentityInAllEnvironments = "op=dynamicEndpointSuspension message=endpoint generation suspended for identity across all environments"
- alertMsgSuspensionForIdentityInMatchingEnvironment = "op=dynamicEndpointSuspension message=endpoint generation suspended for identity for given environment"
+ alertMsgSuspensionForAll = serviceEntrySuspenderLogPrefix + "service entry update is suspended for all"
+ alertMsgSuspensionForIdentityInAllEnvironments = serviceEntrySuspenderLogPrefix + "service entry update is suspended for identity across all environments"
+ alertMsgSuspensionForIdentityInMatchingEnvironment = serviceEntrySuspenderLogPrefix + "service entry update is suspended for identity for given environment"
)
type serviceEntrySuspender struct {
ignoredIdentityCache *IgnoredIdentityCache
}
// NewDynamicServiceEntrySuspender builds a serviceEntrySuspender whose
// ignored-identity cache is refreshed dynamically rather than from a static
// list: the cache is registered with an admiral state checker which is then
// started in the background.
func NewDynamicServiceEntrySuspender(ctx context.Context, params common.AdmiralParams) *serviceEntrySuspender {
	var cache = &IgnoredIdentityCache{
		RWLock: &sync.RWMutex{},
	}
	// initAdmiralStateChecker/RunAdmiralStateCheck are defined elsewhere in the
	// package; the checker is expected to populate and refresh the cache
	// asynchronously for the lifetime of ctx.
	stateChecker := initAdmiralStateChecker(ctx, ignoreIdentityChecker, params.AdmiralConfig)
	stateChecker.initStateCache(cache)
	RunAdmiralStateCheck(ctx, ignoreIdentityChecker, stateChecker)
	return &serviceEntrySuspender{ignoredIdentityCache: cache}
}
+
func NewDefaultServiceEntrySuspender(items []string) *serviceEntrySuspender {
var (
enabled bool
@@ -55,21 +68,23 @@ func (des *serviceEntrySuspender) SuspendUpdate(identity, environment string) bo
func (des *serviceEntrySuspender) enabled() bool {
if des.ignoredIdentityCache.Enabled {
log.Println(alertMsgSuspensionEnabled)
+ } else {
+ log.Println(serviceEntrySuspenderLogPrefix + "service entry update suspension is not enabled")
}
- log.Println("op=dynamicEndpointSuspension message=endpoint generation suspension is not enabled")
return des.ignoredIdentityCache.Enabled
}
func (des *serviceEntrySuspender) all() bool {
if des.ignoredIdentityCache.All {
log.Println(alertMsgSuspensionForAll)
+ } else {
+ log.Println(serviceEntrySuspenderLogPrefix + "service entry update suspension for 'all' identities is not enabled")
}
- log.Println("op=dynamicEndpointSuspension message=endpoint generation suspension for 'all' identities is not enabled")
return des.ignoredIdentityCache.All
}
func (des *serviceEntrySuspender) identityByEnvironment(identity, environment string) bool {
- log.Printf("op=dynamicEndpointSuspension message=checking if identity %s in environment %s is in the suspension list",
+ log.Printf(serviceEntrySuspenderLogPrefix+"checking if identity %s in environment %s is in the suspension list",
identity, environment)
des.ignoredIdentityCache.RWLock.RLock()
defer des.ignoredIdentityCache.RWLock.RUnlock()
From ab049fdd6a74b76fe138b1038ecb1a5bf655cad9 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:20:13 -0400
Subject: [PATCH 158/235] copied admiral/pkg/clusters/service_handler.go
changes from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/service_handler.go | 270 ++++++++++++++++++++++++
1 file changed, 270 insertions(+)
create mode 100644 admiral/pkg/clusters/service_handler.go
diff --git a/admiral/pkg/clusters/service_handler.go b/admiral/pkg/clusters/service_handler.go
new file mode 100644
index 00000000..874ebd58
--- /dev/null
+++ b/admiral/pkg/clusters/service_handler.go
@@ -0,0 +1,270 @@
+package clusters
+
+import (
+ "context"
+ "fmt"
+
+ rolloutsV1Alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ log "github.com/sirupsen/logrus"
+ appsV1 "k8s.io/api/apps/v1"
+ coreV1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
// ServiceHandler responds to create, update and delete events for
// kubernetes Services observed on a single remote cluster.
type ServiceHandler struct {
	// RemoteRegistry holds the controllers and caches for all known clusters.
	RemoteRegistry *RemoteRegistry
	// ClusterID identifies the cluster this handler receives events from.
	ClusterID string
}
+
+func (sh *ServiceHandler) Added(ctx context.Context, obj *coreV1.Service) error {
+ log.Infof(LogFormat, common.Add, common.ServiceResourceType, obj.Name, sh.ClusterID, common.ReceivedStatus)
+ ctx = context.WithValue(ctx, common.EventType, admiral.Add)
+ err := handleEventForService(ctx, obj, sh.RemoteRegistry, sh.ClusterID)
+ if err != nil {
+ return fmt.Errorf(LogErrFormat, common.Add, common.ServiceResourceType, obj.Name, sh.ClusterID, err)
+ }
+ return nil
+}
+
+func (sh *ServiceHandler) Updated(ctx context.Context, obj *coreV1.Service) error {
+ log.Infof(LogFormat, common.Update, common.ServiceResourceType, obj.Name, sh.ClusterID, common.ReceivedStatus)
+ ctx = context.WithValue(ctx, common.EventType, admiral.Update)
+ err := handleEventForService(ctx, obj, sh.RemoteRegistry, sh.ClusterID)
+ if err != nil {
+ return fmt.Errorf(LogErrFormat, common.Update, common.ServiceResourceType, obj.Name, sh.ClusterID, err)
+ }
+ return nil
+}
+
+func (sh *ServiceHandler) Deleted(ctx context.Context, obj *coreV1.Service) error {
+ log.Infof(LogFormat, common.Delete, common.ServiceResourceType, obj.Name, sh.ClusterID, common.ReceivedStatus)
+ ctx = context.WithValue(ctx, common.EventType, admiral.Delete)
+ err := handleEventForService(ctx, obj, sh.RemoteRegistry, sh.ClusterID)
+ if err != nil {
+ return fmt.Errorf(LogErrFormat, common.Delete, common.ServiceResourceType, obj.Name, sh.ClusterID, err)
+ }
+ return nil
+}
+
+func handleEventForService(
+ ctx context.Context,
+ svc *coreV1.Service,
+ remoteRegistry *RemoteRegistry,
+ clusterName string) error {
+ if svc.Spec.Selector == nil {
+ return fmt.Errorf("selector missing on service=%s in namespace=%s cluster=%s", svc.Name, svc.Namespace, clusterName)
+ }
+
+ rc := remoteRegistry.GetRemoteController(clusterName)
+ if rc == nil {
+ return fmt.Errorf("could not find the remote controller for cluster=%s", clusterName)
+ }
+
+ var handleSvcEventError error
+ deploymentController := rc.DeploymentController
+ rolloutController := rc.RolloutController
+ serviceController := rc.ServiceController
+
+ if deploymentController != nil && serviceController != nil {
+ err := handleServiceEventForDeployment(ctx, svc, remoteRegistry, clusterName, deploymentController, serviceController, HandleEventForDeployment)
+ if err != nil {
+ handleSvcEventError = common.AppendError(handleSvcEventError, err)
+ }
+ }
+
+ if common.GetAdmiralParams().ArgoRolloutsEnabled && rolloutController != nil && serviceController != nil {
+ err := handleServiceEventForRollout(ctx, svc, remoteRegistry, clusterName, rolloutController, serviceController, HandleEventForRollout)
+ if err != nil {
+ handleSvcEventError = common.AppendError(handleSvcEventError, err)
+ }
+ }
+
+ return handleSvcEventError
+}
+
// handleServiceEventForDeployment reconciles the deployments affected by a
// change to svc by invoking deploymentHandler for each of them. For the istio
// ingressgateway service every deployment in the cluster is reconciled; for
// any other service only the deployments in the service's namespace are.
// Errors from individual deployments are accumulated and returned together.
func handleServiceEventForDeployment(
	ctx context.Context,
	svc *coreV1.Service,
	remoteRegistry *RemoteRegistry,
	clusterName string,
	deployController *admiral.DeploymentController,
	serviceController *admiral.ServiceController,
	deploymentHandler HandleEventForDeploymentFunc) error {
	var (
		allErrors   error
		deployments []appsV1.Deployment
	)

	// The event type is carried in the context by the ServiceHandler callbacks.
	eventType, ok := ctx.Value(common.EventType).(admiral.EventType)
	if !ok {
		return fmt.Errorf(AlertLogMsg, ctx.Value(common.EventType))
	}

	if common.IsIstioIngressGatewayService(svc) {
		// The eventType is overridden to admiral.Update. This is mainly
		// for admiral.Delete events sent for the ingress in the cluster
		// else it would delete all the SEs in the source and dependent clusters
		eventType = admiral.Update
		deployments = deployController.Cache.List()
		log.Infof(LogFormat, "Event", "Deployment", "", clusterName,
			fmt.Sprintf("updating %v deployments across the cluster for service %s",
				len(deployments), svc.Name))
	} else {
		deployments = deployController.GetDeploymentBySelectorInNamespace(ctx, svc.Spec.Selector, svc.Namespace)
		log.Infof(LogFormat, "Event", "Deployment", "", clusterName,
			fmt.Sprintf("updating %v deployments across namespace %s for service %s",
				len(deployments), svc.Namespace, svc.Name))
	}

	for _, deployment := range deployments {
		// If the eventType is a admiral.Delete we want to compute if there are any other services associated to the deployment
		// If Yes - We change the eventType to admiral.Update and delete the svc from the cache for which we got an event for. This is
		// done to update the SE with the new endpoints.
		// If No - We are safe to assume that there was only one associated service and the related SE is deleted
		// NOTE: if there is an err returned from checkIfThereAreMultipleMatchingServices we continue to prevent any
		// destructive updates
		// NOTE(review): once eventType is flipped to admiral.Update for one
		// deployment it stays Update for all subsequent iterations, so later
		// deployments skip this multiple-service check — confirm this
		// cross-iteration carry-over is intended.
		if eventType == admiral.Delete {
			multipleSvcExist, err := checkIfThereAreMultipleMatchingServices(svc, serviceController, deployment, clusterName)
			if err != nil {
				allErrors = common.AppendError(allErrors, err)
				continue
			}
			if multipleSvcExist {
				eventType = admiral.Update
				ctx = context.WithValue(ctx, common.EventType, admiral.Update)
				serviceController.Cache.Delete(svc)
			}
		}

		err := deploymentHandler(ctx, eventType, &deployment, remoteRegistry, clusterName)
		if err != nil {
			allErrors = common.AppendError(allErrors, err)
		}
	}

	return allErrors
}
+
// handleServiceEventForRollout reconciles the Argo rollouts affected by a
// change to svc by invoking rolloutHandler for each of them. For the istio
// ingressgateway service every rollout in the cluster is reconciled; for any
// other service only the rollouts in the service's namespace are. Errors from
// individual rollouts are accumulated and returned together.
func handleServiceEventForRollout(
	ctx context.Context,
	svc *coreV1.Service,
	remoteRegistry *RemoteRegistry,
	clusterName string,
	rolloutController *admiral.RolloutController,
	serviceController *admiral.ServiceController,
	rolloutHandler HandleEventForRolloutFunc) error {
	var (
		allErrors error
		rollouts  []rolloutsV1Alpha1.Rollout
	)

	// The event type is carried in the context by the ServiceHandler callbacks.
	eventType, ok := ctx.Value(common.EventType).(admiral.EventType)
	if !ok {
		return fmt.Errorf(AlertLogMsg, ctx.Value(common.EventType))
	}

	if common.IsIstioIngressGatewayService(svc) {
		// The eventType is overridden to admiral.Update. This is mainly
		// for admiral.Delete events sent for the ingress in the cluster
		// else it would delete all the SEs in the source and dependent clusters
		eventType = admiral.Update
		rollouts = rolloutController.Cache.List()
		log.Infof(LogFormat, "Event", "Rollout", "", clusterName,
			fmt.Sprintf("updating %v rollouts across the cluster for service %s",
				len(rollouts), svc.Name))
	} else {
		rollouts = rolloutController.GetRolloutBySelectorInNamespace(ctx, svc.Spec.Selector, svc.Namespace)
		log.Infof(LogFormat, "Event", "Rollout", "", clusterName,
			fmt.Sprintf("updating %v rollouts across namespace %s for service %s",
				len(rollouts), svc.Namespace, svc.Name))
	}

	for _, rollout := range rollouts {
		// If the eventType is a admiral.Delete we want to compute if there are any other services associated to the rollout
		// If Yes - We change the eventType to admiral.Update and delete the svc from the cache for which we got an event for. This is
		// done to update the SE with the new endpoints.
		// If No - We are safe to assume that there was only one associated service and the related SE is deleted
		// NOTE: if there is an err returned from checkIfThereAreMultipleMatchingServices we continue to prevent any
		// destructive updates
		// NOTE(review): once eventType is flipped to admiral.Update for one
		// rollout it stays Update for all subsequent iterations, so later
		// rollouts skip this multiple-service check — confirm this
		// cross-iteration carry-over is intended.
		if eventType == admiral.Delete {
			multipleSvcExist, err := checkIfThereAreMultipleMatchingServices(svc, serviceController, rollout, clusterName)
			if err != nil {
				allErrors = common.AppendError(allErrors, err)
				continue
			}
			if multipleSvcExist {
				eventType = admiral.Update
				ctx = context.WithValue(ctx, common.EventType, admiral.Update)
				serviceController.Cache.Delete(svc)
			}
		}

		err := rolloutHandler(ctx, eventType, &rollout, remoteRegistry, clusterName)
		if err != nil {
			allErrors = common.AppendError(allErrors, err)
		}
	}

	return allErrors
}
+
+// checkIfThereAreMultipleMatchingServices checks if there are multiple matching services in the namespace associated to the deployment/rollout
+func checkIfThereAreMultipleMatchingServices(svc *coreV1.Service, serviceController *admiral.ServiceController, obj interface{}, clusterName string) (bool, error) {
+ var (
+ selector *metav1.LabelSelector
+ appType string
+ ports map[string]uint32
+ )
+
+ matchedServices := make(map[string]bool)
+ cachedServices := serviceController.Cache.Get(svc.Namespace)
+ if cachedServices == nil {
+ return false, fmt.Errorf("service to be deleted does not exist in the cache")
+ }
+
+ switch v := obj.(type) {
+ case rolloutsV1Alpha1.Rollout:
+ selector = v.Spec.Selector
+ appType = common.Rollout
+ case appsV1.Deployment:
+ selector = v.Spec.Selector
+ appType = common.Deployment
+ default:
+ return false, fmt.Errorf("type assertion failed, %v is not of type *v1.Deployment or *argo.Rollout", obj)
+ }
+
+ for _, service := range cachedServices {
+ match := common.IsServiceMatch(service.Spec.Selector, selector)
+ if match {
+ if appType == common.Deployment {
+ deployment, ok := obj.(appsV1.Deployment)
+ if !ok {
+ return false, fmt.Errorf("type assertion failed, %v is not of type *v1.Deployment", obj)
+ }
+ ports = GetMeshPortsForDeployments(clusterName, service, &deployment)
+ } else {
+ rollout, ok := obj.(rolloutsV1Alpha1.Rollout)
+ if !ok {
+ return false, fmt.Errorf("type assertion failed, %v is not of type *argo.Rollout", obj)
+ }
+ ports = GetMeshPortsForRollout(clusterName, service, &rollout)
+ }
+
+ if len(ports) > 0 {
+ matchedServices[service.Name] = true
+ }
+ }
+ }
+
+ // If length of the matched services for a deployment/rollout is greater than 1
+ // or the delete event is received for a service that does not match the deployment/rollout
+ // then return true so that there is an admiral.Update sent rather than admiral.Delete
+ // later in the code
+ if len(matchedServices) > 1 || !matchedServices[svc.Name] {
+ return true, nil
+ }
+
+ return false, nil
+}
From b2e9955d3477dca453731e12674160a967b4a3b2 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:20:31 -0400
Subject: [PATCH 159/235] copied admiral/pkg/clusters/service_handler_test.go
changes from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/service_handler_test.go | 662 +++++++++++++++++++
1 file changed, 662 insertions(+)
create mode 100644 admiral/pkg/clusters/service_handler_test.go
diff --git a/admiral/pkg/clusters/service_handler_test.go b/admiral/pkg/clusters/service_handler_test.go
new file mode 100644
index 00000000..ae71d4e9
--- /dev/null
+++ b/admiral/pkg/clusters/service_handler_test.go
@@ -0,0 +1,662 @@
+package clusters
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/test"
+ "k8s.io/apimachinery/pkg/util/intstr"
+ "k8s.io/client-go/rest"
+
+ argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
+ argoFake "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/fake"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "github.com/stretchr/testify/assert"
+ appsV1 "k8s.io/api/apps/v1"
+ coreV1 "k8s.io/api/core/v1"
+ apiMachineryMetaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ k8sFake "k8s.io/client-go/kubernetes/fake"
+)
+
// TestHandleEventForService covers handleEventForService's guard clauses
// (missing selector, unknown cluster) and both reconciliation paths
// (deployment-only cluster, rollout-only cluster) via a table-driven test.
func TestHandleEventForService(t *testing.T) {
	ctx := context.Background()
	params := common.AdmiralParams{
		KubeconfigPath: "testdata/fake.config",
	}

	registry, _ := InitAdmiral(context.Background(), params)

	// cluster-1 has only a deployment controller; cluster-2 only a rollout
	// controller, so each path can be exercised in isolation.
	deploymentController := &admiral.DeploymentController{K8sClient: k8sFake.NewSimpleClientset(&appsV1.Deployment{})}
	remoteController1, _ := createMockRemoteController(func(i interface{}) {})
	remoteController1.DeploymentController = deploymentController
	remoteController1.RolloutController = nil

	rolloutController := &admiral.RolloutController{RolloutClient: argoFake.NewSimpleClientset(&argo.Rollout{}).ArgoprojV1alpha1()}
	remoteController2, _ := createMockRemoteController(func(i interface{}) {})
	remoteController2.RolloutController = rolloutController
	remoteController2.DeploymentController = nil

	registry.remoteControllers = map[string]*RemoteController{"cluster-1": remoteController1, "cluster-2": remoteController2}

	serviceWithSelector := &coreV1.Service{
		ObjectMeta: apiMachineryMetaV1.ObjectMeta{
			Name:        "testservice",
			Annotations: map[string]string{"admiral.io/env": "testenv"},
			Namespace:   "namespace",
		},
	}
	serviceWithSelector.Spec.Selector = map[string]string{"app": "debug"}

	// Identical to serviceWithSelector except the selector is left nil to
	// trigger the missing-selector guard.
	serviceWithoutSelector := &coreV1.Service{
		ObjectMeta: apiMachineryMetaV1.ObjectMeta{
			Name:        "testservice",
			Annotations: map[string]string{"admiral.io/env": "testenv"},
			Namespace:   "namespace",
		},
	}
	testcases := []struct {
		name               string
		service            *coreV1.Service
		remoteRegistry     *RemoteRegistry
		clusterName        string
		ArgoRolloutEnabled bool
		error              error
	}{
		{
			// NOTE(review): "cluster1" is not a registered cluster, but the
			// selector guard fires before the cluster lookup, so the expected
			// error is still the selector one — confirm this is intentional.
			name:               "if selector of the Service is nil",
			service:            serviceWithoutSelector,
			remoteRegistry:     registry,
			clusterName:        "cluster1",
			ArgoRolloutEnabled: false,
			error:              fmt.Errorf("selector missing on service=%s in namespace=%s cluster=%s", "testservice", "namespace", "cluster1"),
		},
		{
			name:               "if remote controller for the cluster does not exist",
			service:            serviceWithSelector,
			remoteRegistry:     registry,
			clusterName:        "clusterX",
			ArgoRolloutEnabled: false,
			error:              fmt.Errorf("could not find the remote controller for cluster=%s", "clusterX"),
		},
		{
			name:               "if deployment controller is not nil",
			service:            serviceWithSelector,
			remoteRegistry:     registry,
			clusterName:        "cluster-1",
			ArgoRolloutEnabled: false,
			error:              nil,
		},
		{
			name:               "if rollout controller is not nil",
			service:            serviceWithSelector,
			remoteRegistry:     registry,
			clusterName:        "cluster-2",
			ArgoRolloutEnabled: true,
			error:              nil,
		},
	}

	for _, c := range testcases {
		t.Run(c.name, func(t *testing.T) {
			common.SetArgoRolloutsEnabled(c.ArgoRolloutEnabled)
			// NOTE(review): the key is the string literal "eventType" while the
			// production code reads ctx.Value(common.EventType) — this only
			// works if common.EventType is that same plain string; confirm.
			ctx = context.WithValue(ctx, "eventType", admiral.Update)
			err := handleEventForService(ctx, c.service, c.remoteRegistry, c.clusterName)
			if err != nil || c.error != nil {
				assert.Equal(t, err.Error(), c.error.Error())
			} else {
				assert.Equal(t, err, c.error)
			}
		})
	}
}
+
// TestHandleServiceEventForDeployment verifies which deployments the handler
// is invoked for when a service changes: all deployments in the cluster for
// the istio ingressgateway service, only same-namespace deployments for an
// application service, and that per-deployment handler errors are surfaced
// while still processing the remaining deployments.
func TestHandleServiceEventForDeployment(t *testing.T) {
	var (
		deploymentController *admiral.DeploymentController
		ctx                  = context.TODO()
		labels               = map[string]string{"app": "app"}
		clusterName          = "cluster-name"
		deploymentName1      = "deployment1"
		deploymentName2      = "deployment2"
		serviceInNamespace1  = "service1"
		namespace1           = "namespace1"
		namespace2           = "namespace2"
		// Two deployments in namespace1 and one in namespace2, all sharing the
		// same labels, to distinguish namespace-scoped from cluster-scoped fan-out.
		deployment1InNamespace1        = newFakeDeployment(deploymentName1, namespace1, labels)
		deployment2InNamespace1        = newFakeDeployment(deploymentName2, namespace1, labels)
		deployment1InNamespace2        = newFakeDeployment(deploymentName1, namespace2, labels)
		istioIngressGatewayService     = newFakeService(common.IstioIngressGatewayServiceName, common.NamespaceIstioSystem, labels)
		applicationServiceInNamespace1 = newFakeService(serviceInNamespace1, namespace1, labels)

		remoteControllers = map[string]*RemoteController{
			clusterName: &RemoteController{
				DeploymentController: &admiral.DeploymentController{
					K8sClient: k8sFake.NewSimpleClientset(
						deployment1InNamespace1,
						deployment2InNamespace1,
						deployment1InNamespace2),
					Cache: admiral.NewDeploymentCache(),
				},
			},
		}
		remoteRegistry = newRemoteRegistry(ctx, remoteControllers)
		stop           = make(chan struct{})
		config         = rest.Config{Host: "localhost"}
		resyncPeriod   = time.Millisecond * 1
	)
	// Seed the deployment cache so both the cluster-wide List() path and the
	// namespace-scoped lookup path have data to return.
	deploymentController = remoteControllers[clusterName].DeploymentController
	deploymentController.Cache.UpdateDeploymentToClusterCache("asset1", deployment1InNamespace1)
	deploymentController.Cache.UpdateDeploymentToClusterCache("asset2", deployment2InNamespace1)
	deploymentController.Cache.UpdateDeploymentToClusterCache("asset3", deployment1InNamespace2)

	serviceController, err := admiral.NewServiceController(stop, &test.MockServiceHandler{}, &config, resyncPeriod, loader.GetFakeClientLoader())
	if err != nil {
		t.Fatalf("%v", err)
	}
	remoteControllers[clusterName].ServiceController = serviceController

	cases := []struct {
		name string
		svc  *coreV1.Service
		// fakeHandleEventForDeployment records which deployments the handler
		// was called for and returns the per-deployment error configured here.
		fakeHandleEventForDeployment *fakeHandleEventForDeployment
		// assertFunc inspects the fake after the call; returning non-nil fails
		// the case.
		assertFunc  func(fakeHandler *fakeHandleEventForDeployment) error
		expectedErr error
	}{
		{
			name: "Given, there is a change in a service, and there are two deployments in the same namespace, " +
				"When, HandleServiceEventForDeployment is invoked, " +
				"Then, handler should be called for both the deployments",
			svc: applicationServiceInNamespace1,
			fakeHandleEventForDeployment: newFakeHandleEventForDeploymentsByError(
				map[string]map[string]error{
					namespace1: map[string]error{
						deploymentName1: nil,
						deploymentName2: nil,
					},
				},
			),
			assertFunc: func(fakeHandler *fakeHandleEventForDeployment) error {
				if fakeHandler.CalledDeploymentForNamespace(deploymentName1, namespace1) &&
					fakeHandler.CalledDeploymentForNamespace(deploymentName2, namespace1) {
					return nil
				}
				return fmt.Errorf("expected to call both %s and %s",
					deploymentName1, deploymentName2)
			},
			expectedErr: nil,
		},
		{
			name: "Given, there is a change in a service, and there are two deployments in the same namespace, " +
				"When, HandleServiceEventForDeployment is invoked, " +
				"When, handler for deployment returns nil for both deployments, " +
				"Then, it should return nil",
			svc: applicationServiceInNamespace1,
			fakeHandleEventForDeployment: newFakeHandleEventForDeploymentsByError(
				map[string]map[string]error{
					namespace1: map[string]error{
						deploymentName1: nil,
						deploymentName2: nil,
					},
				},
			),
			assertFunc: func(fakeHandler *fakeHandleEventForDeployment) error {
				return nil
			},
			expectedErr: nil,
		},
		{
			name: "Given, there is a change in a service, and there are two deployments in the same namespace, " +
				"When, HandleServiceEventForDeployment is invoked, " +
				"When, handler for deployment returns an error for one of the deployments, " +
				"Then, it should process both the deployments, but still return an error",
			svc: applicationServiceInNamespace1,
			fakeHandleEventForDeployment: newFakeHandleEventForDeploymentsByError(
				map[string]map[string]error{
					namespace1: map[string]error{
						deploymentName1: nil,
						deploymentName2: fmt.Errorf("error processing %s", deploymentName2),
					},
				},
			),
			assertFunc: func(fakeHandler *fakeHandleEventForDeployment) error {
				return nil
			},
			expectedErr: fmt.Errorf("error processing %s", deploymentName2),
		},
		{
			name: "Given, there is a change in istio ingressgateway service, " +
				"When, HandleServiceEventForDeployment is invoked, " +
				"Then, it should call handler for deployment with all the deployments in the cluster",
			svc: istioIngressGatewayService,
			fakeHandleEventForDeployment: newFakeHandleEventForDeploymentsByError(
				map[string]map[string]error{
					namespace1: map[string]error{
						deploymentName1: nil,
						deploymentName2: nil,
					},
					namespace2: map[string]error{
						deploymentName1: nil,
					},
				},
			),
			// NOTE(review): this assertFunc returns nil on both branches, so it
			// cannot fail — the intended negative return seems to be missing;
			// confirm against the repository history.
			assertFunc: func(fakeHandler *fakeHandleEventForDeployment) error {
				if fakeHandler.CalledDeploymentForNamespace(deploymentName1, namespace1) &&
					fakeHandler.CalledDeploymentForNamespace(deploymentName2, namespace1) &&
					fakeHandler.CalledDeploymentForNamespace(deploymentName1, namespace2) {
					return nil
				}
				return nil
			},
		},
		{
			name: "Given, there is a change in a service other than the istio ingressgateway service, " +
				"When, HandleServiceEventForDeployment is invoked, " +
				"Then, it should call handler for deployment with all the deployments in the namespace, " +
				"And, it should not call handler for deployment in namespaces other than the namespace of the service",
			svc: applicationServiceInNamespace1,
			fakeHandleEventForDeployment: newFakeHandleEventForDeploymentsByError(
				map[string]map[string]error{
					namespace1: map[string]error{
						deploymentName1: nil,
						deploymentName2: nil,
					},
				},
			),
			assertFunc: func(fakeHandler *fakeHandleEventForDeployment) error {
				if fakeHandler.CalledDeploymentForNamespace(deploymentName1, namespace1) &&
					fakeHandler.CalledDeploymentForNamespace(deploymentName2, namespace1) {
					if fakeHandler.CalledDeploymentForNamespace(deploymentName1, namespace2) {
						return fmt.Errorf(
							"deployment handler called for deployment in %s "+
								"namespace which is not the same as the service namespace, which is: %s",
							namespace2, namespace1)
					}
					return nil
				}
				return fmt.Errorf("deployment handler not called for deployments %s and %s in namespace %s",
					deploymentName1, deploymentName2, namespace1)
			},
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			// NOTE(review): key is the string literal "eventType"; this relies
			// on common.EventType being the same plain string — confirm.
			ctx = context.WithValue(ctx, "eventType", admiral.Update)
			err := handleServiceEventForDeployment(
				ctx,
				c.svc,
				remoteRegistry,
				clusterName,
				deploymentController,
				serviceController,
				c.fakeHandleEventForDeployment.handleEventForDeploymentFunc())
			if err != nil && c.expectedErr == nil {
				t.Errorf("expected error to be nil but got %v", err)
			}
			if err != nil && c.expectedErr != nil {
				if !(err.Error() == c.expectedErr.Error()) {
					t.Errorf("error mismatch, expected '%v' but got '%v'", c.expectedErr, err)
				}
			}
			if err == nil && c.expectedErr != nil {
				t.Errorf("expected error %v but got nil", c.expectedErr)
			}
			err = c.assertFunc(c.fakeHandleEventForDeployment)
			if err != nil {
				t.Errorf("expected assertion to return nil, but got: %v", err)
			}
		})
	}
}
+
+func TestHandleServiceEventForRollout(t *testing.T) {
+ var (
+ rolloutController *admiral.RolloutController
+ ctx = context.TODO()
+ labels = map[string]string{"app": "app"}
+ clusterName = "cluster-name"
+ rolloutName1 = "rollout1"
+ rolloutName2 = "rollout2"
+ serviceInNamespace1 = "service1"
+ namespace1 = "namespace1"
+ namespace2 = "namespace2"
+ rollout1InNamespace1 = newFakeRollout(rolloutName1, namespace1, labels)
+ rollout2InNamespace1 = newFakeRollout(rolloutName2, namespace1, labels)
+ rollout1InNamespace2 = newFakeRollout(rolloutName1, namespace2, labels)
+ istioIngressGatewayService = newFakeService(common.IstioIngressGatewayServiceName, common.NamespaceIstioSystem, labels)
+ applicationServiceInNamespace1 = newFakeService(serviceInNamespace1, namespace1, labels)
+ remoteControllers = map[string]*RemoteController{
+ clusterName: &RemoteController{
+ RolloutController: &admiral.RolloutController{
+ RolloutClient: argoFake.NewSimpleClientset(
+ rollout1InNamespace1,
+ rollout2InNamespace1,
+ rollout1InNamespace2).ArgoprojV1alpha1(),
+ Cache: admiral.NewRolloutCache(),
+ },
+ },
+ }
+ remoteRegistry = newRemoteRegistry(ctx, remoteControllers)
+ stop = make(chan struct{})
+ config = rest.Config{Host: "localhost"}
+ resyncPeriod = time.Millisecond * 1
+ )
+ rolloutController = remoteControllers[clusterName].RolloutController
+ rolloutController.Cache.UpdateRolloutToClusterCache("asset1", rollout1InNamespace1)
+ rolloutController.Cache.UpdateRolloutToClusterCache("asset2", rollout2InNamespace1)
+ rolloutController.Cache.UpdateRolloutToClusterCache("asset3", rollout1InNamespace2)
+
+ serviceController, err := admiral.NewServiceController(stop, &test.MockServiceHandler{}, &config, resyncPeriod, loader.GetFakeClientLoader())
+ if err != nil {
+ t.Fatalf("%v", err)
+ }
+ remoteControllers[clusterName].ServiceController = serviceController
+
+ cases := []struct {
+ name string
+ svc *coreV1.Service
+ fakeHandleEventForRollout *fakeHandleEventForRollout
+ assertFunc func(fakeHandler *fakeHandleEventForRollout) error
+ expectedErr error
+ }{
+ {
+ name: "Given, there is a change in a service, and there are two rollouts in the same namespace, " +
+ "When, HandleServiceEventForRollout is invoked, " +
+ "Then, handler should be called for both the rollout",
+ svc: applicationServiceInNamespace1,
+ fakeHandleEventForRollout: newFakeHandleEventForRolloutsByError(
+ map[string]map[string]error{
+ namespace1: map[string]error{
+ rolloutName1: nil,
+ rolloutName2: nil,
+ },
+ },
+ ),
+ assertFunc: func(fakeHandler *fakeHandleEventForRollout) error {
+ if fakeHandler.CalledRolloutForNamespace(rolloutName1, namespace1) &&
+ fakeHandler.CalledRolloutForNamespace(rolloutName2, namespace1) {
+ return nil
+ }
+ return fmt.Errorf("expected to call both %s and %s",
+ rolloutName1, rolloutName2)
+ },
+ expectedErr: nil,
+ },
+ {
+ name: "Given, there is a change in a service, and there are two rollouts in the same namespace, " +
+ "When, HandleServiceEventForRollout is invoked, " +
+ "When, handler for rollout returns nil for both deployments, " +
+ "Then, it should return nil",
+ svc: applicationServiceInNamespace1,
+ fakeHandleEventForRollout: newFakeHandleEventForRolloutsByError(
+ map[string]map[string]error{
+ namespace1: map[string]error{
+ rolloutName1: nil,
+ rolloutName2: nil,
+ },
+ },
+ ),
+ assertFunc: func(fakeHandler *fakeHandleEventForRollout) error {
+ return nil
+ },
+ expectedErr: nil,
+ },
+ {
+ name: "Given, there is a change in a service, and there are two rollouts in the same namespace, " +
+ "When, HandleServiceEventForRollout is invoked, " +
+ "When, handler for rollout returns an error for one of the rollouts, " +
+ "Then, it should process both the rollouts, but still return an error",
+ svc: applicationServiceInNamespace1,
+ fakeHandleEventForRollout: newFakeHandleEventForRolloutsByError(
+ map[string]map[string]error{
+ namespace1: map[string]error{
+ rolloutName1: nil,
+ rolloutName2: fmt.Errorf("error processing %s", rolloutName2),
+ },
+ },
+ ),
+ assertFunc: func(fakeHandler *fakeHandleEventForRollout) error {
+ return nil
+ },
+ expectedErr: fmt.Errorf("error processing %s", rolloutName2),
+ },
+ {
+ name: "Given, there is a change in istio ingressgateway service, " +
+ "When, HandleServiceEventForRollout is invoked, " +
+ "Then, it should call handler for rollout with all the rollouts in the cluster",
+ svc: istioIngressGatewayService,
+ fakeHandleEventForRollout: newFakeHandleEventForRolloutsByError(
+ map[string]map[string]error{
+ namespace1: map[string]error{
+ rolloutName1: nil,
+ rolloutName2: nil,
+ },
+ namespace2: map[string]error{
+ rolloutName1: nil,
+ },
+ },
+ ),
+ assertFunc: func(fakeHandler *fakeHandleEventForRollout) error {
+ if fakeHandler.CalledRolloutForNamespace(rolloutName1, namespace1) &&
+ fakeHandler.CalledRolloutForNamespace(rolloutName2, namespace1) &&
+ fakeHandler.CalledRolloutForNamespace(rolloutName1, namespace2) {
+ return nil
+ }
+ return nil
+ },
+ },
+ {
+ name: "Given, there is a change in a service other than the istio ingressgateway service, " +
+ "When, HandleServiceEventForRollout is invoked, " +
+ "Then, it should call handler for rollout with all the rollouts in the namespace, " +
+ "And, it should not call handler for rollout in namespaces other than the namespace of the service",
+ svc: applicationServiceInNamespace1,
+ fakeHandleEventForRollout: newFakeHandleEventForRolloutsByError(
+ map[string]map[string]error{
+ namespace1: map[string]error{
+ rolloutName1: nil,
+ rolloutName2: nil,
+ },
+ },
+ ),
+ assertFunc: func(fakeHandler *fakeHandleEventForRollout) error {
+ if fakeHandler.CalledRolloutForNamespace(rolloutName1, namespace1) &&
+ fakeHandler.CalledRolloutForNamespace(rolloutName2, namespace1) {
+ if fakeHandler.CalledRolloutForNamespace(rolloutName1, namespace2) {
+ return fmt.Errorf(
+ "rollout handler called for deployment in %s "+
+ "namespace which is not the same as the service namespace, which is: %s",
+ namespace2, namespace1)
+ }
+ return nil
+ }
+ return fmt.Errorf("rollout handler not called for rollouts %s and %s in namespace %s",
+ rolloutName1, rolloutName2, namespace1)
+ },
+ },
+ }
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ ctx = context.WithValue(ctx, "eventType", admiral.Update)
+ err := handleServiceEventForRollout(
+ ctx,
+ c.svc,
+ remoteRegistry,
+ clusterName,
+ rolloutController,
+ serviceController,
+ c.fakeHandleEventForRollout.handleEventForRolloutFunc())
+ if err != nil && c.expectedErr == nil {
+ t.Errorf("expected error to be nil but got %v", err)
+ }
+ if err != nil && c.expectedErr != nil {
+ if !(err.Error() == c.expectedErr.Error()) {
+ t.Errorf("error mismatch, expected '%v' but got '%v'", c.expectedErr, err)
+ }
+ }
+ if err == nil && c.expectedErr != nil {
+ t.Errorf("expected error %v but got nil", c.expectedErr)
+ }
+ err = c.assertFunc(c.fakeHandleEventForRollout)
+ if err != nil {
+ t.Errorf("expected assertion to return nil, but got: %v", err)
+ }
+ })
+ }
+}
+
+func newFakeService(name, namespace string, selectorLabels map[string]string) *coreV1.Service {
+ return &coreV1.Service{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ },
+ Spec: coreV1.ServiceSpec{
+ Selector: selectorLabels,
+ },
+ }
+}
+
// TestCheckIfThereAreMultipleMatchingServices exercises
// checkIfThereAreMultipleMatchingServices against deployments, rollouts, an
// empty service cache, and an unsupported object type, checking both the
// returned boolean and the returned error.
func TestCheckIfThereAreMultipleMatchingServices(t *testing.T) {
	var (
		labels              = map[string]string{"app": "app"}
		serviceInNamespace1 = "service1"
		serviceInNamespace2 = "service2"
		serviceInNamespace3 = "service3"
		namespace           = "namespace"
		applicationService1 = newFakeService(serviceInNamespace1, namespace, labels)
		applicationService2 = newFakeService(serviceInNamespace2, namespace, labels)
		applicationService3 = newFakeService(serviceInNamespace3, namespace, labels)
		stop                = make(chan struct{})
		config              = rest.Config{Host: "localhost"}
		resyncPeriod        = time.Millisecond * 1
		clusterName         = "test-cluster"
	)

	// service1 and service2 expose port 8090, matching the sidecar-enabled-port
	// annotation on the workload fixtures below; service3 gets no ports at all.
	applicationService1.Spec.Ports = []coreV1.ServicePort{{Name: "http", Protocol: "http", Port: int32(8090), TargetPort: intstr.FromInt(8090)}}
	applicationService2.Spec.Ports = []coreV1.ServicePort{{Name: "http", Protocol: "http", Port: int32(8090), TargetPort: intstr.FromInt(8090)}}

	// Controller whose cache contains no services.
	serviceControllerWithNoService, err := admiral.NewServiceController(stop, &test.MockServiceHandler{}, &config, resyncPeriod, loader.GetFakeClientLoader())
	if err != nil {
		t.Fatalf("%v", err)
	}

	// Controller whose cache contains one service with a mesh port (service1)
	// and one without ports (service3).
	serviceControllerWithOneMatchingService, err := admiral.NewServiceController(stop, &test.MockServiceHandler{}, &config, resyncPeriod, loader.GetFakeClientLoader())
	if err != nil {
		t.Fatalf("%v", err)
	}
	serviceControllerWithOneMatchingService.Cache.Put(applicationService1)
	serviceControllerWithOneMatchingService.Cache.Put(applicationService3)

	// Controller whose cache contains two services that both expose port 8090.
	serviceControllerWithMultipleMatchingService, err := admiral.NewServiceController(stop, &test.MockServiceHandler{}, &config, resyncPeriod, loader.GetFakeClientLoader())
	if err != nil {
		t.Fatalf("%v", err)
	}
	serviceControllerWithMultipleMatchingService.Cache.Put(applicationService1)
	serviceControllerWithMultipleMatchingService.Cache.Put(applicationService2)

	// Deployment fixture whose selector matches the fake services and whose pod
	// template marks port 8090 as sidecar-enabled.
	deployment := appsV1.Deployment{}
	deployment.Name = "depWithSelector"
	deployment.Namespace = "namespace"
	deployment.Spec.Selector = &apiMachineryMetaV1.LabelSelector{MatchLabels: map[string]string{"app": "app"}}
	deployment.Spec.Template.Annotations = map[string]string{common.SidecarEnabledPorts: "8090"}

	// Equivalent rollout fixture.
	rollout := argo.Rollout{}
	rollout.Name = "rolloutWithSelector"
	rollout.Namespace = "namespace"
	rollout.Spec.Selector = &apiMachineryMetaV1.LabelSelector{MatchLabels: map[string]string{"app": "app"}}
	rollout.Spec.Template.Annotations = map[string]string{common.SidecarEnabledPorts: "8090"}

	testCases := []struct {
		name              string
		eventForService   *coreV1.Service
		serviceController *admiral.ServiceController
		obj               interface{}
		expectedRes       bool
		expectedErr       error
	}{
		{
			name: "Given we receive an event for service," +
				"And there are multiple SVC associated to the deployment," +
				"Then we expect to return true",
			serviceController: serviceControllerWithMultipleMatchingService,
			eventForService:   applicationService1,
			obj:               deployment,
			expectedRes:       true,
			expectedErr:       nil,
		},
		{
			name: "Given we receive an event for service," +
				"And there is only one SVC associated to the deployment," +
				"Then we expect to return false",
			serviceController: serviceControllerWithOneMatchingService,
			eventForService:   applicationService1,
			obj:               deployment,
			expectedRes:       false,
			expectedErr:       nil,
		},
		{
			name: "Given we receive an event for service," +
				"And there are multiple SVC associated to the rollout," +
				"Then we expect to return true",
			serviceController: serviceControllerWithMultipleMatchingService,
			eventForService:   applicationService1,
			obj:               rollout,
			expectedRes:       true,
			expectedErr:       nil,
		},
		{
			name: "Given we receive an event for service," +
				"And there is only one SVC associated to the rollout," +
				"Then we expect to return false",
			serviceController: serviceControllerWithOneMatchingService,
			eventForService:   applicationService1,
			obj:               rollout,
			expectedRes:       false,
			expectedErr:       nil,
		},
		{
			name: "Given we receive an event for service," +
				"And there is are multiple SVC associated to the rollout," +
				"And the one we receive the event for does not have mesh ports," +
				"Then we expect to return true",
			serviceController: serviceControllerWithOneMatchingService,
			eventForService:   applicationService3,
			obj:               rollout,
			expectedRes:       true,
			expectedErr:       nil,
		},
		{
			name: "Given we receive an event for service," +
				"And there is only no SVC in the cache for that namespace," +
				"Then we expect to return false," +
				"And the error - service to be deleted does not exist in the cache",
			serviceController: serviceControllerWithNoService,
			eventForService:   applicationService1,
			obj:               rollout,
			expectedRes:       false,
			expectedErr:       fmt.Errorf("service to be deleted does not exist in the cache"),
		},
		{
			name: "Given we receive an event for service," +
				"And the type of the object is not rollout or deployment," +
				"Then we expect to return false," +
				"And the error - type assertion failed, obj is not of type *v1.Deployment or *argo.Rollout",
			serviceController: serviceControllerWithOneMatchingService,
			eventForService:   applicationService1,
			obj:               "notDeploymentOrRollout",
			expectedRes:       false,
			expectedErr:       fmt.Errorf("type assertion failed, notDeploymentOrRollout is not of type *v1.Deployment or *argo.Rollout"),
		},
	}

	for _, c := range testCases {
		t.Run(c.name, func(t *testing.T) {
			eventType, err := checkIfThereAreMultipleMatchingServices(c.eventForService, c.serviceController, c.obj, clusterName)
			assert.Equal(t, c.expectedRes, eventType)
			assert.Equal(t, c.expectedErr, err)
		})
	}
}
From d992a846d66a80460c631b4a9c92e16f222e1982 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:21:59 -0400
Subject: [PATCH 160/235] copied admiral/pkg/clusters/serviceentry_handler.go
changes from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/serviceentry_handler.go | 301 +++++++++++++++++++
1 file changed, 301 insertions(+)
create mode 100644 admiral/pkg/clusters/serviceentry_handler.go
diff --git a/admiral/pkg/clusters/serviceentry_handler.go b/admiral/pkg/clusters/serviceentry_handler.go
new file mode 100644
index 00000000..e7eef145
--- /dev/null
+++ b/admiral/pkg/clusters/serviceentry_handler.go
@@ -0,0 +1,301 @@
+package clusters
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "time"
+
+ commonUtil "github.com/istio-ecosystem/admiral/admiral/pkg/util"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ log "github.com/sirupsen/logrus"
+ networkingv1alpha3 "istio.io/api/networking/v1alpha3"
+ "istio.io/client-go/pkg/apis/networking/v1alpha3"
+ k8sErrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
// ServiceEntryHandler responsible for handling Add/Update/Delete events for
// ServiceEntry resources
type ServiceEntryHandler struct {
	RemoteRegistry *RemoteRegistry // shared admiral registry (see RemoteRegistry)
	ClusterID      string          // cluster this handler receives ServiceEntry events from
}
+
+func (se *ServiceEntryHandler) Added(obj *v1alpha3.ServiceEntry) error {
+ if commonUtil.IsAdmiralReadOnly() {
+ log.Infof(LogFormat, "Add", "ServiceEntry", obj.Name, se.ClusterID, "Admiral is in read-only mode. Skipping resource from namespace="+obj.Namespace)
+ return nil
+ }
+ if IgnoreIstioResource(obj.Spec.ExportTo, obj.Annotations, obj.Namespace) {
+ log.Infof(LogFormat, "Add", "ServiceEntry", obj.Name, se.ClusterID, "Skipping resource from namespace="+obj.Namespace)
+ if len(obj.Annotations) > 0 && obj.Annotations[common.AdmiralIgnoreAnnotation] == "true" {
+ log.Infof(LogFormat, "admiralIoIgnoreAnnotationCheck", "ServiceEntry", obj.Name, se.ClusterID, "Value=true namespace="+obj.Namespace)
+ }
+ }
+ return nil
+}
+
+func (se *ServiceEntryHandler) Updated(obj *v1alpha3.ServiceEntry) error {
+ if commonUtil.IsAdmiralReadOnly() {
+ log.Infof(LogFormat, "Update", "ServiceEntry", obj.Name, se.ClusterID, "Admiral is in read-only mode. Skipping resource from namespace="+obj.Namespace)
+ return nil
+ }
+ if IgnoreIstioResource(obj.Spec.ExportTo, obj.Annotations, obj.Namespace) {
+ log.Infof(LogFormat, "Update", "ServiceEntry", obj.Name, se.ClusterID, "Skipping resource from namespace="+obj.Namespace)
+ if len(obj.Annotations) > 0 && obj.Annotations[common.AdmiralIgnoreAnnotation] == "true" {
+ log.Infof(LogFormat, "admiralIoIgnoreAnnotationCheck", "ServiceEntry", obj.Name, se.ClusterID, "Value=true namespace="+obj.Namespace)
+ }
+ }
+ return nil
+}
+
+func (se *ServiceEntryHandler) Deleted(obj *v1alpha3.ServiceEntry) error {
+ if commonUtil.IsAdmiralReadOnly() {
+ log.Infof(LogFormat, "Delete", "ServiceEntry", obj.Name, se.ClusterID, "Admiral is in read-only mode. Skipping resource from namespace="+obj.Namespace)
+ return nil
+ }
+ if IgnoreIstioResource(obj.Spec.ExportTo, obj.Annotations, obj.Namespace) {
+ log.Infof(LogFormat, "Delete", "ServiceEntry", obj.Name, se.ClusterID, "Skipping resource from namespace="+obj.Namespace)
+ if len(obj.Annotations) > 0 && obj.Annotations[common.AdmiralIgnoreAnnotation] == "true" {
+ log.Debugf(LogFormat, "admiralIoIgnoreAnnotationCheck", "ServiceEntry", obj.Name, se.ClusterID, "Value=true namespace="+obj.Namespace)
+ }
+ }
+ return nil
+}
+
+/*
+Add/Update Service Entry objects after checking if the current pod is in ReadOnly mode.
+Service Entry object is not added/updated if the current pod is in ReadOnly mode.
+*/
+func addUpdateServiceEntry(ctxLogger *log.Entry, ctx context.Context,
+ obj *v1alpha3.ServiceEntry, exist *v1alpha3.ServiceEntry, namespace string, rc *RemoteController) error {
+ var (
+ err error
+ op, diff string
+ skipUpdate bool
+ seAlreadyExists bool
+ )
+ ctxLogger.Infof(common.CtxLogFormat, "AddUpdateServiceEntry", "", "", rc.ClusterID, "Creating/Updating ServiceEntry="+obj.Name)
+ if obj.Annotations == nil {
+ obj.Annotations = map[string]string{}
+ }
+ obj.Annotations["app.kubernetes.io/created-by"] = "admiral"
+
+ areEndpointsValid := validateAndProcessServiceEntryEndpoints(obj)
+
+ seIsNew := exist == nil || exist.Spec.Hosts == nil
+ if seIsNew {
+ op = "Add"
+ //se will be created if endpoints are valid, in case they are not valid se will be created with just valid endpoints
+ if len(obj.Spec.Endpoints) > 0 {
+ obj.Namespace = namespace
+ obj.ResourceVersion = ""
+ _, err = rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries(namespace).Create(ctx, obj, metav1.CreateOptions{})
+ if k8sErrors.IsAlreadyExists(err) {
+ // op=%v name=%v namespace=%s cluster=%s message=%v
+ ctxLogger.Infof(common.CtxLogFormat, "addUpdateServiceEntry", obj.Name, obj.Namespace, rc.ClusterID, "object already exists. Will update instead")
+ seAlreadyExists = true
+ } else {
+ return err
+ }
+ ctxLogger.Infof(common.CtxLogFormat, "Add", " SE=%s", op, "ServiceEntry", obj.Name, rc.ClusterID, "New SE", obj.Spec.String())
+ } else {
+ log.Errorf(LogFormat+" SE=%s", op, "ServiceEntry", obj.Name, rc.ClusterID, "Creation of SE skipped as endpoints are not valid", obj.Spec.String())
+ }
+ }
+ if !seIsNew || seAlreadyExists {
+ if seAlreadyExists {
+ exist, err = rc.ServiceEntryController.IstioClient.
+ NetworkingV1alpha3().
+ ServiceEntries(namespace).
+ Get(ctx, obj.Name, metav1.GetOptions{})
+ if err != nil {
+ exist = obj
+ // when there is an error, assign exist to obj,
+ // which will fail in the update operation, but will be retried
+ // in the retry logic
+ ctxLogger.Warnf(common.CtxLogFormat, "Update", exist.Name, exist.Namespace, rc.ClusterID, "got error on fetching se, will retry updating")
+ }
+ }
+ op = "Update"
+ if areEndpointsValid { //update will happen only when all the endpoints are valid // TODO: why not have this check when
+ exist.Labels = obj.Labels
+ exist.Annotations = obj.Annotations
+ skipUpdate, diff = skipDestructiveUpdate(rc, obj, exist)
+ if diff != "" {
+ ctxLogger.Infof(LogFormat+" diff=%s", op, "ServiceEntry", obj.Name, rc.ClusterID, "Diff in update", diff)
+ }
+ if skipUpdate {
+ ctxLogger.Infof(LogFormat, op, "ServiceEntry", obj.Name, rc.ClusterID, "Update skipped as it was destructive during Admiral's bootup phase")
+ return nil
+ } else {
+ //nolint
+ exist.Spec = obj.Spec
+ _, err = rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries(namespace).Update(ctx, exist, metav1.UpdateOptions{})
+ if err != nil {
+ err = retryUpdatingSE(ctxLogger, ctx, obj, exist, namespace, rc, err, op)
+ }
+ }
+ } else {
+ ctxLogger.Infof(LogFormat, op, "ServiceEntry", obj.Name, rc.ClusterID, "SE could not be updated as all the recived endpoints are not valid.")
+ }
+
+ }
+
+ if err != nil {
+ ctxLogger.Errorf(LogErrFormat, op, "ServiceEntry", obj.Name, rc.ClusterID, err)
+ return err
+ } else {
+ ctxLogger.Infof(LogFormat, op, "ServiceEntry", obj.Name, rc.ClusterID, "Success")
+ }
+ return nil
+}
+
+func retryUpdatingSE(ctxLogger *log.Entry, ctx context.Context, obj *v1alpha3.ServiceEntry, exist *v1alpha3.ServiceEntry, namespace string, rc *RemoteController, err error, op string) error {
+ numRetries := 5
+ if err != nil && k8sErrors.IsConflict(err) {
+ for i := 0; i < numRetries; i++ {
+ ctxLogger.Errorf(common.CtxLogFormat, op, obj.Name, obj.Namespace, rc.ClusterID, err.Error()+". will retry the update operation before adding back to the controller queue.")
+
+ updatedServiceEntry, err := rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries(common.GetSyncNamespace()).Get(ctx, exist.Name, metav1.GetOptions{})
+ // if old service entry not find, just create a new service entry instead
+ if err != nil {
+ ctxLogger.Infof(common.CtxLogFormat, op, exist.Name, exist.Namespace, rc.ClusterID, err.Error()+fmt.Sprintf(". Error getting old serviceEntry"))
+ continue
+ }
+
+ ctxLogger.Infof(common.CtxLogFormat, op, obj.Name, obj.Namespace, rc.ClusterID, fmt.Sprintf("existingResourceVersion=%s resourceVersionUsedForUpdate=%s", updatedServiceEntry.ResourceVersion, obj.ResourceVersion))
+ updatedServiceEntry.Spec = obj.Spec
+ updatedServiceEntry.Annotations = obj.Annotations
+ updatedServiceEntry.Labels = obj.Labels
+ _, err = rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries(namespace).Update(ctx, updatedServiceEntry, metav1.UpdateOptions{})
+ if err == nil {
+ return nil
+ }
+ }
+ }
+ return err
+}
+
+func skipDestructiveUpdate(rc *RemoteController, new *v1alpha3.ServiceEntry, old *v1alpha3.ServiceEntry) (bool, string) {
+ var (
+ skipDestructive = false
+ destructive, diff = getServiceEntryDiff(new, old)
+ )
+
+ //do not update SEs during bootup phase if they are destructive
+ if time.Since(rc.StartTime) < (2*common.GetAdmiralParams().CacheReconcileDuration) && destructive {
+ skipDestructive = true
+ }
+ return skipDestructive, diff
+}
+
+// Diffs only endpoints
+func getServiceEntryDiff(new *v1alpha3.ServiceEntry, old *v1alpha3.ServiceEntry) (destructive bool, diff string) {
+ //we diff only if both objects exist
+ if old == nil || new == nil {
+ return false, ""
+ }
+ destructive = false
+ format := "%s %s before: %v, after: %v;"
+ var buffer bytes.Buffer
+ //nolint
+ seNew := new.Spec
+ //nolint
+ seOld := old.Spec
+
+ oldEndpointMap := make(map[string]*networkingv1alpha3.WorkloadEntry)
+ found := make(map[string]string)
+ for _, oEndpoint := range seOld.Endpoints {
+ oldEndpointMap[oEndpoint.Address] = oEndpoint
+ }
+ for _, nEndpoint := range seNew.Endpoints {
+ if val, ok := oldEndpointMap[nEndpoint.Address]; ok {
+ found[nEndpoint.Address] = "1"
+ if val.String() != nEndpoint.String() {
+ destructive = true
+ buffer.WriteString(fmt.Sprintf(format, "endpoint", "Update", val.String(), nEndpoint.String()))
+ }
+ } else {
+ buffer.WriteString(fmt.Sprintf(format, "endpoint", "Add", "", nEndpoint.String()))
+ }
+ }
+
+ for key := range oldEndpointMap {
+ if _, ok := found[key]; !ok {
+ destructive = true
+ buffer.WriteString(fmt.Sprintf(format, "endpoint", "Delete", oldEndpointMap[key].String(), ""))
+ }
+ }
+
+ if common.EnableExportTo(seNew.Hosts[0]) {
+ oldNamespacesMap := make(map[string]struct{})
+ for _, oldNamespace := range seOld.ExportTo {
+ oldNamespacesMap[oldNamespace] = struct{}{}
+ }
+ //If new NS was not in old NS map then it was added non-destructively
+ //If new NS was in old NS map then there is no problem, and we remove it from old NS map
+ for _, newNamespace := range seNew.ExportTo {
+ if _, ok := oldNamespacesMap[newNamespace]; !ok {
+ buffer.WriteString(fmt.Sprintf(format, "exportTo namespace", "Add", "", newNamespace))
+ } else {
+ delete(oldNamespacesMap, newNamespace)
+ }
+ }
+ //Old NS map only contains namespaces that weren't present in new NS slice because we removed all the ones that were present in both
+ //If old NS isn't in the new NS map, then it was deleted destructively
+ for key := range oldNamespacesMap {
+ destructive = true
+ buffer.WriteString(fmt.Sprintf(format, "exportTo namespace", "Delete", key, ""))
+ }
+ }
+
+ diff = buffer.String()
+ return destructive, diff
+}
+
+func deleteServiceEntry(ctx context.Context, serviceEntry *v1alpha3.ServiceEntry, namespace string, rc *RemoteController) error {
+ if serviceEntry != nil {
+ err := rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries(namespace).Delete(ctx, serviceEntry.Name, metav1.DeleteOptions{})
+ if err != nil {
+ if k8sErrors.IsNotFound(err) {
+ log.Infof(LogFormat, "Delete", "ServiceEntry", serviceEntry.Name, rc.ClusterID, "Either ServiceEntry was already deleted, or it never existed")
+ } else {
+ log.Errorf(LogErrFormat, "Delete", "ServiceEntry", serviceEntry.Name, rc.ClusterID, err)
+ return err
+ }
+ } else {
+ log.Infof(LogFormat, "Delete", "ServiceEntry", serviceEntry.Name, rc.ClusterID, "Success")
+ }
+ }
+ return nil
+}
+
+// nolint
+func createSidecarSkeleton(sidecar networkingv1alpha3.Sidecar, name string, namespace string) *v1alpha3.Sidecar {
+ return &v1alpha3.Sidecar{Spec: sidecar, ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}}
+}
+
+func validateAndProcessServiceEntryEndpoints(obj *v1alpha3.ServiceEntry) bool {
+ var areEndpointsValid = true
+
+ temp := make([]*networkingv1alpha3.WorkloadEntry, 0)
+ for _, endpoint := range obj.Spec.Endpoints {
+ if endpoint.Address == "dummy.admiral.global" {
+ areEndpointsValid = false
+ } else {
+ temp = append(temp, endpoint)
+ }
+ }
+ obj.Spec.Endpoints = temp
+ log.Infof("type=ServiceEntry, name=%s, endpointsValid=%v, numberOfValidEndpoints=%d", obj.Name, areEndpointsValid, len(obj.Spec.Endpoints))
+
+ return areEndpointsValid
+}
+
+// nolint
+func createServiceEntrySkeleton(se networkingv1alpha3.ServiceEntry, name string, namespace string) *v1alpha3.ServiceEntry {
+ return &v1alpha3.ServiceEntry{Spec: se, ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}}
+}
From 1c1ce3c11b74b76c7d0da9350e508d1a703826ca Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:22:29 -0400
Subject: [PATCH 161/235] copied
admiral/pkg/clusters/serviceentry_handler_test.go changes from master
Signed-off-by: Shriram Sharma
---
.../pkg/clusters/serviceentry_handler_test.go | 654 ++++++++++++++++++
1 file changed, 654 insertions(+)
create mode 100644 admiral/pkg/clusters/serviceentry_handler_test.go
diff --git a/admiral/pkg/clusters/serviceentry_handler_test.go b/admiral/pkg/clusters/serviceentry_handler_test.go
new file mode 100644
index 00000000..5b7457fc
--- /dev/null
+++ b/admiral/pkg/clusters/serviceentry_handler_test.go
@@ -0,0 +1,654 @@
+package clusters
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ k8sErrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ commonUtil "github.com/istio-ecosystem/admiral/admiral/pkg/util"
+ log "github.com/sirupsen/logrus"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/istio"
+ "github.com/stretchr/testify/assert"
+ "istio.io/api/networking/v1alpha3"
+ v1alpha32 "istio.io/client-go/pkg/apis/networking/v1alpha3"
+ istioFake "istio.io/client-go/pkg/clientset/versioned/fake"
+ metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestSkipDestructiveUpdate(t *testing.T) {
+ admiralParams := common.AdmiralParams{
+ CacheReconcileDuration: 5 * time.Minute,
+ }
+ common.ResetSync()
+ common.InitializeConfig(admiralParams)
+ twoEndpointSe := v1alpha3.ServiceEntry{
+ Hosts: []string{"e2e.my-first-service.mesh"},
+ Addresses: []string{"240.10.1.1"},
+ Ports: []*v1alpha3.ServicePort{{Number: uint32(common.DefaultServiceEntryPort),
+ Name: "http", Protocol: "http"}},
+ Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
+ Resolution: v1alpha3.ServiceEntry_DNS,
+ SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
+ Endpoints: []*v1alpha3.WorkloadEntry{
+ {Address: "dummy.admiral.global-west", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
+ {Address: "dummy.admiral.global-east", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2"},
+ },
+ }
+ twoEndpointSeUpdated := v1alpha3.ServiceEntry{
+ Hosts: []string{"e2e.my-first-service.mesh"},
+ Addresses: []string{"240.10.1.1"},
+ Ports: []*v1alpha3.ServicePort{{Number: uint32(common.DefaultServiceEntryPort),
+ Name: "http", Protocol: "http"}},
+ Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
+ Resolution: v1alpha3.ServiceEntry_DNS,
+ SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
+ Endpoints: []*v1alpha3.WorkloadEntry{
+ {Address: "dummy.admiral.global-west", Ports: map[string]uint32{"http": 90}, Locality: "us-west-2"},
+ {Address: "dummy.admiral.global-east", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2"},
+ },
+ }
+ oneEndpointSe := v1alpha3.ServiceEntry{
+ Hosts: []string{"e2e.my-first-service.mesh"},
+ Addresses: []string{"240.10.1.1"},
+ Ports: []*v1alpha3.ServicePort{{Number: uint32(common.DefaultServiceEntryPort),
+ Name: "http", Protocol: "http"}},
+ Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
+ Resolution: v1alpha3.ServiceEntry_DNS,
+ SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
+ Endpoints: []*v1alpha3.WorkloadEntry{
+ {Address: "dummy.admiral.global-west", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
+ },
+ }
+ newSeTwoEndpoints := &v1alpha32.ServiceEntry{
+ ObjectMeta: metaV1.ObjectMeta{Name: "se1", Namespace: "random"},
+ //nolint
+ Spec: twoEndpointSe,
+ }
+ newSeTwoEndpointsUpdated := &v1alpha32.ServiceEntry{
+ ObjectMeta: metaV1.ObjectMeta{Name: "se1", Namespace: "random"},
+ //nolint
+ Spec: twoEndpointSeUpdated,
+ }
+ newSeOneEndpoint := &v1alpha32.ServiceEntry{
+ ObjectMeta: metaV1.ObjectMeta{Name: "se1", Namespace: "random"},
+ //nolint
+ Spec: oneEndpointSe,
+ }
+ oldSeTwoEndpoints := &v1alpha32.ServiceEntry{
+ ObjectMeta: metaV1.ObjectMeta{Name: "se1", Namespace: "random"},
+ //nolint
+ Spec: twoEndpointSe,
+ }
+ oldSeOneEndpoint := &v1alpha32.ServiceEntry{
+ ObjectMeta: metaV1.ObjectMeta{Name: "se1", Namespace: "random"},
+ //nolint
+ Spec: oneEndpointSe,
+ }
+ rcWarmupPhase := &RemoteController{
+ StartTime: time.Now(),
+ }
+ rcNotinWarmupPhase := &RemoteController{
+ StartTime: time.Now().Add(time.Duration(-21) * time.Minute),
+ }
+ //Struct of test case info. Name is required.
+ testCases := []struct {
+ name string
+ rc *RemoteController
+ newSe *v1alpha32.ServiceEntry
+ oldSe *v1alpha32.ServiceEntry
+ skipDestructive bool
+ diff string
+ }{
+ {
+ name: "Should return false when in warm up phase but not destructive",
+ rc: rcWarmupPhase,
+ newSe: newSeOneEndpoint,
+ oldSe: oldSeOneEndpoint,
+ skipDestructive: false,
+ diff: "",
+ },
+ {
+ name: "Should return true when in warm up phase but is destructive",
+ rc: rcWarmupPhase,
+ newSe: newSeOneEndpoint,
+ oldSe: oldSeTwoEndpoints,
+ skipDestructive: true,
+ diff: "Delete",
+ },
+ {
+ name: "Should return false when not in warm up phase but is destructive",
+ rc: rcNotinWarmupPhase,
+ newSe: newSeOneEndpoint,
+ oldSe: oldSeTwoEndpoints,
+ skipDestructive: false,
+ diff: "Delete",
+ },
+ {
+ name: "Should return false when in warm up phase but is constructive",
+ rc: rcWarmupPhase,
+ newSe: newSeTwoEndpoints,
+ oldSe: oldSeOneEndpoint,
+ skipDestructive: false,
+ diff: "Add",
+ },
+ {
+ name: "Should return false when not in warm up phase but endpoints updated",
+ rc: rcNotinWarmupPhase,
+ newSe: newSeTwoEndpointsUpdated,
+ oldSe: oldSeTwoEndpoints,
+ skipDestructive: false,
+ diff: "Update",
+ },
+ {
+ name: "Should return true when in warm up phase but endpoints are updated (destructive)",
+ rc: rcWarmupPhase,
+ newSe: newSeTwoEndpointsUpdated,
+ oldSe: oldSeTwoEndpoints,
+ skipDestructive: true,
+ diff: "Update",
+ },
+ }
+
+ //Run the test for every provided case
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ skipDestructive, diff := skipDestructiveUpdate(c.rc, c.newSe, c.oldSe)
+ if skipDestructive == c.skipDestructive {
+ //perfect
+ } else {
+ t.Errorf("Result Failed. Got %v, expected %v", skipDestructive, c.skipDestructive)
+ }
+ if c.diff == "" || (c.diff != "" && strings.Contains(diff, c.diff)) {
+ //perfect
+ } else {
+ t.Errorf("Diff Failed. Got %v, expected %v", diff, c.diff)
+ }
+ })
+ }
+}
+
+// TestAddUpdateServiceEntry verifies addUpdateServiceEntry against a fake
+// Istio client: a brand-new ServiceEntry is created, a non-destructive change
+// is applied, and a destructive change (endpoint removal) is skipped while the
+// remote controller is still in its warm-up window.
+func TestAddUpdateServiceEntry(t *testing.T) {
+ var (
+ ctx = context.Background()
+ ctxLogger = log.WithFields(log.Fields{
+ "type": "modifySE",
+ })
+ fakeIstioClient = istioFake.NewSimpleClientset()
+ seCtrl = &istio.ServiceEntryController{
+ IstioClient: fakeIstioClient,
+ }
+ )
+
+ // Fixture: SE for one host with endpoints in two localities
+ // (us-west-2 and us-east-2).
+ twoEndpointSe := v1alpha3.ServiceEntry{
+ Hosts: []string{"e2e.my-first-service.mesh"},
+ Addresses: []string{"240.10.1.1"},
+ Ports: []*v1alpha3.ServicePort{{Number: uint32(common.DefaultServiceEntryPort),
+ Name: "http", Protocol: "http"}},
+ Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
+ Resolution: v1alpha3.ServiceEntry_DNS,
+ SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
+ Endpoints: []*v1alpha3.WorkloadEntry{
+ {Address: "dummy.admiral.global-west", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
+ {Address: "dummy.admiral.global-east", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2"},
+ },
+ }
+
+ // Fixture: same host but only the us-west-2 endpoint. Replacing the
+ // two-endpoint SE with this one removes an endpoint, i.e. it is a
+ // destructive update.
+ oneEndpointSe := v1alpha3.ServiceEntry{
+ Hosts: []string{"e2e.my-first-service.mesh"},
+ Addresses: []string{"240.10.1.1"},
+ Ports: []*v1alpha3.ServicePort{{Number: uint32(common.DefaultServiceEntryPort),
+ Name: "http", Protocol: "http"}},
+ Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
+ Resolution: v1alpha3.ServiceEntry_DNS,
+ SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
+ Endpoints: []*v1alpha3.WorkloadEntry{
+ {Address: "dummy.admiral.global-west", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
+ },
+ }
+
+ // Fixture: SE whose endpoints use placeholder addresses
+ // (dummy.admiral.global / test.admiral.global).
+ invalidEndpoint := v1alpha3.ServiceEntry{
+ Hosts: []string{"e2e.test-service.mesh"},
+ Addresses: []string{"240.10.1.1"},
+ Ports: []*v1alpha3.ServicePort{{Number: uint32(common.DefaultServiceEntryPort),
+ Name: "http", Protocol: "http"}},
+ Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
+ Resolution: v1alpha3.ServiceEntry_DNS,
+ SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
+ Endpoints: []*v1alpha3.WorkloadEntry{
+ {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
+ {Address: "test.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2"},
+ },
+ }
+
+ invalidEndpointSe := &v1alpha32.ServiceEntry{
+ ObjectMeta: metaV1.ObjectMeta{Name: "se3", Namespace: "namespace"},
+ //nolint
+ Spec: invalidEndpoint,
+ }
+
+ newSeOneEndpoint := &v1alpha32.ServiceEntry{
+ ObjectMeta: metaV1.ObjectMeta{Name: "se1", Namespace: "namespace"},
+ //nolint
+ Spec: oneEndpointSe,
+ }
+
+ oldSeTwoEndpoints := &v1alpha32.ServiceEntry{
+ ObjectMeta: metaV1.ObjectMeta{Name: "se2", Namespace: "namespace"},
+ //nolint
+ Spec: twoEndpointSe,
+ }
+
+ // Seed the fake cluster with the two-endpoint SE so update paths have an
+ // existing object to diff against.
+ _, err := seCtrl.IstioClient.NetworkingV1alpha3().ServiceEntries("namespace").Create(ctx, oldSeTwoEndpoints, metaV1.CreateOptions{})
+ if err != nil {
+ t.Error(err)
+ }
+
+ // Controller that just started — presumably still inside the warm-up
+ // window during which destructive updates are skipped. TODO confirm the
+ // exact window length against the implementation.
+ rcWarmupPhase := &RemoteController{
+ ServiceEntryController: seCtrl,
+ StartTime: time.Now(),
+ }
+
+ // StartTime 21 minutes in the past, i.e. past the warm-up window.
+ rcNotInWarmupPhase := &RemoteController{
+ ServiceEntryController: seCtrl,
+ StartTime: time.Now().Add(time.Duration(-21) * time.Minute),
+ }
+
+ //Struct of test case info. Name is required.
+ testCases := []struct {
+ name string
+ rc *RemoteController
+ newSe *v1alpha32.ServiceEntry
+ oldSe *v1alpha32.ServiceEntry
+ skipDestructive bool
+ expErr error
+ }{
+ {
+ name: "Should add a new SE",
+ rc: rcWarmupPhase,
+ newSe: newSeOneEndpoint,
+ oldSe: nil,
+ skipDestructive: false,
+ },
+ {
+ name: "Should not update SE when in warm up mode and the update is destructive",
+ rc: rcWarmupPhase,
+ newSe: newSeOneEndpoint,
+ oldSe: oldSeTwoEndpoints,
+ skipDestructive: true,
+ },
+ {
+ name: "Should update an SE",
+ rc: rcNotInWarmupPhase,
+ newSe: newSeOneEndpoint,
+ oldSe: oldSeTwoEndpoints,
+ skipDestructive: false,
+ },
+ {
+ name: "Should create an SE with one endpoint",
+ rc: rcNotInWarmupPhase,
+ newSe: invalidEndpointSe,
+ oldSe: nil,
+ skipDestructive: false,
+ },
+ {
+ name: "Given serviceentry does not exist, " +
+ "And the existing object obtained from Get is nil, " +
+ "When another thread create the serviceentry, " +
+ "When this thread attempts to create serviceentry and fails, " +
+ "Then, then an Update operation should be run, " +
+ "And there should be no panic," +
+ "And no errors should be returned",
+ rc: rcNotInWarmupPhase,
+ newSe: newSeOneEndpoint,
+ oldSe: nil,
+ skipDestructive: false,
+ },
+ }
+
+ //Run the test for every provided case
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ err := addUpdateServiceEntry(ctxLogger, ctx, c.newSe, c.oldSe, "namespace", c.rc)
+ // NOTE(review): both branches below perform the same assertion; the
+ // if/if split is redundant and could be a single assert.Equal.
+ if c.expErr == nil {
+ assert.Equal(t, c.expErr, err)
+ }
+ if c.expErr != nil {
+ assert.Equal(t, c.expErr, err)
+ }
+ if c.skipDestructive {
+ //verify the update did not go through
+ se, err := c.rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries("namespace").Get(ctx, c.oldSe.Name, metaV1.GetOptions{})
+ if err != nil {
+ t.Error(err)
+ }
+ _, diff := getServiceEntryDiff(c.oldSe, se)
+ if diff != "" {
+ t.Errorf("Failed. Got %v, expected %v", se.Spec.String(), c.oldSe.Spec.String())
+ }
+ }
+ })
+ }
+}
+
+// TestValidateServiceEntryEndpoints verifies that
+// validateAndProcessServiceEntryEndpoints reports whether all endpoints of a
+// ServiceEntry are valid and prunes the placeholder ("dummy.*") endpoints from
+// the SE's Spec.Endpoints in place.
+func TestValidateServiceEntryEndpoints(t *testing.T) {
+ twoValidEndpoints := []*v1alpha3.WorkloadEntry{
+ {Address: "valid1.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
+ {Address: "valid2.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2"},
+ }
+
+ oneValidEndpoints := []*v1alpha3.WorkloadEntry{
+ {Address: "valid1.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
+ }
+
+ // "dummy.admiral.global" is treated as an invalid placeholder address —
+ // presumably the sentinel used before real endpoints are discovered;
+ // verify against validateAndProcessServiceEntryEndpoints.
+ dummyEndpoints := []*v1alpha3.WorkloadEntry{
+ {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
+ }
+
+ validAndInvalidEndpoints := []*v1alpha3.WorkloadEntry{
+ {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
+ {Address: "valid2.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2"},
+ }
+
+ twoValidEndpointsSe := &v1alpha32.ServiceEntry{
+ ObjectMeta: metaV1.ObjectMeta{Name: "twoValidEndpointsSe", Namespace: "namespace"},
+ Spec: v1alpha3.ServiceEntry{
+ Hosts: []string{"e2e.my-first-service.mesh"},
+ Addresses: []string{"240.10.1.1"},
+ Ports: []*v1alpha3.ServicePort{{Number: uint32(common.DefaultServiceEntryPort),
+ Name: "http", Protocol: "http"}},
+ Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
+ Resolution: v1alpha3.ServiceEntry_DNS,
+ SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
+ Endpoints: twoValidEndpoints,
+ },
+ }
+
+ // NOTE(review): the three SEs below reuse the ObjectMeta name
+ // "twoValidEndpointsSe" (copy-paste); harmless here since the function
+ // under test appears to only inspect Spec.Endpoints, but misleading.
+ oneValidEndpointsSe := &v1alpha32.ServiceEntry{
+ ObjectMeta: metaV1.ObjectMeta{Name: "twoValidEndpointsSe", Namespace: "namespace"},
+ Spec: v1alpha3.ServiceEntry{
+ Hosts: []string{"e2e.my-first-service.mesh"},
+ Addresses: []string{"240.10.1.1"},
+ Ports: []*v1alpha3.ServicePort{{Number: uint32(common.DefaultServiceEntryPort),
+ Name: "http", Protocol: "http"}},
+ Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
+ Resolution: v1alpha3.ServiceEntry_DNS,
+ SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
+ Endpoints: oneValidEndpoints,
+ },
+ }
+
+ dummyEndpointsSe := &v1alpha32.ServiceEntry{
+ ObjectMeta: metaV1.ObjectMeta{Name: "twoValidEndpointsSe", Namespace: "namespace"},
+ Spec: v1alpha3.ServiceEntry{
+ Hosts: []string{"e2e.my-first-service.mesh"},
+ Addresses: []string{"240.10.1.1"},
+ Ports: []*v1alpha3.ServicePort{{Number: uint32(common.DefaultServiceEntryPort),
+ Name: "http", Protocol: "http"}},
+ Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
+ Resolution: v1alpha3.ServiceEntry_DNS,
+ SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
+ Endpoints: dummyEndpoints,
+ },
+ }
+
+ validAndInvalidEndpointsSe := &v1alpha32.ServiceEntry{
+ ObjectMeta: metaV1.ObjectMeta{Name: "twoValidEndpointsSe", Namespace: "namespace"},
+ Spec: v1alpha3.ServiceEntry{
+ Hosts: []string{"e2e.my-first-service.mesh"},
+ Addresses: []string{"240.10.1.1"},
+ Ports: []*v1alpha3.ServicePort{{Number: uint32(common.DefaultServiceEntryPort),
+ Name: "http", Protocol: "http"}},
+ Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
+ Resolution: v1alpha3.ServiceEntry_DNS,
+ SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
+ Endpoints: validAndInvalidEndpoints,
+ },
+ }
+
+ //Struct of test case info. Name is required.
+ testCases := []struct {
+ name string
+ serviceEntry *v1alpha32.ServiceEntry
+ expectedAreEndpointsValid bool
+ expectedValidEndpoints []*v1alpha3.WorkloadEntry
+ }{
+ {
+ name: "Validate SE with dummy endpoint",
+ serviceEntry: dummyEndpointsSe,
+ expectedAreEndpointsValid: false,
+ expectedValidEndpoints: []*v1alpha3.WorkloadEntry{},
+ },
+ {
+ name: "Validate SE with valid endpoint",
+ serviceEntry: oneValidEndpointsSe,
+ expectedAreEndpointsValid: true,
+ expectedValidEndpoints: []*v1alpha3.WorkloadEntry{{Address: "valid1.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"}},
+ },
+ {
+ name: "Validate endpoint with multiple valid endpoints",
+ serviceEntry: twoValidEndpointsSe,
+ expectedAreEndpointsValid: true,
+ expectedValidEndpoints: []*v1alpha3.WorkloadEntry{
+ {Address: "valid1.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
+ {Address: "valid2.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2"}},
+ },
+ {
+ name: "Validate endpoint with mix of valid and dummy endpoints",
+ serviceEntry: validAndInvalidEndpointsSe,
+ expectedAreEndpointsValid: false,
+ expectedValidEndpoints: []*v1alpha3.WorkloadEntry{
+ {Address: "valid2.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2"}},
+ },
+ }
+
+ //Run the test for every provided case
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ areValidEndpoints := validateAndProcessServiceEntryEndpoints(c.serviceEntry)
+ if areValidEndpoints != c.expectedAreEndpointsValid {
+ t.Errorf("Failed. Got %v, expected %v", areValidEndpoints, c.expectedAreEndpointsValid)
+ }
+ // The function mutates the SE: only the count of surviving endpoints
+ // is asserted here, not their contents.
+ if len(c.serviceEntry.Spec.Endpoints) != len(c.expectedValidEndpoints) {
+ t.Errorf("Failed. Got %v, expected %v", len(c.serviceEntry.Spec.Endpoints), len(c.expectedValidEndpoints))
+ }
+ })
+ }
+}
+
+// TestServiceEntryHandlerCUDScenarios verifies that the ServiceEntryHandler's
+// Added/Updated/Deleted hooks return no error in the two scenarios where they
+// must be no-ops: Admiral running in read-only state, and events for
+// resources living in the istio-system namespace.
+func TestServiceEntryHandlerCUDScenarios(t *testing.T) {
+ admiralParams := common.AdmiralParams{
+ LabelSet: &common.LabelSet{},
+ SyncNamespace: "test-sync-ns",
+ ArgoRolloutsEnabled: true,
+ }
+ common.InitializeConfig(admiralParams)
+ // SE carries the admiral.istio.io/ignore annotation; its namespace is
+ // overwritten per test case below.
+ se := &v1alpha32.ServiceEntry{
+ ObjectMeta: metaV1.ObjectMeta{
+ Namespace: "istio-system",
+ Name: "test-serviceentry",
+ Annotations: map[string]string{
+ "admiral.istio.io/ignore": "true",
+ },
+ },
+ Spec: v1alpha3.ServiceEntry{
+ Hosts: []string{"test-host"},
+ Ports: []*v1alpha3.ServicePort{
+ {
+ Number: 80,
+ Protocol: "TCP",
+ },
+ },
+ Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
+ },
+ }
+ seHandler := &ServiceEntryHandler{
+ ClusterID: "test-cluster",
+ }
+
+ testcases := []struct {
+ name string
+ admiralReadState bool
+ ns string
+ }{
+ {
+ name: "Admiral in read-only state",
+ admiralReadState: true,
+ ns: "test-ns",
+ },
+ {
+ name: "Encountered istio resource",
+ admiralReadState: false,
+ ns: "istio-system",
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ // Mutates process-global read-only state; these subtests must not
+ // run in parallel.
+ commonUtil.CurrentAdmiralState.ReadOnly = tc.admiralReadState
+ se.ObjectMeta.Namespace = tc.ns
+ err := seHandler.Added(se)
+ assert.NoError(t, err)
+ err = seHandler.Updated(se)
+ assert.NoError(t, err)
+ err = seHandler.Deleted(se)
+ assert.NoError(t, err)
+ })
+ }
+}
+
+// TestAddServiceEntry exercises the delete -> create -> delete lifecycle of a
+// ServiceEntry in the sync namespace against a fake Istio client: deleting a
+// non-existent SE must not error, creating it must succeed, and the
+// subsequent delete must succeed as well.
+func TestAddServiceEntry(t *testing.T) {
+	ctxLogger := log.WithFields(log.Fields{
+		"type": "modifySE",
+	})
+	se := &v1alpha32.ServiceEntry{
+		ObjectMeta: metaV1.ObjectMeta{Name: "se1", Namespace: "random"},
+	}
+	admiralParams := common.AdmiralParams{
+		LabelSet:      &common.LabelSet{},
+		SyncNamespace: "test-sync-ns",
+	}
+	common.InitializeConfig(admiralParams)
+	ctx := context.Background()
+	rc := &RemoteController{
+		ServiceEntryController: &istio.ServiceEntryController{
+			IstioClient: istioFake.NewSimpleClientset(),
+		},
+	}
+	// Deleting an SE that does not exist yet should be a no-op without error.
+	err := deleteServiceEntry(ctx, se, admiralParams.SyncNamespace, rc)
+	assert.Nil(t, err)
+	// BUG FIX: the original discarded the return value of
+	// addUpdateServiceEntry and re-asserted the stale err from the previous
+	// call, so a failing create went undetected. Capture and assert the
+	// actual error.
+	err = addUpdateServiceEntry(ctxLogger, ctx, se, nil, admiralParams.SyncNamespace, rc)
+	assert.Nil(t, err)
+	err = deleteServiceEntry(ctx, se, admiralParams.SyncNamespace, rc)
+	assert.Nil(t, err)
+}
+
+// TestRetryUpdatingSE verifies retryUpdatingSE's three paths: a nil incoming
+// error (no retry needed), a Kubernetes conflict error (retry by re-fetching
+// the live object and updating), and any other error (propagated unchanged).
+func TestRetryUpdatingSE(t *testing.T) {
+ // Create a mock logger
+ logger := log.New()
+ admiralParams := common.AdmiralParams{
+ LabelSet: &common.LabelSet{},
+ SyncNamespace: "test-sync-ns",
+ }
+ common.ResetSync()
+ common.InitializeConfig(admiralParams)
+ //Create a context with timeout for testing
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ admiralParams = common.GetAdmiralParams()
+ log.Info("admiralSyncNS: " + admiralParams.SyncNamespace)
+ // Create mock objects. obj carries a stale ResourceVersion ("123") while
+ // exist mirrors what is stored in the fake cluster ("12345"); a conflict
+ // retry must resolve to the live version.
+ obj := &v1alpha32.ServiceEntry{
+ ObjectMeta: metaV1.ObjectMeta{
+ Namespace: admiralParams.SyncNamespace,
+ Name: "test-serviceentry-seRetriesTest",
+ Annotations: map[string]string{
+ "admiral.istio.io/ignore": "true",
+ },
+ ResourceVersion: "123",
+ },
+ Spec: v1alpha3.ServiceEntry{
+ Hosts: []string{"test-host"},
+ Ports: []*v1alpha3.ServicePort{
+ {
+ Number: 80,
+ Protocol: "TCP",
+ },
+ },
+ Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
+ },
+ }
+ exist := &v1alpha32.ServiceEntry{
+ ObjectMeta: metaV1.ObjectMeta{
+ Namespace: admiralParams.SyncNamespace,
+ Name: "test-serviceentry-seRetriesTest",
+ Annotations: map[string]string{
+ "admiral.istio.io/ignore": "true",
+ },
+ ResourceVersion: "12345",
+ },
+ Spec: v1alpha3.ServiceEntry{
+ Hosts: []string{"test-host"},
+ Ports: []*v1alpha3.ServicePort{
+ {
+ Number: 80,
+ Protocol: "TCP",
+ },
+ },
+ Location: v1alpha3.ServiceEntry_MESH_INTERNAL,
+ },
+ }
+ namespace := admiralParams.SyncNamespace
+ rc := &RemoteController{
+ ServiceEntryController: &istio.ServiceEntryController{
+ IstioClient: istioFake.NewSimpleClientset(),
+ },
+ }
+
+ // Seed the fake cluster with the live SE (ResourceVersion "12345").
+ _, err := rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries(namespace).Create(ctx, exist, metaV1.CreateOptions{})
+ if err != nil {
+ t.Error(err)
+ }
+ // Second Create of the same object is expected to fail with AlreadyExists;
+ // this only demonstrates the fake client's conflict behavior.
+ obj2, err2 := rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries(namespace).Create(ctx, exist, metaV1.CreateOptions{})
+ if k8sErrors.IsAlreadyExists(err2) {
+ fmt.Printf("obj: %v", obj2)
+ }
+ errConflict := k8sErrors.NewConflict(schema.GroupResource{}, "", nil)
+ errOther := errors.New("Some other error")
+
+ // Test when err is nil
+ err = retryUpdatingSE(logger.WithField("test", "success"), ctx, obj, exist, namespace, rc, nil, "test-op")
+ if err != nil {
+ t.Errorf("Expected nil error, got %v", err)
+ }
+
+ // get the SE here, it should still have the old resource version.
+ se, err := rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries(namespace).Get(ctx, exist.Name, metaV1.GetOptions{})
+ assert.Nil(t, err)
+ assert.Equal(t, "12345", se.ObjectMeta.ResourceVersion)
+
+ // Test when err is a conflict error
+ err = retryUpdatingSE(logger.WithField("test", "conflict"), ctx, obj, exist, namespace, rc, errConflict, "test-op")
+ if err != nil {
+ t.Errorf("Expected nil error, got %v", err)
+ }
+
+ // After the conflict-driven retry, the server copy should still report the
+ // live resourceVersion "12345" (obj's stale "123" must not win).
+ se, err = rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries(admiralParams.SyncNamespace).Get(ctx, exist.Name, metaV1.GetOptions{})
+ assert.Nil(t, err)
+ assert.Equal(t, "12345", se.ObjectMeta.ResourceVersion)
+
+ // Test when err is a non-conflict error
+ err = retryUpdatingSE(logger.WithField("test", "error"), ctx, obj, exist, namespace, rc, errOther, "test-op")
+ if err == nil {
+ t.Error("Expected non-nil error, got nil")
+ }
+}
From 0ae084ccd48d016d5b2f9c2043ea1cbd7c9ee1c7 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:23:21 -0400
Subject: [PATCH 162/235] copied admiral/pkg/clusters/serviceentry_od_test.go
changes from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/serviceentry_od_test.go | 428 +++++++++++++++++++
1 file changed, 428 insertions(+)
create mode 100644 admiral/pkg/clusters/serviceentry_od_test.go
diff --git a/admiral/pkg/clusters/serviceentry_od_test.go b/admiral/pkg/clusters/serviceentry_od_test.go
new file mode 100644
index 00000000..6e85c2f2
--- /dev/null
+++ b/admiral/pkg/clusters/serviceentry_od_test.go
@@ -0,0 +1,428 @@
+package clusters
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model"
+ admiralV1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ v13 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/istio"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/test"
+ commonUtil "github.com/istio-ecosystem/admiral/admiral/pkg/util"
+ "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+ istioNetworkingV1Alpha3 "istio.io/api/networking/v1alpha3"
+ istiofake "istio.io/client-go/pkg/clientset/versioned/fake"
+ coreV1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/rest"
+)
+
+// Test_updateGlobalOutlierDetectionCache verifies that
+// updateGlobalOutlierDetectionCache keeps only the most recently created
+// OutlierDetection CR per identity/env, and clears the cache entry when an
+// empty map is supplied.
+func Test_updateGlobalOutlierDetectionCache(t *testing.T) {
+
+ ctxLogger := logrus.WithFields(logrus.Fields{
+ "txId": "abc",
+ })
+ common.ResetSync()
+
+ remoteRegistryTest, _ := InitAdmiral(context.Background(), common.AdmiralParams{
+ KubeconfigPath: "testdata/fake.config",
+ LabelSet: &common.LabelSet{
+ AdmiralCRDIdentityLabel: "assetAlias",
+ },
+ })
+
+ type args struct {
+ cache *AdmiralCache
+ identity string
+ env string
+ outlierDetections map[string][]*admiralV1.OutlierDetection
+ }
+
+ testLabels := make(map[string]string)
+ testLabels["identity"] = "foo"
+ testLabels["assetAlias"] = "foo"
+
+ outlierDetection1 := admiralV1.OutlierDetection{
+ TypeMeta: metav1.TypeMeta{},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "foo",
+ Labels: testLabels,
+ },
+ Spec: makeOutlierDetectionTestModel(),
+ Status: v13.OutlierDetectionStatus{},
+ }
+
+ outlierDetection1.ObjectMeta.CreationTimestamp = metav1.Now()
+
+ // outlierDetection2 is created after outlierDetection1 (later
+ // CreationTimestamp), so it is the one expected to survive in the cache.
+ odConfig1 := makeOutlierDetectionTestModel()
+ odConfig1.OutlierConfig.ConsecutiveGatewayErrors = 100
+ outlierDetection2 := admiralV1.OutlierDetection{
+ TypeMeta: metav1.TypeMeta{},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo1",
+ Namespace: "foo1",
+ Labels: testLabels,
+ },
+ Spec: odConfig1,
+ Status: v13.OutlierDetectionStatus{},
+ }
+
+ outlierDetection2.ObjectMeta.CreationTimestamp = metav1.Now()
+
+ arg1 := args{
+ cache: remoteRegistryTest.AdmiralCache,
+ identity: "foo",
+ env: "e2e",
+ outlierDetections: nil,
+ }
+ arg1.outlierDetections = make(map[string][]*admiralV1.OutlierDetection)
+ arg1.outlierDetections["test"] = append(arg1.outlierDetections["test"], &outlierDetection1)
+ arg1.outlierDetections["test"] = append(arg1.outlierDetections["test"], &outlierDetection2)
+
+ // arg2 supplies an empty map while the cache is pre-populated, expecting
+ // the entry for foo/e2e to be removed.
+ arg2 := args{
+ cache: remoteRegistryTest.AdmiralCache,
+ identity: "foo",
+ env: "e2e",
+ outlierDetections: nil,
+ }
+ arg2.outlierDetections = make(map[string][]*admiralV1.OutlierDetection)
+
+ arg2.cache.OutlierDetectionCache.Put(&outlierDetection1)
+ arg2.cache.OutlierDetectionCache.Put(&outlierDetection2)
+
+ tests := []struct {
+ name string
+ args args
+ expected *admiralV1.OutlierDetection
+ wantedErr bool
+ }{
+ {"Validate only latest outlier detection object CRD present when more 2 object supplied", arg1, &outlierDetection2, false},
+ {"Validate no object present when no outlier detection found", arg2, nil, false},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ updateGlobalOutlierDetectionCache(ctxLogger, tt.args.cache, tt.args.identity, tt.args.env, tt.args.outlierDetections)
+ actualOD, err := remoteRegistryTest.AdmiralCache.OutlierDetectionCache.GetFromIdentity("foo", "e2e")
+ if tt.wantedErr {
+ assert.NotNil(t, err, "Expected Error")
+ }
+ assert.Equal(t, tt.expected, actualOD)
+ assert.Nil(t, err, "Expecting no errors")
+
+ })
+ }
+}
+
+// makeOutlierDetectionTestModel builds the OutlierDetection spec fixture used
+// by the tests in this file: a fully zeroed OutlierConfig attached to a
+// payments/e2e selector.
+func makeOutlierDetectionTestModel() model.OutlierDetection {
+	return model.OutlierDetection{
+		Selector: map[string]string{"identity": "payments", "env": "e2e"},
+		OutlierConfig: &model.OutlierConfig{
+			BaseEjectionTime:         0,
+			ConsecutiveGatewayErrors: 0,
+			Interval:                 0,
+			XXX_NoUnkeyedLiteral:     struct{}{},
+			XXX_unrecognized:         nil,
+			XXX_sizecache:            0,
+		},
+	}
+}
+
+// Test_modifyServiceEntryForNewServiceOrPodForOutlierDetection verifies that
+// modifyServiceEntryForNewServiceOrPod produces the same ServiceEntries
+// whether or not an OutlierDetection CR is present (the CR must not alter the
+// SE), and that the generated DestinationRule carries the outlier settings
+// from the CR (interval/baseEjectionTime/consecutiveGatewayErrors).
+func Test_modifyServiceEntryForNewServiceOrPodForOutlierDetection(t *testing.T) {
+ setupForServiceEntryTests()
+ var (
+ env = "test"
+ stop = make(chan struct{})
+ foobarMetadataName = "foobar"
+ foobarMetadataNamespace = "foobar-ns"
+ deployment1Identity = "deployment1"
+ deployment1 = makeTestDeployment(foobarMetadataName, foobarMetadataNamespace, deployment1Identity)
+ cluster1ID = "test-dev-1-k8s"
+ cluster2ID = "test-dev-2-k8s"
+ fakeIstioClient = istiofake.NewSimpleClientset()
+ config = rest.Config{Host: "localhost"}
+ resyncPeriod = time.Millisecond * 1
+ // Expected SE: endpoint is cluster1's ingress load balancer, address
+ // pre-seeded via serviceEntryAddressStore below.
+ expectedServiceEntriesForDeployment = map[string]*istioNetworkingV1Alpha3.ServiceEntry{
+ "test." + deployment1Identity + ".mesh": &istioNetworkingV1Alpha3.ServiceEntry{
+ Hosts: []string{"test." + deployment1Identity + ".mesh"},
+ Addresses: []string{"127.0.0.1"},
+ Ports: []*istioNetworkingV1Alpha3.ServicePort{
+ {
+ Number: 80,
+ Protocol: "http",
+ Name: "http",
+ },
+ },
+ Location: istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL,
+ Resolution: istioNetworkingV1Alpha3.ServiceEntry_DNS,
+ Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{
+ &istioNetworkingV1Alpha3.WorkloadEntry{
+ Address: "internal-load-balancer-" + cluster1ID,
+ Ports: map[string]uint32{
+ "http": 0,
+ },
+ Locality: "us-west-2",
+ },
+ },
+ SubjectAltNames: []string{"spiffe://prefix/" + deployment1Identity},
+ },
+ }
+ serviceEntryAddressStore = &ServiceEntryAddressStore{
+ EntryAddresses: map[string]string{
+ "test." + deployment1Identity + ".mesh-se": "127.0.0.1",
+ },
+ Addresses: []string{},
+ }
+ serviceForDeployment = &coreV1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: foobarMetadataName,
+ Namespace: foobarMetadataNamespace,
+ },
+ Spec: coreV1.ServiceSpec{
+ Selector: map[string]string{"app": deployment1Identity},
+ Ports: []coreV1.ServicePort{
+ {
+ Name: "http",
+ Port: 8090,
+ },
+ },
+ },
+ }
+ serviceForIngressInCluster1 = &coreV1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "istio-ingressgateway",
+ Namespace: "istio-system",
+ Labels: map[string]string{
+ "app": "gatewayapp",
+ },
+ },
+ Spec: coreV1.ServiceSpec{
+ Selector: map[string]string{"app": "istio-ingressgateway"},
+ Ports: []coreV1.ServicePort{
+ {
+ Name: "http",
+ Port: 8090,
+ },
+ },
+ },
+ Status: coreV1.ServiceStatus{
+ LoadBalancer: coreV1.LoadBalancerStatus{
+ Ingress: []coreV1.LoadBalancerIngress{
+ coreV1.LoadBalancerIngress{
+ Hostname: "internal-load-balancer-" + cluster1ID,
+ },
+ },
+ },
+ },
+ }
+ serviceForIngressInCluster2 = &coreV1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "istio-ingressgateway",
+ Namespace: "istio-system",
+ Labels: map[string]string{
+ "app": "gatewayapp",
+ },
+ },
+ Spec: coreV1.ServiceSpec{
+ Selector: map[string]string{"app": "istio-ingressgateway"},
+ Ports: []coreV1.ServicePort{
+ {
+ Name: "http",
+ Port: 8090,
+ },
+ },
+ },
+ Status: coreV1.ServiceStatus{
+ LoadBalancer: coreV1.LoadBalancerStatus{
+ Ingress: []coreV1.LoadBalancerIngress{
+ coreV1.LoadBalancerIngress{
+ Hostname: "internal-load-balancer-" + cluster2ID,
+ },
+ },
+ },
+ },
+ }
+ remoteRegistry, _ = InitAdmiral(context.Background(), admiralParamsForServiceEntryTests())
+ )
+ deploymentController, err := admiral.NewDeploymentController(make(chan struct{}), &test.MockDeploymentHandler{}, &config, resyncPeriod, loader.GetFakeClientLoader())
+ if err != nil {
+ t.Fail()
+ }
+ deploymentController.Cache.UpdateDeploymentToClusterCache(deployment1Identity, deployment1)
+ rolloutController, err := admiral.NewRolloutsController(make(chan struct{}), &test.MockRolloutHandler{}, &config, resyncPeriod, loader.GetFakeClientLoader())
+ if err != nil {
+ t.Fail()
+ }
+ serviceControllerCluster1, err := admiral.NewServiceController(stop, &test.MockServiceHandler{}, &config, resyncPeriod, loader.GetFakeClientLoader())
+ if err != nil {
+ t.Fatalf("%v", err)
+ }
+ serviceControllerCluster2, err := admiral.NewServiceController(stop, &test.MockServiceHandler{}, &config, resyncPeriod, loader.GetFakeClientLoader())
+ if err != nil {
+ t.Fatalf("%v", err)
+ }
+ virtualServiceController, err := istio.NewVirtualServiceController(make(chan struct{}), &test.MockVirtualServiceHandler{}, &config, resyncPeriod, loader.GetFakeClientLoader())
+ if err != nil {
+ t.Fatalf("%v", err)
+ }
+ globalTrafficPolicyController, err := admiral.NewGlobalTrafficController(make(chan struct{}), &test.MockGlobalTrafficHandler{}, &config, resyncPeriod, loader.GetFakeClientLoader())
+ if err != nil {
+ t.Fatalf("%v", err)
+ t.FailNow()
+ }
+
+ // The OutlierDetection CR under test: labeled/annotated to match
+ // deployment1 in env "test"; its OutlierConfig values are asserted on the
+ // generated DestinationRule at the end of each case.
+ outlierDetectionPolicy := v13.OutlierDetection{
+ TypeMeta: metav1.TypeMeta{},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: foobarMetadataName,
+ Namespace: foobarMetadataNamespace,
+ Annotations: map[string]string{"admiral.io/env": "test", "env": "test"},
+ Labels: map[string]string{"assetAlias": "deployment1", "identity": "deployment1"},
+ },
+ Spec: model.OutlierDetection{
+ OutlierConfig: &model.OutlierConfig{
+ BaseEjectionTime: 10,
+ ConsecutiveGatewayErrors: 10,
+ Interval: 100,
+ },
+ Selector: nil,
+ XXX_NoUnkeyedLiteral: struct{}{},
+ XXX_unrecognized: nil,
+ XXX_sizecache: 0,
+ },
+ Status: v13.OutlierDetectionStatus{},
+ }
+
+ outlierDetectionController, err := admiral.NewOutlierDetectionController(make(chan struct{}), &test.MockOutlierDetectionHandler{}, &config, resyncPeriod, loader.GetFakeClientLoader())
+ if err != nil {
+ t.Fatalf("%v", err)
+ t.FailNow()
+ }
+ outlierDetectionController.GetCache().Put(&outlierDetectionPolicy)
+
+ serviceControllerCluster1.Cache.Put(serviceForDeployment)
+ serviceControllerCluster1.Cache.Put(serviceForIngressInCluster1)
+ serviceControllerCluster2.Cache.Put(serviceForDeployment)
+ serviceControllerCluster2.Cache.Put(serviceForIngressInCluster2)
+ rcCluster1 := &RemoteController{
+ ClusterID: cluster1ID,
+ DeploymentController: deploymentController,
+ RolloutController: rolloutController,
+ ServiceController: serviceControllerCluster1,
+ VirtualServiceController: virtualServiceController,
+ NodeController: &admiral.NodeController{
+ Locality: &admiral.Locality{
+ Region: "us-west-2",
+ },
+ },
+ ServiceEntryController: &istio.ServiceEntryController{
+ IstioClient: fakeIstioClient,
+ Cache: istio.NewServiceEntryCache(),
+ },
+ DestinationRuleController: &istio.DestinationRuleController{
+ IstioClient: fakeIstioClient,
+ Cache: istio.NewDestinationRuleCache(),
+ },
+ GlobalTraffic: globalTrafficPolicyController,
+ OutlierDetectionController: outlierDetectionController,
+ }
+ rcCluster2 := &RemoteController{
+ ClusterID: cluster2ID,
+ DeploymentController: deploymentController,
+ RolloutController: rolloutController,
+ ServiceController: serviceControllerCluster2,
+ VirtualServiceController: virtualServiceController,
+ NodeController: &admiral.NodeController{
+ Locality: &admiral.Locality{
+ Region: "us-east-2",
+ },
+ },
+ ServiceEntryController: &istio.ServiceEntryController{
+ IstioClient: fakeIstioClient,
+ Cache: istio.NewServiceEntryCache(),
+ },
+ DestinationRuleController: &istio.DestinationRuleController{
+ IstioClient: fakeIstioClient,
+ Cache: istio.NewDestinationRuleCache(),
+ },
+ GlobalTraffic: globalTrafficPolicyController,
+ OutlierDetectionController: outlierDetectionController,
+ }
+
+ remoteRegistry.PutRemoteController(cluster1ID, rcCluster1)
+ remoteRegistry.PutRemoteController(cluster2ID, rcCluster2)
+ remoteRegistry.ServiceEntrySuspender = NewDefaultServiceEntrySuspender([]string{"asset1"})
+ remoteRegistry.StartTime = time.Now()
+ remoteRegistry.AdmiralCache.ServiceEntryAddressStore = serviceEntryAddressStore
+
+ testCases := []struct {
+ name string
+ assetIdentity string
+ readOnly bool
+ remoteRegistry *RemoteRegistry
+ expectedServiceEntries map[string]*istioNetworkingV1Alpha3.ServiceEntry
+ }{
+ //Both test case should return same service entry as outlier detection crd doesn't change Service Entry
+ {
+ name: "OutlierDetection present in namespace",
+ assetIdentity: deployment1Identity,
+ remoteRegistry: remoteRegistry,
+ expectedServiceEntries: expectedServiceEntriesForDeployment,
+ },
+ {
+ name: "OutlierDetection not present",
+ assetIdentity: deployment1Identity,
+ remoteRegistry: remoteRegistry,
+ expectedServiceEntries: expectedServiceEntriesForDeployment,
+ },
+ }
+
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ // NOTE(review): readOnly is never set true in any case above, so
+ // this branch (and the ReadOnlyEnabled assignment) is currently
+ // dead in this test.
+ if c.readOnly {
+ commonUtil.CurrentAdmiralState.ReadOnly = ReadOnlyEnabled
+ }
+
+ ctx := context.Background()
+ ctx = context.WithValue(ctx, "clusterName", "clusterName")
+ ctx = context.WithValue(ctx, "eventResourceType", common.Deployment)
+ serviceEntries, _ := modifyServiceEntryForNewServiceOrPod(
+ ctx,
+ admiral.Add,
+ env,
+ c.assetIdentity,
+ c.remoteRegistry,
+ )
+ if len(serviceEntries) != len(c.expectedServiceEntries) {
+ t.Fatalf("expected service entries to be of length: %d, but got: %d", len(c.expectedServiceEntries), len(serviceEntries))
+ }
+ if len(c.expectedServiceEntries) > 0 {
+ for k := range c.expectedServiceEntries {
+ if serviceEntries[k] == nil {
+ t.Fatalf(
+ "expected service entries to contain service entry for: %s, "+
+ "but did not find it. Got map: %v",
+ k, serviceEntries,
+ )
+ }
+ }
+ }
+ // The DestinationRule generated for the deployment must reflect the
+ // OutlierDetection CR's values (interval 100s, baseEjectionTime 10s,
+ // consecutiveGatewayErrors 10, consecutive5xxErrors 0).
+ destinationRule, err := c.remoteRegistry.remoteControllers[cluster1ID].DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules("ns").Get(ctx, "test.deployment1.mesh-default-dr", metav1.GetOptions{})
+ assert.Nil(t, err, "Expected no error for fetching outlier detection")
+ assert.Equal(t, int(destinationRule.Spec.TrafficPolicy.OutlierDetection.Interval.Seconds), 100)
+ assert.Equal(t, int(destinationRule.Spec.TrafficPolicy.OutlierDetection.BaseEjectionTime.Seconds), 10)
+ assert.Equal(t, int(destinationRule.Spec.TrafficPolicy.OutlierDetection.ConsecutiveGatewayErrors.Value), 10)
+ assert.Equal(t, int(destinationRule.Spec.TrafficPolicy.OutlierDetection.Consecutive_5XxErrors.Value), 0)
+ })
+ }
+}
From 203a642e85e2e764d644d7d2aba496e362874112 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:24:03 -0400
Subject: [PATCH 163/235] copied admiral/pkg/clusters/sidecar_handler.go
changes from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/sidecar_handler.go | 26 +++++++++++++++++++++++++
1 file changed, 26 insertions(+)
create mode 100644 admiral/pkg/clusters/sidecar_handler.go
diff --git a/admiral/pkg/clusters/sidecar_handler.go b/admiral/pkg/clusters/sidecar_handler.go
new file mode 100644
index 00000000..26607b38
--- /dev/null
+++ b/admiral/pkg/clusters/sidecar_handler.go
@@ -0,0 +1,26 @@
+package clusters
+
+import (
+ "context"
+
+ "istio.io/client-go/pkg/apis/networking/v1alpha3"
+)
+
+// SidecarHandler is responsible for handling Add/Update/Delete events for
+// Sidecar resources. All three handlers are intentional no-ops: Admiral
+// watches Sidecars but currently takes no action on changes to them.
+type SidecarHandler struct {
+	RemoteRegistry *RemoteRegistry
+	ClusterID      string
+}
+
+// Added is a no-op; Sidecar creation events are ignored.
+func (sh *SidecarHandler) Added(ctx context.Context, obj *v1alpha3.Sidecar) error {
+	return nil
+}
+
+// Updated is a no-op; Sidecar update events are ignored.
+func (sh *SidecarHandler) Updated(ctx context.Context, obj *v1alpha3.Sidecar) error {
+	return nil
+}
+
+// Deleted is a no-op; Sidecar deletion events are ignored.
+func (sh *SidecarHandler) Deleted(ctx context.Context, obj *v1alpha3.Sidecar) error {
+	return nil
+}
From 8214600b0365e611c6e9b7977a47bf99f75beca3 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:24:22 -0400
Subject: [PATCH 164/235] copied admiral/pkg/clusters/sidecar_handler_test.go
from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/sidecar_handler_test.go | 1 +
1 file changed, 1 insertion(+)
create mode 100644 admiral/pkg/clusters/sidecar_handler_test.go
diff --git a/admiral/pkg/clusters/sidecar_handler_test.go b/admiral/pkg/clusters/sidecar_handler_test.go
new file mode 100644
index 00000000..4eaddca0
--- /dev/null
+++ b/admiral/pkg/clusters/sidecar_handler_test.go
@@ -0,0 +1 @@
+package clusters
From 796dfc2d458ae603c0bbb60b2cf3e0c5394791ee Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:25:33 -0400
Subject: [PATCH 165/235] copied admiral/pkg/clusters/types.go from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/types.go | 698 ++++++++--------------------------
1 file changed, 149 insertions(+), 549 deletions(-)
diff --git a/admiral/pkg/clusters/types.go b/admiral/pkg/clusters/types.go
index 40ccdfdf..b98a1042 100644
--- a/admiral/pkg/clusters/types.go
+++ b/admiral/pkg/clusters/types.go
@@ -2,23 +2,20 @@ package clusters
import (
"context"
- "errors"
- "fmt"
+ "regexp"
"sync"
"time"
- "istio.io/client-go/pkg/apis/networking/v1alpha3"
- metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
+ admiralV1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/istio"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/secret"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/registry"
+
log "github.com/sirupsen/logrus"
- k8sAppsV1 "k8s.io/api/apps/v1"
- k8sV1 "k8s.io/api/core/v1"
+ networking "istio.io/api/networking/v1alpha3"
k8s "k8s.io/client-go/kubernetes"
)
@@ -35,42 +32,51 @@ type IgnoredIdentityCache struct {
}
type RemoteController struct {
- ClusterID string
- ApiServer string
- StartTime time.Time
- GlobalTraffic *admiral.GlobalTrafficController
- DeploymentController *admiral.DeploymentController
- ServiceController *admiral.ServiceController
- NodeController *admiral.NodeController
- ServiceEntryController *istio.ServiceEntryController
- DestinationRuleController *istio.DestinationRuleController
- VirtualServiceController *istio.VirtualServiceController
- SidecarController *istio.SidecarController
- RolloutController *admiral.RolloutController
- RoutingPolicyController *admiral.RoutingPolicyController
- stop chan struct{}
+ ClusterID string
+ ApiServer string
+ StartTime time.Time
+ GlobalTraffic *admiral.GlobalTrafficController
+ DeploymentController *admiral.DeploymentController
+ ServiceController *admiral.ServiceController
+ NodeController *admiral.NodeController
+ ServiceEntryController *istio.ServiceEntryController
+ DestinationRuleController *istio.DestinationRuleController
+ VirtualServiceController *istio.VirtualServiceController
+ SidecarController *istio.SidecarController
+ RolloutController *admiral.RolloutController
+ RoutingPolicyController *admiral.RoutingPolicyController
+ EnvoyFilterController *admiral.EnvoyFilterController
+ OutlierDetectionController *admiral.OutlierDetectionController
+ ClientConnectionConfigController *admiral.ClientConnectionConfigController
+ stop chan struct{}
//listener for normal types
}
type AdmiralCache struct {
- CnameClusterCache *common.MapOfMaps
- CnameDependentClusterCache *common.MapOfMaps
- CnameIdentityCache *sync.Map
- IdentityClusterCache *common.MapOfMaps
- WorkloadSelectorCache *common.MapOfMaps
- ClusterLocalityCache *common.MapOfMaps
- IdentityDependencyCache *common.MapOfMaps
- SubsetServiceEntryIdentityCache *sync.Map
- ServiceEntryAddressStore *ServiceEntryAddressStore
- ConfigMapController admiral.ConfigMapControllerInterface //todo this should be in the remotecontrollers map once we expand it to have one configmap per cluster
- GlobalTrafficCache *globalTrafficCache //The cache needs to live in the handler because it needs access to deployments
- DependencyNamespaceCache *common.SidecarEgressMap
- SeClusterCache *common.MapOfMaps
- RoutingPolicyFilterCache *routingPolicyFilterCache
- RoutingPolicyCache *routingPolicyCache
- DependencyProxyVirtualServiceCache *dependencyProxyVirtualServiceCache
- SourceToDestinations *sourceToDestinations //This cache is to fetch list of all dependencies for a given source identity
- argoRolloutsEnabled bool
+ CnameClusterCache *common.MapOfMaps
+ CnameDependentClusterCache *common.MapOfMaps
+ CnameIdentityCache *sync.Map
+ IdentityClusterCache *common.MapOfMaps
+ ClusterLocalityCache *common.MapOfMaps
+ IdentityDependencyCache *common.MapOfMaps
+ ServiceEntryAddressStore *ServiceEntryAddressStore
+ ConfigMapController admiral.ConfigMapControllerInterface //todo this should be in the remotecontrollers map once we expand it to have one configmap per cluster
+ GlobalTrafficCache GlobalTrafficCache //The cache needs to live in the handler because it needs access to deployments
+ OutlierDetectionCache OutlierDetectionCache
+ ClientConnectionConfigCache ClientConnectionConfigCache
+ DependencyNamespaceCache *common.SidecarEgressMap
+ SeClusterCache *common.MapOfMaps
+ RoutingPolicyFilterCache *routingPolicyFilterCache
+ SourceToDestinations *sourceToDestinations //This cache is to fetch list of all dependencies for a given source identity,
+ TrafficConfigIgnoreAssets []string
+ GatewayAssets []string
+ argoRolloutsEnabled bool
+ DynamoDbEndpointUpdateCache *sync.Map
+ TrafficConfigWorkingScope []*regexp.Regexp // regex of assets that are visible to Cartographer
+ IdentitiesWithAdditionalEndpoints *sync.Map
+ IdentityClusterNamespaceCache *common.MapOfMapOfMaps
+ CnameDependentClusterNamespaceCache *common.MapOfMapOfMaps
+ PartitionIdentityCache *common.Map
}
type RemoteRegistry struct {
@@ -81,57 +87,111 @@ type RemoteRegistry struct {
ctx context.Context
AdmiralCache *AdmiralCache
StartTime time.Time
- ServiceEntryUpdateSuspender ServiceEntrySuspender
- ExcludedIdentityMap map[string]bool
+ ServiceEntrySuspender ServiceEntrySuspender
+ AdmiralDatabaseClient AdmiralDatabaseManager
+ DependencyController *admiral.DependencyController
+ ClientLoader loader.ClientLoader
+ ClusterShardHandler registry.ClusterShardStore
+ ClusterIdentityStoreHandler registry.ClusterIdentityStore
}
+// ModifySEFunc is a function that follows the dependency injection pattern which is used by HandleEventForGlobalTrafficPolicy
+type ModifySEFunc func(ctx context.Context, event admiral.EventType, env string, sourceIdentity string, remoteRegistry *RemoteRegistry) (map[string]*networking.ServiceEntry, error)
+
+// TODO - Write a new function to prepare a new Map.
+
func NewRemoteRegistry(ctx context.Context, params common.AdmiralParams) *RemoteRegistry {
- var serviceEntryUpdateSuspender ServiceEntrySuspender
+ var serviceEntrySuspender ServiceEntrySuspender
+ var admiralDatabaseClient AdmiralDatabaseManager
+ var err error
+
gtpCache := &globalTrafficCache{}
- gtpCache.identityCache = make(map[string]*v1.GlobalTrafficPolicy)
+ gtpCache.identityCache = make(map[string]*admiralV1.GlobalTrafficPolicy)
gtpCache.mutex = &sync.Mutex{}
+
+ //Initialize OutlierDetection Cache
+ odCache := NewOutlierDetectionCache()
+
+ clientConnectionSettingsCache := &clientConnectionSettingsCache{
+ identityCache: make(map[string]*admiralV1.ClientConnectionConfig),
+ mutex: &sync.RWMutex{},
+ }
+
rpFilterCache := &routingPolicyFilterCache{}
rpFilterCache.filterCache = make(map[string]map[string]map[string]string)
rpFilterCache.mutex = &sync.Mutex{}
- rpCache := &routingPolicyCache{}
- rpCache.identityCache = make(map[string]*v1.RoutingPolicy)
- rpCache.mutex = &sync.Mutex{}
admiralCache := &AdmiralCache{
- IdentityClusterCache: common.NewMapOfMaps(),
- CnameClusterCache: common.NewMapOfMaps(),
- CnameDependentClusterCache: common.NewMapOfMaps(),
- ClusterLocalityCache: common.NewMapOfMaps(),
- IdentityDependencyCache: common.NewMapOfMaps(),
- WorkloadSelectorCache: common.NewMapOfMaps(),
- RoutingPolicyFilterCache: rpFilterCache,
- RoutingPolicyCache: rpCache,
- DependencyNamespaceCache: common.NewSidecarEgressMap(),
- CnameIdentityCache: &sync.Map{},
- SubsetServiceEntryIdentityCache: &sync.Map{},
- ServiceEntryAddressStore: &ServiceEntryAddressStore{EntryAddresses: map[string]string{}, Addresses: []string{}},
- GlobalTrafficCache: gtpCache,
- SeClusterCache: common.NewMapOfMaps(),
- argoRolloutsEnabled: params.ArgoRolloutsEnabled,
- DependencyProxyVirtualServiceCache: &dependencyProxyVirtualServiceCache{
- identityVSCache: make(map[string]map[string]*v1alpha3.VirtualService),
- mutex: &sync.Mutex{},
- },
+ IdentityClusterCache: common.NewMapOfMaps(),
+ CnameClusterCache: common.NewMapOfMaps(),
+ CnameDependentClusterCache: common.NewMapOfMaps(),
+ IdentityDependencyCache: common.NewMapOfMaps(),
+ RoutingPolicyFilterCache: rpFilterCache,
+ DependencyNamespaceCache: common.NewSidecarEgressMap(),
+ CnameIdentityCache: &sync.Map{},
+ ServiceEntryAddressStore: &ServiceEntryAddressStore{EntryAddresses: map[string]string{}, Addresses: []string{}},
+ GlobalTrafficCache: gtpCache,
+ OutlierDetectionCache: odCache,
+ ClientConnectionConfigCache: clientConnectionSettingsCache,
+ SeClusterCache: common.NewMapOfMaps(),
+ DynamoDbEndpointUpdateCache: &sync.Map{},
+ argoRolloutsEnabled: params.ArgoRolloutsEnabled,
SourceToDestinations: &sourceToDestinations{
sourceDestinations: make(map[string][]string),
mutex: &sync.Mutex{},
},
+ IdentitiesWithAdditionalEndpoints: &sync.Map{},
+ IdentityClusterNamespaceCache: common.NewMapOfMapOfMaps(),
+ CnameDependentClusterNamespaceCache: common.NewMapOfMapOfMaps(),
+ PartitionIdentityCache: common.NewMap(),
+ }
+ if common.GetAdmiralProfile() == common.AdmiralProfileDefault || common.GetAdmiralProfile() == common.AdmiralProfilePerf {
+ serviceEntrySuspender = NewDefaultServiceEntrySuspender(params.ExcludedIdentityList)
+ } else if common.GetAdmiralProfile() == common.AdmiralProfileIntuit {
+ serviceEntrySuspender = NewDynamicServiceEntrySuspender(ctx, params)
+ } else {
+ serviceEntrySuspender = NewDummyServiceEntrySuspender()
+ }
+
+ if common.GetEnableWorkloadDataStorage() {
+ admiralDatabaseClient, err = NewAdmiralDatabaseClient(common.GetAdmiralConfigPath(), NewDynamoClient)
+ if err != nil {
+ alertMsgWhenFailedToConfigureDatabaseClient := "failed to configure admiral database client"
+ log.WithField("error", err.Error()).Error(alertMsgWhenFailedToConfigureDatabaseClient)
+ }
+ } else {
+ admiralDatabaseClient = &DummyDatabaseClient{}
}
- if common.GetSecretResolver() == "" {
- serviceEntryUpdateSuspender = NewDefaultServiceEntrySuspender(params.ExcludedIdentityList)
+
+ var clientLoader loader.ClientLoader
+ if common.GetAdmiralProfile() == common.AdmiralProfilePerf {
+ clientLoader = loader.GetFakeClientLoader()
} else {
- serviceEntryUpdateSuspender = NewDummyServiceEntrySuspender()
+ clientLoader = loader.GetKubeClientLoader()
}
+
return &RemoteRegistry{
- ctx: ctx,
- StartTime: time.Now(),
- remoteControllers: make(map[string]*RemoteController),
- AdmiralCache: admiralCache,
- ServiceEntryUpdateSuspender: serviceEntryUpdateSuspender,
+ ctx: ctx,
+ StartTime: time.Now(),
+ remoteControllers: make(map[string]*RemoteController),
+ AdmiralCache: admiralCache,
+ ServiceEntrySuspender: serviceEntrySuspender,
+ AdmiralDatabaseClient: admiralDatabaseClient,
+ ClientLoader: clientLoader,
+ }
+}
+
+// NewRemoteRegistryForHAController - creates an instance of RemoteRegistry
+// which initializes properties relevant to database builder functionality
+func NewRemoteRegistryForHAController(ctx context.Context) *RemoteRegistry {
+ return &RemoteRegistry{
+ ctx: ctx,
+ StartTime: time.Now(),
+ remoteControllers: make(map[string]*RemoteController),
+ ClientLoader: loader.GetKubeClientLoader(),
+ AdmiralCache: &AdmiralCache{
+ IdentityClusterCache: common.NewMapOfMaps(),
+ IdentityDependencyCache: common.NewMapOfMaps(),
+ },
}
}
@@ -140,7 +200,7 @@ type sourceToDestinations struct {
mutex *sync.Mutex
}
-func (d *sourceToDestinations) put(dependencyObj *v1.Dependency) {
+func (d *sourceToDestinations) put(dependencyObj *admiralV1.Dependency) {
if dependencyObj.Spec.Source == "" {
return
}
@@ -152,7 +212,7 @@ func (d *sourceToDestinations) put(dependencyObj *v1.Dependency) {
}
d.mutex.Lock()
d.sourceDestinations[dependencyObj.Spec.Source] = dependencyObj.Spec.Destinations
- defer d.mutex.Unlock()
+ d.mutex.Unlock()
}
func (d *sourceToDestinations) Get(key string) []string {
@@ -214,485 +274,25 @@ type ServiceEntryAddressStore struct {
Addresses []string `yaml:"addresses,omitempty"` //trading space for efficiency - this will give a quick way to validate that the address is unique
}
-type DependencyHandler struct {
- RemoteRegistry *RemoteRegistry
- DepController *admiral.DependencyController
+type RouteConfig struct {
+ ServicesConfig []*ServiceRouteConfig `json:"servicesRouteConfig"`
}
-type DependencyProxyHandler struct {
- RemoteRegistry *RemoteRegistry
- DepController *admiral.DependencyProxyController
- dependencyProxyDefaultHostNameGenerator DependencyProxyDefaultHostNameGenerator
+type ServiceRouteConfig struct {
+ WorkloadEnvRevision map[string]string `json:"workloadEnvRevision,omitempty"`
+ ServiceAssetAlias string `json:"serviceAssetAlias,omitempty"`
+ Routes []*Route `json:"routes,omitempty"`
}
-type GlobalTrafficHandler struct {
- RemoteRegistry *RemoteRegistry
- ClusterID string
-}
-
-type RolloutHandler struct {
- RemoteRegistry *RemoteRegistry
- ClusterID string
-}
-
-type globalTrafficCache struct {
- //map of global traffic policies key=environment.identity, value: GlobalTrafficPolicy object
- identityCache map[string]*v1.GlobalTrafficPolicy
-
- mutex *sync.Mutex
-}
-
-func (g *globalTrafficCache) GetFromIdentity(identity string, environment string) *v1.GlobalTrafficPolicy {
- return g.identityCache[common.ConstructGtpKey(environment, identity)]
-}
-
-func (g *globalTrafficCache) Put(gtp *v1.GlobalTrafficPolicy) error {
- if gtp.Name == "" {
- //no GTP, throw error
- return errors.New("cannot add an empty globaltrafficpolicy to the cache")
- }
- defer g.mutex.Unlock()
- g.mutex.Lock()
- var gtpIdentity = gtp.Labels[common.GetGlobalTrafficDeploymentLabel()]
- var gtpEnv = common.GetGtpEnv(gtp)
-
- log.Infof("adding GTP with name %v to GTP cache. LabelMatch=%v env=%v", gtp.Name, gtpIdentity, gtpEnv)
- identity := gtp.Labels[common.GetGlobalTrafficDeploymentLabel()]
- key := common.ConstructGtpKey(gtpEnv, identity)
- g.identityCache[key] = gtp
- return nil
-}
-
-func (g *globalTrafficCache) Delete(identity string, environment string) {
- key := common.ConstructGtpKey(environment, identity)
- if _, ok := g.identityCache[key]; ok {
- log.Infof("deleting gtp with key=%s from global GTP cache", key)
- delete(g.identityCache, key)
- }
-}
-
-type RoutingPolicyHandler struct {
- RemoteRegistry *RemoteRegistry
- ClusterID string
-}
-
-type routingPolicyCache struct {
- // map of routing policies key=environment.identity, value: RoutingPolicy object
- // only one routing policy per identity + env is allowed
- identityCache map[string]*v1.RoutingPolicy
- mutex *sync.Mutex
-}
-
-func (r *routingPolicyCache) Delete(identity string, environment string) {
- defer r.mutex.Unlock()
- r.mutex.Lock()
- key := common.ConstructRoutingPolicyKey(environment, identity)
- if _, ok := r.identityCache[key]; ok {
- log.Infof("deleting RoutingPolicy with key=%s from global RoutingPolicy cache", key)
- delete(r.identityCache, key)
- }
-}
-
-func (r *routingPolicyCache) GetFromIdentity(identity string, environment string) *v1.RoutingPolicy {
- defer r.mutex.Unlock()
- r.mutex.Lock()
- return r.identityCache[common.ConstructRoutingPolicyKey(environment, identity)]
-}
-
-func (r *routingPolicyCache) Put(rp *v1.RoutingPolicy) error {
- if rp == nil || rp.Name == "" {
- // no RoutingPolicy, throw error
- return errors.New("cannot add an empty RoutingPolicy to the cache")
- }
- if rp.Labels == nil {
- return errors.New("labels empty in RoutingPolicy")
- }
- defer r.mutex.Unlock()
- r.mutex.Lock()
- var rpIdentity = rp.Labels[common.GetRoutingPolicyLabel()]
- var rpEnv = common.GetRoutingPolicyEnv(rp)
-
- log.Infof("Adding RoutingPolicy with name %v to RoutingPolicy cache. LabelMatch=%v env=%v", rp.Name, rpIdentity, rpEnv)
- key := common.ConstructRoutingPolicyKey(rpEnv, rpIdentity)
- r.identityCache[key] = rp
-
- return nil
-}
-
-type routingPolicyFilterCache struct {
- // map of envoyFilters key=environment+identity of the routingPolicy, value is a map [clusterId -> map [filterName -> filterName]]
- filterCache map[string]map[string]map[string]string
- mutex *sync.Mutex
-}
-
-func (r *routingPolicyFilterCache) Get(identityEnvKey string) (filters map[string]map[string]string) {
- defer r.mutex.Unlock()
- r.mutex.Lock()
- return r.filterCache[identityEnvKey]
-}
-
-func (r *routingPolicyFilterCache) Put(identityEnvKey string, clusterId string, filterName string) {
- defer r.mutex.Unlock()
- r.mutex.Lock()
- if r.filterCache[identityEnvKey] == nil {
- r.filterCache[identityEnvKey] = make(map[string]map[string]string)
- }
-
- if r.filterCache[identityEnvKey][clusterId] == nil {
- r.filterCache[identityEnvKey][clusterId] = make(map[string]string)
- }
- r.filterCache[identityEnvKey][clusterId][filterName] = filterName
-}
-
-func (r *routingPolicyFilterCache) Delete(identityEnvKey string) {
- if CurrentAdmiralState.ReadOnly {
- log.Infof(LogFormat, admiral.Delete, "routingpolicy", identityEnvKey, "", "skipping read-only mode")
- return
- }
- if common.GetEnableRoutingPolicy() {
- defer r.mutex.Unlock()
- r.mutex.Lock()
- // delete all envoyFilters for a given identity+env key
- delete(r.filterCache, identityEnvKey)
- } else {
- log.Infof(LogFormat, admiral.Delete, "routingpolicy", identityEnvKey, "", "routingpolicy disabled")
- }
-}
-func (r RoutingPolicyHandler) Added(ctx context.Context, obj *v1.RoutingPolicy) {
- if CurrentAdmiralState.ReadOnly {
- log.Infof(LogFormat, admiral.Add, "routingpolicy", "", "", "skipping read-only mode")
- return
- }
- if common.GetEnableRoutingPolicy() {
- if common.ShouldIgnoreResource(obj.ObjectMeta) {
- log.Infof(LogFormat, "success", "routingpolicy", obj.Name, "", "Ignored the RoutingPolicy because of the annotation")
- return
- }
- dependents := getDependents(obj, r)
- if len(dependents) == 0 {
- log.Info("No dependents found for Routing Policy - ", obj.Name)
- return
- }
- r.processroutingPolicy(ctx, dependents, obj, admiral.Add)
-
- log.Infof(LogFormat, admiral.Add, "routingpolicy", obj.Name, "", "finished processing routing policy")
- } else {
- log.Infof(LogFormat, admiral.Add, "routingpolicy", obj.Name, "", "routingpolicy disabled")
- }
-}
-
-func (r RoutingPolicyHandler) processroutingPolicy(ctx context.Context, dependents map[string]string, routingPolicy *v1.RoutingPolicy, eventType admiral.EventType) {
- for _, remoteController := range r.RemoteRegistry.remoteControllers {
- for _, dependent := range dependents {
-
- // Check if the dependent exists in this remoteCluster. If so, we create an envoyFilter with dependent identity as workload selector
- if _, ok := r.RemoteRegistry.AdmiralCache.IdentityClusterCache.Get(dependent).Copy()[remoteController.ClusterID]; ok {
- selectors := r.RemoteRegistry.AdmiralCache.WorkloadSelectorCache.Get(dependent + remoteController.ClusterID).Copy()
- if len(selectors) != 0 {
-
- filter, err := createOrUpdateEnvoyFilter(ctx, remoteController, routingPolicy, eventType, dependent, r.RemoteRegistry.AdmiralCache, selectors)
- if err != nil {
- // Best effort create
- log.Errorf(LogErrFormat, eventType, "routingpolicy", routingPolicy.Name, remoteController.ClusterID, err)
- } else {
- log.Infof("msg=%s name=%s cluster=%s", "created envoyfilter", filter.Name, remoteController.ClusterID)
- }
- }
- }
- }
-
- }
-}
-
-func (r RoutingPolicyHandler) Updated(ctx context.Context, obj *v1.RoutingPolicy) {
- if CurrentAdmiralState.ReadOnly {
- log.Infof(LogFormat, admiral.Update, "routingpolicy", "", "", "skipping read-only mode")
- return
- }
- if common.GetEnableRoutingPolicy() {
- if common.ShouldIgnoreResource(obj.ObjectMeta) {
- log.Infof(LogFormat, admiral.Update, "routingpolicy", obj.Name, "", "Ignored the RoutingPolicy because of the annotation")
- // We need to process this as a delete event.
- r.Deleted(ctx, obj)
- return
- }
- dependents := getDependents(obj, r)
- if len(dependents) == 0 {
- return
- }
- r.processroutingPolicy(ctx, dependents, obj, admiral.Update)
-
- log.Infof(LogFormat, admiral.Update, "routingpolicy", obj.Name, "", "updated routing policy")
- } else {
- log.Infof(LogFormat, admiral.Update, "routingpolicy", obj.Name, "", "routingpolicy disabled")
- }
-}
-
-// getDependents - Returns the client dependents for the destination service with routing policy
-// Returns a list of asset ID's of the client services or nil if no dependents are found
-func getDependents(obj *v1.RoutingPolicy, r RoutingPolicyHandler) map[string]string {
- sourceIdentity := common.GetRoutingPolicyIdentity(obj)
- if len(sourceIdentity) == 0 {
- err := errors.New("identity label is missing")
- log.Warnf(LogErrFormat, "add", "RoutingPolicy", obj.Name, r.ClusterID, err)
- return nil
- }
-
- dependents := r.RemoteRegistry.AdmiralCache.IdentityDependencyCache.Get(sourceIdentity).Copy()
- return dependents
-}
-
-func (r RoutingPolicyHandler) Deleted(ctx context.Context, obj *v1.RoutingPolicy) {
- dependents := getDependents(obj, r)
- if len(dependents) != 0 {
- r.deleteEnvoyFilters(ctx, dependents, obj, admiral.Delete)
- log.Infof(LogFormat, admiral.Delete, "routingpolicy", obj.Name, "", "deleted envoy filter for routing policy")
- }
-}
-
-func (r RoutingPolicyHandler) deleteEnvoyFilters(ctx context.Context, dependents map[string]string, obj *v1.RoutingPolicy, eventType admiral.EventType) {
- for _, dependent := range dependents {
- key := dependent + common.GetRoutingPolicyEnv(obj)
- clusterIdFilterMap := r.RemoteRegistry.AdmiralCache.RoutingPolicyFilterCache.Get(key)
- for _, rc := range r.RemoteRegistry.remoteControllers {
- if filterMap, ok := clusterIdFilterMap[rc.ClusterID]; ok {
- for _, filter := range filterMap {
- log.Infof(LogFormat, eventType, "envoyfilter", filter, rc.ClusterID, "deleting")
- err := rc.RoutingPolicyController.IstioClient.NetworkingV1alpha3().EnvoyFilters("istio-system").Delete(ctx, filter, metaV1.DeleteOptions{})
- if err != nil {
- // Best effort delete
- log.Errorf(LogErrFormat, eventType, "envoyfilter", filter, rc.ClusterID, err)
- } else {
- log.Infof(LogFormat, eventType, "envoyfilter", filter, rc.ClusterID, "deleting from cache")
- r.RemoteRegistry.AdmiralCache.RoutingPolicyFilterCache.Delete(key)
- }
- }
- }
- }
- }
-}
-
-type DeploymentHandler struct {
- RemoteRegistry *RemoteRegistry
- ClusterID string
+type Route struct {
+ Name string `json:"name"`
+ Inbound string `json:"inbound"`
+ Outbound string `json:"outbound"`
+ WorkloadEnvSelectors []string `json:"workloadEnvSelectors"`
+ OutboundEndpoints []string
}
type NodeHandler struct {
RemoteRegistry *RemoteRegistry
ClusterID string
}
-
-type ServiceHandler struct {
- RemoteRegistry *RemoteRegistry
- ClusterID string
-}
-
-func (sh *ServiceHandler) Added(ctx context.Context, obj *k8sV1.Service) {
- log.Infof(LogFormat, "Added", "service", obj.Name, sh.ClusterID, "received")
- err := HandleEventForService(ctx, obj, sh.RemoteRegistry, sh.ClusterID)
- if err != nil {
- log.Errorf(LogErrFormat, "Error", "service", obj.Name, sh.ClusterID, err)
- }
-}
-
-func (sh *ServiceHandler) Updated(ctx context.Context, obj *k8sV1.Service) {
- log.Infof(LogFormat, "Updated", "service", obj.Name, sh.ClusterID, "received")
- err := HandleEventForService(ctx, obj, sh.RemoteRegistry, sh.ClusterID)
- if err != nil {
- log.Errorf(LogErrFormat, "Error", "service", obj.Name, sh.ClusterID, err)
- }
-}
-
-func (sh *ServiceHandler) Deleted(ctx context.Context, obj *k8sV1.Service) {
- log.Infof(LogFormat, "Deleted", "service", obj.Name, sh.ClusterID, "received")
- err := HandleEventForService(ctx, obj, sh.RemoteRegistry, sh.ClusterID)
- if err != nil {
- log.Errorf(LogErrFormat, "Error", "service", obj.Name, sh.ClusterID, err)
- }
-}
-
-func HandleEventForService(ctx context.Context, svc *k8sV1.Service, remoteRegistry *RemoteRegistry, clusterName string) error {
- if svc.Spec.Selector == nil {
- return fmt.Errorf("selector missing on service=%s in namespace=%s cluster=%s", svc.Name, svc.Namespace, clusterName)
- }
- rc := remoteRegistry.GetRemoteController(clusterName)
- if rc == nil {
- return fmt.Errorf("could not find the remote controller for cluster=%s", clusterName)
- }
- deploymentController := rc.DeploymentController
- rolloutController := rc.RolloutController
- if deploymentController != nil {
- matchingDeployments := deploymentController.GetDeploymentBySelectorInNamespace(ctx, svc.Spec.Selector, svc.Namespace)
- if len(matchingDeployments) > 0 {
- for _, deployment := range matchingDeployments {
- HandleEventForDeployment(ctx, admiral.Update, &deployment, remoteRegistry, clusterName)
- }
- }
- }
- if common.GetAdmiralParams().ArgoRolloutsEnabled && rolloutController != nil {
- matchingRollouts := rolloutController.GetRolloutBySelectorInNamespace(ctx, svc.Spec.Selector, svc.Namespace)
-
- if len(matchingRollouts) > 0 {
- for _, rollout := range matchingRollouts {
- HandleEventForRollout(ctx, admiral.Update, &rollout, remoteRegistry, clusterName)
- }
- }
- }
- return nil
-}
-
-func (dh *DependencyHandler) Added(ctx context.Context, obj *v1.Dependency) {
-
- log.Infof(LogFormat, "Add", "dependency-record", obj.Name, "", "Received=true namespace="+obj.Namespace)
-
- HandleDependencyRecord(ctx, obj, dh.RemoteRegistry)
-
-}
-
-func (dh *DependencyHandler) Updated(ctx context.Context, obj *v1.Dependency) {
-
- log.Infof(LogFormat, "Update", "dependency-record", obj.Name, "", "Received=true namespace="+obj.Namespace)
-
- // need clean up before handle it as added, I need to handle update that delete the dependency, find diff first
- // this is more complex cos want to make sure no other service depend on the same service (which we just removed the dependancy).
- // need to make sure nothing depend on that before cleaning up the SE for that service
- HandleDependencyRecord(ctx, obj, dh.RemoteRegistry)
-
-}
-
-func HandleDependencyRecord(ctx context.Context, obj *v1.Dependency, remoteRegitry *RemoteRegistry) {
- sourceIdentity := obj.Spec.Source
-
- if len(sourceIdentity) == 0 {
- log.Infof(LogFormat, "Event", "dependency-record", obj.Name, "", "No identity found namespace="+obj.Namespace)
- }
-
- updateIdentityDependencyCache(sourceIdentity, remoteRegitry.AdmiralCache.IdentityDependencyCache, obj)
-
- remoteRegitry.AdmiralCache.SourceToDestinations.put(obj)
-
-}
-
-func (dh *DependencyHandler) Deleted(ctx context.Context, obj *v1.Dependency) {
- // special case of update, delete the dependency crd file for one service, need to loop through all ones we plan to update
- // and make sure nobody else is relying on the same SE in same cluster
- log.Infof(LogFormat, "Deleted", "dependency", obj.Name, "", "Skipping, not implemented")
-}
-
-func (gtp *GlobalTrafficHandler) Added(ctx context.Context, obj *v1.GlobalTrafficPolicy) {
- log.Infof(LogFormat, "Added", "globaltrafficpolicy", obj.Name, gtp.ClusterID, "received")
- err := HandleEventForGlobalTrafficPolicy(ctx, admiral.Add, obj, gtp.RemoteRegistry, gtp.ClusterID)
- if err != nil {
- log.Infof(err.Error())
- }
-}
-
-func (gtp *GlobalTrafficHandler) Updated(ctx context.Context, obj *v1.GlobalTrafficPolicy) {
- log.Infof(LogFormat, "Updated", "globaltrafficpolicy", obj.Name, gtp.ClusterID, "received")
- err := HandleEventForGlobalTrafficPolicy(ctx, admiral.Update, obj, gtp.RemoteRegistry, gtp.ClusterID)
- if err != nil {
- log.Infof(err.Error())
- }
-}
-
-func (gtp *GlobalTrafficHandler) Deleted(ctx context.Context, obj *v1.GlobalTrafficPolicy) {
- log.Infof(LogFormat, "Deleted", "globaltrafficpolicy", obj.Name, gtp.ClusterID, "received")
- err := HandleEventForGlobalTrafficPolicy(ctx, admiral.Delete, obj, gtp.RemoteRegistry, gtp.ClusterID)
- if err != nil {
- log.Infof(err.Error())
- }
-}
-
-func (pc *DeploymentHandler) Added(ctx context.Context, obj *k8sAppsV1.Deployment) {
- HandleEventForDeployment(ctx, admiral.Add, obj, pc.RemoteRegistry, pc.ClusterID)
-}
-
-func (pc *DeploymentHandler) Deleted(ctx context.Context, obj *k8sAppsV1.Deployment) {
- HandleEventForDeployment(ctx, admiral.Delete, obj, pc.RemoteRegistry, pc.ClusterID)
-}
-
-func (rh *RolloutHandler) Added(ctx context.Context, obj *argo.Rollout) {
- HandleEventForRollout(ctx, admiral.Add, obj, rh.RemoteRegistry, rh.ClusterID)
-}
-
-func (rh *RolloutHandler) Updated(ctx context.Context, obj *argo.Rollout) {
- log.Infof(LogFormat, "Updated", "rollout", obj.Name, rh.ClusterID, "received")
-}
-
-func (rh *RolloutHandler) Deleted(ctx context.Context, obj *argo.Rollout) {
- HandleEventForRollout(ctx, admiral.Delete, obj, rh.RemoteRegistry, rh.ClusterID)
-}
-
-// HandleEventForRollout helper function to handle add and delete for RolloutHandler
-func HandleEventForRollout(ctx context.Context, event admiral.EventType, obj *argo.Rollout, remoteRegistry *RemoteRegistry, clusterName string) {
-
- log.Infof(LogFormat, event, "rollout", obj.Name, clusterName, "Received")
- globalIdentifier := common.GetRolloutGlobalIdentifier(obj)
-
- if len(globalIdentifier) == 0 {
- log.Infof(LogFormat, "Event", "rollout", obj.Name, clusterName, "Skipped as '"+common.GetWorkloadIdentifier()+" was not found', namespace="+obj.Namespace)
- return
- }
-
- env := common.GetEnvForRollout(obj)
-
- // Use the same function as added deployment function to update and put new service entry in place to replace old one
- modifyServiceEntryForNewServiceOrPod(ctx, event, env, globalIdentifier, remoteRegistry)
-}
-
-// helper function to handle add and delete for DeploymentHandler
-func HandleEventForDeployment(ctx context.Context, event admiral.EventType, obj *k8sAppsV1.Deployment, remoteRegistry *RemoteRegistry, clusterName string) {
-
- log.Infof(LogFormat, event, "deployment", obj.Name, clusterName, "Received")
- globalIdentifier := common.GetDeploymentGlobalIdentifier(obj)
-
- if len(globalIdentifier) == 0 {
- log.Infof(LogFormat, "Event", "deployment", obj.Name, clusterName, "Skipped as '"+common.GetWorkloadIdentifier()+" was not found', namespace="+obj.Namespace)
- return
- }
-
- env := common.GetEnv(obj)
-
- // Use the same function as added deployment function to update and put new service entry in place to replace old one
- modifyServiceEntryForNewServiceOrPod(ctx, event, env, globalIdentifier, remoteRegistry)
-}
-
-// HandleEventForGlobalTrafficPolicy processes all the events related to GTPs
-func HandleEventForGlobalTrafficPolicy(ctx context.Context, event admiral.EventType, gtp *v1.GlobalTrafficPolicy,
- remoteRegistry *RemoteRegistry, clusterName string) error {
-
- globalIdentifier := common.GetGtpIdentity(gtp)
-
- if len(globalIdentifier) == 0 {
- return fmt.Errorf(LogFormat, "Event", "globaltrafficpolicy", gtp.Name, clusterName, "Skipped as '"+common.GetWorkloadIdentifier()+" was not found', namespace="+gtp.Namespace)
- }
-
- env := common.GetGtpEnv(gtp)
-
- // For now we're going to force all the events to update only in order to prevent
- // the endpoints from being deleted.
- // TODO: Need to come up with a way to prevent deleting default endpoints so that this hack can be removed.
- // Use the same function as added deployment function to update and put new service entry in place to replace old one
- modifyServiceEntryForNewServiceOrPod(ctx, admiral.Update, env, globalIdentifier, remoteRegistry)
- return nil
-}
-
-func (dh *DependencyProxyHandler) Added(ctx context.Context, obj *v1.DependencyProxy) {
- log.Infof(LogFormat, "Add", "dependencyproxy", obj.Name, "", "Received=true namespace="+obj.Namespace)
- err := updateIdentityDependencyProxyCache(ctx, dh.RemoteRegistry.AdmiralCache.DependencyProxyVirtualServiceCache, obj, dh.dependencyProxyDefaultHostNameGenerator)
- if err != nil {
- log.Errorf(LogErrFormat, "Add", "dependencyproxy", obj.Name, "", err)
- }
-}
-
-func (dh *DependencyProxyHandler) Updated(ctx context.Context, obj *v1.DependencyProxy) {
- log.Infof(LogFormat, "Update", "dependencyproxy", obj.Name, "", "Received=true namespace="+obj.Namespace)
- err := updateIdentityDependencyProxyCache(ctx, dh.RemoteRegistry.AdmiralCache.DependencyProxyVirtualServiceCache, obj, dh.dependencyProxyDefaultHostNameGenerator)
- if err != nil {
- log.Errorf(LogErrFormat, "Add", "dependencyproxy", obj.Name, "", err)
- }
-}
-
-func (dh *DependencyProxyHandler) Deleted(ctx context.Context, obj *v1.DependencyProxy) {
- log.Infof(LogFormat, "Deleted", "dependencyproxy", obj.Name, "", "Skipping, not implemented")
-}
From 21606441458f4d13ec8c19c9dd0b32c0f475f8bf Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:25:48 -0400
Subject: [PATCH 166/235] copied admiral/pkg/clusters/types_test.go from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/types_test.go | 517 +----------------------------
1 file changed, 9 insertions(+), 508 deletions(-)
diff --git a/admiral/pkg/clusters/types_test.go b/admiral/pkg/clusters/types_test.go
index 840bebee..e4274254 100644
--- a/admiral/pkg/clusters/types_test.go
+++ b/admiral/pkg/clusters/types_test.go
@@ -1,31 +1,16 @@
package clusters
import (
- "bytes"
- "context"
- "fmt"
- "strings"
"sync"
- "testing"
"time"
- argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
"github.com/google/go-cmp/cmp/cmpopts"
- "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model"
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
- admiralFake "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/fake"
- "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ admiralV1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
- log "github.com/sirupsen/logrus"
- "github.com/stretchr/testify/assert"
- istiofake "istio.io/client-go/pkg/clientset/versioned/fake"
- v12 "k8s.io/api/apps/v1"
- v13 "k8s.io/api/core/v1"
- time2 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "os"
)
-var ignoreUnexported = cmpopts.IgnoreUnexported(v1.GlobalTrafficPolicy{}.Status)
+var ignoreUnexported = cmpopts.IgnoreUnexported(admiralV1.GlobalTrafficPolicy{}.Status)
var typeTestSingleton sync.Once
@@ -33,21 +18,21 @@ func admiralParamsForTypesTests() common.AdmiralParams {
return common.AdmiralParams{
KubeconfigPath: "testdata/fake.config",
LabelSet: &common.LabelSet{
- WorkloadIdentityKey: "identity",
- EnvKey: "admiral.io/env",
- GlobalTrafficDeploymentLabel: "identity",
- PriorityKey: "priority",
+ WorkloadIdentityKey: "identity",
+ EnvKey: "admiral.io/env",
+ AdmiralCRDIdentityLabel: "identity",
+ PriorityKey: "priority",
},
EnableSAN: true,
SANPrefix: "prefix",
HostnameSuffix: "mesh",
SyncNamespace: "ns",
- CacheRefreshDuration: time.Minute,
+ CacheReconcileDuration: time.Minute,
ClusterRegistriesNamespace: "default",
DependenciesNamespace: "default",
- SecretResolver: "",
EnableRoutingPolicy: true,
EnvoyFilterVersion: "1.13",
+ Profile: common.AdmiralProfileDefault,
}
}
@@ -57,487 +42,3 @@ func setupForTypeTests() {
common.InitializeConfig(admiralParamsForTypesTests())
})
}
-
-func TestDeploymentHandler(t *testing.T) {
- setupForTypeTests()
- ctx := context.Background()
-
- p := common.AdmiralParams{
- KubeconfigPath: "testdata/fake.config",
- }
-
- registry, _ := InitAdmiral(context.Background(), p)
-
- handler := DeploymentHandler{}
-
- gtpCache := &globalTrafficCache{}
- gtpCache.identityCache = make(map[string]*v1.GlobalTrafficPolicy)
- gtpCache.mutex = &sync.Mutex{}
-
- fakeCrdClient := admiralFake.NewSimpleClientset()
-
- gtpController := &admiral.GlobalTrafficController{CrdClient: fakeCrdClient}
- remoteController, _ := createMockRemoteController(func(i interface{}) {
-
- })
- remoteController.GlobalTraffic = gtpController
-
- registry.remoteControllers = map[string]*RemoteController{"cluster-1": remoteController}
-
- registry.AdmiralCache.GlobalTrafficCache = gtpCache
- handler.RemoteRegistry = registry
-
- deployment := v12.Deployment{
- ObjectMeta: time2.ObjectMeta{
- Name: "test",
- Namespace: "namespace",
- Labels: map[string]string{"identity": "app1"},
- },
- Spec: v12.DeploymentSpec{
- Selector: &time2.LabelSelector{
- MatchLabels: map[string]string{"identity": "bar"},
- },
- Template: v13.PodTemplateSpec{
- ObjectMeta: time2.ObjectMeta{
- Labels: map[string]string{"identity": "bar", "istio-injected": "true", "env": "dev"},
- },
- },
- },
- }
-
- //Struct of test case info. Name is required.
- testCases := []struct {
- name string
- addedDeployment *v12.Deployment
- expectedDeploymentCacheKey string
- expectedIdentityCacheValue *v1.GlobalTrafficPolicy
- expectedDeploymentCacheValue *v12.Deployment
- }{
- {
- name: "Shouldn't throw errors when called",
- addedDeployment: &deployment,
- expectedDeploymentCacheKey: "myGTP1",
- expectedIdentityCacheValue: nil,
- expectedDeploymentCacheValue: nil,
- },
- }
-
- //Rather annoying, but wasn't able to get the autogenerated fake k8s client for GTP objects to allow me to list resources, so this test is only for not throwing errors. I'll be testing the rest of the fucntionality picemeal.
- //Side note, if anyone knows how to fix `level=error msg="Failed to list deployments in cluster, error: no kind \"GlobalTrafficPolicyList\" is registered for version \"admiral.io/v1\" in scheme \"pkg/runtime/scheme.go:101\""`, I'd love to hear it!
- //Already tried working through this: https://github.com/camilamacedo86/operator-sdk/blob/e40d7db97f0d132333b1e46ddf7b7f3cab1e379f/doc/user/unit-testing.md with no luck
-
- //Run the test for every provided case
- for _, c := range testCases {
- t.Run(c.name, func(t *testing.T) {
- gtpCache = &globalTrafficCache{}
- gtpCache.identityCache = make(map[string]*v1.GlobalTrafficPolicy)
- gtpCache.mutex = &sync.Mutex{}
- handler.RemoteRegistry.AdmiralCache.GlobalTrafficCache = gtpCache
-
- handler.Added(ctx, &deployment)
- handler.Deleted(ctx, &deployment)
- })
- }
-}
-
-func TestRolloutHandler(t *testing.T) {
- setupForTypeTests()
- ctx := context.Background()
-
- p := common.AdmiralParams{
- KubeconfigPath: "testdata/fake.config",
- }
-
- registry, _ := InitAdmiral(context.Background(), p)
-
- handler := RolloutHandler{}
-
- gtpCache := &globalTrafficCache{}
- gtpCache.identityCache = make(map[string]*v1.GlobalTrafficPolicy)
- gtpCache.mutex = &sync.Mutex{}
-
- fakeCrdClient := admiralFake.NewSimpleClientset()
-
- gtpController := &admiral.GlobalTrafficController{CrdClient: fakeCrdClient}
- remoteController, _ := createMockRemoteController(func(i interface{}) {
-
- })
- remoteController.GlobalTraffic = gtpController
-
- registry.remoteControllers = map[string]*RemoteController{"cluster-1": remoteController}
-
- registry.AdmiralCache.GlobalTrafficCache = gtpCache
- handler.RemoteRegistry = registry
-
- rollout := argo.Rollout{
- ObjectMeta: time2.ObjectMeta{
- Name: "test",
- Namespace: "namespace",
- Labels: map[string]string{"identity": "app1"},
- },
- Spec: argo.RolloutSpec{
- Selector: &time2.LabelSelector{
- MatchLabels: map[string]string{"identity": "bar"},
- },
- Template: v13.PodTemplateSpec{
- ObjectMeta: time2.ObjectMeta{
- Labels: map[string]string{"identity": "bar", "istio-injected": "true", "env": "dev"},
- },
- },
- },
- }
-
- //Struct of test case info. Name is required.
- testCases := []struct {
- name string
- addedRolout *argo.Rollout
- expectedRolloutCacheKey string
- expectedIdentityCacheValue *v1.GlobalTrafficPolicy
- expectedRolloutCacheValue *argo.Rollout
- }{{
- name: "Shouldn't throw errors when called",
- addedRolout: &rollout,
- expectedRolloutCacheKey: "myGTP1",
- expectedIdentityCacheValue: nil,
- expectedRolloutCacheValue: nil,
- }, {
- name: "Shouldn't throw errors when called-no identity",
- addedRolout: &argo.Rollout{},
- expectedRolloutCacheKey: "myGTP1",
- expectedIdentityCacheValue: nil,
- expectedRolloutCacheValue: nil,
- },
- }
-
- //Rather annoying, but wasn't able to get the autogenerated fake k8s client for GTP objects to allow me to list resources, so this test is only for not throwing errors. I'll be testing the rest of the fucntionality picemeal.
- //Side note, if anyone knows how to fix `level=error msg="Failed to list rollouts in cluster, error: no kind \"GlobalTrafficPolicyList\" is registered for version \"admiral.io/v1\" in scheme \"pkg/runtime/scheme.go:101\""`, I'd love to hear it!
- //Already tried working through this: https://github.com/camilamacedo86/operator-sdk/blob/e40d7db97f0d132333b1e46ddf7b7f3cab1e379f/doc/user/unit-testing.md with no luck
-
- //Run the test for every provided case
- for _, c := range testCases {
- t.Run(c.name, func(t *testing.T) {
- gtpCache = &globalTrafficCache{}
- gtpCache.identityCache = make(map[string]*v1.GlobalTrafficPolicy)
- gtpCache.mutex = &sync.Mutex{}
- handler.RemoteRegistry.AdmiralCache.GlobalTrafficCache = gtpCache
- handler.Added(ctx, c.addedRolout)
- handler.Deleted(ctx, c.addedRolout)
- handler.Updated(ctx, c.addedRolout)
- })
- }
-}
-
-func TestHandleEventForGlobalTrafficPolicy(t *testing.T) {
- setupForTypeTests()
- ctx := context.Background()
- event := admiral.EventType("Add")
- p := common.AdmiralParams{
- KubeconfigPath: "testdata/fake.config",
- }
- registry, _ := InitAdmiral(context.Background(), p)
-
- testcases := []struct {
- name string
- gtp *v1.GlobalTrafficPolicy
- doesError bool
- }{
- {
- name: "missing identity label in GTP should result in error being returned by the handler",
- gtp: &v1.GlobalTrafficPolicy{
- ObjectMeta: time2.ObjectMeta{
- Name: "testgtp",
- Annotations: map[string]string{"admiral.io/env": "testenv"},
- },
- },
- doesError: true,
- },
- {
- name: "empty identity label in GTP should result in error being returned by the handler",
- gtp: &v1.GlobalTrafficPolicy{
- ObjectMeta: time2.ObjectMeta{
- Name: "testgtp",
- Labels: map[string]string{"identity": ""},
- Annotations: map[string]string{"admiral.io/env": "testenv"},
- },
- },
- doesError: true,
- },
- {
- name: "valid GTP config which is expected to pass",
- gtp: &v1.GlobalTrafficPolicy{
- ObjectMeta: time2.ObjectMeta{
- Name: "testgtp",
- Labels: map[string]string{"identity": "testapp"},
- Annotations: map[string]string{"admiral.io/env": "testenv"},
- },
- },
- doesError: false,
- },
- }
-
- for _, c := range testcases {
- t.Run(c.name, func(t *testing.T) {
- err := HandleEventForGlobalTrafficPolicy(ctx, event, c.gtp, registry, "testcluster")
- assert.Equal(t, err != nil, c.doesError)
- })
- }
-}
-
-func TestRoutingPolicyHandler(t *testing.T) {
- common.ResetSync()
- p := common.AdmiralParams{
- KubeconfigPath: "testdata/fake.config",
- LabelSet: &common.LabelSet{},
- EnableSAN: true,
- SANPrefix: "prefix",
- HostnameSuffix: "mesh",
- SyncNamespace: "ns",
- CacheRefreshDuration: time.Minute,
- ClusterRegistriesNamespace: "default",
- DependenciesNamespace: "default",
- SecretResolver: "",
- EnableRoutingPolicy: true,
- EnvoyFilterVersion: "1.13",
- }
-
- p.LabelSet.WorkloadIdentityKey = "identity"
- p.LabelSet.EnvKey = "admiral.io/env"
- p.LabelSet.GlobalTrafficDeploymentLabel = "identity"
-
- registry, _ := InitAdmiral(context.Background(), p)
-
- handler := RoutingPolicyHandler{}
-
- rpFilterCache := &routingPolicyFilterCache{}
- rpFilterCache.filterCache = make(map[string]map[string]map[string]string)
- rpFilterCache.mutex = &sync.Mutex{}
-
- routingPolicyController := &admiral.RoutingPolicyController{IstioClient: istiofake.NewSimpleClientset()}
- remoteController, _ := createMockRemoteController(func(i interface{}) {
-
- })
- remoteController.RoutingPolicyController = routingPolicyController
-
- registry.remoteControllers = map[string]*RemoteController{"cluster-1": remoteController}
- registry.AdmiralCache.RoutingPolicyFilterCache = rpFilterCache
-
- // foo is dependent upon bar and bar has a deployment in the same cluster.
- registry.AdmiralCache.IdentityDependencyCache.Put("foo", "bar", "bar")
- registry.AdmiralCache.IdentityClusterCache.Put("bar", remoteController.ClusterID, remoteController.ClusterID)
-
- // foo is also dependent upon bar2 but bar2 is in a different cluster, so this cluster should not have the envoyfilter created
- registry.AdmiralCache.IdentityDependencyCache.Put("foo", "bar2", "bar2")
- registry.AdmiralCache.IdentityClusterCache.Put("bar2", "differentCluster", "differentCluster")
-
- // foo1 is dependent upon bar 1 but bar1 does not have a deployment so it is missing from identityClusterCache
- registry.AdmiralCache.IdentityDependencyCache.Put("foo1", "bar1", "bar1")
-
- var mp = common.NewMap()
- mp.Put("k1", "v1")
- registry.AdmiralCache.WorkloadSelectorCache.PutMap("bar"+remoteController.ClusterID, mp)
- registry.AdmiralCache.WorkloadSelectorCache.PutMap("bar2differentCluster", mp)
-
- handler.RemoteRegistry = registry
-
- routingPolicyFoo := &v1.RoutingPolicy{
- TypeMeta: time2.TypeMeta{},
- ObjectMeta: time2.ObjectMeta{
- Labels: map[string]string{
- "identity": "foo",
- "admiral.io/env": "stage",
- },
- },
- Spec: model.RoutingPolicy{
- Plugin: "test",
- Hosts: []string{"e2e.testservice.mesh"},
- Config: map[string]string{
- "cachePrefix": "cache-v1",
- "cachettlSec": "86400",
- "routingServiceUrl": "e2e.test.routing.service.mesh",
- "pathPrefix": "/sayhello,/v1/company/{id}/",
- },
- },
- Status: v1.RoutingPolicyStatus{},
- }
-
- routingPolicyFoo1 := routingPolicyFoo.DeepCopy()
- routingPolicyFoo1.Labels[common.GetWorkloadIdentifier()] = "foo1"
-
- testCases := []struct {
- name string
- routingPolicy *v1.RoutingPolicy
- expectedFilterCacheKey string
- valueExpected bool
- }{
- {
- name: "If dependent deployment exists, should fetch filter from cache",
- routingPolicy: routingPolicyFoo,
- expectedFilterCacheKey: "barstage",
- valueExpected: true,
- },
- {
- name: "If dependent deployment does not exist, the filter should not be created",
- routingPolicy: routingPolicyFoo1,
- expectedFilterCacheKey: "bar1stage",
- valueExpected: false,
- },
- {
- name: "If dependent deployment exists in a different cluster, the filter should not be created",
- routingPolicy: routingPolicyFoo,
- expectedFilterCacheKey: "bar2stage",
- valueExpected: false,
- },
- }
-
- ctx := context.Background()
-
- time.Sleep(time.Second * 30)
- for _, c := range testCases {
- t.Run(c.name, func(t *testing.T) {
- handler.Added(ctx, c.routingPolicy)
- if c.valueExpected {
- filterCacheValue := registry.AdmiralCache.RoutingPolicyFilterCache.Get(c.expectedFilterCacheKey)
- assert.NotNil(t, filterCacheValue)
- selectorLabelsSha, err := common.GetSha1("bar" + common.GetRoutingPolicyEnv(c.routingPolicy))
- if err != nil {
- t.Error("Error ocurred while computing workload Labels sha1")
- }
- envoyFilterName := fmt.Sprintf("%s-dynamicrouting-%s-%s", strings.ToLower(c.routingPolicy.Spec.Plugin), selectorLabelsSha, "1.13")
- filterMap := filterCacheValue[remoteController.ClusterID]
- assert.NotNil(t, filterMap)
- assert.NotNil(t, filterMap[envoyFilterName])
-
- // once the routing policy is deleted, the corresponding filter should also be deleted
- handler.Deleted(ctx, c.routingPolicy)
- assert.Nil(t, registry.AdmiralCache.RoutingPolicyFilterCache.Get(c.expectedFilterCacheKey))
- } else {
- assert.Nil(t, registry.AdmiralCache.RoutingPolicyFilterCache.Get(c.expectedFilterCacheKey))
- }
-
- })
- }
-
- // Test for multiple filters
- registry.AdmiralCache.IdentityDependencyCache.Put("foo", "bar3", "bar3")
- registry.AdmiralCache.IdentityClusterCache.Put("bar3", remoteController.ClusterID, remoteController.ClusterID)
- registry.AdmiralCache.WorkloadSelectorCache.PutMap("bar3"+remoteController.ClusterID, mp)
- handler.Added(ctx, routingPolicyFoo)
-
- selectorLabelsShaBar3, err := common.GetSha1("bar3" + common.GetRoutingPolicyEnv(routingPolicyFoo))
- if err != nil {
- t.Error("Error ocurred while computing workload Labels sha1")
- }
- envoyFilterNameBar3 := fmt.Sprintf("%s-dynamicrouting-%s-%s", strings.ToLower(routingPolicyFoo.Spec.Plugin), selectorLabelsShaBar3, "1.13")
-
- filterCacheValue := registry.AdmiralCache.RoutingPolicyFilterCache.Get("bar3stage")
- assert.NotNil(t, filterCacheValue)
- filterMap := filterCacheValue[remoteController.ClusterID]
- assert.NotNil(t, filterMap)
- assert.NotNil(t, filterMap[envoyFilterNameBar3])
-
- registry.AdmiralCache.IdentityDependencyCache.Put("foo", "bar4", "bar4")
- registry.AdmiralCache.IdentityClusterCache.Put("bar4", remoteController.ClusterID, remoteController.ClusterID)
- registry.AdmiralCache.WorkloadSelectorCache.PutMap("bar4"+remoteController.ClusterID, mp)
- handler.Updated(ctx, routingPolicyFoo)
-
- selectorLabelsShaBar4, err := common.GetSha1("bar4" + common.GetRoutingPolicyEnv(routingPolicyFoo))
- if err != nil {
- t.Error("Error ocurred while computing workload Labels sha1")
- }
- envoyFilterNameBar4 := fmt.Sprintf("%s-dynamicrouting-%s-%s", strings.ToLower(routingPolicyFoo.Spec.Plugin), selectorLabelsShaBar4, "1.13")
-
- filterCacheValue = registry.AdmiralCache.RoutingPolicyFilterCache.Get("bar4stage")
- assert.NotNil(t, filterCacheValue)
- filterMap = filterCacheValue[remoteController.ClusterID]
- assert.NotNil(t, filterMap)
- assert.NotNil(t, filterMap[envoyFilterNameBar4])
-
- // ignore the routing policy
- annotations := routingPolicyFoo.GetAnnotations()
- if annotations == nil {
- annotations = make(map[string]string)
- }
- annotations[common.AdmiralIgnoreAnnotation] = "true"
- routingPolicyFoo.SetAnnotations(annotations)
-
- handler.Updated(ctx, routingPolicyFoo)
- assert.Nil(t, registry.AdmiralCache.RoutingPolicyFilterCache.Get("bar4stage"))
- assert.Nil(t, registry.AdmiralCache.RoutingPolicyFilterCache.Get("bar3stage"))
-}
-
-func TestRoutingPolicyReadOnly(t *testing.T) {
- p := common.AdmiralParams{
- KubeconfigPath: "testdata/fake.config",
- LabelSet: &common.LabelSet{},
- EnableSAN: true,
- SANPrefix: "prefix",
- HostnameSuffix: "mesh",
- SyncNamespace: "ns",
- CacheRefreshDuration: time.Minute,
- ClusterRegistriesNamespace: "default",
- DependenciesNamespace: "default",
- SecretResolver: "",
- EnableRoutingPolicy: true,
- EnvoyFilterVersion: "1.13",
- }
-
- p.LabelSet.WorkloadIdentityKey = "identity"
- p.LabelSet.EnvKey = "admiral.io/env"
- p.LabelSet.GlobalTrafficDeploymentLabel = "identity"
-
- handler := RoutingPolicyHandler{}
-
- testcases := []struct {
- name string
- rp *v1.RoutingPolicy
- readOnly bool
- doesError bool
- }{
- {
- name: "Readonly test for DR scenario - Routing Policy",
- rp: &v1.RoutingPolicy{},
- readOnly: true,
- doesError: true,
- },
- {
- name: "Readonly false test for DR scenario - Routing Policy",
- rp: &v1.RoutingPolicy{},
- readOnly: false,
- doesError: false,
- },
- }
-
- ctx := context.Background()
-
- for _, c := range testcases {
- t.Run(c.name, func(t *testing.T) {
- if c.readOnly {
- CurrentAdmiralState.ReadOnly = true
- } else {
- CurrentAdmiralState.ReadOnly = false
- }
- var buf bytes.Buffer
- log.SetOutput(&buf)
- defer func() {
- log.SetOutput(os.Stderr)
- }()
- // Add routing policy test
- handler.Added(ctx, c.rp)
- t.Log(buf.String())
- val := strings.Contains(buf.String(), "skipping read-only mode")
- assert.Equal(t, c.doesError, val)
-
- // Update routing policy test
- handler.Updated(ctx, c.rp)
- t.Log(buf.String())
- val = strings.Contains(buf.String(), "skipping read-only mode")
- assert.Equal(t, c.doesError, val)
-
- // Delete routing policy test
- handler.Deleted(ctx, c.rp)
- t.Log(buf.String())
- val = strings.Contains(buf.String(), "skipping read-only mode")
- assert.Equal(t, c.doesError, val)
- })
- }
-}
From c42e1b664205910023ac09a3a65604d55f10006f Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:26:17 -0400
Subject: [PATCH 167/235] copied admiral/pkg/clusters/util.go from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/util.go | 288 ++++++++++++++++++++++++++++++-----
1 file changed, 247 insertions(+), 41 deletions(-)
diff --git a/admiral/pkg/clusters/util.go b/admiral/pkg/clusters/util.go
index 64274753..d16f1026 100644
--- a/admiral/pkg/clusters/util.go
+++ b/admiral/pkg/clusters/util.go
@@ -1,39 +1,39 @@
package clusters
import (
+ "context"
"errors"
+ "sort"
"strconv"
"strings"
"time"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/util"
+
argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
- log "github.com/sirupsen/logrus"
+ "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
+ networking "istio.io/api/networking/v1alpha3"
k8sAppsV1 "k8s.io/api/apps/v1"
k8sV1 "k8s.io/api/core/v1"
)
-func GetMeshPortAndLabelsFromDeploymentOrRollout(
- cluster string, serviceInstance *k8sV1.Service,
- deploymentsByCluster map[string]*k8sAppsV1.Deployment,
- rolloutsByCluster map[string]*argo.Rollout,
-) (portsByProtocol map[string]uint32, labels map[string]string) {
- if len(deploymentsByCluster) > 0 && deploymentsByCluster[cluster] != nil {
- deployment := deploymentsByCluster[cluster]
- return GetMeshPortsForDeployment(cluster, serviceInstance, deployment), deployment.Labels
- }
- if len(rolloutsByCluster) > 0 && rolloutsByCluster[cluster] != nil {
- rollout := rolloutsByCluster[cluster]
- return GetMeshPortsForRollout(cluster, serviceInstance, rollout), rollout.Labels
- }
- return nil, nil
-}
+type WorkloadEntrySorted []*networking.WorkloadEntry
-func GetMeshPortsForDeployment(clusterName string, destService *k8sV1.Service,
+func GetMeshPortsForDeployments(clusterName string, destService *k8sV1.Service,
destDeployment *k8sAppsV1.Deployment) map[string]uint32 {
+
+ if destService == nil || destDeployment == nil {
+ logrus.Warnf("Deployment or Service is nil cluster=%s", clusterName)
+ return nil
+ }
+
var meshPorts string
- if destDeployment != nil {
+ if destDeployment.Spec.Template.Annotations == nil {
+ meshPorts = ""
+ } else {
meshPorts = destDeployment.Spec.Template.Annotations[common.SidecarEnabledPorts]
}
ports := getMeshPortsHelper(meshPorts, destService, clusterName)
@@ -42,8 +42,15 @@ func GetMeshPortsForDeployment(clusterName string, destService *k8sV1.Service,
func GetMeshPortsForRollout(clusterName string, destService *k8sV1.Service,
destRollout *argo.Rollout) map[string]uint32 {
+ if destService == nil || destRollout == nil {
+ logrus.Warnf("Rollout or Service is nil cluster=%s", clusterName)
+ return nil
+ }
+
var meshPorts string
- if destRollout != nil {
+ if destRollout.Spec.Template.Annotations == nil {
+ meshPorts = ""
+ } else {
meshPorts = destRollout.Spec.Template.Annotations[common.SidecarEnabledPorts]
}
ports := getMeshPortsHelper(meshPorts, destService, clusterName)
@@ -54,14 +61,14 @@ func GetMeshPortsForRollout(clusterName string, destService *k8sV1.Service,
func GetServiceSelector(clusterName string, destService *k8sV1.Service) *common.Map {
var selectors = destService.Spec.Selector
if len(selectors) == 0 {
- log.Infof(LogFormat, "GetServiceLabels", "no selectors present", destService.Name, clusterName, selectors)
+ logrus.Infof(LogFormat, "GetServiceLabels", "no selectors present", destService.Name, clusterName, selectors)
return nil
}
var tempMap = common.NewMap()
for key, value := range selectors {
tempMap.Put(key, value)
}
- log.Infof(LogFormat, "GetServiceLabels", "selectors present", destService.Name, clusterName, selectors)
+ logrus.Infof(LogFormat, "GetServiceLabels", "selectors present", destService.Name, clusterName, selectors)
return tempMap
}
@@ -72,9 +79,10 @@ func getMeshPortsHelper(meshPorts string, destService *k8sV1.Service, clusterNam
return ports
}
if len(meshPorts) == 0 {
- log.Infof(LogFormat, "GetMeshPorts", "service", destService.Name, clusterName, "No mesh ports present, defaulting to first port")
+ logrus.Infof(LogFormatAdv, "GetMeshPorts", "service", destService.Name, destService.Namespace,
+ clusterName, "No mesh ports present, defaulting to first port")
if destService.Spec.Ports != nil && len(destService.Spec.Ports) > 0 {
- var protocol = GetPortProtocol(destService.Spec.Ports[0].Name)
+ var protocol = util.GetPortProtocol(destService.Spec.Ports[0].Name)
ports[protocol] = uint32(destService.Spec.Ports[0].Port)
}
return ports
@@ -83,7 +91,7 @@ func getMeshPortsHelper(meshPorts string, destService *k8sV1.Service, clusterNam
meshPortsSplit := strings.Split(meshPorts, ",")
if len(meshPortsSplit) > 1 {
- log.Warnf(LogErrFormat, "Get", "MeshPorts", "", clusterName,
+ logrus.Warnf(LogErrFormat, "Get", "MeshPorts", "", clusterName,
"Multiple inbound mesh ports detected, admiral generates service entry with first matched port and protocol")
}
@@ -104,7 +112,7 @@ func getMeshPortsHelper(meshPorts string, destService *k8sV1.Service, clusterNam
if servicePort.TargetPort.StrVal != "" {
port, err := strconv.Atoi(servicePort.TargetPort.StrVal)
if err != nil {
- log.Warnf(LogErrFormat, "GetMeshPorts", "Failed to parse TargetPort", destService.Name, clusterName, err)
+ logrus.Warnf(LogErrFormat, "GetMeshPorts", "Failed to parse TargetPort", destService.Name, clusterName, err)
}
if port > 0 {
targetPort = uint32(port)
@@ -115,8 +123,9 @@ func getMeshPortsHelper(meshPorts string, destService *k8sV1.Service, clusterNam
targetPort = uint32(servicePort.TargetPort.IntVal)
}
if _, ok := meshPortMap[targetPort]; ok {
- var protocol = GetPortProtocol(servicePort.Name)
- log.Debugf(LogFormat, "GetMeshPorts", servicePort.Port, destService.Name, clusterName, "Adding mesh port for protocol: "+protocol)
+ var protocol = util.GetPortProtocol(servicePort.Name)
+ logrus.Infof(LogFormatAdv, "MeshPort", servicePort.Port, destService.Name, destService.Namespace,
+ clusterName, "Protocol: "+protocol)
ports[protocol] = uint32(servicePort.Port)
break
}
@@ -124,18 +133,6 @@ func getMeshPortsHelper(meshPorts string, destService *k8sV1.Service, clusterNam
return ports
}
-func GetPortProtocol(name string) string {
- var protocol = common.Http
- if strings.Index(name, common.GrpcWeb) == 0 {
- protocol = common.GrpcWeb
- } else if strings.Index(name, common.Grpc) == 0 {
- protocol = common.Grpc
- } else if strings.Index(name, common.Http2) == 0 {
- protocol = common.Http2
- }
- return protocol
-}
-
func GetServiceEntryStateFromConfigmap(configmap *k8sV1.ConfigMap) *ServiceEntryAddressStore {
bytes := []byte(configmap.Data["serviceEntryAddressStore"])
@@ -143,7 +140,7 @@ func GetServiceEntryStateFromConfigmap(configmap *k8sV1.ConfigMap) *ServiceEntry
err := yaml.Unmarshal(bytes, &addressStore)
if err != nil {
- log.Errorf("Could not unmarshal configmap data. Double check the configmap format. %v", err)
+ logrus.Errorf("Could not unmarshal configmap data. Double check the configmap format. %v", err)
return nil
}
if addressStore.Addresses == nil {
@@ -168,5 +165,214 @@ func ValidateConfigmapBeforePutting(cm *k8sV1.ConfigMap) error {
}
func IsCacheWarmupTime(remoteRegistry *RemoteRegistry) bool {
- return time.Since(remoteRegistry.StartTime) < common.GetAdmiralParams().CacheRefreshDuration
+ return time.Since(remoteRegistry.StartTime) < common.GetAdmiralParams().CacheReconcileDuration
+}
+
+func IsCacheWarmupTimeForDependency(remoteRegistry *RemoteRegistry) bool {
+ return time.Since(remoteRegistry.StartTime) < (common.GetAdmiralParams().CacheReconcileDuration * time.Duration(common.DependencyWarmupMultiplier()))
+}
+
+// removeSeEndpoints is used to determine if we want to add, update or delete the endpoints for the current cluster being processed.
+// Based on this information we will decide if we should add, update or delete the SE in the source as well as dependent clusters.
+func removeSeEndpoints(eventCluster string, event admiral.EventType, clusterId string, deployToRolloutMigration bool, appType string, clusterAppDeleteMap map[string]string) (admiral.EventType, bool) {
+ eventType := event
+ deleteCluster := false
+
+ if event == admiral.Delete {
+ if eventCluster == clusterId {
+ deleteCluster = true
+ // If both the deployment and rollout are present and the cluster for which
+ // the function was called is not the cluster for which the delete event was sent
+ // we update the event to admiral.Update
+ if deployToRolloutMigration && appType != clusterAppDeleteMap[eventCluster] {
+ eventType = admiral.Update
+ }
+ } else {
+ eventType = admiral.Update
+ }
+ }
+
+ return eventType, deleteCluster
+}
+
+// GenerateServiceEntryForCanary - generates a service entry only for canary endpoint
+// This is required for rollouts to test only canary version of the services
+func GenerateServiceEntryForCanary(ctxLogger *logrus.Entry, ctx context.Context, event admiral.EventType, rc *RemoteController, admiralCache *AdmiralCache,
+ meshPorts map[string]uint32, destRollout *argo.Rollout, serviceEntries map[string]*networking.ServiceEntry, workloadIdentityKey string, san []string) error {
+
+ if destRollout.Spec.Strategy.Canary != nil && destRollout.Spec.Strategy.Canary.CanaryService != "" &&
+ destRollout.Spec.Strategy.Canary.TrafficRouting != nil && destRollout.Spec.Strategy.Canary.TrafficRouting.Istio != nil {
+ rolloutServices := GetAllServicesForRollout(rc, destRollout)
+ logrus.Debugf("number of services %d matched for rollout %s in namespace=%s and cluster=%s", len(rolloutServices), destRollout.Name, destRollout.Namespace, rc.ClusterID)
+ if rolloutServices == nil {
+ return nil
+ }
+ if _, ok := rolloutServices[destRollout.Spec.Strategy.Canary.CanaryService]; ok {
+ canaryGlobalFqdn := common.CanaryRolloutCanaryPrefix + common.Sep + common.GetCnameForRollout(destRollout, workloadIdentityKey, common.GetHostnameSuffix())
+ admiralCache.CnameIdentityCache.Store(canaryGlobalFqdn, common.GetRolloutGlobalIdentifier(destRollout))
+ err := generateSECanary(ctxLogger, ctx, event, rc, admiralCache, meshPorts, serviceEntries, san, canaryGlobalFqdn)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// Returns all services that match the rollout selector, in case of canary strategy this should return a map with root, stable and canary services
+func GetAllServicesForRollout(rc *RemoteController, rollout *argo.Rollout) map[string]*WeightedService {
+
+ if rollout == nil {
+ return nil
+ }
+
+ if rollout.Spec.Selector == nil || rollout.Spec.Selector.MatchLabels == nil {
+ logrus.Infof("no selector for rollout=%s in namespace=%s and cluster=%s", rollout.Name, rollout.Namespace, rc.ClusterID)
+ return nil
+ }
+
+ cachedServices := rc.ServiceController.Cache.Get(rollout.Namespace)
+
+ if cachedServices == nil {
+ return nil
+ }
+ var matchedServices = make(map[string]*WeightedService)
+
+ for _, service := range cachedServices {
+ match := common.IsServiceMatch(service.Spec.Selector, rollout.Spec.Selector)
+ //make sure the service matches the rollout Selector and also has a mesh port in the port spec
+ if match {
+ ports := GetMeshPortsForRollout(rc.ClusterID, service, rollout)
+ if len(ports) > 0 {
+ //Weights are not important, this just returns list of all services matching rollout
+ matchedServices[service.Name] = &WeightedService{Weight: 1, Service: service}
+ logrus.Debugf("service matched=%s rollout=%s in namespace=%s and cluster=%s", service.Name, rollout.Name, rollout.Namespace, rc.ClusterID)
+ }
+ }
+ }
+ return matchedServices
+}
+
+// generateSECanary generates a unique IP address for the SE, it also calls generateServiceEntry to create the skeleton Service entry
+func generateSECanary(ctxLogger *logrus.Entry, ctx context.Context, event admiral.EventType, rc *RemoteController, admiralCache *AdmiralCache, meshPorts map[string]uint32, serviceEntries map[string]*networking.ServiceEntry, san []string, fqdn string) error {
+
+ address, err := getUniqueAddress(ctxLogger, ctx, admiralCache, fqdn)
+ if err != nil {
+ logrus.Errorf("failed to generate unique address for canary fqdn - %v error - %v", fqdn, err)
+ return err
+ }
+ // This check preserves original behavior of checking for non-empty fqdn and address before
+ // generating SE when disable_ip_generation=false. When disable_ip_generation=true, it still
+ // checks for non-empty fqdn but allows for empty address.
+ if len(fqdn) != 0 && (common.DisableIPGeneration() || len(address) != 0) {
+ logrus.Infof("se generated for canary fqdn - %v", fqdn)
+ generateServiceEntry(ctxLogger, event, admiralCache, meshPorts, fqdn, rc, serviceEntries, address, san, common.Rollout)
+ }
+ return nil
+}
+
+// Checks if istio strategy is used by rollout, also if there is a canary service defined in the spec
+func IsCanaryIstioStrategy(rollout *argo.Rollout) bool {
+ if rollout != nil && &rollout.Spec != (&argo.RolloutSpec{}) && rollout.Spec.Strategy != (argo.RolloutStrategy{}) {
+ if rollout.Spec.Strategy.Canary != nil && rollout.Spec.Strategy.Canary.TrafficRouting != nil && rollout.Spec.Strategy.Canary.TrafficRouting.Istio != nil &&
+ len(rollout.Spec.Strategy.Canary.CanaryService) > 0 {
+ return true
+ }
+ }
+ return false
+}
+
+// filterClusters returns a copy of sourceClusters with every cluster that also
+// hosts the destination service removed, so co-located source clusters are skipped.
+func filterClusters(sourceClusters, destinationClusters *common.Map) *common.Map {
+ remaining := common.NewMap()
+ sourceClusters.Range(func(cluster string, value string) {
+ // Guard clause preserves original behavior: a nil destination map filters everything out.
+ if destinationClusters == nil || destinationClusters.CheckIfPresent(cluster) {
+ logrus.Infof("Filtering out %v from sourceClusters list as it is present in destinationClusters", cluster)
+ return
+ }
+ remaining.Put(cluster, value)
+ })
+ return remaining
+}
+
+// getSortedDependentNamespaces takes a cname and reduces it to its base form (without canary/bluegreen prefix) and fetches the partitionedIdentity based on that
+// Then, it checks if the clusterId matches any of the source clusters, and if so, adds istio-system to the list of dependent namespaces
+// Then, it fetches the dependent namespaces based on the cname or cnameWithoutPrefix and adds them to the list of dependent namespaces
+// If the list is above the maximum number of allowed exportTo values, it replaces the entries with "*"
+// Otherwise, it sorts and dedups the list of dependent namespaces and returns them.
+func getSortedDependentNamespaces(admiralCache *AdmiralCache, cname string, clusterId string, ctxLogger *logrus.Entry) []string {
+ var clusterNamespaces *common.MapOfMaps
+ var namespaceSlice []string
+ var cnameWithoutPrefix string
+ cname = strings.ToLower(cname)
+ // Strip the canary/bluegreen prefix (if any) so cache lookups can fall back to the base cname.
+ if strings.HasPrefix(cname, common.CanaryRolloutCanaryPrefix+common.Sep) {
+ cnameWithoutPrefix = strings.TrimPrefix(cname, common.CanaryRolloutCanaryPrefix+common.Sep)
+ } else if strings.HasPrefix(cname, common.BlueGreenRolloutPreviewPrefix+common.Sep) {
+ cnameWithoutPrefix = strings.TrimPrefix(cname, common.BlueGreenRolloutPreviewPrefix+common.Sep)
+ }
+ // Without the dependent-cluster-namespace cache there is nothing to compute.
+ if admiralCache == nil || admiralCache.CnameDependentClusterNamespaceCache == nil {
+ return namespaceSlice
+ }
+ //This section gets the identity and uses it to fetch the identity's source clusters
+ //If the cluster we are fetching dependent namespaces for is also a source cluster
+ //Then we add istio-system to the list of namespaces for ExportTo
+ if admiralCache.CnameIdentityCache != nil {
+ partitionedIdentity, ok := admiralCache.CnameIdentityCache.Load(cname)
+ if ok && admiralCache.IdentityClusterCache != nil {
+ sourceClusters := admiralCache.IdentityClusterCache.Get(partitionedIdentity.(string))
+ if sourceClusters != nil && sourceClusters.Get(clusterId) != "" {
+ namespaceSlice = append(namespaceSlice, common.NamespaceIstioSystem)
+
+ // Add source namespaces s.t. throttle filter can query envoy clusters
+ if admiralCache.IdentityClusterNamespaceCache != nil && admiralCache.IdentityClusterNamespaceCache.Get(partitionedIdentity.(string)) != nil {
+ sourceNamespacesInCluster := admiralCache.IdentityClusterNamespaceCache.Get(partitionedIdentity.(string)).Get(clusterId)
+ if sourceNamespacesInCluster != nil && sourceNamespacesInCluster.Len() > 0 {
+ namespaceSlice = append(namespaceSlice, sourceNamespacesInCluster.GetKeys()...)
+ }
+ }
+ }
+ }
+ }
+ cnameWithoutPrefix = strings.TrimSpace(cnameWithoutPrefix)
+ clusterNamespaces = admiralCache.CnameDependentClusterNamespaceCache.Get(cname)
+ // Fall back to the base cname and memoize the result under the prefixed cname.
+ if clusterNamespaces == nil && cnameWithoutPrefix != "" {
+ clusterNamespaces = admiralCache.CnameDependentClusterNamespaceCache.Get(cnameWithoutPrefix)
+ if clusterNamespaces != nil {
+ admiralCache.CnameDependentClusterNamespaceCache.PutMapofMaps(cname, clusterNamespaces)
+ ctxLogger.Infof("clusterNamespaces for prefixed cname %v was empty, replacing with clusterNamespaces for %v", cname, cnameWithoutPrefix)
+ }
+ }
+ if clusterNamespaces != nil && clusterNamespaces.Len() > 0 {
+ namespaces := clusterNamespaces.Get(clusterId)
+ if namespaces != nil && namespaces.Len() > 0 {
+ namespaceSlice = append(namespaceSlice, namespaces.GetKeys()...)
+ if len(namespaceSlice) > common.GetExportToMaxNamespaces() {
+ namespaceSlice = []string{"*"}
+ ctxLogger.Infof("exceeded max namespaces for cname=%s in cluster=%s", cname, clusterId)
+ }
+ sort.Strings(namespaceSlice)
+ }
+ }
+ // this is to avoid duplication in namespaceSlice e.g. dynamicrouting deployment present in istio-system can be a dependent of blackhole on blackhole's source cluster
+ // NOTE(review): sort.Strings above only runs when dependent namespaces were found for this
+ // cluster; if only istio-system + source namespaces were appended, the slice may be unsorted
+ // and this adjacency-based dedup could miss non-adjacent duplicates — TODO confirm intended.
+ var dedupNamespaceSlice []string
+ for i := 0; i < len(namespaceSlice); i++ {
+ if i == 0 || namespaceSlice[i] != namespaceSlice[i-1] {
+ dedupNamespaceSlice = append(dedupNamespaceSlice, namespaceSlice[i])
+ }
+ }
+ ctxLogger.Infof("getSortedDependentNamespaces for cname %v and cluster %v got namespaces: %v", cname, clusterId, dedupNamespaceSlice)
+ return dedupNamespaceSlice
+}
+
+// Len returns the number of workload entries (part of sort.Interface).
+func (w WorkloadEntrySorted) Len() int {
+ return len(w)
+}
+
+// Less orders workload entries lexicographically by Address (part of sort.Interface).
+func (w WorkloadEntrySorted) Less(i, j int) bool {
+ return w[i].Address < w[j].Address
+}
+
+// Swap exchanges the entries at indexes i and j (part of sort.Interface).
+func (w WorkloadEntrySorted) Swap(i, j int) {
+ w[i], w[j] = w[j], w[i]
}
From 1327553af3b897261bfd7977a7696574e94ed78e Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:26:35 -0400
Subject: [PATCH 168/235] copied admiral/pkg/clusters/util_test.go from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/clusters/util_test.go | 1058 +++++++++++++++++++++++++----
1 file changed, 932 insertions(+), 126 deletions(-)
diff --git a/admiral/pkg/clusters/util_test.go b/admiral/pkg/clusters/util_test.go
index 49fc5b31..290727ad 100644
--- a/admiral/pkg/clusters/util_test.go
+++ b/admiral/pkg/clusters/util_test.go
@@ -1,16 +1,29 @@
package clusters
import (
+ "context"
"errors"
"reflect"
"strconv"
+ "sync"
"testing"
+ "time"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/test"
+ "github.com/sirupsen/logrus"
+ istioNetworkingV1Alpha3 "istio.io/api/networking/v1alpha3"
+ istiofake "istio.io/client-go/pkg/clientset/versioned/fake"
+ "k8s.io/client-go/rest"
argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "istio.io/client-go/pkg/apis/networking/v1alpha3"
k8sAppsV1 "k8s.io/api/apps/v1"
coreV1 "k8s.io/api/core/v1"
k8sV1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
@@ -32,15 +45,15 @@ func TestGetMeshPorts(t *testing.T) {
meshK8sSvcPort = k8sV1.ServicePort{Name: "mesh", Port: int32(annotatedPort)}
serviceMeshPorts = []k8sV1.ServicePort{defaultK8sSvcPort, meshK8sSvcPort}
serviceMeshPortsOnlyDefault = []k8sV1.ServicePort{defaultK8sSvcPortNoName}
- service = &k8sV1.Service{
+ service = k8sV1.Service{
ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
Spec: k8sV1.ServiceSpec{Ports: serviceMeshPorts},
}
- deployment = &k8sAppsV1.Deployment{
+ deployment = k8sAppsV1.Deployment{
Spec: k8sAppsV1.DeploymentSpec{Template: coreV1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{common.SidecarEnabledPorts: strconv.Itoa(annotatedPort)}},
}}}
- deploymentWithMultipleMeshPorts = &k8sAppsV1.Deployment{
+ deploymentWithMultipleMeshPorts = k8sAppsV1.Deployment{
Spec: k8sAppsV1.DeploymentSpec{Template: coreV1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{common.SidecarEnabledPorts: strconv.Itoa(annotatedPort) + "," + strconv.Itoa(annotatedSecondPort)}},
}}}
@@ -49,8 +62,8 @@ func TestGetMeshPorts(t *testing.T) {
testCases := []struct {
name string
clusterName string
- service *k8sV1.Service
- deployment *k8sAppsV1.Deployment
+ service k8sV1.Service
+ deployment k8sAppsV1.Deployment
expected map[string]uint32
}{
{
@@ -61,7 +74,7 @@ func TestGetMeshPorts(t *testing.T) {
},
{
name: "should return a http port if no port name is specified",
- service: &k8sV1.Service{
+ service: k8sV1.Service{
ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Port: int32(80), TargetPort: intstr.FromInt(annotatedPort)}}},
},
@@ -70,7 +83,7 @@ func TestGetMeshPorts(t *testing.T) {
},
{
name: "should return a http port if the port name doesn't start with a protocol name",
- service: &k8sV1.Service{
+ service: k8sV1.Service{
ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "hello-grpc", Port: int32(annotatedPort)}}},
},
@@ -79,7 +92,7 @@ func TestGetMeshPorts(t *testing.T) {
},
{
name: "should return a grpc port based on annotation",
- service: &k8sV1.Service{
+ service: k8sV1.Service{
ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "grpc-service", Port: int32(annotatedPort)}}},
},
@@ -88,7 +101,7 @@ func TestGetMeshPorts(t *testing.T) {
},
{
name: "should return a grpc-web port based on annotation",
- service: &k8sV1.Service{
+ service: k8sV1.Service{
ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "grpc-web", Port: int32(annotatedPort)}}},
},
@@ -97,7 +110,7 @@ func TestGetMeshPorts(t *testing.T) {
},
{
name: "should return a http2 port based on annotation",
- service: &k8sV1.Service{
+ service: k8sV1.Service{
ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "http2", Port: int32(annotatedPort)}}},
},
@@ -106,11 +119,11 @@ func TestGetMeshPorts(t *testing.T) {
},
{
name: "should return a default port",
- service: &k8sV1.Service{
+ service: k8sV1.Service{
ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
Spec: k8sV1.ServiceSpec{Ports: serviceMeshPortsOnlyDefault},
},
- deployment: &k8sAppsV1.Deployment{
+ deployment: k8sAppsV1.Deployment{
Spec: k8sAppsV1.DeploymentSpec{Template: coreV1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{}},
}}},
@@ -118,11 +131,11 @@ func TestGetMeshPorts(t *testing.T) {
},
{
name: "should return empty ports",
- service: &k8sV1.Service{
+ service: k8sV1.Service{
ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
Spec: k8sV1.ServiceSpec{Ports: nil},
},
- deployment: &k8sAppsV1.Deployment{
+ deployment: k8sAppsV1.Deployment{
Spec: k8sAppsV1.DeploymentSpec{Template: coreV1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{}},
}}},
@@ -130,7 +143,7 @@ func TestGetMeshPorts(t *testing.T) {
},
{
name: "should return a http port if the port name doesn't start with a protocol name",
- service: &k8sV1.Service{
+ service: k8sV1.Service{
ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "http", Port: int32(annotatedPort)},
{Name: "grpc", Port: int32(annotatedSecondPort)}}},
@@ -138,21 +151,11 @@ func TestGetMeshPorts(t *testing.T) {
deployment: deploymentWithMultipleMeshPorts,
expected: ports,
},
- {
- name: "should not panic when deployment is empty",
- service: &k8sV1.Service{
- ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
- Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "http", Port: int32(annotatedPort)},
- {Name: "grpc", Port: int32(annotatedSecondPort)}}},
- },
- deployment: nil,
- expected: ports,
- },
}
for _, c := range testCases {
t.Run(c.name, func(t *testing.T) {
- meshPorts := GetMeshPortsForDeployment(c.clusterName, c.service, c.deployment)
+ meshPorts := GetMeshPortsForDeployments(c.clusterName, &c.service, &c.deployment)
if !reflect.DeepEqual(meshPorts, c.expected) {
t.Errorf("Wanted meshPorts: %v, got: %v", c.expected, meshPorts)
}
@@ -283,11 +286,11 @@ func TestGetMeshPortsForRollout(t *testing.T) {
serviceMeshPortsOnlyDefault := []k8sV1.ServicePort{defaultK8sSvcPortNoName}
- service := &k8sV1.Service{
+ service := k8sV1.Service{
ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
Spec: k8sV1.ServiceSpec{Ports: serviceMeshPorts},
}
- rollout := &argo.Rollout{
+ rollout := argo.Rollout{
Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{common.SidecarEnabledPorts: strconv.Itoa(annotatedPort)}},
}}}
@@ -301,8 +304,8 @@ func TestGetMeshPortsForRollout(t *testing.T) {
testCases := []struct {
name string
clusterName string
- service *k8sV1.Service
- rollout *argo.Rollout
+ service k8sV1.Service
+ rollout argo.Rollout
expected map[string]uint32
}{
{
@@ -313,11 +316,11 @@ func TestGetMeshPortsForRollout(t *testing.T) {
},
{
name: "should return a default port",
- service: &k8sV1.Service{
+ service: k8sV1.Service{
ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
Spec: k8sV1.ServiceSpec{Ports: serviceMeshPortsOnlyDefault},
},
- rollout: &argo.Rollout{
+ rollout: argo.Rollout{
Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{}},
}}},
@@ -325,30 +328,21 @@ func TestGetMeshPortsForRollout(t *testing.T) {
},
{
name: "should return empty ports",
- service: &k8sV1.Service{
+ service: k8sV1.Service{
ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
Spec: k8sV1.ServiceSpec{Ports: nil},
},
- rollout: &argo.Rollout{
+ rollout: argo.Rollout{
Spec: argo.RolloutSpec{Template: coreV1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{}},
}}},
expected: emptyPorts,
},
- {
- name: "should not panic when rollout is nil",
- service: &k8sV1.Service{
- ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}},
- Spec: k8sV1.ServiceSpec{Ports: nil},
- },
- rollout: nil,
- expected: emptyPorts,
- },
}
for _, c := range testCases {
t.Run(c.name, func(t *testing.T) {
- meshPorts := GetMeshPortsForRollout(c.clusterName, c.service, c.rollout)
+ meshPorts := GetMeshPortsForRollout(c.clusterName, &c.service, &c.rollout)
if !reflect.DeepEqual(meshPorts, c.expected) {
t.Errorf("Wanted meshPorts: %v, got: %v", c.expected, meshPorts)
}
@@ -356,116 +350,928 @@ func TestGetMeshPortsForRollout(t *testing.T) {
}
}
-func TestGetMeshPortAndLabelsFromDeploymentOrRollout(t *testing.T) {
+// TestRemoveSeEndpoints is a table-driven test of removeSeEndpoints, covering add/update/delete
+// events for the same vs. a different cluster, with and without deploy<->rollout migration.
+func TestRemoveSeEndpoints(t *testing.T) {
+ clusterName := "clusterForWhichEventWasSent"
+ differentClusterName := "notSameClusterForWhichEventWasSent"
+ testCases := []struct {
+ name string
+ event admiral.EventType
+ clusterId string
+ deployToRolloutMigration bool
+ appType string
+ clusterAppDeleteMap map[string]string
+ expectedEvent admiral.EventType
+ expectedDeleteCluster bool
+ }{
+ {
+ name: "Given a delete event is received," +
+ "And we are currently processing for the same cluster," +
+ "Then we should return a delete event and true for deleting the endpoints for the cluster",
+ event: admiral.Delete,
+ clusterId: clusterName,
+ deployToRolloutMigration: false,
+ appType: common.Deployment,
+ clusterAppDeleteMap: nil,
+ expectedEvent: admiral.Delete,
+ expectedDeleteCluster: true,
+ },
+ {
+ name: "Given a delete event is received," +
+ "And we are currently processing for a different cluster," +
+ "Then we should return a update event and false for deleting the endpoints for the cluster",
+ event: admiral.Delete,
+ clusterId: differentClusterName,
+ deployToRolloutMigration: false,
+ appType: common.Deployment,
+ clusterAppDeleteMap: nil,
+ expectedEvent: admiral.Update,
+ expectedDeleteCluster: false,
+ },
+ {
+ name: "Given a add event is received," +
+ "And we are currently processing for a different cluster," +
+ "Then we should return a add event and false for deleting the endpoints for the cluster",
+ event: admiral.Add,
+ clusterId: differentClusterName,
+ deployToRolloutMigration: false,
+ appType: common.Deployment,
+ clusterAppDeleteMap: nil,
+ expectedEvent: admiral.Add,
+ expectedDeleteCluster: false,
+ },
+ {
+ name: "Given a update event is received," +
+ "And we are currently processing for a different cluster," +
+ "Then we should return a update event and false for deleting the endpoints for the cluster",
+ event: admiral.Update,
+ clusterId: differentClusterName,
+ deployToRolloutMigration: false,
+ appType: common.Deployment,
+ clusterAppDeleteMap: nil,
+ expectedEvent: admiral.Update,
+ expectedDeleteCluster: false,
+ },
+ {
+ name: "Given a add event is received," +
+ "And we are currently processing for the same cluster," +
+ "Then we should return a add event and false for deleting the endpoints for the cluster",
+ event: admiral.Add,
+ clusterId: clusterName,
+ deployToRolloutMigration: false,
+ appType: common.Deployment,
+ clusterAppDeleteMap: nil,
+ expectedEvent: admiral.Add,
+ expectedDeleteCluster: false,
+ },
+ {
+ name: "Given a update event is received," +
+ "And we are currently processing for the same cluster," +
+ "Then we should return a update event and false for deleting the endpoints for the cluster",
+ event: admiral.Update,
+ clusterId: clusterName,
+ deployToRolloutMigration: false,
+ appType: common.Deployment,
+ clusterAppDeleteMap: nil,
+ expectedEvent: admiral.Update,
+ expectedDeleteCluster: false,
+ },
+ {
+ name: "Given a add event is received," +
+ "And we are currently processing for the same cluster," +
+ "And an application is being migrated from deployment to rollout," +
+ "Then we should return a add event and false for deleting the endpoints for the cluster",
+ event: admiral.Add,
+ clusterId: clusterName,
+ deployToRolloutMigration: true,
+ appType: common.Deployment,
+ clusterAppDeleteMap: nil,
+ expectedEvent: admiral.Add,
+ expectedDeleteCluster: false,
+ },
+ {
+ name: "Given a update event is received," +
+ "And we are currently processing for the same cluster," +
+ "And an application is being migrated from deployment to rollout," +
+ "Then we should return a update event and false for deleting the endpoints for the cluster",
+ event: admiral.Update,
+ clusterId: clusterName,
+ deployToRolloutMigration: true,
+ appType: common.Deployment,
+ clusterAppDeleteMap: nil,
+ expectedEvent: admiral.Update,
+ expectedDeleteCluster: false,
+ },
+ {
+ name: "Given a delete event is received," +
+ "And we are currently processing for the same cluster," +
+ "And an application is being migrated from deployment to rollout," +
+ "And an application is of deployment type," +
+ "Then we should return a delete event and true for deleting the endpoints for the cluster",
+ event: admiral.Delete,
+ clusterId: clusterName,
+ deployToRolloutMigration: true,
+ appType: common.Deployment,
+ clusterAppDeleteMap: map[string]string{clusterName: common.Deployment},
+ expectedEvent: admiral.Delete,
+ expectedDeleteCluster: true,
+ },
+ {
+ name: "Given a delete event is received," +
+ "And we are currently processing for the same cluster," +
+ "And an application is being migrated from deployment to rollout," +
+ "And an application is of rollout type," +
+ "Then we should return a update event and true for deleting the endpoints for the cluster",
+ event: admiral.Delete,
+ clusterId: clusterName,
+ deployToRolloutMigration: true,
+ appType: common.Rollout,
+ clusterAppDeleteMap: map[string]string{clusterName: common.Deployment},
+ expectedEvent: admiral.Update,
+ expectedDeleteCluster: true,
+ },
+ // NOTE(review): the description below says "return a update event" but expectedEvent
+ // is admiral.Delete; it also says "rollout to deployment" while the delete map records
+ // common.Rollout — confirm the intended wording of this case.
+ {
+ name: "Given a delete event is received," +
+ "And we are currently processing for the same cluster," +
+ "And an application is being migrated from rollout to deployment," +
+ "And an application is of rollout type," +
+ "Then we should return a update event and true for deleting the endpoints for the cluster",
+ event: admiral.Delete,
+ clusterId: clusterName,
+ deployToRolloutMigration: true,
+ appType: common.Rollout,
+ clusterAppDeleteMap: map[string]string{clusterName: common.Rollout},
+ expectedEvent: admiral.Delete,
+ expectedDeleteCluster: true,
+ },
+ {
+ name: "Given a add event is received," +
+ "And we are currently processing for a different cluster," +
+ "And an application is being migrated from deployment to rollout," +
+ "Then we should return a add event and false for deleting the endpoints for the cluster",
+ event: admiral.Add,
+ clusterId: differentClusterName,
+ deployToRolloutMigration: true,
+ appType: common.Deployment,
+ clusterAppDeleteMap: nil,
+ expectedEvent: admiral.Add,
+ expectedDeleteCluster: false,
+ },
+ {
+ name: "Given a update event is received," +
+ "And we are currently processing for a different cluster," +
+ "And an application is being migrated from deployment to rollout," +
+ "Then we should return a update event and false for deleting the endpoints for the cluster",
+ event: admiral.Update,
+ clusterId: differentClusterName,
+ deployToRolloutMigration: false,
+ appType: common.Deployment,
+ clusterAppDeleteMap: nil,
+ expectedEvent: admiral.Update,
+ expectedDeleteCluster: false,
+ },
+ // NOTE(review): the next two descriptions say "return a delete event" but both expect
+ // admiral.Update — presumably the wording is stale; verify against removeSeEndpoints.
+ {
+ name: "Given a delete event is received," +
+ "And we are currently processing for a different cluster," +
+ "And an application is being migrated from deployment to rollout," +
+ "And an application is of deployment type," +
+ "Then we should return a delete event and false for deleting the endpoints for the cluster",
+ event: admiral.Delete,
+ clusterId: differentClusterName,
+ deployToRolloutMigration: true,
+ appType: common.Deployment,
+ clusterAppDeleteMap: nil,
+ expectedEvent: admiral.Update,
+ expectedDeleteCluster: false,
+ },
+ {
+ name: "Given a delete event is received," +
+ "And we are currently processing for a different cluster," +
+ "And an application is being migrated from deployment to rollout," +
+ "And an application is of rollout type," +
+ "Then we should return a delete event and false for deleting the endpoints for the cluster",
+ event: admiral.Delete,
+ clusterId: differentClusterName,
+ deployToRolloutMigration: true,
+ appType: common.Rollout,
+ clusterAppDeleteMap: nil,
+ expectedEvent: admiral.Update,
+ expectedDeleteCluster: false,
+ },
+ }
+
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ eventType, deleteCluster := removeSeEndpoints(clusterName, c.event, c.clusterId, c.deployToRolloutMigration, c.appType, c.clusterAppDeleteMap)
+ if !reflect.DeepEqual(eventType, c.expectedEvent) {
+ t.Errorf("wanted eventType: %v, got: %v", c.expectedEvent, eventType)
+ }
+
+ if !reflect.DeepEqual(deleteCluster, c.expectedDeleteCluster) {
+ t.Errorf("wanted deleteCluster: %v, got: %v", c.expectedDeleteCluster, deleteCluster)
+ }
+ })
+ }
+}
+
+// TestGetAllServicesForRollout verifies GetAllServicesForRollout against service caches with
+// differing label matches and mesh ports, plus nil-rollout and empty-selector edge cases.
+func TestGetAllServicesForRollout(t *testing.T) {
+
+ setupForServiceEntryTests()
+ config := rest.Config{
+ Host: "localhost",
+ }
+
+ stop := make(chan struct{})
+ s, e := admiral.NewServiceController(stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300), loader.GetFakeClientLoader())
+ if e != nil {
+ t.Fatalf("%v", e)
+ }
+ sWithNolabels, e := admiral.NewServiceController(stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300), loader.GetFakeClientLoader())
+ if e != nil {
+ t.Fatalf("%v", e)
+ }
+ sWithRootLabels, e := admiral.NewServiceController(stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300), loader.GetFakeClientLoader())
+ if e != nil {
+ t.Fatalf("%v", e)
+ }
+ sWithNoService, e := admiral.NewServiceController(stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300), loader.GetFakeClientLoader())
+ if e != nil {
+ t.Fatalf("%v", e)
+ }
+ sWithRootMeshPorts, e := admiral.NewServiceController(stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300), loader.GetFakeClientLoader())
+ if e != nil {
+ t.Fatalf("%v", e)
+ }
+
+ admiralCache := AdmiralCache{}
+
+ cacheWithNoEntry := ServiceEntryAddressStore{
+ EntryAddresses: map[string]string{},
+ Addresses: []string{},
+ }
+
+ localAddress := common.LocalAddressPrefix + ".10.1"
+
+ cnameIdentityCache := sync.Map{}
+ cnameIdentityCache.Store("dev.bar.global", "bar")
+ admiralCache.CnameIdentityCache = &cnameIdentityCache
+
+ admiralCache.ServiceEntryAddressStore = &ServiceEntryAddressStore{
+ EntryAddresses: map[string]string{"e2e.my-first-service.mesh-se": localAddress},
+ Addresses: []string{localAddress},
+ }
+
+ admiralCache.CnameClusterCache = common.NewMapOfMaps()
+
+ fakeIstioClient := istiofake.NewSimpleClientset()
+
+ cacheWithEntry := ServiceEntryAddressStore{
+ EntryAddresses: map[string]string{},
+ Addresses: []string{},
+ }
+
+ cacheController := &test.FakeConfigMapController{
+ GetError: nil,
+ PutError: nil,
+ ConfigmapToReturn: buildFakeConfigMapFromAddressStore(&cacheWithEntry, "123"),
+ }
+
+ admiralCache.ConfigMapController = cacheController
+ admiralCache.ServiceEntryAddressStore = &cacheWithNoEntry
+
+ rc := generateRC(fakeIstioClient, s)
+ rcWithNolabels := generateRC(fakeIstioClient, sWithNolabels)
+ rcWithOnlyRootLabels := generateRC(fakeIstioClient, sWithRootLabels)
+ rcWithNoService := generateRC(fakeIstioClient, sWithNoService)
+ rcWithRootMeshPorts := generateRC(fakeIstioClient, sWithRootMeshPorts)
+
+ serviceRootForRollout := generateService("rootservice", "test-ns", map[string]string{"app": "test"}, 8090)
+ serviceStableForRollout := generateService("stableservice", "test-ns", map[string]string{"app": "test"}, 8090)
+ serviceStableForRolloutNoLabels := generateService("stableservicenolabels", "test-ns", map[string]string{}, 8090)
+ serviceRootForRolloutNoLabels := generateService("rootservicenolabels", "test-ns", map[string]string{}, 8090)
+ serviceStableForRolloutNoPorts := generateService("stablenoports", "test-ns", map[string]string{}, 1024)
+
+ s.Cache.Put(serviceRootForRollout)
+ s.Cache.Put(serviceStableForRollout)
+
+ sWithRootLabels.Cache.Put(serviceStableForRolloutNoLabels)
+ sWithRootLabels.Cache.Put(serviceRootForRollout)
+
+ sWithNolabels.Cache.Put(serviceStableForRolloutNoLabels)
+ sWithNolabels.Cache.Put(serviceRootForRolloutNoLabels)
+
+ sWithRootMeshPorts.Cache.Put(serviceRootForRollout)
+ sWithRootMeshPorts.Cache.Put(serviceStableForRolloutNoPorts)
+
+ selector := metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "test"},
+ }
+
+ selectorEmpty := metav1.LabelSelector{
+ MatchLabels: map[string]string{},
+ }
+
+ testRollout := createTestRollout(selector, "stableservice", "rootservice")
+ testRolloutEmpty := createTestRollout(selectorEmpty, "stableservice", "rootservice")
+
+ testCases := []struct {
+ name string
+ rc *RemoteController
+ rollout *argo.Rollout
+ expectedServiceArray []string
+ }{
+ {
+ name: "Should return root and stable services, " +
+ "given the root and stable services match the rollout label spec and have mesh ports",
+ rc: rc,
+ rollout: &testRollout,
+ expectedServiceArray: []string{"stableservice", "rootservice"},
+ },
+ {
+ name: "Should return root service " +
+ "given root and stable services are present, only root matches rollout labels",
+ rc: rcWithOnlyRootLabels,
+ rollout: &testRollout,
+ expectedServiceArray: []string{"rootservice"},
+ },
+ {
+ name: "Should return root service " +
+ "given root and stable services are present, only root has mesh ports",
+ rc: rcWithRootMeshPorts,
+ rollout: &testRollout,
+ expectedServiceArray: []string{"rootservice"},
+ },
+ {
+ name: "Should return no service " +
+ "given root and stable services are present, no labels are matching rollout",
+ rc: rcWithNolabels,
+ rollout: &testRollout,
+ expectedServiceArray: []string{},
+ },
+ {
+ name: "Should return no service " +
+ "given no service is present in cache",
+ rc: rcWithNoService,
+ rollout: &testRollout,
+ expectedServiceArray: []string{},
+ },
+ {
+ name: "Should return no service " +
+ "given rollout is nil",
+ rc: rc,
+ rollout: nil,
+ expectedServiceArray: []string{},
+ },
+ // NOTE(review): this description says "Should return root service" but the
+ // expectation below is an empty service array — confirm the intended wording.
+ {
+ name: "Should return root service " +
+ "given rollout does not have selector",
+ rc: rc,
+ rollout: &testRolloutEmpty,
+ expectedServiceArray: []string{},
+ },
+ }
+
+ //Run the test for every provided case
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+
+ serviceMap := GetAllServicesForRollout(c.rc, c.rollout)
+
+ for _, key := range c.expectedServiceArray {
+ if serviceMap[key] == nil {
+ t.Errorf("Test %s failed, expected: %v got %v", c.name, c.expectedServiceArray, serviceMap)
+ }
+ }
+
+ if len(c.expectedServiceArray) != len(serviceMap) {
+ t.Errorf("Test %s failed, expected length: %v got %v", c.name, len(c.expectedServiceArray), len(serviceMap))
+ }
+
+ })
+ }
+
+}
+
+// TestGenerateServiceEntryForCanary verifies GenerateServiceEntryForCanary against a fake Istio
+// client holding a VirtualService that references stable/canary services, counting created SEs.
+func TestGenerateServiceEntryForCanary(t *testing.T) {
+ ctxLogger := logrus.WithFields(logrus.Fields{"txId": "abc"})
+ setupForServiceEntryTests()
+ ctx := context.Background()
+ config := rest.Config{
+ Host: "localhost",
+ }
+
+ stop := make(chan struct{})
+ s, e := admiral.NewServiceController(stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300), loader.GetFakeClientLoader())
+
+ if e != nil {
+ t.Fatalf("%v", e)
+ }
+
+ admiralCache := AdmiralCache{}
+
+ cacheWithNoEntry := ServiceEntryAddressStore{
+ EntryAddresses: map[string]string{},
+ Addresses: []string{},
+ }
+
+ localAddress := common.LocalAddressPrefix + ".10.1"
+
+ cnameIdentityCache := sync.Map{}
+ cnameIdentityCache.Store("dev.bar.global", "bar")
+ admiralCache.CnameIdentityCache = &cnameIdentityCache
+
+ admiralCache.ServiceEntryAddressStore = &ServiceEntryAddressStore{
+ EntryAddresses: map[string]string{"e2e.my-first-service.mesh-se": localAddress},
+ Addresses: []string{localAddress},
+ }
+
+ admiralCache.CnameClusterCache = common.NewMapOfMaps()
+
+ fakeIstioClient := istiofake.NewSimpleClientset()
+
+ cacheWithEntry := ServiceEntryAddressStore{
+ EntryAddresses: map[string]string{},
+ Addresses: []string{},
+ }
+
+ cacheController := &test.FakeConfigMapController{
+ GetError: nil,
+ PutError: nil,
+ ConfigmapToReturn: buildFakeConfigMapFromAddressStore(&cacheWithEntry, "123"),
+ }
+
+ admiralCache.ConfigMapController = cacheController
+ admiralCache.ServiceEntryAddressStore = &cacheWithNoEntry
+
+ rc := generateRC(fakeIstioClient, s)
+
+ serviceForRollout := generateService("stableservice", "test-ns", map[string]string{"app": "test"}, 8090)
+ serviceCanaryForRollout := generateService("canaryservice", "test-ns", map[string]string{"app": "test"}, 8090)
+
+ s.Cache.Put(serviceForRollout)
+ s.Cache.Put(serviceCanaryForRollout)
+
+ vsRoutes := []*istioNetworkingV1Alpha3.HTTPRouteDestination{
+ {
+ Destination: &istioNetworkingV1Alpha3.Destination{
+ Host: "canaryservice",
+ Port: &istioNetworkingV1Alpha3.PortSelector{
+ Number: common.DefaultServiceEntryPort,
+ },
+ },
+ Weight: 30,
+ },
+ {
+ Destination: &istioNetworkingV1Alpha3.Destination{
+ Host: "stableservice",
+ Port: &istioNetworkingV1Alpha3.PortSelector{
+ Number: common.DefaultServiceEntryPort,
+ },
+ },
+ Weight: 70,
+ },
+ }
+
+ fooVS := &v1alpha3.VirtualService{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "virtualservice",
+ Labels: map[string]string{"admiral.io/env": "e2e", "identity": "my-first-service"},
+ },
+ Spec: istioNetworkingV1Alpha3.VirtualService{
+ Hosts: []string{"stage.test00.foo"},
+ Http: []*istioNetworkingV1Alpha3.HTTPRoute{
+ {
+ Route: vsRoutes,
+ },
+ },
+ },
+ }
+
+ _, err := fakeIstioClient.NetworkingV1alpha3().VirtualServices("test-ns").Create(ctx, fooVS, metav1.CreateOptions{})
+ if err != nil {
+ t.Error(err)
+ }
+ selector := metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "test"},
+ }
+
+ workloadIdentityKey := "identity"
+ rolloutSeCreationTestCases := []struct {
+ name string
+ rc *RemoteController
+ rollout argo.Rollout
+ expectedResult int
+ }{
+ {
+ name: "Should return a created service entry for canary, " +
+ "given the 2 services exist and the VS has reference to both services",
+ rc: rc,
+ rollout: createTestRollout(selector, "stableservice", "canaryservice"),
+ expectedResult: 1,
+ },
+ // NOTE(review): this case's name says "Should not create service entry" yet
+ // expectedResult is 1 (an SE is created) — confirm the intended description.
+ {
+ name: "Should not create service entry for canary, " +
+ "given both services exist and VS has reference to only canary ",
+ rc: rc,
+ rollout: createTestRollout(selector, "", "canaryservice"),
+ expectedResult: 1,
+ },
+ {
+ name: "Should not create service entry for stable, " +
+ "given both services exist and VS has reference to only stable ",
+ rc: rc,
+ rollout: createTestRollout(selector, "stableservice", ""),
+ expectedResult: 0,
+ },
+ {
+ name: "Should not return created service entry for stable, " +
+ "given only stable service exists and VS has reference to both ",
+ rc: rc,
+ rollout: createTestRollout(selector, "stableservice", "canaryservice2"),
+ expectedResult: 0,
+ },
+ {
+ name: "Should not return SE, both services are missing",
+ rc: rc,
+ rollout: createTestRollout(selector, "stableservice2", "canaryservice2"),
+ expectedResult: 0,
+ },
+ {
+ name: "Should not return SE, reference in VS are missing",
+ rc: rc,
+ rollout: createTestRollout(selector, "", ""),
+ expectedResult: 0,
+ },
+ // NOTE(review): duplicates the previous case's rollout arguments; the name says
+ // "canary strategy is nil" but createTestRollout is called the same way — verify
+ // this case actually exercises a nil canary strategy.
+ {
+ name: "Should not return SE, canary strategy is nil",
+ rc: rc,
+ rollout: createTestRollout(selector, "", ""),
+ expectedResult: 0,
+ },
+ }
+
+ //Run the test for every provided case
+ for _, c := range rolloutSeCreationTestCases {
+ t.Run(c.name, func(t *testing.T) {
+ se := map[string]*istioNetworkingV1Alpha3.ServiceEntry{}
+ san := getSanForRollout(&c.rollout, workloadIdentityKey)
+ err := GenerateServiceEntryForCanary(ctxLogger, ctx, admiral.Add, rc, &admiralCache, map[string]uint32{"http": uint32(80)}, &c.rollout, se, workloadIdentityKey, san)
+ if err != nil || len(se) != c.expectedResult {
+ t.Errorf("Test %s failed, expected: %v got %v", c.name, c.expectedResult, len(se))
+ }
+
+ })
+ }
+
+}
+
+// TestIsIstioCanaryStrategy verifies that IsCanaryIstioStrategy returns true
+// only for a rollout whose canary strategy has Istio traffic routing configured,
+// and false for blue/green, plain canary, no-strategy, empty-spec and nil rollouts.
+func TestIsIstioCanaryStrategy(t *testing.T) {
 	var (
-		service = &k8sV1.Service{
-			Spec: k8sV1.ServiceSpec{
-				Ports: []k8sV1.ServicePort{
-					{
-						Name: common.Http,
-						Port: 8090,
+		emptyRollout *argo.Rollout
+		rolloutWithBlueGreenStrategy = &argo.Rollout{
+			Spec: argo.RolloutSpec{
+				Strategy: argo.RolloutStrategy{
+					BlueGreen: &argo.BlueGreenStrategy{
+						ActiveService: "active",
 					},
 				},
 			},
 		}
-		clusterNameWithExistingDeployment = "cluster_with_deployment-ppd-k8s"
-		clusterNameWithExistingRollout = "cluster_with_rollout-ppd-k8s"
-		clusterNameWithoutExistingRolloutOrDeployment = "cluster_without_deployment_rollout-ppd-k8s"
-		deploymentByClusterNameForExistingClusterWithDeployment = map[string]*k8sAppsV1.Deployment{
-			clusterNameWithExistingDeployment: {
-				ObjectMeta: v1.ObjectMeta{
-					Labels: map[string]string{
-						"key": "value",
+		rolloutWithSimpleCanaryStrategy = &argo.Rollout{
+			Spec: argo.RolloutSpec{
+				Strategy: argo.RolloutStrategy{
+					Canary: &argo.CanaryStrategy{
+						CanaryService: "canaryservice",
 					},
 				},
 			},
 		}
-		rolloutByClusterNameForExistingClusterWithRollout = map[string]*argo.Rollout{
-			clusterNameWithExistingRollout: {
-				ObjectMeta: v1.ObjectMeta{
-					Labels: map[string]string{
-						"key": "value",
+		rolloutWithIstioCanaryStrategy = &argo.Rollout{
+			Spec: argo.RolloutSpec{
+				Strategy: argo.RolloutStrategy{
+					Canary: &argo.CanaryStrategy{
+						CanaryService: "canaryservice",
+						StableService: "stableservice",
+						TrafficRouting: &argo.RolloutTrafficRouting{
+							Istio: &argo.IstioTrafficRouting{
+								VirtualService: &argo.IstioVirtualService{Name: "virtualservice"},
+							},
+						},
 					},
 				},
 			},
 		}
+		rolloutWithNoStrategy = &argo.Rollout{
+			Spec: argo.RolloutSpec{},
+		}
+		rolloutWithEmptySpec = &argo.Rollout{}
 	)
 	cases := []struct {
-		name string
-		cluster string
-		serviceInstance *k8sV1.Service
-		deploymentByCluster map[string]*k8sAppsV1.Deployment
-		rolloutsByCluster map[string]*argo.Rollout
-		expectedMeshPort map[string]uint32
-		expectedLabels map[string]string
+		name string
+		rollout *argo.Rollout
+		expectedResult bool
 	}{
 		{
-			name: "Given a deployment with labels exists in a cluster, " +
-				"When GetMeshPortAndLabelsFromDeploymentOrRollout is called with," +
-				"this cluster, with a valid service, " +
-				"Then, it should return mesh ports and labels",
-			cluster: clusterNameWithExistingDeployment,
-			serviceInstance: service,
-			deploymentByCluster: deploymentByClusterNameForExistingClusterWithDeployment,
-			rolloutsByCluster: rolloutByClusterNameForExistingClusterWithRollout,
-			expectedMeshPort: map[string]uint32{
-				common.Http: 8090,
-			},
-			expectedLabels: map[string]string{
-				"key": "value",
-			},
+			name: "Given argo rollout is configured with blue green rollout strategy" +
+				"When isCanaryIstioStrategy is called" +
+				"Then it should return false",
+			rollout: rolloutWithBlueGreenStrategy,
+			expectedResult: false,
+		},
+		{
+			name: "Given argo rollout is configured with canary rollout strategy" +
+				"When isCanaryIstioStrategy is called" +
+				"Then it should return false",
+			rollout: rolloutWithSimpleCanaryStrategy,
+			expectedResult: false,
 		},
 		{
-			name: "Given a rollout with labels exists in a cluster, " +
-				"When GetMeshPortAndLabelsFromDeploymentOrRollout is called with," +
-				"this cluster, with a valid service, " +
-				"Then, it should return mesh ports and labels",
-			cluster: clusterNameWithExistingRollout,
-			serviceInstance: service,
-			deploymentByCluster: deploymentByClusterNameForExistingClusterWithDeployment,
-			rolloutsByCluster: rolloutByClusterNameForExistingClusterWithRollout,
-			expectedMeshPort: map[string]uint32{
-				common.Http: 8090,
+			name: "Given argo rollout is configured with istio canary rollout strategy" +
+				"When isCanaryIstioStrategy is called" +
+				"Then it should return true",
+			rollout: rolloutWithIstioCanaryStrategy,
+			expectedResult: true,
+		},
+		{
+			name: "Given argo rollout is configured without any rollout strategy" +
+				"When isCanaryIstioStrategy is called" +
+				"Then it should return false",
+			rollout: rolloutWithNoStrategy,
+			expectedResult: false,
+		},
+		{
+			name: "Given argo rollout is nil" +
+				"When isCanaryIstioStrategy is called" +
+				"Then it should return false",
+			rollout: emptyRollout,
+			expectedResult: false,
+		},
+		{
+			name: "Given argo rollout has an empty Spec" +
+				"When isCanaryIstioStrategy is called" +
+				"Then it should return false",
+			rollout: rolloutWithEmptySpec,
+			expectedResult: false,
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			result := IsCanaryIstioStrategy(c.rollout)
+			if result != c.expectedResult {
+				t.Errorf("expected: %t, got: %t", c.expectedResult, result)
+			}
+		})
+	}
+}
+
+// generateSEGivenIdentity builds the ServiceEntry expected for the given identity:
+// host "test.<identity>.mesh", address 127.0.0.1, a single http port 80, one
+// workload endpoint at dummy.admiral.global in locality us-west-2, and a
+// spiffe subject-alt-name derived from the identity.
+func generateSEGivenIdentity(deployment1Identity string) *istioNetworkingV1Alpha3.ServiceEntry {
+	return &istioNetworkingV1Alpha3.ServiceEntry{
+		Hosts: []string{"test." + deployment1Identity + ".mesh"},
+		Addresses: []string{"127.0.0.1"},
+		Ports: []*istioNetworkingV1Alpha3.ServicePort{
+			{
+				Number: 80,
+				Protocol: "http",
+				Name: "http",
 			},
-		expectedLabels: map[string]string{
-			"key": "value",
+		},
+		Location: istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL,
+		Resolution: istioNetworkingV1Alpha3.ServiceEntry_DNS,
+		Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{
+			&istioNetworkingV1Alpha3.WorkloadEntry{
+				Address: "dummy.admiral.global",
+				Ports: map[string]uint32{
+					"http": 0,
+				},
+				Locality: "us-west-2",
 			},
 		},
+		SubjectAltNames: []string{"spiffe://prefix/" + deployment1Identity},
+	}
+
+}
+
+// TestFilterClusters verifies that filterClusters returns the clusters present
+// in sourceClusters but absent from destinationClusters (set difference).
+func TestFilterClusters(t *testing.T) {
+	var (
+		sourceClusters = common.NewMap()
+		destinationClusters = common.NewMap()
+		destinationAllCommonClusters = common.NewMap()
+		destinationMoreClusters = common.NewMap()
+		destinationNoCommonClusters = common.NewMap()
+	)
+	sourceClusters.Put("A", "A")
+	sourceClusters.Put("B", "B")
+	destinationClusters.Put("A", "A")
+	destinationAllCommonClusters.Put("A", "A")
+	destinationAllCommonClusters.Put("B", "B")
+	destinationMoreClusters.Put("A", "A")
+	destinationMoreClusters.Put("B", "B")
+	destinationMoreClusters.Put("C", "C")
+	destinationNoCommonClusters.Put("E", "E")
+
+	cases := []struct {
+		name string
+		sourceClusters *common.Map
+		destinationClusters *common.Map
+		expectedResult map[string]string
+	}{
+		{
+			name: "Given sourceClusters and destinationClusters" +
+				"When there are common clusters between the two" +
+				"Then it should only the clusters where the source is but not the destination",
+			sourceClusters: sourceClusters,
+			destinationClusters: destinationClusters,
+			expectedResult: map[string]string{"B": "B"},
+		},
 		{
-			name: "Given neither a deployment nor a rollout with labels exists in a cluster, " +
-				"When GetMeshPortAndLabelsFromDeploymentOrRollout is called with," +
-				"this cluster, with a valid service, " +
-				"Then, it should return nil for mesh ports, and nil for labels",
-			cluster: clusterNameWithoutExistingRolloutOrDeployment,
-			serviceInstance: service,
-			deploymentByCluster: deploymentByClusterNameForExistingClusterWithDeployment,
-			rolloutsByCluster: rolloutByClusterNameForExistingClusterWithRollout,
-			expectedMeshPort: nil,
-			expectedLabels: nil,
+			name: "Given sourceClusters and destinationClusters" +
+				"When all the cluster are common" +
+				"Then it should return an empty map",
+			sourceClusters: sourceClusters,
+			destinationClusters: destinationAllCommonClusters,
+			expectedResult: map[string]string{},
 		},
 		{
-			name: "Given neither a deployment nor a rollout with labels exists in a cluster, " +
-				"When GetMeshPortAndLabelsFromDeploymentOrRollout is called with," +
-				"this cluster, with a valid service, but empty deployment by cluster and rollout by cluster maps " +
-				"Then, it should return nil for mesh ports, and nil for labels",
-			cluster: clusterNameWithoutExistingRolloutOrDeployment,
-			serviceInstance: service,
-			deploymentByCluster: nil,
-			rolloutsByCluster: nil,
-			expectedMeshPort: nil,
-			expectedLabels: nil,
+			name: "Given sourceClusters and destinationClusters" +
+				"When all the cluster are common and destination has more clusters" +
+				"Then it should return an empty map",
+			sourceClusters: sourceClusters,
+			destinationClusters: destinationMoreClusters,
+			expectedResult: map[string]string{},
+		},
+		{
+			name: "Given sourceClusters and destinationClusters" +
+				"When no cluster are common" +
+				"Then it should return all the clusters in the sourceClusters",
+			sourceClusters: sourceClusters,
+			destinationClusters: destinationNoCommonClusters,
+			expectedResult: map[string]string{"A": "A", "B": "B"},
 		},
 	}
 	for _, c := range cases {
-		meshPort, labels := GetMeshPortAndLabelsFromDeploymentOrRollout(
-			c.cluster, c.serviceInstance, c.deploymentByCluster, c.rolloutsByCluster,
-		)
-		if !reflect.DeepEqual(meshPort, c.expectedMeshPort) {
-			t.Errorf("expected: %v, got: %v", c.expectedMeshPort, meshPort)
-		}
-		if !reflect.DeepEqual(labels, c.expectedLabels) {
-			t.Errorf("expected: %v, got: %v", c.expectedLabels, labels)
-		}
+		t.Run(c.name, func(t *testing.T) {
+			result := filterClusters(c.sourceClusters, c.destinationClusters)
+			if !reflect.DeepEqual(result.Copy(), c.expectedResult) {
+				t.Errorf("expected: %v, got: %v", c.expectedResult, result)
+			}
+		})
+	}
+}
+
+// TestGetSortedDependentNamespaces verifies getSortedDependentNamespaces against
+// combinations of nil/empty/populated caches: it expects a sorted namespace slice
+// for the given cname+cluster, "istio-system" added when dependents exist in the
+// source cluster, and "*" when the namespace count exceeds ExportToMaxNamespaces.
+func TestGetSortedDependentNamespaces(t *testing.T) {
+	admiralParams := common.GetAdmiralParams()
+	admiralParams.EnableSWAwareNSCaches = true
+	admiralParams.ExportToIdentityList = []string{"*"}
+	admiralParams.ExportToMaxNamespaces = 35
+	ctxLogger := logrus.WithFields(logrus.Fields{"txId": "abc"})
+	common.ResetSync()
+	common.InitializeConfig(admiralParams)
+	emptynscache := common.NewMapOfMapOfMaps()
+	emptycnameidcache := &sync.Map{}
+	emptyidclustercache := common.NewMapOfMaps()
+	cndepclusternscache := common.NewMapOfMapOfMaps()
+	clusternscache := common.NewMapOfMaps()
+	clusternscache.PutMap("cluster1", nil)
+	cndepclusternscache.PutMapofMaps("cname", clusternscache)
+	cndepclusternscache.Put("cname", "cluster2", "ns1", "ns1")
+	cndepclusternscache.Put("cname", "cluster2", "ns2", "ns2")
+	cndepclusternscache.Put("cname", "cluster3", "ns3", "ns3")
+	cndepclusternscache.Put("cname", "cluster3", "ns4", "ns4")
+	cndepclusternscache.Put("cname", "cluster3", "ns5", "ns5")
+	cndepclusternscache.Put("cname", "cluster4", "ns6", "ns6")
+	cndepclusternscache.Put("cname", "cluster4", "ns7", "ns7")
+	// push cluster3 past the 35-namespace maximum to trigger the "*" result
+	for i := range [35]int{} {
+		nshash := "ns" + strconv.Itoa(i+3)
+		cndepclusternscache.Put("cname", "cluster3", nshash, nshash)
+	}
+	idclustercache := common.NewMapOfMaps()
+	idclustercache.Put("cnameid", "cluster1", "cluster1")
+	idclustercache.Put("cnameid", "cluster2", "cluster2")
+	idclustercache.Put("cnameid", "cluster3", "cluster3")
+	cnameidcache := &sync.Map{}
+	cnameidcache.Store("cname", "cnameid")
+	var nilSlice []string
+	cases := []struct {
+		name string
+		identityClusterCache *common.MapOfMaps
+		cnameIdentityCache *sync.Map
+		cnameDependentClusterNamespaceCache *common.MapOfMapOfMaps
+		cname string
+		clusterId string
+		expectedResult []string
+	}{
+		{
+			name: "Given CnameDependentClusterNamespaceCache is nil " +
+				"Then we should return nil slice",
+			identityClusterCache: nil,
+			cnameIdentityCache: nil,
+			cnameDependentClusterNamespaceCache: nil,
+			cname: "cname",
+			clusterId: "fake-cluster",
+			expectedResult: nilSlice,
+		},
+		{
+			name: "Given CnameDependentClusterNamespaceCache is filled and CnameIdentityCache is nil " +
+				"Then we should return the dependent namespaces without istio-system",
+			identityClusterCache: nil,
+			cnameIdentityCache: nil,
+			cnameDependentClusterNamespaceCache: cndepclusternscache,
+			cname: "cname",
+			clusterId: "cluster2",
+			expectedResult: []string{"ns1", "ns2"},
+		},
+		{
+			name: "Given CnameDependentClusterNamespaceCache and CnameIdentityCache are filled but IdentityClusterCache is nil " +
+				"Then we should return the dependent namespaces without istio-system",
+			identityClusterCache: nil,
+			cnameIdentityCache: cnameidcache,
+			cnameDependentClusterNamespaceCache: cndepclusternscache,
+			cname: "cname",
+			clusterId: "cluster2",
+			expectedResult: []string{"ns1", "ns2"},
+		},
+		{
+			name: "Given CnameDependentClusterNamespaceCache has no entries for the cname " +
+				"Then we should return nil slice",
+			identityClusterCache: nil,
+			cnameIdentityCache: nil,
+			cnameDependentClusterNamespaceCache: emptynscache,
+			cname: "cname-none",
+			clusterId: "fake-cluster",
+			expectedResult: nilSlice,
+		},
+		{
+			name: "Given CnameDependentClusterNamespaceCache has no namespace entries for the cname and cluster " +
+				"Then we should return nil slice",
+			identityClusterCache: nil,
+			cnameIdentityCache: nil,
+			cnameDependentClusterNamespaceCache: cndepclusternscache,
+			cname: "cname",
+			clusterId: "cluster1",
+			expectedResult: nilSlice,
+		},
+		{
+			name: "Given CnameDependentClusterNamespaceCache is filled and CnameIdentityCache has no entries for the cname" +
+				"Then we should return the dependent namespaces without istio-system",
+			identityClusterCache: nil,
+			cnameIdentityCache: emptycnameidcache,
+			cnameDependentClusterNamespaceCache: cndepclusternscache,
+			cname: "cname",
+			clusterId: "cluster2",
+			expectedResult: []string{"ns1", "ns2"},
+		},
+		{
+			name: "Given CnameDependentClusterNamespaceCache and CnameIdentityCache are filled but IdentityClusterCache has no entries for the identity " +
+				"Then we should return the dependent namespaces without istio-system",
+			identityClusterCache: emptyidclustercache,
+			cnameIdentityCache: cnameidcache,
+			cnameDependentClusterNamespaceCache: cndepclusternscache,
+			cname: "cname",
+			clusterId: "cluster2",
+			expectedResult: []string{"ns1", "ns2"},
+		},
+		{
+			name: "Given the cname has dependent cluster namespaces but no dependents are in the source cluster " +
+				"Then we should return a sorted slice of the dependent cluster namespaces",
+			identityClusterCache: idclustercache,
+			cnameIdentityCache: cnameidcache,
+			cnameDependentClusterNamespaceCache: cndepclusternscache,
+			cname: "cname",
+			clusterId: "cluster4",
+			expectedResult: []string{"ns6", "ns7"},
+		},
+		{
+			name: "Given the cname has dependent cluster namespaces and some dependents in the source cluster " +
+				"Then we should return a sorted slice of the dependent cluster namespaces including istio-system",
+			identityClusterCache: idclustercache,
+			cnameIdentityCache: cnameidcache,
+			cnameDependentClusterNamespaceCache: cndepclusternscache,
+			cname: "cname",
+			clusterId: "cluster2",
+			expectedResult: []string{"istio-system", "ns1", "ns2"},
+		},
+		{
+			name: "Given the cname has more dependent cluster namespaces than the maximum " +
+				"Then we should return a slice containing *",
+			identityClusterCache: idclustercache,
+			cnameIdentityCache: cnameidcache,
+			cnameDependentClusterNamespaceCache: cndepclusternscache,
+			cname: "cname",
+			clusterId: "cluster3",
+			expectedResult: []string{"*"},
+		},
+	}
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			admiralCache := &AdmiralCache{}
+			admiralCache.IdentityClusterCache = c.identityClusterCache
+			admiralCache.CnameIdentityCache = c.cnameIdentityCache
+			admiralCache.CnameDependentClusterNamespaceCache = c.cnameDependentClusterNamespaceCache
+			result := getSortedDependentNamespaces(admiralCache, c.cname, c.clusterId, ctxLogger)
+			if !reflect.DeepEqual(result, c.expectedResult) {
+				t.Errorf("expected: %v, got: %v", c.expectedResult, result)
+			}
+		})
 	}
 }
From 6e31efa525a058a4bdafc788d7696af24b06750d Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:27:10 -0400
Subject: [PATCH 169/235] added admiral/pkg/clusters/virtualservice_handler.go
from master
Signed-off-by: Shriram Sharma
---
.../pkg/clusters/virtualservice_handler.go | 619 ++++++++++++++++++
1 file changed, 619 insertions(+)
create mode 100644 admiral/pkg/clusters/virtualservice_handler.go
diff --git a/admiral/pkg/clusters/virtualservice_handler.go b/admiral/pkg/clusters/virtualservice_handler.go
new file mode 100644
index 00000000..0691ecd3
--- /dev/null
+++ b/admiral/pkg/clusters/virtualservice_handler.go
@@ -0,0 +1,619 @@
+package clusters
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/google/uuid"
+ commonUtil "github.com/istio-ecosystem/admiral/admiral/pkg/util"
+
+ argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ log "github.com/sirupsen/logrus"
+ networkingV1Alpha3 "istio.io/api/networking/v1alpha3"
+ "istio.io/client-go/pkg/apis/networking/v1alpha3"
+ k8sErrors "k8s.io/apimachinery/pkg/api/errors"
+ metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// NewVirtualServiceHandler returns a new instance of VirtualServiceHandler after verifying
+// the required properties are set correctly.
+// It returns an error when remoteRegistry is nil or clusterID is empty; the
+// returned handler is wired with the default rollout-update and cluster-sync
+// implementations.
+func NewVirtualServiceHandler(remoteRegistry *RemoteRegistry, clusterID string) (*VirtualServiceHandler, error) {
+	if remoteRegistry == nil {
+		return nil, fmt.Errorf("remote registry is nil, cannot initialize VirtualServiceHandler")
+	}
+	if clusterID == "" {
+		return nil, fmt.Errorf("clusterID is empty, cannot initialize VirtualServiceHandler")
+	}
+	return &VirtualServiceHandler{
+		remoteRegistry: remoteRegistry,
+		clusterID: clusterID,
+		updateResource: handleVirtualServiceEventForRollout,
+		syncVirtualServiceForDependentClusters: syncVirtualServicesToAllDependentClusters,
+		syncVirtualServiceForAllClusters: syncVirtualServicesToAllRemoteClusters,
+	}, nil
+}
+
+// UpdateResourcesForVirtualService is a type function for processing VirtualService update operations.
+// It reports whether the VirtualService belongs to an Argo rollout canary strategy.
+type UpdateResourcesForVirtualService func(
+	ctx context.Context,
+	virtualService *v1alpha3.VirtualService,
+	remoteRegistry *RemoteRegistry,
+	clusterID string,
+	handlerFunc HandleEventForRolloutFunc,
+) (bool, error)
+
+// SyncVirtualServiceResource is a type function for sync VirtualServices
+// for a set of clusters
+type SyncVirtualServiceResource func(
+	ctx context.Context,
+	clusters []string,
+	virtualService *v1alpha3.VirtualService,
+	event common.Event,
+	remoteRegistry *RemoteRegistry,
+	sourceCluster string,
+	syncNamespace string,
+) error
+
+// VirtualServiceHandler responsible for handling Add/Update/Delete events for
+// VirtualService resources
+type VirtualServiceHandler struct {
+	remoteRegistry *RemoteRegistry
+	clusterID string
+	// updateResource processes the event against Argo rollouts (canary SE updates);
+	// defaults to handleVirtualServiceEventForRollout
+	updateResource UpdateResourcesForVirtualService
+	// syncVirtualServiceForDependentClusters replicates the VirtualService to its dependent clusters
+	syncVirtualServiceForDependentClusters SyncVirtualServiceResource
+	// syncVirtualServiceForAllClusters replicates the VirtualService 'as is' to all remote clusters
+	syncVirtualServiceForAllClusters SyncVirtualServiceResource
+}
+
+// Added handles create events for VirtualService resources. It is a no-op when
+// Admiral is in read-only mode or when the resource is marked to be ignored;
+// otherwise it delegates to handleVirtualServiceEvent with the Add event.
+func (vh *VirtualServiceHandler) Added(ctx context.Context, obj *v1alpha3.VirtualService) error {
+	if commonUtil.IsAdmiralReadOnly() {
+		log.Infof(LogFormat, common.Add, "VirtualService", obj.Name, vh.clusterID, "Admiral is in read-only mode. Skipping resource from namespace="+obj.Namespace)
+		return nil
+	}
+	if IgnoreIstioResource(obj.Spec.ExportTo, obj.Annotations, obj.Namespace) {
+		log.Infof(LogFormat, common.Add, "VirtualService", obj.Name, vh.clusterID, "Skipping resource from namespace="+obj.Namespace)
+		if len(obj.Annotations) > 0 && obj.Annotations[common.AdmiralIgnoreAnnotation] == "true" {
+			log.Infof(LogFormat, "admiralIoIgnoreAnnotationCheck", "VirtualService", obj.Name, vh.clusterID, "Value=true namespace="+obj.Namespace)
+		}
+		return nil
+	}
+	return vh.handleVirtualServiceEvent(ctx, obj, common.Add)
+}
+
+// Updated handles update events for VirtualService resources. It is a no-op when
+// Admiral is in read-only mode or when the resource is marked to be ignored;
+// otherwise it delegates to handleVirtualServiceEvent with the Update event.
+func (vh *VirtualServiceHandler) Updated(ctx context.Context, obj *v1alpha3.VirtualService) error {
+	if commonUtil.IsAdmiralReadOnly() {
+		log.Infof(LogFormat, common.Update, "VirtualService", obj.Name, vh.clusterID, "Admiral is in read-only mode. Skipping resource from namespace="+obj.Namespace)
+		return nil
+	}
+	if IgnoreIstioResource(obj.Spec.ExportTo, obj.Annotations, obj.Namespace) {
+		log.Infof(LogFormat, common.Update, "VirtualService", obj.Name, vh.clusterID, "Skipping resource from namespace="+obj.Namespace)
+		if len(obj.Annotations) > 0 && obj.Annotations[common.AdmiralIgnoreAnnotation] == "true" {
+			log.Infof(LogFormat, "admiralIoIgnoreAnnotationCheck", "VirtualService", obj.Name, vh.clusterID, "Value=true namespace="+obj.Namespace)
+		}
+		return nil
+	}
+	return vh.handleVirtualServiceEvent(ctx, obj, common.Update)
+}
+
+// Deleted handles delete events for VirtualService resources. It is a no-op when
+// Admiral is in read-only mode or when the resource is marked to be ignored;
+// otherwise it delegates to handleVirtualServiceEvent with the Delete event.
+func (vh *VirtualServiceHandler) Deleted(ctx context.Context, obj *v1alpha3.VirtualService) error {
+	if commonUtil.IsAdmiralReadOnly() {
+		log.Infof(LogFormat, common.Delete, "VirtualService", obj.Name, vh.clusterID, "Admiral is in read-only mode. Skipping resource from namespace="+obj.Namespace)
+		return nil
+	}
+	if IgnoreIstioResource(obj.Spec.ExportTo, obj.Annotations, obj.Namespace) {
+		log.Infof(LogFormat, common.Delete, "VirtualService", obj.Name, vh.clusterID, "Skipping resource from namespace="+obj.Namespace)
+		if len(obj.Annotations) > 0 && obj.Annotations[common.AdmiralIgnoreAnnotation] == "true" {
+			// log at Info to match Added/Updated, which emit the identical
+			// admiralIoIgnoreAnnotationCheck message at Info level
+			log.Infof(LogFormat, "admiralIoIgnoreAnnotationCheck", "VirtualService", obj.Name, vh.clusterID, "Value=true namespace="+obj.Namespace)
+		}
+		return nil
+	}
+	return vh.handleVirtualServiceEvent(ctx, obj, common.Delete)
+}
+
+// handleVirtualServiceEvent processes a VirtualService event end to end:
+// it validates the context/object/sync-namespace, skips multi-host
+// VirtualServices, lets Argo-rollout canary VirtualServices update their
+// ServiceEntries instead of being replicated, and otherwise syncs the
+// VirtualService to its dependent clusters — or replicates it 'as is' to all
+// remote clusters when no dependent clusters are known. Sync failures are
+// logged but not returned (no retries).
+func (vh *VirtualServiceHandler) handleVirtualServiceEvent(ctx context.Context, virtualService *v1alpha3.VirtualService, event common.Event) error {
+	var (
+		//nolint
+		syncNamespace = common.GetSyncNamespace()
+	)
+	defer logElapsedTimeForVirtualService("handleVirtualServiceEvent="+string(event), vh.clusterID, virtualService)()
+	if syncNamespace == "" {
+		return fmt.Errorf("expected valid value for sync namespace, got empty")
+	}
+	if ctx == nil {
+		return fmt.Errorf("empty context passed")
+	}
+	if virtualService == nil {
+		return fmt.Errorf("passed %s object is nil", common.VirtualServiceResourceType)
+	}
+	//nolint
+	spec := virtualService.Spec
+
+	log.Infof(LogFormat, event, common.VirtualServiceResourceType, virtualService.Name, vh.clusterID, "Received event")
+
+	// multiple hosts are not supported; spec.Hosts[0] is used for cache lookups below
+	if len(spec.Hosts) > 1 {
+		log.Errorf(LogFormat, "Event", common.VirtualServiceResourceType, virtualService.Name, vh.clusterID, "Skipping as multiple hosts not supported for virtual service namespace="+virtualService.Namespace)
+		return nil
+	}
+
+	// check if this virtual service is used by Argo rollouts for canary strategy, if so, update the corresponding SE with appropriate weights
+	if common.GetAdmiralParams().ArgoRolloutsEnabled {
+		isRolloutCanaryVS, err := vh.updateResource(ctx, virtualService, vh.remoteRegistry, vh.clusterID, HandleEventForRollout)
+		if err != nil {
+			return err
+		}
+		if isRolloutCanaryVS {
+			log.Infof(LogFormat, "Event", common.VirtualServiceResourceType, virtualService.Name, vh.clusterID,
+				"Skipping replicating VirtualService in other clusters as this VirtualService is associated with a Argo Rollout")
+			return nil
+		}
+	}
+
+	if len(spec.Hosts) == 0 {
+		log.Infof(LogFormat, "Event", common.VirtualServiceResourceType, virtualService.Name, vh.clusterID, "No hosts found in VirtualService, will not sync to other clusters")
+		return nil
+	}
+
+	dependentClusters := vh.remoteRegistry.AdmiralCache.CnameDependentClusterCache.Get(spec.Hosts[0]).CopyJustValues()
+	if len(dependentClusters) > 0 {
+		err := vh.syncVirtualServiceForDependentClusters(
+			ctx,
+			dependentClusters,
+			virtualService,
+			event,
+			vh.remoteRegistry,
+			vh.clusterID,
+			syncNamespace,
+		)
+		if err != nil {
+			log.Warnf(LogErrFormat, "Sync", common.VirtualServiceResourceType, virtualService.Name, dependentClusters, err.Error()+": sync to dependent clusters will not be retried")
+		} else {
+			log.Infof(LogFormat, "Sync", common.VirtualServiceResourceType, virtualService.Name, dependentClusters, "synced to all dependent clusters")
+		}
+		return nil
+	}
+	log.Infof(LogFormat, "Event", "VirtualService", virtualService.Name, vh.clusterID, "No dependent clusters found")
+	// copy the VirtualService `as is` if they are not generated by Admiral (not in CnameDependentClusterCache)
+	log.Infof(LogFormat, "Event", "VirtualService", virtualService.Name, vh.clusterID, "Replicating 'as is' to all clusters")
+	remoteClusters := vh.remoteRegistry.GetClusterIds()
+	err := vh.syncVirtualServiceForAllClusters(
+		ctx,
+		remoteClusters,
+		virtualService,
+		event,
+		vh.remoteRegistry,
+		vh.clusterID,
+		syncNamespace,
+	)
+	if err != nil {
+		log.Warnf(LogErrFormat, "Sync", common.VirtualServiceResourceType, virtualService.Name, "*", err.Error()+": sync to remote clusters will not be retried")
+		return nil
+	}
+	log.Infof(LogFormat, "Sync", common.VirtualServiceResourceType, virtualService.Name, "*", "synced to remote clusters")
+	return nil
+}
+
+// handleVirtualServiceEventForRollout fetches corresponding rollout for the
+// virtual service and triggers an update for ServiceEntries and DestinationRules.
+// It returns true when the VirtualService is referenced by at least one rollout's
+// canary strategy in the same namespace, along with any aggregated per-rollout errors.
+func handleVirtualServiceEventForRollout(
+	ctx context.Context,
+	virtualService *v1alpha3.VirtualService,
+	remoteRegistry *RemoteRegistry,
+	clusterID string,
+	handleEventForRollout HandleEventForRolloutFunc) (bool, error) {
+	defer logElapsedTimeForVirtualService("handleVirtualServiceEventForRollout", clusterID, virtualService)()
+	// This will be set to true, if the VirtualService is configured in any of the
+	// argo rollouts present in the namespace
+	var isRolloutCanaryVS bool
+	if virtualService == nil {
+		return isRolloutCanaryVS, fmt.Errorf("VirtualService is nil")
+	}
+	if remoteRegistry == nil {
+		return isRolloutCanaryVS, fmt.Errorf("remoteRegistry is nil")
+	}
+	rc := remoteRegistry.GetRemoteController(clusterID)
+	if rc == nil {
+		return isRolloutCanaryVS, fmt.Errorf(LogFormat, "Event", common.VirtualServiceResourceType, virtualService.Name, clusterID, "remote controller not initialized for cluster")
+	}
+	rolloutController := rc.RolloutController
+	if rolloutController == nil {
+		return isRolloutCanaryVS, fmt.Errorf(LogFormat, "Event", common.VirtualServiceResourceType, virtualService.Name, clusterID, "argo rollout controller not initialized for cluster")
+	}
+	rollouts, err := rolloutController.RolloutClient.Rollouts(virtualService.Namespace).List(ctx, metav1.ListOptions{})
+	if err != nil {
+		return isRolloutCanaryVS, fmt.Errorf(LogFormat, "Get", "Rollout", "Error finding rollouts in namespace="+virtualService.Namespace, clusterID, err)
+	}
+	var allErrors error
+	// a VirtualService may be referenced by several rollouts; process every match
+	// and aggregate the failures rather than stopping at the first error
+	for _, rollout := range rollouts.Items {
+		if matchRolloutCanaryStrategy(rollout.Spec.Strategy, virtualService.Name) {
+			isRolloutCanaryVS = true
+			err = handleEventForRollout(ctx, admiral.Update, &rollout, remoteRegistry, clusterID)
+			if err != nil {
+				allErrors = common.AppendError(allErrors, fmt.Errorf(LogFormat, "Event", "Rollout", rollout.Name, clusterID, err.Error()))
+			}
+		}
+	}
+	return isRolloutCanaryVS, allErrors
+}
+
+// syncVirtualServicesToAllDependentClusters fans the given VirtualService out to
+// every cluster in clusters concurrently (the source cluster is skipped unless
+// syncing Istio resources to source clusters is enabled) and returns the
+// aggregated errors from all clusters, or nil when every sync succeeded.
+func syncVirtualServicesToAllDependentClusters(
+	ctx context.Context,
+	clusters []string,
+	virtualService *v1alpha3.VirtualService,
+	event common.Event,
+	remoteRegistry *RemoteRegistry,
+	sourceCluster string,
+	syncNamespace string,
+) error {
+	defer logElapsedTimeForVirtualService("syncVirtualServicesToAllDependentClusters="+string(event), "", virtualService)()
+	if virtualService == nil {
+		return fmt.Errorf(LogFormat, "Event", common.VirtualServiceResourceType, "", sourceCluster, "VirtualService is nil")
+	}
+	if remoteRegistry == nil {
+		return fmt.Errorf(LogFormat, "Event", common.VirtualServiceResourceType, "", sourceCluster, "remoteRegistry is nil")
+	}
+	var allClusterErrors error
+	// errMutex guards allClusterErrors, which is appended to concurrently from
+	// the per-cluster goroutines below (unsynchronized access is a data race)
+	var errMutex sync.Mutex
+	var wg sync.WaitGroup
+	wg.Add(len(clusters))
+	for _, cluster := range clusters {
+		if cluster == sourceCluster && !common.DoSyncIstioResourcesToSourceClusters() {
+			wg.Done()
+			continue
+		}
+		go func(ctx context.Context, cluster string, remoteRegistry *RemoteRegistry, virtualServiceCopy *v1alpha3.VirtualService, event common.Event, syncNamespace string) {
+			defer wg.Done()
+			err := syncVirtualServiceToDependentCluster(
+				ctx,
+				cluster,
+				remoteRegistry,
+				virtualServiceCopy,
+				event,
+				syncNamespace,
+			)
+			if err != nil {
+				errMutex.Lock()
+				allClusterErrors = common.AppendError(allClusterErrors, err)
+				errMutex.Unlock()
+			}
+		}(ctx, cluster, remoteRegistry, virtualService.DeepCopy(), event, syncNamespace)
+	}
+	wg.Wait()
+	return allClusterErrors
+}
+
+// syncVirtualServiceToDependentCluster applies one VirtualService event to a
+// single dependent cluster: deletes the resource from the sync namespace on
+// Delete events (tolerating not-found and dead clusters), or rewrites
+// .local-suffixed destination hosts to the VirtualService's own host and
+// creates/updates the resource otherwise.
+func syncVirtualServiceToDependentCluster(
+	ctx context.Context,
+	cluster string,
+	remoteRegistry *RemoteRegistry,
+	virtualService *v1alpha3.VirtualService,
+	event common.Event,
+	syncNamespace string) error {
+
+	ctxLogger := log.WithFields(log.Fields{
+		"type": "syncVirtualServiceToDependentCluster",
+		"identity": virtualService.Name,
+		"txId": uuid.New().String(),
+	})
+
+	defer logElapsedTimeForVirtualService("syncVirtualServiceToDependentCluster="+string(event), cluster, virtualService)()
+	rc := remoteRegistry.GetRemoteController(cluster)
+	if rc == nil {
+		return fmt.Errorf(LogFormat, "Event", common.VirtualServiceResourceType, virtualService.Name,
+			cluster, "dependent controller not initialized for cluster")
+	}
+	ctxLogger.Infof(LogFormat, "Event", "VirtualService", virtualService.Name, cluster, "Processing")
+	if rc.VirtualServiceController == nil {
+		return fmt.Errorf(LogFormat, "Event", common.VirtualServiceResourceType, virtualService.Name, cluster, "VirtualService controller not initialized for cluster")
+	}
+	if event == common.Delete {
+		err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Delete(ctx, virtualService.Name, metav1.DeleteOptions{})
+		if err != nil {
+			if k8sErrors.IsNotFound(err) {
+				ctxLogger.Infof(LogFormat, "Delete", "VirtualService", virtualService.Name, cluster, "Either VirtualService was already deleted, or it never existed")
+				return nil
+			}
+			if isDeadCluster(err) {
+				ctxLogger.Warnf(LogErrFormat, "Create/Update", common.VirtualServiceResourceType, virtualService.Name, cluster, "dead cluster")
+				return nil
+			}
+			return fmt.Errorf(LogErrFormat, "Delete", "VirtualService", virtualService.Name, cluster, err)
+		}
+		ctxLogger.Infof(LogFormat, "Delete", "VirtualService", virtualService.Name, cluster, "Success")
+		return nil
+	}
+	exist, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, virtualService.Name, metav1.GetOptions{})
+	if k8sErrors.IsNotFound(err) {
+		ctxLogger.Infof(LogFormat, "Get", common.VirtualServiceResourceType, virtualService.Name, cluster, "VirtualService does not exist")
+		exist = nil
+	}
+	if isDeadCluster(err) {
+		ctxLogger.Warnf(LogErrFormat, "Create/Update", common.VirtualServiceResourceType, virtualService.Name, cluster, "dead cluster")
+		return nil
+	}
+	// NOTE(review): a Get error that is neither NotFound nor a dead-cluster error
+	// is silently dropped here and `exist` is passed on as-is — confirm
+	// addUpdateVirtualService tolerates that case.
+	//change destination host for all http routes .. to same as host on the virtual service
+	for _, httpRoute := range virtualService.Spec.Http {
+		for _, destination := range httpRoute.Route {
+			//get at index 0, we do not support wildcards or multiple hosts currently
+			if strings.HasSuffix(destination.Destination.Host, common.DotLocalDomainSuffix) {
+				destination.Destination.Host = virtualService.Spec.Hosts[0]
+			}
+		}
+	}
+	for _, tlsRoute := range virtualService.Spec.Tls {
+		for _, destination := range tlsRoute.Route {
+			//get at index 0, we do not support wildcards or multiple hosts currently
+			if strings.HasSuffix(destination.Destination.Host, common.DotLocalDomainSuffix) {
+				destination.Destination.Host = virtualService.Spec.Hosts[0]
+			}
+		}
+	}
+	// nolint
+	return addUpdateVirtualService(ctxLogger, ctx, virtualService, exist, syncNamespace, rc, remoteRegistry)
+}
+
+// syncVirtualServicesToAllRemoteClusters replicates the given VirtualService
+// 'as is' to every cluster in clusters concurrently (the source cluster is
+// skipped unless syncing Istio resources to source clusters is enabled) and
+// returns the aggregated errors from all clusters, or nil on full success.
+func syncVirtualServicesToAllRemoteClusters(
+	ctx context.Context,
+	clusters []string,
+	virtualService *v1alpha3.VirtualService,
+	event common.Event,
+	remoteRegistry *RemoteRegistry,
+	sourceCluster string,
+	syncNamespace string) error {
+	defer logElapsedTimeForVirtualService("syncVirtualServicesToAllRemoteClusters="+string(event), "*", virtualService)()
+	if virtualService == nil {
+		return fmt.Errorf(LogFormat, "Event", common.VirtualServiceResourceType, "", sourceCluster, "VirtualService is nil")
+	}
+	if remoteRegistry == nil {
+		return fmt.Errorf(LogFormat, "Event", common.VirtualServiceResourceType, "", sourceCluster, "remoteRegistry is nil")
+	}
+	var allClusterErrors error
+	// errMutex guards allClusterErrors, which is appended to concurrently from
+	// the per-cluster goroutines below (unsynchronized access is a data race)
+	var errMutex sync.Mutex
+	var wg sync.WaitGroup
+	wg.Add(len(clusters))
+	for _, cluster := range clusters {
+		if cluster == sourceCluster && !common.DoSyncIstioResourcesToSourceClusters() {
+			wg.Done()
+			continue
+		}
+		go func(ctx context.Context, cluster string, remoteRegistry *RemoteRegistry, virtualServiceCopy *v1alpha3.VirtualService, event common.Event, syncNamespace string) {
+			defer wg.Done()
+			err := syncVirtualServiceToRemoteCluster(
+				ctx,
+				cluster,
+				remoteRegistry,
+				virtualServiceCopy,
+				event,
+				syncNamespace,
+			)
+			if err != nil {
+				errMutex.Lock()
+				allClusterErrors = common.AppendError(allClusterErrors, err)
+				errMutex.Unlock()
+			}
+		}(ctx, cluster, remoteRegistry, virtualService.DeepCopy(), event, syncNamespace)
+	}
+	wg.Wait()
+	return allClusterErrors
+}
+
// syncVirtualServiceToRemoteCluster applies a single VirtualService to one remote
// cluster's sync namespace. For a Delete event it deletes the resource (treating
// NotFound as success); for any other event it fetches the current resource and
// delegates to addUpdateVirtualService. Unreachable ("dead") clusters — detected by
// isDeadCluster's DNS-failure check — are skipped with a warning rather than an error.
func syncVirtualServiceToRemoteCluster(
	ctx context.Context,
	cluster string,
	remoteRegistry *RemoteRegistry,
	virtualService *v1alpha3.VirtualService,
	event common.Event,
	syncNamespace string) error {

	// Per-call logger; txId correlates all log lines emitted for this sync attempt.
	ctxLogger := log.WithFields(log.Fields{
		"type": "syncVirtualServicesToAllRemoteClusters",
		"identity": virtualService.Name,
		"txId": uuid.New().String(),
	})

	defer logElapsedTimeForVirtualService("syncVirtualServiceToRemoteCluster="+string(event), cluster, virtualService)()
	rc := remoteRegistry.GetRemoteController(cluster)
	if rc == nil {
		return fmt.Errorf(LogFormat, "Event", common.VirtualServiceResourceType, virtualService.Name, cluster, "remote controller not initialized for cluster")
	}
	if rc.VirtualServiceController == nil {
		return fmt.Errorf(LogFormat, "Event", common.VirtualServiceResourceType, virtualService.Name, cluster, "VirtualService controller not initialized for cluster")
	}
	if event == common.Delete {
		err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Delete(ctx, virtualService.Name, metav1.DeleteOptions{})
		if err != nil {
			// Already-gone is success for a delete; an unreachable cluster is skipped.
			if k8sErrors.IsNotFound(err) {
				ctxLogger.Infof(LogFormat, "Delete", common.VirtualServiceResourceType, virtualService.Name, cluster, "Either VirtualService was already deleted, or it never existed")
				return nil
			}
			if isDeadCluster(err) {
				ctxLogger.Warnf(LogErrFormat, "Delete", common.VirtualServiceResourceType, virtualService.Name, cluster, "dead cluster")
				return nil
			}
			return fmt.Errorf(LogErrFormat, "Delete", common.VirtualServiceResourceType, virtualService.Name, cluster, err)
		}
		ctxLogger.Infof(LogFormat, "Delete", common.VirtualServiceResourceType, virtualService.Name, cluster, "Success")
		return nil
	}
	// Add/Update path: fetch the current resource so addUpdateVirtualService can
	// decide between Create and Update. exist=nil signals "create".
	exist, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, virtualService.Name, metav1.GetOptions{})
	if k8sErrors.IsNotFound(err) {
		ctxLogger.Infof(LogFormat, "Get", common.VirtualServiceResourceType, virtualService.Name, cluster, "VirtualService does not exist")
		exist = nil
	}
	if isDeadCluster(err) {
		ctxLogger.Warnf(LogErrFormat, "Create/Update", common.VirtualServiceResourceType, virtualService.Name, cluster, "dead cluster")
		return nil
	}
	// NOTE(review): a Get error other than NotFound/dead-cluster falls through here with
	// a non-nil (possibly empty) `exist`; addUpdateVirtualService's update path and its
	// retry logic are expected to absorb that — confirm this is intentional.
	// nolint
	return addUpdateVirtualService(ctxLogger, ctx, virtualService, exist, syncNamespace, rc, remoteRegistry)
}
+
+func matchRolloutCanaryStrategy(rolloutStrategy argo.RolloutStrategy, virtualServiceName string) bool {
+ if rolloutStrategy.Canary == nil ||
+ rolloutStrategy.Canary.TrafficRouting == nil ||
+ rolloutStrategy.Canary.TrafficRouting.Istio == nil ||
+ rolloutStrategy.Canary.TrafficRouting.Istio.VirtualService == nil {
+ return false
+ }
+ return rolloutStrategy.Canary.TrafficRouting.Istio.VirtualService.Name == virtualServiceName
+}
+
+/*
+Add/Update Virtual service after checking if the current pod is in ReadOnly mode.
+Virtual Service object is not added/updated if the current pod is in ReadOnly mode.
+*/
+func addUpdateVirtualService(
+ ctxLogger *log.Entry,
+ ctx context.Context,
+ new *v1alpha3.VirtualService,
+ exist *v1alpha3.VirtualService,
+ namespace string, rc *RemoteController, rr *RemoteRegistry) error {
+ var (
+ err error
+ op string
+ newCopy = new.DeepCopy()
+ )
+
+ format := "virtualservice %s before: %v, after: %v;"
+
+ if newCopy.Annotations == nil {
+ newCopy.Annotations = map[string]string{}
+ }
+ newCopy.Annotations["app.kubernetes.io/created-by"] = "admiral"
+ if common.EnableExportTo(newCopy.Spec.Hosts[0]) {
+ sortedDependentNamespaces := getSortedDependentNamespaces(rr.AdmiralCache, newCopy.Spec.Hosts[0], rc.ClusterID, ctxLogger)
+ newCopy.Spec.ExportTo = sortedDependentNamespaces
+ ctxLogger.Infof(LogFormat, "ExportTo", common.VirtualServiceResourceType, newCopy.Name, rc.ClusterID, fmt.Sprintf("VS usecase-ExportTo updated to %v", newCopy.Spec.ExportTo))
+ }
+ vsAlreadyExists := false
+ if exist == nil {
+ op = "Add"
+ ctxLogger.Infof(LogFormat, op, common.VirtualServiceResourceType, newCopy.Name, rc.ClusterID,
+ fmt.Sprintf("new virtualservice for cluster: %s VirtualService name=%s",
+ rc.ClusterID, newCopy.Name))
+ newCopy.Namespace = namespace
+ newCopy.ResourceVersion = ""
+ _, err = rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(namespace).Create(ctx, newCopy, metav1.CreateOptions{})
+ if k8sErrors.IsAlreadyExists(err) {
+ ctxLogger.Infof(LogFormat, op, common.VirtualServiceResourceType, newCopy.Name, rc.ClusterID,
+ fmt.Sprintf("skipping create virtualservice and it already exists for cluster: %s VirtualService name=%s",
+ rc.ClusterID, newCopy.Name))
+ vsAlreadyExists = true
+ }
+ }
+ if exist != nil || vsAlreadyExists {
+ if vsAlreadyExists {
+ exist, err = rc.VirtualServiceController.IstioClient.
+ NetworkingV1alpha3().
+ VirtualServices(namespace).
+ Get(ctx, newCopy.Name, metav1.GetOptions{})
+ if err != nil {
+ // when there is an error, assign exist to obj,
+ // which will fail in the update operation, but will be retried
+ // in the retry logic
+ exist = newCopy
+ ctxLogger.Warnf(common.CtxLogFormat, "Update", exist.Name, exist.Namespace, rc.ClusterID, "got error on fetching se, will retry updating")
+ }
+ }
+ op = "Update"
+ ctxLogger.Infof(LogFormat, op, common.VirtualServiceResourceType, newCopy.Name, rc.ClusterID,
+ fmt.Sprintf("existing virtualservice for cluster: %s VirtualService name=%s",
+ rc.ClusterID, newCopy.Name))
+ ctxLogger.Infof(format, op, exist.Spec.String(), newCopy.Spec.String())
+ exist.Labels = newCopy.Labels
+ exist.Annotations = newCopy.Annotations
+ //nolint
+ exist.Spec = newCopy.Spec
+ _, err = rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(namespace).Update(ctx, exist, metav1.UpdateOptions{})
+ if err != nil {
+ err = retryUpdatingVS(ctxLogger, ctx, newCopy, exist, namespace, rc, err, op)
+ }
+ }
+
+ if err != nil {
+ ctxLogger.Errorf(LogErrFormat, op, common.VirtualServiceResourceType, newCopy.Name, rc.ClusterID, err)
+ return err
+ }
+ ctxLogger.Infof(LogFormat, op, common.VirtualServiceResourceType, newCopy.Name, rc.ClusterID, "ExportTo: "+strings.Join(newCopy.Spec.ExportTo, " ")+" Success")
+ return nil
+}
+
+func retryUpdatingVS(ctxLogger *log.Entry, ctx context.Context, obj *v1alpha3.VirtualService,
+ exist *v1alpha3.VirtualService, namespace string, rc *RemoteController, err error, op string) error {
+ numRetries := 5
+ if err != nil && k8sErrors.IsConflict(err) {
+ for i := 0; i < numRetries; i++ {
+ vsIdentity := ""
+ if obj.Annotations != nil {
+ vsIdentity = obj.Labels[common.GetWorkloadIdentifier()]
+ }
+ ctxLogger.Errorf(LogFormatNew, op, common.VirtualServiceResourceType, obj.Name, obj.Namespace,
+ vsIdentity, rc.ClusterID, err.Error()+". will retry the update operation before adding back to the controller queue.")
+
+ updatedVS, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().
+ VirtualServices(namespace).Get(ctx, exist.Name, metav1.GetOptions{})
+ if err != nil {
+ ctxLogger.Infof(LogFormatNew, op, common.VirtualServiceResourceType, exist.Name, exist.Namespace,
+ vsIdentity, rc.ClusterID, err.Error()+fmt.Sprintf(". Error getting virtualservice"))
+ continue
+ }
+
+ ctxLogger.Infof(LogFormatNew, op, common.VirtualServiceResourceType, obj.Name, obj.Namespace,
+ vsIdentity, rc.ClusterID, fmt.Sprintf("existingResourceVersion=%s resourceVersionUsedForUpdate=%s",
+ updatedVS.ResourceVersion, obj.ResourceVersion))
+ updatedVS.Spec = obj.Spec
+ updatedVS.Labels = obj.Labels
+ updatedVS.Annotations = obj.Annotations
+ _, err = rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(namespace).Update(ctx, updatedVS, metav1.UpdateOptions{})
+ if err == nil {
+ return nil
+ }
+ }
+ }
+ return err
+}
+
+func isDeadCluster(err error) bool {
+ if err == nil {
+ return false
+ }
+ isNoSuchHostErr, _ := regexp.MatchString("dial tcp: lookup(.*): no such host", err.Error())
+ return isNoSuchHostErr
+}
+
+func logElapsedTimeForVirtualService(operation, clusterID string, virtualService *v1alpha3.VirtualService) func() {
+ startTime := time.Now()
+ return func() {
+ var name string
+ var namespace string
+ if virtualService != nil {
+ name = virtualService.Name
+ namespace = virtualService.Namespace
+ }
+ log.Infof(LogFormatOperationTime,
+ operation,
+ common.VirtualServiceResourceType,
+ name,
+ namespace,
+ clusterID,
+ time.Since(startTime).Milliseconds())
+ }
+}
+
+// nolint
+func createVirtualServiceSkeleton(vs networkingV1Alpha3.VirtualService, name string, namespace string) *v1alpha3.VirtualService {
+ return &v1alpha3.VirtualService{Spec: vs, ObjectMeta: metaV1.ObjectMeta{Name: name, Namespace: namespace}}
+}
+
+func deleteVirtualService(ctx context.Context, exist *v1alpha3.VirtualService, namespace string, rc *RemoteController) error {
+ if exist == nil {
+ return fmt.Errorf("the VirtualService passed was nil")
+ }
+ err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(namespace).Delete(ctx, exist.Name, metaV1.DeleteOptions{})
+ if err != nil {
+ if k8sErrors.IsNotFound(err) {
+ return fmt.Errorf("either VirtualService was already deleted, or it never existed")
+ }
+ return err
+ }
+ return nil
+}
From 521e967103e5da95809801556bdad043b412f934 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:27:27 -0400
Subject: [PATCH 170/235] added
admiral/pkg/clusters/virtualservice_handler_test.go from master
Signed-off-by: Shriram Sharma
---
.../clusters/virtualservice_handler_test.go | 1728 +++++++++++++++++
1 file changed, 1728 insertions(+)
create mode 100644 admiral/pkg/clusters/virtualservice_handler_test.go
diff --git a/admiral/pkg/clusters/virtualservice_handler_test.go b/admiral/pkg/clusters/virtualservice_handler_test.go
new file mode 100644
index 00000000..dd6b248c
--- /dev/null
+++ b/admiral/pkg/clusters/virtualservice_handler_test.go
@@ -0,0 +1,1728 @@
+package clusters
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/istio"
+ testMocks "github.com/istio-ecosystem/admiral/admiral/pkg/test"
+ commonUtil "github.com/istio-ecosystem/admiral/admiral/pkg/util"
+ log "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+ networkingV1Alpha3 "istio.io/api/networking/v1alpha3"
+ apiNetworkingV1Alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3"
+ istioFake "istio.io/client-go/pkg/clientset/versioned/fake"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ k8sErrors "k8s.io/apimachinery/pkg/api/errors"
+ metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/rest"
+)
+
// TestHandleVirtualServiceEvent is a table-driven test of
// VirtualServiceHandler.handleVirtualServiceEvent. Each case injects fake
// implementations of the three collaborators (updateResource,
// syncVirtualServiceForDependentClusters, syncVirtualServiceForAllClusters) and then
// asserts (a) the returned error and (b) exactly which collaborators were invoked for
// a given VirtualService / AdmiralParams / dependency-cache combination.
func TestHandleVirtualServiceEvent(t *testing.T) {
	var (
		clusterID = "cluster-1"
		syncNamespace = "sync-namespace"
		updateResourceErr = fmt.Errorf("updateResource returned error")
		syncVirtualServiceForDependentClusterErr = fmt.Errorf("syncVirtualServiceForDependentCluster returned error")
		syncVirtualServiceForAllClustersErr = fmt.Errorf("syncVirtualServiceForAllClusters returned error")
		cname1 = "one"
		cname2 = "two"
		dependentCluster1 = "dep-cluster-1"
		ctx = context.TODO()
		// remoteRegistry has no dependency records; remoteRegistryWithDependents maps
		// cname1 to dependentCluster1, which steers the handler to the
		// dependent-clusters sync path instead of the all-clusters path.
		remoteRegistry = NewRemoteRegistry(ctx, common.AdmiralParams{})
		remoteRegistryWithDependents = newRemoteRegistryWithDependents(ctx, cname1, dependentCluster1)
	)
	cases := []struct {
		name string
		virtualService *apiNetworkingV1Alpha3.VirtualService
		params common.AdmiralParams
		remoteRegistry *RemoteRegistry
		updateResource *fakeUpdateResource
		syncResourceForDependentClusters *fakeSyncResource
		syncResourceForAllClusters *fakeSyncResource
		expectToCallUpdateResource bool
		expectToCallSyncResourceForDependentClusters bool
		expectToCallSyncResourceForAllClusters bool
		expectedErr error
	}{
		{
			name: "Given sync namespace is not configured, " +
				"When, handleVirtualServiceEvent is invoked" +
				"Then, it should return 'passed VirtualService object is nil' error",
			params: common.AdmiralParams{
				SyncNamespace: "",
			},
			remoteRegistry: remoteRegistry,
			virtualService: nil,
			updateResource: newFakeUpdateResource(false, nil),
			syncResourceForDependentClusters: newFakeSyncResource(nil),
			syncResourceForAllClusters: newFakeSyncResource(nil),
			expectedErr: fmt.Errorf("expected valid value for sync namespace, got empty"),
		},
		{
			name: "Given an empty VirtualService object is passed, " +
				"When, handleVirtualServiceEvent is invoked" +
				"Then, it should return 'passed VirtualService object is nil' error",
			params: common.AdmiralParams{
				SyncNamespace: syncNamespace,
			},
			remoteRegistry: remoteRegistry,
			virtualService: nil,
			updateResource: newFakeUpdateResource(false, nil),
			syncResourceForDependentClusters: newFakeSyncResource(nil),
			syncResourceForAllClusters: newFakeSyncResource(nil),
			expectedErr: fmt.Errorf("passed %s object is nil", common.VirtualServiceResourceType),
		},
		{
			name: "Given a VirtualService contains more than 1 host in its spec, " +
				"When, handleVirtualServiceEvent is invoked, " +
				"Then, it should return with a nil, and not call updateResource, and both the syncResource methods",
			params: common.AdmiralParams{
				SyncNamespace: syncNamespace,
			},
			remoteRegistry: remoteRegistry,
			virtualService: &apiNetworkingV1Alpha3.VirtualService{
				Spec: networkingV1Alpha3.VirtualService{
					Hosts: []string{cname1, cname2},
				},
			},
			updateResource: newFakeUpdateResource(false, nil),
			syncResourceForDependentClusters: newFakeSyncResource(nil),
			syncResourceForAllClusters: newFakeSyncResource(nil),
			expectToCallUpdateResource: false,
			expectToCallSyncResourceForDependentClusters: false,
			expectToCallSyncResourceForAllClusters: false,
			expectedErr: nil,
		},
		{
			name: "Given VirtualService is valid and argo is enabled, " +
				"When, handleVirtualServiceEvent is invoked, " +
				"When there are no dependents clusters," +
				"When updateResource returns an error" +
				"Then, it should return an '" + updateResourceErr.Error() + "'error",
			params: common.AdmiralParams{
				ArgoRolloutsEnabled: true,
				SyncNamespace: syncNamespace,
			},
			remoteRegistry: remoteRegistry,
			virtualService: &apiNetworkingV1Alpha3.VirtualService{
				Spec: networkingV1Alpha3.VirtualService{
					Hosts: []string{cname1},
				},
			},
			updateResource: newFakeUpdateResource(false, updateResourceErr),
			syncResourceForDependentClusters: newFakeSyncResource(nil),
			syncResourceForAllClusters: newFakeSyncResource(nil),
			expectToCallUpdateResource: true,
			expectToCallSyncResourceForDependentClusters: false,
			expectToCallSyncResourceForAllClusters: false,
			expectedErr: updateResourceErr,
		},
		{
			name: "Given VirtualService is valid and argo is enabled, " +
				"When, handleVirtualServiceEvent is invoked, " +
				"When there are no dependents clusters," +
				"When updateResource returns true, and nil" +
				"Then, only updateResource should be called, " +
				"And, it should return nil, " +
				"And, syncVirtualServiceForDependentClusters & syncVirtualServiceForAllClusters should not be called",
			params: common.AdmiralParams{
				ArgoRolloutsEnabled: true,
				SyncNamespace: syncNamespace,
			},
			remoteRegistry: remoteRegistry,
			virtualService: &apiNetworkingV1Alpha3.VirtualService{
				Spec: networkingV1Alpha3.VirtualService{
					Hosts: []string{cname1},
				},
			},
			updateResource: newFakeUpdateResource(true, nil),
			syncResourceForDependentClusters: newFakeSyncResource(nil),
			syncResourceForAllClusters: newFakeSyncResource(nil),
			expectToCallUpdateResource: true,
			expectToCallSyncResourceForDependentClusters: false,
			expectToCallSyncResourceForAllClusters: false,
			expectedErr: nil,
		},
		{
			name: "Given VirtualService is valid and argo is enabled, " +
				"When, handleVirtualServiceEvent is invoked, " +
				"When there are no dependents clusters," +
				"When updateResource returns true, and an error" +
				"Then, only updateResource should be called, " +
				"And, it should return the same error, " +
				"And, syncVirtualServiceForDependentClusters & syncVirtualServiceForAllClusters should not be called",
			params: common.AdmiralParams{
				ArgoRolloutsEnabled: true,
				SyncNamespace: syncNamespace,
			},
			remoteRegistry: remoteRegistry,
			virtualService: &apiNetworkingV1Alpha3.VirtualService{
				Spec: networkingV1Alpha3.VirtualService{
					Hosts: []string{cname1},
				},
			},
			updateResource: newFakeUpdateResource(true, updateResourceErr),
			syncResourceForDependentClusters: newFakeSyncResource(nil),
			syncResourceForAllClusters: newFakeSyncResource(nil),
			expectToCallUpdateResource: true,
			expectToCallSyncResourceForDependentClusters: false,
			expectToCallSyncResourceForAllClusters: false,
			expectedErr: updateResourceErr,
		},
		{
			name: "Given VirtualService has 0 hosts in its spec, " +
				"And, argo is enabled, " +
				"When, handleVirtualServiceEvent is invoked, " +
				"When updateResource returns false, and nil" +
				"Then, only updateResource should be called, " +
				"And, it should return nil, " +
				"And, syncVirtualServiceForDependentClusters & syncVirtualServiceForAllClusters should not be called",
			params: common.AdmiralParams{
				ArgoRolloutsEnabled: true,
				SyncNamespace: syncNamespace,
			},
			remoteRegistry: remoteRegistry,
			virtualService: &apiNetworkingV1Alpha3.VirtualService{
				Spec: networkingV1Alpha3.VirtualService{
					Hosts: []string{},
				},
			},
			updateResource: newFakeUpdateResource(false, nil),
			syncResourceForDependentClusters: newFakeSyncResource(nil),
			syncResourceForAllClusters: newFakeSyncResource(nil),
			expectToCallUpdateResource: true,
			expectToCallSyncResourceForDependentClusters: false,
			expectToCallSyncResourceForAllClusters: false,
			expectedErr: nil,
		},
		{
			name: "Given VirtualService is valid and argo is enabled, " +
				"When, handleVirtualServiceEvent is invoked, " +
				"When there are no dependents clusters," +
				"When updateResource, syncVirtualServiceForDependentClusters, and syncVirtualServiceForDependentClusters return nil" +
				"Then, only updateResource & syncVirtualServiceForAllClusters should be called, " +
				"And, syncVirtualServiceForDependentClusters should NOT be called",
			params: common.AdmiralParams{
				ArgoRolloutsEnabled: true,
				SyncNamespace: syncNamespace,
			},
			remoteRegistry: remoteRegistry,
			virtualService: &apiNetworkingV1Alpha3.VirtualService{
				Spec: networkingV1Alpha3.VirtualService{
					Hosts: []string{cname1},
				},
			},
			updateResource: newFakeUpdateResource(false, nil),
			syncResourceForDependentClusters: newFakeSyncResource(nil),
			syncResourceForAllClusters: newFakeSyncResource(nil),
			expectToCallUpdateResource: true,
			expectToCallSyncResourceForDependentClusters: false,
			expectToCallSyncResourceForAllClusters: true,
			expectedErr: nil,
		},
		{
			name: "Given VirtualService is valid and argo is enabled, " +
				"When, handleVirtualServiceEvent is invoked, " +
				"When there are dependents clusters," +
				"When updateResource, syncVirtualServiceForDependentClusters, and syncVirtualServiceForDependentClusters return nil" +
				"Then, only updateResource & syncVirtualServiceForDependentClusters should be called, " +
				"And, syncVirtualServiceForAllClusters should NOT be called",
			params: common.AdmiralParams{
				ArgoRolloutsEnabled: true,
				SyncNamespace: syncNamespace,
			},
			remoteRegistry: remoteRegistryWithDependents,
			virtualService: &apiNetworkingV1Alpha3.VirtualService{
				Spec: networkingV1Alpha3.VirtualService{
					Hosts: []string{cname1},
				},
			},
			updateResource: newFakeUpdateResource(false, nil),
			syncResourceForDependentClusters: newFakeSyncResource(nil),
			syncResourceForAllClusters: newFakeSyncResource(nil),
			expectToCallUpdateResource: true,
			expectToCallSyncResourceForDependentClusters: true,
			expectToCallSyncResourceForAllClusters: false,
			expectedErr: nil,
		},
		{
			name: "Given VirtualService is valid and argo is enabled, " +
				"When, handleVirtualServiceEvent is invoked, " +
				"When there are dependents clusters," +
				"When updateResource returns nil, " +
				"And syncVirtualServiceForDependentClusters returns an error" +
				"Then, it should return nil" +
				"And, syncVirtualServiceForDependentClusters should be called",
			params: common.AdmiralParams{
				ArgoRolloutsEnabled: true,
				SyncNamespace: syncNamespace,
			},
			remoteRegistry: remoteRegistryWithDependents,
			virtualService: &apiNetworkingV1Alpha3.VirtualService{
				Spec: networkingV1Alpha3.VirtualService{
					Hosts: []string{cname1},
				},
			},
			updateResource: newFakeUpdateResource(false, nil),
			syncResourceForDependentClusters: newFakeSyncResource(syncVirtualServiceForDependentClusterErr),
			syncResourceForAllClusters: newFakeSyncResource(nil),
			expectToCallUpdateResource: true,
			expectToCallSyncResourceForDependentClusters: true,
			expectToCallSyncResourceForAllClusters: false,
			expectedErr: nil,
		},
		{
			name: "Given VirtualService is valid and argo is enabled, " +
				"When, handleVirtualServiceEvent is invoked, " +
				"When there are NOT dependents clusters," +
				"When updateResource syncVirtualServiceForAllClusters & return nil, " +
				"Then only updateResource & syncVirtualServiceForAllClusters should be called, " +
				"And, syncVirtualServiceForDependentClusters should NOT be called" +
				"And, it handleVirtualServiceEvent should return nil",
			params: common.AdmiralParams{
				ArgoRolloutsEnabled: true,
				SyncNamespace: syncNamespace,
			},
			remoteRegistry: remoteRegistry,
			virtualService: &apiNetworkingV1Alpha3.VirtualService{
				Spec: networkingV1Alpha3.VirtualService{
					Hosts: []string{cname1},
				},
			},
			updateResource: newFakeUpdateResource(false, nil),
			syncResourceForDependentClusters: newFakeSyncResource(nil),
			syncResourceForAllClusters: newFakeSyncResource(nil),
			expectToCallUpdateResource: true,
			expectToCallSyncResourceForDependentClusters: false,
			expectToCallSyncResourceForAllClusters: true,
			expectedErr: nil,
		},
		{
			name: "Given VirtualService is valid and argo is enabled, " +
				"When, handleVirtualServiceEvent is invoked, " +
				"When there are NOT dependents clusters," +
				"When updateResource returns nil, " +
				"When syncVirtualServiceForAllClusters returns an error" +
				"Then only updateResource & syncVirtualServiceForAllClusters should be called, " +
				"And, syncVirtualServiceForDependentClusters should NOT be called" +
				"And, it handleVirtualServiceEvent should return nil",
			params: common.AdmiralParams{
				ArgoRolloutsEnabled: true,
				SyncNamespace: syncNamespace,
			},
			remoteRegistry: remoteRegistry,
			virtualService: &apiNetworkingV1Alpha3.VirtualService{
				Spec: networkingV1Alpha3.VirtualService{
					Hosts: []string{cname1},
				},
			},
			updateResource: newFakeUpdateResource(false, nil),
			syncResourceForDependentClusters: newFakeSyncResource(nil),
			syncResourceForAllClusters: newFakeSyncResource(syncVirtualServiceForAllClustersErr),
			expectToCallUpdateResource: true,
			expectToCallSyncResourceForDependentClusters: false,
			expectToCallSyncResourceForAllClusters: true,
			expectedErr: nil,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			// Build the handler with the per-case fakes wired in.
			virtualServiceHandler := &VirtualServiceHandler{
				remoteRegistry: c.remoteRegistry,
				clusterID: clusterID,
				updateResource: c.updateResource.updateResourceFunc(),
				syncVirtualServiceForDependentClusters: c.syncResourceForDependentClusters.syncResourceFunc(),
				syncVirtualServiceForAllClusters: c.syncResourceForAllClusters.syncResourceFunc(),
			}
			// Global config is process-wide; reset it before applying this case's params.
			common.ResetSync()
			common.InitializeConfig(c.params)
			err := virtualServiceHandler.handleVirtualServiceEvent(
				ctx,
				c.virtualService,
				common.Add,
			)
			if err != nil && c.expectedErr == nil {
				t.Errorf("expected error to be nil but got %v", err)
			}
			if err != nil && c.expectedErr != nil {
				if !(err.Error() == c.expectedErr.Error()) {
					t.Errorf("error mismatch, expected %v but got %v", c.expectedErr, err)
				}
			}
			if err == nil && c.expectedErr != nil {
				t.Errorf("expected error %v but got nil", c.expectedErr)
			}
			// Verify the call/no-call expectations for each collaborator.
			if c.expectToCallUpdateResource && !c.updateResource.called {
				t.Errorf("expected updateResource to be called, but it was not called")
			}
			if !c.expectToCallUpdateResource && c.updateResource.called {
				t.Errorf("expected updateResource to NOT be called, but it was called")
			}
			if c.expectToCallSyncResourceForDependentClusters && !c.syncResourceForDependentClusters.called {
				t.Errorf("expected syncForDependentClusters to be called, but it was not called")
			}
			if !c.expectToCallSyncResourceForDependentClusters && c.syncResourceForDependentClusters.called {
				t.Errorf("expected syncForDependentClusters to NOT be called, but it was called")
			}
			if c.expectToCallSyncResourceForAllClusters && !c.syncResourceForAllClusters.called {
				t.Errorf("expected syncForAllClusters to be called, but it was not called")
			}
			if !c.expectToCallSyncResourceForAllClusters && c.syncResourceForAllClusters.called {
				t.Errorf("expected syncForAllClusters to NOT be called, but it was called")
			}
		})
	}
}
+
+func TestHandleVirtualServiceEventForRollout(t *testing.T) {
+ var (
+ ctx = context.TODO()
+ cname1 = "cname-1"
+ namespace1 = "namespace-1"
+ rollout1 = "rollout-name"
+ rollout2 = "rollout-name2"
+ handleEventForRolloutErrForRollout2 = fmt.Errorf("failed to update rollout")
+ expectedHandleEventForRolloutErrForRollout2 = fmt.Errorf("op=Event type=Rollout name=rollout-name2 cluster=cluster-1 message=failed to update rollout")
+ remoteRegistryWithRolloutControllerForExistingCluster = newRemoteRegistry(ctx, nil)
+ workingVS = &apiNetworkingV1Alpha3.VirtualService{
+ Spec: networkingV1Alpha3.VirtualService{
+ Hosts: []string{cname1},
+ },
+ ObjectMeta: metaV1.ObjectMeta{
+ Name: "virtual-service-1",
+ Namespace: namespace1,
+ },
+ }
+ clusterID = "cluster-1"
+ rolloutControllerNotInitializedErr = fmt.Errorf(
+ LogFormat, "Event", common.VirtualServiceResourceType, workingVS.Name,
+ clusterID, "remote controller not initialized for cluster",
+ )
+ /* TODO:
+ rolloutListClientErr = "failed listing rollouts"
+ rolloutListErr = fmt.Errorf(
+ LogFormat, "Get", "Rollout",
+ "Error finding rollouts in namespace="+workingVS.Namespace, clusterID, rolloutListClientErr,
+ )
+ */
+ )
+ remoteRegistryWithRolloutControllerForExistingCluster.PutRemoteController(
+ clusterID, &RemoteController{
+ RolloutController: &admiral.RolloutController{
+ RolloutClient: testMocks.MockRolloutsGetter{},
+ },
+ },
+ )
+ cases := []struct {
+ name string
+ clusters []string
+ virtualService *apiNetworkingV1Alpha3.VirtualService
+ event common.Event
+ remoteRegistry *RemoteRegistry
+ fakeHandleEventForRollout *fakeHandleEventForRollout
+ expectedRolloutVS bool
+ expectHandleEventForRolloutToBeCalled bool
+ expectedErr error
+ }{
+ {
+ name: "Given virtualService passed is nil, " +
+ "When, handleVirtualServicesForRollout is invoked, " +
+ "Then, it should return 'VirtualService is nil' error",
+ fakeHandleEventForRollout: newFakeHandleEventForRolloutsByError(nil),
+ expectedErr: fmt.Errorf("VirtualService is nil"),
+ },
+ {
+ name: "Given remoteRegistry is nil, " +
+ "When, handleVirtualServicesForRollout is invoked, " +
+ "Then, it should return 'remoteRegistry is nil' error",
+ virtualService: &apiNetworkingV1Alpha3.VirtualService{},
+ fakeHandleEventForRollout: newFakeHandleEventForRolloutsByError(nil),
+ expectedErr: fmt.Errorf("remoteRegistry is nil"),
+ },
+ {
+ name: "Given remoteRegistry for cluster passed is nil, " +
+ "When, handleVirtualServicesForRollout is invoked, " +
+ "Then, it should return '" + rolloutControllerNotInitializedErr.Error() + "' error",
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, nil),
+ fakeHandleEventForRollout: newFakeHandleEventForRolloutsByError(nil),
+ expectedErr: rolloutControllerNotInitializedErr,
+ },
+ {
+ name: "Given rollout a valid list of rollouts, " +
+ "And, handleEventForRollout returns nil, " +
+ "When, handleVirtualServicesForRollout is invoked, " +
+ "When, a rollout matches the virtual service passed, " +
+ "Then, it should return true, and nil, " +
+ "And, it should call handleEventForRollout function",
+ virtualService: workingVS,
+ remoteRegistry: remoteRegistryWithRolloutControllerForExistingCluster,
+ fakeHandleEventForRollout: newFakeHandleEventForRolloutsByError(map[string]map[string]error{
+ testMocks.RolloutNamespace: map[string]error{
+ "rollout-name": nil,
+ "rollout-name2": nil,
+ },
+ }),
+ expectHandleEventForRolloutToBeCalled: true,
+ expectedRolloutVS: true,
+ expectedErr: nil,
+ },
+ // TODO: cannot mock return from List yet. Need more code changes
+ /*
+ {
+ name: "Given rollout list returns an error, " +
+ "When, handleVirtualServicesForRollout is invoked, " +
+ "Then, it should return '" + rolloutListErr.Error() + "' error",
+ virtualService: workingVS,
+ remoteRegistry: remoteRegistryWithRolloutControllerWithListErr,
+ fakeHandleEventForRollout: newFakeHandleEventForRollout(nil),
+ expectedErr: rolloutListErr,
+ },
+ */
+ {
+ name: "Given there are multiple rollouts in the given namespace, " +
+ "And, handleEventForRollout returns an error for one of them, " +
+ "When, handleVirtualServicesForRollout is invoked, " +
+ "When, both the rollout match the virtual service passed, " +
+ "Then, it should return true, and an error, " +
+ "And, it should call handleEventForRollout function",
+ virtualService: workingVS,
+ remoteRegistry: remoteRegistryWithRolloutControllerForExistingCluster,
+ fakeHandleEventForRollout: newFakeHandleEventForRolloutsByError(map[string]map[string]error{
+ testMocks.RolloutNamespace: map[string]error{
+ "rollout-name": nil,
+ "rollout-name2": handleEventForRolloutErrForRollout2,
+ },
+ }),
+ expectHandleEventForRolloutToBeCalled: true,
+ expectedRolloutVS: true,
+ expectedErr: expectedHandleEventForRolloutErrForRollout2,
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ isRolloutVS, err := handleVirtualServiceEventForRollout(
+ ctx,
+ c.virtualService,
+ c.remoteRegistry,
+ clusterID,
+ c.fakeHandleEventForRollout.handleEventForRolloutFunc(),
+ )
+ if err != nil && c.expectedErr == nil {
+ t.Errorf("expected error to be nil but got %v", err)
+ }
+ if err != nil && c.expectedErr != nil {
+ if !(err.Error() == c.expectedErr.Error()) {
+ t.Errorf("error mismatch, expected '%v' but got '%v'", c.expectedErr, err)
+ }
+ }
+ if err == nil && c.expectedErr != nil {
+ t.Errorf("expected error %v but got nil", c.expectedErr)
+ }
+ if isRolloutVS != c.expectedRolloutVS {
+ t.Errorf("expected: %v, got: %v", c.expectedRolloutVS, isRolloutVS)
+ }
+ if c.expectHandleEventForRolloutToBeCalled && (c.fakeHandleEventForRollout.calledByRolloutName[rollout1] &&
+ c.fakeHandleEventForRollout.calledByRolloutName[rollout2]) {
+ t.Errorf("expected handleRollout to be called, but it was not")
+ }
+ })
+ }
+}
+
+func TestSyncVirtualServicesToAllDependentClusters(t *testing.T) {
+ var (
+ ctx = context.TODO()
+ cname1 = "cname1"
+ namespace1 = "namespace1"
+ syncNamespace = "sync-namespace"
+ dependentCluster1 = "dep-cluster1"
+ dependentCluster2 = "dep-cluster2"
+ sourceCluster = "cluster1"
+ workingVS = &apiNetworkingV1Alpha3.VirtualService{
+ Spec: networkingV1Alpha3.VirtualService{
+ Hosts: []string{cname1},
+ },
+ ObjectMeta: metaV1.ObjectMeta{
+ Name: "virtual-service-1",
+ Namespace: namespace1,
+ },
+ }
+
+ fakeIstioClientWithoutAnyVirtualServices = istioFake.NewSimpleClientset()
+ fakeIstioClientWithoutKnownVirtualServices = newFakeIstioClient(ctx, namespace1, workingVS)
+ nilVirtualServiceControllerForDependencyCluster1 = map[string]*RemoteController{
+ dependentCluster1: &RemoteController{},
+ }
+ virtualServiceControllerForDepCluster1AndNilForCluster2 = map[string]*RemoteController{
+ dependentCluster1: &RemoteController{
+ VirtualServiceController: &istio.VirtualServiceController{
+ IstioClient: istioFake.NewSimpleClientset(),
+ },
+ },
+ dependentCluster2: &RemoteController{},
+ }
+
+ virtualServiceControllerForKnownClustersWithoutAnyVirtualServices = map[string]*RemoteController{
+ dependentCluster1: &RemoteController{
+ VirtualServiceController: &istio.VirtualServiceController{
+ IstioClient: fakeIstioClientWithoutAnyVirtualServices,
+ },
+ },
+ }
+ virtualServiceControllerForKnownClustersWithKnownVirtualServices = map[string]*RemoteController{
+ dependentCluster1: &RemoteController{
+ VirtualServiceController: &istio.VirtualServiceController{
+ IstioClient: fakeIstioClientWithoutKnownVirtualServices,
+ },
+ },
+ }
+ virtualServiceControllerForSourceClustersWithoutAnyVirtualServices = map[string]*RemoteController{
+ sourceCluster: &RemoteController{
+ VirtualServiceController: &istio.VirtualServiceController{
+ IstioClient: fakeIstioClientWithoutAnyVirtualServices,
+ },
+ },
+ dependentCluster1: &RemoteController{
+ VirtualServiceController: &istio.VirtualServiceController{
+ IstioClient: fakeIstioClientWithoutKnownVirtualServices,
+ },
+ },
+ }
+ virtualServiceControllerForSourceClustersWithKnownVirtualServices = map[string]*RemoteController{
+ dependentCluster1: &RemoteController{
+ VirtualServiceController: &istio.VirtualServiceController{
+ IstioClient: fakeIstioClientWithoutKnownVirtualServices,
+ },
+ },
+ sourceCluster: &RemoteController{
+ VirtualServiceController: &istio.VirtualServiceController{
+ IstioClient: fakeIstioClientWithoutKnownVirtualServices,
+ },
+ },
+ }
+
+ cluster1 = []string{dependentCluster1}
+ clusters1And2 = []string{dependentCluster1, dependentCluster2}
+ clustersContainingSourceCluster = []string{dependentCluster1, sourceCluster}
+ emptyVSErr = fmt.Errorf(
+ LogFormat, "Event", common.VirtualServiceResourceType, "", sourceCluster,
+ "VirtualService is nil",
+ )
+ emptyRemoteRegistryErr = fmt.Errorf(
+ LogFormat, "Event", common.VirtualServiceResourceType, "", sourceCluster,
+ "remoteRegistry is nil",
+ )
+ nilRemoteControllerForDepCluster1Err = fmt.Errorf(
+ LogFormat, "Event", common.VirtualServiceResourceType, workingVS.Name, dependentCluster1,
+ "dependent controller not initialized for cluster",
+ )
+ nilRemoteControllerForDepCluster2Err = fmt.Errorf(
+ LogFormat, "Event", common.VirtualServiceResourceType, workingVS.Name, dependentCluster2,
+ "dependent controller not initialized for cluster",
+ )
+ virtualServiceControllerNotInitializedForCluster1Err = fmt.Errorf(
+ LogFormat, "Event", common.VirtualServiceResourceType, workingVS.Name, dependentCluster1,
+ "VirtualService controller not initialized for cluster",
+ )
+ virtualServiceControllerNotInitializedForCluster2Err = fmt.Errorf(
+ LogFormat, "Event", common.VirtualServiceResourceType, workingVS.Name, dependentCluster2,
+ "VirtualService controller not initialized for cluster",
+ )
+ )
+
+ cases := []struct {
+ name string
+ clusters []string
+ virtualService *apiNetworkingV1Alpha3.VirtualService
+ event common.Event
+ remoteRegistry *RemoteRegistry
+ sourceCluster string
+ syncNamespace string
+ assertFunc func(remoteRegistry *RemoteRegistry, clusters []string, t *testing.T)
+ doSyncVSToSourceCluster bool
+ expectedErr error
+ }{
+ {
+ name: "Given a nil VirtualService is passed , " +
+ "When, syncVirtualServicesToAllDependentClusters is invoked, " +
+ "Then, it should return '" + emptyVSErr.Error() + "' error",
+ sourceCluster: sourceCluster,
+ expectedErr: emptyVSErr,
+ },
+ {
+ name: "Given a nil remoteRegistry is passed , " +
+ "When, syncVirtualServicesToAllDependentClusters is invoked, " +
+ "Then, it should return '" + emptyRemoteRegistryErr.Error() + "' error",
+ sourceCluster: sourceCluster,
+ virtualService: workingVS,
+ expectedErr: emptyRemoteRegistryErr,
+ },
+ {
+ name: "Given remote controller for cluster is not initialized , " +
+ "When, syncVirtualServicesToAllDependentClusters is invoked, " +
+ "Then, it should return '" + nilRemoteControllerForDepCluster1Err.Error() + "' error",
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, nil),
+ clusters: cluster1,
+ sourceCluster: sourceCluster,
+ expectedErr: nilRemoteControllerForDepCluster1Err,
+ },
+ {
+ name: "Given remote controller for one cluster is not initialized , " +
+ "And, there is another cluster, which has remote controller initialized, " +
+ "When, syncVirtualServicesToAllDependentClusters is invoked, " +
+ "Then, it should return '" + nilRemoteControllerForDepCluster1Err.Error() + "' error" +
+ "And, it creates VirtualService for cluster which has remote controller initialized",
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, virtualServiceControllerForKnownClustersWithoutAnyVirtualServices),
+ clusters: clusters1And2,
+ sourceCluster: sourceCluster,
+ assertFunc: func(remoteRegistry *RemoteRegistry, clusters []string, t *testing.T) {
+ for _, cluster := range clusters {
+ // cluster with no nil pointer exception
+ if cluster == dependentCluster1 {
+ rc := remoteRegistry.GetRemoteController(cluster)
+ vs, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, workingVS.Name, metaV1.GetOptions{})
+ if err != nil {
+ t.Errorf("expected nil, but got error: %v", err)
+ return
+ }
+ if vs == nil || vs.Name != workingVS.Name {
+ t.Errorf("expected VirtualService to be created, but it was not")
+ }
+ }
+ }
+ },
+ expectedErr: nilRemoteControllerForDepCluster2Err,
+ },
+ {
+ name: "Given VirtualServiceController for cluster is not initialized , " +
+ "When, syncVirtualServicesToAllDependentClusters is invoked, " +
+ "Then, it should return '" + virtualServiceControllerNotInitializedForCluster1Err.Error() + "' error",
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, nilVirtualServiceControllerForDependencyCluster1),
+ clusters: cluster1,
+ sourceCluster: sourceCluster,
+ expectedErr: virtualServiceControllerNotInitializedForCluster1Err,
+ },
+ {
+ name: "Given VirtualServiceController for one cluster is not initialized , " +
+ "And, there is another cluster, which has VirtualServiceController initialized, " +
+ "When, syncVirtualServicesToAllDependentClusters is invoked, " +
+ "Then, it should return '" + virtualServiceControllerNotInitializedForCluster2Err.Error() + "' error" +
+ "And, it should create VirtualService for cluster which has the controller initialized",
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, virtualServiceControllerForDepCluster1AndNilForCluster2),
+ clusters: clusters1And2,
+ sourceCluster: sourceCluster,
+ assertFunc: func(remoteRegistry *RemoteRegistry, clusters []string, t *testing.T) {
+ for _, cluster := range clusters {
+ // cluster with no nil pointer exception
+ if cluster == dependentCluster1 {
+ rc := remoteRegistry.GetRemoteController(cluster)
+ vs, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, workingVS.Name, metaV1.GetOptions{})
+ if err != nil {
+ t.Errorf("expected nil, but got error: %v", err)
+ return
+ }
+ if vs == nil || vs.Name != workingVS.Name {
+ t.Errorf("expected VirtualService to be created, but it was not")
+ }
+ }
+ }
+ },
+ expectedErr: virtualServiceControllerNotInitializedForCluster2Err,
+ },
+ {
+ name: "Given a valid VirtualService is passed for CREATE event, " +
+ "And the VirtualService does not exist, " +
+ "When, syncVirtualServicesToAllDependentClusters is invoked, " +
+ "Then, the new VirtualService should be created" +
+ "And, it should not return an error",
+ event: common.Add,
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, virtualServiceControllerForKnownClustersWithoutAnyVirtualServices),
+ clusters: cluster1,
+ sourceCluster: sourceCluster,
+ assertFunc: func(remoteRegistry *RemoteRegistry, clusters []string, t *testing.T) {
+ for _, cluster := range clusters {
+ rc := remoteRegistry.GetRemoteController(cluster)
+ vs, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, workingVS.Name, metaV1.GetOptions{})
+ if err != nil {
+ t.Errorf("expected nil, but got error: %v", err)
+ return
+ }
+ if vs == nil || vs.Name != workingVS.Name {
+ t.Errorf("expected VirtualService to be created, but it was not")
+ }
+ }
+ },
+ expectedErr: nil,
+ },
+ {
+ name: "Given a valid VirtualService is passed UPDATE event, " +
+ "And the VirtualService already exists, " +
+ "When, syncVirtualServicesToAllDependentClusters is invoked, " +
+ "Then, the VirtualService should be updated" +
+ "And, it should not return an error",
+ event: common.Update,
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, virtualServiceControllerForKnownClustersWithKnownVirtualServices),
+ clusters: cluster1,
+ sourceCluster: sourceCluster,
+ assertFunc: func(remoteRegistry *RemoteRegistry, clusters []string, t *testing.T) {
+ for _, cluster := range clusters {
+ rc := remoteRegistry.GetRemoteController(cluster)
+ vs, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, workingVS.Name, metaV1.GetOptions{})
+ if err != nil {
+ t.Errorf("expected nil, but got error: %v", err)
+ return
+ }
+ if vs == nil || vs.Name != workingVS.Name {
+ t.Errorf("expected VirtualService to be created, but it was not")
+ }
+ }
+ },
+ expectedErr: nil,
+ },
+
+ {
+ name: "Given a valid VirtualService is passed for DELETE event, " +
+ "And the VirtualService exists, " +
+ "When, syncVirtualServicesToAllDependentClusters is invoked, " +
+ "Then, the VirtualService should be deleted, " +
+ "And, it should not return an error",
+ event: common.Delete,
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, virtualServiceControllerForKnownClustersWithKnownVirtualServices),
+ clusters: cluster1,
+ sourceCluster: sourceCluster,
+ assertFunc: func(remoteRegistry *RemoteRegistry, clusters []string, t *testing.T) {
+ for _, cluster := range clusters {
+ rc := remoteRegistry.GetRemoteController(cluster)
+ _, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, workingVS.Name, metaV1.GetOptions{})
+ if !k8sErrors.IsNotFound(err) {
+ t.Errorf("expected error to be Not Found, but got: %v", err)
+ }
+ }
+ },
+ expectedErr: nil,
+ },
+ {
+ name: "Given a valid VirtualService is passed for DELETE event, " +
+ "And the VirtualService does not exist, " +
+ "When, syncVirtualServicesToAllDependentClusters is invoked, " +
+ "Then, the VirtualService should be deleted",
+ event: common.Delete,
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, virtualServiceControllerForKnownClustersWithoutAnyVirtualServices),
+ clusters: cluster1,
+ sourceCluster: sourceCluster,
+ assertFunc: func(remoteRegistry *RemoteRegistry, clusters []string, t *testing.T) {
+ for _, cluster := range clusters {
+ rc := remoteRegistry.GetRemoteController(cluster)
+ _, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, workingVS.Name, metaV1.GetOptions{})
+ if !k8sErrors.IsNotFound(err) {
+ t.Errorf("expected error to be Not Found, but got: %v", err)
+ }
+ }
+ },
+ expectedErr: nil,
+ },
+ {
+ name: "Given a valid VirtualService is passed for CREATE event, " +
+ "And the VirtualService does not exist, " +
+ "When, an asset has a client in the source cluster, " +
+ "Then, the new VirtualService should be created in source and dependent clusters" +
+ "And, it should not return an error",
+ event: common.Add,
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, virtualServiceControllerForSourceClustersWithoutAnyVirtualServices),
+ clusters: clustersContainingSourceCluster,
+ sourceCluster: sourceCluster,
+ assertFunc: func(remoteRegistry *RemoteRegistry, clusters []string, t *testing.T) {
+ for _, cluster := range clusters {
+ rc := remoteRegistry.GetRemoteController(cluster)
+ vs, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, workingVS.Name, metaV1.GetOptions{})
+ if err != nil {
+ t.Errorf("expected nil, but got error: %v", err)
+ return
+ }
+ if vs == nil || vs.Name != workingVS.Name {
+ t.Errorf("expected VirtualService to be created, but it was not")
+ }
+ }
+ },
+ doSyncVSToSourceCluster: true,
+ expectedErr: nil,
+ },
+ {
+ name: "Given a valid VirtualService is passed for DELETE event, " +
+ "And the VirtualService exist, " +
+ "When, an asset has a client in the source cluster, " +
+ "Then, the new VirtualService should be deleted in source and dependent clusters" +
+ "And, it should not return an error",
+ event: common.Add,
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, virtualServiceControllerForSourceClustersWithKnownVirtualServices),
+ clusters: clustersContainingSourceCluster,
+ sourceCluster: sourceCluster,
+ assertFunc: func(remoteRegistry *RemoteRegistry, clusters []string, t *testing.T) {
+ for _, cluster := range clusters {
+ rc := remoteRegistry.GetRemoteController(cluster)
+ vs, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, workingVS.Name, metaV1.GetOptions{})
+ if err != nil {
+ t.Errorf("expected nil, but got error: %v", err)
+ return
+ }
+ if vs == nil || vs.Name != workingVS.Name {
+ t.Errorf("expected VirtualService to be created, but it was not")
+ }
+ }
+ },
+ doSyncVSToSourceCluster: true,
+ expectedErr: nil,
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ common.ResetSync()
+ admiralParams := common.AdmiralParams{
+ EnableSyncIstioResourcesToSourceClusters: c.doSyncVSToSourceCluster,
+ }
+ common.InitializeConfig(admiralParams)
+ err := syncVirtualServicesToAllDependentClusters(
+ ctx,
+ c.clusters,
+ c.virtualService,
+ c.event,
+ c.remoteRegistry,
+ c.sourceCluster,
+ syncNamespace,
+ )
+ if err != nil && c.expectedErr == nil {
+ t.Errorf("expected error to be nil but got %v", err)
+ }
+ if err != nil && c.expectedErr != nil {
+ if !(err.Error() == c.expectedErr.Error()) {
+ t.Errorf("error mismatch, expected %v but got %v", c.expectedErr, err)
+ }
+ }
+ if err == nil && c.expectedErr != nil {
+ t.Errorf("expected error %v but got nil", c.expectedErr)
+ }
+ if c.assertFunc != nil {
+ c.assertFunc(c.remoteRegistry, c.clusters, t)
+ }
+ })
+ }
+}
+
+func TestSyncVirtualServicesToAllRemoteClusters(t *testing.T) {
+ var (
+ ctx = context.TODO()
+ cname1 = "cname1"
+ namespace1 = "namespace1"
+ syncNamespace = "sync-namespace"
+ dependentCluster1 = "dep-cluster1"
+ dependentCluster2 = "dep-cluster2"
+ sourceCluster = "cluster1"
+ workingVS = &apiNetworkingV1Alpha3.VirtualService{
+ Spec: networkingV1Alpha3.VirtualService{
+ Hosts: []string{cname1},
+ },
+ ObjectMeta: metaV1.ObjectMeta{
+ Name: "virtual-service-1",
+ Namespace: namespace1,
+ },
+ }
+ fakeIstioClientWithoutAnyVirtualServices = istioFake.NewSimpleClientset()
+ fakeIstioClientWithoutKnownVirtualServices = newFakeIstioClient(ctx, namespace1, workingVS)
+ nilVirtualServiceControllerForKnownClusters = map[string]*RemoteController{
+ dependentCluster1: &RemoteController{},
+ }
+ virtualServiceControllerForDepCluster1AndNilForCluster2 = map[string]*RemoteController{
+ dependentCluster1: &RemoteController{
+ VirtualServiceController: &istio.VirtualServiceController{
+ IstioClient: istioFake.NewSimpleClientset(),
+ },
+ },
+ dependentCluster2: &RemoteController{},
+ }
+ virtualServiceControllerForKnownClustersWithoutAnyVirtualServices = map[string]*RemoteController{
+ dependentCluster1: &RemoteController{
+ VirtualServiceController: &istio.VirtualServiceController{
+ IstioClient: fakeIstioClientWithoutAnyVirtualServices,
+ },
+ },
+ }
+ virtualServiceControllerForKnownClustersWithKnownVirtualServices = map[string]*RemoteController{
+ dependentCluster1: &RemoteController{
+ VirtualServiceController: &istio.VirtualServiceController{
+ IstioClient: fakeIstioClientWithoutKnownVirtualServices,
+ },
+ },
+ }
+ virtualServiceControllerForSourceClustersWithKnownVirtualServices = map[string]*RemoteController{
+ dependentCluster1: &RemoteController{
+ VirtualServiceController: &istio.VirtualServiceController{
+ IstioClient: fakeIstioClientWithoutKnownVirtualServices,
+ },
+ },
+ sourceCluster: &RemoteController{
+ VirtualServiceController: &istio.VirtualServiceController{
+ IstioClient: fakeIstioClientWithoutKnownVirtualServices,
+ },
+ },
+ }
+ virtualServiceControllerForSourceClustersWithoutAnyVirtualServices = map[string]*RemoteController{
+ dependentCluster1: &RemoteController{
+ VirtualServiceController: &istio.VirtualServiceController{
+ IstioClient: fakeIstioClientWithoutAnyVirtualServices,
+ },
+ },
+ sourceCluster: &RemoteController{
+ VirtualServiceController: &istio.VirtualServiceController{
+ IstioClient: fakeIstioClientWithoutAnyVirtualServices,
+ },
+ },
+ }
+ cluster1 = []string{dependentCluster1}
+ cluster1And2 = []string{dependentCluster1, dependentCluster2}
+ clustersContainingSourceCluster = []string{dependentCluster1, sourceCluster}
+ emptyVSErr = fmt.Errorf(
+ LogFormat, "Event", common.VirtualServiceResourceType, "", sourceCluster,
+ "VirtualService is nil",
+ )
+ emptyRemoteRegistryErr = fmt.Errorf(
+ LogFormat, "Event", common.VirtualServiceResourceType, "", sourceCluster,
+ "remoteRegistry is nil",
+ )
+ nilRemoteControllerForDepCluster1Err = fmt.Errorf(
+ LogFormat, "Event", common.VirtualServiceResourceType, workingVS.Name, dependentCluster1,
+ "remote controller not initialized for cluster",
+ )
+ nilRemoteControllerForDepCluster2Err = fmt.Errorf(
+ LogFormat, "Event", common.VirtualServiceResourceType, workingVS.Name, dependentCluster2,
+ "remote controller not initialized for cluster",
+ )
+ virtualServiceControllerNotInitializedForCluster1Err = fmt.Errorf(
+ LogFormat, "Event", common.VirtualServiceResourceType, workingVS.Name, dependentCluster1,
+ "VirtualService controller not initialized for cluster",
+ )
+ virtualServiceControllerNotInitializedForCluster2Err = fmt.Errorf(
+ LogFormat, "Event", common.VirtualServiceResourceType, workingVS.Name, dependentCluster2,
+ "VirtualService controller not initialized for cluster",
+ )
+ )
+
+ cases := []struct {
+ name string
+ clusters []string
+ virtualService *apiNetworkingV1Alpha3.VirtualService
+ event common.Event
+ remoteRegistry *RemoteRegistry
+ sourceCluster string
+ syncNamespace string
+ assertFunc func(remoteRegistry *RemoteRegistry, clusters []string, t *testing.T)
+ doSyncVSToSourceCluster bool
+ expectedErr error
+ }{
+ {
+ name: "Given a nil VirtualService is passed , " +
+ "When, syncVirtualServicesToAllRemoteClusters is invoked, " +
+ "Then, it should return '" + emptyVSErr.Error() + "' error",
+ sourceCluster: sourceCluster,
+ expectedErr: emptyVSErr,
+ },
+ {
+ name: "Given a nil remoteRegistry is passed , " +
+ "When, syncVirtualServicesToAllRemoteClusters is invoked, " +
+ "Then, it should return '" + emptyRemoteRegistryErr.Error() + "' error",
+ sourceCluster: sourceCluster,
+ virtualService: workingVS,
+ expectedErr: emptyRemoteRegistryErr,
+ },
+ {
+ name: "Given remote controller for cluster is not initialized , " +
+ "When, syncVirtualServicesToAllRemoteClusters is invoked, " +
+ "Then, it should return '" + nilRemoteControllerForDepCluster1Err.Error() + "' error",
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, nil),
+ clusters: cluster1,
+ sourceCluster: sourceCluster,
+ expectedErr: nilRemoteControllerForDepCluster1Err,
+ },
+ {
+ name: "Given remote controller for one cluster is not initialized , " +
+ "And, it is initialized for another cluster, " +
+ "When, syncVirtualServicesToAllRemoteClusters is invoked, " +
+ "Then, it should return '" + nilRemoteControllerForDepCluster1Err.Error() + "' error" +
+ "And, it creates VirtualService for cluster which has remote controller initialized",
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, virtualServiceControllerForKnownClustersWithoutAnyVirtualServices),
+ clusters: cluster1And2,
+ sourceCluster: sourceCluster,
+ assertFunc: func(remoteRegistry *RemoteRegistry, clusters []string, t *testing.T) {
+ for _, cluster := range clusters {
+ // cluster with no nil pointer exception
+ if cluster == dependentCluster1 {
+ rc := remoteRegistry.GetRemoteController(cluster)
+ vs, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, workingVS.Name, metaV1.GetOptions{})
+ if err != nil {
+ t.Errorf("expected nil, but got error: %v", err)
+ return
+ }
+ if vs == nil || vs.Name != workingVS.Name {
+ t.Errorf("expected VirtualService to be created, but it was not")
+ }
+ }
+ }
+ },
+ expectedErr: nilRemoteControllerForDepCluster2Err,
+ },
+ {
+ name: "Given VirtualServiceController for cluster is not initialized , " +
+ "When, syncVirtualServicesToAllRemoteClusters is invoked, " +
+ "Then, it should return '" + virtualServiceControllerNotInitializedForCluster1Err.Error() + "' error",
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, nilVirtualServiceControllerForKnownClusters),
+ clusters: cluster1,
+ sourceCluster: sourceCluster,
+ expectedErr: virtualServiceControllerNotInitializedForCluster1Err,
+ },
+ {
+ name: "Given VirtualServiceController for one cluster is not initialized , " +
+ "And VirtualServiceController is initialized for another cluster, " +
+ "When, syncVirtualServicesToAllRemoteClusters is invoked, " +
+ "Then, it should return '" + virtualServiceControllerNotInitializedForCluster2Err.Error() + "' error" +
+ "And, it should create VirtualService for cluster which has it initialized",
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, virtualServiceControllerForDepCluster1AndNilForCluster2),
+ clusters: cluster1And2,
+ sourceCluster: sourceCluster,
+ expectedErr: virtualServiceControllerNotInitializedForCluster2Err,
+ },
+ {
+ name: "Given a valid VirtualService is passed for CREATE event, " +
+ "And the VirtualService does not exist, " +
+ "When, syncVirtualServicesToAllRemoteClusters is invoked, " +
+ "Then, the new VirtualService should be created" +
+ "And, it should not return an error",
+ event: common.Add,
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, virtualServiceControllerForKnownClustersWithoutAnyVirtualServices),
+ clusters: cluster1,
+ sourceCluster: sourceCluster,
+ assertFunc: func(remoteRegistry *RemoteRegistry, clusters []string, t *testing.T) {
+ for _, cluster := range clusters {
+ rc := remoteRegistry.GetRemoteController(cluster)
+ vs, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, workingVS.Name, metaV1.GetOptions{})
+ if err != nil {
+ t.Errorf("expected nil, but got error: %v", err)
+ return
+ }
+ if vs == nil || vs.Name != workingVS.Name {
+ t.Errorf("expected VirtualService to be created, but it was not")
+ }
+ }
+ },
+ expectedErr: nil,
+ },
+ {
+ name: "Given a valid VirtualService is passed UPDATE event, " +
+ "And the VirtualService already exists, " +
+ "When, syncVirtualServicesToAllRemoteClusters is invoked, " +
+ "Then, the VirtualService should be updated" +
+ "And, it should not return an error",
+ event: common.Update,
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, virtualServiceControllerForKnownClustersWithKnownVirtualServices),
+ clusters: cluster1,
+ sourceCluster: sourceCluster,
+ assertFunc: func(remoteRegistry *RemoteRegistry, clusters []string, t *testing.T) {
+ for _, cluster := range clusters {
+ rc := remoteRegistry.GetRemoteController(cluster)
+ vs, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, workingVS.Name, metaV1.GetOptions{})
+ if err != nil {
+ t.Errorf("expected nil, but got error: %v", err)
+ return
+ }
+ if vs == nil || vs.Name != workingVS.Name {
+ t.Errorf("expected VirtualService to be created, but it was not")
+ }
+ }
+ },
+ expectedErr: nil,
+ },
+
+ {
+ name: "Given a valid VirtualService is passed for DELETE event, " +
+ "And the VirtualService exists, " +
+ "When, syncVirtualServicesToAllRemoteClusters is invoked, " +
+ "Then, the VirtualService should be deleted, " +
+ "And, it should not return an error",
+ event: common.Delete,
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, virtualServiceControllerForKnownClustersWithKnownVirtualServices),
+ clusters: cluster1,
+ sourceCluster: sourceCluster,
+ assertFunc: func(remoteRegistry *RemoteRegistry, clusters []string, t *testing.T) {
+ for _, cluster := range clusters {
+ rc := remoteRegistry.GetRemoteController(cluster)
+ _, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, workingVS.Name, metaV1.GetOptions{})
+ if !k8sErrors.IsNotFound(err) {
+ t.Errorf("expected error to be Not Found, but got: %v", err)
+ }
+ }
+ },
+ expectedErr: nil,
+ },
+ {
+ name: "Given a valid VirtualService is passed for DELETE event, " +
+ "And the VirtualService does not exist, " +
+ "When, syncVirtualServicesToAllRemoteClusters is invoked, " +
+ "Then, the VirtualService should be deleted",
+ event: common.Delete,
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, virtualServiceControllerForKnownClustersWithoutAnyVirtualServices),
+ clusters: cluster1,
+ sourceCluster: sourceCluster,
+ assertFunc: func(remoteRegistry *RemoteRegistry, clusters []string, t *testing.T) {
+ for _, cluster := range clusters {
+ rc := remoteRegistry.GetRemoteController(cluster)
+ _, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, workingVS.Name, metaV1.GetOptions{})
+ if !k8sErrors.IsNotFound(err) {
+ t.Errorf("expected error to be Not Found, but got: %v", err)
+ }
+ }
+ },
+ expectedErr: nil,
+ },
+ {
+ name: "Given a valid VirtualService is passed for CREATE event, " +
+ "And the VirtualService does not exist, " +
+ "When, an asset has a client in the source cluster, " +
+ "Then, the new VirtualService should be created in source and dependent clusters" +
+ "And, it should not return an error",
+ event: common.Add,
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, virtualServiceControllerForSourceClustersWithoutAnyVirtualServices),
+ clusters: clustersContainingSourceCluster,
+ sourceCluster: sourceCluster,
+ assertFunc: func(remoteRegistry *RemoteRegistry, clusters []string, t *testing.T) {
+ for _, cluster := range clusters {
+ rc := remoteRegistry.GetRemoteController(cluster)
+ vs, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, workingVS.Name, metaV1.GetOptions{})
+ if err != nil {
+ t.Errorf("expected nil, but got error: %v", err)
+ return
+ }
+ if vs == nil || vs.Name != workingVS.Name {
+ t.Errorf("expected VirtualService to be created, but it was not")
+ }
+ }
+ },
+ doSyncVSToSourceCluster: true,
+ expectedErr: nil,
+ },
+ {
+ name: "Given a valid VirtualService is passed for DELETE event, " +
+ "And the VirtualService does not exist, " +
+ "When, an asset has a client in the source cluster, " +
+ "Then, the VirtualService should be deleted from source and dependent clusters",
+ event: common.Delete,
+ virtualService: workingVS,
+ remoteRegistry: newRemoteRegistry(ctx, virtualServiceControllerForSourceClustersWithKnownVirtualServices),
+ clusters: cluster1,
+ sourceCluster: sourceCluster,
+ assertFunc: func(remoteRegistry *RemoteRegistry, clusters []string, t *testing.T) {
+ for _, cluster := range clusters {
+ rc := remoteRegistry.GetRemoteController(cluster)
+ _, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(syncNamespace).Get(ctx, workingVS.Name, metaV1.GetOptions{})
+ if !k8sErrors.IsNotFound(err) {
+ t.Errorf("expected error to be Not Found, but got: %v", err)
+ }
+ }
+ },
+ doSyncVSToSourceCluster: true,
+ expectedErr: nil,
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ common.ResetSync()
+ admiralParams := common.AdmiralParams{
+ EnableSyncIstioResourcesToSourceClusters: c.doSyncVSToSourceCluster,
+ }
+ common.InitializeConfig(admiralParams)
+ err := syncVirtualServicesToAllRemoteClusters(
+ ctx,
+ c.clusters,
+ c.virtualService,
+ c.event,
+ c.remoteRegistry,
+ c.sourceCluster,
+ syncNamespace,
+ )
+ if err != nil && c.expectedErr == nil {
+ t.Errorf("expected error to be nil but got %v", err)
+ }
+ if err != nil && c.expectedErr != nil {
+ if !(err.Error() == c.expectedErr.Error()) {
+ t.Errorf("error mismatch, expected %v but got %v", c.expectedErr, err)
+ }
+ }
+ if err == nil && c.expectedErr != nil {
+ t.Errorf("expected error %v but got %v", c.expectedErr, err)
+ }
+ if c.assertFunc != nil {
+ c.assertFunc(c.remoteRegistry, c.clusters, t)
+ }
+ })
+ }
+}
+
+func TestVirtualSvcHandlerCUDScenarios(t *testing.T) {
+ ctx := context.Background()
+
+ admiralParams := common.AdmiralParams{
+ LabelSet: &common.LabelSet{},
+ SyncNamespace: "test-sync-ns",
+ ArgoRolloutsEnabled: true,
+ }
+ common.InitializeConfig(admiralParams)
+
+ vs := &apiNetworkingV1Alpha3.VirtualService{
+ ObjectMeta: metaV1.ObjectMeta{Name: "my-vs", Namespace: "test-ns"},
+ Spec: networkingV1Alpha3.VirtualService{
+ Http: []*networkingV1Alpha3.HTTPRoute{{Name: "random", Route: []*networkingV1Alpha3.HTTPRouteDestination{
+ {Destination: &networkingV1Alpha3.Destination{Host: "stable-host"}, Weight: 100},
+ {Destination: &networkingV1Alpha3.Destination{Host: "canary-host"}, Weight: 0},
+ }}},
+ },
+ }
+
+ var (
+ config = rest.Config{
+ Host: "localhost",
+ }
+ stop = make(chan struct{})
+ )
+
+ r, err := admiral.NewRolloutsController(stop, &testMocks.MockRolloutHandler{}, &config, time.Second*time.Duration(300), loader.GetFakeClientLoader())
+ if err != nil {
+ t.Fatalf("failed ot initialize rollout controller, err: %v", err)
+ }
+
+ r.RolloutClient = testMocks.MockRolloutsGetter{}
+
+ rr := NewRemoteRegistry(ctx, admiralParams)
+ rr.PutRemoteController("test-cluster", &RemoteController{
+ RolloutController: r,
+ })
+
+ vsHandler := &VirtualServiceHandler{
+ clusterID: "test-cluster",
+ remoteRegistry: rr,
+ updateResource: handleVirtualServiceEventForRollout,
+ }
+
+ admiralParams = common.AdmiralParams{
+ LabelSet: &common.LabelSet{},
+ SyncNamespace: "test-sync-ns",
+ ArgoRolloutsEnabled: false,
+ }
+ common.InitializeConfig(admiralParams)
+ vsHandler2 := &VirtualServiceHandler{
+ clusterID: "test-cluster",
+ remoteRegistry: NewRemoteRegistry(ctx, admiralParams),
+ updateResource: handleVirtualServiceEventForRollout,
+ }
+
+ cases := []struct {
+ name string
+ admiralReadState bool
+ ns string
+ handler *VirtualServiceHandler
+ argoRolloutsEnabled bool
+ }{
+ {
+ name: "virtual service used by Argo rollouts case",
+ admiralReadState: false,
+ ns: "test-ns",
+ handler: vsHandler,
+ argoRolloutsEnabled: true,
+ },
+ {
+ name: "Admiral in read-only state",
+ admiralReadState: true,
+ ns: "test-ns",
+ handler: vsHandler2,
+ argoRolloutsEnabled: false,
+ },
+ {
+ name: "Encountered istio resource",
+ admiralReadState: false,
+ ns: "istio-system",
+ handler: vsHandler2,
+ argoRolloutsEnabled: false,
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ common.ResetSync()
+ admiralParams = common.AdmiralParams{
+ LabelSet: &common.LabelSet{},
+ SyncNamespace: "test-sync-ns",
+ ArgoRolloutsEnabled: c.argoRolloutsEnabled,
+ }
+ common.InitializeConfig(admiralParams)
+ commonUtil.CurrentAdmiralState.ReadOnly = c.admiralReadState
+ vs.ObjectMeta.Namespace = c.ns
+ err := c.handler.Added(ctx, vs)
+ assert.NoError(t, err)
+
+ err = c.handler.Updated(ctx, vs)
+ assert.NoError(t, err)
+
+ err = c.handler.Deleted(ctx, vs)
+ assert.NoError(t, err)
+
+ })
+ }
+}
+
+// TestDeleteVirtualService verifies deleteVirtualService behavior for three
+// cases: a nil VirtualService (expects an error), a VS that does not exist in
+// the fake Istio client (expects an "already deleted" error), and an existing
+// VS (expects deletion with no error).
+func TestDeleteVirtualService(t *testing.T) {
+ ctx := context.Background()
+ namespace := "testns"
+
+ fooVS := &apiNetworkingV1Alpha3.VirtualService{
+ ObjectMeta: metaV1.ObjectMeta{
+ Name: "stage.test00.foo-vs",
+ },
+ Spec: networkingV1Alpha3.VirtualService{
+ Hosts: []string{"stage.test00.foo", "stage.test00.bar"},
+ },
+ }
+
+ // Seed the fake client so the "VS exists" case has something to delete.
+ // NOTE(review): the Create error is ignored here; acceptable in a test fixture.
+ validIstioClient := istioFake.NewSimpleClientset()
+ validIstioClient.NetworkingV1alpha3().VirtualServices(namespace).Create(ctx, fooVS, metaV1.CreateOptions{})
+
+ testcases := []struct {
+ name string
+ virtualService *apiNetworkingV1Alpha3.VirtualService
+ rc *RemoteController
+ expectedError error
+ expectedDeletedVSName string
+ }{
+ {
+ name: "Given virtualservice to delete, when nil VS is passed, the func should return an error",
+ virtualService: nil,
+ expectedError: fmt.Errorf("the VirtualService passed was nil"),
+ },
+ {
+ name: "Given virtualservice to delete, when VS passed does not exists, the func should return an error",
+ virtualService: &apiNetworkingV1Alpha3.VirtualService{ObjectMeta: metaV1.ObjectMeta{Name: "vs-does-not-exists"}},
+ expectedError: fmt.Errorf("either VirtualService was already deleted, or it never existed"),
+ rc: &RemoteController{
+ VirtualServiceController: &istio.VirtualServiceController{
+ IstioClient: validIstioClient,
+ },
+ },
+ },
+ {
+ name: "Given virtualservice to delete, when VS exists, the func should delete the VS and not return any error",
+ virtualService: fooVS,
+ expectedError: nil,
+ rc: &RemoteController{
+ VirtualServiceController: &istio.VirtualServiceController{
+ IstioClient: validIstioClient,
+ },
+ },
+ expectedDeletedVSName: "stage.test00.foo-vs",
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ err := deleteVirtualService(ctx, tc.virtualService, namespace, tc.rc)
+
+ // Errors are compared by substring so wrapped errors still match.
+ if err != nil && tc.expectedError != nil {
+ if !strings.Contains(err.Error(), tc.expectedError.Error()) {
+ t.Errorf("expected %s, got %s", tc.expectedError.Error(), err.Error())
+ }
+ } else if err != tc.expectedError {
+ t.Errorf("expected %v, got %v", tc.expectedError, err)
+ }
+
+ // On successful delete, confirm the VS is really gone (NotFound is the
+ // expected outcome of the Get).
+ if err == nil && tc.expectedDeletedVSName != "" {
+ _, err := tc.rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(namespace).Get(context.Background(), tc.expectedDeletedVSName, metaV1.GetOptions{})
+ if err != nil && !k8sErrors.IsNotFound(err) {
+ t.Errorf("test failed as VS should have been deleted. error: %v", err)
+ }
+ }
+
+ })
+ }
+
+}
+
+// fakeSyncResource is a test double for a SyncVirtualServiceResource function;
+// `called` records whether the returned closure was invoked.
+type fakeSyncResource struct {
+ syncResourceFunc func() SyncVirtualServiceResource
+ called bool
+}
+
+// newFakeSyncResource builds a fakeSyncResource whose sync closure always
+// returns the supplied err, flipping `called` to true when invoked.
+func newFakeSyncResource(err error) *fakeSyncResource {
+ f := &fakeSyncResource{}
+ f.syncResourceFunc = func() SyncVirtualServiceResource {
+ return func(
+ ctx context.Context,
+ dependentClusters []string,
+ obj *apiNetworkingV1Alpha3.VirtualService,
+ event common.Event,
+ remoteRegistry *RemoteRegistry,
+ clusterId string,
+ syncNamespace string) error {
+ f.called = true
+ return err
+ }
+ }
+ return f
+}
+
+// fakeUpdateResource is a test double for an UpdateResourcesForVirtualService
+// function; `called` records whether the returned closure was invoked.
+type fakeUpdateResource struct {
+ updateResourceFunc func() UpdateResourcesForVirtualService
+ called bool
+}
+
+// newFakeUpdateResource builds a fakeUpdateResource whose update closure always
+// returns (isCanaryVS, err), flipping `called` to true when invoked.
+func newFakeUpdateResource(isCanaryVS bool, err error) *fakeUpdateResource {
+ f := &fakeUpdateResource{}
+ f.updateResourceFunc = func() UpdateResourcesForVirtualService {
+ return func(
+ ctx context.Context,
+ virtualService *apiNetworkingV1Alpha3.VirtualService,
+ remoteRegistry *RemoteRegistry,
+ clusterID string,
+ handlerFunc HandleEventForRolloutFunc) (bool, error) {
+ f.called = true
+ return isCanaryVS, err
+ }
+ }
+ return f
+}
+
+// newRemoteRegistryWithDependents returns a RemoteRegistry whose
+// CnameDependentClusterCache maps cname -> clusterID, so tests can simulate a
+// service with one dependent cluster.
+func newRemoteRegistryWithDependents(ctx context.Context, cname, clusterID string) *RemoteRegistry {
+ remoteRegistry := NewRemoteRegistry(ctx, common.AdmiralParams{})
+ remoteRegistry.AdmiralCache.CnameDependentClusterCache.Put(cname, clusterID, clusterID)
+ return remoteRegistry
+}
+
+// newRemoteRegistry returns a RemoteRegistry pre-populated with the given
+// cluster-name -> RemoteController mapping.
+func newRemoteRegistry(ctx context.Context, clusters map[string]*RemoteController) *RemoteRegistry {
+ remoteRegistry := NewRemoteRegistry(ctx, common.AdmiralParams{})
+ for cluster, controller := range clusters {
+ remoteRegistry.PutRemoteController(cluster, controller)
+ }
+ return remoteRegistry
+}
+
+// newFakeIstioClient returns a fake Istio clientset seeded with the given
+// VirtualService in the given namespace.
+// NOTE(review): the Create error is ignored; acceptable for a test helper, but
+// a nil vs would make the seed silently fail.
+func newFakeIstioClient(ctx context.Context, namespace string, vs *apiNetworkingV1Alpha3.VirtualService) *istioFake.Clientset {
+ fakeIstioClientWithoutKnownVirtualServices := istioFake.NewSimpleClientset()
+ fakeIstioClientWithoutKnownVirtualServices.
+ NetworkingV1alpha3().
+ VirtualServices(namespace).
+ Create(ctx, vs, metaV1.CreateOptions{})
+ return fakeIstioClientWithoutKnownVirtualServices
+}
+
+// TestRetryUpdatingVS verifies retryUpdatingVS: no retry on nil error, no
+// retry on non-conflict errors (error passed through), conflict error surfaced
+// when the VS no longer exists, and a successful re-update on conflict when
+// the VS does exist.
+func TestRetryUpdatingVS(t *testing.T) {
+
+ ctxLogger := log.WithFields(log.Fields{
+ "type": "retryUpdatingVS",
+ })
+ ctx := context.TODO()
+
+ vsDoesNotExists := &apiNetworkingV1Alpha3.VirtualService{
+ ObjectMeta: metaV1.ObjectMeta{
+ Name: "vs-does-not-exists",
+ Namespace: common.GetSyncNamespace(),
+ },
+ }
+ // Seed VS in the fake client; labels/annotations start as "false" so a
+ // successful retry (which writes "true") is observable.
+ vs := &apiNetworkingV1Alpha3.VirtualService{
+ ObjectMeta: metaV1.ObjectMeta{
+ Name: "vsToBeUpdated",
+ Namespace: common.GetSyncNamespace(),
+ Labels: map[string]string{"updated": "false"},
+ Annotations: map[string]string{"updated": "false"},
+ },
+ Spec: networkingV1Alpha3.VirtualService{
+ Hosts: []string{"old.host"},
+ },
+ }
+ istioClient := istioFake.NewSimpleClientset()
+ istioClient.
+ NetworkingV1alpha3().
+ VirtualServices(common.GetSyncNamespace()).
+ Create(ctx, vs, metaV1.CreateOptions{})
+
+ rc := &RemoteController{
+ VirtualServiceController: &istio.VirtualServiceController{
+ IstioClient: istioClient,
+ },
+ }
+
+ vsThatShouldBeUpdated := &apiNetworkingV1Alpha3.VirtualService{
+ ObjectMeta: metaV1.ObjectMeta{
+ Name: "vsToBeUpdated",
+ Namespace: common.GetSyncNamespace(),
+ Labels: map[string]string{"updated": "true"},
+ Annotations: map[string]string{"updated": "true"},
+ },
+ Spec: networkingV1Alpha3.VirtualService{
+ Hosts: []string{"new.host"},
+ },
+ }
+
+ testCases := []struct {
+ name string
+ newVS *apiNetworkingV1Alpha3.VirtualService
+ existingVS *apiNetworkingV1Alpha3.VirtualService
+ err error
+ expectedError error
+ expectedVS *apiNetworkingV1Alpha3.VirtualService
+ }{
+ {
+ name: "Given valid params " +
+ "When the error passed is nil" +
+ "Then the func should not update the vs and return no errors",
+ newVS: vs,
+ existingVS: vs,
+ err: nil,
+ expectedVS: vs,
+ expectedError: nil,
+ },
+ {
+ name: "Given valid params " +
+ "When the error is of not type IsConflict" +
+ "Then the func should not update the vs and return no errors",
+ newVS: vs,
+ existingVS: vs,
+ err: fmt.Errorf("some other error"),
+ expectedVS: vs,
+ expectedError: fmt.Errorf("some other error"),
+ },
+ {
+ name: "Given valid params " +
+ "When the passed VS does not exists" +
+ "Then the func should not update the vs and return no errors",
+ newVS: vs,
+ existingVS: vsDoesNotExists,
+ err: k8sErrors.NewConflict(schema.GroupResource{}, "", fmt.Errorf("object already modified")),
+ expectedVS: vs,
+ expectedError: k8sErrors.NewConflict(schema.GroupResource{}, "", fmt.Errorf("object already modified")),
+ },
+ {
+ name: "Given valid params " +
+ "When the passed VS exists" +
+ "Then the func should update the vs and return no errors",
+ newVS: vsThatShouldBeUpdated,
+ existingVS: vsThatShouldBeUpdated,
+ err: k8sErrors.NewConflict(schema.GroupResource{}, "", fmt.Errorf("object already modified")),
+ expectedVS: vsThatShouldBeUpdated,
+ expectedError: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ actualError := retryUpdatingVS(ctxLogger, ctx, tc.newVS, tc.existingVS, common.GetSyncNamespace(), rc, tc.err, "Update")
+
+ if tc.expectedError != nil {
+ assert.NotNil(t, actualError)
+ assert.Equal(t, tc.expectedError.Error(), actualError.Error())
+ } else {
+ // On success, re-read the VS from the fake client and confirm the
+ // retried update landed (labels, annotations, and hosts).
+ assert.Nil(t, actualError)
+ actualVS, err := rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(common.GetSyncNamespace()).Get(ctx, tc.existingVS.Name, metaV1.GetOptions{})
+ assert.Nil(t, err)
+ assert.Equal(t, tc.expectedVS.Labels, actualVS.Labels)
+ assert.Equal(t, tc.expectedVS.Annotations, actualVS.Annotations)
+ assert.Equal(t, tc.expectedVS.Spec.Hosts, actualVS.Spec.Hosts)
+ }
+
+ })
+ }
+}
+
+// TestAddUpdateVirtualService verifies addUpdateVirtualService does not panic
+// and falls back to an Update when Create fails because the VS already exists
+// (existingVS is nil but the fake client already holds the object).
+func TestAddUpdateVirtualService(t *testing.T) {
+ var (
+ ctxLogger = log.WithFields(log.Fields{
+ "type": "VirtualService",
+ })
+ ctx = context.Background()
+
+ namespace = "testns"
+ fooVS = &apiNetworkingV1Alpha3.VirtualService{
+ ObjectMeta: metaV1.ObjectMeta{
+ Name: "stage.test00.foo-vs",
+ },
+ Spec: networkingV1Alpha3.VirtualService{
+ Hosts: []string{"stage.test00.foo", "stage.test00.bar"},
+ },
+ }
+ istioClientWithExistingVS = istioFake.NewSimpleClientset()
+ )
+ // Pre-create the VS so the Create inside addUpdateVirtualService conflicts.
+ istioClientWithExistingVS.NetworkingV1alpha3().VirtualServices(namespace).Create(ctx, fooVS, metaV1.CreateOptions{})
+ rc := &RemoteController{
+ ClusterID: "cluster-1",
+ VirtualServiceController: &istio.VirtualServiceController{
+ IstioClient: istioClientWithExistingVS,
+ },
+ }
+ admiralParams := common.AdmiralParams{
+ LabelSet: &common.LabelSet{},
+ SyncNamespace: "test-sync-ns",
+ EnableSWAwareNSCaches: true,
+ }
+ rr := NewRemoteRegistry(ctx, admiralParams)
+
+ cases := []struct {
+ name string
+ newVS *apiNetworkingV1Alpha3.VirtualService
+ existingVS *apiNetworkingV1Alpha3.VirtualService
+ expErr error
+ }{
+ {
+ name: "Given virtual service does not exist, " +
+ "And the existing object obtained from Get is nil, " +
+ "When another thread create the virtualservice, " +
+ "When this thread attempts to create virtualservice and fails, " +
+ "Then, then an Update operation should be run, " +
+ "And there should be no panic," +
+ "And no errors should be returned",
+ newVS: fooVS,
+ existingVS: nil,
+ expErr: nil,
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ // NOTE(review): return value is not asserted here; the test's value is
+ // the absence of a panic when existingVS is nil.
+ addUpdateVirtualService(ctxLogger, ctx, c.newVS, c.existingVS, namespace, rc, rr)
+ })
+ }
+}
From 61e67fd71a692f0828ec36db49c1364a7315fc4a Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:28:54 -0400
Subject: [PATCH 171/235] removed
admiral/pkg/controller/admiral/admiralclient.go
Signed-off-by: Shriram Sharma
---
.../pkg/controller/admiral/admiralclient.go | 48 -------------------
1 file changed, 48 deletions(-)
delete mode 100644 admiral/pkg/controller/admiral/admiralclient.go
diff --git a/admiral/pkg/controller/admiral/admiralclient.go b/admiral/pkg/controller/admiral/admiralclient.go
deleted file mode 100644
index f01ac018..00000000
--- a/admiral/pkg/controller/admiral/admiralclient.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package admiral
-
-import (
- "fmt"
- log "github.com/sirupsen/logrus"
- "k8s.io/client-go/kubernetes"
- "k8s.io/client-go/rest"
- "k8s.io/client-go/tools/clientcmd"
-
- clientset "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned"
-)
-
-// retrieve the Kubernetes cluster client from outside of the cluster
-func AdmiralCrdClientFromPath(kubeConfigPath string) (clientset.Interface, error) {
- config, err := getConfig(kubeConfigPath)
- if err != nil || config == nil {
- return nil, err
- }
- return AdmiralCrdClientFromConfig(config)
-}
-
-func AdmiralCrdClientFromConfig(config *rest.Config) (clientset.Interface, error) {
- return clientset.NewForConfig(config)
-}
-
-func K8sClientFromConfig(config *rest.Config) (kubernetes.Interface, error) {
- return kubernetes.NewForConfig(config)
-}
-
-func K8sClientFromPath(kubeConfigPath string) (kubernetes.Interface, error) {
-
- config, err := getConfig(kubeConfigPath)
- if err != nil || config == nil {
- return nil, err
- }
- return K8sClientFromConfig(config)
-}
-
-func getConfig(kubeConfigPath string) (*rest.Config, error) {
- log.Infof("getting kubeconfig from: %#v", kubeConfigPath)
- // create the config from the path
- config, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
-
- if err != nil || config == nil {
- return nil, fmt.Errorf("could not retrieve kubeconfig: %v", err)
- }
- return config, err
-}
From 48c732de8866d7f73f5fe33bd5d8f50875fd0e7b Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:29:21 -0400
Subject: [PATCH 172/235] added
admiral/pkg/controller/admiral/clientconnectionconfigcontroller.go from
master
Signed-off-by: Shriram Sharma
---
.../clientconnectionconfigcontroller.go | 263 ++++++++++++++++++
1 file changed, 263 insertions(+)
create mode 100644 admiral/pkg/controller/admiral/clientconnectionconfigcontroller.go
diff --git a/admiral/pkg/controller/admiral/clientconnectionconfigcontroller.go b/admiral/pkg/controller/admiral/clientconnectionconfigcontroller.go
new file mode 100644
index 00000000..a2a40632
--- /dev/null
+++ b/admiral/pkg/controller/admiral/clientconnectionconfigcontroller.go
@@ -0,0 +1,263 @@
+package admiral
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ clientset "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned"
+ informerV1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/informers/externalversions/admiral/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "github.com/sirupsen/logrus"
+ log "github.com/sirupsen/logrus"
+ meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ClientConnectionConfigHandlerInterface is implemented by consumers that want
+// callbacks for ClientConnectionConfig add/update/delete events observed by
+// the controller.
+type ClientConnectionConfigHandlerInterface interface {
+ Added(ctx context.Context, obj *v1.ClientConnectionConfig) error
+ Updated(ctx context.Context, obj *v1.ClientConnectionConfig) error
+ Deleted(ctx context.Context, obj *v1.ClientConnectionConfig) error
+}
+
+// ClientConnectionConfigController watches ClientConnectionConfig custom
+// resources via a shared informer, maintains a local cache of them, and
+// forwards events to the configured handler.
+type ClientConnectionConfigController struct {
+ crdClient clientset.Interface
+ informer cache.SharedIndexInformer
+ clientConnectionSettingsHandler ClientConnectionConfigHandlerInterface
+ Cache *clientConnectionSettingsCache
+}
+
+// clientConnectionSettingsItem pairs a cached ClientConnectionConfig with its
+// processing status (e.g. common.ProcessingInProgress).
+type clientConnectionSettingsItem struct {
+ clientConnectionSettings *v1.ClientConnectionConfig
+ status string
+}
+
+// clientConnectionSettingsCache is an RWMutex-guarded three-level map:
+// key (built from env+identity) -> namespace -> resource name -> item.
+type clientConnectionSettingsCache struct {
+ cache map[string]map[string]map[string]*clientConnectionSettingsItem
+ mutex *sync.RWMutex
+}
+
+// Get returns deep copies of all cached ClientConnectionConfigs under the
+// given key that live in the given namespace; empty slice when none match.
+func (c *clientConnectionSettingsCache) Get(key, namespace string) []*v1.ClientConnectionConfig {
+ // NOTE(review): RUnlock is deferred before RLock is taken; safe because
+ // RLock follows immediately, but the conventional order is lock-then-defer.
+ defer c.mutex.RUnlock()
+ c.mutex.RLock()
+ namespacesWithClientConnectionConfig := c.cache[key]
+ matchedClientConnectionConfig := make([]*v1.ClientConnectionConfig, 0)
+ for ns, clientConnectionSettingsItem := range namespacesWithClientConnectionConfig {
+ if namespace != ns {
+ continue
+ }
+ // DeepCopy so callers cannot mutate the cached object.
+ for _, item := range clientConnectionSettingsItem {
+ matchedClientConnectionConfig = append(matchedClientConnectionConfig, item.clientConnectionSettings.DeepCopy())
+ }
+ }
+ return matchedClientConnectionConfig
+}
+
+// Put inserts (or refreshes) the given ClientConnectionConfig in the cache
+// under a key built from its env and identity, marking it
+// ProcessingInProgress. Resources carrying the admiral.io/ignore marker are
+// removed from the cache instead of stored.
+func (c *clientConnectionSettingsCache) Put(clientConnectionSettings *v1.ClientConnectionConfig) {
+ defer c.mutex.Unlock()
+ c.mutex.Lock()
+ key := common.ConstructKeyWithEnvAndIdentity(common.GetClientConnectionConfigEnv(clientConnectionSettings),
+ common.GetClientConnectionConfigIdentity(clientConnectionSettings))
+ namespacesWithClientConnectionConfig := c.cache[key]
+ if namespacesWithClientConnectionConfig == nil {
+ namespacesWithClientConnectionConfig = make(map[string]map[string]*clientConnectionSettingsItem)
+ }
+ namespaces := namespacesWithClientConnectionConfig[clientConnectionSettings.Namespace]
+ if namespaces == nil {
+ namespaces = make(map[string]*clientConnectionSettingsItem)
+ }
+ if common.ShouldIgnoreResource(clientConnectionSettings.ObjectMeta) {
+ log.Infof("op=%s type=%v name=%v namespace=%s cluster=%s message=%s",
+ "admiralIoIgnoreAnnotationCheck", common.ClientConnectionConfig,
+ clientConnectionSettings.Name, clientConnectionSettings.Namespace, "", "Value=true")
+ delete(namespaces, clientConnectionSettings.Name)
+ } else {
+ namespaces[clientConnectionSettings.Name] = &clientConnectionSettingsItem{
+ clientConnectionSettings: clientConnectionSettings,
+ status: common.ProcessingInProgress,
+ }
+ }
+
+ namespacesWithClientConnectionConfig[clientConnectionSettings.Namespace] = namespaces
+ c.cache[key] = namespacesWithClientConnectionConfig
+
+ // NOTE(review): "gtp=%v" looks copy-pasted from the GTP controller; the
+ // value logged is the ClientConnectionConfig namespace map, not a GTP.
+ logrus.Infof("%s cache for key=%s gtp=%v", common.ClientConnectionConfig, key, namespacesWithClientConnectionConfig)
+}
+
+// Delete removes the given ClientConnectionConfig from the cache; it is a
+// no-op when the key or namespace level is absent.
+func (c *clientConnectionSettingsCache) Delete(clientConnectionSettings *v1.ClientConnectionConfig) {
+ defer c.mutex.Unlock()
+ c.mutex.Lock()
+ key := common.ConstructKeyWithEnvAndIdentity(common.GetClientConnectionConfigEnv(clientConnectionSettings),
+ common.GetClientConnectionConfigIdentity(clientConnectionSettings))
+ namespacesWithClientConnectionConfig := c.cache[key]
+ if namespacesWithClientConnectionConfig == nil {
+ return
+ }
+ namespaces := namespacesWithClientConnectionConfig[clientConnectionSettings.Namespace]
+ if namespaces == nil {
+ return
+ }
+ delete(namespaces, clientConnectionSettings.Name)
+ namespacesWithClientConnectionConfig[clientConnectionSettings.Namespace] = namespaces
+ c.cache[key] = namespacesWithClientConnectionConfig
+}
+
+// GetStatus returns the cached processing status for the given
+// ClientConnectionConfig, or common.NotProcessed when any level of the cache
+// lookup misses.
+func (c *clientConnectionSettingsCache) GetStatus(clientConnectionSettings *v1.ClientConnectionConfig) string {
+ defer c.mutex.RUnlock()
+ c.mutex.RLock()
+
+ key := common.ConstructKeyWithEnvAndIdentity(common.GetClientConnectionConfigEnv(clientConnectionSettings),
+ common.GetClientConnectionConfigIdentity(clientConnectionSettings))
+
+ namespacesWithClientConnectionConfig, ok := c.cache[key]
+ if !ok {
+ return common.NotProcessed
+ }
+ namespaces, ok := namespacesWithClientConnectionConfig[clientConnectionSettings.Namespace]
+ if !ok {
+ return common.NotProcessed
+ }
+ cachedClientConnectionConfig, ok := namespaces[clientConnectionSettings.Name]
+ if !ok {
+ return common.NotProcessed
+ }
+
+ return cachedClientConnectionConfig.status
+}
+
+// UpdateStatus sets the processing status on the cached item for the given
+// ClientConnectionConfig; returns an error when the item is not in the cache
+// (at the key, namespace, or name level).
+func (c *clientConnectionSettingsCache) UpdateStatus(
+ clientConnectionSettings *v1.ClientConnectionConfig, status string) error {
+ defer c.mutex.Unlock()
+ c.mutex.Lock()
+
+ key := common.ConstructKeyWithEnvAndIdentity(common.GetClientConnectionConfigEnv(clientConnectionSettings),
+ common.GetClientConnectionConfigIdentity(clientConnectionSettings))
+
+ namespacesWithClientConnectionConfig, ok := c.cache[key]
+ if !ok {
+ return fmt.Errorf(LogCacheFormat, common.Update, common.ClientConnectionConfig,
+ clientConnectionSettings.Name, clientConnectionSettings.Namespace,
+ "", "skipped updating status in cache, clientConnectionSettings not found in cache")
+ }
+ namespaces, ok := namespacesWithClientConnectionConfig[clientConnectionSettings.Namespace]
+ if !ok {
+ return fmt.Errorf(LogCacheFormat, common.Update, common.ClientConnectionConfig,
+ clientConnectionSettings.Name, clientConnectionSettings.Namespace,
+ "", "skipped updating status in cache, clientConnectionSettings namespace not found in cache")
+ }
+ cachedClientConnectionConfig, ok := namespaces[clientConnectionSettings.Name]
+ if !ok {
+ return fmt.Errorf(LogCacheFormat, common.Update, common.ClientConnectionConfig,
+ clientConnectionSettings.Name, clientConnectionSettings.Namespace,
+ "", "skipped updating status in cache, clientConnectionSettings not found in cache with the specified name")
+ }
+ // Mutating the pointed-to item is sufficient; the map re-assignment below
+ // is redundant but harmless.
+ cachedClientConnectionConfig.status = status
+ c.cache[key] = namespacesWithClientConnectionConfig
+ return nil
+}
+
+// Added caches the new ClientConnectionConfig and forwards the event to the
+// handler. Returns an error on a type-assertion failure.
+func (c *ClientConnectionConfigController) Added(ctx context.Context, obj interface{}) error {
+ clientConnectionSettings, ok := obj.(*v1.ClientConnectionConfig)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1.ClientConnectionConfig", obj)
+ }
+ c.Cache.Put(clientConnectionSettings)
+ return c.clientConnectionSettingsHandler.Added(ctx, clientConnectionSettings)
+}
+
+// Updated re-caches the modified ClientConnectionConfig and forwards the event
+// to the handler. oldObj is unused. Returns an error on a type-assertion
+// failure.
+func (c *ClientConnectionConfigController) Updated(ctx context.Context, obj interface{}, oldObj interface{}) error {
+ clientConnectionSettings, ok := obj.(*v1.ClientConnectionConfig)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1.ClientConnectionConfig", obj)
+ }
+ c.Cache.Put(clientConnectionSettings)
+ return c.clientConnectionSettingsHandler.Updated(ctx, clientConnectionSettings)
+}
+
+// Deleted evicts the ClientConnectionConfig from the cache and forwards the
+// event to the handler. Returns an error on a type-assertion failure.
+func (c *ClientConnectionConfigController) Deleted(ctx context.Context, obj interface{}) error {
+ clientConnectionSettings, ok := obj.(*v1.ClientConnectionConfig)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1.ClientConnectionConfig", obj)
+ }
+ c.Cache.Delete(clientConnectionSettings)
+ return c.clientConnectionSettingsHandler.Deleted(ctx, clientConnectionSettings)
+}
+
+// UpdateProcessItemStatus records the given processing status on the cached
+// item; errors when obj is not a *v1.ClientConnectionConfig or the item is not
+// cached.
+func (c *ClientConnectionConfigController) UpdateProcessItemStatus(obj interface{}, status string) error {
+ clientConnectionSettings, ok := obj.(*v1.ClientConnectionConfig)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1.ClientConnectionConfig", obj)
+ }
+ return c.Cache.UpdateStatus(clientConnectionSettings, status)
+}
+
+// GetProcessItemStatus returns the cached processing status for obj;
+// (common.NotProcessed, error) when obj is not a *v1.ClientConnectionConfig.
+func (c *ClientConnectionConfigController) GetProcessItemStatus(obj interface{}) (string, error) {
+ clientConnectionSettings, ok := obj.(*v1.ClientConnectionConfig)
+ if !ok {
+ return common.NotProcessed,
+ fmt.Errorf("type assertion failed, %v is not of type *v1.ClientConnectionConfig", obj)
+ }
+ return c.Cache.GetStatus(clientConnectionSettings), nil
+}
+
+// LogValueOfAdmiralIoIgnore logs when the resource carries the
+// admiral.io/ignore annotation or label set to "true". Silently returns on a
+// type-assertion failure.
+func (c *ClientConnectionConfigController) LogValueOfAdmiralIoIgnore(obj interface{}) {
+ clientConnectionSettings, ok := obj.(*v1.ClientConnectionConfig)
+ if !ok {
+ return
+ }
+ metadata := clientConnectionSettings.ObjectMeta
+ if metadata.Annotations[common.AdmiralIgnoreAnnotation] == "true" || metadata.Labels[common.AdmiralIgnoreAnnotation] == "true" {
+ log.Infof("op=%s type=%v name=%v namespace=%s cluster=%s message=%s",
+ "admiralIoIgnoreAnnotationCheck", common.ClientConnectionConfig,
+ clientConnectionSettings.Name, clientConnectionSettings.Namespace, "", "Value=true")
+ }
+}
+
+// Get fetches the ClientConnectionConfig from the cluster via the CRD client
+// (not from the local cache). isRetry is unused. Errors when obj has the wrong
+// type or the CRD client is not initialized; the error includes the "txId"
+// context value for correlation.
+func (c *ClientConnectionConfigController) Get(ctx context.Context, isRetry bool, obj interface{}) (interface{}, error) {
+ clientConnectionSettings, ok := obj.(*v1.ClientConnectionConfig)
+ if !ok {
+ return nil, fmt.Errorf("type assertion failed, %v is not of type *v1.ClientConnectionConfig", obj)
+ }
+ if c.crdClient == nil {
+ return nil, fmt.Errorf("crd client is not initialized, txId=%s", ctx.Value("txId"))
+ }
+ return c.crdClient.AdmiralV1alpha1().
+ ClientConnectionConfigs(clientConnectionSettings.Namespace).
+ Get(ctx, clientConnectionSettings.Name, meta_v1.GetOptions{})
+}
+
+// NewClientConnectionConfigController creates a new instance of
+// ClientConnectionConfigController: it loads the Admiral CRD client via the
+// supplied loader, initializes an empty cache, builds an all-namespaces
+// informer with the given resync period, and wires everything together through
+// NewController, which runs until stopCh is closed.
+func NewClientConnectionConfigController(stopCh <-chan struct{}, handler ClientConnectionConfigHandlerInterface,
+ config *rest.Config, resyncPeriod time.Duration, clientLoader loader.ClientLoader) (*ClientConnectionConfigController, error) {
+
+ crdClient, err := clientLoader.LoadAdmiralClientFromConfig(config)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create clientconnectionsettings controller crd client: %w", err)
+ }
+
+ clientConnectionCache := &clientConnectionSettingsCache{}
+ clientConnectionCache.cache = make(map[string]map[string]map[string]*clientConnectionSettingsItem)
+ clientConnectionCache.mutex = &sync.RWMutex{}
+
+ clientConnectionSettingsController := ClientConnectionConfigController{
+ clientConnectionSettingsHandler: handler,
+ crdClient: crdClient,
+ Cache: clientConnectionCache,
+ }
+
+ // Watch ClientConnectionConfigs in all namespaces.
+ clientConnectionSettingsController.informer = informerV1.NewClientConnectionConfigInformer(
+ crdClient,
+ meta_v1.NamespaceAll,
+ resyncPeriod,
+ cache.Indexers{},
+ )
+
+ NewController("clientconnectionsettings-ctrl", config.Host, stopCh,
+ &clientConnectionSettingsController, clientConnectionSettingsController.informer)
+
+ return &clientConnectionSettingsController, nil
+}
From 0a3b3fa92a559cfa8e3cd07bf76d7255ca946e47 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:29:39 -0400
Subject: [PATCH 173/235] added
admiral/pkg/controller/admiral/clientconnectionconfigcontroller_test.go from
master
Signed-off-by: Shriram Sharma
---
.../clientconnectionconfigcontroller_test.go | 1415 +++++++++++++++++
1 file changed, 1415 insertions(+)
create mode 100644 admiral/pkg/controller/admiral/clientconnectionconfigcontroller_test.go
diff --git a/admiral/pkg/controller/admiral/clientconnectionconfigcontroller_test.go b/admiral/pkg/controller/admiral/clientconnectionconfigcontroller_test.go
new file mode 100644
index 00000000..43782956
--- /dev/null
+++ b/admiral/pkg/controller/admiral/clientconnectionconfigcontroller_test.go
@@ -0,0 +1,1415 @@
+package admiral
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "testing"
+
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ admiralv1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned/typed/admiral/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "github.com/stretchr/testify/assert"
+ apiMachineryMetaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/discovery"
+ "k8s.io/client-go/rest"
+)
+
+// TestNewClientConnectionConfigController verifies the constructor returns a
+// non-nil controller (and no error) when built with a fake client loader.
+func TestNewClientConnectionConfigController(t *testing.T) {
+
+ testCases := []struct {
+ name string
+ clientConnectionSettingsHandler ClientConnectionConfigHandlerInterface
+ configPath *rest.Config
+ expectedError error
+ }{
+ {
+ name: "Given valid params " +
+ "When NewClientConnectionConfigController func is called " +
+ "Then func should return ClientConnectionConfigController and no error",
+ configPath: &rest.Config{},
+ clientConnectionSettingsHandler: &MockClientConnectionHandler{},
+ expectedError: nil,
+ },
+ }
+ stop := make(chan struct{})
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ // resyncPeriod 0 disables informer resync in this test.
+ actualClientConnectionConfigController, actualError := NewClientConnectionConfigController(
+ stop, tc.clientConnectionSettingsHandler, tc.configPath, 0, loader.GetFakeClientLoader())
+ if tc.expectedError != nil {
+ if actualError == nil {
+ t.Fatalf("expected %s error got nil error", tc.expectedError)
+ }
+ assert.Equal(t, tc.expectedError.Error(), actualError.Error())
+ } else {
+ assert.NotNil(t, actualClientConnectionConfigController)
+ }
+
+ })
+ }
+
+}
+
+// TestGetClientConnectionConfigController verifies Get: type-assertion failure
+// on a non-CCC object, an explicit error (including the txId context value)
+// when the crdClient is nil, and a successful fetch through a mock CRD client.
+func TestGetClientConnectionConfigController(t *testing.T) {
+ p := common.AdmiralParams{
+ LabelSet: &common.LabelSet{
+ EnvKey: "admiral.io/env",
+ AdmiralCRDIdentityLabel: "identity",
+ },
+ }
+ common.InitializeConfig(p)
+
+ testCases := []struct {
+ name string
+ clientConnectionSettings interface{}
+ ctx context.Context
+ clientConnectionSettingsController *ClientConnectionConfigController
+ expectedError error
+ }{
+ {
+ name: "Given a ClientConnectionConfigController " +
+ "When invalid object is passed to Get func " +
+ "Then then the func should return an error",
+ clientConnectionSettings: &struct{}{},
+ clientConnectionSettingsController: &ClientConnectionConfigController{
+ Cache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{},
+ mutex: &sync.RWMutex{},
+ },
+ },
+ ctx: context.WithValue(context.Background(), "txId", "999"),
+ expectedError: fmt.Errorf("type assertion failed, &{} is not of type *v1.ClientConnectionConfig"),
+ },
+ {
+ name: "Given a ClientConnectionConfigController " +
+ "When valid ClientConnectionConfig object is passed to Get func " +
+ "And crdClient is nil on the ClientConnectionConfigController " +
+ "Then then the func should return an error",
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ clientConnectionSettingsController: &ClientConnectionConfigController{
+ Cache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{},
+ mutex: &sync.RWMutex{},
+ },
+ },
+ ctx: context.WithValue(context.Background(), "txId", "999"),
+ expectedError: fmt.Errorf("crd client is not initialized, txId=999"),
+ },
+ {
+ name: "Given a ClientConnectionConfigController " +
+ "When valid ClientConnectionConfig object is passed to Get func " +
+ "Then then the func should not return any errors",
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ clientConnectionSettingsController: &ClientConnectionConfigController{
+ crdClient: &MockCRDClient{},
+ Cache: &clientConnectionSettingsCache{
+ // Cache key format "env.identity" built by
+ // ConstructKeyWithEnvAndIdentity.
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "testns": {"ccsName": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ },
+ },
+ mutex: &sync.RWMutex{},
+ },
+ },
+ ctx: context.WithValue(context.Background(), "ClientConnectionConfig",
+ &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ }),
+ expectedError: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ actual, actualError := tc.clientConnectionSettingsController.Get(
+ tc.ctx, false, tc.clientConnectionSettings)
+ if actualError != nil {
+ assert.Equal(t, tc.expectedError, actualError)
+ } else {
+ assert.NotNil(t, actual.(*v1.ClientConnectionConfig))
+ }
+
+ })
+ }
+
+}
+
+// TestGetProcessItemStatusClientConnectionConfigController verifies
+// GetProcessItemStatus: error on a non-CCC object, and the cached status
+// (ProcessingInProgress) for a CCC present in the cache.
+func TestGetProcessItemStatusClientConnectionConfigController(t *testing.T) {
+ p := common.AdmiralParams{
+ LabelSet: &common.LabelSet{
+ EnvKey: "admiral.io/env",
+ AdmiralCRDIdentityLabel: "identity",
+ },
+ }
+ common.InitializeConfig(p)
+
+ testCases := []struct {
+ name string
+ clientConnectionSettings interface{}
+ clientConnectionSettingsController *ClientConnectionConfigController
+ expectedstatus string
+ expectedError error
+ }{
+ {
+ name: "Given a ClientConnectionConfigController " +
+ "When invalid object is passed to GetProcessItemStatus func " +
+ "Then then the func should return an error",
+ clientConnectionSettings: &struct{}{},
+ clientConnectionSettingsController: &ClientConnectionConfigController{
+ Cache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{},
+ mutex: &sync.RWMutex{},
+ },
+ },
+ expectedError: fmt.Errorf("type assertion failed, &{} is not of type *v1.ClientConnectionConfig"),
+ },
+ {
+ name: "Given a ClientConnectionConfigController " +
+ "When valid ClientConnectionConfig and status is passed to GetProcessItemStatus func " +
+ "Then then the func should not return any errors and return the status",
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ clientConnectionSettingsController: &ClientConnectionConfigController{
+ Cache: &clientConnectionSettingsCache{
+ // Key "testEnv.testId" matches the env/identity labels above.
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "testns": {"ccsName": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ },
+ },
+ mutex: &sync.RWMutex{},
+ },
+ },
+ expectedError: nil,
+ expectedstatus: common.ProcessingInProgress,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ actualStatus, actualError := tc.clientConnectionSettingsController.GetProcessItemStatus(tc.clientConnectionSettings)
+ if actualError != nil {
+ assert.Equal(t, tc.expectedError, actualError)
+ } else {
+ assert.Equal(t, tc.expectedstatus, actualStatus)
+ }
+
+ })
+ }
+
+}
+
+// TestUpdateProcessItemStatusClientConnectionConfigController verifies UpdateProcessItemStatus:
+// invalid objects produce a type-assertion error; for a cached ClientConnectionConfig the
+// new status is persisted and readable back through Cache.GetStatus.
+func TestUpdateProcessItemStatusClientConnectionConfigController(t *testing.T) {
+ p := common.AdmiralParams{
+ LabelSet: &common.LabelSet{
+ EnvKey: "admiral.io/env",
+ AdmiralCRDIdentityLabel: "identity",
+ },
+ }
+ common.InitializeConfig(p)
+
+ testCases := []struct {
+ name string
+ clientConnectionSettings interface{}
+ clientConnectionSettingsController *ClientConnectionConfigController
+ status string
+ expectedError error
+ }{
+ {
+ name: "Given a ClientConnectionConfigController " +
+ "When invalid object is passed to UpdateProcessItemStatus func " +
+ "Then then the func should return an error",
+ clientConnectionSettings: &struct{}{},
+ clientConnectionSettingsController: &ClientConnectionConfigController{
+ Cache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{},
+ mutex: &sync.RWMutex{},
+ },
+ },
+ expectedError: fmt.Errorf("type assertion failed, &{} is not of type *v1.ClientConnectionConfig"),
+ },
+ {
+ name: "Given a ClientConnectionConfigController " +
+ "When valid ClientConnectionConfig and status is passed to UpdateProcessItemStatus func " +
+ "Then then the func should not return any errors and update the status",
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ clientConnectionSettingsController: &ClientConnectionConfigController{
+ Cache: &clientConnectionSettingsCache{
+ // Seeded with ProcessingInProgress so the update to tc.status
+ // (common.Processed) is observable.
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "testns": {"ccsName": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ },
+ },
+ mutex: &sync.RWMutex{},
+ },
+ },
+ expectedError: nil,
+ status: common.Processed,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ actualError := tc.clientConnectionSettingsController.UpdateProcessItemStatus(
+ tc.clientConnectionSettings, tc.status)
+ if actualError != nil {
+ assert.Equal(t, tc.expectedError, actualError)
+ } else {
+ // On success, read the status back from the cache to confirm persistence.
+ actualStatus := tc.clientConnectionSettingsController.Cache.GetStatus(
+ tc.clientConnectionSettings.(*v1.ClientConnectionConfig))
+ assert.Equal(t, tc.status, actualStatus)
+ }
+
+ })
+ }
+
+}
+
+// TestDeletedClientConnectionConfigController verifies the Deleted handler:
+// an invalid object returns a type-assertion error; a valid
+// ClientConnectionConfig is handled without error (handler is a no-op mock).
+func TestDeletedClientConnectionConfigController(t *testing.T) {
+ p := common.AdmiralParams{
+ LabelSet: &common.LabelSet{
+ EnvKey: "admiral.io/env",
+ AdmiralCRDIdentityLabel: "identity",
+ },
+ }
+ common.InitializeConfig(p)
+
+ testCases := []struct {
+ name string
+ clientConnectionSettings interface{}
+ clientConnectionSettingsController *ClientConnectionConfigController
+ expectedError error
+ }{
+ {
+ name: "Given a ClientConnectionConfigController " +
+ "When invalid object is passed to Deleted func " +
+ "Then then the func should return an error",
+ clientConnectionSettings: &struct{}{},
+ clientConnectionSettingsController: &ClientConnectionConfigController{
+ clientConnectionSettingsHandler: &MockClientConnectionHandler{},
+ Cache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{},
+ mutex: &sync.RWMutex{},
+ },
+ },
+ expectedError: fmt.Errorf("type assertion failed, &{} is not of type *v1.ClientConnectionConfig"),
+ },
+ {
+ name: "Given a ClientConnectionConfigController " +
+ "When valid ClientConnectionConfig is passed to Deleted func " +
+ "Then then the func should not return any errors",
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ clientConnectionSettingsController: &ClientConnectionConfigController{
+ clientConnectionSettingsHandler: &MockClientConnectionHandler{},
+ Cache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{},
+ mutex: &sync.RWMutex{},
+ },
+ },
+ expectedError: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ actualError := tc.clientConnectionSettingsController.Deleted(context.Background(),
+ tc.clientConnectionSettings)
+ assert.Equal(t, tc.expectedError, actualError)
+
+ })
+ }
+
+}
+
+// TestUpdatedClientConnectionConfigController verifies the Updated handler:
+// an invalid object returns a type-assertion error; a valid
+// ClientConnectionConfig is handled without error (handler is a no-op mock).
+func TestUpdatedClientConnectionConfigController(t *testing.T) {
+ p := common.AdmiralParams{
+ LabelSet: &common.LabelSet{
+ EnvKey: "admiral.io/env",
+ AdmiralCRDIdentityLabel: "identity",
+ },
+ }
+ common.InitializeConfig(p)
+
+ testCases := []struct {
+ name string
+ clientConnectionSettings interface{}
+ clientConnectionSettingsController *ClientConnectionConfigController
+ expectedError error
+ }{
+ {
+ name: "Given a ClientConnectionConfigController " +
+ "When invalid object is passed to Updated func " +
+ "Then then the func should return an error",
+ clientConnectionSettings: &struct{}{},
+ clientConnectionSettingsController: &ClientConnectionConfigController{
+ clientConnectionSettingsHandler: &MockClientConnectionHandler{},
+ Cache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{},
+ mutex: &sync.RWMutex{},
+ },
+ },
+ expectedError: fmt.Errorf("type assertion failed, &{} is not of type *v1.ClientConnectionConfig"),
+ },
+ {
+ name: "Given a ClientConnectionConfigController " +
+ "When valid ClientConnectionConfig is passed to Updated func " +
+ "Then then the func should not return any errors",
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ clientConnectionSettingsController: &ClientConnectionConfigController{
+ clientConnectionSettingsHandler: &MockClientConnectionHandler{},
+ Cache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{},
+ mutex: &sync.RWMutex{},
+ },
+ },
+ expectedError: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ // Second argument (old object) is nil; only the new object is validated.
+ actualError := tc.clientConnectionSettingsController.Updated(context.Background(),
+ tc.clientConnectionSettings, nil)
+ assert.Equal(t, tc.expectedError, actualError)
+
+ })
+ }
+
+}
+
+// TestAddedClientConnectionConfigController verifies the Added handler:
+// an invalid object returns a type-assertion error; a valid
+// ClientConnectionConfig is handled without error (handler is a no-op mock).
+func TestAddedClientConnectionConfigController(t *testing.T) {
+ p := common.AdmiralParams{
+ LabelSet: &common.LabelSet{
+ EnvKey: "admiral.io/env",
+ AdmiralCRDIdentityLabel: "identity",
+ },
+ }
+ common.InitializeConfig(p)
+
+ testCases := []struct {
+ name string
+ clientConnectionSettings interface{}
+ clientConnectionSettingsController *ClientConnectionConfigController
+ expectedError error
+ }{
+ {
+ name: "Given a ClientConnectionConfigController " +
+ "When invalid object is passed to Added func " +
+ "Then then the func should return an error",
+ clientConnectionSettings: &struct{}{},
+ clientConnectionSettingsController: &ClientConnectionConfigController{
+ clientConnectionSettingsHandler: &MockClientConnectionHandler{},
+ Cache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{},
+ mutex: &sync.RWMutex{},
+ },
+ },
+ expectedError: fmt.Errorf("type assertion failed, &{} is not of type *v1.ClientConnectionConfig"),
+ },
+ {
+ name: "Given a ClientConnectionConfigController " +
+ "When valid ClientConnectionConfig is passed to Added func " +
+ "Then then the func should not return any errors",
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ clientConnectionSettingsController: &ClientConnectionConfigController{
+ clientConnectionSettingsHandler: &MockClientConnectionHandler{},
+ Cache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{},
+ mutex: &sync.RWMutex{},
+ },
+ },
+ expectedError: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ actualError := tc.clientConnectionSettingsController.Added(context.Background(), tc.clientConnectionSettings)
+ assert.Equal(t, tc.expectedError, actualError)
+
+ })
+ }
+
+}
+
+// TestUpdateStatus exercises clientConnectionSettingsCache.UpdateStatus across the three
+// miss cases of the env.identity -> namespace -> name lookup (key miss, namespace miss,
+// name miss) — each returning a distinct error — plus the full-match case, which updates
+// the stored status in place.
+func TestUpdateStatus(t *testing.T) {
+ p := common.AdmiralParams{
+ LabelSet: &common.LabelSet{
+ EnvKey: "admiral.io/env",
+ AdmiralCRDIdentityLabel: "identity",
+ },
+ }
+ common.InitializeConfig(p)
+
+ testCases := []struct {
+ name string
+ clientConnectionSettingsCache *clientConnectionSettingsCache
+ clientConnectionSettings *v1.ClientConnectionConfig
+ status string
+ expectedError error
+ }{
+ {
+ name: "Given an clientConnectionSettingsCache " +
+ "When a ClientConnectionConfig, and status is passed to the UpdateStatus func " +
+ "And the key does not exists in the cache " +
+ "Then the func should return an error",
+ clientConnectionSettingsCache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "testns": {"ccsName": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ },
+ },
+ mutex: &sync.RWMutex{},
+ },
+ // Labels resolve to key "foo.bar", which is absent from the cache.
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "bar",
+ Namespace: "barns",
+ Labels: map[string]string{
+ "admiral.io/env": "foo",
+ "identity": "bar",
+ },
+ },
+ },
+ status: common.NotProcessed,
+ expectedError: fmt.Errorf(
+ "op=Update type=ClientConnectionConfig name=bar namespace=barns cluster= " +
+ "message=skipped updating status in cache, clientConnectionSettings not found in cache"),
+ },
+ {
+ name: "Given an clientConnectionSettingsCache " +
+ "When a ClientConnectionConfig, and status is passed to the UpdateStatus func " +
+ "And the matching namespace does not exists in the cache " +
+ "Then the func should return an error",
+ clientConnectionSettingsCache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "testns": {"ccsName": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ },
+ },
+ mutex: &sync.RWMutex{},
+ },
+ // Key "testEnv.testId" exists, but namespace "barns" does not.
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "barns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.NotProcessed,
+ expectedError: fmt.Errorf(
+ "op=Update type=ClientConnectionConfig name=ccsName namespace=barns cluster= " +
+ "message=skipped updating status in cache, clientConnectionSettings namespace not found in cache"),
+ },
+ {
+ name: "Given an clientConnectionSettingsCache " +
+ "When a ClientConnectionConfig, and status is passed to the UpdateStatus func " +
+ "And the matching name does not exists in the cache " +
+ "Then the func should return an error",
+ clientConnectionSettingsCache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "testns": {"ccsName": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ },
+ },
+ mutex: &sync.RWMutex{},
+ },
+ // Key and namespace match, but name "ccs0" is not cached.
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccs0",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.NotProcessed,
+ expectedError: fmt.Errorf(
+ "op=Update type=ClientConnectionConfig name=ccs0 namespace=testns cluster= " +
+ "message=skipped updating status in cache, clientConnectionSettings not found in cache with the specified name"),
+ },
+ {
+ name: "Given an clientConnectionSettingsCache " +
+ "When a valid ClientConnectionConfig, and status is passed to the UpdateStatus func " +
+ "Then the func should updated the status and not return an error",
+ clientConnectionSettingsCache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "testns": {"ccsName": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ },
+ },
+ mutex: &sync.RWMutex{},
+ },
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.NotProcessed,
+ expectedError: nil,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ actualError := tc.clientConnectionSettingsCache.UpdateStatus(tc.clientConnectionSettings, tc.status)
+
+ if actualError != nil {
+ // Compare error strings rather than error values: the implementation
+ // builds the message via its own formatting helper.
+ assert.Equal(t, tc.expectedError.Error(), actualError.Error())
+ } else {
+ actualStatus := tc.clientConnectionSettingsCache.GetStatus(tc.clientConnectionSettings)
+ assert.Equal(t, tc.status, actualStatus)
+ }
+
+ })
+ }
+
+}
+
+// TestGetStatus exercises clientConnectionSettingsCache.GetStatus: any miss in the
+// env.identity -> namespace -> name lookup yields common.NotProcessed, while a full
+// match returns the cached item's status.
+func TestGetStatus(t *testing.T) {
+ p := common.AdmiralParams{
+ LabelSet: &common.LabelSet{
+ EnvKey: "admiral.io/env",
+ AdmiralCRDIdentityLabel: "identity",
+ },
+ }
+ common.InitializeConfig(p)
+
+ testCases := []struct {
+ name string
+ clientConnectionSettingsCache *clientConnectionSettingsCache
+ clientConnectionSettings *v1.ClientConnectionConfig
+ expectedStatus string
+ }{
+ {
+ name: "Given an clientConnectionSettingsCache " +
+ "When a ClientConnectionConfig is passed to the GetStatus func " +
+ "And the key does not exists in the cache " +
+ "Then the func should return NotProcessed as the status",
+ clientConnectionSettingsCache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "testns": {"ccsName": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ },
+ },
+ mutex: &sync.RWMutex{},
+ },
+ // Labels resolve to key "foo.bar", absent from the cache.
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "bar",
+ Namespace: "barns",
+ Labels: map[string]string{
+ "admiral.io/env": "foo",
+ "identity": "bar",
+ },
+ },
+ },
+ expectedStatus: common.NotProcessed,
+ },
+ {
+ name: "Given an clientConnectionSettingsCache " +
+ "When a ClientConnectionConfig is passed to the GetStatus func " +
+ "And there is no matching clientConnectionSetting in the cache for the given NS" +
+ "Then the func should return NotProcessed as the status",
+ clientConnectionSettingsCache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "testns": {"ccsName": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ },
+ },
+ mutex: &sync.RWMutex{},
+ },
+ // Key matches but namespace "barns" is not cached.
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "testId",
+ Namespace: "barns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ expectedStatus: common.NotProcessed,
+ },
+ {
+ name: "Given an clientConnectionSettingsCache " +
+ "When a ClientConnectionConfig is passed to the GetStatus func " +
+ "And there is no matching clientConnectionSetting in the cache for the given name" +
+ "Then the func should return NotProcessed as the status",
+ clientConnectionSettingsCache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "testns": {"ccsName": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ },
+ },
+ mutex: &sync.RWMutex{},
+ },
+ // Key and namespace match but name "testId" is not cached under "testns".
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "testId",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ expectedStatus: common.NotProcessed,
+ },
+ {
+ name: "Given an clientConnectionSettingsCache " +
+ "When a ClientConnectionConfig is passed to the GetStatus func " +
+ "And there is a matching clientConnectionSetting in the cache " +
+ "Then the func should return the correct status",
+ clientConnectionSettingsCache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "testns": {"ccsName": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ },
+ },
+ mutex: &sync.RWMutex{},
+ },
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ expectedStatus: common.ProcessingInProgress,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ actual := tc.clientConnectionSettingsCache.GetStatus(tc.clientConnectionSettings)
+
+ assert.Equal(t, tc.expectedStatus, actual)
+
+ })
+ }
+
+}
+
+// TestDeleteClientConnectionConfigCache verifies clientConnectionSettingsCache.Delete:
+// deleting an uncached ClientConnectionConfig leaves the cache untouched, while a
+// cached one is removed from its namespace bucket (the empty bucket remains).
+func TestDeleteClientConnectionConfigCache(t *testing.T) {
+ p := common.AdmiralParams{
+ LabelSet: &common.LabelSet{
+ EnvKey: "admiral.io/env",
+ AdmiralCRDIdentityLabel: "identity",
+ },
+ }
+ common.InitializeConfig(p)
+ testCases := []struct {
+ name string
+ clientConnectionSettingsCache *clientConnectionSettingsCache
+ clientConnectionSettings *v1.ClientConnectionConfig
+ expectedCache map[string]map[string]map[string]*clientConnectionSettingsItem
+ }{
+ {
+ name: "Given an clientConnectionSettingsCache " +
+ "When a ClientConnectionConfig is passed to the Delete func " +
+ "And the ClientConnectionConfig does not exists in the cache " +
+ "Then the func should not delete anything from the cache",
+ clientConnectionSettingsCache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "testns": {"ccsName": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ },
+ },
+ mutex: &sync.RWMutex{},
+ },
+ // Resolves to key "foo.bar", which is not in the cache.
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "bar",
+ Namespace: "barns",
+ Labels: map[string]string{
+ "admiral.io/env": "foo",
+ "identity": "bar",
+ },
+ },
+ },
+ // Expected cache is identical to the seeded cache: nothing removed.
+ expectedCache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "testns": {"ccsName": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ },
+ },
+ },
+ {
+ name: "Given an clientConnectionSettingsCache " +
+ "When a valid ClientConnectionConfig is passed to the Delete func " +
+ "Then the func should delete the ClientConnectionConfig fromthe cache",
+ clientConnectionSettingsCache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "testns": {"ccs0": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccs0",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ },
+ },
+ mutex: &sync.RWMutex{},
+ },
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccs0",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ // Only the named entry is deleted; the namespace bucket stays, now empty.
+ expectedCache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "testns": {},
+ },
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ tc.clientConnectionSettingsCache.Delete(tc.clientConnectionSettings)
+
+ assert.Equal(t, tc.expectedCache, tc.clientConnectionSettingsCache.cache)
+
+ })
+ }
+
+}
+
+// TestPutClientConnectionConfigCache verifies clientConnectionSettingsCache.Put:
+// inserting into an empty cache, into an existing key under a new namespace, and
+// alongside an existing entry in the same namespace all store the item with
+// status common.ProcessingInProgress and never displace existing entries.
+func TestPutClientConnectionConfigCache(t *testing.T) {
+ p := common.AdmiralParams{
+ LabelSet: &common.LabelSet{
+ EnvKey: "admiral.io/env",
+ AdmiralCRDIdentityLabel: "identity",
+ },
+ }
+ common.InitializeConfig(p)
+
+ testCases := []struct {
+ name string
+ clientConnectionSettingsCache *clientConnectionSettingsCache
+ clientConnectionSettings *v1.ClientConnectionConfig
+ expectedCache map[string]map[string]map[string]*clientConnectionSettingsItem
+ }{
+ {
+ name: "Given an empty clientConnectionSettingsCache " +
+ "When a valid ClientConnectionConfig is passed to the Put func " +
+ "Then the func should add the ClientConnectionConfig to the cache",
+ clientConnectionSettingsCache: &clientConnectionSettingsCache{
+ cache: make(map[string]map[string]map[string]*clientConnectionSettingsItem),
+ mutex: &sync.RWMutex{},
+ },
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ expectedCache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "testns": {"ccsName": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ },
+ },
+ },
+ {
+ name: "Given an clientConnectionSettingsCache " +
+ "When a valid ClientConnectionConfig is passed to the Put func " +
+ "And the ClientConnectionConfig is in a different namespace " +
+ "Then the func should add the ClientConnectionConfig to the cache",
+ clientConnectionSettingsCache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "someotherns": {"ccsName": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "someotherns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ },
+ },
+ mutex: &sync.RWMutex{},
+ },
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ // Both namespaces are expected under the same env.identity key.
+ expectedCache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "testns": {"ccsName": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ "someotherns": {"ccsName": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ Namespace: "someotherns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ },
+ },
+ },
+ {
+ name: "Given an clientConnectionSettingsCache " +
+ "When a valid ClientConnectionConfig is passed to the Put func " +
+ "And another ClientConnectionConfig is in same namespace " +
+ "Then the func should add the ClientConnectionConfig to the cache",
+ clientConnectionSettingsCache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "testns": {"ccs0": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccs0",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ },
+ },
+ mutex: &sync.RWMutex{},
+ },
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccs1",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ // Both ccs0 and ccs1 are expected side by side in namespace "testns".
+ expectedCache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "testEnv.testId": {
+ "testns": {
+ "ccs0": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccs0",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ },
+ "ccs1": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccs1",
+ Namespace: "testns",
+ Labels: map[string]string{
+ "admiral.io/env": "testEnv",
+ "identity": "testId",
+ },
+ },
+ },
+ status: common.ProcessingInProgress,
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ tc.clientConnectionSettingsCache.Put(tc.clientConnectionSettings)
+
+ assert.Equal(t, tc.expectedCache, tc.clientConnectionSettingsCache.cache)
+
+ })
+ }
+
+}
+
+// TestGetClientConnectionConfigCache verifies clientConnectionSettingsCache.Get(key, namespace):
+// misses on either key or namespace return an empty (non-nil) slice, and a hit returns
+// the cached *v1.ClientConnectionConfig objects for that namespace.
+// No Admiral config is initialized here: Get operates purely on the cache key/namespace.
+func TestGetClientConnectionConfigCache(t *testing.T) {
+
+ testCases := []struct {
+ name string
+ clientConnectionSettingsCache *clientConnectionSettingsCache
+ key string
+ namespace string
+ expectedClientConnectionConfigList []*v1.ClientConnectionConfig
+ }{
+ {
+ name: "Given an empty clientConnectionSettingsCache " +
+ "When Get func is called on it " +
+ "Then the func should return an empty slice of clientConnectionSettingsItem",
+ clientConnectionSettingsCache: &clientConnectionSettingsCache{
+ cache: make(map[string]map[string]map[string]*clientConnectionSettingsItem),
+ mutex: &sync.RWMutex{},
+ },
+ key: "doesNotExists",
+ namespace: "testns",
+ expectedClientConnectionConfigList: []*v1.ClientConnectionConfig{},
+ },
+ {
+ name: "Given an clientConnectionSettingsCache " +
+ "When Get func is called with a key and namespace param " +
+ "And the passed namespace does not match the key " +
+ "Then the func should return an empty slice of clientConnectionSettingsItem",
+ clientConnectionSettingsCache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "ccskey": {"someotherns": map[string]*clientConnectionSettingsItem{}},
+ },
+ mutex: &sync.RWMutex{},
+ },
+ key: "ccskey",
+ namespace: "testns",
+ expectedClientConnectionConfigList: []*v1.ClientConnectionConfig{},
+ },
+ {
+ name: "Given an clientConnectionSettingsCache " +
+ "When Get func is called with a key and namespace param " +
+ "And the passed namespace does match the key " +
+ "Then the func should return a slice of clientConnectionSettingsItem",
+ clientConnectionSettingsCache: &clientConnectionSettingsCache{
+ cache: map[string]map[string]map[string]*clientConnectionSettingsItem{
+ "ccskey": {
+ "testns": {"ccsName": &clientConnectionSettingsItem{
+ clientConnectionSettings: &v1.ClientConnectionConfig{
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ },
+ },
+ status: common.ProcessingInProgress,
+ }},
+ },
+ },
+ mutex: &sync.RWMutex{},
+ },
+ key: "ccskey",
+ namespace: "testns",
+ expectedClientConnectionConfigList: []*v1.ClientConnectionConfig{
+ {
+ ObjectMeta: apiMachineryMetaV1.ObjectMeta{
+ Name: "ccsName",
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+
+ actual := tc.clientConnectionSettingsCache.Get(tc.key, tc.namespace)
+
+ assert.NotNil(t, actual)
+ assert.Equal(t, tc.expectedClientConnectionConfigList, actual)
+
+ })
+ }
+
+}
+
+// MockClientConnectionHandler is a no-op handler used by the controller tests above;
+// every lifecycle callback succeeds without side effects.
+type MockClientConnectionHandler struct {
+}
+
+// Added is a no-op; always returns nil.
+func (m *MockClientConnectionHandler) Added(ctx context.Context, obj *v1.ClientConnectionConfig) error {
+ return nil
+}
+
+// Updated is a no-op; always returns nil.
+func (m *MockClientConnectionHandler) Updated(ctx context.Context, obj *v1.ClientConnectionConfig) error {
+ return nil
+}
+
+// Deleted is a no-op; always returns nil.
+func (m *MockClientConnectionHandler) Deleted(ctx context.Context, obj *v1.ClientConnectionConfig) error {
+ return nil
+}
+
+// MockCRDClient stubs the Admiral CRD clientset; only AdmiralV1alpha1 is functional.
+type MockCRDClient struct {
+}
+
+// Discovery returns nil — discovery is not exercised by these tests.
+func (m MockCRDClient) Discovery() discovery.DiscoveryInterface {
+ return nil
+}
+
+// AdmiralV1alpha1 returns the stub API group implementation below.
+func (m MockCRDClient) AdmiralV1alpha1() admiralv1.AdmiralV1alpha1Interface {
+ return MockAdmiralV1{}
+}
+
+// MockAdmiralV1 stubs the v1alpha1 API group; only ClientConnectionConfigs returns a
+// usable client, all other resource accessors return nil.
+type MockAdmiralV1 struct {
+}
+
+func (m MockAdmiralV1) RESTClient() rest.Interface {
+ return nil
+}
+
+// ClientConnectionConfigs is the only accessor backed by a mock implementation.
+func (m MockAdmiralV1) ClientConnectionConfigs(namespace string) admiralv1.ClientConnectionConfigInterface {
+ return MockClientConnectionConfig{}
+}
+
+func (m MockAdmiralV1) Dependencies(namespace string) admiralv1.DependencyInterface {
+ return nil
+}
+
+func (m MockAdmiralV1) DependencyProxies(namespace string) admiralv1.DependencyProxyInterface {
+ return nil
+}
+
+func (m MockAdmiralV1) GlobalTrafficPolicies(namespace string) admiralv1.GlobalTrafficPolicyInterface {
+ return nil
+}
+
+func (m MockAdmiralV1) OutlierDetections(namespace string) admiralv1.OutlierDetectionInterface {
+ return nil
+}
+
+func (m MockAdmiralV1) RoutingPolicies(namespace string) admiralv1.RoutingPolicyInterface {
+ return nil
+}
+
+func (m MockAdmiralV1) TrafficConfigs(namespace string) admiralv1.TrafficConfigInterface {
+ return nil
+}
+
+// MockClientConnectionConfig stubs the ClientConnectionConfig typed client. All write
+// operations succeed with nil results; only Get returns data, pulled from the context.
+type MockClientConnectionConfig struct {
+}
+
+func (m MockClientConnectionConfig) Create(ctx context.Context, clientConnectionSettings *v1.ClientConnectionConfig, opts apiMachineryMetaV1.CreateOptions) (*v1.ClientConnectionConfig, error) {
+ return nil, nil
+}
+
+func (m MockClientConnectionConfig) Update(ctx context.Context, clientConnectionSettings *v1.ClientConnectionConfig, opts apiMachineryMetaV1.UpdateOptions) (*v1.ClientConnectionConfig, error) {
+ return nil, nil
+}
+
+func (m MockClientConnectionConfig) UpdateStatus(ctx context.Context, clientConnectionSettings *v1.ClientConnectionConfig, opts apiMachineryMetaV1.UpdateOptions) (*v1.ClientConnectionConfig, error) {
+ return nil, nil
+}
+
+func (m MockClientConnectionConfig) Delete(ctx context.Context, name string, opts apiMachineryMetaV1.DeleteOptions) error {
+ return nil
+}
+
+func (m MockClientConnectionConfig) DeleteCollection(ctx context.Context, opts apiMachineryMetaV1.DeleteOptions, listOpts apiMachineryMetaV1.ListOptions) error {
+ return nil
+}
+
+// Get returns the object stashed under the "ClientConnectionConfig" context key.
+// The unchecked type assertion panics if the key is absent or holds another type —
+// acceptable in tests, where callers are expected to seed the context.
+func (m MockClientConnectionConfig) Get(ctx context.Context, name string, opts apiMachineryMetaV1.GetOptions) (*v1.ClientConnectionConfig, error) {
+ return ctx.Value("ClientConnectionConfig").(*v1.ClientConnectionConfig), nil
+}
+
+func (m MockClientConnectionConfig) List(ctx context.Context, opts apiMachineryMetaV1.ListOptions) (*v1.ClientConnectionConfigList, error) {
+ return nil, nil
+}
+
+func (m MockClientConnectionConfig) Watch(ctx context.Context, opts apiMachineryMetaV1.ListOptions) (watch.Interface, error) {
+ return nil, nil
+}
+
+func (m MockClientConnectionConfig) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts apiMachineryMetaV1.PatchOptions, subresources ...string) (result *v1.ClientConnectionConfig, err error) {
+ return nil, nil
+}
From fc75c59d8bcc488999e8d9a336631d66699327cc Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:30:39 -0400
Subject: [PATCH 174/235] copied admiral/pkg/controller/admiral/configmap.go
from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/controller/admiral/configmap.go | 55 ++++++---------------
1 file changed, 14 insertions(+), 41 deletions(-)
diff --git a/admiral/pkg/controller/admiral/configmap.go b/admiral/pkg/controller/admiral/configmap.go
index 8156d9b8..7dd9f100 100644
--- a/admiral/pkg/controller/admiral/configmap.go
+++ b/admiral/pkg/controller/admiral/configmap.go
@@ -4,16 +4,13 @@ import (
"context"
"strings"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
v1 "k8s.io/api/core/v1"
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
- "k8s.io/client-go/rest"
- "k8s.io/client-go/tools/clientcmd"
)
-const configmapName = "se-address-configmap"
-
type ConfigMapControllerInterface interface {
GetConfigMap(ctx context.Context) (*v1.ConfigMap, error)
PutConfigMap(ctx context.Context, newMap *v1.ConfigMap) error
@@ -26,52 +23,28 @@ type ConfigMapController struct {
ServiceEntryIPPrefix string
}
-//todo this is a temp state, eventually changes will have to be made to give each cluster it's own configmap
-
-func NewConfigMapController(seIPPrefix string) (*ConfigMapController, error) {
+// todo this is a temp state, eventually changes will have to be made to give each cluster it's own configmap
+func NewConfigMapController(seIPPrefix string, clientLoader loader.ClientLoader) (*ConfigMapController, error) {
kubeconfigPath := common.GetKubeconfigPath()
namespaceToUse := common.GetSyncNamespace()
- if kubeconfigPath == "" {
- config, err := rest.InClusterConfig()
- if err != nil {
- return nil, err
- }
- client, err := kubernetes.NewForConfig(config)
- if err != nil {
- return nil, err
- }
- controller := ConfigMapController{
- K8sClient: client,
- ConfigmapNamespace: namespaceToUse,
- ServiceEntryIPPrefix: seIPPrefix,
- }
- return &controller, nil
- } else {
- // use the current context in kubeconfig
- config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
- if err != nil {
- return nil, err
- }
+ client, err := clientLoader.LoadKubeClientFromPath(kubeconfigPath)
+ if err != nil {
+ return nil, err
+ }
- // create the clientset
- client, err := kubernetes.NewForConfig(config)
- if err != nil {
- return nil, err
- }
- controller := ConfigMapController{
- K8sClient: client,
- ConfigmapNamespace: namespaceToUse,
- ServiceEntryIPPrefix: seIPPrefix,
- }
- return &controller, nil
+ controller := ConfigMapController{
+ K8sClient: client,
+ ConfigmapNamespace: namespaceToUse,
+ ServiceEntryIPPrefix: seIPPrefix,
}
+ return &controller, nil
}
func (c *ConfigMapController) GetConfigMap(ctx context.Context) (*v1.ConfigMap, error) {
getOpts := metaV1.GetOptions{}
- configMap, err := c.K8sClient.CoreV1().ConfigMaps(c.ConfigmapNamespace).Get(ctx, configmapName, getOpts)
+ configMap, err := c.K8sClient.CoreV1().ConfigMaps(c.ConfigmapNamespace).Get(ctx, common.GetSeAddressConfigMap(), getOpts)
if err == nil {
return configMap, err
@@ -79,7 +52,7 @@ func (c *ConfigMapController) GetConfigMap(ctx context.Context) (*v1.ConfigMap,
if strings.Contains(err.Error(), "not found") {
cm := v1.ConfigMap{}
- cm.Name = configmapName
+ cm.Name = common.GetSeAddressConfigMap()
cm.Namespace = c.ConfigmapNamespace
configMap, err = c.K8sClient.CoreV1().ConfigMaps(c.ConfigmapNamespace).Create(ctx, &cm, metaV1.CreateOptions{})
}
From 7fa6712b7f2ec48c0a4333d55d410e151e2e9d52 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:30:55 -0400
Subject: [PATCH 175/235] copied
admiral/pkg/controller/admiral/configmap_test.go from master
Signed-off-by: Shriram Sharma
---
.../pkg/controller/admiral/configmap_test.go | 111 +++++++++---------
1 file changed, 58 insertions(+), 53 deletions(-)
diff --git a/admiral/pkg/controller/admiral/configmap_test.go b/admiral/pkg/controller/admiral/configmap_test.go
index 99b684e9..46b8da55 100644
--- a/admiral/pkg/controller/admiral/configmap_test.go
+++ b/admiral/pkg/controller/admiral/configmap_test.go
@@ -2,7 +2,6 @@ package admiral
import (
"context"
- "errors"
"testing"
"time"
@@ -16,6 +15,10 @@ import (
)
func init() {
+ initConfig("se-address-configmap")
+}
+
+func initConfig(seAdressCM string) {
p := common.AdmiralParams{
KubeconfigPath: "testdata/fake.config",
LabelSet: &common.LabelSet{},
@@ -23,14 +26,16 @@ func init() {
SANPrefix: "prefix",
HostnameSuffix: "mesh",
SyncNamespace: "ns",
- CacheRefreshDuration: time.Minute,
+ CacheReconcileDuration: time.Minute,
ClusterRegistriesNamespace: "default",
DependenciesNamespace: "default",
- SecretResolver: "",
+ Profile: common.AdmiralProfileDefault,
+ SeAddressConfigmap: seAdressCM,
}
p.LabelSet.WorkloadIdentityKey = "identity"
- p.LabelSet.GlobalTrafficDeploymentLabel = "identity"
+ p.LabelSet.AdmiralCRDIdentityLabel = "identity"
+
p.LabelSet.EnvKey = "admiral.io/env"
common.InitializeConfig(p)
}
@@ -41,10 +46,7 @@ func TestConfigMapController_GetConfigMap(t *testing.T) {
}
client := fake.NewSimpleClientset()
- cm := v1.ConfigMap{}
- cm.Name = "se-address-configmap"
- cm.Namespace = "admiral"
- cm.Labels = map[string]string{"foo": "bar"} //differentiating from a new/empty cm
+ cm := createConfigMap("se-address-configmap", "admiral", map[string]string{"foo": "bar"}) //differentiating from a new/empty cm
ctx := context.Background()
_, err := client.CoreV1().ConfigMaps("admiral").Create(ctx, &cm, metav1.CreateOptions{})
if err != nil {
@@ -52,38 +54,74 @@ func TestConfigMapController_GetConfigMap(t *testing.T) {
}
configmapController.K8sClient = client
- emptyConfigmapController := ConfigMapController{
+ configmapController2 := ConfigMapController{
ConfigmapNamespace: "admiral",
}
+ client2 := fake.NewSimpleClientset()
+ cm2 := createConfigMap("se-address-configmap2", "admiral", map[string]string{"foo": "bar"}) //differentiating from a new/empty cm
+ ctx2 := context.Background()
+ _, err = client2.CoreV1().ConfigMaps("admiral").Create(ctx2, &cm2, metav1.CreateOptions{})
+ if err != nil {
+ t.Errorf("%v", err)
+ }
+ configmapController2.K8sClient = client2
+
+ emptyConfigmapController := ConfigMapController{
+ ConfigmapNamespace: "admiral",
+ }
emptyClient := fake.NewSimpleClientset()
- emptyCM := v1.ConfigMap{}
- emptyCM.Name = "se-address-configmap"
- emptyCM.Namespace = "admiral"
+ emptyCM := createConfigMap("se-address-configmap", "admiral", nil)
emptyConfigmapController.K8sClient = emptyClient
+ emptyConfigmapController2 := ConfigMapController{
+ ConfigmapNamespace: "admiral",
+ }
+ emptyClient2 := fake.NewSimpleClientset()
+ emptyCM2 := createConfigMap("se-address-configmap2", "admiral", nil)
+ emptyConfigmapController2.K8sClient = emptyClient2
+
testCases := []struct {
name string
configMapController *ConfigMapController
expectedConfigMap *v1.ConfigMap
+ seAdressCMName string
expectedError error
}{
{
- name: "should return confirmap",
+ name: "given default configmap name in AdmiralParams, should return configmap",
configMapController: &configmapController,
expectedConfigMap: &cm,
expectedError: nil,
},
{
- name: "should return newly created configmap",
+ name: "given default configmap name in AdmiralParams, should return newly created configmap",
configMapController: &emptyConfigmapController,
expectedConfigMap: &emptyCM,
expectedError: nil,
},
+ {
+ name: "given se-address-configmap2 in AdmiralParams, should return configmap with addressconfigmap2",
+ configMapController: &configmapController2,
+ expectedConfigMap: &cm2,
+ seAdressCMName: "se-address-configmap2",
+ expectedError: nil,
+ },
+ {
+ name: "given se-address-configmap2 in AdmiralParams, should return newly created configmap with addressconfigmap2",
+ configMapController: &emptyConfigmapController2,
+ expectedConfigMap: &emptyCM2,
+ seAdressCMName: "se-address-configmap2",
+ expectedError: nil,
+ },
}
for _, c := range testCases {
t.Run(c.name, func(t *testing.T) {
+ if len(c.seAdressCMName) > 0 {
+ common.ResetSync()
+ initConfig(c.seAdressCMName)
+ }
cm, err := c.configMapController.GetConfigMap(ctx)
if err == nil && c.expectedError == nil {
//we're fine
@@ -101,45 +139,12 @@ func TestConfigMapController_GetConfigMap(t *testing.T) {
}
}
-func TestNewConfigMapController(t *testing.T) {
- testCases := []struct {
- name string
- kubeconfigPath string
- namespace string
- expectedError error
- }{
- {
- name: "Fails creating an in-cluster config while out of a cluster",
- kubeconfigPath: "",
- namespace: "ns",
- expectedError: errors.New("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined"),
- },
- {
- name: "Kubeconfig config",
- kubeconfigPath: "../../test/resources/admins@fake-cluster.k8s.local",
- namespace: "ns",
- expectedError: nil,
- },
- }
-
- for _, c := range testCases {
- t.Run(c.name, func(t *testing.T) {
- common.SetKubeconfigPath(c.kubeconfigPath)
- controller, err := NewConfigMapController("240.0")
- if err == nil && c.expectedError == nil {
- //only do these in an error-less context
- if c.namespace != controller.ConfigmapNamespace {
- t.Errorf("Namespace mismatch. Expected %v but got %v", c.namespace, controller.ConfigmapNamespace)
- }
- if controller.K8sClient.CoreV1() == nil {
- t.Errorf("Clientset is nil")
- }
- } else if err.Error() != c.expectedError.Error() {
- t.Errorf("Error mismatch. Expected %v but got %v", c.expectedError, err)
- }
- })
- }
-
+func createConfigMap(name string, namespace string, labels map[string]string) v1.ConfigMap {
+ cm := v1.ConfigMap{}
+ cm.Name = name
+ cm.Namespace = namespace
+ cm.Labels = labels
+ return cm
}
func TestConfigMapController_PutConfigMap(t *testing.T) {
From 7162d84bf667c2d51820066ca7427decd71ba285 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:31:48 -0400
Subject: [PATCH 176/235] copied admiral/pkg/controller/admiral/controller.go
from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/controller/admiral/controller.go | 407 ++++++++++++++++---
1 file changed, 351 insertions(+), 56 deletions(-)
diff --git a/admiral/pkg/controller/admiral/controller.go b/admiral/pkg/controller/admiral/controller.go
index a8a6f690..85ce1e1b 100644
--- a/admiral/pkg/controller/admiral/controller.go
+++ b/admiral/pkg/controller/admiral/controller.go
@@ -3,11 +3,19 @@ package admiral
import (
"context"
"fmt"
+ "reflect"
+ "strings"
"time"
+ "github.com/google/uuid"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/util"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
+ commonUtil "github.com/istio-ecosystem/admiral/admiral/pkg/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
@@ -16,14 +24,34 @@ import (
)
const (
- maxRetries = 5
+ maxRetries = 2
+ // operations
+ operationInformerEvents = "informerEvents"
+ // tasks
+ taskAddEventToQueue = "addEventToQueue"
+ taskGetEventFromQueue = "getEventFromQueue"
+ taskSendEventToDelegator = "sendEventToDelegator"
+ taskReceiveEventFromDelegator = "receivedEventFromDelegator"
+ taskRequeueAttempt = "requeueAttempt"
+ taskGivingUpEvent = "givingUpEvent"
+ taskRequeueEvent = "requeueEvent"
+)
+
+var (
+ // Log Formats
+ ControllerLogFormat = "task=%v len=%v message=%v"
+ LogQueueFormat = "op=" + operationInformerEvents + " task=%v controller=%v cluster=%v len=%v message=%v"
)
// Delegator interface contains the methods that are required
type Delegator interface {
- Added(context.Context, interface{})
- Updated(context.Context, interface{}, interface{})
- Deleted(context.Context, interface{})
+ Added(context.Context, interface{}) error
+ Updated(context.Context, interface{}, interface{}) error
+ Deleted(context.Context, interface{}) error
+ UpdateProcessItemStatus(interface{}, string) error
+ GetProcessItemStatus(interface{}) (string, error)
+ LogValueOfAdmiralIoIgnore(interface{})
+ Get(ctx context.Context, isRetry bool, obj interface{}) (interface{}, error)
}
type EventType string
@@ -39,72 +67,202 @@ type InformerCacheObj struct {
eventType EventType
obj interface{}
oldObj interface{}
+ txId string
+ ctxLogger *log.Entry
}
type Controller struct {
name string
+ cluster string
delegator Delegator
queue workqueue.RateLimitingInterface
informer cache.SharedIndexInformer
}
-func NewController(name string, stopCh <-chan struct{}, delegator Delegator, informer cache.SharedIndexInformer) Controller {
-
+func NewController(name, clusterEndpoint string, stopCh <-chan struct{}, delegator Delegator, informer cache.SharedIndexInformer) Controller {
controller := Controller{
name: name,
+ cluster: clusterEndpoint,
informer: informer,
delegator: delegator,
queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
}
-
controller.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
+ var (
+ txId = uuid.NewString()
+ metaName, metaNamespace string
+ )
+ meta, ok := obj.(metav1.Object)
+ if ok && meta != nil && meta.GetResourceVersion() != "" {
+ txId = common.GenerateTxId(meta, controller.name, txId)
+ metaName = meta.GetName()
+ metaNamespace = meta.GetNamespace()
+ }
+ ctxLogger := log.WithFields(log.Fields{
+ "op": operationInformerEvents,
+ "name": metaName,
+ "namespace": metaNamespace,
+ "controller": controller.name,
+ "cluster": controller.cluster,
+ "txId": txId,
+ })
key, err := cache.MetaNamespaceKeyFunc(obj)
if err == nil {
- log.Infof("Informer Add controller=%v obj=%v", controller.name, key)
- controller.queue.Add(InformerCacheObj{key: key, eventType: Add, obj: obj})
+ ctxLogger.Infof(ControllerLogFormat, taskAddEventToQueue, controller.queue.Len(), Add+" Event")
+ controller.queue.Add(InformerCacheObj{
+ key: key,
+ eventType: Add,
+ obj: obj,
+ txId: txId,
+ ctxLogger: ctxLogger,
+ })
}
-
},
UpdateFunc: func(oldObj, newObj interface{}) {
+ var (
+ ctx = context.Background()
+ txId = uuid.NewString()
+ metaName, metaNamespace string
+ )
+ meta, ok := newObj.(metav1.Object)
+ if ok && meta != nil && meta.GetResourceVersion() != "" {
+ txId = common.GenerateTxId(meta, controller.name, txId)
+ metaName = meta.GetName()
+ metaNamespace = meta.GetNamespace()
+ }
+ ctx = context.WithValue(ctx, "txId", txId)
+ ctxLogger := log.WithFields(log.Fields{
+ "op": operationInformerEvents,
+ "name": metaName,
+ "namespace": metaNamespace,
+ "controller": controller.name,
+ "cluster": controller.cluster,
+ "txId": txId,
+ })
+
key, err := cache.MetaNamespaceKeyFunc(newObj)
if err == nil {
- log.Infof("Informer Update controller=%v obj=%v", controller.name, key)
- controller.queue.Add(InformerCacheObj{key: key, eventType: Update, obj: newObj, oldObj: oldObj})
+ ctxLogger.Infof(ControllerLogFormat, taskAddEventToQueue, controller.queue.Len(), Update+" Event")
+ // Check if the event has already been processed or the resource version
+ // has changed. If either the event has not been processed yet or the
+ // resource version has changed only then add it to the queue
+
+ status, err := controller.delegator.GetProcessItemStatus(newObj)
+ if err != nil {
+ ctxLogger.Errorf(err.Error())
+ }
+ controller.delegator.LogValueOfAdmiralIoIgnore(newObj)
+ latestObj, isVersionChanged := checkIfResourceVersionHasIncreased(ctxLogger, ctx, oldObj, newObj, delegator)
+ txId, ctxLogger = updateTxId(ctx, newObj, latestObj, txId, ctxLogger, controller)
+
+ if status == common.NotProcessed || isVersionChanged {
+ ctxLogger.Infof(ControllerLogFormat, taskAddEventToQueue, controller.queue.Len(),
+ fmt.Sprintf("version changed=%v", isVersionChanged))
+ controller.queue.Add(
+ InformerCacheObj{
+ key: key,
+ eventType: Update,
+ obj: latestObj,
+ oldObj: oldObj,
+ txId: txId,
+ ctxLogger: ctxLogger,
+ })
+ // If the pod is running in Active Mode we update the status to ProcessingInProgress
+ // to prevent any duplicate events that might be added to the queue if there is full
+ // resync that happens and a similar event in the queue is not processed yet
+ if !commonUtil.IsAdmiralReadOnly() {
+ ctxLogger.Infof(ControllerLogFormat, taskAddEventToQueue, controller.queue.Len(),
+ "status=%s", common.ProcessingInProgress)
+ controller.delegator.UpdateProcessItemStatus(latestObj, common.ProcessingInProgress)
+ }
+ }
}
},
DeleteFunc: func(obj interface{}) {
+ var (
+ txId = uuid.NewString()
+ )
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err == nil {
- log.Infof("Informer Delete controller=%v obj=%v", controller.name, key)
- controller.queue.Add(InformerCacheObj{key: key, eventType: Delete, obj: obj})
+ meta, ok := obj.(metav1.Object)
+ var metaName, metaNamespace string
+ if ok && meta != nil && meta.GetResourceVersion() != "" {
+ txId = common.GenerateTxId(meta, controller.name, txId)
+ metaName = meta.GetName()
+ metaNamespace = meta.GetNamespace()
+ }
+ ctxLogger := log.WithFields(log.Fields{
+ "op": operationInformerEvents,
+ "name": metaName,
+ "namespace": metaNamespace,
+ "controller": controller.name,
+ "cluster": controller.cluster,
+ "txId": txId,
+ })
+ ctxLogger.Infof(ControllerLogFormat, taskAddEventToQueue, controller.queue.Len(), Delete+" Event")
+ controller.queue.Add(
+ InformerCacheObj{
+ key: key,
+ eventType: Delete,
+ obj: obj,
+ txId: txId,
+ ctxLogger: ctxLogger,
+ })
}
},
})
-
go controller.Run(stopCh)
-
return controller
}
+func updateTxId(
+ ctx context.Context,
+ newObj, latestObj interface{},
+ txId string,
+ ctxLogger *log.Entry,
+ controller Controller) (string, *log.Entry) {
+ lMeta, ok := latestObj.(metav1.Object)
+ if ok && lMeta.GetResourceVersion() != "" {
+ nMeta, ok := newObj.(metav1.Object)
+ if ok && nMeta.GetResourceVersion() != lMeta.GetResourceVersion() {
+ txId = common.GenerateTxId(lMeta, controller.name, txId)
+ ctxLogger = log.WithFields(log.Fields{
+ "op": operationInformerEvents,
+ "controller": controller.name,
+ "cluster": controller.cluster,
+ "txId": txId,
+ })
+ }
+ }
+ return txId, ctxLogger
+}
+
// Run starts the controller until it receives a message over stopCh
func (c *Controller) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()
- log.Infof("Starting controller=%v", c.name)
+ log.Infof("Starting controller=%v cluster=%v", c.name, c.cluster)
go c.informer.Run(stopCh)
// Wait for the caches to be synced before starting workers
- log.Infof(" Waiting for informer caches to sync for controller=%v", c.name)
+ log.Infof("Waiting for informer caches to sync for controller=%v cluster=%v", c.name, c.cluster)
if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) {
- utilruntime.HandleError(fmt.Errorf(" timed out waiting for caches to sync for controller=%v", c.name))
+ utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync for controller=%v cluster=%v", c.name, c.cluster))
return
}
- log.Infof("Informer caches synced for controller=%v, current keys=%v", c.name, c.informer.GetStore().ListKeys())
-
+ log.Infof("Informer caches synced for controller=%v cluster=%v, current keys=%v", c.name, c.cluster, c.informer.GetStore().ListKeys())
+ concurrency := 1
+ if strings.Contains(c.name, deploymentControllerPrefix) || strings.Contains(c.name, rolloutControllerPrefix) {
+ concurrency = common.DeploymentOrRolloutWorkerConcurrency()
+ log.Infof("controller=%v cluster=%v concurrency=%v", c.name, c.cluster, concurrency)
+ }
+ for i := 0; i < concurrency-1; i++ {
+ go wait.Until(c.runWorker, 5*time.Second, stopCh)
+ }
wait.Until(c.runWorker, 5*time.Second, stopCh)
}
@@ -112,26 +270,68 @@ func (c *Controller) runWorker() {
for c.processNextItem() {
// continue looping
}
+ log.Errorf("Shutting Down controller=%v cluster=%v", c.name, c.cluster)
}
func (c *Controller) processNextItem() bool {
- item, quit := c.queue.Get()
- if quit {
+ item, quit := c.queue.Get()
+ if item == nil || quit {
return false
}
+
+ log.Infof(LogQueueFormat, taskGetEventFromQueue, c.name, c.cluster, c.queue.Len(), "current queue length")
+
defer c.queue.Done(item)
+ informerCache, ok := item.(InformerCacheObj)
+ if !ok {
+ return true
+ }
+ var (
+ txId string
+ err error
+ processEvent = true
+ ctx = context.Background()
+ )
+
+ txId = informerCache.txId
+ ctx = context.WithValue(ctx, "txId", txId)
+ ctxLogger := informerCache.ctxLogger
+ if c.queue.NumRequeues(item) > 0 {
+ ctxLogger.Infof(ControllerLogFormat, taskRequeueAttempt, c.queue.Len(),
+ fmt.Sprintf("retryCount=%d", c.queue.NumRequeues(item)))
+ processEvent = shouldRetry(ctxLogger, ctx, informerCache.obj, c.delegator)
+ }
+ if processEvent {
+ err = c.processItem(item.(InformerCacheObj))
+ } else {
+ ctxLogger.Infof(ControllerLogFormat, taskRequeueAttempt, c.queue.Len(),
+ fmt.Sprintf("stale event will not be retried. newer event was already processed"))
+ c.queue.Forget(item)
+ return true
+ }
- err := c.processItem(item.(InformerCacheObj))
if err == nil {
- // No error, reset the ratelimit counters
+ // No error, forget item
c.queue.Forget(item)
} else if c.queue.NumRequeues(item) < maxRetries {
- log.Errorf("Error processing %s (will retry): %v", item, err)
- c.queue.AddRateLimited(item)
+ ctxLogger.Errorf(ControllerLogFormat, taskRequeueAttempt, c.queue.Len(), "checking if event is eligible for requeueing. error="+err.Error())
+ processRetry := shouldRetry(ctxLogger, ctx, item.(InformerCacheObj).obj, c.delegator)
+ if processRetry {
+ ctxLogger.Infof(ControllerLogFormat, taskRequeueAttempt, c.queue.Len(),
+ fmt.Sprintf("event is eligible for retry. retryCount=%v", c.queue.NumRequeues(item)))
+ c.queue.AddRateLimited(item)
+ } else {
+ ctxLogger.Infof(ControllerLogFormat, taskRequeueAttempt, c.queue.Len(),
+ fmt.Sprintf("event is not eligible for retry. forgetting event. retryCount=%v", c.queue.NumRequeues(item)))
+ c.queue.Forget(item)
+ }
} else {
- log.Errorf("Error processing %s (giving up): %v", item, err)
+ ctxLogger.Errorf(ControllerLogFormat, taskGivingUpEvent, c.queue.Len(), "not requeueing. error="+err.Error())
c.queue.Forget(item)
+ // If the controller is not able to process the event even after retries due to
+ // errors we mark it as NotProcessed
+ c.delegator.UpdateProcessItemStatus(item.(InformerCacheObj).obj, common.NotProcessed)
utilruntime.HandleError(err)
}
@@ -139,42 +339,137 @@ func (c *Controller) processNextItem() bool {
}
func (c *Controller) processItem(informerCacheObj InformerCacheObj) error {
- ctx := context.Background()
+ var (
+ ctx = context.Background()
+ txId = informerCacheObj.txId
+ ctxLogger = informerCacheObj.ctxLogger
+ )
+ ctxLogger.Infof(ControllerLogFormat, taskSendEventToDelegator, c.queue.Len(), "processing event")
+ defer util.LogElapsedTimeController(
+ ctxLogger, fmt.Sprintf(ControllerLogFormat, taskSendEventToDelegator, c.queue.Len(), "processingTime"))()
+ ctx = context.WithValue(ctx, "txId", txId)
+ ctx = context.WithValue(ctx, "controller", c.name)
+ var err error
if informerCacheObj.eventType == Delete {
- c.delegator.Deleted(ctx, informerCacheObj.obj)
+ err = c.delegator.Deleted(ctx, informerCacheObj.obj)
} else if informerCacheObj.eventType == Update {
- c.delegator.Updated(ctx, informerCacheObj.obj, informerCacheObj.oldObj)
+ err = c.delegator.Updated(ctx, informerCacheObj.obj, informerCacheObj.oldObj)
} else if informerCacheObj.eventType == Add {
- c.delegator.Added(ctx, informerCacheObj.obj)
+ err = c.delegator.Added(ctx, informerCacheObj.obj)
}
- return nil
-}
-type MonitoredDelegator struct {
- clusterID string
- objectType string
- d Delegator
-}
-
-func NewMonitoredDelegator(d Delegator, clusterID string, objectType string) *MonitoredDelegator {
- return &MonitoredDelegator{
- clusterID: clusterID,
- objectType: objectType,
- d: d,
+ // processItemStatus is set to:
+ // 1. Processed only if there are no errors and Admiral is not in read only mode
+ // 2. ProcessingInProgress if not in read only mode but there are errors
+ // 3. NotProcessed if it is in read only mode
+ processItemStatus := common.NotProcessed
+ if !commonUtil.IsAdmiralReadOnly() {
+ processItemStatus = common.ProcessingInProgress
+ if err == nil {
+ processItemStatus = common.Processed
+ }
}
+ ctxLogger.Infof(ControllerLogFormat, taskReceiveEventFromDelegator, c.queue.Len(), "status="+processItemStatus)
+ c.delegator.UpdateProcessItemStatus(informerCacheObj.obj, processItemStatus)
+ return err
}
-func (s *MonitoredDelegator) Added(ctx context.Context, obj interface{}) {
- common.EventsProcessed.With(s.clusterID, s.objectType, common.AddEventLabelValue).Inc()
- s.d.Added(ctx, obj)
-}
-
-func (s *MonitoredDelegator) Updated(ctx context.Context, obj interface{}, oldObj interface{}) {
- common.EventsProcessed.With(s.clusterID, s.objectType, common.UpdateEventLabelValue).Inc()
- s.d.Updated(ctx, obj, oldObj)
+// checkIfResourceVersionHasIncreased compares old object, with the new obj
+// and returns true, along with the object which should be processed.
+// It returns true when:
+// 1. new version > old version
+// 2. new version < old version:
+// When new version had been reset after reaching the max value
+// which could be assigned to it.
+//
+// For all other cases it returns false, which signals that the object
+// should not be processed, because:
+// 1. It was already processed
+// 2. It is an older object
+func checkIfResourceVersionHasIncreased(ctxLogger *logrus.Entry, ctx context.Context, oldObj, newObj interface{}, delegator Delegator) (interface{}, bool) {
+ oldObjMeta, oldOk := oldObj.(metav1.Object)
+ newObjMeta, newOk := newObj.(metav1.Object)
+
+ if oldOk && newOk && oldObjMeta.GetResourceVersion() == newObjMeta.GetResourceVersion() {
+ return oldObj, false
+ }
+ if oldOk && newOk && oldObjMeta.GetResourceVersion() > newObjMeta.GetResourceVersion() {
+ if reflect.ValueOf(delegator).IsNil() {
+ return oldObj, true
+ }
+ // if old version is > new version then this could be due to:
+ // 1. An old object was requeued because of retry, which now comes as new object
+ // 2. The new object version is lower than old object version because the
+ // version had reached the maximum value, and was reset to a lower
+ // value by kubernetes
+ ctxLogger.Infof("task=CheckIfResourceVersionHasIncreased message=new resource version is smaller than old resource version, checking if this is due to resourceVersion wrapping around")
+ var (
+ maxRetry = 5
+ latestObj interface{}
+ err error
+ )
+
+ err = common.RetryWithBackOff(ctx, func() error {
+ latestObj, err = delegator.Get(ctx, false, newObj)
+ return err
+ }, maxRetry)
+ if err != nil {
+ ctxLogger.Errorf("task=CheckIfResourceVersionHasIncreased message=unable to fetch latest object from kubernetes after %d retries, giving up querying obj from API server, old obj=%+v, new obj=%+v",
+ maxRetry, oldObjMeta, latestObj)
+ return newObj, true
+ }
+ // event 1 ==> processed
+ // event 2, 3 ==> happen simultaneously, 3 is expected to be final state
+ // event 3 ==> processed
+ // event 2 ==> ready to be processed ==> this event is in the new object, passed into this function
+ // the below check will ensure that this is the case
+ // as it fetches the latest object from kubernetes, and finds it was
+ // event 3, which is nothing but old object
+ latestObjMeta, latestOk := latestObj.(metav1.Object)
+ if latestOk && oldObjMeta.GetResourceVersion() == latestObjMeta.GetResourceVersion() {
+ ctxLogger.Infof("task=CheckIfResourceVersionHasIncreased message=not processing resource version=%v, because it is stale, and was added to the queue due to a retry. version=%v was already processed",
+ newObjMeta.GetResourceVersion(),
+ latestObjMeta.GetResourceVersion())
+ return oldObj, false
+ }
+ ctxLogger.Infof("task=CheckIfResourceVersionHasIncreased message=new version is less than old version, which is because it was wrapped around by kubernetes, after reaching max allowable value")
+ return latestObj, true
+ }
+ return newObj, true
}
-func (s *MonitoredDelegator) Deleted(ctx context.Context, obj interface{}) {
- common.EventsProcessed.With(s.clusterID, s.objectType, common.DeleteEventLabelValue).Inc()
- s.d.Deleted(ctx, obj)
+func shouldRetry(ctxLogger *logrus.Entry, ctx context.Context, obj interface{}, delegator Delegator) bool {
+ objMeta, ok := obj.(metav1.Object)
+ if ok {
+ if reflect.ValueOf(objMeta).IsNil() || reflect.ValueOf(delegator).IsNil() {
+ return true
+ }
+ objFromCache, err := delegator.Get(ctx, true, obj)
+ if err != nil {
+ ctxLogger.Errorf("task=shouldRetry message=unable to fetch latest object from cache, obj received=%+v", objMeta)
+ return true
+ }
+ latestObjMeta, latestOk := objFromCache.(metav1.Object)
+ if !latestOk || reflect.ValueOf(latestObjMeta).IsNil() {
+ ctxLogger.Errorf("task=shouldRetry message=unable to cast latest object from cache to metav1 object, obj received=%+v", objMeta)
+ return true
+ }
+ // event 1 ==> processed
+ // event 2 ==> failed
+ // event 3 ==> processed
+ // event 2 ==> requeued ==> this event is in the object, passed into this function
+ // the below check will ensure that this is the case
+ // as it fetches the latest object from cache, and finds it was
+ // event 3, which is a newer event
+
+ if objMeta.GetResourceVersion() < latestObjMeta.GetResourceVersion() {
+ ctxLogger.Infof("task=shouldRetry message=not processing resource version=%v, because it is stale, and was added to the queue due to a retry. version=%v was already processed",
+ objMeta.GetResourceVersion(), latestObjMeta.GetResourceVersion())
+ return false
+ }
+ // TODO: Wrap around check- make Kube API server call to get the latest object and ensure
+ // we do not retry when the resource version has been wrapped around. Implementation similar to checkIfResourceVersionHasIncreased.
+ }
+ ctxLogger.Errorf("task=shouldRetry message=obj parsed=%v, retrying object, obj received=%+v", ok, objMeta)
+ return true
}
From 64c6913174e72bf4db7c8506fca7bfb0ad66c53c Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:32:03 -0400
Subject: [PATCH 177/235] copied
admiral/pkg/controller/admiral/controller_test.go from master
Signed-off-by: Shriram Sharma
---
.../pkg/controller/admiral/controller_test.go | 280 +++++++++++++++---
1 file changed, 238 insertions(+), 42 deletions(-)
diff --git a/admiral/pkg/controller/admiral/controller_test.go b/admiral/pkg/controller/admiral/controller_test.go
index 53325a34..c2817bf4 100644
--- a/admiral/pkg/controller/admiral/controller_test.go
+++ b/admiral/pkg/controller/admiral/controller_test.go
@@ -4,53 +4,249 @@ import (
"context"
"testing"
+ log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
+ k8sAppsV1 "k8s.io/api/apps/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-func TestMonitoredDelegator_Added(t *testing.T) {
- td := &TestDelegator{}
- d := NewMonitoredDelegator(td, "test", "test")
- d.Added(context.Background(), nil)
+func TestCheckIfResourceVersionIsChanged(t *testing.T) {
+ var (
+ ctxLogger = log.WithFields(log.Fields{
+ "txId": "abc",
+ })
+ ctx = context.Background()
+ resourceName1 = "resource-name-1"
+ resourceNameSpace1 = "resource-namespace-1"
+ )
+ testCases := []struct {
+ name string
+ oldObj interface{}
+ newObj interface{}
+ delegator *MockDelegator
+ resourceName string
+ resourceNamespace string
+ latestObjInKubernetes interface{}
+ expectedResult bool
+ }{
+ {
+ name: "Given an update event " +
+ "When the resource version for both the object is the same " +
+ "Then func should return false",
+ oldObj: &k8sAppsV1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ ResourceVersion: "1111",
+ },
+ },
+ newObj: &k8sAppsV1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ ResourceVersion: "1111",
+ },
+ },
+ expectedResult: false,
+ },
+ {
+ name: "Given an update event " +
+ "When the resource version of new object is greater than resource version of old object, " +
+ "Then func should return true",
+ oldObj: &k8sAppsV1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ ResourceVersion: "1111",
+ },
+ },
+ newObj: &k8sAppsV1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ ResourceVersion: "9999",
+ },
+ },
+ expectedResult: true,
+ },
+ {
+ name: "Given an update event " +
+ "When the resource version for the new object is smaller than the old object, " +
+ "When the new object was added because it was a retry event, " +
+ "When delegator is not initialized, " +
+ "Then func should return true",
+ oldObj: &k8sAppsV1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ ResourceVersion: "2",
+ },
+ },
+ newObj: &k8sAppsV1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ ResourceVersion: "1",
+ },
+ },
+ delegator: nil,
+ expectedResult: true,
+ },
+ {
+ name: "Given an update event " +
+ "When the resource version for the new object is smaller than the old object, " +
+ "When the new object was added because it was a retry event, " +
+ "When delegator is initialized, " +
+ "Then func should return false",
+ oldObj: &k8sAppsV1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: resourceName1,
+ Namespace: resourceNameSpace1,
+ ResourceVersion: "2",
+ },
+ },
+ newObj: &k8sAppsV1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: resourceName1,
+ Namespace: resourceNameSpace1,
+ ResourceVersion: "1",
+ },
+ },
+ delegator: NewMockDelegator(),
+ resourceName: resourceName1,
+ resourceNamespace: resourceNameSpace1,
+ latestObjInKubernetes: &k8sAppsV1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: resourceName1,
+ Namespace: resourceNameSpace1,
+ ResourceVersion: "2",
+ },
+ },
+ expectedResult: false,
+ },
+ {
+ name: "Given an update event " +
+ "When the resource version for the new object is smaller than the old object, " +
+ "When the new object was added because the resource version was reset , " +
+ "When delegator is initialized, " +
+ "Then func should return true",
+ oldObj: &k8sAppsV1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: resourceName1,
+ Namespace: resourceNameSpace1,
+ ResourceVersion: "2",
+ },
+ },
+ newObj: &k8sAppsV1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: resourceName1,
+ Namespace: resourceNameSpace1,
+ ResourceVersion: "1",
+ },
+ },
+ delegator: NewMockDelegator(),
+ resourceName: resourceName1,
+ resourceNamespace: resourceNameSpace1,
+ latestObjInKubernetes: &k8sAppsV1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: resourceName1,
+ Namespace: resourceNameSpace1,
+ ResourceVersion: "1",
+ },
+ },
+ expectedResult: true,
+ },
+ }
- assert.True(t, td.AddedInvoked)
- assert.False(t, td.DeleteInvoked)
- assert.False(t, td.UpdatedInvoked)
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ if c.delegator != nil {
+ c.delegator.SetGetReturn(c.latestObjInKubernetes, nil)
+ }
+ var result bool
+ _, result = checkIfResourceVersionHasIncreased(ctxLogger, ctx, c.oldObj, c.newObj, c.delegator)
+ assert.Equal(t, c.expectedResult, result)
+ })
+ }
}
-func TestMonitoredDelegator_Deleted(t *testing.T) {
- td := &TestDelegator{}
- d := NewMonitoredDelegator(td, "test", "test")
- d.Deleted(context.Background(), nil)
+func TestShouldRetry(t *testing.T) {
+ var (
+ ctxLogger = log.WithFields(log.Fields{
+ "txId": "abc",
+ })
+ ctx = context.Background()
+ resourceName1 = "resource-name-1"
+ resourceNameSpace1 = "resource-namespace-1"
+ )
+ testCases := []struct {
+ name string
+ obj interface{}
+ delegator *MockDelegator
+ resourceName string
+ resourceNamespace string
+ latestObjInKubernetes interface{}
+ expectedResult bool
+ }{
+ {
+ name: "Given an update event " +
+ "When the resource version for both the object is the same " +
+ "When the new object was added because it was a retry event " +
+ "Then func should return true",
+ obj: &k8sAppsV1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ ResourceVersion: "1111",
+ },
+ },
+ delegator: nil,
+ expectedResult: true,
+ resourceName: resourceName1,
+ resourceNamespace: resourceNameSpace1,
+ latestObjInKubernetes: &k8sAppsV1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ ResourceVersion: "1111",
+ },
+ },
+ },
+ {
+ name: "Given an update event " +
+ "When the resource version for the new object is smaller than the old object " +
+ "When the new object was added because it was a retry event " +
+ "When delegator is initialized" +
+ "Then func should return false",
+ obj: &k8sAppsV1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: resourceName1,
+ Namespace: resourceNameSpace1,
+ ResourceVersion: "2",
+ },
+ },
+ delegator: NewMockDelegator(),
+ resourceName: resourceName1,
+ resourceNamespace: resourceNameSpace1,
+ latestObjInKubernetes: &k8sAppsV1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: resourceName1,
+ Namespace: resourceNameSpace1,
+ ResourceVersion: "3",
+ },
+ },
+ expectedResult: false,
+ },
+ {
+ name: "Given an update event " +
+ "When the resource version for the new object is smaller than the old object, " +
+ "When the new object was added because of a retry " +
+ "When delegator is nil, " +
+ "Then func should return true",
+ obj: &k8sAppsV1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: resourceName1,
+ Namespace: resourceNameSpace1,
+ ResourceVersion: "2",
+ },
+ },
+ delegator: nil,
+ expectedResult: true,
+ },
+ }
- assert.False(t, td.AddedInvoked)
- assert.True(t, td.DeleteInvoked)
- assert.False(t, td.UpdatedInvoked)
-}
-
-func TestMonitoredDelegator_Updated(t *testing.T) {
- td := &TestDelegator{}
- d := NewMonitoredDelegator(td, "test", "test")
- d.Updated(context.Background(), nil, nil)
-
- assert.False(t, td.AddedInvoked)
- assert.False(t, td.DeleteInvoked)
- assert.True(t, td.UpdatedInvoked)
-}
-
-type TestDelegator struct {
- AddedInvoked bool
- UpdatedInvoked bool
- DeleteInvoked bool
-}
-
-func (t *TestDelegator) Added(context.Context, interface{}) {
- t.AddedInvoked = true
-}
-
-func (t *TestDelegator) Updated(context.Context, interface{}, interface{}) {
- t.UpdatedInvoked = true
-}
-
-func (t *TestDelegator) Deleted(context.Context, interface{}) {
- t.DeleteInvoked = true
+ for _, c := range testCases {
+ t.Run(c.name, func(t *testing.T) {
+ if c.delegator != nil {
+ c.delegator.SetGetReturn(c.latestObjInKubernetes, nil)
+ }
+ var result bool
+ result = shouldRetry(ctxLogger, ctx, c.obj, c.delegator)
+ assert.Equal(t, c.expectedResult, result)
+ })
+ }
}
From cf175af0bd575a5836ac142f306e582c656c2bf4 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:32:40 -0400
Subject: [PATCH 178/235] Added
admiral/pkg/controller/admiral/delegator_mock_test.go from master
Signed-off-by: Shriram Sharma
---
.../controller/admiral/delegator_mock_test.go | 41 +++++++++++++++++++
1 file changed, 41 insertions(+)
create mode 100644 admiral/pkg/controller/admiral/delegator_mock_test.go
diff --git a/admiral/pkg/controller/admiral/delegator_mock_test.go b/admiral/pkg/controller/admiral/delegator_mock_test.go
new file mode 100644
index 00000000..37ca703c
--- /dev/null
+++ b/admiral/pkg/controller/admiral/delegator_mock_test.go
@@ -0,0 +1,41 @@
+package admiral
+
+import (
+ "context"
+)
+
+type MockDelegator struct {
+ obj interface{}
+ getErr error
+}
+
+func NewMockDelegator() *MockDelegator {
+ return &MockDelegator{}
+}
+
+func (m *MockDelegator) SetGetReturn(obj interface{}, err error) {
+ m.obj = obj
+ m.getErr = err
+}
+
+func (m *MockDelegator) Added(context.Context, interface{}) error {
+ return nil
+}
+func (m *MockDelegator) Updated(context.Context, interface{}, interface{}) error {
+ return nil
+}
+func (m *MockDelegator) Deleted(context.Context, interface{}) error {
+ return nil
+}
+func (m *MockDelegator) UpdateProcessItemStatus(interface{}, string) error {
+ return nil
+}
+func (m *MockDelegator) GetProcessItemStatus(interface{}) (string, error) {
+ return "", nil
+}
+func (m *MockDelegator) LogValueOfAdmiralIoIgnore(interface{}) {
+ return
+}
+func (m *MockDelegator) Get(context.Context, bool, interface{}) (interface{}, error) {
+ return m.obj, m.getErr
+}
From 15fcd7599088e1b1986faaea77395c6f628611f7 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:33:57 -0400
Subject: [PATCH 179/235] Added admiral/pkg/controller/admiral/dependency.go
from master
Signed-off-by: Shriram Sharma
---
admiral/pkg/controller/admiral/dependency.go | 141 ++++++++++++++++---
1 file changed, 118 insertions(+), 23 deletions(-)
diff --git a/admiral/pkg/controller/admiral/dependency.go b/admiral/pkg/controller/admiral/dependency.go
index 45d7a1f9..8048ab29 100644
--- a/admiral/pkg/controller/admiral/dependency.go
+++ b/admiral/pkg/controller/admiral/dependency.go
@@ -6,19 +6,28 @@ import (
"sync"
"time"
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
+ meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
clientset "github.com/istio-ecosystem/admiral/admiral/pkg/client/clientset/versioned"
- informerV1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/informers/externalversions/admiral/v1"
+ informerV1 "github.com/istio-ecosystem/admiral/admiral/pkg/client/informers/externalversions/admiral/v1alpha1"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
+)
+
+const (
+ dependencyControllerPrefix = "dependency-ctrl"
)
// DepHandler interface contains the methods that are required
type DepHandler interface {
- Added(ctx context.Context, obj *v1.Dependency)
- Updated(ctx context.Context, obj *v1.Dependency)
- Deleted(ctx context.Context, obj *v1.Dependency)
+ Added(ctx context.Context, obj *v1.Dependency) error
+ Updated(ctx context.Context, obj *v1.Dependency) error
+ Deleted(ctx context.Context, obj *v1.Dependency) error
}
type DependencyController struct {
@@ -29,17 +38,26 @@ type DependencyController struct {
informer cache.SharedIndexInformer
}
+type DependencyItem struct {
+ Dependency *v1.Dependency
+ Status string
+}
+
type depCache struct {
//map of dependencies key=identity value array of onboarded identitys
- cache map[string]*v1.Dependency
+ cache map[string]*DependencyItem
mutex *sync.Mutex
}
func (d *depCache) Put(dep *v1.Dependency) {
defer d.mutex.Unlock()
d.mutex.Lock()
+
key := d.getKey(dep)
- d.cache[key] = dep
+ d.cache[key] = &DependencyItem{
+ Dependency: dep,
+ Status: common.ProcessingInProgress,
+ }
}
func (d *depCache) getKey(dep *v1.Dependency) string {
@@ -49,7 +67,13 @@ func (d *depCache) getKey(dep *v1.Dependency) string {
func (d *depCache) Get(identity string) *v1.Dependency {
defer d.mutex.Unlock()
d.mutex.Lock()
- return d.cache[identity]
+
+ depItem, ok := d.cache[identity]
+ if ok {
+ return depItem.Dependency
+ }
+
+ return nil
}
func (d *depCache) Delete(dep *v1.Dependency) {
@@ -58,24 +82,55 @@ func (d *depCache) Delete(dep *v1.Dependency) {
delete(d.cache, d.getKey(dep))
}
-func NewDependencyController(stopCh <-chan struct{}, handler DepHandler, configPath string, namespace string, resyncPeriod time.Duration) (*DependencyController, error) {
+func (d *depCache) GetDependencyProcessStatus(dep *v1.Dependency) string {
+ defer d.mutex.Unlock()
+ d.mutex.Lock()
+
+ key := d.getKey(dep)
+
+ depItem, ok := d.cache[key]
+ if ok {
+ return depItem.Status
+ }
+
+ return common.NotProcessed
+}
+
+func (d *depCache) UpdateDependencyProcessStatus(dep *v1.Dependency, status string) error {
+ defer d.mutex.Unlock()
+ d.mutex.Lock()
+
+ key := d.getKey(dep)
+
+ depItem, ok := d.cache[key]
+ if ok {
+ depItem.Status = status
+ d.cache[key] = depItem
+ return nil
+ }
+
+ return fmt.Errorf(LogCacheFormat, "Update", "Dependency",
+ dep.Name, dep.Namespace, "", "nothing to update, dependency not found in cache")
+}
+
+func NewDependencyController(stopCh <-chan struct{}, handler DepHandler, configPath string, namespace string, resyncPeriod time.Duration, clientLoader loader.ClientLoader) (*DependencyController, error) {
depController := DependencyController{}
depController.DepHandler = handler
depCache := depCache{}
- depCache.cache = make(map[string]*v1.Dependency)
+ depCache.cache = make(map[string]*DependencyItem)
depCache.mutex = &sync.Mutex{}
depController.Cache = &depCache
var err error
- depController.K8sClient, err = K8sClientFromPath(configPath)
+ depController.K8sClient, err = clientLoader.LoadKubeClientFromPath(configPath)
if err != nil {
return nil, fmt.Errorf("failed to create dependency controller k8s client: %v", err)
}
- depController.DepCrdClient, err = AdmiralCrdClientFromPath(configPath)
+ depController.DepCrdClient, err = clientLoader.LoadAdmiralClientFromPath(configPath)
if err != nil {
return nil, fmt.Errorf("failed to create dependency controller crd client: %v", err)
@@ -88,26 +143,66 @@ func NewDependencyController(stopCh <-chan struct{}, handler DepHandler, configP
cache.Indexers{},
)
- mcd := NewMonitoredDelegator(&depController, "primary", "dependency")
- NewController("dependency-ctrl-"+namespace, stopCh, mcd, depController.informer)
+ NewController(dependencyControllerPrefix+"-"+namespace, "", stopCh, &depController, depController.informer)
return &depController, nil
}
-func (d *DependencyController) Added(ctx context.Context, ojb interface{}) {
- dep := ojb.(*v1.Dependency)
+func (d *DependencyController) Added(ctx context.Context, obj interface{}) error {
+ dep, ok := obj.(*v1.Dependency)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1.Dependency", obj)
+ }
+
d.Cache.Put(dep)
- d.DepHandler.Added(ctx, dep)
+ return d.DepHandler.Added(ctx, dep)
}
-func (d *DependencyController) Updated(ctx context.Context, obj interface{}, oldObj interface{}) {
- dep := obj.(*v1.Dependency)
+func (d *DependencyController) Updated(ctx context.Context, obj interface{}, oldObj interface{}) error {
+ dep, ok := obj.(*v1.Dependency)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1.Dependency", obj)
+ }
+
d.Cache.Put(dep)
- d.DepHandler.Updated(ctx, dep)
+ return d.DepHandler.Updated(ctx, dep)
}
-func (d *DependencyController) Deleted(ctx context.Context, ojb interface{}) {
- dep := ojb.(*v1.Dependency)
+func (d *DependencyController) Deleted(ctx context.Context, obj interface{}) error {
+ dep, ok := obj.(*v1.Dependency)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1.Dependency", obj)
+ }
d.Cache.Delete(dep)
- d.DepHandler.Deleted(ctx, dep)
+ return d.DepHandler.Deleted(ctx, dep)
+}
+
+func (d *DependencyController) GetProcessItemStatus(obj interface{}) (string, error) {
+ dependency, ok := obj.(*v1.Dependency)
+ if !ok {
+ return common.NotProcessed, fmt.Errorf("type assertion failed, %v is not of type *v1.Dependency", obj)
+ }
+ return d.Cache.GetDependencyProcessStatus(dependency), nil
+}
+
+func (d *DependencyController) UpdateProcessItemStatus(obj interface{}, status string) error {
+ dependency, ok := obj.(*v1.Dependency)
+ if !ok {
+ return fmt.Errorf("type assertion failed, %v is not of type *v1.Dependency", obj)
+ }
+ return d.Cache.UpdateDependencyProcessStatus(dependency, status)
+}
+
+func (d *DependencyController) LogValueOfAdmiralIoIgnore(obj interface{}) {
+}
+
+func (d *DependencyController) Get(ctx context.Context, isRetry bool, obj interface{}) (interface{}, error) {
+ dep, ok := obj.(*v1.Dependency)
+ if ok && isRetry {
+ return d.Cache.Get(dep.Name), nil
+ }
+ if ok && d.DepCrdClient != nil {
+ return d.DepCrdClient.AdmiralV1alpha1().Dependencies(dep.Namespace).Get(ctx, dep.Name, meta_v1.GetOptions{})
+ }
+ return nil, fmt.Errorf("depcrd client is not initialized, txId=%s", ctx.Value("txId"))
}
From 899ceaf8764e0eeba9a1e6f0b21be51f8410d657 Mon Sep 17 00:00:00 2001
From: Shriram Sharma
Date: Sat, 20 Jul 2024 15:34:10 -0400
Subject: [PATCH 180/235] Added
admiral/pkg/controller/admiral/dependency_test.go from master
Signed-off-by: Shriram Sharma
---
.../pkg/controller/admiral/dependency_test.go | 329 +++++++++++++++++-
1 file changed, 324 insertions(+), 5 deletions(-)
diff --git a/admiral/pkg/controller/admiral/dependency_test.go b/admiral/pkg/controller/admiral/dependency_test.go
index 8d1dc6e3..9c2d1adc 100644
--- a/admiral/pkg/controller/admiral/dependency_test.go
+++ b/admiral/pkg/controller/admiral/dependency_test.go
@@ -2,21 +2,204 @@ package admiral
import (
"context"
+ "fmt"
+ "sync"
"testing"
"time"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader"
+ "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+
"github.com/google/go-cmp/cmp"
"github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model"
- v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
+ admiralV1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
+ v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1"
"github.com/istio-ecosystem/admiral/admiral/pkg/test"
+ "github.com/stretchr/testify/assert"
+ coreV1 "k8s.io/api/core/v1"
+ metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
+func TestDependencyAdded(t *testing.T) {
+
+ mockDependencyHandler := &test.MockDependencyHandler{}
+ ctx := context.Background()
+ dependencyController := DependencyController{
+ Cache: &depCache{
+ cache: make(map[string]*DependencyItem),
+ mutex: &sync.Mutex{},
+ },
+ DepHandler: mockDependencyHandler,
+ }
+
+ testCases := []struct {
+ name string
+ Dependency interface{}
+ expectedError error
+ }{
+ {
+ name: "Given context and Dependency " +
+ "When Dependency param is nil " +
+ "Then func should return an error",
+ Dependency: nil,
+ expectedError: fmt.Errorf("type assertion failed,