diff --git a/go.mod b/go.mod
index 05ca2c6..e4bc108 100644
--- a/go.mod
+++ b/go.mod
@@ -3,12 +3,14 @@ module kusionstack.io/kube-utils
 go 1.19
 
 require (
-	github.com/go-logr/logr v1.2.4
+	github.com/go-logr/logr v1.4.1
+	github.com/hashicorp/consul/sdk v0.16.0
 	github.com/onsi/ginkgo v1.16.5
-	github.com/onsi/gomega v1.27.6
+	github.com/onsi/gomega v1.30.0
 	github.com/prometheus/client_golang v1.16.0
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.8.4
+	golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6
 	k8s.io/api v0.28.4
 	k8s.io/apimachinery v0.28.4
 	k8s.io/client-go v0.28.4
@@ -21,14 +23,14 @@ require (
 require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/go-logr/zapr v1.2.3 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
-	github.com/google/go-cmp v0.5.9 // indirect
+	github.com/google/go-cmp v0.6.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/googleapis/gnostic v0.5.5 // indirect
@@ -39,17 +41,17 @@ require (
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/nxadm/tail v1.4.8 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_model v0.4.0 // indirect
 	github.com/prometheus/common v0.44.0 // indirect
 	github.com/prometheus/procfs v0.10.1 // indirect
 	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.24.0 // indirect
-	golang.org/x/net v0.19.0 // indirect
+	golang.org/x/net v0.20.0 // indirect
 	golang.org/x/oauth2 v0.8.0 // indirect
-	golang.org/x/sys v0.15.0 // indirect
-	golang.org/x/term v0.15.0 // indirect
+	golang.org/x/sys v0.16.0 // indirect
+	golang.org/x/term v0.16.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
diff --git a/go.sum b/go.sum
index 71cb7d9..963814f 100644
--- a/go.sum
+++ b/go.sum
@@ -88,8 +88,9 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsr
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
@@ -186,8 +187,8 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
@@ -216,6 +217,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
 github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.16.0 h1:SE9m0W6DEfgIVCJX7xU+iv/hUl4m/nxqMTnCdMxDpJ8=
+github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
@@ -329,8 +332,9 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
@@ -469,6 +473,7 @@ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWP
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
 golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
+golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ=
 golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -529,8 +534,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
 golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
 golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
-golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
-golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
+golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
+golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -603,8 +608,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
-golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
+golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -613,8 +618,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
 golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
 golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
 golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
-golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
-golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
+golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
+golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt
new file mode 100644
index 0000000..e9d71f2
--- /dev/null
+++ b/hack/boilerplate.go.txt
@@ -0,0 +1,15 @@
+/**
+ * Copyright 2024 KusionStack Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
diff --git a/resourcetopo/event_queue.go b/resourcetopo/event_queue.go
new file mode 100644
index 0000000..92483e3
--- /dev/null
+++ b/resourcetopo/event_queue.go
@@ -0,0 +1,135 @@
+/**
+ * Copyright 2024 KusionStack Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package resourcetopo
+
+import (
+	"time"
+
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/klog/v2"
+)
+
+type eventType string
+
+const (
+	EventTypeAdd           eventType = "add"
+	EventTypeUpdate        eventType = "update"
+	EventTypeDelete        eventType = "delete"
+	EventTypeRelatedUpdate eventType = "relatedUpdate"
+)
+
+const (
+	defaultNodeEventQueueSize        = 1000
+	defaultRelationEventQueueSize    = 1000
+	defaultNodeEventHandlePeriod     = time.Second
+	defaultRelationEventHandlePeriod = time.Second
+)
+
+type nodeEvent struct {
+	eventType eventType
+	node      *nodeInfo
+}
+
+type relationEvent struct {
+	eventType eventType
+	preNode   *nodeInfo
+	postNode  *nodeInfo
+}
+
+func (m *manager) startHandleEvent(stopCh <-chan struct{}) {
+	go wait.Until(m.handleNodeEvent, defaultNodeEventHandlePeriod, stopCh)
+	go wait.Until(m.handleRelationEvent, defaultRelationEventHandlePeriod, stopCh)
+}
+
+func (m *manager) handleNodeEvent() {
+	for {
+		select {
+		case e := <-m.nodeEventQueue:
+			storage := e.node.storageRef
+			if storage == nil {
+				klog.Errorf("Unexpected nil nodeStorage for nodeEvent node %v", e.node)
+				continue
+			}
+
+			switch e.eventType {
+			case EventTypeAdd:
+				for _, h := range storage.nodeUpdateHandler {
+					h.OnAdd(e.node)
+				}
+			case EventTypeUpdate:
+				for _, h := range storage.nodeUpdateHandler {
+					h.OnUpdate(e.node)
+				}
+			case EventTypeDelete:
+				for _, h := range storage.nodeUpdateHandler {
+					h.OnDelete(e.node)
+				}
+			case EventTypeRelatedUpdate:
+				for _, h := range storage.nodeUpdateHandler {
+					h.OnRelatedUpdate(e.node)
+				}
+			}
+		default:
+			// queue drained: return and let wait.Until schedule the next run
+			// (a bare break here would only exit the select and busy-spin)
+			return
+		}
+	}
+}
+
+func (m *manager) handleRelationEvent() {
+	for {
+		select {
+		case e := <-m.relationEventQueue:
+			storage := e.preNode.storageRef
+			if storage == nil {
+				klog.Errorf("Unexpected nil nodeStorage for relation event preNode node %v", e.preNode)
+				continue
+			}
+			handlers := storage.relationUpdateHandler[e.postNode.storageRef.metaKey]
+			if handlers == nil {
+				continue
+			}
+
+			switch e.eventType {
+			case EventTypeAdd:
+				for _, handler := range handlers {
+					handler.OnAdd(e.preNode, e.postNode)
+				}
+			case EventTypeDelete:
+				for _, handler := range handlers {
+					handler.OnDelete(e.preNode, e.postNode)
+				}
+			}
+		default:
+			// queue drained: return and let wait.Until schedule the next run
+			return
+		}
+	}
+}
+
+func (m *manager) newNodeEvent(info *nodeInfo, eType eventType) {
+	m.nodeEventQueue <- nodeEvent{
+		eventType: eType,
+		node:      info,
+	}
+}
+
+func (m *manager) newRelationEvent(preNode, postNode *nodeInfo, eType eventType) {
+	m.relationEventQueue <- relationEvent{
+		eventType: eType,
+		preNode:   preNode,
+		postNode:  postNode,
+	}
+}
diff --git a/resourcetopo/manager.go b/resourcetopo/manager.go
new file mode 100644
index 0000000..fa60769
--- /dev/null
+++ b/resourcetopo/manager.go
@@ -0,0 +1,220 @@
+/**
+ * Copyright 2024 KusionStack Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package resourcetopo
+
+import (
+	"container/list"
+	"fmt"
+	"sync"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/klog/v2"
+)
+
+var _ Manager = &manager{}
+
+type manager struct {
+	relationEventQueue chan relationEvent
+	nodeEventQueue     chan nodeEvent
+	configLock         sync.Mutex
+	started            bool
+
+	storages map[string]*nodeStorage // meta => nodeStorage and status info
+}
+
+func NewResourcesTopoManager(cfg ManagerConfig) (Manager, error) {
+	checkManagerConfig(&cfg)
+	m := &manager{
+		nodeEventQueue:     make(chan nodeEvent, cfg.NodeEventQueueSize),
+		relationEventQueue: make(chan relationEvent, cfg.RelationEventQueueSize),
+		storages:           make(map[string]*nodeStorage),
+	}
+
+	if cfg.TopologyConfig != nil {
+		if err := m.AddTopologyConfig(*cfg.TopologyConfig); err != nil {
+			return nil, err
+		}
+	}
+	return m, nil
+}
+
+// AddTopologyConfig adds a new resource topology config to this manager.
+func (m *manager) AddTopologyConfig(cfg TopologyConfig) error {
+	m.configLock.Lock()
+	defer m.configLock.Unlock()
+
+	for _, r := range cfg.Resolvers {
+		preOrderStorage, err := m.getOrCreateStorage(r.PreMeta, cfg.GetInformer)
+		if err != nil {
+			return err
+		}
+		for _, v := range r.PostMetas {
+			_, err := m.getOrCreateStorage(v, cfg.GetInformer)
+			if err != nil {
+				return err
+			}
+		}
+		if err = preOrderStorage.addRelationConfig(&r); err != nil {
+			return err
+		}
+	}
+
+	for _, d := range cfg.Discoverers {
+		s, err := m.getOrCreateStorage(d.PostMeta, cfg.GetInformer)
+		if err != nil {
+			return err
+		}
+		_, err = m.createVirtualStorage(d.PreMeta)
+		if err != nil {
+			return err
+		}
+
+		if err = s.addDiscoverConfig(&d); err != nil {
+			return err
+		}
+	}
+
+	if err := m.dagCheck(); err != nil {
+		klog.Errorf("Failed to check resource topology config: %s", err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// AddRelationHandler adds a new relation handler for relation changes between
+// the two node types configured by preOrder and postOrder.
+func (m *manager) AddRelationHandler(preOrder, postOrder metav1.TypeMeta, handler RelationHandler) error {
+	preOrderKey := generateMetaKey(preOrder)
+	preStorage, ok := m.storages[preOrderKey]
+	if !ok {
+		return fmt.Errorf("failed to find initialized nodeStorage for resource %s", preOrderKey)
+	}
+	preStorage.addRelationHandler(postOrder, handler)
+	return nil
+}
+
+// AddNodeHandler adds a new node handler for nodes of the type configured by meta.
+func (m *manager) AddNodeHandler(meta metav1.TypeMeta, handler NodeHandler) error {
+	s := m.storages[generateMetaKey(meta)]
+	if s == nil {
+		return fmt.Errorf("resource %v not configured in this manager", meta)
+	}
+	s.addNodeHandler(handler)
+	return nil
+}
+
+// Start begins handling relation events and node events.
+// Start can only be called once.
+func (m *manager) Start(stopCh <-chan struct{}) {
+	m.configLock.Lock()
+	defer m.configLock.Unlock()
+	if !m.started {
+		m.startHandleEvent(stopCh)
+		m.started = true
+	} else {
+		klog.Warning("resourcetopo Manager has already started, ignore this call.")
+	}
+}
+
+// GetTopoNodeStorage returns the ref to the TopoNodeStorage that matches the resource meta.
+func (m *manager) GetTopoNodeStorage(meta metav1.TypeMeta) (TopoNodeStorage, error) {
+	if s := m.storages[generateMetaKey(meta)]; s == nil {
+		return nil, fmt.Errorf("resource %v not configured in this manager", meta)
+	} else {
+		return s, nil
+	}
+}
+
+// GetNode returns the ref to the Node that matches the resource meta and node name, if it exists.
+func (m *manager) GetNode(meta metav1.TypeMeta, name types.NamespacedName) (NodeInfo, error) {
+	s, err := m.GetTopoNodeStorage(meta)
+	if err != nil {
+		return nil, err
+	}
+	return s.GetNode(name)
+}
+
+func (m *manager) getOrCreateStorage(typeMeta metav1.TypeMeta, getInformer func(meta metav1.TypeMeta) cache.SharedInformer) (*nodeStorage, error) {
+	if getInformer == nil {
+		return nil, fmt.Errorf("unexpected nil getInformer func")
+	}
+	key := generateMetaKey(typeMeta)
+
+	s, ok := m.storages[key]
+	if ok {
+		return s, nil
+	}
+
+	informer := getInformer(typeMeta)
+	if informer == nil {
+		return nil, fmt.Errorf("failed to get informer for resource %s", key)
+	}
+
+	s = newNodeStorage(m, informer, typeMeta)
+	m.storages[key] = s
+	return s, nil
+}
+
+func (m *manager) getStorage(meta metav1.TypeMeta) *nodeStorage {
+	key := generateMetaKey(meta)
+
+	return m.storages[key]
+}
+
+func (m *manager) createVirtualStorage(meta metav1.TypeMeta) (*nodeStorage, error) {
+	key := generateMetaKey(meta)
+	_, ok := m.storages[key]
+	if ok {
+		return nil, fmt.Errorf("unexpected twice configuration for virtual resource %s", key)
+	}
+
+	s := newVirtualStorage(m, meta)
+	m.storages[key] = s
+	return s, nil
+}
+
+// dagCheck implements a depth-first search to make sure the resource topology in this manager has no cycle.
+func (m *manager) dagCheck() error {
+	visited := make(map[string]bool)
+	stack := list.New()
+
+	for k, s := range m.storages {
+		if visited[k] {
+			if existInList(stack, k) {
+				return fmt.Errorf("DAG check for resource %s failed", k)
+			} else {
+				continue
+			}
+		}
+		if err := checkNode(s, stack, visited); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func checkManagerConfig(c *ManagerConfig) {
+	if c.NodeEventQueueSize <= 0 {
+		c.NodeEventQueueSize = defaultNodeEventQueueSize
+	}
+	if c.RelationEventQueueSize <= 0 {
+		c.RelationEventQueueSize = defaultRelationEventQueueSize
+	}
+}
diff --git a/resourcetopo/node_info.go b/resourcetopo/node_info.go
new file mode 100644
index 0000000..37441ef
--- /dev/null
+++ b/resourcetopo/node_info.go
@@ -0,0 +1,265 @@
+/**
+ * Copyright 2024 KusionStack Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package resourcetopo
+
+import (
+	"container/list"
+	"sync"
+
+	"golang.org/x/exp/maps"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+var _ NodeInfo = &nodeInfo{}
+
+type ownerInfo struct {
+	metaKey string
+	name    string
+}
+
+type nodeInfo struct {
+	storageRef *nodeStorage
+	lock       sync.RWMutex
+
+	// Lists of nodeInfo refs, caching all existing relations for this node.
+	directReferredPreOrders  *list.List
+	labelReferredPreOrders   *list.List
+	directReferredPostOrders *list.List
+	labelReferredPostOrders  *list.List
+
+	// We cache all the data needed to decide relation existence.
+	namespace  string
+	name       string
+	ownerNodes []ownerInfo
+	labels     labels.Set // labels is a ref to object.meta.labels, do not edit!
+	relations  []ResourceRelation
+
+	// objectExisted lets directRef relations be cached before the post object
+	// is added; it is set to true once the object lands in the cache, or
+	// immediately if the resource is of virtual type.
+	objectExisted bool
+}
+
+func newNode(s *nodeStorage, namespace, name string) *nodeInfo {
+	return &nodeInfo{
+		storageRef: s,
+		name:       name,
+		namespace:  namespace,
+		// For virtual resource, its lifecycle is handled by resourcetopo
+		objectExisted: s.virtualResource,
+	}
+}
+
+// TypeInfo returns the node's resource type info.
+func (n *nodeInfo) TypeInfo() metav1.TypeMeta {
+	return n.storageRef.meta
+}
+
+// NodeInfo returns the node's namespaced name.
+func (n *nodeInfo) NodeInfo() types.NamespacedName {
+	return types.NamespacedName{
+		Namespace: n.namespace,
+		Name:      n.name,
+	}
+}
+
+// GetPreOrders returns the pre-order node slice for this node.
+func (n *nodeInfo) GetPreOrders() []NodeInfo {
+	n.lock.RLock()
+	defer n.lock.RUnlock()
+	return append(transformToNodeSliceWithFilters(n.labelReferredPreOrders, objectExistFilter),
+		transformToNodeSliceWithFilters(n.directReferredPreOrders, objectExistFilter)...)
+}
+
+// GetPostOrders returns the post-order node slice for this node.
+func (n *nodeInfo) GetPostOrders() []NodeInfo {
+	n.lock.RLock()
+	defer n.lock.RUnlock()
+	return append(transformToNodeSliceWithFilters(n.labelReferredPostOrders, objectExistFilter),
+		transformToNodeSliceWithFilters(n.directReferredPostOrders, objectExistFilter)...)
+}
+
+// GetPreOrdersWithMeta returns the pre-order node slice that matches this resource meta.
+func (n *nodeInfo) GetPreOrdersWithMeta(meta metav1.TypeMeta) []NodeInfo {
+	n.lock.RLock()
+	defer n.lock.RUnlock()
+	return append(transformToNodeSliceWithFilters(n.labelReferredPreOrders, metaMatchFilter(meta), objectExistFilter),
+		transformToNodeSliceWithFilters(n.directReferredPreOrders, metaMatchFilter(meta), objectExistFilter)...)
+}
+
+// GetPostOrdersWithMeta returns the post-order node slice that matches this resource meta.
+func (n *nodeInfo) GetPostOrdersWithMeta(meta metav1.TypeMeta) []NodeInfo {
+	n.lock.RLock()
+	defer n.lock.RUnlock()
+	return append(transformToNodeSliceWithFilters(n.labelReferredPostOrders, metaMatchFilter(meta), objectExistFilter),
+		transformToNodeSliceWithFilters(n.directReferredPostOrders, metaMatchFilter(meta), objectExistFilter)...)
+}
+
+func (n *nodeInfo) updateNodeMeta(obj Object) {
+	n.lock.Lock()
+	defer n.lock.Unlock()
+	n.labels = obj.GetLabels()
+	n.resolveOwner(obj)
+	if !n.objectExisted {
+		n.postObjectAdded()
+		n.objectExisted = true
+	}
+}
+
+func (n *nodeInfo) matched(selector labels.Selector) bool {
+	n.lock.RLock()
+	defer n.lock.RUnlock()
+	return selector.Matches(n.labels)
+}
+
+func (n *nodeInfo) ownerMatched(node *nodeInfo) bool {
+	n.lock.RLock()
+	defer n.lock.RUnlock()
+	for _, owner := range n.ownerNodes {
+		if node.storageRef.metaKey == owner.metaKey &&
+			node.name == owner.name {
+			return true
+		}
+	}
+	return false
+}
+
+func (n *nodeInfo) labelEqualed(labelMap map[string]string) bool {
+	n.lock.RLock()
+	defer n.lock.RUnlock()
+
+	return maps.Equal(n.labels, labelMap)
+}
+
+// preOrderRelationDeleted reacts when a preNode change triggers deletion of this relation.
+func (n *nodeInfo) preOrderRelationDeleted(preNode *nodeInfo) {
+	// for direct-referred relations the post object may not exist yet; skip notifications then
+	if !n.isObjectExisted() {
+		return
+	}
+	m := n.storageRef.manager
+	m.newRelationEvent(preNode, n, EventTypeDelete)
+
+	if _, ok := preNode.storageRef.postNoticeRelation[n.storageRef.metaKey]; ok {
+		m.newNodeEvent(n, EventTypeRelatedUpdate)
+		n.propagateNodeChange(m)
+	}
+}
+
+// postOrderRelationDeleted reacts when a postNode change triggers deletion of this relation.
+func (n *nodeInfo) postOrderRelationDeleted(postNode *nodeInfo) {
+	m := n.storageRef.manager
+	m.newRelationEvent(n, postNode, EventTypeDelete)
+
+	if _, ok := n.storageRef.postNoticeRelation[postNode.storageRef.metaKey]; !ok {
+		m.newNodeEvent(n, EventTypeRelatedUpdate)
+		n.propagateNodeChange(m)
+	}
+}
+
+// propagateNodeChange calls node event handlers for existing relations.
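+// By default the change notice is pushed to pre-order nodes; for metas
+// registered in postNoticeRelation it is pushed to post-order nodes instead.
+// It also garbage-collects this node once it is no longer referenced.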
+func (n *nodeInfo) propagateNodeChange(m *manager) {
+	n.lock.RLock()
+	defer n.lock.RUnlock()
+
+	noticePreOrder := func(preOrder *nodeInfo) {
+		if _, ok := preOrder.storageRef.postNoticeRelation[n.storageRef.metaKey]; !ok {
+			m.newNodeEvent(preOrder, EventTypeRelatedUpdate)
+			preOrder.propagateNodeChange(m)
+		}
+	}
+	noticePostOrder := func(postOrder *nodeInfo) {
+		if _, ok := n.storageRef.postNoticeRelation[postOrder.storageRef.metaKey]; ok {
+			if !postOrder.isObjectExisted() {
+				return
+			}
+			m.newNodeEvent(postOrder, EventTypeRelatedUpdate)
+			postOrder.propagateNodeChange(m)
+		}
+	}
+
+	rangeNodeList(n.directReferredPreOrders, noticePreOrder)
+	rangeNodeList(n.labelReferredPreOrders, noticePreOrder)
+	rangeNodeList(n.directReferredPostOrders, noticePostOrder)
+	rangeNodeList(n.labelReferredPostOrders, noticePostOrder)
+
+	if n.storageRef.virtualResource {
+		if n.directReferredPostOrders == nil || n.directReferredPostOrders.Len() == 0 {
+			n.storageRef.deleteNode(n.namespace, n.name)
+			m.newNodeEvent(n, EventTypeDelete)
+		}
+	} else if n.readyToDelete() {
+		n.storageRef.deleteNode(n.namespace, n.name)
+	}
+}
+
+func (n *nodeInfo) readyToDelete() bool {
+	n.lock.RLock()
+	defer n.lock.RUnlock()
+	return !n.objectExisted && (n.directReferredPreOrders == nil || n.directReferredPreOrders.Len() == 0)
+}
+
+func (n *nodeInfo) isObjectExisted() bool {
+	n.lock.RLock()
+	defer n.lock.RUnlock()
+
+	return n.objectExisted
+}
+
+func (n *nodeInfo) postObjectAdded() {
+	if n.directReferredPreOrders != nil {
+		for ele := n.directReferredPreOrders.Front(); ele != nil; ele = ele.Next() {
+			preOrder := ele.Value.(*nodeInfo)
+			n.storageRef.manager.newRelationEvent(preOrder, n, EventTypeAdd)
+		}
+	}
+}
+
+func (n *nodeInfo) preObjectDeleted() {
+	n.objectExisted = false
+	n.labels = nil
+}
+
+func (n *nodeInfo) resolveOwner(o Object) {
+	for _, ownerref := range o.GetOwnerReferences() {
+		ownerKey := generateKey(ownerref.APIVersion, ownerref.Kind)
+		ownerStorage := n.storageRef.preOrderResources[ownerKey]
+		if ownerStorage == nil {
+			continue
+		}
+		if _, ok := ownerStorage.ownerRelation[n.storageRef.metaKey]; !ok {
+			continue
+		}
+
+		n.ownerNodes = append(n.ownerNodes, ownerInfo{
+			metaKey: ownerKey,
+			name:    ownerref.Name,
+		})
+	}
+}
+
+func metaMatchFilter(meta metav1.TypeMeta) func(info *nodeInfo) bool {
+	return func(info *nodeInfo) bool {
+		return info.storageRef.meta.APIVersion == meta.APIVersion &&
+			info.storageRef.meta.Kind == meta.Kind
+	}
+}
+
+func objectExistFilter(info *nodeInfo) bool {
+	return info.isObjectExisted()
+}
diff --git a/resourcetopo/node_relation_utils.go b/resourcetopo/node_relation_utils.go
new file mode 100644
index 0000000..e31b251
--- /dev/null
+++ b/resourcetopo/node_relation_utils.go
@@ -0,0 +1,175 @@
+/**
+ * Copyright 2024 KusionStack Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package resourcetopo
+
+import "container/list"
+
+func rangeAndSetLabelRelation(preNode, postNode *nodeInfo, m *manager) {
+	if labelRelationExist(preNode, postNode) {
+		return
+	}
+	addLabelRelation(preNode, postNode, m)
+}
+
+func rangeAndSetDirectRefRelation(preNode, postNode *nodeInfo, m *manager) {
+	if directRelationExist(preNode, postNode) {
+		return
+	}
+	addDirectRefRelation(preNode, postNode, m)
+}
+
+func addLabelRelation(preNode, postNode *nodeInfo, m *manager) {
+	// relation add should be an atomic action; initialize the lists while
+	// holding the locks to avoid racing with concurrent readers
+	preNode.lock.Lock()
+	defer preNode.lock.Unlock()
+	postNode.lock.Lock()
+	defer postNode.lock.Unlock()
+
+	if postNode.labelReferredPreOrders == nil {
+		postNode.labelReferredPreOrders = list.New()
+	}
+	if preNode.labelReferredPostOrders == nil {
+		preNode.labelReferredPostOrders = list.New()
+	}
+
+	postNode.labelReferredPreOrders.PushBack(preNode)
+	preNode.labelReferredPostOrders.PushBack(postNode)
+
+	m.newRelationEvent(preNode, postNode, EventTypeAdd)
+}
+
+func addDirectRefRelation(preNode, postNode *nodeInfo, m *manager) {
+	// relation add should be an atomic action; initialize the lists while
+	// holding the locks to avoid racing with concurrent readers
+	preNode.lock.Lock()
+	postNode.lock.Lock()
+	if postNode.directReferredPreOrders == nil {
+		postNode.directReferredPreOrders = list.New()
+	}
+	if preNode.directReferredPostOrders == nil {
+		preNode.directReferredPostOrders = list.New()
+	}
+	postNode.directReferredPreOrders.PushBack(preNode)
+	preNode.directReferredPostOrders.PushBack(postNode)
+	postNode.lock.Unlock()
+	preNode.lock.Unlock()
+
+	if postNode.isObjectExisted() {
+		m.newRelationEvent(preNode, postNode, EventTypeAdd)
+	}
+}
+
+// deleteLabelRelation returns true if this relation existed and was removed
+// successfully. On relation deletion, the postDelete action needs to be called by the caller.
+func deleteLabelRelation(preNode, postNode *nodeInfo) bool {
+	// relation delete should be an atomic action
+	preNode.lock.Lock()
+	defer preNode.lock.Unlock()
+	postNode.lock.Lock()
+	defer postNode.lock.Unlock()
+
+	return removeFromList(preNode.labelReferredPostOrders, postNode) &&
+		removeFromList(postNode.labelReferredPreOrders, preNode)
+}
+
+// deleteDirectRelation returns true if this relation existed and was removed
+// successfully. On relation deletion, the postDelete action needs to be called by the caller.
+func deleteDirectRelation(preNode, postNode *nodeInfo) bool {
+	// relation delete should be an atomic action
+	preNode.lock.Lock()
+	defer preNode.lock.Unlock()
+	postNode.lock.Lock()
+	defer postNode.lock.Unlock()
+
+	return removeFromList(preNode.directReferredPostOrders, postNode) &&
+		removeFromList(postNode.directReferredPreOrders, preNode)
+}
+
+func deleteAllRelation(node *nodeInfo) {
+	node.lock.Lock()
+
+	rangeNodeList(node.directReferredPostOrders, func(postNode *nodeInfo) {
+		postNode.lock.Lock()
+		removeFromList(postNode.directReferredPreOrders, node)
+		postNode.lock.Unlock()
+
+		postNode.preOrderRelationDeleted(node)
+	})
+	node.directReferredPostOrders = nil
+
+	rangeNodeList(node.labelReferredPostOrders, func(postNode *nodeInfo) {
+		postNode.lock.Lock()
+		removeFromList(postNode.labelReferredPreOrders, node)
+		postNode.lock.Unlock()
+
+		postNode.preOrderRelationDeleted(node)
+	})
+	node.labelReferredPostOrders = nil
+
+	node.lock.Unlock()
+
+	// To avoid deadlock, we always expect to lock the pre node before the post node.
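+	// deleteLabelRelation re-acquires preNode's lock first, so below we drop
+	// this node's lock before removing each remaining pre-order relation.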
+	for {
+		node.lock.RLock()
+		if node.labelReferredPreOrders == nil || node.labelReferredPreOrders.Len() == 0 {
+			node.labelReferredPreOrders = nil
+			node.lock.RUnlock()
+			break
+		} else {
+			preNode := node.labelReferredPreOrders.Front().Value.(*nodeInfo)
+			node.lock.RUnlock()
+
+			deleteLabelRelation(preNode, node)
+			preNode.postOrderRelationDeleted(node)
+		}
+	}
+
+	var virtualNodelist = list.New()
+	node.lock.RLock()
+	rangeNodeList(node.directReferredPreOrders, func(preNode *nodeInfo) {
+		if preNode.storageRef.virtualResource {
+			// remember virtual nodes to delete later.
+			virtualNodelist.PushBack(preNode)
+		} else {
+			// notify that the relation was deleted, but keep this node in
+			// place in case the object is recreated
+			preNode.postOrderRelationDeleted(node)
+		}
+	})
+	node.lock.RUnlock()
+	rangeNodeList(virtualNodelist, func(preNode *nodeInfo) {
+		deleteDirectRelation(preNode, node)
+		preNode.postOrderRelationDeleted(node)
+	})
+}
+
+func labelRelationExist(preOrder, postOrder *nodeInfo) bool {
+	preOrder.lock.RLock()
+	defer preOrder.lock.RUnlock()
+	postOrder.lock.RLock()
+	defer postOrder.lock.RUnlock()
+
+	return existInList(preOrder.labelReferredPostOrders, postOrder)
+}
+
+func directRelationExist(preOrder, postOrder *nodeInfo) bool {
+	preOrder.lock.RLock()
+	defer preOrder.lock.RUnlock()
+	postOrder.lock.RLock()
+	defer postOrder.lock.RUnlock()
+
+	return existInList(preOrder.directReferredPostOrders, postOrder)
+}
diff --git a/resourcetopo/node_storage.go b/resourcetopo/node_storage.go
new file mode 100644
index 0000000..b91bbe2
--- /dev/null
+++ b/resourcetopo/node_storage.go
@@ -0,0 +1,305 @@
+/**
+ * Copyright 2024 KusionStack Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package resourcetopo
+
+import (
+	"fmt"
+	"sync"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/klog/v2"
+)
+
+var _ TopoNodeStorage = &nodeStorage{}
+
+// nodeStorage is an implementation of TopoNodeStorage.
+// It is the entry point for all nodes of the same resource type and their related config.
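+// A nodeStorage also acts as the informer event handler for its resource type
+// (see node_topology_updater.go) and indexes nodes by namespace and name.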
+type nodeStorage struct {
+	manager *manager
+
+	meta            metav1.TypeMeta // meta is the resource type this nodeStorage handles
+	metaKey         string          // metaKey is the key string generated from meta
+	virtualResource bool            // virtualResource marks whether this resource type is virtual
+
+	preOrderResources  map[string]*nodeStorage // metaKey => corresponding nodeStorage reference
+	postNoticeRelation map[string]interface{}  // set of post resources' metaKeys with a configured reverse-notice relation
+	ownerRelation      map[string]interface{}  // set of post resources' metaKeys with a configured owner relation
+
+	resolvers   []RelationResolver          // resolvers list whose PreMeta is of this type
+	discoverers []VirtualResourceDiscoverer // discoverers list whose PostMeta is of this type
+
+	handlersLock          sync.Mutex                   // lock to protect handlers update operations
+	relationUpdateHandler map[string][]RelationHandler // post-order metaKey => RelationHandler list
+	nodeUpdateHandler     []NodeHandler                // NodeHandler list
+
+	storageLock    sync.RWMutex                    // lock to protect namespacedInfo CRUD operations
+	namespacedInfo map[string]map[string]*nodeInfo // namespace => name => object info
+}
+
+func newNodeStorage(manager *manager, informer cache.SharedInformer, meta metav1.TypeMeta) *nodeStorage {
+	s := &nodeStorage{
+		manager:            manager,
+		meta:               meta,
+		metaKey:            generateMetaKey(meta),
+		namespacedInfo:     make(map[string]map[string]*nodeInfo),
+		postNoticeRelation: make(map[string]interface{}),
+		ownerRelation:      make(map[string]interface{}),
+	}
+
+	informer.AddEventHandler(s)
+	return s
+}
+
+func newVirtualStorage(manager *manager, meta metav1.TypeMeta) *nodeStorage {
+	return &nodeStorage{
+		manager:         manager,
+		meta:            meta,
+		metaKey:         generateMetaKey(meta),
+		virtualResource: true,
+		namespacedInfo:  make(map[string]map[string]*nodeInfo),
+	}
+}
+
+// GetNode returns the ref to the Node matching the given namespaced name.
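+// It returns a nil NodeInfo (with a nil error) when the node is absent or its
+// underlying object has not been observed yet.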
+func (s *nodeStorage) GetNode(namespacedName types.NamespacedName) (NodeInfo, error) {
+	node := s.getNode(namespacedName.Namespace, namespacedName.Name)
+	if node == nil || !node.isObjectExisted() {
+		// return an untyped nil so callers can safely compare the interface
+		// against nil (a typed nil *nodeInfo would compare as non-nil)
+		return nil, nil
+	}
+	return node, nil
+}
+
+func (s *nodeStorage) addNodeHandler(handler NodeHandler) {
+	s.handlersLock.Lock()
+	defer s.handlersLock.Unlock()
+
+	s.nodeUpdateHandler = append(s.nodeUpdateHandler, handler)
+}
+
+func (s *nodeStorage) addRelationHandler(postMeta metav1.TypeMeta, relationHandler RelationHandler) {
+	s.handlersLock.Lock()
+	defer s.handlersLock.Unlock()
+	if s.relationUpdateHandler == nil {
+		s.relationUpdateHandler = make(map[string][]RelationHandler)
+	}
+	postKey := generateMetaKey(postMeta)
+
+	s.relationUpdateHandler[postKey] = append(s.relationUpdateHandler[postKey], relationHandler)
+}
+
+func (s *nodeStorage) addRelationConfig(r *RelationResolver) error {
+	for _, p := range r.PostMetas {
+		postStorage := s.manager.getStorage(p)
+		if postStorage == nil {
+			return fmt.Errorf("failed to get storage with meta %v", p)
+		}
+		postStorage.addPreOrder(s.meta)
+	}
+	s.resolvers = append(s.resolvers, *r)
+
+	if len(r.ReverseNotice) > 0 {
+		for _, meta := range r.ReverseNotice {
+			s.postNoticeRelation[generateMetaKey(meta)] = nil
+		}
+	}
+	if len(r.OwnerRelation) > 0 {
+		for _, meta := range r.OwnerRelation {
+			s.ownerRelation[generateMetaKey(meta)] = nil
+		}
+	}
+
+	return nil
+}
+
+func (s *nodeStorage) addDiscoverConfig(d *VirtualResourceDiscoverer) error {
+	s.addPreOrder(d.PreMeta)
+	s.discoverers = append(s.discoverers, *d)
+	return nil
+}
+
+func (s *nodeStorage) addPreOrder(preOrder metav1.TypeMeta) {
+	if s.preOrderResources == nil {
+		s.preOrderResources = make(map[string]*nodeStorage)
+	}
+	key := generateMetaKey(preOrder)
+	preStorage := s.manager.getStorage(preOrder)
+	if preStorage != nil {
+		s.preOrderResources[key] = preStorage
+	}
+}
+
+func (s *nodeStorage) getOrCreateNode(namespace, name string) *nodeInfo {
+	if node := s.getNode(namespace, name); node != nil {
+		return node
+	}
+	return s.createNode(namespace, name)
+}
+
+func (s *nodeStorage) getNode(namespace, name string) *nodeInfo {
+	namespace = getNamespacedKey(namespace)
+	s.storageLock.RLock()
+	defer s.storageLock.RUnlock()
+
+	namespacedInfo := s.namespacedInfo[namespace]
+	if namespacedInfo != nil {
+		return namespacedInfo[name]
+	} else {
+		return nil
+	}
+}
+
+func (s *nodeStorage) createNode(namespace, name string) *nodeInfo {
+	node := newNode(s, namespace, name)
+	namespace = getNamespacedKey(namespace)
+	s.storageLock.Lock()
+	defer s.storageLock.Unlock()
+
+	if s.namespacedInfo[namespace] == nil {
+		s.namespacedInfo[namespace] = make(map[string]*nodeInfo)
+	}
+	s.namespacedInfo[namespace][name] = node
+
+	if s.virtualResource {
+		s.manager.newNodeEvent(node, EventTypeAdd)
+	}
+
+	return node
+}
+
+func (s *nodeStorage) deleteNode(namespace, name string) {
+	namespace = getNamespacedKey(namespace)
+	s.storageLock.Lock()
+	defer s.storageLock.Unlock()
+
+	namedInfo := s.namespacedInfo[namespace]
+	if namedInfo != nil {
+		delete(namedInfo, name)
+	}
+}
+
+func (s *nodeStorage) getMatchedNodeListWithOwner(namespace string, labelSelector *metav1.LabelSelector, owner *nodeInfo) []*nodeInfo {
+	selector, err := metav1.LabelSelectorAsSelector(labelSelector)
+	if err != nil {
+		klog.Errorf("Failed to resolve labelSelector %v: %s", labelSelector, err.Error())
+		return nil
+	}
+
+	var res []*nodeInfo
+	appendFunc := func(info *nodeInfo) {
+		for _, nodeOwner := range info.ownerNodes {
+			if nodeOwner.metaKey == owner.storageRef.metaKey &&
+				nodeOwner.name == owner.name {
+				res = append(res, info)
+				return
+			}
+		}
+	}
+
+	// for owner-relation resources, k8s promises they are in the same namespace or both cluster-scoped
+	namespace = getNamespacedKey(namespace)
+	s.storageLock.RLock()
+	defer s.storageLock.RUnlock()
+
+	getMatchedNodeList(selector, s.namespacedInfo[namespace], appendFunc)
+
+	return res
+}
+
+func (s *nodeStorage) getMatchedNodeList(namespace string, labelSelector *metav1.LabelSelector) []*nodeInfo {
+	selector, err := metav1.LabelSelectorAsSelector(labelSelector)
+	if err != nil {
+		klog.Errorf("Failed to resolve labelSelector %v: %s", labelSelector, err.Error())
+		return nil
+	}
+
+	var res []*nodeInfo
+	appendFunc := func(info *nodeInfo) {
+		res = append(res, info)
+	}
+
+	s.storageLock.RLock()
+	defer s.storageLock.RUnlock()
+
+	if !isClusterNamespace(namespace) {
+		getMatchedNodeList(selector, s.namespacedInfo[namespace], appendFunc)
+		return res
+	}
+	for _, v := range s.namespacedInfo {
+		getMatchedNodeList(selector, v, appendFunc)
+	}
+	return res
+}
+
+// checkForLabelUpdate is called when postNode is newly added into the graph or
+// its labels have changed, to check whether any pre-order node's relations
+// need to be added or removed.
+func (s *nodeStorage) checkForLabelUpdate(postNode *nodeInfo) {
+	ns := getNamespacedKey(postNode.namespace)
+	s.storageLock.RLock()
+	defer s.storageLock.RUnlock()
+
+	nodeList := s.namespacedInfo[ns]
+	for _, n := range nodeList {
+		if len(n.relations) == 0 {
+			continue
+		}
+		for _, relation := range n.relations {
+			if !typeEqual(relation.PostMeta, postNode.storageRef.meta) {
+				continue
+			}
+			selector, err := metav1.LabelSelectorAsSelector(relation.LabelSelector)
+			if err != nil {
+				klog.Errorf("Failed to resolve selector %v: %s", relation.LabelSelector, err.Error())
+				continue
+			}
+			if postNode.matched(selector) {
+				if _, ok := s.ownerRelation[postNode.storageRef.metaKey]; ok && !postNode.ownerMatched(n) {
+					continue
+				}
+				rangeAndSetLabelRelation(n, postNode, s.manager)
+			} else {
+				if deleteLabelRelation(n, postNode) {
+					n.postOrderRelationDeleted(postNode)
+				}
+			}
+		}
+	}
+}
+
+func getMatchedNodeList(selector labels.Selector, m map[string]*nodeInfo, appendFunc func(info *nodeInfo)) {
+	for _, n := range m {
+		if n.matched(selector) {
+			appendFunc(n)
+		}
+	}
+}
+
+const allNamespaceKey = "_all_namespaces"
+
+func getNamespacedKey(namespace string) string {
+	if len(namespace) == 0 {
+		return allNamespaceKey
+	} else {
+		return namespace
+	}
+}
+
+func isClusterNamespace(namespace string) bool {
+	return len(namespace) == 0
+}
diff --git a/resourcetopo/node_topology_updater.go b/resourcetopo/node_topology_updater.go
new file mode 100644
index 0000000..e7282c5
--- /dev/null
+++ b/resourcetopo/node_topology_updater.go
@@ -0,0 +1,208 @@
+/**
+ * Copyright 2024 KusionStack Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package resourcetopo
+
+import (
+	"golang.org/x/exp/slices"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/klog/v2"
+)
+
+var _ cache.ResourceEventHandler = &nodeStorage{}
+
+func (s *nodeStorage) OnAdd(obj interface{}) {
+	topoObject, ok := obj.(Object)
+	if !ok {
+		klog.Errorf("Failed to transform to k8s object %v, ignore this add nodeEvent", obj)
+		return
+	}
+	node := s.getOrCreateNode(topoObject.GetNamespace(), topoObject.GetName())
+	s.addNode(topoObject, node)
+
+	s.manager.newNodeEvent(node, EventTypeAdd)
+	node.propagateNodeChange(s.manager)
+}
+
+func (s *nodeStorage) OnUpdate(oldObj, newObj interface{}) {
+	newTopoObj, ok := newObj.(Object)
+	if !ok {
+		klog.Errorf("Failed to transform to k8s object %v, ignore this update nodeEvent", newObj)
+		return
+	}
+	oldTopoObj, ok := oldObj.(Object)
+	if !ok {
+		klog.Errorf("Failed to transform to k8s object %v, ignore this update nodeEvent", oldObj)
+		return
+	}
+	node := s.getNode(newTopoObj.GetNamespace(), newTopoObj.GetName())
+	if node == nil {
+		node = s.createNode(newTopoObj.GetNamespace(), newTopoObj.GetName())
+		s.addNode(newTopoObj, node)
+		return
+	}
+
+	var resolvedRelations []ResourceRelation
+	for _, resolver := range s.resolvers {
+		relations := resolver.Resolve(newTopoObj)
+		resolvedRelations = append(resolvedRelations, relations...)
+	}
+
+	slices.SortFunc(resolvedRelations, compareResourceRelation)
+	sortedSlicesCompare(node.relations, resolvedRelations,
+		func(relation ResourceRelation) {
+			s.removeResourceRelation(node, &relation)
+		},
+		func(relation ResourceRelation) {
+			s.addResourceRelation(node, &relation)
+		},
+		compareResourceRelation)
+	node.relations = resolvedRelations
+
+	if !node.labelEqualed(newTopoObj.GetLabels()) {
+		node.updateNodeMeta(newTopoObj)
+		for _, preStorage := range s.preOrderResources {
+			preStorage.checkForLabelUpdate(node)
+		}
+	}
+
+	for _, discover := range s.discoverers {
+		newDiscovered := discover.Discover(newTopoObj)
+		oldDiscovered := discover.Discover(oldTopoObj)
+		slices.SortFunc(newDiscovered, compareNodeName)
+		slices.SortFunc(oldDiscovered, compareNodeName)
+
+		discoveredStorage := s.manager.getStorage(discover.PreMeta)
+		sortedSlicesCompare(newDiscovered, oldDiscovered,
+			func(name types.NamespacedName) {
+				discoveredNode := discoveredStorage.getOrCreateNode(name.Namespace, name.Name)
+				addDirectRefRelation(discoveredNode, node, s.manager)
+			},
+			func(namespacedName types.NamespacedName) {
+				discoveredNode := discoveredStorage.getNode(namespacedName.Namespace, namespacedName.Name)
+				if discoveredNode != nil && deleteDirectRelation(discoveredNode, node) {
+					// relations deleted here will not be revisited by the later node.propagateNodeChange call
+					discoveredNode.postOrderRelationDeleted(node)
+				}
+			},
+			compareNodeName)
+	}
+
+	s.manager.newNodeEvent(node, EventTypeUpdate)
+	node.propagateNodeChange(s.manager)
+}
+
+func (s *nodeStorage) OnDelete(obj interface{}) {
+	topoObject, ok := obj.(metav1.Object)
+	if !ok {
+		klog.Errorf("Failed to transform to k8s object %v, ignore this delete nodeEvent", obj)
+		return
+	}
+	node := s.getNode(topoObject.GetNamespace(), topoObject.GetName())
+	if node == nil {
+		return
+	}
+
+	node.preObjectDeleted()
+	deleteAllRelation(node)
+	if node.readyToDelete() {
+		s.deleteNode(node.namespace, node.name)
+	}
+
+	s.manager.newNodeEvent(node, EventTypeDelete)
+}
+
+func (s *nodeStorage) addNode(obj Object, node *nodeInfo) {
+	node.updateNodeMeta(obj)
+	for _, resolver := range s.resolvers {
+		relations := resolver.Resolve(obj)
+		node.relations = append(node.relations, relations...)
+		for _, relation := range relations {
+			s.addResourceRelation(node, &relation)
+		}
+	}
+
+	slices.SortFunc(node.relations, compareResourceRelation)
+
+	for _, discoverer := range s.discoverers {
+		preStorage := s.manager.getStorage(discoverer.PreMeta)
+		preObjs := discoverer.Discover(obj)
+		for _, preObj := range preObjs {
+			preNode := preStorage.getOrCreateNode(preObj.Namespace, preObj.Name)
+			addDirectRefRelation(preNode, node, s.manager)
+		}
+	}
+
+	for _, preOrderStorage := range s.preOrderResources {
+		preOrderStorage.checkForLabelUpdate(node)
+	}
+}
+
+func (s *nodeStorage) addResourceRelation(node *nodeInfo, relation *ResourceRelation) {
+	postMeta := relation.PostMeta
+	postMetaKey := generateMetaKey(postMeta)
+	postStorage := s.manager.getStorage(postMeta)
+	if postStorage == nil {
+		klog.Errorf("Failed to get node storage by meta %s, ignore this relation", postMetaKey)
+		return
+	}
+	if len(relation.DirectRefs) > 0 {
+		for _, ref := range relation.DirectRefs {
+			postNode := postStorage.getOrCreateNode(ref.Namespace, ref.Name)
+			rangeAndSetDirectRefRelation(node, postNode, s.manager)
+		}
+	}
+
+	if relation.LabelSelector != nil {
+		var postNodes []*nodeInfo
+		if _, ok := s.ownerRelation[postMetaKey]; ok {
+			postNodes = postStorage.getMatchedNodeListWithOwner(node.namespace, relation.LabelSelector, node)
+		} else {
+			postNodes = postStorage.getMatchedNodeList(node.namespace, relation.LabelSelector)
+		}
+		for _, postNode := range postNodes {
+			rangeAndSetLabelRelation(node, postNode, s.manager)
+		}
+	}
+}
+
+func (s *nodeStorage) removeResourceRelation(node *nodeInfo, relation *ResourceRelation) {
+	postStorage := s.manager.getStorage(relation.PostMeta)
+	if postStorage == nil {
+		klog.Errorf("Failed to get node storage by %s, ignore this delete request",
+			generateMetaKey(relation.PostMeta))
+		return
+	}
+	if len(relation.DirectRefs) > 0 {
+		for _, ref := range relation.DirectRefs {
+			postNode := postStorage.getNode(ref.Namespace, ref.Name)
+			if postNode == nil {
+				continue
+			}
+			if deleteDirectRelation(node, postNode) {
+				postNode.preOrderRelationDeleted(node)
+			}
+		}
+	}
+
+	if relation.LabelSelector != nil {
+		postNodes := postStorage.getMatchedNodeList(node.namespace, relation.LabelSelector)
+		for _, postNode := range postNodes {
+			if deleteLabelRelation(node, postNode) {
+				postNode.preOrderRelationDeleted(node)
+			}
+		}
+	}
+}
diff --git a/resourcetopo/resourcetopo_test.go b/resourcetopo/resourcetopo_test.go
new file mode 100644
index 0000000..b7533b2
--- /dev/null
+++ b/resourcetopo/resourcetopo_test.go
@@ -0,0 +1,1315 @@
+/**
+ * Copyright 2024 KusionStack Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package resourcetopo
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/informers"
+	"k8s.io/client-go/kubernetes/fake"
+	"k8s.io/klog/v2"
+)
+
+const defaultTimeoutSecond = 1
+
+func TestResourceTopo(t *testing.T) {
+	SetDefaultEventuallyTimeout(defaultTimeoutSecond * time.Second)
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "resourcetopo suite test")
+}
+
+var _ = Describe("test suite with ists config (label selector and virtual resource)", func() {
+	var manager Manager
+	var fakeClient *fake.Clientset
+	var podHandler, stsHandler, istsHandler *objecthandler
+	var podStsRelation, stsIstsRelation *relationHandler
+
+	var podStorage, stsStorage, istsStorage TopoNodeStorage
+	var ctx context.Context
+	var cancel func()
+
+	checkAll := func() bool {
+		return podHandler.matchExpected() &&
+			stsHandler.matchExpected() &&
+			istsHandler.matchExpected() &&
+			podStsRelation.matchExpected() &&
+			stsIstsRelation.matchExpected()
+	}
+
+	BeforeEach(func() {
+		fakeClient = fake.NewSimpleClientset()
+		k8sInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
+		var err error
+		manager, err = NewResourcesTopoManager(*buildManagerConfig(buildInspectTopoConfig(k8sInformerFactory)))
+		Expect(err).NotTo(HaveOccurred())
+		stsStorage, _ = manager.GetTopoNodeStorage(StatefulSetMeta)
+		podStorage, _ = manager.GetTopoNodeStorage(PodMeta)
+		istsStorage, _ = manager.GetTopoNodeStorage(InspectStatefulSetMeta)
+		Expect(stsStorage).NotTo(BeNil())
+		Expect(podStorage).NotTo(BeNil())
+		Expect(istsStorage).NotTo(BeNil())
+
+		ctx, cancel = context.WithCancel(context.Background())
+		podHandler = &objecthandler{}
+		Expect(manager.AddNodeHandler(PodMeta, podHandler)).Should(Succeed())
+		stsHandler = &objecthandler{}
+		Expect(manager.AddNodeHandler(StatefulSetMeta, stsHandler)).Should(Succeed())
+		istsHandler = &objecthandler{}
+		Expect(manager.AddNodeHandler(InspectStatefulSetMeta, istsHandler)).Should(Succeed())
+
+		podStsRelation = &relationHandler{}
+		Expect(manager.AddRelationHandler(StatefulSetMeta, PodMeta, podStsRelation)).Should(Succeed())
+		stsIstsRelation = &relationHandler{}
+		Expect(manager.AddRelationHandler(InspectStatefulSetMeta, StatefulSetMeta, stsIstsRelation)).Should(Succeed())
+
+		manager.Start(ctx.Done())
+		k8sInformerFactory.Start(ctx.Done())
+		k8sInformerFactory.WaitForCacheSync(ctx.Done())
+	})
+	AfterEach(func() {
+		if !checkAll() {
+			klog.Infof("end with object [sts %s, pod %s, ists %s]", stsHandler.string(), podHandler.string(), istsHandler.string())
+			klog.Infof("end with relation [stsIsts %s, podSts %s]", stsIstsRelation.string(), podStsRelation.string())
+		}
+		cancel()
+	})
+
+	It("create single pod", func() {
+		podName := "test1"
+		podHandler.addCallExpected()
+		_, err := fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, podName), metav1.CreateOptions{})
+		Expect(err).To(BeNil())
+
+		Eventually(func(g Gomega) {
+			pod, _ := podStorage.GetNode(types.NamespacedName{Namespace: namespaceDefault, Name: podName})
+			g.Expect(pod).NotTo(BeNil())
+			g.Expect(len(pod.GetPostOrders())).To(Equal(0))
+			g.Expect(len(pod.GetPreOrders())).To(Equal(0))
+
+			g.Expect(pod.TypeInfo()).To(Equal(PodMeta))
+			g.Expect(pod.NodeInfo()).To(Equal(types.NamespacedName{Namespace: namespaceDefault, Name: podName}))
+
+			g.Expect(checkAll()).To(Equal(true))
+		}).Should(Succeed())
+	})
+
+	It("create single statefulSet", func() {
+		stsName := "testSts"
+		var err error
+
+		stsHandler.addCallExpected()
+		istsHandler.addCallExpected()
+		istsHandler.relatedCallExpected()
+		stsIstsRelation.addCallExpected()
+		_, err = fakeClient.AppsV1().StatefulSets(namespaceDefault).Create(ctx, newStatefulSet(namespaceDefault, stsName, "apps", stsName), metav1.CreateOptions{})
+		Expect(err).To(BeNil())
+
+		Eventually(func(g Gomega) {
+			sts, _ := stsStorage.GetNode(types.NamespacedName{Namespace: namespaceDefault, Name: stsName})
+			g.Expect(sts).NotTo(BeNil())
+
+			preOrders := sts.GetPreOrders()
+			g.Expect(len(preOrders)).To(Equal(1))
+			postOrders := sts.GetPostOrders()
+			g.Expect(len(postOrders)).To(Equal(0))
+			g.Expect(checkAll()).To(Equal(true))
+		}).Should(Succeed())
+	})
+
+	It("create pod and sts", func() {
+		podName := "testpod"
+		stsName := "testSts"
+
+		podHandler.addCallExpected()
+		_, err := fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, podName, "apps", stsName), metav1.CreateOptions{})
+		Expect(err).To(BeNil())
+		syncStatus(checkAll)
+
+		stsHandler.addCallExpected()
+		istsHandler.addCallExpected()
+		istsHandler.relatedCallExpected()
+		podStsRelation.addCallExpected()
+		stsIstsRelation.addCallExpected()
+		_, err = fakeClient.AppsV1().StatefulSets(namespaceDefault).Create(ctx, newStatefulSet(namespaceDefault, stsName, "apps", stsName), metav1.CreateOptions{})
+		Expect(err).To(BeNil())
+
+		Eventually(func(g Gomega) {
+			pod, _ := podStorage.GetNode(types.NamespacedName{Namespace: namespaceDefault, Name: podName})
+			g.Expect(pod).NotTo(BeNil())
+			g.Expect(len(pod.GetPostOrders())).To(Equal(0))
+			preOrders := pod.GetPreOrders()
+			g.Expect(len(preOrders)).To(Equal(1))
+			g.Expect(pod.TypeInfo()).To(Equal(PodMeta))
+			g.Expect(pod.NodeInfo()).To(Equal(types.NamespacedName{Namespace: namespaceDefault, Name: podName}))
+			g.Expect(checkAll()).To(Equal(true))
+		}).Should(Succeed())
+
+	})
+
+	It("create sts and pod", func() {
+		podName := "testpod2"
+		stsName := "testSts2"
+
+		stsHandler.addCallExpected()
+		istsHandler.addCallExpected()
+		istsHandler.relatedCallExpected()
+		stsIstsRelation.addCallExpected()
+		_, err := fakeClient.AppsV1().StatefulSets(namespaceDefault).Create(ctx, newStatefulSet(namespaceDefault, stsName, "apps", stsName), metav1.CreateOptions{})
+		Expect(err).To(BeNil())
+		syncStatus(checkAll)
+
+		podHandler.addCallExpected()
+		stsHandler.relatedCallExpected()
+		istsHandler.relatedCallExpected()
+		podStsRelation.addCallExpected()
+		_, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, podName, "apps", stsName), metav1.CreateOptions{})
+		Expect(err).To(BeNil())
+
+		Eventually(func(g Gomega) {
+			pod, _ := podStorage.GetNode(types.NamespacedName{Namespace: namespaceDefault, Name: podName})
+			g.Expect(pod).NotTo(BeNil())
+			g.Expect(len(pod.GetPostOrders())).To(Equal(0))
+			g.Expect(len(pod.GetPreOrders())).To(Equal(1))
+			g.Expect(pod.TypeInfo()).To(Equal(PodMeta))
+			g.Expect(pod.NodeInfo()).To(Equal(types.NamespacedName{Namespace: namespaceDefault, Name: podName}))
+			g.Expect(checkAll()).To(Equal(true))
+		}).Should(Succeed())
+	})
+
+	It("create pod1, sts and pod2", func() {
+		var err error
+		pod1Name := "testpod1"
+		pod2Name := "testpod2"
+		stsName := "testSts2"
+
+		podHandler.addCallExpected()
+		_, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, pod1Name, "apps", stsName), metav1.CreateOptions{})
+		Expect(err).To(BeNil())
+		syncStatus(checkAll)
+
+		stsHandler.addCallExpected()
+		istsHandler.addCallExpected()
+		istsHandler.relatedCallExpected()
+		podStsRelation.addCallExpected()
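+		// the new sts also gains its virtual InspectStatefulSet pre-order relation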
stsIstsRelation.addCallExpected() + _, err = fakeClient.AppsV1().StatefulSets(namespaceDefault).Create(ctx, newStatefulSet(namespaceDefault, stsName, "apps", stsName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + podHandler.addCallExpected() + stsHandler.relatedCallExpected() + istsHandler.relatedCallExpected() + podStsRelation.addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, pod2Name, "apps", stsName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + + Eventually(func(g Gomega) { + sts, _ := stsStorage.GetNode(types.NamespacedName{Namespace: namespaceDefault, Name: stsName}) + g.Expect(sts).NotTo(BeNil()) + + preOrders := sts.GetPreOrders() + g.Expect(len(preOrders)).To(Equal(1)) + postOrders := sts.GetPostOrders() + g.Expect(len(postOrders)).To(Equal(2)) + g.Expect(checkAll()).To(Equal(true)) + }).Should(Succeed()) + }) + + It("create pod and sts; delete pod and sts", func() { + var err error + pod1Name := "testpod1" + stsName := "testSts2" + + podHandler.addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, pod1Name, "apps", stsName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + stsHandler.addCallExpected() + istsHandler.addCallExpected() + istsHandler.relatedCallExpected() + podStsRelation.addCallExpected() + stsIstsRelation.addCallExpected() + _, err = fakeClient.AppsV1().StatefulSets(namespaceDefault).Create(ctx, newStatefulSet(namespaceDefault, stsName, "apps", stsName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + podHandler.deleteCallExpected() + stsHandler.relatedCallExpected() + istsHandler.relatedCallExpected() + podStsRelation.deleteCallExpected() + err = fakeClient.CoreV1().Pods(namespaceDefault).Delete(ctx, pod1Name, metav1.DeleteOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + stsHandler.deleteCallExpected() + istsHandler.relatedCallExpected() + istsHandler.deleteCallExpected() + stsIstsRelation.deleteCallExpected() + err = fakeClient.AppsV1().StatefulSets(namespaceDefault).Delete(ctx, stsName, metav1.DeleteOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + Eventually(func(g Gomega) { + sts, _ := stsStorage.GetNode(types.NamespacedName{Namespace: namespaceDefault, Name: stsName}) + g.Expect(sts).To(BeNil()) + }).Should(Succeed()) + }) + + It("create pod and sts; delete sts and pod", func() { + var err error + pod1Name := "testpod1" + stsName := "testSts2" + + podHandler.addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, pod1Name, "apps", stsName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + stsHandler.addCallExpected() + istsHandler.addCallExpected() + istsHandler.relatedCallExpected() + podStsRelation.addCallExpected() + stsIstsRelation.addCallExpected() + _, err = fakeClient.AppsV1().StatefulSets(namespaceDefault).Create(ctx, newStatefulSet(namespaceDefault, stsName, "apps", stsName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + stsHandler.deleteCallExpected() + istsHandler.relatedCallExpected() + istsHandler.deleteCallExpected() + podStsRelation.deleteCallExpected() + stsIstsRelation.deleteCallExpected() + err = fakeClient.AppsV1().StatefulSets(namespaceDefault).Delete(ctx, stsName, metav1.DeleteOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + podHandler.deleteCallExpected() + err = 
fakeClient.CoreV1().Pods(namespaceDefault).Delete(ctx, pod1Name, metav1.DeleteOptions{}) + Expect(err).To(BeNil()) + + Eventually(func(g Gomega) { + pod, _ := podStorage.GetNode(types.NamespacedName{Namespace: namespaceDefault, Name: pod1Name}) + sts, _ := stsStorage.GetNode(types.NamespacedName{Namespace: namespaceDefault, Name: stsName}) + g.Expect(sts).To(BeNil()) + g.Expect(pod).To(BeNil()) + g.Expect(checkAll()).To(Equal(true)) + }).Should(Succeed()) + }) + + It("create pod and sts; update pod and sts", func() { + var err error + pod1Name := "testpod1" + stsName := "testSts2" + + podHandler.addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, pod1Name, "apps", stsName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + stsHandler.addCallExpected() + istsHandler.addCallExpected() + istsHandler.relatedCallExpected() + podStsRelation.addCallExpected() + stsIstsRelation.addCallExpected() + _, err = fakeClient.AppsV1().StatefulSets(namespaceDefault).Create(ctx, newStatefulSet(namespaceDefault, stsName, "apps", stsName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + podHandler.updateCallExpected() + stsHandler.relatedCallExpected() + istsHandler.relatedCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Update(ctx, newPod(namespaceDefault, pod1Name, "apps", stsName), metav1.UpdateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + stsHandler.updateCallExpected() + istsHandler.relatedCallExpected() + _, err = fakeClient.AppsV1().StatefulSets(namespaceDefault).Update(ctx, newStatefulSet(namespaceDefault, stsName, "apps", stsName), metav1.UpdateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + Eventually(func(g Gomega) { + sts, _ := stsStorage.GetNode(types.NamespacedName{Namespace: namespaceDefault, Name: stsName}) + g.Expect(sts).NotTo(BeNil()) + preOrders := sts.GetPreOrders() + g.Expect(len(preOrders)).To(Equal(1)) + postOrders := sts.GetPostOrders() + g.Expect(len(postOrders)).To(Equal(1)) + g.Expect(checkAll()).To(Equal(true)) + }).Should(Succeed()) + }) + + It("create pod and sts; update pod label to no longer match", func() { + var err error + pod1Name := "testpod1" + stsName := "testSts2" + + podHandler.addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, pod1Name, "apps", stsName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + stsHandler.addCallExpected() + istsHandler.addCallExpected() + istsHandler.relatedCallExpected() + podStsRelation.addCallExpected() + stsIstsRelation.addCallExpected() + _, err = fakeClient.AppsV1().StatefulSets(namespaceDefault).Create(ctx, newStatefulSet(namespaceDefault, stsName, "apps", stsName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + podHandler.updateCallExpected() + stsHandler.relatedCallExpected() + istsHandler.relatedCallExpected() + podStsRelation.deleteCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Update(ctx, newPod(namespaceDefault, pod1Name, "apps", stsName+"failed"), metav1.UpdateOptions{}) + Expect(err).To(BeNil()) + + Eventually(func(g Gomega) { + sts, _ := stsStorage.GetNode(types.NamespacedName{Namespace: namespaceDefault, Name: stsName}) + g.Expect(sts).NotTo(BeNil()) + preOrders := sts.GetPreOrders() + g.Expect(len(preOrders)).To(Equal(1)) + postOrders := sts.GetPostOrders() + g.Expect(len(postOrders)).To(Equal(0)) + 
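// after the label update the pod no longer matches the selector: the ists pre-order remains but the pod post-order is gone +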
g.Expect(checkAll()).To(Equal(true)) + }).Should(Succeed()) + + }) + + It("create pod and sts; update sts label to no longer match", func() { + var err error + pod1Name := "testpod1" + stsName := "testSts2" + + podHandler.addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, pod1Name, "apps", stsName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + stsHandler.addCallExpected() + istsHandler.addCallExpected() + istsHandler.relatedCallExpected() + podStsRelation.addCallExpected() + stsIstsRelation.addCallExpected() + _, err = fakeClient.AppsV1().StatefulSets(namespaceDefault).Create(ctx, newStatefulSet(namespaceDefault, stsName, "apps", stsName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + stsHandler.updateCallExpected() + istsHandler.relatedCallExpected() + podStsRelation.deleteCallExpected() + + _, err = fakeClient.AppsV1().StatefulSets(namespaceDefault).Update(ctx, newStatefulSet(namespaceDefault, stsName, "apps", stsName+"failed"), metav1.UpdateOptions{}) + Expect(err).To(BeNil()) + + Eventually(func(g Gomega) { + sts, _ := stsStorage.GetNode(types.NamespacedName{Namespace: namespaceDefault, Name: stsName}) + g.Expect(sts).NotTo(BeNil()) + preOrders := sts.GetPreOrders() + postOrders := sts.GetPostOrders() + g.Expect(len(preOrders)).To(Equal(1)) + g.Expect(len(postOrders)).To(Equal(0)) + g.Expect(checkAll()).To(Equal(true)) + }).Should(Succeed()) + }) +}) + +var _ = Describe("test suite with cluster role config(cluster role and direct reference)", func() { + var manager Manager + var fakeClient *fake.Clientset + var clusterRoleBindingHandler, clusterRoleHandler, saHandler *objecthandler + var saBindingRelation, roleBindingRelation *relationHandler + + var clusterRoleBindingStorage, clusterroleStorage, saStorage TopoNodeStorage + var ctx context.Context + var cancel func() + + checkAll := func() bool { + return clusterRoleBindingHandler.matchExpected() && + clusterRoleHandler.matchExpected() && + saHandler.matchExpected() && + saBindingRelation.matchExpected() && + roleBindingRelation.matchExpected() + } + + BeforeEach(func() { + fakeClient = fake.NewSimpleClientset() + k8sInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0) + var err error + manager, err = NewResourcesTopoManager(*buildManagerConfig(buildClusterTest(k8sInformerFactory))) + Expect(err).NotTo(HaveOccurred()) + + clusterRoleBindingStorage, _ = manager.GetTopoNodeStorage(ClusterRoleBindingMeta) + clusterroleStorage, _ = manager.GetTopoNodeStorage(ClusterRoleMeta) + saStorage, _ = manager.GetTopoNodeStorage(ServiceAccountMeta) + + Expect(clusterRoleBindingStorage).NotTo(BeNil()) + Expect(clusterroleStorage).NotTo(BeNil()) + Expect(saStorage).NotTo(BeNil()) + + ctx, cancel = context.WithCancel((context.Background())) + clusterRoleBindingHandler = &objecthandler{} + Expect(manager.AddNodeHandler(ClusterRoleBindingMeta, clusterRoleBindingHandler)).NotTo(HaveOccurred()) + clusterRoleHandler = &objecthandler{} + Expect(manager.AddNodeHandler(ClusterRoleMeta, clusterRoleHandler)).NotTo(HaveOccurred()) + saHandler = &objecthandler{} + Expect(manager.AddNodeHandler(ServiceAccountMeta, saHandler)).NotTo(HaveOccurred()) + + roleBindingRelation = &relationHandler{} + Expect(manager.AddRelationHandler(ClusterRoleBindingMeta, ClusterRoleMeta, roleBindingRelation)).To(BeNil()) + saBindingRelation = &relationHandler{} + Expect(manager.AddRelationHandler(ClusterRoleBindingMeta, ServiceAccountMeta, 
saBindingRelation)).To(BeNil()) + + manager.Start(ctx.Done()) + k8sInformerFactory.Start(ctx.Done()) + k8sInformerFactory.WaitForCacheSync(ctx.Done()) + }) + AfterEach(func() { + if !checkAll() { + klog.Infof("end with object [%s, %s, %s]", clusterRoleBindingHandler.string(), clusterRoleHandler.string(), saHandler.string()) + klog.Infof("end with relation [%s, %s]", roleBindingRelation.string(), saBindingRelation.string()) + } + cancel() + }) + + It("create single clusterRoleBinding", func() { + ns := "testclusterresource" + crbName := "crbtest" + crName := "crName" + saName := "saName" + + clusterRoleBindingHandler.addCallExpected() + Expect(fakeClient.RbacV1().ClusterRoleBindings().Create(ctx, newClusterRoleBinding(crbName, crName, []types.NamespacedName{{Name: saName, Namespace: ns}}), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + Eventually(func(g Gomega) { + crb, _ := clusterRoleBindingStorage.GetNode(types.NamespacedName{Name: crbName}) + g.Expect(crb).NotTo(BeNil()) + g.Expect(len(crb.GetPostOrders())).To(Equal(0)) + g.Expect(clusterroleStorage.GetNode(types.NamespacedName{Name: crName})).To(BeNil()) + g.Expect(saStorage.GetNode(types.NamespacedName{Name: saName, Namespace: ns})).To(BeNil()) + }).Should(Succeed()) + }) + + It("create serviceAccount, clusterRole and clusterRoleBinding", func() { + ns := "testclusterresource" + crbName := "crbtest" + crName := "crName" + saName := "saName" + + saHandler.addCallExpected() + Expect(fakeClient.CoreV1().ServiceAccounts(ns).Create(ctx, newServiceAccount(ns, saName), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + clusterRoleHandler.addCallExpected() + Expect(fakeClient.RbacV1().ClusterRoles().Create(ctx, newClusterRole(crName), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + clusterRoleBindingHandler.addCallExpected() + saBindingRelation.addCallExpected() + roleBindingRelation.addCallExpected() + Expect(fakeClient.RbacV1().ClusterRoleBindings().Create(ctx, newClusterRoleBinding(crbName, crName, []types.NamespacedName{{Name: saName, Namespace: ns}}), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + }) + + It("create clusterRoleBinding and related serviceAccount, clusterRole", func() { + ns := "testclusterresource" + crbName := "crbtest" + crName := "crName" + saName := "saName" + + clusterRoleBindingHandler.addCallExpected() + Expect(fakeClient.RbacV1().ClusterRoleBindings().Create(ctx, newClusterRoleBinding(crbName, crName, []types.NamespacedName{{Name: saName, Namespace: ns}}), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + saHandler.addCallExpected() + clusterRoleBindingHandler.relatedCallExpected() + saBindingRelation.addCallExpected() + Expect(fakeClient.CoreV1().ServiceAccounts(ns).Create(ctx, newServiceAccount(ns, saName), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + clusterRoleHandler.addCallExpected() + clusterRoleBindingHandler.relatedCallExpected() + roleBindingRelation.addCallExpected() + Expect(fakeClient.RbacV1().ClusterRoles().Create(ctx, newClusterRole(crName), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + }) + + It("create all and delete clusterRole and serviceAccount", func() { + ns := "testclusterresource" + crbName := "crbtest" + crName := "crName" + saName := "saName" + + clusterRoleBindingHandler.addCallExpected() + Expect(fakeClient.RbacV1().ClusterRoleBindings().Create(ctx, newClusterRoleBinding(crbName, crName, []types.NamespacedName{{Name: saName, Namespace: ns}}), 
metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + saHandler.addCallExpected() + clusterRoleBindingHandler.relatedCallExpected() + saBindingRelation.addCallExpected() + Expect(fakeClient.CoreV1().ServiceAccounts(ns).Create(ctx, newServiceAccount(ns, saName), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + clusterRoleHandler.addCallExpected() + clusterRoleBindingHandler.relatedCallExpected() + roleBindingRelation.addCallExpected() + Expect(fakeClient.RbacV1().ClusterRoles().Create(ctx, newClusterRole(crName), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + saHandler.deleteCallExpected() + clusterRoleBindingHandler.relatedCallExpected() + saBindingRelation.deleteCallExpected() + Expect(fakeClient.CoreV1().ServiceAccounts(ns).Delete(ctx, saName, metav1.DeleteOptions{})).NotTo(HaveOccurred()) + syncStatus(checkAll) + + clusterRoleHandler.deleteCallExpected() + clusterRoleBindingHandler.relatedCallExpected() + roleBindingRelation.deleteCallExpected() + Expect(fakeClient.RbacV1().ClusterRoles().Delete(ctx, crName, metav1.DeleteOptions{})).NotTo(HaveOccurred()) + syncStatus(checkAll) + + Eventually(func(g Gomega) { + clusterrolebindingNode, _ := clusterRoleBindingStorage.GetNode(types.NamespacedName{Name: crbName}) + g.Expect(clusterrolebindingNode).NotTo(BeNil()) + g.Expect(len(clusterrolebindingNode.GetPostOrders())).To(Equal(0)) + }).Should(Succeed()) + }) + + It("create all and delete clusterRoleBinding", func() { + ns := "testclusterresource" + crbName := "crbtest" + crName := "crName" + saName := "saName" + + clusterRoleBindingHandler.addCallExpected() + Expect(fakeClient.RbacV1().ClusterRoleBindings().Create(ctx, newClusterRoleBinding(crbName, crName, []types.NamespacedName{{Name: saName, Namespace: ns}}), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + saHandler.addCallExpected() + clusterRoleBindingHandler.relatedCallExpected() + saBindingRelation.addCallExpected() + Expect(fakeClient.CoreV1().ServiceAccounts(ns).Create(ctx, newServiceAccount(ns, saName), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + clusterRoleHandler.addCallExpected() + clusterRoleBindingHandler.relatedCallExpected() + roleBindingRelation.addCallExpected() + Expect(fakeClient.RbacV1().ClusterRoles().Create(ctx, newClusterRole(crName), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + clusterRoleBindingHandler.deleteCallExpected() + roleBindingRelation.deleteCallExpected() + saBindingRelation.deleteCallExpected() + Expect(fakeClient.RbacV1().ClusterRoleBindings().Delete(ctx, crbName, metav1.DeleteOptions{})).NotTo(HaveOccurred()) + syncStatus(checkAll) + + Eventually(func(g Gomega) { + clusterrolebindingNode, _ := clusterRoleBindingStorage.GetNode(types.NamespacedName{Name: crbName}) + g.Expect(clusterrolebindingNode).To(BeNil()) + clusterrole, _ := clusterroleStorage.GetNode(types.NamespacedName{Name: crName}) + g.Expect(clusterrole).NotTo(BeNil()) + + }).Should(Succeed()) + }) + + It("create all, delete serviceAccount/clusterRole and create again, should match", func() { + ns := "testclusterresource" + crbName := "crbtest" + crName := "crName" + saName := "saName" + + clusterRoleBindingHandler.addCallExpected() + Expect(fakeClient.RbacV1().ClusterRoleBindings().Create(ctx, newClusterRoleBinding(crbName, crName, []types.NamespacedName{{Name: saName, Namespace: ns}}), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + saHandler.addCallExpected() + clusterRoleBindingHandler.relatedCallExpected() 
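+ // the crb already lists this serviceAccount in its subjects, so creating it links the two and fires a relation add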
+ saBindingRelation.addCallExpected() + Expect(fakeClient.CoreV1().ServiceAccounts(ns).Create(ctx, newServiceAccount(ns, saName), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + clusterRoleHandler.addCallExpected() + clusterRoleBindingHandler.relatedCallExpected() + roleBindingRelation.addCallExpected() + Expect(fakeClient.RbacV1().ClusterRoles().Create(ctx, newClusterRole(crName), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + saHandler.deleteCallExpected() + clusterRoleBindingHandler.relatedCallExpected() + saBindingRelation.deleteCallExpected() + Expect(fakeClient.CoreV1().ServiceAccounts(ns).Delete(ctx, saName, metav1.DeleteOptions{})).NotTo(HaveOccurred()) + syncStatus(checkAll) + + clusterRoleHandler.deleteCallExpected() + clusterRoleBindingHandler.relatedCallExpected() + roleBindingRelation.deleteCallExpected() + Expect(fakeClient.RbacV1().ClusterRoles().Delete(ctx, crName, metav1.DeleteOptions{})).NotTo(HaveOccurred()) + syncStatus(checkAll) + + saHandler.addCallExpected() + clusterRoleBindingHandler.relatedCallExpected() + saBindingRelation.addCallExpected() + Expect(fakeClient.CoreV1().ServiceAccounts(ns).Create(ctx, newServiceAccount(ns, saName), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + clusterRoleHandler.addCallExpected() + clusterRoleBindingHandler.relatedCallExpected() + roleBindingRelation.addCallExpected() + Expect(fakeClient.RbacV1().ClusterRoles().Create(ctx, newClusterRole(crName), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + Eventually(func(g Gomega) { + clusterrolebindingNode, _ := clusterRoleBindingStorage.GetNode(types.NamespacedName{Name: crbName}) + g.Expect(clusterrolebindingNode).NotTo(BeNil()) + postNodes := clusterrolebindingNode.GetPostOrders() + g.Expect(len(postNodes)).To(Equal(2)) + }).Should(Succeed()) + }) + + It("create all and delete all", func() { + ns := "testclusterresource" + crbName := "crbtest" + crName := "crName" + saName := "saName" + + clusterRoleBindingHandler.addCallExpected() + Expect(fakeClient.RbacV1().ClusterRoleBindings().Create(ctx, newClusterRoleBinding(crbName, crName, []types.NamespacedName{{Name: saName, Namespace: ns}}), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + saHandler.addCallExpected() + clusterRoleBindingHandler.relatedCallExpected() + saBindingRelation.addCallExpected() + Expect(fakeClient.CoreV1().ServiceAccounts(ns).Create(ctx, newServiceAccount(ns, saName), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + clusterRoleHandler.addCallExpected() + clusterRoleBindingHandler.relatedCallExpected() + roleBindingRelation.addCallExpected() + Expect(fakeClient.RbacV1().ClusterRoles().Create(ctx, newClusterRole(crName), metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + saHandler.deleteCallExpected() + clusterRoleBindingHandler.relatedCallExpected() + saBindingRelation.deleteCallExpected() + Expect(fakeClient.CoreV1().ServiceAccounts(ns).Delete(ctx, saName, metav1.DeleteOptions{})).NotTo(HaveOccurred()) + syncStatus(checkAll) + + clusterRoleHandler.deleteCallExpected() + clusterRoleBindingHandler.relatedCallExpected() + roleBindingRelation.deleteCallExpected() + Expect(fakeClient.RbacV1().ClusterRoles().Delete(ctx, crName, metav1.DeleteOptions{})).NotTo(HaveOccurred()) + syncStatus(checkAll) + + clusterRoleBindingHandler.deleteCallExpected() + Expect(fakeClient.RbacV1().ClusterRoleBindings().Delete(ctx, crbName, metav1.DeleteOptions{})).NotTo(HaveOccurred()) + syncStatus(checkAll) + + 
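// once everything is deleted, every node should be purged from storage +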
Eventually(func(g Gomega) { + g.Expect(clusterRoleBindingStorage.GetNode(types.NamespacedName{Name: crbName})).To(BeNil()) + g.Expect(clusterroleStorage.GetNode(types.NamespacedName{Name: crName})).To(BeNil()) + g.Expect(saStorage.GetNode(types.NamespacedName{Name: saName, Namespace: ns})).To(BeNil()) + }).Should(Succeed()) + }) +}) + +var _ = Describe("test suite with svc and pod config(label selector and reverse notice configured)", func() { + var manager Manager + var fakeClient *fake.Clientset + var podHandler, svcHandler *objecthandler + var podSvcRelation *relationHandler + + var podStorage, svcStorage TopoNodeStorage + var ctx context.Context + var cancel func() + + checkAll := func() bool { + return podHandler.matchExpected() && + svcHandler.matchExpected() && + podSvcRelation.matchExpected() + } + + BeforeEach(func() { + fakeClient = fake.NewSimpleClientset() + k8sInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0) + var err error + manager, err = NewResourcesTopoManager(*buildManagerConfig(buildSvcPodTest(k8sInformerFactory))) + Expect(err).NotTo(HaveOccurred()) + podStorage, _ = manager.GetTopoNodeStorage(PodMeta) + Expect(podStorage).NotTo(BeNil()) + svcStorage, _ = manager.GetTopoNodeStorage(ServiceMeta) + Expect(svcStorage).NotTo(BeNil()) + + ctx, cancel = context.WithCancel((context.Background())) + podHandler = &objecthandler{} + Expect(manager.AddNodeHandler(PodMeta, podHandler)).To(BeNil()) + svcHandler = &objecthandler{} + Expect(manager.AddNodeHandler(ServiceMeta, svcHandler)).To(BeNil()) + + podSvcRelation = &relationHandler{} + Expect(manager.AddRelationHandler(ServiceMeta, PodMeta, podSvcRelation)).To(BeNil()) + + manager.Start(ctx.Done()) + k8sInformerFactory.Start(ctx.Done()) + k8sInformerFactory.WaitForCacheSync(ctx.Done()) + }) + AfterEach(func() { + if !checkAll() { + klog.Infof("end with object [%s, %s]", podHandler.string(), svcHandler.string()) + klog.Infof("end with relation [%s]", podSvcRelation.string()) + } + cancel() + }) + + It("create pod and svc", func() { + var err error + podName := "test1" + svcName := "testSvc" + + podHandler.addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, podName, "apps", podName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + svcHandler.addCallExpected() + podHandler.relatedCallExpected() + podSvcRelation.addCallExpected() + _, err = fakeClient.CoreV1().Services(namespaceDefault).Create(ctx, newSvc(namespaceDefault, svcName, "apps", podName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + + Eventually(func(g Gomega) { + pod, _ := podStorage.GetNode(types.NamespacedName{Namespace: namespaceDefault, Name: podName}) + g.Expect(pod).NotTo(BeNil()) + g.Expect(len(pod.GetPostOrders())).To(Equal(0)) + g.Expect(len(pod.GetPreOrders())).To(Equal(1)) + + g.Expect(pod.TypeInfo()).To(Equal(PodMeta)) + g.Expect(pod.NodeInfo()).To(Equal(types.NamespacedName{Namespace: namespaceDefault, Name: podName})) + + g.Expect(checkAll()).To(Equal(true)) + }).Should(Succeed()) + }) + + It("create svc and pod", func() { + var err error + podName := "testPod" + svcName := "testSvc" + + svcHandler.addCallExpected() + _, err = fakeClient.CoreV1().Services(namespaceDefault).Create(ctx, newSvc(namespaceDefault, svcName, "apps", podName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + podHandler.addCallExpected() + podSvcRelation.addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, 
newPod(namespaceDefault, podName, "apps", podName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + + Eventually(func(g Gomega) { + svcNode, _ := svcStorage.GetNode(types.NamespacedName{Namespace: namespaceDefault, Name: svcName}) + g.Expect(svcNode).NotTo(BeNil()) + g.Expect(len(svcNode.GetPreOrders())).To(Equal(0)) + g.Expect(len(svcNode.GetPostOrders())).To(Equal(1)) + + g.Expect(svcNode.TypeInfo()).To(Equal(ServiceMeta)) + g.Expect(svcNode.NodeInfo()).To(Equal(types.NamespacedName{Namespace: namespaceDefault, Name: svcName})) + + g.Expect(checkAll()).To(Equal(true)) + }).Should(Succeed()) + }) + + It("create svc and two pods", func() { + var err error + podName := "testPod" + pod2Name := "testPod2" + svcName := "testSvc" + + svcHandler.addCallExpected() + _, err = fakeClient.CoreV1().Services(namespaceDefault).Create(ctx, newSvc(namespaceDefault, svcName, "apps", svcName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + podHandler.addCallExpected() + podSvcRelation.addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, podName, "apps", svcName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + podHandler.addCallExpected() + podSvcRelation.addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, pod2Name, "apps", svcName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + + Eventually(func(g Gomega) { + svcNode, _ := svcStorage.GetNode(types.NamespacedName{Namespace: namespaceDefault, Name: svcName}) + g.Expect(svcNode).NotTo(BeNil()) + g.Expect(len(svcNode.GetPreOrders())).To(Equal(0)) + g.Expect(len(svcNode.GetPostOrders())).To(Equal(2)) + + g.Expect(svcNode.TypeInfo()).To(Equal(ServiceMeta)) + g.Expect(svcNode.NodeInfo()).To(Equal(types.NamespacedName{Namespace: namespaceDefault, Name: svcName})) + + g.Expect(checkAll()).To(Equal(true)) + }).Should(Succeed()) + }) + + It("create pod, svc and pod", func() { + var err error + podName := "testPod" + pod2Name := "testPod2" + svcName := "testSvc" + + podHandler.addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, podName, "apps", svcName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + svcHandler.addCallExpected() + podHandler.relatedCallExpected() + podSvcRelation.addCallExpected() + _, err = fakeClient.CoreV1().Services(namespaceDefault).Create(ctx, newSvc(namespaceDefault, svcName, "apps", svcName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + podHandler.addCallExpected() + podSvcRelation.addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, pod2Name, "apps", svcName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + + Eventually(func(g Gomega) { + svcNode, _ := svcStorage.GetNode(types.NamespacedName{Namespace: namespaceDefault, Name: svcName}) + g.Expect(svcNode).NotTo(BeNil()) + g.Expect(len(svcNode.GetPreOrders())).To(Equal(0)) + g.Expect(len(svcNode.GetPostOrders())).To(Equal(2)) + + g.Expect(svcNode.TypeInfo()).To(Equal(ServiceMeta)) + g.Expect(svcNode.NodeInfo()).To(Equal(types.NamespacedName{Namespace: namespaceDefault, Name: svcName})) + + g.Expect(checkAll()).To(Equal(true)) + }).Should(Succeed()) + }) + + It("create svc and no relation pod", func() { + var err error + podName := "testPod" + pod2Name := "testPod2" + svcName := "testSvc" + + podHandler.addCallExpected() + _, err = 
fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, podName, "apps", podName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + svcHandler.addCallExpected() + _, err = fakeClient.CoreV1().Services(namespaceDefault).Create(ctx, newSvc(namespaceDefault, svcName, "apps", svcName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + podHandler.addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, pod2Name, "apps", pod2Name), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + }) + + It("create two svc-pod group", func() { + var err error + podName := "testPod" + podName2 := "testPod2" + podName3 := "testPod3" + svcName := "testSvc" + svcName2 := "testSvc2" + + podHandler.addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, podName, "app1", svcName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + podHandler.addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, podName2, "app2", svcName2), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + svcHandler.addCallExpected() + podSvcRelation.addCallExpected() + podHandler.relatedCallExpected() + _, err = fakeClient.CoreV1().Services(namespaceDefault).Create(ctx, newSvc(namespaceDefault, svcName, "app1", svcName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + svcHandler.addCallExpected() + podSvcRelation.addCallExpected() + podHandler.relatedCallExpected() + _, err = fakeClient.CoreV1().Services(namespaceDefault).Create(ctx, newSvc(namespaceDefault, svcName2, "app2", svcName2), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + podHandler.addCallExpected() + podSvcRelation.addCallExpected().addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, podName3, "app2", svcName2, "app1", svcName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + }) + + It("create pod and svc; delete pod", func() { + var err error + podName := "test1" + svcName := "testSvc" + + podHandler.addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, podName, "apps", podName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + svcHandler.addCallExpected() + podHandler.relatedCallExpected() + podSvcRelation.addCallExpected() + _, err = fakeClient.CoreV1().Services(namespaceDefault).Create(ctx, newSvc(namespaceDefault, svcName, "apps", podName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + svcHandler.deleteCallExpected() + podSvcRelation.deleteCallExpected() + podHandler.relatedCallExpected() + Expect(fakeClient.CoreV1().Services(namespaceDefault).Delete(ctx, svcName, metav1.DeleteOptions{})).To(BeNil()) + syncStatus(checkAll) + }) + + It("create pod and svc; delete svc", func() { + var err error + podName := "test1" + svcName := "testSvc" + + podHandler.addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, podName, "apps", podName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + svcHandler.addCallExpected() + podHandler.relatedCallExpected() + podSvcRelation.addCallExpected() + _, err = 
fakeClient.CoreV1().Services(namespaceDefault).Create(ctx, newSvc(namespaceDefault, svcName, "apps", podName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + podHandler.deleteCallExpected() + podSvcRelation.deleteCallExpected() + Expect(fakeClient.CoreV1().Pods(namespaceDefault).Delete(ctx, podName, metav1.DeleteOptions{})).To(BeNil()) + syncStatus(checkAll) + }) + + It("create pod and svc; update svc", func() { + var err error + podName := "test1" + svcName := "testSvc" + + podHandler.addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, podName, "apps", podName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + svcHandler.addCallExpected() + podHandler.relatedCallExpected() + podSvcRelation.addCallExpected() + _, err = fakeClient.CoreV1().Services(namespaceDefault).Create(ctx, newSvc(namespaceDefault, svcName, "apps", podName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + svcHandler.updateCallExpected() + podHandler.relatedCallExpected() + _, err = fakeClient.CoreV1().Services(namespaceDefault).Update(ctx, newSvc(namespaceDefault, svcName, "apps", podName), metav1.UpdateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + }) + + It("create pod and svc; update svc selector to no longer match", func() { + var err error + podName := "test1" + svcName := "testSvc" + + podHandler.addCallExpected() + _, err = fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, newPod(namespaceDefault, podName, "apps", podName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + svcHandler.addCallExpected() + podHandler.relatedCallExpected() + podSvcRelation.addCallExpected() + _, err = fakeClient.CoreV1().Services(namespaceDefault).Create(ctx, newSvc(namespaceDefault, svcName, "apps", podName), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + svcHandler.updateCallExpected() + podHandler.relatedCallExpected() + podSvcRelation.deleteCallExpected() + _, err = fakeClient.CoreV1().Services(namespaceDefault).Update(ctx, newSvc(namespaceDefault, svcName, "apps", podName+"failed"), metav1.UpdateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + }) +}) + +var _ = Describe("test suite with deploy config(label selector and owner reference configured)", func() { + var manager Manager + var fakeClient *fake.Clientset + var podHandler, replicasetHandler, deployHandler *objecthandler + var podRsRelation, rsDeployRelation *relationHandler + + var podStorage, replicasetStorage, deployStorage TopoNodeStorage + var ctx context.Context + var cancel func() + + checkAll := func() bool { + return podHandler.matchExpected() && + replicasetHandler.matchExpected() && + deployHandler.matchExpected() && + podRsRelation.matchExpected() && + rsDeployRelation.matchExpected() + } + + BeforeEach(func() { + fakeClient = fake.NewSimpleClientset() + k8sInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0) + var err error + manager, err = NewResourcesTopoManager(*buildManagerConfig(buildDeployTopoConfig(k8sInformerFactory))) + Expect(err).NotTo(HaveOccurred()) + replicasetStorage, _ = manager.GetTopoNodeStorage(ReplicaSetMeta) + podStorage, _ = manager.GetTopoNodeStorage(PodMeta) + deployStorage, _ = manager.GetTopoNodeStorage(DeployMeta) + Expect(replicasetStorage).NotTo(BeNil()) + Expect(podStorage).NotTo(BeNil()) + Expect(deployStorage).NotTo(BeNil()) + + ctx, cancel = context.WithCancel((context.Background())) 
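+ // this suite exercises OwnerRelation: a relation is added only when the selector matches and the post object carries a matching ownerReference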
+ podHandler = &objecthandler{} + Expect(manager.AddNodeHandler(PodMeta, podHandler)).Should(Succeed()) + replicasetHandler = &objecthandler{} + Expect(manager.AddNodeHandler(ReplicaSetMeta, replicasetHandler)).Should(Succeed()) + deployHandler = &objecthandler{} + Expect(manager.AddNodeHandler(DeployMeta, deployHandler)).Should(Succeed()) + + podRsRelation = &relationHandler{} + Expect(manager.AddRelationHandler(ReplicaSetMeta, PodMeta, podRsRelation)).Should(Succeed()) + rsDeployRelation = &relationHandler{} + Expect(manager.AddRelationHandler(DeployMeta, ReplicaSetMeta, rsDeployRelation)).Should(Succeed()) + + manager.Start(ctx.Done()) + k8sInformerFactory.Start(ctx.Done()) + k8sInformerFactory.WaitForCacheSync(ctx.Done()) + }) + AfterEach(func() { + if !checkAll() { + klog.Infof("end with object [rs %s, pod %s, deploy %s]", replicasetHandler.string(), podHandler.string(), deployHandler.string()) + klog.Infof("end with relation [rsDeploy %s, podRs %s]", rsDeployRelation.string(), podRsRelation.string()) + } + cancel() + }) + + It("create objects with label owner match", func() { + deployName := "testDeploy" + rsName := deployName + "zxcvb" + podName := rsName + "asdfg" + labels := []string{"apps", deployName} + var err error + + deployHandler.addCallExpected() + _, err = fakeClient.AppsV1().Deployments(namespaceDefault).Create(ctx, newDeploy(namespaceDefault, deployName, labels...), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + replicasetHandler.addCallExpected() + rsDeployRelation.addCallExpected() + deployHandler.relatedCallExpected() + rs := newReplicaSet(namespaceDefault, rsName, labels...) + setOwner(rs, DeployMeta, deployName) + Expect(fakeClient.AppsV1().ReplicaSets(namespaceDefault).Create(ctx, rs, metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + podHandler.addCallExpected() + podRsRelation.addCallExpected() + replicasetHandler.relatedCallExpected() + deployHandler.relatedCallExpected() + pod := newPod(namespaceDefault, podName, labels...) + setOwner(pod, ReplicaSetMeta, rsName) + Expect(fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, pod, metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + }) + + It("create objects with label owner match(2)", func() { + deployName := "testDeploy" + rsName := deployName + "zxcvb" + podName := rsName + "asdfg" + labels := []string{"apps", deployName} + var err error + + podHandler.addCallExpected() + pod := newPod(namespaceDefault, podName, labels...) + setOwner(pod, ReplicaSetMeta, rsName) + Expect(fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, pod, metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + replicasetHandler.addCallExpected() + podRsRelation.addCallExpected() + rs := newReplicaSet(namespaceDefault, rsName, labels...) 
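+ // the ownerReference to the deploy is what qualifies the rs-deploy relation under OwnerRelation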
+ setOwner(rs, DeployMeta, deployName) + Expect(fakeClient.AppsV1().ReplicaSets(namespaceDefault).Create(ctx, rs, metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + deployHandler.addCallExpected() + rsDeployRelation.addCallExpected() + _, err = fakeClient.AppsV1().Deployments(namespaceDefault).Create(ctx, newDeploy(namespaceDefault, deployName, labels...), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + }) + + It("create objects with label match but owner not match", func() { + deployName := "testDeploy" + rsName := deployName + "zxcvb" + podName := rsName + "asdfg" + labels := []string{"apps", deployName} + var err error + + deployHandler.addCallExpected() + _, err = fakeClient.AppsV1().Deployments(namespaceDefault).Create(ctx, newDeploy(namespaceDefault, deployName, labels...), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + replicasetHandler.addCallExpected() + rsDeployRelation.addCallExpected() + deployHandler.relatedCallExpected() + rs := newReplicaSet(namespaceDefault, rsName, labels...) + setOwner(rs, DeployMeta, deployName) + Expect(fakeClient.AppsV1().ReplicaSets(namespaceDefault).Create(ctx, rs, metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + podHandler.addCallExpected() + pod := newPod(namespaceDefault, podName, labels...) + setOwner(pod, ReplicaSetMeta, rsName+"failed") + Expect(fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, pod, metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + }) + + It("create objects with label match but owner not match(2)", func() { + deployName := "testDeploy" + rsName := deployName + "zxcvb" + podName := rsName + "asdfg" + labels := []string{"apps", deployName} + var err error + + replicasetHandler.addCallExpected() + rs := newReplicaSet(namespaceDefault, rsName, labels...) + setOwner(rs, DeployMeta, deployName) + Expect(fakeClient.AppsV1().ReplicaSets(namespaceDefault).Create(ctx, rs, metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + podHandler.addCallExpected() + podRsRelation.addCallExpected() + replicasetHandler.relatedCallExpected() + pod := newPod(namespaceDefault, podName, labels...) + setOwner(pod, ReplicaSetMeta, rsName) + Expect(fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, pod, metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + deployHandler.addCallExpected() + _, err = fakeClient.AppsV1().Deployments(namespaceDefault).Create(ctx, newDeploy(namespaceDefault, deployName+"failed", labels...), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + }) + + It("create two deploy with same label config", func() { + deployName := "testDeploy" + rsName := deployName + "zxcvb" + podName1 := rsName + "asdfg" + podName2 := rsName + "asdfh" + + deployName2 := "testDeploy2" + rsName2 := deployName2 + "zxcvc" + podName21 := rsName2 + "asdfi" + + labels := []string{"apps", deployName} + var err error + + deployHandler.addCallExpected() + _, err = fakeClient.AppsV1().Deployments(namespaceDefault).Create(ctx, newDeploy(namespaceDefault, deployName, labels...), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + replicasetHandler.addCallExpected() + rsDeployRelation.addCallExpected() + deployHandler.relatedCallExpected() + rs := newReplicaSet(namespaceDefault, rsName, labels...) 
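+ // both deploys share identical labels, so the ownerReference alone decides which deploy each rs and pod attaches to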
+ setOwner(rs, DeployMeta, deployName) + Expect(fakeClient.AppsV1().ReplicaSets(namespaceDefault).Create(ctx, rs, metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + podHandler.addCallExpected() + podRsRelation.addCallExpected() + replicasetHandler.relatedCallExpected() + deployHandler.relatedCallExpected() + pod := newPod(namespaceDefault, podName1, labels...) + setOwner(pod, ReplicaSetMeta, rsName) + Expect(fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, pod, metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + podHandler.addCallExpected() + podRsRelation.addCallExpected() + replicasetHandler.relatedCallExpected() + deployHandler.relatedCallExpected() + pod2 := newPod(namespaceDefault, podName2, labels...) + setOwner(pod2, ReplicaSetMeta, rsName) + Expect(fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, pod2, metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + podHandler.addCallExpected() + pod21 := newPod(namespaceDefault, podName21, labels...) + setOwner(pod21, ReplicaSetMeta, rsName2) + Expect(fakeClient.CoreV1().Pods(namespaceDefault).Create(ctx, pod21, metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + replicasetHandler.addCallExpected() + podRsRelation.addCallExpected() + rs2 := newReplicaSet(namespaceDefault, rsName2, labels...) + setOwner(rs2, DeployMeta, deployName2) + Expect(fakeClient.AppsV1().ReplicaSets(namespaceDefault).Create(ctx, rs2, metav1.CreateOptions{})).NotTo(BeNil()) + syncStatus(checkAll) + + deployHandler.addCallExpected() + rsDeployRelation.addCallExpected() + _, err = fakeClient.AppsV1().Deployments(namespaceDefault).Create(ctx, newDeploy(namespaceDefault, deployName2, labels...), metav1.CreateOptions{}) + Expect(err).To(BeNil()) + syncStatus(checkAll) + + Eventually(func(g Gomega) { + rs1, _ := replicasetStorage.GetNode(types.NamespacedName{Name: rsName, Namespace: namespaceDefault}) + g.Expect(rs1).NotTo(BeNil()) + g.Expect(len(rs1.GetPreOrders())).To(Equal(1)) + g.Expect(len(rs1.GetPostOrders())).To(Equal(2)) + + rs2, _ := replicasetStorage.GetNode(types.NamespacedName{Name: rsName2, Namespace: namespaceDefault}) + g.Expect(rs2).NotTo(BeNil()) + g.Expect(len(rs2.GetPreOrders())).To(Equal(1)) + g.Expect(len(rs2.GetPostOrders())).To(Equal(1)) + }).Should(Succeed()) + }) +}) diff --git a/resourcetopo/resourcetopo_utils_test.go b/resourcetopo/resourcetopo_utils_test.go new file mode 100644 index 0000000..67e4e79 --- /dev/null +++ b/resourcetopo/resourcetopo_utils_test.go @@ -0,0 +1,487 @@ +/** + * Copyright 2024 KusionStack Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package resourcetopo + +import ( + "fmt" + + "github.com/hashicorp/consul/sdk/testutil/retry" + .
"github.com/onsi/ginkgo" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/informers" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" +) + +const namespaceDefault = "default" + +var ( + PodMeta = metav1.TypeMeta{Kind: "Pod", APIVersion: "core/v1"} + ServiceMeta = metav1.TypeMeta{Kind: "Service", APIVersion: "core/v1"} + ReplicaSetMeta = metav1.TypeMeta{Kind: "ReplicaSet", APIVersion: "apps/v1"} + DeployMeta = metav1.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"} + StatefulSetMeta = metav1.TypeMeta{Kind: "StatefulSet", APIVersion: "apps/v1"} + InspectStatefulSetMeta = metav1.TypeMeta{Kind: "StatefulSet", APIVersion: "inspect/v1"} + ClusterRoleBindingMeta = metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac/v1"} + ClusterRoleMeta = metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac/v1"} + ServiceAccountMeta = metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "core/v1"} +) + +func GetInformer(meta metav1.TypeMeta, k8sInformerFactory informers.SharedInformerFactory) cache.SharedIndexInformer { + gvk := meta.String() + switch gvk { + case PodMeta.String(): + return k8sInformerFactory.Core().V1().Pods().Informer() + case ServiceMeta.String(): + return k8sInformerFactory.Core().V1().Services().Informer() + case StatefulSetMeta.String(): + return k8sInformerFactory.Apps().V1().StatefulSets().Informer() + case ClusterRoleBindingMeta.String(): + return k8sInformerFactory.Rbac().V1().ClusterRoleBindings().Informer() + case ClusterRoleMeta.String(): + return k8sInformerFactory.Rbac().V1().ClusterRoles().Informer() + case ServiceAccountMeta.String(): + return k8sInformerFactory.Core().V1().ServiceAccounts().Informer() + case DeployMeta.String(): + return k8sInformerFactory.Apps().V1().Deployments().Informer() + case ReplicaSetMeta.String(): + return k8sInformerFactory.Apps().V1().ReplicaSets().Informer() + + default: + return nil + } +} + +func buildManagerConfig(config *TopologyConfig) *ManagerConfig { + return &ManagerConfig{ + TopologyConfig: config, + } +} + +func buildInspectTopoConfig(k8sInformerFactory informers.SharedInformerFactory) *TopologyConfig { + return &TopologyConfig{ + GetInformer: func(meta metav1.TypeMeta) cache.SharedInformer { + return GetInformer(meta, k8sInformerFactory) + }, + Resolvers: []RelationResolver{ + { + PreMeta: StatefulSetMeta, + PostMetas: []metav1.TypeMeta{PodMeta}, + Resolve: func(preOrder Object) []ResourceRelation { + stsObj, ok := preOrder.(*appsv1.StatefulSet) + if !ok { + return nil + } + labelSelector := stsObj.Spec.Selector + return []ResourceRelation{ + { + PostMeta: PodMeta, + LabelSelector: labelSelector, + }, + } + }, + }, + }, + Discoverers: []VirtualResourceDiscoverer{ + { + PreMeta: InspectStatefulSetMeta, + PostMeta: StatefulSetMeta, + Discover: func(postObject Object) []types.NamespacedName { + sts := postObject.(*appsv1.StatefulSet) + + return []types.NamespacedName{ + { + Name: sts.Name, + Namespace: sts.Namespace, + }, + } + }, + }, + }, + } +} + +func buildClusterTest(k8sInformerFactory informers.SharedInformerFactory) *TopologyConfig { + return &TopologyConfig{ + GetInformer: func(meta metav1.TypeMeta) cache.SharedInformer { + return GetInformer(meta, k8sInformerFactory) + }, + Resolvers: []RelationResolver{ + { + PreMeta: ClusterRoleBindingMeta, + PostMetas: []metav1.TypeMeta{ClusterRoleMeta, PodMeta, ServiceAccountMeta}, + Resolve: func(preOrder Object) 
[]ResourceRelation { + crbObject, ok := preOrder.(*rbacv1.ClusterRoleBinding) + if !ok { + return nil + } + var saRefs []types.NamespacedName + + for _, s := range crbObject.Subjects { + switch s.Kind { + case ServiceAccountMeta.Kind: + saRefs = append(saRefs, types.NamespacedName{Name: s.Name, Namespace: s.Namespace}) + } + } + return []ResourceRelation{ + { + PostMeta: ClusterRoleMeta, + DirectRefs: []types.NamespacedName{ + { + Name: crbObject.RoleRef.Name, + }, + }, + }, + { + PostMeta: ServiceAccountMeta, + DirectRefs: saRefs, + }, + } + }, + }, + }, + } +} + +func buildSvcPodTest(k8sInformerFactory informers.SharedInformerFactory) *TopologyConfig { + return &TopologyConfig{ + GetInformer: func(meta metav1.TypeMeta) cache.SharedInformer { + return GetInformer(meta, k8sInformerFactory) + }, + Resolvers: []RelationResolver{ + { + + PreMeta: ServiceMeta, + PostMetas: []metav1.TypeMeta{PodMeta}, + ReverseNotice: []metav1.TypeMeta{PodMeta}, + Resolve: func(preOrder Object) []ResourceRelation { + svcObject, ok := preOrder.(*corev1.Service) + if !ok { + return nil + } + label := (svcObject.Spec.Selector) + return []ResourceRelation{ + { + PostMeta: PodMeta, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: label, + }, + }, + } + }, + }, + }, + } +} + +func buildDeployTopoConfig(k8sInformerFactory informers.SharedInformerFactory) *TopologyConfig { + return &TopologyConfig{ + GetInformer: func(meta metav1.TypeMeta) cache.SharedInformer { + return GetInformer(meta, k8sInformerFactory) + }, + Resolvers: []RelationResolver{ + { + PreMeta: DeployMeta, + PostMetas: []metav1.TypeMeta{ReplicaSetMeta}, + OwnerRelation: []metav1.TypeMeta{ReplicaSetMeta}, + Resolve: func(preOrder Object) []ResourceRelation { + deployObj, ok := preOrder.(*appsv1.Deployment) + if !ok { + return nil + } + labelSelector := deployObj.Spec.Selector + return []ResourceRelation{ + { + PostMeta: ReplicaSetMeta, + LabelSelector: labelSelector, + }, + } + }, + }, + { + PreMeta: ReplicaSetMeta, + PostMetas: []metav1.TypeMeta{PodMeta}, + OwnerRelation: []metav1.TypeMeta{PodMeta}, + Resolve: func(preOrder Object) []ResourceRelation { + rsObj, ok := preOrder.(*appsv1.ReplicaSet) + if !ok { + return nil + } + labelSelector := rsObj.Spec.Selector + return []ResourceRelation{ + { + PostMeta: PodMeta, + LabelSelector: labelSelector, + }, + } + }, + }, + }, + } +} + +func newPod(namespace, name string, labels ...string) *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: *newObjectMeta(namespace, name, labels), + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: name, + Image: "busybox", + }, + }, + }, + } +} + +func newStatefulSet(namespace, name string, labels ...string) *appsv1.StatefulSet { + sts := &appsv1.StatefulSet{ + ObjectMeta: *newObjectMeta(namespace, name, labels), + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + + Name: name, + Image: "busybox", + }, + }, + }, + }, + }, + } + sts.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: sts.ObjectMeta.Labels, + } + return sts +} + +func newSvc(namespace, name string, labels ...string) *corev1.Service { + svc := &corev1.Service{ + ObjectMeta: *newObjectMeta(namespace, name, labels), + Spec: corev1.ServiceSpec{}, + } + svc.Spec.Selector = svc.Labels + return svc +} + +func newDeploy(namespace, name string, labels ...string) *appsv1.Deployment { + deploy := &appsv1.Deployment{ + ObjectMeta: *newObjectMeta(namespace, name, labels), + Spec: appsv1.DeploymentSpec{ + Template: 
corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + + Name: name, + Image: "busybox", + }, + }, + }, + }, + }, + } + deploy.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: deploy.ObjectMeta.Labels, + } + return deploy +} + +func newReplicaSet(namespace, name string, labels ...string) *appsv1.ReplicaSet { + replicaset := &appsv1.ReplicaSet{ + ObjectMeta: *newObjectMeta(namespace, name, labels), + Spec: appsv1.ReplicaSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + + Name: name, + Image: "busybox", + }, + }, + }, + }, + }, + } + replicaset.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: replicaset.ObjectMeta.Labels, + } + return replicaset +} + +func newClusterRole(name string) *rbacv1.ClusterRole { + return &rbacv1.ClusterRole{ + ObjectMeta: *newObjectMeta("", name, nil), + } +} + +func newClusterRoleBinding(name string, clusterRole string, sas []types.NamespacedName) *rbacv1.ClusterRoleBinding { + crb := &rbacv1.ClusterRoleBinding{ + ObjectMeta: *newObjectMeta("", name, nil), + RoleRef: rbacv1.RoleRef{ + APIGroup: ClusterRoleMeta.APIVersion, + Kind: ClusterRoleMeta.Kind, + Name: clusterRole, + }, + } + for _, sa := range sas { + crb.Subjects = append(crb.Subjects, rbacv1.Subject{ + Kind: ServiceAccountMeta.Kind, + Name: sa.Name, + Namespace: sa.Namespace, + }) + } + return crb +} + +func newServiceAccount(namespace, name string, labels ...string) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: *newObjectMeta(namespace, name, labels), + } +} + +func syncStatus(f func() bool) { + retry.RunWith(retry.TwoSeconds(), GinkgoT(), func(r *retry.R) { + if !f() { + r.Fatal() + } + }) +} + +func newObjectMeta(namespace, name string, labels []string) *metav1.ObjectMeta { + labelMap := make(map[string]string) + for i := 0; i < len(labels)-1; i += 2 { + labelMap[labels[i]] = labels[i+1] + } + return &metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labelMap, + } +} + +func setOwner(object metav1.Object, meta metav1.TypeMeta, ownerName string) { + object.SetOwnerReferences(append(object.GetOwnerReferences(), metav1.OwnerReference{ + APIVersion: meta.APIVersion, + Kind: meta.Kind, + Name: ownerName, + })) +} + +var _ NodeHandler = &objecthandler{} + +type objecthandler struct { + addCounter int + updateCounter int + relatedCounter int + deletedCounter int +} + +// change loglevel flag to 0 to enable log output +const loglevel = 1 + +func (o *objecthandler) OnAdd(info NodeInfo) { + klog.V(loglevel).Infof("received added object %v %v", info.TypeInfo(), info.NodeInfo()) + o.addCounter-- +} + +func (o *objecthandler) OnUpdate(info NodeInfo) { + klog.V(loglevel).Infof("received updated object %v %v", info.TypeInfo(), info.NodeInfo()) + o.updateCounter-- +} + +func (o *objecthandler) OnDelete(info NodeInfo) { + klog.V(loglevel).Infof("received deleted object %v %v", info.TypeInfo(), info.NodeInfo()) + o.deletedCounter-- +} + +func (o *objecthandler) OnRelatedUpdate(info NodeInfo) { + klog.V(loglevel).Infof("received related updated object %v %v", info.TypeInfo(), info.NodeInfo()) + o.relatedCounter-- +} + +func (o *objecthandler) addCallExpected() *objecthandler { + o.addCounter++ + return o +} + +func (o *objecthandler) updateCallExpected() *objecthandler { + o.updateCounter++ + return o +} + +func (o *objecthandler) deleteCallExpected() *objecthandler { + o.deletedCounter++ + return o +} + +func (o *objecthandler) relatedCallExpected() *objecthandler { + 
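// expectation helpers increment the counters and handler callbacks decrement them, so all-zero means every expected notification arrived +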
o.relatedCounter++ + return o +} + +func (o *objecthandler) matchExpected() bool { + return o.addCounter == 0 && o.updateCounter == 0 && o.deletedCounter == 0 && o.relatedCounter == 0 +} + +func (o *objecthandler) string() string { + return fmt.Sprintf("{add: %d, update: %d, delete %d, relatedUpdate %d}", o.addCounter, o.updateCounter, o.deletedCounter, o.relatedCounter) +} + +var _ RelationHandler = &relationHandler{} + +type relationHandler struct { + addCounter int + deleteCounter int +} + +func (r *relationHandler) OnAdd(preNode NodeInfo, postNode NodeInfo) { + klog.V(loglevel).Infof("received added relation, preNode %v %v, postNode %v %v", + preNode.TypeInfo(), preNode.NodeInfo(), postNode.TypeInfo(), postNode.NodeInfo()) + r.addCounter-- +} + +func (r *relationHandler) OnDelete(preNode NodeInfo, postNode NodeInfo) { + klog.V(loglevel).Infof("received deleted relation, preNode %v %v, postNode %v %v", + preNode.TypeInfo(), preNode.NodeInfo(), postNode.TypeInfo(), postNode.NodeInfo()) + r.deleteCounter-- +} + +func (r *relationHandler) addCallExpected() *relationHandler { + r.addCounter++ + return r +} + +func (r *relationHandler) deleteCallExpected() *relationHandler { + r.deleteCounter++ + return r +} + +func (r *relationHandler) matchExpected() bool { + return r.addCounter == 0 && r.deleteCounter == 0 +} + +func (r *relationHandler) string() string { + return fmt.Sprintf("{add: %d, delete %d}", r.addCounter, r.deleteCounter) +} diff --git a/resourcetopo/types.go b/resourcetopo/types.go new file mode 100644 index 0000000..2f91fb6 --- /dev/null +++ b/resourcetopo/types.go @@ -0,0 +1,170 @@ +/** + * Copyright 2024 KusionStack Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package resourcetopo + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" +) + +// TopologyConfig offers a way to describe the relations among kubernetes or kubernetes-like resources. +type TopologyConfig struct { + // GetInformer returns a working SharedInformer for objects of the given meta type + GetInformer func(meta metav1.TypeMeta) cache.SharedInformer + Resolvers []RelationResolver + Discoverers []VirtualResourceDiscoverer +} + +type ManagerConfig struct { + NodeEventQueueSize int // size of NodeEvent cache channel, defaults to defaultNodeEventQueueSize + RelationEventQueueSize int // size of RelationEvent cache channel, defaults to defaultRelationEventQueueSize + + TopologyConfig *TopologyConfig +} + +type Manager interface { + // AddTopologyConfig adds a new resource topology config to this manager + AddTopologyConfig(config TopologyConfig) error + + // AddRelationHandler adds a new handler for relation changes between + // the two node types configured by preMeta and postMeta + AddRelationHandler(preMeta, postMeta metav1.TypeMeta, handler RelationHandler) error + + // AddNodeHandler adds a new node handler for nodes of the type configured by meta.
+
+type Manager interface {
+	// AddTopologyConfig adds a new resource topology config to this manager.
+	AddTopologyConfig(config TopologyConfig) error
+
+	// AddRelationHandler adds a new relation handler for relation changes
+	// between the two node types configured by preMeta and postMeta.
+	AddRelationHandler(preMeta, postMeta metav1.TypeMeta, handler RelationHandler) error
+
+	// AddNodeHandler adds a new node handler for nodes of the type configured by meta.
+	AddNodeHandler(meta metav1.TypeMeta, handler NodeHandler) error
+
+	// Start begins handling relation events and node events.
+	// Start can only be called once.
+	Start(stopCh <-chan struct{})
+
+	// GetTopoNodeStorage returns the ref to the TopoNodeStorage that matches the resource meta.
+	GetTopoNodeStorage(meta metav1.TypeMeta) (TopoNodeStorage, error)
+
+	// GetNode returns the ref to the Node that matches the resource meta and
+	// the node's name, if it exists.
+	GetNode(meta metav1.TypeMeta, name types.NamespacedName) (NodeInfo, error)
+}
+
+type TopoNodeStorage interface {
+	// GetNode returns the ref to the Node that matches the node's name.
+	GetNode(name types.NamespacedName) (NodeInfo, error)
+}
+
+type NodeInfo interface {
+	// TypeInfo returns the node's resource type info.
+	TypeInfo() metav1.TypeMeta
+
+	// NodeInfo returns the node's namespaced name.
+	NodeInfo() types.NamespacedName
+
+	// GetPreOrders returns the pre-order node slice for this node.
+	GetPreOrders() []NodeInfo
+
+	// GetPostOrders returns the post-order node slice for this node.
+	GetPostOrders() []NodeInfo
+
+	// GetPreOrdersWithMeta returns the pre-order node slice that matches this resource meta.
+	GetPreOrdersWithMeta(meta metav1.TypeMeta) []NodeInfo
+
+	// GetPostOrdersWithMeta returns the post-order node slice that matches this resource meta.
+	GetPostOrdersWithMeta(meta metav1.TypeMeta) []NodeInfo
+}
+
+// VirtualResourceDiscoverer supports discovering a 'virtual' resource from
+// another resource type and describes the relation between them. The virtual
+// resource could be an aggregated resource that does not exist in etcd, or a
+// business resource type such as an order id.
+type VirtualResourceDiscoverer struct {
+	// PreMeta defines the virtual resource type of the pre-order in the
+	// topology graph, which does not exist in the api-server.
+	PreMeta metav1.TypeMeta
+
+	// PostMeta defines the resource type to discover from; it may be a k8s
+	// resource, a custom resource, or another virtual resource.
+	PostMeta metav1.TypeMeta
+
+	// Discover resolves the given postObject and returns the names of its
+	// pre-order virtual objects.
+	Discover func(postObject Object) []types.NamespacedName
+}
+
+// RelationResolver offers a way to describe the relations between objects of
+// the PreMeta resource type and objects of the PostMetas resource types.
+type RelationResolver struct {
+	// PreMeta defines the resource type of the pre-order in the topology graph.
+	PreMeta metav1.TypeMeta
+
+	// PostMetas defines a series of resource types of post-orders in the topology graph.
+	PostMetas []metav1.TypeMeta
+
+	// Resolve resolves the given preObject and returns ResourceRelations of the PostMetas types.
+	Resolve func(preObject Object) []ResourceRelation
+
+	// OwnerRelation offers a way to configure the kubernetes owner-reference
+	// relation: for a workload-pod relation, a selector alone is not enough to
+	// express the relation between them. If a post meta is configured here, a
+	// relation is added only if the post node has the pre node as its owner.
+	// May be nil if not needed.
+	OwnerRelation []metav1.TypeMeta
+
+	// ReverseNotice offers a way to configure the notice propagation direction:
+	// in a pod diagnosis scenario, a pod's status may need to include its
+	// upstream service objects. If a post meta is configured here, the post
+	// node is noticed when the pre node changes and, conversely, the pre node
+	// is no longer noticed when the post node changes. May be nil if not needed.
+	ReverseNotice []metav1.TypeMeta
+}
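+
+// For example, a classic svc-pod relation could be expressed with a resolver
+// like the sketch below; serviceMeta and selectorOf are illustrative (a real
+// resolver would likely type-assert the Object to read the Service's selector):
+//
+//	RelationResolver{
+//		PreMeta:   serviceMeta,
+//		PostMetas: []metav1.TypeMeta{PodMeta},
+//		Resolve: func(svc Object) []ResourceRelation {
+//			return []ResourceRelation{{PostMeta: PodMeta, LabelSelector: selectorOf(svc)}}
+//		},
+//	}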
+
+// ResourceRelation is generated by the Resolve function set in a
+// RelationResolver to express the relation between a pre-order object and its
+// post-order object(s) of the PostMeta type.
+type ResourceRelation struct {
+	// PostMeta restricts the resource type of the post-order object(s).
+	PostMeta metav1.TypeMeta
+
+	// DirectRefs offers a direct-reference form for pod-pvc style relations; may be nil if empty.
+	DirectRefs []types.NamespacedName
+
+	// LabelSelector offers a kubernetes label-selector form for svc-pod style relations; may be nil if empty.
+	LabelSelector *metav1.LabelSelector
+}
+
+type RelationHandler interface {
+	// OnAdd is called after a relation is newly added between preOrder and postOrder.
+	OnAdd(preOrder NodeInfo, postOrder NodeInfo)
+
+	// OnDelete is called after a relation is deleted between preOrder and postOrder.
+	OnDelete(preOrder NodeInfo, postOrder NodeInfo)
+}
+
+type NodeHandler interface {
+	// OnAdd is called when the node's related resource object is added.
+	OnAdd(info NodeInfo)
+
+	// OnUpdate is called when the node's related resource object is updated.
+	OnUpdate(info NodeInfo)
+
+	// OnDelete is called when the node's related resource object is deleted.
+	OnDelete(info NodeInfo)
+
+	// OnRelatedUpdate is called when the node's post-order
+	// (or pre-order, if ReverseNotice is configured) object is updated.
+	OnRelatedUpdate(info NodeInfo)
+}
+
+// Object defines the getters needed for now; method names are kept consistent
+// with metav1.Object.
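+// Any standard Kubernetes API object satisfies it, since metav1.ObjectMeta
+// already provides these getters.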
+type Object interface {
+	GetName() string
+	GetNamespace() string
+	GetLabels() map[string]string
+	GetOwnerReferences() []metav1.OwnerReference
+}
diff --git a/resourcetopo/utils.go b/resourcetopo/utils.go
new file mode 100644
index 0000000..ddd75c0
--- /dev/null
+++ b/resourcetopo/utils.go
@@ -0,0 +1,182 @@
+/**
+ * Copyright 2024 KusionStack Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package resourcetopo
+
+import (
+	"container/list"
+	"fmt"
+	"strings"
+
+	"golang.org/x/exp/slices"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+func generateMetaKey(meta metav1.TypeMeta) string {
+	return fmt.Sprintf("%s/%s", meta.APIVersion, meta.Kind)
+}
+
+func generateKey(apiVersion, kind string) string {
+	return fmt.Sprintf("%s/%s", apiVersion, kind)
+}
+
+// checkNode walks the pre-order resources depth-first, keeping the current
+// path on stack; revisiting a type that is already on the stack means the
+// topology config is not a DAG. Every reached type is recorded in visited.
+func checkNode(s *nodeStorage, stack *list.List, visited map[string]bool) error {
+	stack.PushBack(s.metaKey)
+
+	for _, next := range s.preOrderResources {
+		if existInList(stack, next.metaKey) {
+			return fmt.Errorf("DAG check for resource %s failed", next.metaKey)
+		}
+		visited[next.metaKey] = true
+		if err := checkNode(next, stack, visited); err != nil {
+			return err
+		}
+	}
+
+	stack.Remove(stack.Back())
+	return nil
+}
+
+func rangeNodeList(l *list.List, nodeFunc func(n *nodeInfo)) {
+	if l == nil || l.Len() == 0 {
+		return
+	}
+	for ele := l.Front(); ele != nil; ele = ele.Next() {
+		nodeFunc(ele.Value.(*nodeInfo))
+	}
+}
+
+// transformToNodeSliceWithFilters collects the nodes of l that pass all
+// filters into a NodeInfo slice.
+func transformToNodeSliceWithFilters(l *list.List, filters ...func(*nodeInfo) bool) []NodeInfo {
+	if l == nil || l.Len() == 0 {
+		return []NodeInfo{}
+	}
+	var res []NodeInfo
+LOOP:
+	for ele := l.Front(); ele != nil; ele = ele.Next() {
+		n := ele.Value.(*nodeInfo)
+		for _, filter := range filters {
+			if !filter(n) {
+				continue LOOP
+			}
+		}
+		res = append(res, n)
+	}
+	return res
+}
+
+func removeFromList(l *list.List, v interface{}) bool {
+	if l == nil || l.Len() == 0 {
+		return false
+	}
+	for ele := l.Front(); ele != nil; ele = ele.Next() {
+		if ele.Value == v {
+			l.Remove(ele)
+			return true
+		}
+	}
+	return false
+}
+
+func existInList(l *list.List, value interface{}) bool {
+	if l == nil || l.Len() == 0 {
+		return false
+	}
+	for ele := l.Front(); ele != nil; ele = ele.Next() {
+		if ele.Value == value {
+			return true
+		}
+	}
+	return false
+}
+
+// compareResourceRelation defines a total order over ResourceRelations: first
+// by PostMeta, then by DirectRefs (which are sorted in place as a side
+// effect), then by LabelSelector.
+func compareResourceRelation(a, b ResourceRelation) int {
+	var res int
+	if res = strings.Compare(a.PostMeta.APIVersion, b.PostMeta.APIVersion); res != 0 {
+		return res
+	}
+	if res = strings.Compare(a.PostMeta.Kind, b.PostMeta.Kind); res != 0 {
+		return res
+	}
+
+	if res = len(a.DirectRefs) - len(b.DirectRefs); res != 0 {
+		return res
+	}
+	slices.SortFunc(a.DirectRefs, compareNodeName)
+	slices.SortFunc(b.DirectRefs, compareNodeName)
+	for i := 0; i < len(a.DirectRefs); i++ {
+		if res = strings.Compare(a.DirectRefs[i].Namespace, b.DirectRefs[i].Namespace); res != 0 {
+			return res
+		}
+		if res = strings.Compare(a.DirectRefs[i].Name, b.DirectRefs[i].Name); res != 0 {
+			return res
+		}
+	}
+
+	switch {
+	case a.LabelSelector == nil && b.LabelSelector == nil:
+		return 0
+	case a.LabelSelector == nil:
+		return -1
+	case b.LabelSelector == nil:
+		return 1
+	}
+	if res = a.LabelSelector.Size() - b.LabelSelector.Size(); res != 0 {
+		return res
+	}
+	return strings.Compare(a.LabelSelector.String(), b.LabelSelector.String())
+}
+
+func compareNodeName(a, b types.NamespacedName) int {
+	if res := strings.Compare(a.Namespace, b.Namespace); res != 0 {
+		return res
+	}
+	return strings.Compare(a.Name, b.Name)
+}
+
+// sortedSlicesCompare implements an ordered-slices compare algorithm.
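+// It walks slice1 and slice2, which must already be sorted by compareFunc, in
+// a single merge-style pass: elements present only in slice1 are handed to
+// diffFunc1, elements present only in slice2 to diffFunc2, and elements found
+// in both are skipped. Complexity is O(len(slice1)+len(slice2)).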
+func sortedSlicesCompare[S ~[]E, E any](slice1, slice2 S, diffFunc1, diffFunc2 func(E), compareFunc func(E, E) int) {
+	i, j := 0, 0
+	for i < len(slice1) && j < len(slice2) {
+		res := compareFunc(slice1[i], slice2[j])
+		switch {
+		case res == 0:
+			i++
+			j++
+		case res > 0:
+			diffFunc2(slice2[j])
+			j++
+		default:
+			diffFunc1(slice1[i])
+			i++
+		}
+	}
+	for i < len(slice1) {
+		diffFunc1(slice1[i])
+		i++
+	}
+	for j < len(slice2) {
+		diffFunc2(slice2[j])
+		j++
+	}
+}
+
+func typeEqual(t1, t2 metav1.TypeMeta) bool {
+	return t1.Kind == t2.Kind && t1.APIVersion == t2.APIVersion
+}
diff --git a/resourcetopo/utils_test.go b/resourcetopo/utils_test.go
new file mode 100644
index 0000000..7d40751
--- /dev/null
+++ b/resourcetopo/utils_test.go
@@ -0,0 +1,277 @@
+/**
+ * Copyright 2024 KusionStack Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package resourcetopo
+
+import (
+	"testing"
+
+	"golang.org/x/exp/slices"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+func Test_compareResourceRelation(t *testing.T) {
+	tests := []struct {
+		name  string
+		want  int
+		argsA ResourceRelation
+		argsB ResourceRelation
+	}{
+		{
+			name:  "all empty equal",
+			want:  0,
+			argsA: ResourceRelation{},
+			argsB: ResourceRelation{},
+		},
+		{
+			name:  "A empty and B not empty",
+			want:  -1,
+			argsA: ResourceRelation{},
+			argsB: ResourceRelation{
+				PostMeta: PodMeta,
+			},
+		},
+		{
+			name: "direct ref equal",
+			argsA: ResourceRelation{
+				PostMeta: PodMeta,
+				DirectRefs: []types.NamespacedName{
+					{Name: "pod1", Namespace: namespaceDefault},
+					{Name: "pod2", Namespace: namespaceDefault},
+					{Name: "pod3", Namespace: namespaceDefault},
+				},
+				LabelSelector: nil,
+			},
+			argsB: ResourceRelation{
+				PostMeta: PodMeta,
+				DirectRefs: []types.NamespacedName{
+					{Name: "pod2", Namespace: namespaceDefault},
+					{Name: "pod3", Namespace: namespaceDefault},
+					{Name: "pod1", Namespace: namespaceDefault},
+				},
+				LabelSelector: nil,
+			},
+		},
+		{
+			name: "label selector equal",
+			argsA: ResourceRelation{
+				PostMeta: PodMeta,
+				LabelSelector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{
+						"apps": "test",
+						"a":    "b",
+					},
+					MatchExpressions: []metav1.LabelSelectorRequirement{
+						{
+							Key:      "app",
+							Operator: metav1.LabelSelectorOpDoesNotExist,
+						},
+					},
+				},
+			},
+			argsB: ResourceRelation{
+				PostMeta: PodMeta,
+				LabelSelector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{
+						"a":    "b",
+						"apps": "test",
+					},
+					MatchExpressions: []metav1.LabelSelectorRequirement{
+						{
+							Key:      "app",
+							Operator: metav1.LabelSelectorOpDoesNotExist,
+						},
+					},
+				},
+			},
+		},
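+		// When DirectRefs lengths differ, compareResourceRelation returns
+		// len(a.DirectRefs)-len(b.DirectRefs); hence want: -2 below.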
"pod3", + Namespace: namespaceDefault, + }, + { + Name: "pod1", + Namespace: namespaceDefault, + }, + }, + }, + }, + { + name: "direct ref not equal", + want: 1, + argsA: ResourceRelation{ + PostMeta: PodMeta, + DirectRefs: []types.NamespacedName{ + { + Name: "pod1", + Namespace: "ns1", + }, + }, + }, + argsB: ResourceRelation{ + PostMeta: PodMeta, + DirectRefs: []types.NamespacedName{ + { + Name: "pod1", + Namespace: "ns0", + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := compareResourceRelation(tt.argsA, tt.argsB); got != tt.want { + t.Errorf("compareResourceRelation() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_slicesCompare(t *testing.T) { + type args[S interface{ ~[]E }, E any] struct { + slice1 S + slice2 S + expectDiffSlice1 S + expectDiffSlice2 S + } + type testCase struct { + name string + args args[[]int, int] + } + tests := []testCase{ + { + name: "empty equal", + args: args[[]int, int]{ + slice1: []int{}, + slice2: []int{}, + expectDiffSlice1: []int{}, + expectDiffSlice2: []int{}, + }, + }, + { + name: "equal slices", + args: args[[]int, int]{ + slice1: []int{1, 3, 5}, + slice2: []int{1, 3, 5}, + expectDiffSlice1: []int{}, + expectDiffSlice2: []int{}, + }, + }, + { + name: "empty slice1", + args: args[[]int, int]{ + slice1: []int{}, + slice2: []int{1, 3, 5}, + expectDiffSlice1: []int{}, + expectDiffSlice2: []int{1, 3, 5}, + }, + }, + { + name: "empty slice2", + args: args[[]int, int]{ + slice1: []int{1, 3, 5}, + slice2: []int{}, + expectDiffSlice1: []int{1, 3, 5}, + expectDiffSlice2: []int{}, + }, + }, + { + name: "case4", + args: args[[]int, int]{ + slice1: []int{1, 3, 5, 7, 9}, + slice2: []int{1, 5, 7}, + expectDiffSlice1: []int{3, 9}, + expectDiffSlice2: []int{}, + }, + }, + { + name: "case5", + args: args[[]int, int]{ + slice1: []int{1, 3, 5, 7, 9}, + slice2: []int{2, 5, 6, 8, 10}, + expectDiffSlice1: []int{1, 3, 7, 9}, + expectDiffSlice2: []int{2, 6, 8, 10}, + }, + }, + { + name: "case6", + args: args[[]int, int]{ + slice1: []int{1, 3, 5, 7, 9}, + slice2: []int{2, 4, 6, 8, 10}, + expectDiffSlice1: []int{1, 3, 5, 7, 9}, + expectDiffSlice2: []int{2, 4, 6, 8, 10}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + diffSlice1, diffSlice2 := []int{}, []int{} + sortedSlicesCompare(tt.args.slice1, tt.args.slice2, func(i int) { + diffSlice1 = append(diffSlice1, i) + }, func(i int) { + diffSlice2 = append(diffSlice2, i) + }, func(a int, b int) int { + return a - b + }) + if !slices.Equal(diffSlice1, tt.args.expectDiffSlice1) { + t.Errorf("sortedSlicesCompare got %v, want %v", diffSlice1, tt.args.expectDiffSlice1) + } + if !slices.Equal(diffSlice2, tt.args.expectDiffSlice2) { + t.Errorf("sortedSlicesCompare got %v, want %v", diffSlice2, tt.args.expectDiffSlice2) + } + }) + } +}