From fab1407f1497a25e0e1dded2b082c94e881cb166 Mon Sep 17 00:00:00 2001 From: constanca-m Date: Thu, 7 Dec 2023 10:05:21 +0100 Subject: [PATCH 01/61] Share watchers between metricsets. --- .../module/kubernetes/util/kubernetes.go | 873 +++++++++++------- .../module/kubernetes/util/kubernetes_test.go | 438 ++++++++- 2 files changed, 957 insertions(+), 354 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 26728fccdae..411673ef539 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -24,6 +24,7 @@ import ( "sync" "time" + "gotest.tools/gotestsum/log" k8sclient "k8s.io/client-go/kubernetes" "k8s.io/apimachinery/pkg/api/meta" @@ -38,6 +39,19 @@ import ( "github.com/elastic/elastic-agent-libs/mapstr" ) +type kubernetesConfig struct { + KubeConfig string `config:"kube_config"` + KubeClientOptions kubernetes.KubeClientOptions `config:"kube_client_options"` + + Node string `config:"node"` + SyncPeriod time.Duration `config:"sync_period"` + + // AddMetadata enables enriching metricset events with metadata from the API server + AddMetadata bool `config:"add_metadata"` + AddResourceMetadata *metadata.AddResourceMetadataConfig `config:"add_resource_metadata"` + Namespace string `config:"namespace"` +} + // Enricher takes Kubernetes events and enrich them with k8s metadata type Enricher interface { // Start will start the Kubernetes watcher on the first call, does nothing on the rest @@ -51,31 +65,34 @@ type Enricher interface { Enrich([]mapstr.M) } -type kubernetesConfig struct { - KubeConfig string `config:"kube_config"` - KubeClientOptions kubernetes.KubeClientOptions `config:"kube_client_options"` +type enricher struct { + sync.RWMutex + metadata map[string]mapstr.M + index func(mapstr.M) string + resourceName string + isPod bool + config *kubernetesConfig +} - Node string `config:"node"` - SyncPeriod time.Duration `config:"sync_period"` +type nilEnricher struct{} - // AddMetadata enables enriching metricset events with metadata from the API server - AddMetadata bool `config:"add_metadata"` - AddResourceMetadata *metadata.AddResourceMetadataConfig `config:"add_resource_metadata"` - Namespace string `config:"namespace"` +func (*nilEnricher) Start() {} +func (*nilEnricher) Stop() {} +func (*nilEnricher) Enrich([]mapstr.M) {} + +type watcherData struct { + whichAreUsing []string // list of resources using this watcher + watcher kubernetes.Watcher + started bool // true if watcher has started, false otherwise } -type enricher struct { - sync.RWMutex - metadata map[string]mapstr.M - index func(mapstr.M) string - watcher kubernetes.Watcher - watchersStarted bool - watchersStartedLock sync.Mutex - namespaceWatcher kubernetes.Watcher - nodeWatcher kubernetes.Watcher - replicasetWatcher kubernetes.Watcher - jobWatcher kubernetes.Watcher - isPod bool +type watchers struct { + watchersMap map[string]*watcherData + lock sync.RWMutex +} + +var resourceWatchers = watchers{ + watchersMap: make(map[string]*watcherData), } const selector = "kubernetes" @@ -129,155 +146,397 @@ func getResource(resourceName string) kubernetes.Resource { } } -// NewResourceMetadataEnricher returns an Enricher configured for kubernetes resource events +// getExtraWatchers returns a list of the extra resources to watch based on some resource. +// The full list can be seen in https://github.com/elastic/beats/issues/37243, at Expected Watchers section. 
+func getExtraWatchers(resourceName string, config *kubernetesConfig) []string { + switch resourceName { + case PodResource: + extra := []string{NamespaceResource, NodeResource} + // We need to create watchers for ReplicaSets and Jobs that it might belong to, + // in order to be able to retrieve 2nd layer Owner metadata like in case of: + // Deployment -> Replicaset -> Pod + // CronJob -> job -> Pod + if config.AddResourceMetadata != nil && config.AddResourceMetadata.Deployment { + extra = append(extra, ReplicaSetResource) + } + if config.AddResourceMetadata != nil && config.AddResourceMetadata.CronJob { + extra = append(extra, JobResource) + } + return extra + case ServiceResource: + return []string{NamespaceResource} + case DeploymentResource: + return []string{NamespaceResource} + case ReplicaSetResource: + return []string{NamespaceResource} + case StatefulSetResource: + return []string{NamespaceResource} + case DaemonSetResource: + return []string{NamespaceResource} + case JobResource: + return []string{NamespaceResource} + case CronJobResource: + return []string{NamespaceResource} + case PersistentVolumeResource: + return []string{} + case PersistentVolumeClaimResource: + return []string{NamespaceResource} + case StorageClassResource: + return []string{} + case NodeResource: + return []string{} + case NamespaceResource: + return []string{} + default: + return []string{} + } +} + +// getWatchOptions builds the kubernetes.WatchOptions{} needed for the watcher based on the config and nodeScope +func getWatchOptions(config *kubernetesConfig, nodeScope bool, client k8sclient.Interface, log *logp.Logger) (*kubernetes.WatchOptions, error) { + var err error + options := kubernetes.WatchOptions{ + SyncTimeout: config.SyncPeriod, + Namespace: config.Namespace, + } + + // Watch objects in the node only + if nodeScope { + nd := &kubernetes.DiscoverKubernetesNodeParams{ + ConfigHost: config.Node, + Client: client, + IsInCluster: kubernetes.IsInCluster(config.KubeConfig), + HostUtils: &kubernetes.DefaultDiscoveryUtils{}, + } + options.Node, err = kubernetes.DiscoverKubernetesNode(log, nd) + if err != nil { + return nil, fmt.Errorf("couldn't discover kubernetes node: %s", err) + } + } + return &options, err +} + +// startWatcher starts a watcher for a specific resource +func startWatcher( + resourceName string, + resource kubernetes.Resource, + options kubernetes.WatchOptions, + client k8sclient.Interface) (bool, error) { + + resourceWatchers.lock.Lock() + defer resourceWatchers.lock.Unlock() + + _, ok := resourceWatchers.watchersMap[resourceName] + // if it does not exist, create the watcher + if !ok { + watcher, err := kubernetes.NewNamedWatcher(resourceName, client, resource, options, nil) + if err != nil { + return false, err + } + resourceWatchers.watchersMap[resourceName] = &watcherData{watcher: watcher, started: false} + return true, nil + } + return false, nil +} + +func addToWhichAreUsing(resourceName string, usingName string) { + resourceWatchers.lock.Lock() + defer resourceWatchers.lock.Unlock() + + data, ok := resourceWatchers.watchersMap[resourceName] + if ok { + contains := false + for _, which := range data.whichAreUsing { + if which == usingName { + contains = true + break + } + } + // add this resource to the list of resources using it + if !contains { + data.whichAreUsing = append(data.whichAreUsing, usingName) + } + } +} + +// removeToWhichAreUsing returns true if element was removed and new size of array. +// The cache should be locked when called. 
+func removeToWhichAreUsing(resourceName string, notUsingName string) (bool, int) { + data, ok := resourceWatchers.watchersMap[resourceName] + removed := false + if ok { + newIndex := 0 + for i, which := range data.whichAreUsing { + if which == notUsingName { + removed = true + } else { + data.whichAreUsing[newIndex] = data.whichAreUsing[i] + newIndex++ + } + } + data.whichAreUsing = data.whichAreUsing[:newIndex] + return removed, len(data.whichAreUsing) + } + return removed, 0 +} + +// startAllWatchers starts all the watchers required by a specific resource +func startAllWatchers( + client k8sclient.Interface, + resourceName string, + nodeScope bool, + config *kubernetesConfig, + log *logp.Logger, +) error { + res := getResource(resourceName) + if res == nil { + return fmt.Errorf("resource for name %s does not exist. Watcher cannot be created", resourceName) + } + + options, err := getWatchOptions(config, nodeScope, client, log) + if err != nil { + return err + } + + // Create a watcher for the given resource. + // If it fails, we return an error, so we can stop the extra watchers from starting. + created, err := startWatcher(resourceName, res, *options, client) + if err != nil { + return fmt.Errorf("error initializing Kubernetes watcher %s, required by %s: %s", resourceName, resourceName, err) + } else if created { + log.Debugf("Started watcher %s successfully, created by %s.", resourceName, resourceName) + } + addToWhichAreUsing(resourceName, resourceName) + + // Create the extra watchers required by this resource + extraWatchers := getExtraWatchers(resourceName, config) + for _, extra := range extraWatchers { + extraRes := getResource(extra) + if extraRes != nil { + created, err = startWatcher(extra, extraRes, *options, client) + if err != nil { + log.Errorf("Error initializing Kubernetes watcher %s, required by %s: %s", extra, resourceName, err) + } else { + if created { + log.Debugf("Started watcher %s successfully, created by %s.", extra, resourceName) + } + // add this resource to the ones using the extra resource + addToWhichAreUsing(extra, resourceName) + } + } else { + log.Errorf("Resource for name %s does not exist. 
Watcher cannot be created.", extra) + } + } + + return nil +} + +// createMetadataGen creates the metadata generator for resources in general +func createMetadataGen(client k8sclient.Interface, commonConfig *conf.C, config *kubernetesConfig, resourceName string) (*metadata.Resource, error) { + // check if the resource is namespace aware + extras := getExtraWatchers(resourceName, config) + namespaceAware := false + for _, extra := range extras { + if extra == NamespaceResource { + namespaceAware = true + break + } + } + + resourceWatchers.lock.RLock() + defer resourceWatchers.lock.RUnlock() + + resourceWatcher := resourceWatchers.watchersMap[resourceName] + // This should not be possible since the watchers should have been created before + if resourceWatcher == nil { + return nil, fmt.Errorf("could not create the metadata generator, as the watcher for %s does not exist", resourceName) + } + + var metaGen *metadata.Resource + if namespaceAware { + namespaceWatcher := resourceWatchers.watchersMap[NamespaceResource] + + if namespaceWatcher == nil { + return nil, fmt.Errorf("could not create the metadata generator, as the watcher for namespace does not exist") + } + + n := metadata.NewNamespaceMetadataGenerator(config.AddResourceMetadata.Namespace, + (*namespaceWatcher).watcher.Store(), client) + metaGen = metadata.NewNamespaceAwareResourceMetadataGenerator(commonConfig, client, n) + } else { + metaGen = metadata.NewResourceMetadataGenerator(commonConfig, client) + } + + return metaGen, nil +} + +// createMetadataGenSpecific creates the metadata generator for a specific resource - pod or service +func createMetadataGenSpecific(client k8sclient.Interface, commonConfig *conf.C, config *kubernetesConfig, resourceName string) (metadata.MetaGen, error) { + resourceWatchers.lock.RLock() + defer resourceWatchers.lock.RUnlock() + + // The watcher for the resource needs to exist + resWatcher := resourceWatchers.watchersMap[resourceName] + if resWatcher == nil { + return nil, fmt.Errorf("could not create the metadata generator, as the watcher for %s does not exist", resourceName) + } + + var metaGen metadata.MetaGen + if resourceName == PodResource { + var nodeWatcher kubernetes.Watcher + if watcher := resourceWatchers.watchersMap[NodeResource]; watcher != nil { + nodeWatcher = (*watcher).watcher + } + var namespaceWatcher kubernetes.Watcher + if watcher := resourceWatchers.watchersMap[NamespaceResource]; watcher != nil { + namespaceWatcher = (*watcher).watcher + } + var replicaSetWatcher kubernetes.Watcher + if watcher := resourceWatchers.watchersMap[ReplicaSetResource]; watcher != nil { + replicaSetWatcher = (*watcher).watcher + } + var jobWatcher kubernetes.Watcher + if watcher := resourceWatchers.watchersMap[JobResource]; watcher != nil { + jobWatcher = (*watcher).watcher + } + + metaGen = metadata.GetPodMetaGen(commonConfig, (*resWatcher).watcher, nodeWatcher, namespaceWatcher, replicaSetWatcher, + jobWatcher, config.AddResourceMetadata) + return metaGen, nil + } else if resourceName == ServiceResource { + namespaceWatcher := resourceWatchers.watchersMap[NamespaceResource] + if namespaceWatcher == nil { + return nil, fmt.Errorf("could not create the metadata generator, as the watcher for namespace does not exist") + } + namespaceMeta := metadata.NewNamespaceMetadataGenerator(config.AddResourceMetadata.Namespace, + (*namespaceWatcher).watcher.Store(), client) + metaGen = metadata.NewServiceMetadataGenerator(commonConfig, (*resWatcher).watcher.Store(), + namespaceMeta, client) + return metaGen, nil + 
} + + // Should never reach this part, as this function is only for service or pod resources + return metaGen, fmt.Errorf("failed to create a metadata generator for resource %s", resourceName) +} + func NewResourceMetadataEnricher( base mb.BaseMetricSet, resourceName string, metricsRepo *MetricsRepo, nodeScope bool) Enricher { - var replicaSetWatcher, jobWatcher kubernetes.Watcher + log := logp.NewLogger(selector) config, err := GetValidatedConfig(base) if err != nil { - logp.Info("Kubernetes metricset enriching is disabled") + log.Info("Kubernetes metricset enriching is disabled") return &nilEnricher{} } - res := getResource(resourceName) - if res == nil { + // This type of config is needed for the metadata generator + commonMetaConfig := metadata.Config{} + if err := base.Module().UnpackConfig(&commonMetaConfig); err != nil { + log.Errorf("Error initializing Kubernetes metadata enricher: %s", err) return &nilEnricher{} } + commonConfig, _ := conf.NewConfigFrom(&commonMetaConfig) client, err := kubernetes.GetKubernetesClient(config.KubeConfig, config.KubeClientOptions) if err != nil { - logp.Err("Error creating Kubernetes client: %s", err) + log.Errorf("Error creating Kubernetes client: %s", err) return &nilEnricher{} } - watcher, nodeWatcher, namespaceWatcher := getResourceMetadataWatchers(config, res, client, nodeScope) - - if watcher == nil { + err = startAllWatchers(client, resourceName, nodeScope, config, log) + if err != nil { + log.Errorf("Error starting the watchers: %s", err) return &nilEnricher{} } - // GetPodMetaGen requires cfg of type Config - commonMetaConfig := metadata.Config{} - if err := base.Module().UnpackConfig(&commonMetaConfig); err != nil { - logp.Err("Error initializing Kubernetes metadata enricher: %s", err) + var specificMetaGen metadata.MetaGen + var generalMetaGen *metadata.Resource + if resourceName == ServiceResource || resourceName == PodResource { + specificMetaGen, err = createMetadataGenSpecific(client, commonConfig, config, resourceName) + } else { + generalMetaGen, err = createMetadataGen(client, commonConfig, config, resourceName) + } + if err != nil { + log.Errorf("Error trying to create the metadata generators: %s", err) return &nilEnricher{} } - cfg, _ := conf.NewConfigFrom(&commonMetaConfig) - // if Resource is Pod then we need to create watchers for Replicasets and Jobs that it might belongs to - // in order to be able to retrieve 2nd layer Owner metadata like in case of: - // Deployment -> Replicaset -> Pod - // CronJob -> job -> Pod - if resourceName == PodResource { - if config.AddResourceMetadata.Deployment { - replicaSetWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_rs", client, &kubernetes.ReplicaSet{}, kubernetes.WatchOptions{ - SyncTimeout: config.SyncPeriod, - }, nil) - if err != nil { - logp.Err("Error creating watcher for %T due to error %+v", &kubernetes.ReplicaSet{}, err) - return &nilEnricher{} - } - } + updateFunc := func(m map[string]mapstr.M, r kubernetes.Resource) { + accessor, _ := meta.Accessor(r) + id := join(accessor.GetNamespace(), accessor.GetName()) //nolint:all - if config.AddResourceMetadata.CronJob { - jobWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_job", client, &kubernetes.Job{}, kubernetes.WatchOptions{ - SyncTimeout: config.SyncPeriod, - }, nil) - if err != nil { - logp.Err("Error creating watcher for %T due to error %+v", &kubernetes.Job{}, err) - return &nilEnricher{} + switch r := r.(type) { + case *kubernetes.Pod: + m[id] = specificMetaGen.Generate(r) + + case 
*kubernetes.Node: + nodeName := r.GetObjectMeta().GetName() + metrics := NewNodeMetrics() + if cpu, ok := r.Status.Capacity["cpu"]; ok { + if q, err := resource.ParseQuantity(cpu.String()); err == nil { + metrics.CoresAllocatable = NewFloat64Metric(float64(q.MilliValue()) / 1000) + } } + if memory, ok := r.Status.Capacity["memory"]; ok { + if q, err := resource.ParseQuantity(memory.String()); err == nil { + metrics.MemoryAllocatable = NewFloat64Metric(float64(q.Value())) + } + } + nodeStore, _ := metricsRepo.AddNodeStore(nodeName) + nodeStore.SetNodeMetrics(metrics) + + m[id] = generalMetaGen.Generate(NodeResource, r) + + case *kubernetes.Deployment: + m[id] = generalMetaGen.Generate(DeploymentResource, r) + case *kubernetes.Job: + m[id] = generalMetaGen.Generate(JobResource, r) + case *kubernetes.CronJob: + m[id] = generalMetaGen.Generate(CronJobResource, r) + case *kubernetes.Service: + m[id] = specificMetaGen.Generate(r) + case *kubernetes.StatefulSet: + m[id] = generalMetaGen.Generate(StatefulSetResource, r) + case *kubernetes.Namespace: + m[id] = generalMetaGen.Generate(NamespaceResource, r) + case *kubernetes.ReplicaSet: + m[id] = generalMetaGen.Generate(ReplicaSetResource, r) + case *kubernetes.DaemonSet: + m[id] = generalMetaGen.Generate(DaemonSetResource, r) + case *kubernetes.PersistentVolume: + m[id] = generalMetaGen.Generate(PersistentVolumeResource, r) + case *kubernetes.PersistentVolumeClaim: + m[id] = generalMetaGen.Generate(PersistentVolumeClaimResource, r) + case *kubernetes.StorageClass: + m[id] = generalMetaGen.Generate(StorageClassResource, r) + default: + m[id] = generalMetaGen.Generate(r.GetObjectKind().GroupVersionKind().Kind, r) } } - podMetaGen := metadata.GetPodMetaGen(cfg, watcher, nodeWatcher, namespaceWatcher, replicaSetWatcher, jobWatcher, config.AddResourceMetadata) - - namespaceMeta := metadata.NewNamespaceMetadataGenerator(config.AddResourceMetadata.Namespace, namespaceWatcher.Store(), watcher.Client()) - serviceMetaGen := metadata.NewServiceMetadataGenerator(cfg, watcher.Store(), namespaceMeta, watcher.Client()) + deleteFunc := func(m map[string]mapstr.M, r kubernetes.Resource) { + accessor, _ := meta.Accessor(r) - metaGen := metadata.NewNamespaceAwareResourceMetadataGenerator(cfg, watcher.Client(), namespaceMeta) - - enricher := buildMetadataEnricher(watcher, nodeWatcher, namespaceWatcher, replicaSetWatcher, jobWatcher, - // update - func(m map[string]mapstr.M, r kubernetes.Resource) { - accessor, _ := meta.Accessor(r) - id := join(accessor.GetNamespace(), accessor.GetName()) //nolint:all + switch r := r.(type) { + case *kubernetes.Node: + nodeName := r.GetObjectMeta().GetName() + metricsRepo.DeleteNodeStore(nodeName) + } - switch r := r.(type) { - case *kubernetes.Pod: - m[id] = podMetaGen.Generate(r) + id := join(accessor.GetNamespace(), accessor.GetName()) + delete(m, id) + } - case *kubernetes.Node: - nodeName := r.GetObjectMeta().GetName() - metrics := NewNodeMetrics() - if cpu, ok := r.Status.Capacity["cpu"]; ok { - if q, err := resource.ParseQuantity(cpu.String()); err == nil { - metrics.CoresAllocatable = NewFloat64Metric(float64(q.MilliValue()) / 1000) - } - } - if memory, ok := r.Status.Capacity["memory"]; ok { - if q, err := resource.ParseQuantity(memory.String()); err == nil { - metrics.MemoryAllocatable = NewFloat64Metric(float64(q.Value())) - } - } - nodeStore, _ := metricsRepo.AddNodeStore(nodeName) - nodeStore.SetNodeMetrics(metrics) - - m[id] = metaGen.Generate(NodeResource, r) - - case *kubernetes.Deployment: - m[id] = 
metaGen.Generate(DeploymentResource, r) - case *kubernetes.Job: - m[id] = metaGen.Generate(JobResource, r) - case *kubernetes.CronJob: - m[id] = metaGen.Generate(CronJobResource, r) - case *kubernetes.Service: - m[id] = serviceMetaGen.Generate(r) - case *kubernetes.StatefulSet: - m[id] = metaGen.Generate(StatefulSetResource, r) - case *kubernetes.Namespace: - m[id] = metaGen.Generate(NamespaceResource, r) - case *kubernetes.ReplicaSet: - m[id] = metaGen.Generate(ReplicaSetResource, r) - case *kubernetes.DaemonSet: - m[id] = metaGen.Generate(DaemonSetResource, r) - case *kubernetes.PersistentVolume: - m[id] = metaGen.Generate(PersistentVolumeResource, r) - case *kubernetes.PersistentVolumeClaim: - m[id] = metaGen.Generate(PersistentVolumeClaimResource, r) - case *kubernetes.StorageClass: - m[id] = metaGen.Generate(StorageClassResource, r) - default: - m[id] = metaGen.Generate(r.GetObjectKind().GroupVersionKind().Kind, r) - } - }, - // delete - func(m map[string]mapstr.M, r kubernetes.Resource) { - accessor, _ := meta.Accessor(r) - - switch r := r.(type) { - case *kubernetes.Node: - nodeName := r.GetObjectMeta().GetName() - metricsRepo.DeleteNodeStore(nodeName) - } + indexFunc := func(e mapstr.M) string { + return join(getString(e, mb.ModuleDataKey+".namespace"), getString(e, "name")) + } - id := join(accessor.GetNamespace(), accessor.GetName()) - delete(m, id) - }, - // index - func(e mapstr.M) string { - return join(getString(e, mb.ModuleDataKey+".namespace"), getString(e, "name")) - }, - ) - - // Configure the enricher for Pods, so pod specific metadata ends up in the right place when - // calling Enrich - if _, ok := res.(*kubernetes.Pod); ok { + enricher := buildMetadataEnricher(resourceName, config, updateFunc, deleteFunc, indexFunc) + if resourceName == PodResource { enricher.isPod = true } @@ -290,132 +549,115 @@ func NewContainerMetadataEnricher( metricsRepo *MetricsRepo, nodeScope bool) Enricher { - var replicaSetWatcher, jobWatcher kubernetes.Watcher + log := logp.NewLogger(selector) + config, err := GetValidatedConfig(base) if err != nil { - logp.Info("Kubernetes metricset enriching is disabled") + log.Info("Kubernetes metricset enriching is disabled") return &nilEnricher{} } - client, err := kubernetes.GetKubernetesClient(config.KubeConfig, config.KubeClientOptions) - if err != nil { - logp.Err("Error creating Kubernetes client: %s", err) + // This type of config is needed for the metadata generator + commonMetaConfig := metadata.Config{} + if err := base.Module().UnpackConfig(&commonMetaConfig); err != nil { + log.Errorf("Error initializing Kubernetes metadata enricher: %s", err) return &nilEnricher{} } + commonConfig, _ := conf.NewConfigFrom(&commonMetaConfig) - watcher, nodeWatcher, namespaceWatcher := getResourceMetadataWatchers(config, &kubernetes.Pod{}, client, nodeScope) - if watcher == nil { + client, err := kubernetes.GetKubernetesClient(config.KubeConfig, config.KubeClientOptions) + if err != nil { + log.Errorf("Error creating Kubernetes client: %s", err) return &nilEnricher{} } - // Resource is Pod so we need to create watchers for Replicasets and Jobs that it might belongs to - // in order to be able to retrieve 2nd layer Owner metadata like in case of: - // Deployment -> Replicaset -> Pod - // CronJob -> job -> Pod - if config.AddResourceMetadata.Deployment { - replicaSetWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_rs", client, &kubernetes.ReplicaSet{}, kubernetes.WatchOptions{ - SyncTimeout: config.SyncPeriod, - }, nil) - if err != nil { - 
logp.Err("Error creating watcher for %T due to error %+v", &kubernetes.Namespace{}, err) - return &nilEnricher{} - } - } - if config.AddResourceMetadata.CronJob { - jobWatcher, err = kubernetes.NewNamedWatcher("resource_metadata_enricher_job", client, &kubernetes.Job{}, kubernetes.WatchOptions{ - SyncTimeout: config.SyncPeriod, - }, nil) - if err != nil { - logp.Err("Error creating watcher for %T due to error %+v", &kubernetes.Job{}, err) - return &nilEnricher{} - } + err = startAllWatchers(client, PodResource, nodeScope, config, log) + if err != nil { + log.Errorf("Error starting the watchers: %s", err) + return &nilEnricher{} } - commonMetaConfig := metadata.Config{} - if err := base.Module().UnpackConfig(&commonMetaConfig); err != nil { - logp.Err("Error initializing Kubernetes metadata enricher: %s", err) + metaGen, err := createMetadataGenSpecific(client, commonConfig, config, PodResource) + if err != nil { + log.Errorf("Error trying to create the metadata generators: %s", err) return &nilEnricher{} } - cfg, _ := conf.NewConfigFrom(&commonMetaConfig) - metaGen := metadata.GetPodMetaGen(cfg, watcher, nodeWatcher, namespaceWatcher, replicaSetWatcher, jobWatcher, config.AddResourceMetadata) + updateFunc := func(m map[string]mapstr.M, r kubernetes.Resource) { + pod, ok := r.(*kubernetes.Pod) + if !ok { + base.Logger().Debugf("Error while casting event: %s", ok) + } + pmeta := metaGen.Generate(pod) - enricher := buildMetadataEnricher(watcher, nodeWatcher, namespaceWatcher, replicaSetWatcher, jobWatcher, - // update - func(m map[string]mapstr.M, r kubernetes.Resource) { - pod, ok := r.(*kubernetes.Pod) - if !ok { - base.Logger().Debugf("Error while casting event: %s", ok) + statuses := make(map[string]*kubernetes.PodContainerStatus) + mapStatuses := func(s []kubernetes.PodContainerStatus) { + for i := range s { + statuses[s[i].Name] = &s[i] } - pmeta := metaGen.Generate(pod) + } + mapStatuses(pod.Status.ContainerStatuses) + mapStatuses(pod.Status.InitContainerStatuses) - statuses := make(map[string]*kubernetes.PodContainerStatus) - mapStatuses := func(s []kubernetes.PodContainerStatus) { - for i := range s { - statuses[s[i].Name] = &s[i] + nodeStore, _ := metricsRepo.AddNodeStore(pod.Spec.NodeName) + podId := NewPodId(pod.Namespace, pod.Name) + podStore, _ := nodeStore.AddPodStore(podId) + + for _, container := range append(pod.Spec.Containers, pod.Spec.InitContainers...) { + cmeta := mapstr.M{} + metrics := NewContainerMetrics() + + if cpu, ok := container.Resources.Limits["cpu"]; ok { + if q, err := resource.ParseQuantity(cpu.String()); err == nil { + metrics.CoresLimit = NewFloat64Metric(float64(q.MilliValue()) / 1000) + } + } + if memory, ok := container.Resources.Limits["memory"]; ok { + if q, err := resource.ParseQuantity(memory.String()); err == nil { + metrics.MemoryLimit = NewFloat64Metric(float64(q.Value())) } } - mapStatuses(pod.Status.ContainerStatuses) - mapStatuses(pod.Status.InitContainerStatuses) - nodeStore, _ := metricsRepo.AddNodeStore(pod.Spec.NodeName) - podId := NewPodId(pod.Namespace, pod.Name) - podStore, _ := nodeStore.AddPodStore(podId) + containerStore, _ := podStore.AddContainerStore(container.Name) + containerStore.SetContainerMetrics(metrics) - for _, container := range append(pod.Spec.Containers, pod.Spec.InitContainers...) 
{ - cmeta := mapstr.M{} - metrics := NewContainerMetrics() + if s, ok := statuses[container.Name]; ok { + // Extracting id and runtime ECS fields from ContainerID + // which is in the form of :// + split := strings.Index(s.ContainerID, "://") + if split != -1 { + kubernetes2.ShouldPut(cmeta, "container.id", s.ContainerID[split+3:], base.Logger()) - if cpu, ok := container.Resources.Limits["cpu"]; ok { - if q, err := resource.ParseQuantity(cpu.String()); err == nil { - metrics.CoresLimit = NewFloat64Metric(float64(q.MilliValue()) / 1000) - } - } - if memory, ok := container.Resources.Limits["memory"]; ok { - if q, err := resource.ParseQuantity(memory.String()); err == nil { - metrics.MemoryLimit = NewFloat64Metric(float64(q.Value())) - } + kubernetes2.ShouldPut(cmeta, "container.runtime", s.ContainerID[:split], base.Logger()) } + } - containerStore, _ := podStore.AddContainerStore(container.Name) - containerStore.SetContainerMetrics(metrics) + id := join(pod.GetObjectMeta().GetNamespace(), pod.GetObjectMeta().GetName(), container.Name) + cmeta.DeepUpdate(pmeta) + m[id] = cmeta + } + } - if s, ok := statuses[container.Name]; ok { - // Extracting id and runtime ECS fields from ContainerID - // which is in the form of :// - split := strings.Index(s.ContainerID, "://") - if split != -1 { - kubernetes2.ShouldPut(cmeta, "container.id", s.ContainerID[split+3:], base.Logger()) + deleteFunc := func(m map[string]mapstr.M, r kubernetes.Resource) { + pod, ok := r.(*kubernetes.Pod) + if !ok { + base.Logger().Debugf("Error while casting event: %s", ok) + } + podId := NewPodId(pod.Namespace, pod.Name) + nodeStore := metricsRepo.GetNodeStore(pod.Spec.NodeName) + nodeStore.DeletePodStore(podId) - kubernetes2.ShouldPut(cmeta, "container.runtime", s.ContainerID[:split], base.Logger()) - } - } + for _, container := range append(pod.Spec.Containers, pod.Spec.InitContainers...) { + id := join(pod.ObjectMeta.GetNamespace(), pod.GetObjectMeta().GetName(), container.Name) + delete(m, id) + } + } - id := join(pod.GetObjectMeta().GetNamespace(), pod.GetObjectMeta().GetName(), container.Name) - cmeta.DeepUpdate(pmeta) - m[id] = cmeta - } - }, - // delete - func(m map[string]mapstr.M, r kubernetes.Resource) { - pod, ok := r.(*kubernetes.Pod) - if !ok { - base.Logger().Debugf("Error while casting event: %s", ok) - } - podId := NewPodId(pod.Namespace, pod.Name) - nodeStore := metricsRepo.GetNodeStore(pod.Spec.NodeName) - nodeStore.DeletePodStore(podId) + indexFunc := func(e mapstr.M) string { + return join(getString(e, mb.ModuleDataKey+".namespace"), getString(e, mb.ModuleDataKey+".pod.name"), getString(e, "name")) + } - for _, container := range append(pod.Spec.Containers, pod.Spec.InitContainers...) 
{ - id := join(pod.ObjectMeta.GetNamespace(), pod.GetObjectMeta().GetName(), container.Name) - delete(m, id) - } - }, - // index - func(e mapstr.M) string { - return join(getString(e, mb.ModuleDataKey+".namespace"), getString(e, mb.ModuleDataKey+".pod.name"), getString(e, "name")) - }, - ) + enricher := buildMetadataEnricher(PodResource, config, updateFunc, deleteFunc, indexFunc) return enricher } @@ -474,12 +716,6 @@ func getResourceMetadataWatchers( return watcher, nodeWatcher, namespaceWatcher } -func GetDefaultDisabledMetaConfig() *kubernetesConfig { - return &kubernetesConfig{ - AddMetadata: false, - } -} - func GetValidatedConfig(base mb.BaseMetricSet) (*kubernetesConfig, error) { config, err := GetConfig(base) if err != nil { @@ -530,113 +766,104 @@ func join(fields ...string) string { } func buildMetadataEnricher( - watcher kubernetes.Watcher, - nodeWatcher kubernetes.Watcher, - namespaceWatcher kubernetes.Watcher, - replicasetWatcher kubernetes.Watcher, - jobWatcher kubernetes.Watcher, + resourceName string, + config *kubernetesConfig, update func(map[string]mapstr.M, kubernetes.Resource), delete func(map[string]mapstr.M, kubernetes.Resource), index func(e mapstr.M) string) *enricher { enricher := enricher{ - metadata: map[string]mapstr.M{}, - index: index, - watcher: watcher, - nodeWatcher: nodeWatcher, - namespaceWatcher: namespaceWatcher, - replicasetWatcher: replicasetWatcher, - jobWatcher: jobWatcher, - } - - watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - enricher.Lock() - defer enricher.Unlock() - update(enricher.metadata, obj.(kubernetes.Resource)) - }, - UpdateFunc: func(obj interface{}) { - enricher.Lock() - defer enricher.Unlock() - update(enricher.metadata, obj.(kubernetes.Resource)) - }, - DeleteFunc: func(obj interface{}) { - enricher.Lock() - defer enricher.Unlock() - delete(enricher.metadata, obj.(kubernetes.Resource)) - }, - }) + metadata: map[string]mapstr.M{}, + index: index, + resourceName: resourceName, + config: config, + } + + resourceWatchers.lock.Lock() + defer resourceWatchers.lock.Unlock() + + watcher := resourceWatchers.watchersMap[resourceName] + if watcher != nil { + watcher.watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + enricher.Lock() + defer enricher.Unlock() + update(enricher.metadata, obj.(kubernetes.Resource)) + }, + UpdateFunc: func(obj interface{}) { + enricher.Lock() + defer enricher.Unlock() + update(enricher.metadata, obj.(kubernetes.Resource)) + }, + DeleteFunc: func(obj interface{}) { + enricher.Lock() + defer enricher.Unlock() + delete(enricher.metadata, obj.(kubernetes.Resource)) + }, + }) + } return &enricher } -func (m *enricher) Start() { - m.watchersStartedLock.Lock() - defer m.watchersStartedLock.Unlock() - if !m.watchersStarted { - if m.nodeWatcher != nil { - if err := m.nodeWatcher.Start(); err != nil { - logp.Warn("Error starting node watcher: %s", err) - } - } +func (e *enricher) Start() { + resourceWatchers.lock.Lock() + defer resourceWatchers.lock.Unlock() - if m.namespaceWatcher != nil { - if err := m.namespaceWatcher.Start(); err != nil { - logp.Warn("Error starting namespace watcher: %s", err) - } - } - - if m.replicasetWatcher != nil { - if err := m.replicasetWatcher.Start(); err != nil { - logp.Warn("Error starting replicaset watcher: %s", err) - } + resourceWatcher := resourceWatchers.watchersMap[e.resourceName] + if resourceWatcher != nil && resourceWatcher.watcher != nil && !resourceWatcher.started { + if err := 
resourceWatcher.watcher.Start(); err != nil { + log.Warnf("Error starting %s watcher: %s", e.resourceName, err) + } else { + resourceWatcher.started = true } + } - if m.jobWatcher != nil { - if err := m.jobWatcher.Start(); err != nil { - logp.Warn("Error starting job watcher: %s", err) + extras := getExtraWatchers(e.resourceName, e.config) + for _, extra := range extras { + extraWatcher := resourceWatchers.watchersMap[extra] + if extraWatcher != nil && extraWatcher.watcher != nil && !extraWatcher.started { + if err := extraWatcher.watcher.Start(); err != nil { + log.Warnf("Error starting %s watcher: %s", extra, err) + } else { + extraWatcher.started = true } } - - err := m.watcher.Start() - if err != nil { - logp.Warn("Error starting Kubernetes watcher: %s", err) - } - m.watchersStarted = true } } -func (m *enricher) Stop() { - m.watchersStartedLock.Lock() - defer m.watchersStartedLock.Unlock() - if m.watchersStarted { - m.watcher.Stop() - - if m.namespaceWatcher != nil { - m.namespaceWatcher.Stop() - } - - if m.nodeWatcher != nil { - m.nodeWatcher.Stop() - } +func (e *enricher) Stop() { + resourceWatchers.lock.Lock() + defer resourceWatchers.lock.Unlock() - if m.replicasetWatcher != nil { - m.replicasetWatcher.Stop() + resourceWatcher := resourceWatchers.watchersMap[e.resourceName] + if resourceWatcher != nil && resourceWatcher.watcher != nil && resourceWatcher.whichAreUsing != nil && resourceWatcher.started { + _, size := removeToWhichAreUsing(e.resourceName, e.resourceName) + if size == 0 { + resourceWatcher.watcher.Stop() + resourceWatcher.started = false } + } - if m.jobWatcher != nil { - m.jobWatcher.Stop() + extras := getExtraWatchers(e.resourceName, e.config) + for _, extra := range extras { + extraWatcher := resourceWatchers.watchersMap[extra] + if extraWatcher != nil && extraWatcher.watcher != nil && extraWatcher.whichAreUsing != nil && extraWatcher.started { + _, size := removeToWhichAreUsing(extra, e.resourceName) + if size == 0 { + extraWatcher.watcher.Stop() + extraWatcher.started = false + } } - - m.watchersStarted = false } } -func (m *enricher) Enrich(events []mapstr.M) { - m.RLock() - defer m.RUnlock() +func (e *enricher) Enrich(events []mapstr.M) { + e.RLock() + defer e.RUnlock() + for _, event := range events { - if meta := m.metadata[m.index(event)]; meta != nil { + if meta := e.metadata[e.index(event)]; meta != nil { k8s, err := meta.GetValue("kubernetes") if err != nil { continue @@ -646,7 +873,7 @@ func (m *enricher) Enrich(events []mapstr.M) { continue } - if m.isPod { + if e.isPod { // apply pod meta at metricset level if podMeta, ok := k8sMeta["pod"].(mapstr.M); ok { event.DeepUpdate(podMeta) @@ -670,12 +897,6 @@ func (m *enricher) Enrich(events []mapstr.M) { } } -type nilEnricher struct{} - -func (*nilEnricher) Start() {} -func (*nilEnricher) Stop() {} -func (*nilEnricher) Enrich([]mapstr.M) {} - func CreateEvent(event mapstr.M, namespace string) (mb.Event, error) { var moduleFieldsMapStr mapstr.M moduleFields, ok := event[mb.ModuleDataKey] diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index 2d0d3e46113..8f047b34993 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -20,33 +20,384 @@ package util import ( "fmt" "testing" + "time" - k8s "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" - - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" v1 
"k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + k8s "k8s.io/client-go/kubernetes" + k8sfake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" + kubernetes2 "github.com/elastic/beats/v7/libbeat/autodiscover/providers/kubernetes" "github.com/elastic/elastic-agent-autodiscover/kubernetes" + "github.com/elastic/elastic-agent-autodiscover/kubernetes/metadata" + conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" - - kubernetes2 "github.com/elastic/beats/v7/libbeat/autodiscover/providers/kubernetes" ) -var ( - logger = logp.NewLogger("kubernetes") -) +func TestWatchOptions(t *testing.T) { + log := logp.NewLogger("test") + + client := k8sfake.NewSimpleClientset() + config := &kubernetesConfig{ + Namespace: "test-ns", + SyncPeriod: time.Minute, + Node: "test-node", + } + + options, err := getWatchOptions(config, false, client, log) + require.NoError(t, err) + require.Equal(t, options.SyncTimeout, config.SyncPeriod) + require.Equal(t, options.Namespace, config.Namespace) + require.NotEqual(t, options.Node, config.Node) + + options, err = getWatchOptions(config, true, client, log) + require.NoError(t, err) + require.Equal(t, options.SyncTimeout, config.SyncPeriod) + require.Equal(t, options.Namespace, config.Namespace) + require.Equal(t, options.Node, config.Node) +} + +func TestStartWatcher(t *testing.T) { + // make sure the watchers map is empty for the test + resourceWatchers.lock.Lock() + resourceWatchers.watchersMap = make(map[string]*watcherData) + resourceWatchers.lock.Unlock() + + client := k8sfake.NewSimpleClientset() + config := &kubernetesConfig{ + Namespace: "test-ns", + SyncPeriod: time.Minute, + Node: "test-node", + } + log := logp.NewLogger("test") + + options, err := getWatchOptions(config, false, client, log) + require.NoError(t, err) + + created, err := startWatcher(NamespaceResource, &kubernetes.Node{}, *options, client) + require.True(t, created) + require.NoError(t, err) + + resourceWatchers.lock.Lock() + require.Equal(t, 1, len(resourceWatchers.watchersMap)) + require.NotNil(t, resourceWatchers.watchersMap[NamespaceResource]) + require.NotNil(t, resourceWatchers.watchersMap[NamespaceResource].watcher) + resourceWatchers.lock.Unlock() + + created, err = startWatcher(NamespaceResource, &kubernetes.Namespace{}, *options, client) + require.False(t, created) + require.NoError(t, err) + + resourceWatchers.lock.Lock() + require.Equal(t, 1, len(resourceWatchers.watchersMap)) + require.NotNil(t, resourceWatchers.watchersMap[NamespaceResource]) + require.NotNil(t, resourceWatchers.watchersMap[NamespaceResource].watcher) + resourceWatchers.lock.Unlock() + + created, err = startWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client) + require.True(t, created) + require.NoError(t, err) + + resourceWatchers.lock.Lock() + require.Equal(t, 2, len(resourceWatchers.watchersMap)) + require.NotNil(t, resourceWatchers.watchersMap[DeploymentResource]) + require.NotNil(t, resourceWatchers.watchersMap[NamespaceResource]) + resourceWatchers.lock.Unlock() +} + +func TestAddToWhichAreUsing(t *testing.T) { + // make sure the watchers map is empty for the test + resourceWatchers.lock.Lock() + resourceWatchers.watchersMap = make(map[string]*watcherData) + resourceWatchers.lock.Unlock() + + client := k8sfake.NewSimpleClientset() + config := 
&kubernetesConfig{ + Namespace: "test-ns", + SyncPeriod: time.Minute, + Node: "test-node", + } + log := logp.NewLogger("test") + + options, err := getWatchOptions(config, false, client, log) + require.NoError(t, err) + + // Create the new entry with watcher and nil string array first + created, err := startWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client) + require.True(t, created) + require.NoError(t, err) + + resourceWatchers.lock.Lock() + require.NotNil(t, resourceWatchers.watchersMap[DeploymentResource].watcher) + require.Nil(t, resourceWatchers.watchersMap[DeploymentResource].whichAreUsing) + resourceWatchers.lock.Unlock() + + addToWhichAreUsing(DeploymentResource, DeploymentResource) + resourceWatchers.lock.Lock() + require.NotNil(t, resourceWatchers.watchersMap[DeploymentResource].whichAreUsing) + require.Equal(t, []string{DeploymentResource}, resourceWatchers.watchersMap[DeploymentResource].whichAreUsing) + resourceWatchers.lock.Unlock() + + addToWhichAreUsing(DeploymentResource, PodResource) + resourceWatchers.lock.Lock() + require.Equal(t, []string{DeploymentResource, PodResource}, resourceWatchers.watchersMap[DeploymentResource].whichAreUsing) + resourceWatchers.lock.Unlock() +} + +func TestRemoveToWhichAreUsing(t *testing.T) { + // make sure the watchers map is empty for the test + resourceWatchers.lock.Lock() + resourceWatchers.watchersMap = make(map[string]*watcherData) + resourceWatchers.lock.Unlock() + + client := k8sfake.NewSimpleClientset() + config := &kubernetesConfig{ + Namespace: "test-ns", + SyncPeriod: time.Minute, + Node: "test-node", + } + log := logp.NewLogger("test") + + options, err := getWatchOptions(config, false, client, log) + require.NoError(t, err) + + // Create the new entry with watcher and nil string array first + created, err := startWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client) + require.True(t, created) + require.NoError(t, err) + + addToWhichAreUsing(DeploymentResource, DeploymentResource) + addToWhichAreUsing(DeploymentResource, PodResource) + + resourceWatchers.lock.Lock() + defer resourceWatchers.lock.Unlock() + + removed, size := removeToWhichAreUsing(DeploymentResource, DeploymentResource) + require.True(t, removed) + require.Equal(t, 1, size) + + removed, size = removeToWhichAreUsing(DeploymentResource, DeploymentResource) + require.False(t, removed) + require.Equal(t, 1, size) + + removed, size = removeToWhichAreUsing(DeploymentResource, PodResource) + require.True(t, removed) + require.Equal(t, 0, size) +} + +func TestStartAllWatchers(t *testing.T) { + // make sure the watchers map is empty for the test + resourceWatchers.lock.Lock() + resourceWatchers.watchersMap = make(map[string]*watcherData) + resourceWatchers.lock.Unlock() + + client := k8sfake.NewSimpleClientset() + config := &kubernetesConfig{ + Namespace: "test-ns", + SyncPeriod: time.Minute, + Node: "test-node", + AddResourceMetadata: &metadata.AddResourceMetadataConfig{ + CronJob: false, + Deployment: true, + }, + } + log := logp.NewLogger("test") + + // Start watchers based on a resource that does not exist should cause an error + err := startAllWatchers(client, "does-not-exist", false, config, log) + require.Error(t, err) + resourceWatchers.lock.Lock() + require.Equal(t, 0, len(resourceWatchers.watchersMap)) + resourceWatchers.lock.Unlock() + + // Start watcher for a resource that requires other resources, should start all the watchers + extras := getExtraWatchers(PodResource, config) + err = startAllWatchers(client, PodResource, 
false, config, log) + require.NoError(t, err) -func TestBuildMetadataEnricher(t *testing.T) { - watcher := mockWatcher{} - nodeWatcher := mockWatcher{} - namespaceWatcher := mockWatcher{} - rsWatcher := mockWatcher{} - jobWatcher := mockWatcher{} + // Check that all the required watchers are in the map + resourceWatchers.lock.Lock() + // we add 1 to the expected result to represent the resource itself + require.Equal(t, len(extras)+1, len(resourceWatchers.watchersMap)) + for _, extra := range extras { + require.NotNil(t, resourceWatchers.watchersMap[extra]) + } + resourceWatchers.lock.Unlock() +} + +func TestCreateMetaGen(t *testing.T) { + // make sure the watchers map is empty for the test + resourceWatchers.lock.Lock() + resourceWatchers.watchersMap = make(map[string]*watcherData) + resourceWatchers.lock.Unlock() + + commonMetaConfig := metadata.Config{} + commonConfig, err := conf.NewConfigFrom(&commonMetaConfig) + require.NoError(t, err) + + log := logp.NewLogger("test") + config := &kubernetesConfig{ + Namespace: "test-ns", + SyncPeriod: time.Minute, + Node: "test-node", + AddResourceMetadata: &metadata.AddResourceMetadataConfig{ + CronJob: false, + Deployment: true, + }, + } + client := k8sfake.NewSimpleClientset() + + _, err = createMetadataGen(client, commonConfig, config, DeploymentResource) + // At this point, no watchers were created + require.Error(t, err) + + // Create the watchers necessary for the metadata generator + err = startAllWatchers(client, DeploymentResource, false, config, log) + require.NoError(t, err) + + // Create the generators, this time without error + _, err = createMetadataGen(client, commonConfig, config, DeploymentResource) + require.NoError(t, err) +} + +func TestCreateMetaGenSpecific(t *testing.T) { + // make sure the watchers map is empty for the test + resourceWatchers.lock.Lock() + resourceWatchers.watchersMap = make(map[string]*watcherData) + resourceWatchers.lock.Unlock() + + commonMetaConfig := metadata.Config{} + commonConfig, err := conf.NewConfigFrom(&commonMetaConfig) + require.NoError(t, err) + + log := logp.NewLogger("test") + config := &kubernetesConfig{ + Namespace: "test-ns", + SyncPeriod: time.Minute, + Node: "test-node", + AddResourceMetadata: &metadata.AddResourceMetadataConfig{ + CronJob: false, + Deployment: true, + }, + } + client := k8sfake.NewSimpleClientset() + + // For pod: + + _, err = createMetadataGenSpecific(client, commonConfig, config, PodResource) + // At this point, no watchers were created + require.Error(t, err) + + // Create the pod resource + the extras + err = startAllWatchers(client, PodResource, false, config, log) + require.NoError(t, err) + + _, err = createMetadataGenSpecific(client, commonConfig, config, PodResource) + // At this point, no watchers were created + require.NoError(t, err) + + // For service: + _, err = createMetadataGenSpecific(client, commonConfig, config, ServiceResource) + // At this point, no watchers were created + require.Error(t, err) + + // Create the service resource + the extras + err = startAllWatchers(client, ServiceResource, false, config, log) + require.NoError(t, err) + + _, err = createMetadataGenSpecific(client, commonConfig, config, ServiceResource) + // At this point, no watchers were created + require.NoError(t, err) +} + +func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { + // make sure the watchers map is empty for the test + resourceWatchers.lock.Lock() + resourceWatchers.watchersMap = make(map[string]*watcherData) + resourceWatchers.lock.Unlock() + + 
resourceWatchers.lock.Lock() + resourceWatchers.watchersMap[NamespaceResource] = &watcherData{ + watcher: &mockWatcher{}, + started: true, + whichAreUsing: []string{NamespaceResource, DeploymentResource}, + } + resourceWatchers.watchersMap[DeploymentResource] = &watcherData{ + watcher: &mockWatcher{}, + started: true, + whichAreUsing: []string{DeploymentResource}, + } + resourceWatchers.lock.Unlock() + + funcs := mockFuncs{} + config := &kubernetesConfig{ + Namespace: "test-ns", + SyncPeriod: time.Minute, + Node: "test-node", + AddResourceMetadata: &metadata.AddResourceMetadataConfig{ + CronJob: false, + Deployment: false, + }, + } + + enricherNamespace := buildMetadataEnricher(NamespaceResource, config, funcs.update, funcs.delete, funcs.index) + resourceWatchers.lock.Lock() + watcher := resourceWatchers.watchersMap[NamespaceResource] + // it was initialized with starting = true + require.True(t, watcher.started) + resourceWatchers.lock.Unlock() + + // starting should not affect this result + enricherNamespace.Start() + resourceWatchers.lock.Lock() + watcher = resourceWatchers.watchersMap[NamespaceResource] + require.True(t, watcher.started) + resourceWatchers.lock.Unlock() + + // Stopping should not stop the watcher because it is still being used by DeploymentResource + enricherNamespace.Stop() + resourceWatchers.lock.Lock() + watcher = resourceWatchers.watchersMap[NamespaceResource] + require.True(t, watcher.started) + require.Equal(t, []string{DeploymentResource}, watcher.whichAreUsing) + resourceWatchers.lock.Unlock() + + // Stopping the deployment watcher should stop now both watchers + enricherDeployment := buildMetadataEnricher(DeploymentResource, config, funcs.update, funcs.delete, funcs.index) + enricherDeployment.Stop() + + resourceWatchers.lock.Lock() + watcher = resourceWatchers.watchersMap[NamespaceResource] + + require.False(t, watcher.started) + require.Equal(t, []string{}, watcher.whichAreUsing) + + watcher = resourceWatchers.watchersMap[DeploymentResource] + require.False(t, watcher.started) + require.Equal(t, []string{}, watcher.whichAreUsing) + + resourceWatchers.lock.Unlock() + +} + +func TestBuildMetadataEnricher_EventHandler(t *testing.T) { + // make sure the watchers map is empty for the test + resourceWatchers.lock.Lock() + resourceWatchers.watchersMap = make(map[string]*watcherData) + resourceWatchers.lock.Unlock() + + resourceWatchers.lock.Lock() + resourceWatchers.watchersMap[PodResource] = &watcherData{ + watcher: &mockWatcher{}, + started: false, + whichAreUsing: []string{PodResource}, + } + resourceWatchers.lock.Unlock() funcs := mockFuncs{} resource := &v1.Pod{ @@ -60,15 +411,36 @@ func TestBuildMetadataEnricher(t *testing.T) { }, } - enricher := buildMetadataEnricher(&watcher, &nodeWatcher, &namespaceWatcher, &rsWatcher, &jobWatcher, funcs.update, funcs.delete, funcs.index) - assert.NotNil(t, watcher.handler) + config := &kubernetesConfig{ + Namespace: "test-ns", + SyncPeriod: time.Minute, + Node: "test-node", + AddResourceMetadata: &metadata.AddResourceMetadataConfig{ + CronJob: false, + Deployment: false, + }, + } + + enricher := buildMetadataEnricher(PodResource, config, funcs.update, funcs.delete, funcs.index) + resourceWatchers.lock.Lock() + wData := resourceWatchers.watchersMap[PodResource] + mockW := wData.watcher.(*mockWatcher) + require.NotNil(t, mockW.handler) + resourceWatchers.lock.Unlock() enricher.Start() - assert.True(t, watcher.started) + resourceWatchers.lock.Lock() + watcher := resourceWatchers.watchersMap[PodResource] + require.True(t, 
watcher.started) + resourceWatchers.lock.Unlock() - // Emit an event - watcher.handler.OnAdd(resource) - assert.Equal(t, resource, funcs.updated) + resourceWatchers.lock.Lock() + wData = resourceWatchers.watchersMap[PodResource] + mockW = wData.watcher.(*mockWatcher) + mockW.handler.OnAdd(resource) + resourceWatchers.lock.Unlock() + + require.Equal(t, resource, funcs.updated) // Test enricher events := []mapstr.M{ @@ -77,7 +449,7 @@ func TestBuildMetadataEnricher(t *testing.T) { } enricher.Enrich(events) - assert.Equal(t, []mapstr.M{ + require.Equal(t, []mapstr.M{ {"name": "unknown"}, { "name": "enrich", @@ -94,7 +466,7 @@ func TestBuildMetadataEnricher(t *testing.T) { enricher.isPod = true enricher.Enrich(events) - assert.Equal(t, []mapstr.M{ + require.Equal(t, []mapstr.M{ {"name": "unknown"}, { "name": "enrich", @@ -105,8 +477,13 @@ func TestBuildMetadataEnricher(t *testing.T) { }, events) // Emit delete event - watcher.handler.OnDelete(resource) - assert.Equal(t, resource, funcs.deleted) + resourceWatchers.lock.Lock() + wData = resourceWatchers.watchersMap[PodResource] + mockW = wData.watcher.(*mockWatcher) + mockW.handler.OnDelete(resource) + resourceWatchers.lock.Unlock() + + require.Equal(t, resource, funcs.deleted) events = []mapstr.M{ {"name": "unknown"}, @@ -114,10 +491,16 @@ func TestBuildMetadataEnricher(t *testing.T) { } enricher.Enrich(events) - assert.Equal(t, []mapstr.M{ + require.Equal(t, []mapstr.M{ {"name": "unknown"}, {"name": "enrich"}, }, events) + + enricher.Stop() + resourceWatchers.lock.Lock() + watcher = resourceWatchers.watchersMap[PodResource] + require.False(t, watcher.started) + resourceWatchers.lock.Unlock() } type mockFuncs struct { @@ -137,6 +520,7 @@ func (f *mockFuncs) update(m map[string]mapstr.M, obj kubernetes.Resource) { }, }, } + logger := logp.NewLogger("kubernetes") for k, v := range accessor.GetLabels() { kubernetes2.ShouldPut(meta, fmt.Sprintf("kubernetes.%v", k), v, logger) } @@ -157,11 +541,9 @@ func (f *mockFuncs) index(m mapstr.M) string { type mockWatcher struct { handler kubernetes.ResourceEventHandler - started bool } func (m *mockWatcher) Start() error { - m.started = true return nil } From 888dfbedad19d7af8af2565a7539705ef0e6a94c Mon Sep 17 00:00:00 2001 From: constanca-m Date: Thu, 7 Dec 2023 10:19:10 +0100 Subject: [PATCH 02/61] update CHANGELOG-developer.next.asciidoc --- CHANGELOG-developer.next.asciidoc | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG-developer.next.asciidoc b/CHANGELOG-developer.next.asciidoc index 4e650a193d1..2fae9d8751a 100644 --- a/CHANGELOG-developer.next.asciidoc +++ b/CHANGELOG-developer.next.asciidoc @@ -94,6 +94,7 @@ The list below covers the major changes between 7.0.0-rc2 and main only. ==== Added +- Update watchers to be shared between metricsets in Kubernetes module. {pull}37332[37332] - Add new metricset in Kubernetes module, `state_namespace`. {pull}36406[36406] - Add configuration for APM instrumentation and expose the tracer trough the Beat object. {pull}17938[17938] - Make the behavior of clientWorker and netClientWorker consistent when error is returned from publisher pipeline From e56dc9a222863f0004fbd516f7de3586a7fff02a Mon Sep 17 00:00:00 2001 From: constanca-m Date: Thu, 7 Dec 2023 10:33:12 +0100 Subject: [PATCH 03/61] fix lint error. 
--- metricbeat/module/kubernetes/util/kubernetes.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 411673ef539..d321833ed2c 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -210,7 +210,7 @@ func getWatchOptions(config *kubernetesConfig, nodeScope bool, client k8sclient. } options.Node, err = kubernetes.DiscoverKubernetesNode(log, nd) if err != nil { - return nil, fmt.Errorf("couldn't discover kubernetes node: %s", err) + return nil, fmt.Errorf("couldn't discover kubernetes node: %w", err) } } return &options, err @@ -302,7 +302,7 @@ func startAllWatchers( // If it fails, we return an error, so we can stop the extra watchers from starting. created, err := startWatcher(resourceName, res, *options, client) if err != nil { - return fmt.Errorf("error initializing Kubernetes watcher %s, required by %s: %s", resourceName, resourceName, err) + return fmt.Errorf("error initializing Kubernetes watcher %s, required by %s: %w", resourceName, resourceName, err) } else if created { log.Debugf("Started watcher %s successfully, created by %s.", resourceName, resourceName) } From 5ebc1b370aee576100d4a5474e123a96c171d0c6 Mon Sep 17 00:00:00 2001 From: constanca-m Date: Thu, 7 Dec 2023 10:42:35 +0100 Subject: [PATCH 04/61] - Remove getResourceMetadataWatchers - remove nolint:all --- .../module/kubernetes/util/kubernetes.go | 56 +------------------ 1 file changed, 1 insertion(+), 55 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index d321833ed2c..68e944a42a8 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -467,7 +467,7 @@ func NewResourceMetadataEnricher( updateFunc := func(m map[string]mapstr.M, r kubernetes.Resource) { accessor, _ := meta.Accessor(r) - id := join(accessor.GetNamespace(), accessor.GetName()) //nolint:all + id := join(accessor.GetNamespace(), accessor.GetName()) switch r := r.(type) { case *kubernetes.Pod: @@ -662,60 +662,6 @@ func NewContainerMetadataEnricher( return enricher } -func getResourceMetadataWatchers( - config *kubernetesConfig, - resource kubernetes.Resource, - client k8sclient.Interface, nodeScope bool) (kubernetes.Watcher, kubernetes.Watcher, kubernetes.Watcher) { - - var err error - - options := kubernetes.WatchOptions{ - SyncTimeout: config.SyncPeriod, - Namespace: config.Namespace, - } - - log := logp.NewLogger(selector) - - // Watch objects in the node only - if nodeScope { - nd := &kubernetes.DiscoverKubernetesNodeParams{ - ConfigHost: config.Node, - Client: client, - IsInCluster: kubernetes.IsInCluster(config.KubeConfig), - HostUtils: &kubernetes.DefaultDiscoveryUtils{}, - } - options.Node, err = kubernetes.DiscoverKubernetesNode(log, nd) - if err != nil { - logp.Err("Couldn't discover kubernetes node: %s", err) - return nil, nil, nil - } - } - - log.Debugf("Initializing a new Kubernetes watcher using host: %v", config.Node) - - watcher, err := kubernetes.NewNamedWatcher("resource_metadata_enricher", client, resource, options, nil) - if err != nil { - logp.Err("Error initializing Kubernetes watcher: %s", err) - return nil, nil, nil - } - - nodeWatcher, err := kubernetes.NewNamedWatcher("resource_metadata_enricher_node", client, &kubernetes.Node{}, options, nil) - if err != nil { - logp.Err("Error creating watcher for %T due to error 
%+v", &kubernetes.Node{}, err) - return watcher, nil, nil - } - - namespaceWatcher, err := kubernetes.NewNamedWatcher("resource_metadata_enricher_namespace", client, &kubernetes.Namespace{}, kubernetes.WatchOptions{ - SyncTimeout: config.SyncPeriod, - }, nil) - if err != nil { - logp.Err("Error creating watcher for %T due to error %+v", &kubernetes.Namespace{}, err) - return watcher, nodeWatcher, nil - } - - return watcher, nodeWatcher, namespaceWatcher -} - func GetValidatedConfig(base mb.BaseMetricSet) (*kubernetesConfig, error) { config, err := GetConfig(base) if err != nil { From 8dd0ad6968f48bf120c9c3f4df04bccc4ffe45c9 Mon Sep 17 00:00:00 2001 From: constanca-m Date: Wed, 27 Dec 2023 09:30:36 +0100 Subject: [PATCH 05/61] Move map initialization to kubernetes top level --- .../helper/kubernetes/state_metricset.go | 6 +- .../module/kubernetes/container/container.go | 6 +- metricbeat/module/kubernetes/kubernetes.go | 8 ++ metricbeat/module/kubernetes/node/node.go | 6 +- metricbeat/module/kubernetes/pod/pod.go | 6 +- .../state_container/state_container.go | 6 +- .../module/kubernetes/util/kubernetes.go | 66 +++++++----- .../module/kubernetes/util/kubernetes_test.go | 102 +++++++----------- 8 files changed, 99 insertions(+), 107 deletions(-) diff --git a/metricbeat/helper/kubernetes/state_metricset.go b/metricbeat/helper/kubernetes/state_metricset.go index 51929d73509..5519f980615 100644 --- a/metricbeat/helper/kubernetes/state_metricset.go +++ b/metricbeat/helper/kubernetes/state_metricset.go @@ -88,7 +88,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { BaseMetricSet: base, prometheusClient: prometheusClient, prometheusMapping: mapping, - enricher: util.NewResourceMetadataEnricher(base, resourceName, mod.GetMetricsRepo(), false), + enricher: util.NewResourceMetadataEnricher(base, resourceName, mod.GetMetricsRepo(), mod.GetResourceWatchers(), false), mod: mod, }, nil } @@ -108,7 +108,7 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) { resourceName = "state_namespace" } - m.enricher.Start() + m.enricher.Start(m.mod.GetResourceWatchers()) families, err := m.mod.GetStateMetricsFamilies(m.prometheusClient) if err != nil { @@ -139,6 +139,6 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) { // Close stops this metricset func (m *MetricSet) Close() error { - m.enricher.Stop() + m.enricher.Stop(m.mod.GetResourceWatchers()) return nil } diff --git a/metricbeat/module/kubernetes/container/container.go b/metricbeat/module/kubernetes/container/container.go index c277406faee..d1071f613de 100644 --- a/metricbeat/module/kubernetes/container/container.go +++ b/metricbeat/module/kubernetes/container/container.go @@ -75,7 +75,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return &MetricSet{ BaseMetricSet: base, http: http, - enricher: util.NewContainerMetadataEnricher(base, mod.GetMetricsRepo(), true), + enricher: util.NewContainerMetadataEnricher(base, mod.GetMetricsRepo(), mod.GetResourceWatchers(), true), mod: mod, }, nil } @@ -84,7 +84,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // format. It publishes the event which is then forwarded to the output. In case // of an error set the Error field of mb.Event or simply call report.Error(). 
func (m *MetricSet) Fetch(reporter mb.ReporterV2) { - m.enricher.Start() + m.enricher.Start(m.mod.GetResourceWatchers()) body, err := m.mod.GetKubeletStats(m.http) if err != nil { @@ -131,6 +131,6 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) { // Close stops this metricset func (m *MetricSet) Close() error { - m.enricher.Stop() + m.enricher.Stop(m.mod.GetResourceWatchers()) return nil } diff --git a/metricbeat/module/kubernetes/kubernetes.go b/metricbeat/module/kubernetes/kubernetes.go index 1cb9ca037f9..23611e0b63c 100644 --- a/metricbeat/module/kubernetes/kubernetes.go +++ b/metricbeat/module/kubernetes/kubernetes.go @@ -42,6 +42,7 @@ type Module interface { GetStateMetricsFamilies(prometheus p.Prometheus) ([]*p.MetricFamily, error) GetKubeletStats(http *helper.HTTP) ([]byte, error) GetMetricsRepo() *util.MetricsRepo + GetResourceWatchers() *util.Watchers } type familiesCache struct { @@ -86,6 +87,7 @@ type module struct { kubeStateMetricsCache *kubeStateMetricsCache kubeletStatsCache *kubeletStatsCache metricsRepo *util.MetricsRepo + resourceWatchers *util.Watchers cacheHash uint64 } @@ -97,6 +99,7 @@ func ModuleBuilder() func(base mb.BaseModule) (mb.Module, error) { cacheMap: make(map[uint64]*statsCache), } metricsRepo := util.NewMetricsRepo() + resourceWatchers := util.NewWatchers() return func(base mb.BaseModule) (mb.Module, error) { hash, err := generateCacheHash(base.Config().Hosts) if err != nil { @@ -108,6 +111,7 @@ func ModuleBuilder() func(base mb.BaseModule) (mb.Module, error) { kubeStateMetricsCache: kubeStateMetricsCache, kubeletStatsCache: kubeletStatsCache, metricsRepo: metricsRepo, + resourceWatchers: resourceWatchers, cacheHash: hash, } return &m, nil @@ -162,3 +166,7 @@ func generateCacheHash(host []string) (uint64, error) { func (m *module) GetMetricsRepo() *util.MetricsRepo { return m.metricsRepo } + +func (m *module) GetResourceWatchers() *util.Watchers { + return m.resourceWatchers +} diff --git a/metricbeat/module/kubernetes/node/node.go b/metricbeat/module/kubernetes/node/node.go index 69bfcc2139e..909b9fa52b9 100644 --- a/metricbeat/module/kubernetes/node/node.go +++ b/metricbeat/module/kubernetes/node/node.go @@ -75,7 +75,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return &MetricSet{ BaseMetricSet: base, http: http, - enricher: util.NewResourceMetadataEnricher(base, util.NodeResource, mod.GetMetricsRepo(), false), + enricher: util.NewResourceMetadataEnricher(base, util.NodeResource, mod.GetMetricsRepo(), mod.GetResourceWatchers(), false), mod: mod, }, nil } @@ -84,7 +84,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // format. It publishes the event which is then forwarded to the output. In case // of an error set the Error field of mb.Event or simply call report.Error(). 
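The module change above works because ModuleBuilder creates the util.Watchers registry once and captures it in the returned closure, so every metricset built through that module receives the same pointer. A minimal sketch of that pattern with illustrative names (not the beats API):

package main

import "fmt"

type registry struct{ entries map[string]int }

func newRegistry() *registry { return &registry{entries: map[string]int{}} }

// moduleBuilder mirrors the closure pattern: the registry is created once,
// and every "metricset" built afterwards sees the same instance.
func moduleBuilder() func(name string) *registry {
	shared := newRegistry()
	return func(name string) *registry {
		shared.entries[name]++
		return shared
	}
}

func main() {
	build := moduleBuilder()
	a := build("state_pod")
	b := build("state_node")
	fmt.Println(a == b)         // true: both metricsets share one registry
	fmt.Println(len(a.entries)) // 2
}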
func (m *MetricSet) Fetch(reporter mb.ReporterV2) { - m.enricher.Start() + m.enricher.Start(m.mod.GetResourceWatchers()) body, err := m.mod.GetKubeletStats(m.http) if err != nil { @@ -115,6 +115,6 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) { // Close stops this metricset func (m *MetricSet) Close() error { - m.enricher.Stop() + m.enricher.Stop(m.mod.GetResourceWatchers()) return nil } diff --git a/metricbeat/module/kubernetes/pod/pod.go b/metricbeat/module/kubernetes/pod/pod.go index 485a72f11b7..aaa4a9b1bb5 100644 --- a/metricbeat/module/kubernetes/pod/pod.go +++ b/metricbeat/module/kubernetes/pod/pod.go @@ -76,7 +76,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return &MetricSet{ BaseMetricSet: base, http: http, - enricher: util.NewResourceMetadataEnricher(base, util.PodResource, mod.GetMetricsRepo(), true), + enricher: util.NewResourceMetadataEnricher(base, util.PodResource, mod.GetMetricsRepo(), mod.GetResourceWatchers(), true), mod: mod, }, nil } @@ -85,7 +85,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // format. It publishes the event which is then forwarded to the output. In case // of an error set the Error field of mb.Event or simply call report.Error(). func (m *MetricSet) Fetch(reporter mb.ReporterV2) { - m.enricher.Start() + m.enricher.Start(m.mod.GetResourceWatchers()) body, err := m.mod.GetKubeletStats(m.http) if err != nil { @@ -133,6 +133,6 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) { // Close stops this metricset func (m *MetricSet) Close() error { - m.enricher.Stop() + m.enricher.Stop(m.mod.GetResourceWatchers()) return nil } diff --git a/metricbeat/module/kubernetes/state_container/state_container.go b/metricbeat/module/kubernetes/state_container/state_container.go index 86ffb6c0782..0c46e60f51a 100644 --- a/metricbeat/module/kubernetes/state_container/state_container.go +++ b/metricbeat/module/kubernetes/state_container/state_container.go @@ -115,7 +115,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return &MetricSet{ BaseMetricSet: base, prometheus: prometheus, - enricher: util.NewContainerMetadataEnricher(base, mod.GetMetricsRepo(), false), + enricher: util.NewContainerMetadataEnricher(base, mod.GetMetricsRepo(), mod.GetResourceWatchers(), false), mod: mod, }, nil } @@ -124,7 +124,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // format. It publishes the event which is then forwarded to the output. In case // of an error set the Error field of mb.Event or simply call report.Error(). 
func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { - m.enricher.Start() + m.enricher.Start(m.mod.GetResourceWatchers()) families, err := m.mod.GetStateMetricsFamilies(m.prometheus) if err != nil { @@ -196,6 +196,6 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { // Close stops this metricset func (m *MetricSet) Close() error { - m.enricher.Stop() + m.enricher.Stop(m.mod.GetResourceWatchers()) return nil } diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 68e944a42a8..36a1435dc8c 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -56,10 +56,10 @@ type kubernetesConfig struct { type Enricher interface { // Start will start the Kubernetes watcher on the first call, does nothing on the rest // errors are logged as warning - Start() + Start(*Watchers) // Stop will stop the Kubernetes watcher - Stop() + Stop(*Watchers) // Enrich the given list of events Enrich([]mapstr.M) @@ -76,8 +76,8 @@ type enricher struct { type nilEnricher struct{} -func (*nilEnricher) Start() {} -func (*nilEnricher) Stop() {} +func (*nilEnricher) Start(*Watchers) {} +func (*nilEnricher) Stop(*Watchers) {} func (*nilEnricher) Enrich([]mapstr.M) {} type watcherData struct { @@ -86,15 +86,11 @@ type watcherData struct { started bool // true if watcher has started, false otherwise } -type watchers struct { +type Watchers struct { watchersMap map[string]*watcherData lock sync.RWMutex } -var resourceWatchers = watchers{ - watchersMap: make(map[string]*watcherData), -} - const selector = "kubernetes" const ( @@ -113,6 +109,13 @@ const ( NamespaceResource = "state_namespace" ) +func NewWatchers() *Watchers { + watchers := &Watchers{ + watchersMap: make(map[string]*watcherData), + } + return watchers +} + func getResource(resourceName string) kubernetes.Resource { switch resourceName { case PodResource: @@ -221,7 +224,8 @@ func startWatcher( resourceName string, resource kubernetes.Resource, options kubernetes.WatchOptions, - client k8sclient.Interface) (bool, error) { + client k8sclient.Interface, + resourceWatchers *Watchers) (bool, error) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() @@ -239,7 +243,7 @@ func startWatcher( return false, nil } -func addToWhichAreUsing(resourceName string, usingName string) { +func addToWhichAreUsing(resourceName string, usingName string, resourceWatchers *Watchers) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() @@ -261,7 +265,7 @@ func addToWhichAreUsing(resourceName string, usingName string) { // removeToWhichAreUsing returns true if element was removed and new size of array. // The cache should be locked when called. -func removeToWhichAreUsing(resourceName string, notUsingName string) (bool, int) { +func removeToWhichAreUsing(resourceName string, notUsingName string, resourceWatchers *Watchers) (bool, int) { data, ok := resourceWatchers.watchersMap[resourceName] removed := false if ok { @@ -287,6 +291,7 @@ func startAllWatchers( nodeScope bool, config *kubernetesConfig, log *logp.Logger, + resourceWatchers *Watchers, ) error { res := getResource(resourceName) if res == nil { @@ -300,20 +305,20 @@ func startAllWatchers( // Create a watcher for the given resource. // If it fails, we return an error, so we can stop the extra watchers from starting. 
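The comment above describes the control flow continued below: the watcher for the requested resource is mandatory and its failure aborts, while the extra watchers are best-effort and only log their errors. A rough, self-contained sketch of that flow (illustrative names, not the patch code):

package main

import (
	"errors"
	"fmt"
	"log"
)

// createFor is a simplified stand-in: the primary watcher must be created,
// failures on the extra watchers are only logged so the primary keeps working.
func createFor(primary string, extras []string, create func(string) error) error {
	if err := create(primary); err != nil {
		return fmt.Errorf("error initializing watcher %s: %w", primary, err)
	}
	for _, extra := range extras {
		if err := create(extra); err != nil {
			log.Printf("error initializing watcher %s, required by %s: %s", extra, primary, err)
		}
	}
	return nil
}

func main() {
	create := func(name string) error {
		if name == "namespace" {
			return errors.New("forbidden")
		}
		return nil
	}
	// The namespace failure is only logged; the pod watcher is still usable.
	fmt.Println(createFor("pod", []string{"namespace", "node"}, create))
}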
- created, err := startWatcher(resourceName, res, *options, client) + created, err := startWatcher(resourceName, res, *options, client, resourceWatchers) if err != nil { return fmt.Errorf("error initializing Kubernetes watcher %s, required by %s: %w", resourceName, resourceName, err) } else if created { log.Debugf("Started watcher %s successfully, created by %s.", resourceName, resourceName) } - addToWhichAreUsing(resourceName, resourceName) + addToWhichAreUsing(resourceName, resourceName, resourceWatchers) // Create the extra watchers required by this resource extraWatchers := getExtraWatchers(resourceName, config) for _, extra := range extraWatchers { extraRes := getResource(extra) if extraRes != nil { - created, err = startWatcher(extra, extraRes, *options, client) + created, err = startWatcher(extra, extraRes, *options, client, resourceWatchers) if err != nil { log.Errorf("Error initializing Kubernetes watcher %s, required by %s: %s", extra, resourceName, err) } else { @@ -321,7 +326,7 @@ func startAllWatchers( log.Debugf("Started watcher %s successfully, created by %s.", extra, resourceName) } // add this resource to the ones using the extra resource - addToWhichAreUsing(extra, resourceName) + addToWhichAreUsing(extra, resourceName, resourceWatchers) } } else { log.Errorf("Resource for name %s does not exist. Watcher cannot be created.", extra) @@ -332,7 +337,7 @@ func startAllWatchers( } // createMetadataGen creates the metadata generator for resources in general -func createMetadataGen(client k8sclient.Interface, commonConfig *conf.C, config *kubernetesConfig, resourceName string) (*metadata.Resource, error) { +func createMetadataGen(client k8sclient.Interface, commonConfig *conf.C, config *kubernetesConfig, resourceName string, resourceWatchers *Watchers) (*metadata.Resource, error) { // check if the resource is namespace aware extras := getExtraWatchers(resourceName, config) namespaceAware := false @@ -371,7 +376,7 @@ func createMetadataGen(client k8sclient.Interface, commonConfig *conf.C, config } // createMetadataGenSpecific creates the metadata generator for a specific resource - pod or service -func createMetadataGenSpecific(client k8sclient.Interface, commonConfig *conf.C, config *kubernetesConfig, resourceName string) (metadata.MetaGen, error) { +func createMetadataGenSpecific(client k8sclient.Interface, commonConfig *conf.C, config *kubernetesConfig, resourceName string, resourceWatchers *Watchers) (metadata.MetaGen, error) { resourceWatchers.lock.RLock() defer resourceWatchers.lock.RUnlock() @@ -423,6 +428,7 @@ func NewResourceMetadataEnricher( base mb.BaseMetricSet, resourceName string, metricsRepo *MetricsRepo, + resourceWatchers *Watchers, nodeScope bool) Enricher { log := logp.NewLogger(selector) @@ -447,7 +453,7 @@ func NewResourceMetadataEnricher( return &nilEnricher{} } - err = startAllWatchers(client, resourceName, nodeScope, config, log) + err = startAllWatchers(client, resourceName, nodeScope, config, log, resourceWatchers) if err != nil { log.Errorf("Error starting the watchers: %s", err) return &nilEnricher{} @@ -456,9 +462,9 @@ func NewResourceMetadataEnricher( var specificMetaGen metadata.MetaGen var generalMetaGen *metadata.Resource if resourceName == ServiceResource || resourceName == PodResource { - specificMetaGen, err = createMetadataGenSpecific(client, commonConfig, config, resourceName) + specificMetaGen, err = createMetadataGenSpecific(client, commonConfig, config, resourceName, resourceWatchers) } else { - generalMetaGen, err = 
createMetadataGen(client, commonConfig, config, resourceName) + generalMetaGen, err = createMetadataGen(client, commonConfig, config, resourceName, resourceWatchers) } if err != nil { log.Errorf("Error trying to create the metadata generators: %s", err) @@ -535,7 +541,7 @@ func NewResourceMetadataEnricher( return join(getString(e, mb.ModuleDataKey+".namespace"), getString(e, "name")) } - enricher := buildMetadataEnricher(resourceName, config, updateFunc, deleteFunc, indexFunc) + enricher := buildMetadataEnricher(resourceName, resourceWatchers, config, updateFunc, deleteFunc, indexFunc) if resourceName == PodResource { enricher.isPod = true } @@ -547,6 +553,7 @@ func NewResourceMetadataEnricher( func NewContainerMetadataEnricher( base mb.BaseMetricSet, metricsRepo *MetricsRepo, + resourceWatchers *Watchers, nodeScope bool) Enricher { log := logp.NewLogger(selector) @@ -571,13 +578,13 @@ func NewContainerMetadataEnricher( return &nilEnricher{} } - err = startAllWatchers(client, PodResource, nodeScope, config, log) + err = startAllWatchers(client, PodResource, nodeScope, config, log, resourceWatchers) if err != nil { log.Errorf("Error starting the watchers: %s", err) return &nilEnricher{} } - metaGen, err := createMetadataGenSpecific(client, commonConfig, config, PodResource) + metaGen, err := createMetadataGenSpecific(client, commonConfig, config, PodResource, resourceWatchers) if err != nil { log.Errorf("Error trying to create the metadata generators: %s", err) return &nilEnricher{} @@ -657,7 +664,7 @@ func NewContainerMetadataEnricher( return join(getString(e, mb.ModuleDataKey+".namespace"), getString(e, mb.ModuleDataKey+".pod.name"), getString(e, "name")) } - enricher := buildMetadataEnricher(PodResource, config, updateFunc, deleteFunc, indexFunc) + enricher := buildMetadataEnricher(PodResource, resourceWatchers, config, updateFunc, deleteFunc, indexFunc) return enricher } @@ -713,6 +720,7 @@ func join(fields ...string) string { func buildMetadataEnricher( resourceName string, + resourceWatchers *Watchers, config *kubernetesConfig, update func(map[string]mapstr.M, kubernetes.Resource), delete func(map[string]mapstr.M, kubernetes.Resource), @@ -752,7 +760,7 @@ func buildMetadataEnricher( return &enricher } -func (e *enricher) Start() { +func (e *enricher) Start(resourceWatchers *Watchers) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() @@ -778,13 +786,13 @@ func (e *enricher) Start() { } } -func (e *enricher) Stop() { +func (e *enricher) Stop(resourceWatchers *Watchers) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() resourceWatcher := resourceWatchers.watchersMap[e.resourceName] if resourceWatcher != nil && resourceWatcher.watcher != nil && resourceWatcher.whichAreUsing != nil && resourceWatcher.started { - _, size := removeToWhichAreUsing(e.resourceName, e.resourceName) + _, size := removeToWhichAreUsing(e.resourceName, e.resourceName, resourceWatchers) if size == 0 { resourceWatcher.watcher.Stop() resourceWatcher.started = false @@ -795,7 +803,7 @@ func (e *enricher) Stop() { for _, extra := range extras { extraWatcher := resourceWatchers.watchersMap[extra] if extraWatcher != nil && extraWatcher.watcher != nil && extraWatcher.whichAreUsing != nil && extraWatcher.started { - _, size := removeToWhichAreUsing(extra, e.resourceName) + _, size := removeToWhichAreUsing(extra, e.resourceName, resourceWatchers) if size == 0 { extraWatcher.watcher.Stop() extraWatcher.started = false diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go 
b/metricbeat/module/kubernetes/util/kubernetes_test.go index 8f047b34993..f87417ab138 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -63,10 +63,7 @@ func TestWatchOptions(t *testing.T) { } func TestStartWatcher(t *testing.T) { - // make sure the watchers map is empty for the test - resourceWatchers.lock.Lock() - resourceWatchers.watchersMap = make(map[string]*watcherData) - resourceWatchers.lock.Unlock() + resourceWatchers := NewWatchers() client := k8sfake.NewSimpleClientset() config := &kubernetesConfig{ @@ -79,7 +76,7 @@ func TestStartWatcher(t *testing.T) { options, err := getWatchOptions(config, false, client, log) require.NoError(t, err) - created, err := startWatcher(NamespaceResource, &kubernetes.Node{}, *options, client) + created, err := startWatcher(NamespaceResource, &kubernetes.Node{}, *options, client, resourceWatchers) require.True(t, created) require.NoError(t, err) @@ -89,7 +86,7 @@ func TestStartWatcher(t *testing.T) { require.NotNil(t, resourceWatchers.watchersMap[NamespaceResource].watcher) resourceWatchers.lock.Unlock() - created, err = startWatcher(NamespaceResource, &kubernetes.Namespace{}, *options, client) + created, err = startWatcher(NamespaceResource, &kubernetes.Namespace{}, *options, client, resourceWatchers) require.False(t, created) require.NoError(t, err) @@ -99,7 +96,7 @@ func TestStartWatcher(t *testing.T) { require.NotNil(t, resourceWatchers.watchersMap[NamespaceResource].watcher) resourceWatchers.lock.Unlock() - created, err = startWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client) + created, err = startWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers) require.True(t, created) require.NoError(t, err) @@ -111,10 +108,7 @@ func TestStartWatcher(t *testing.T) { } func TestAddToWhichAreUsing(t *testing.T) { - // make sure the watchers map is empty for the test - resourceWatchers.lock.Lock() - resourceWatchers.watchersMap = make(map[string]*watcherData) - resourceWatchers.lock.Unlock() + resourceWatchers := NewWatchers() client := k8sfake.NewSimpleClientset() config := &kubernetesConfig{ @@ -128,7 +122,7 @@ func TestAddToWhichAreUsing(t *testing.T) { require.NoError(t, err) // Create the new entry with watcher and nil string array first - created, err := startWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client) + created, err := startWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers) require.True(t, created) require.NoError(t, err) @@ -137,23 +131,20 @@ func TestAddToWhichAreUsing(t *testing.T) { require.Nil(t, resourceWatchers.watchersMap[DeploymentResource].whichAreUsing) resourceWatchers.lock.Unlock() - addToWhichAreUsing(DeploymentResource, DeploymentResource) + addToWhichAreUsing(DeploymentResource, DeploymentResource, resourceWatchers) resourceWatchers.lock.Lock() require.NotNil(t, resourceWatchers.watchersMap[DeploymentResource].whichAreUsing) require.Equal(t, []string{DeploymentResource}, resourceWatchers.watchersMap[DeploymentResource].whichAreUsing) resourceWatchers.lock.Unlock() - addToWhichAreUsing(DeploymentResource, PodResource) + addToWhichAreUsing(DeploymentResource, PodResource, resourceWatchers) resourceWatchers.lock.Lock() require.Equal(t, []string{DeploymentResource, PodResource}, resourceWatchers.watchersMap[DeploymentResource].whichAreUsing) resourceWatchers.lock.Unlock() } func TestRemoveToWhichAreUsing(t *testing.T) { - // make sure the 
watchers map is empty for the test - resourceWatchers.lock.Lock() - resourceWatchers.watchersMap = make(map[string]*watcherData) - resourceWatchers.lock.Unlock() + resourceWatchers := NewWatchers() client := k8sfake.NewSimpleClientset() config := &kubernetesConfig{ @@ -167,34 +158,31 @@ func TestRemoveToWhichAreUsing(t *testing.T) { require.NoError(t, err) // Create the new entry with watcher and nil string array first - created, err := startWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client) + created, err := startWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers) require.True(t, created) require.NoError(t, err) - addToWhichAreUsing(DeploymentResource, DeploymentResource) - addToWhichAreUsing(DeploymentResource, PodResource) + addToWhichAreUsing(DeploymentResource, DeploymentResource, resourceWatchers) + addToWhichAreUsing(DeploymentResource, PodResource, resourceWatchers) resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() - removed, size := removeToWhichAreUsing(DeploymentResource, DeploymentResource) + removed, size := removeToWhichAreUsing(DeploymentResource, DeploymentResource, resourceWatchers) require.True(t, removed) require.Equal(t, 1, size) - removed, size = removeToWhichAreUsing(DeploymentResource, DeploymentResource) + removed, size = removeToWhichAreUsing(DeploymentResource, DeploymentResource, resourceWatchers) require.False(t, removed) require.Equal(t, 1, size) - removed, size = removeToWhichAreUsing(DeploymentResource, PodResource) + removed, size = removeToWhichAreUsing(DeploymentResource, PodResource, resourceWatchers) require.True(t, removed) require.Equal(t, 0, size) } func TestStartAllWatchers(t *testing.T) { - // make sure the watchers map is empty for the test - resourceWatchers.lock.Lock() - resourceWatchers.watchersMap = make(map[string]*watcherData) - resourceWatchers.lock.Unlock() + resourceWatchers := NewWatchers() client := k8sfake.NewSimpleClientset() config := &kubernetesConfig{ @@ -209,7 +197,7 @@ func TestStartAllWatchers(t *testing.T) { log := logp.NewLogger("test") // Start watchers based on a resource that does not exist should cause an error - err := startAllWatchers(client, "does-not-exist", false, config, log) + err := startAllWatchers(client, "does-not-exist", false, config, log, resourceWatchers) require.Error(t, err) resourceWatchers.lock.Lock() require.Equal(t, 0, len(resourceWatchers.watchersMap)) @@ -217,7 +205,7 @@ func TestStartAllWatchers(t *testing.T) { // Start watcher for a resource that requires other resources, should start all the watchers extras := getExtraWatchers(PodResource, config) - err = startAllWatchers(client, PodResource, false, config, log) + err = startAllWatchers(client, PodResource, false, config, log, resourceWatchers) require.NoError(t, err) // Check that all the required watchers are in the map @@ -231,10 +219,7 @@ func TestStartAllWatchers(t *testing.T) { } func TestCreateMetaGen(t *testing.T) { - // make sure the watchers map is empty for the test - resourceWatchers.lock.Lock() - resourceWatchers.watchersMap = make(map[string]*watcherData) - resourceWatchers.lock.Unlock() + resourceWatchers := NewWatchers() commonMetaConfig := metadata.Config{} commonConfig, err := conf.NewConfigFrom(&commonMetaConfig) @@ -252,24 +237,21 @@ func TestCreateMetaGen(t *testing.T) { } client := k8sfake.NewSimpleClientset() - _, err = createMetadataGen(client, commonConfig, config, DeploymentResource) + _, err = createMetadataGen(client, commonConfig, config, 
DeploymentResource, resourceWatchers) // At this point, no watchers were created require.Error(t, err) // Create the watchers necessary for the metadata generator - err = startAllWatchers(client, DeploymentResource, false, config, log) + err = startAllWatchers(client, DeploymentResource, false, config, log, resourceWatchers) require.NoError(t, err) // Create the generators, this time without error - _, err = createMetadataGen(client, commonConfig, config, DeploymentResource) + _, err = createMetadataGen(client, commonConfig, config, DeploymentResource, resourceWatchers) require.NoError(t, err) } func TestCreateMetaGenSpecific(t *testing.T) { - // make sure the watchers map is empty for the test - resourceWatchers.lock.Lock() - resourceWatchers.watchersMap = make(map[string]*watcherData) - resourceWatchers.lock.Unlock() + resourceWatchers := NewWatchers() commonMetaConfig := metadata.Config{} commonConfig, err := conf.NewConfigFrom(&commonMetaConfig) @@ -289,37 +271,34 @@ func TestCreateMetaGenSpecific(t *testing.T) { // For pod: - _, err = createMetadataGenSpecific(client, commonConfig, config, PodResource) + _, err = createMetadataGenSpecific(client, commonConfig, config, PodResource, resourceWatchers) // At this point, no watchers were created require.Error(t, err) // Create the pod resource + the extras - err = startAllWatchers(client, PodResource, false, config, log) + err = startAllWatchers(client, PodResource, false, config, log, resourceWatchers) require.NoError(t, err) - _, err = createMetadataGenSpecific(client, commonConfig, config, PodResource) + _, err = createMetadataGenSpecific(client, commonConfig, config, PodResource, resourceWatchers) // At this point, no watchers were created require.NoError(t, err) // For service: - _, err = createMetadataGenSpecific(client, commonConfig, config, ServiceResource) + _, err = createMetadataGenSpecific(client, commonConfig, config, ServiceResource, resourceWatchers) // At this point, no watchers were created require.Error(t, err) // Create the service resource + the extras - err = startAllWatchers(client, ServiceResource, false, config, log) + err = startAllWatchers(client, ServiceResource, false, config, log, resourceWatchers) require.NoError(t, err) - _, err = createMetadataGenSpecific(client, commonConfig, config, ServiceResource) + _, err = createMetadataGenSpecific(client, commonConfig, config, ServiceResource, resourceWatchers) // At this point, no watchers were created require.NoError(t, err) } func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { - // make sure the watchers map is empty for the test - resourceWatchers.lock.Lock() - resourceWatchers.watchersMap = make(map[string]*watcherData) - resourceWatchers.lock.Unlock() + resourceWatchers := NewWatchers() resourceWatchers.lock.Lock() resourceWatchers.watchersMap[NamespaceResource] = &watcherData{ @@ -345,7 +324,7 @@ func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { }, } - enricherNamespace := buildMetadataEnricher(NamespaceResource, config, funcs.update, funcs.delete, funcs.index) + enricherNamespace := buildMetadataEnricher(NamespaceResource, resourceWatchers, config, funcs.update, funcs.delete, funcs.index) resourceWatchers.lock.Lock() watcher := resourceWatchers.watchersMap[NamespaceResource] // it was initialized with starting = true @@ -353,14 +332,14 @@ func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { resourceWatchers.lock.Unlock() // starting should not affect this result - enricherNamespace.Start() + enricherNamespace.Start(resourceWatchers) 
resourceWatchers.lock.Lock() watcher = resourceWatchers.watchersMap[NamespaceResource] require.True(t, watcher.started) resourceWatchers.lock.Unlock() // Stopping should not stop the watcher because it is still being used by DeploymentResource - enricherNamespace.Stop() + enricherNamespace.Stop(resourceWatchers) resourceWatchers.lock.Lock() watcher = resourceWatchers.watchersMap[NamespaceResource] require.True(t, watcher.started) @@ -368,8 +347,8 @@ func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { resourceWatchers.lock.Unlock() // Stopping the deployment watcher should stop now both watchers - enricherDeployment := buildMetadataEnricher(DeploymentResource, config, funcs.update, funcs.delete, funcs.index) - enricherDeployment.Stop() + enricherDeployment := buildMetadataEnricher(DeploymentResource, resourceWatchers, config, funcs.update, funcs.delete, funcs.index) + enricherDeployment.Stop(resourceWatchers) resourceWatchers.lock.Lock() watcher = resourceWatchers.watchersMap[NamespaceResource] @@ -386,10 +365,7 @@ func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { } func TestBuildMetadataEnricher_EventHandler(t *testing.T) { - // make sure the watchers map is empty for the test - resourceWatchers.lock.Lock() - resourceWatchers.watchersMap = make(map[string]*watcherData) - resourceWatchers.lock.Unlock() + resourceWatchers := NewWatchers() resourceWatchers.lock.Lock() resourceWatchers.watchersMap[PodResource] = &watcherData{ @@ -421,14 +397,14 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { }, } - enricher := buildMetadataEnricher(PodResource, config, funcs.update, funcs.delete, funcs.index) + enricher := buildMetadataEnricher(PodResource, resourceWatchers, config, funcs.update, funcs.delete, funcs.index) resourceWatchers.lock.Lock() wData := resourceWatchers.watchersMap[PodResource] mockW := wData.watcher.(*mockWatcher) require.NotNil(t, mockW.handler) resourceWatchers.lock.Unlock() - enricher.Start() + enricher.Start(resourceWatchers) resourceWatchers.lock.Lock() watcher := resourceWatchers.watchersMap[PodResource] require.True(t, watcher.started) @@ -496,7 +472,7 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { {"name": "enrich"}, }, events) - enricher.Stop() + enricher.Stop(resourceWatchers) resourceWatchers.lock.Lock() watcher = resourceWatchers.watchersMap[PodResource] require.False(t, watcher.started) From 15de770ae26a07e72a255fedb863c2d99a671a04 Mon Sep 17 00:00:00 2001 From: constanca Date: Mon, 8 Jan 2024 09:10:56 +0100 Subject: [PATCH 06/61] Refactor function name and add comments Signed-off-by: constanca --- metricbeat/module/kubernetes/util/kubernetes.go | 10 ++++++---- metricbeat/module/kubernetes/util/kubernetes_test.go | 6 +++--- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index ab607865c72..ecfd80a5e61 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -243,6 +243,8 @@ func startWatcher( return false, nil } +// addToWhichAreUsing adds resource identified by usingName to the list of resources using the watcher +// identified by resourceName func addToWhichAreUsing(resourceName string, usingName string, resourceWatchers *Watchers) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() @@ -263,9 +265,9 @@ func addToWhichAreUsing(resourceName string, usingName string, resourceWatchers } } -// removeToWhichAreUsing returns true if element was 
removed and new size of array. +// removeFromWhichAreUsing returns true if element was removed and new size of array. // The cache should be locked when called. -func removeToWhichAreUsing(resourceName string, notUsingName string, resourceWatchers *Watchers) (bool, int) { +func removeFromWhichAreUsing(resourceName string, notUsingName string, resourceWatchers *Watchers) (bool, int) { data, ok := resourceWatchers.watchersMap[resourceName] removed := false if ok { @@ -793,7 +795,7 @@ func (e *enricher) Stop(resourceWatchers *Watchers) { resourceWatcher := resourceWatchers.watchersMap[e.resourceName] if resourceWatcher != nil && resourceWatcher.watcher != nil && resourceWatcher.whichAreUsing != nil && resourceWatcher.started { - _, size := removeToWhichAreUsing(e.resourceName, e.resourceName, resourceWatchers) + _, size := removeFromWhichAreUsing(e.resourceName, e.resourceName, resourceWatchers) if size == 0 { resourceWatcher.watcher.Stop() resourceWatcher.started = false @@ -804,7 +806,7 @@ func (e *enricher) Stop(resourceWatchers *Watchers) { for _, extra := range extras { extraWatcher := resourceWatchers.watchersMap[extra] if extraWatcher != nil && extraWatcher.watcher != nil && extraWatcher.whichAreUsing != nil && extraWatcher.started { - _, size := removeToWhichAreUsing(extra, e.resourceName, resourceWatchers) + _, size := removeFromWhichAreUsing(extra, e.resourceName, resourceWatchers) if size == 0 { extraWatcher.watcher.Stop() extraWatcher.started = false diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index f87417ab138..199748f7d80 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -168,15 +168,15 @@ func TestRemoveToWhichAreUsing(t *testing.T) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() - removed, size := removeToWhichAreUsing(DeploymentResource, DeploymentResource, resourceWatchers) + removed, size := removeFromWhichAreUsing(DeploymentResource, DeploymentResource, resourceWatchers) require.True(t, removed) require.Equal(t, 1, size) - removed, size = removeToWhichAreUsing(DeploymentResource, DeploymentResource, resourceWatchers) + removed, size = removeFromWhichAreUsing(DeploymentResource, DeploymentResource, resourceWatchers) require.False(t, removed) require.Equal(t, 1, size) - removed, size = removeToWhichAreUsing(DeploymentResource, PodResource, resourceWatchers) + removed, size = removeFromWhichAreUsing(DeploymentResource, PodResource, resourceWatchers) require.True(t, removed) require.Equal(t, 0, size) } From c3f57c4038656ba0fbeffdcecff15f37c9de0d9d Mon Sep 17 00:00:00 2001 From: constanca Date: Fri, 26 Jan 2024 14:11:27 +0100 Subject: [PATCH 07/61] - Rename resourcesUsing - Rename createWatchers - Remove unnecessary nil conditions for the watchers Signed-off-by: constanca --- .../module/kubernetes/util/kubernetes.go | 78 ++++++++++--------- .../module/kubernetes/util/kubernetes_test.go | 70 ++++++++--------- 2 files changed, 76 insertions(+), 72 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index ecfd80a5e61..4c7f9c1b168 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -81,9 +81,9 @@ func (*nilEnricher) Stop(*Watchers) {} func (*nilEnricher) Enrich([]mapstr.M) {} type watcherData struct { - whichAreUsing []string // list of resources using this watcher - watcher 
kubernetes.Watcher - started bool // true if watcher has started, false otherwise + resourcesUsing []string // list of resources using this watcher + watcher kubernetes.Watcher + started bool // true if watcher has started, false otherwise } type Watchers struct { @@ -219,8 +219,8 @@ func getWatchOptions(config *kubernetesConfig, nodeScope bool, client k8sclient. return &options, err } -// startWatcher starts a watcher for a specific resource -func startWatcher( +// createWatcher creates a watcher for a specific resource +func createWatcher( resourceName string, resource kubernetes.Resource, options kubernetes.WatchOptions, @@ -243,16 +243,16 @@ func startWatcher( return false, nil } -// addToWhichAreUsing adds resource identified by usingName to the list of resources using the watcher +// addToResourcesUsing adds resource identified by usingName to the list of resources using the watcher // identified by resourceName -func addToWhichAreUsing(resourceName string, usingName string, resourceWatchers *Watchers) { +func addToResourcesUsing(resourceName string, usingName string, resourceWatchers *Watchers) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() data, ok := resourceWatchers.watchersMap[resourceName] if ok { contains := false - for _, which := range data.whichAreUsing { + for _, which := range data.resourcesUsing { if which == usingName { contains = true break @@ -260,34 +260,34 @@ func addToWhichAreUsing(resourceName string, usingName string, resourceWatchers } // add this resource to the list of resources using it if !contains { - data.whichAreUsing = append(data.whichAreUsing, usingName) + data.resourcesUsing = append(data.resourcesUsing, usingName) } } } -// removeFromWhichAreUsing returns true if element was removed and new size of array. +// removeFromResourcesUsing returns true if element was removed and new size of array. // The cache should be locked when called. -func removeFromWhichAreUsing(resourceName string, notUsingName string, resourceWatchers *Watchers) (bool, int) { +func removeFromResourcesUsing(resourceName string, notUsingName string, resourceWatchers *Watchers) (bool, int) { data, ok := resourceWatchers.watchersMap[resourceName] removed := false if ok { newIndex := 0 - for i, which := range data.whichAreUsing { + for i, which := range data.resourcesUsing { if which == notUsingName { removed = true } else { - data.whichAreUsing[newIndex] = data.whichAreUsing[i] + data.resourcesUsing[newIndex] = data.resourcesUsing[i] newIndex++ } } - data.whichAreUsing = data.whichAreUsing[:newIndex] - return removed, len(data.whichAreUsing) + data.resourcesUsing = data.resourcesUsing[:newIndex] + return removed, len(data.resourcesUsing) } return removed, 0 } -// startAllWatchers starts all the watchers required by a specific resource -func startAllWatchers( +// createAllWatchers creates all the watchers required by a specific resource +func createAllWatchers( client k8sclient.Interface, resourceName string, nodeScope bool, @@ -306,29 +306,29 @@ func startAllWatchers( } // Create a watcher for the given resource. - // If it fails, we return an error, so we can stop the extra watchers from starting. - created, err := startWatcher(resourceName, res, *options, client, resourceWatchers) + // If it fails, we return an error, so we can stop the extra watchers from creating. 
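removeFromResourcesUsing above uses the standard in-place slice filter: kept elements are copied forward and the slice is re-sliced to the new length, so no new backing array is allocated. A standalone sketch of the same idiom:

package main

import "fmt"

// removeString filters s in place, dropping every occurrence of drop.
// It reports whether anything was removed and the resulting length.
func removeString(s []string, drop string) ([]string, bool, int) {
	removed := false
	n := 0
	for _, v := range s {
		if v == drop {
			removed = true
			continue
		}
		s[n] = v
		n++
	}
	s = s[:n]
	return s, removed, len(s)
}

func main() {
	users := []string{"state_namespace", "state_deployment"}
	users, removed, size := removeString(users, "state_namespace")
	fmt.Println(users, removed, size) // [state_deployment] true 1
}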
+ created, err := createWatcher(resourceName, res, *options, client, resourceWatchers) if err != nil { return fmt.Errorf("error initializing Kubernetes watcher %s, required by %s: %w", resourceName, resourceName, err) } else if created { - log.Debugf("Started watcher %s successfully, created by %s.", resourceName, resourceName) + log.Debugf("Created watcher %s successfully, created by %s.", resourceName, resourceName) } - addToWhichAreUsing(resourceName, resourceName, resourceWatchers) + addToResourcesUsing(resourceName, resourceName, resourceWatchers) // Create the extra watchers required by this resource extraWatchers := getExtraWatchers(resourceName, config) for _, extra := range extraWatchers { extraRes := getResource(extra) if extraRes != nil { - created, err = startWatcher(extra, extraRes, *options, client, resourceWatchers) + created, err = createWatcher(extra, extraRes, *options, client, resourceWatchers) if err != nil { log.Errorf("Error initializing Kubernetes watcher %s, required by %s: %s", extra, resourceName, err) } else { if created { - log.Debugf("Started watcher %s successfully, created by %s.", extra, resourceName) + log.Debugf("Created watcher %s successfully, created by %s.", extra, resourceName) } // add this resource to the ones using the extra resource - addToWhichAreUsing(extra, resourceName, resourceWatchers) + addToResourcesUsing(extra, resourceName, resourceWatchers) } } else { log.Errorf("Resource for name %s does not exist. Watcher cannot be created.", extra) @@ -341,12 +341,16 @@ func startAllWatchers( // createMetadataGen creates the metadata generator for resources in general func createMetadataGen(client k8sclient.Interface, commonConfig *conf.C, config *kubernetesConfig, resourceName string, resourceWatchers *Watchers) (*metadata.Resource, error) { // check if the resource is namespace aware - extras := getExtraWatchers(resourceName, config) namespaceAware := false - for _, extra := range extras { - if extra == NamespaceResource { - namespaceAware = true - break + if resourceName == NamespaceResource { + namespaceAware = true + } else { + extras := getExtraWatchers(resourceName, config) + for _, extra := range extras { + if extra == NamespaceResource { + namespaceAware = true + break + } } } @@ -455,7 +459,7 @@ func NewResourceMetadataEnricher( return &nilEnricher{} } - err = startAllWatchers(client, resourceName, nodeScope, config, log, resourceWatchers) + err = createAllWatchers(client, resourceName, nodeScope, config, log, resourceWatchers) if err != nil { log.Errorf("Error starting the watchers: %s", err) return &nilEnricher{} @@ -580,7 +584,7 @@ func NewContainerMetadataEnricher( return &nilEnricher{} } - err = startAllWatchers(client, PodResource, nodeScope, config, log, resourceWatchers) + err = createAllWatchers(client, PodResource, nodeScope, config, log, resourceWatchers) if err != nil { log.Errorf("Error starting the watchers: %s", err) return &nilEnricher{} @@ -768,7 +772,7 @@ func (e *enricher) Start(resourceWatchers *Watchers) { defer resourceWatchers.lock.Unlock() resourceWatcher := resourceWatchers.watchersMap[e.resourceName] - if resourceWatcher != nil && resourceWatcher.watcher != nil && !resourceWatcher.started { + if resourceWatcher != nil && !resourceWatcher.started { if err := resourceWatcher.watcher.Start(); err != nil { log.Warnf("Error starting %s watcher: %s", e.resourceName, err) } else { @@ -779,7 +783,7 @@ func (e *enricher) Start(resourceWatchers *Watchers) { extras := getExtraWatchers(e.resourceName, e.config) for _, extra := 
range extras { extraWatcher := resourceWatchers.watchersMap[extra] - if extraWatcher != nil && extraWatcher.watcher != nil && !extraWatcher.started { + if extraWatcher != nil && !extraWatcher.started { if err := extraWatcher.watcher.Start(); err != nil { log.Warnf("Error starting %s watcher: %s", extra, err) } else { @@ -794,8 +798,8 @@ func (e *enricher) Stop(resourceWatchers *Watchers) { defer resourceWatchers.lock.Unlock() resourceWatcher := resourceWatchers.watchersMap[e.resourceName] - if resourceWatcher != nil && resourceWatcher.watcher != nil && resourceWatcher.whichAreUsing != nil && resourceWatcher.started { - _, size := removeFromWhichAreUsing(e.resourceName, e.resourceName, resourceWatchers) + if resourceWatcher != nil && resourceWatcher.started { + _, size := removeFromResourcesUsing(e.resourceName, e.resourceName, resourceWatchers) if size == 0 { resourceWatcher.watcher.Stop() resourceWatcher.started = false @@ -805,8 +809,8 @@ func (e *enricher) Stop(resourceWatchers *Watchers) { extras := getExtraWatchers(e.resourceName, e.config) for _, extra := range extras { extraWatcher := resourceWatchers.watchersMap[extra] - if extraWatcher != nil && extraWatcher.watcher != nil && extraWatcher.whichAreUsing != nil && extraWatcher.started { - _, size := removeFromWhichAreUsing(extra, e.resourceName, resourceWatchers) + if extraWatcher != nil && extraWatcher.started { + _, size := removeFromResourcesUsing(extra, e.resourceName, resourceWatchers) if size == 0 { extraWatcher.watcher.Stop() extraWatcher.started = false diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index 199748f7d80..fa099adfb17 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -62,7 +62,7 @@ func TestWatchOptions(t *testing.T) { require.Equal(t, options.Node, config.Node) } -func TestStartWatcher(t *testing.T) { +func TestCreateWatcher(t *testing.T) { resourceWatchers := NewWatchers() client := k8sfake.NewSimpleClientset() @@ -76,7 +76,7 @@ func TestStartWatcher(t *testing.T) { options, err := getWatchOptions(config, false, client, log) require.NoError(t, err) - created, err := startWatcher(NamespaceResource, &kubernetes.Node{}, *options, client, resourceWatchers) + created, err := createWatcher(NamespaceResource, &kubernetes.Node{}, *options, client, resourceWatchers) require.True(t, created) require.NoError(t, err) @@ -86,7 +86,7 @@ func TestStartWatcher(t *testing.T) { require.NotNil(t, resourceWatchers.watchersMap[NamespaceResource].watcher) resourceWatchers.lock.Unlock() - created, err = startWatcher(NamespaceResource, &kubernetes.Namespace{}, *options, client, resourceWatchers) + created, err = createWatcher(NamespaceResource, &kubernetes.Namespace{}, *options, client, resourceWatchers) require.False(t, created) require.NoError(t, err) @@ -96,7 +96,7 @@ func TestStartWatcher(t *testing.T) { require.NotNil(t, resourceWatchers.watchersMap[NamespaceResource].watcher) resourceWatchers.lock.Unlock() - created, err = startWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers) + created, err = createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers) require.True(t, created) require.NoError(t, err) @@ -122,24 +122,24 @@ func TestAddToWhichAreUsing(t *testing.T) { require.NoError(t, err) // Create the new entry with watcher and nil string array first - created, err := startWatcher(DeploymentResource, 
&kubernetes.Deployment{}, *options, client, resourceWatchers) + created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers) require.True(t, created) require.NoError(t, err) resourceWatchers.lock.Lock() require.NotNil(t, resourceWatchers.watchersMap[DeploymentResource].watcher) - require.Nil(t, resourceWatchers.watchersMap[DeploymentResource].whichAreUsing) + require.Nil(t, resourceWatchers.watchersMap[DeploymentResource].resourcesUsing) resourceWatchers.lock.Unlock() - addToWhichAreUsing(DeploymentResource, DeploymentResource, resourceWatchers) + addToResourcesUsing(DeploymentResource, DeploymentResource, resourceWatchers) resourceWatchers.lock.Lock() - require.NotNil(t, resourceWatchers.watchersMap[DeploymentResource].whichAreUsing) - require.Equal(t, []string{DeploymentResource}, resourceWatchers.watchersMap[DeploymentResource].whichAreUsing) + require.NotNil(t, resourceWatchers.watchersMap[DeploymentResource].resourcesUsing) + require.Equal(t, []string{DeploymentResource}, resourceWatchers.watchersMap[DeploymentResource].resourcesUsing) resourceWatchers.lock.Unlock() - addToWhichAreUsing(DeploymentResource, PodResource, resourceWatchers) + addToResourcesUsing(DeploymentResource, PodResource, resourceWatchers) resourceWatchers.lock.Lock() - require.Equal(t, []string{DeploymentResource, PodResource}, resourceWatchers.watchersMap[DeploymentResource].whichAreUsing) + require.Equal(t, []string{DeploymentResource, PodResource}, resourceWatchers.watchersMap[DeploymentResource].resourcesUsing) resourceWatchers.lock.Unlock() } @@ -158,30 +158,30 @@ func TestRemoveToWhichAreUsing(t *testing.T) { require.NoError(t, err) // Create the new entry with watcher and nil string array first - created, err := startWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers) + created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers) require.True(t, created) require.NoError(t, err) - addToWhichAreUsing(DeploymentResource, DeploymentResource, resourceWatchers) - addToWhichAreUsing(DeploymentResource, PodResource, resourceWatchers) + addToResourcesUsing(DeploymentResource, DeploymentResource, resourceWatchers) + addToResourcesUsing(DeploymentResource, PodResource, resourceWatchers) resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() - removed, size := removeFromWhichAreUsing(DeploymentResource, DeploymentResource, resourceWatchers) + removed, size := removeFromResourcesUsing(DeploymentResource, DeploymentResource, resourceWatchers) require.True(t, removed) require.Equal(t, 1, size) - removed, size = removeFromWhichAreUsing(DeploymentResource, DeploymentResource, resourceWatchers) + removed, size = removeFromResourcesUsing(DeploymentResource, DeploymentResource, resourceWatchers) require.False(t, removed) require.Equal(t, 1, size) - removed, size = removeFromWhichAreUsing(DeploymentResource, PodResource, resourceWatchers) + removed, size = removeFromResourcesUsing(DeploymentResource, PodResource, resourceWatchers) require.True(t, removed) require.Equal(t, 0, size) } -func TestStartAllWatchers(t *testing.T) { +func TestCreateAllWatchers(t *testing.T) { resourceWatchers := NewWatchers() client := k8sfake.NewSimpleClientset() @@ -197,7 +197,7 @@ func TestStartAllWatchers(t *testing.T) { log := logp.NewLogger("test") // Start watchers based on a resource that does not exist should cause an error - err := startAllWatchers(client, "does-not-exist", false, config, log, 
resourceWatchers) + err := createAllWatchers(client, "does-not-exist", false, config, log, resourceWatchers) require.Error(t, err) resourceWatchers.lock.Lock() require.Equal(t, 0, len(resourceWatchers.watchersMap)) @@ -205,7 +205,7 @@ func TestStartAllWatchers(t *testing.T) { // Start watcher for a resource that requires other resources, should start all the watchers extras := getExtraWatchers(PodResource, config) - err = startAllWatchers(client, PodResource, false, config, log, resourceWatchers) + err = createAllWatchers(client, PodResource, false, config, log, resourceWatchers) require.NoError(t, err) // Check that all the required watchers are in the map @@ -242,7 +242,7 @@ func TestCreateMetaGen(t *testing.T) { require.Error(t, err) // Create the watchers necessary for the metadata generator - err = startAllWatchers(client, DeploymentResource, false, config, log, resourceWatchers) + err = createAllWatchers(client, DeploymentResource, false, config, log, resourceWatchers) require.NoError(t, err) // Create the generators, this time without error @@ -276,7 +276,7 @@ func TestCreateMetaGenSpecific(t *testing.T) { require.Error(t, err) // Create the pod resource + the extras - err = startAllWatchers(client, PodResource, false, config, log, resourceWatchers) + err = createAllWatchers(client, PodResource, false, config, log, resourceWatchers) require.NoError(t, err) _, err = createMetadataGenSpecific(client, commonConfig, config, PodResource, resourceWatchers) @@ -289,7 +289,7 @@ func TestCreateMetaGenSpecific(t *testing.T) { require.Error(t, err) // Create the service resource + the extras - err = startAllWatchers(client, ServiceResource, false, config, log, resourceWatchers) + err = createAllWatchers(client, ServiceResource, false, config, log, resourceWatchers) require.NoError(t, err) _, err = createMetadataGenSpecific(client, commonConfig, config, ServiceResource, resourceWatchers) @@ -302,14 +302,14 @@ func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { resourceWatchers.lock.Lock() resourceWatchers.watchersMap[NamespaceResource] = &watcherData{ - watcher: &mockWatcher{}, - started: true, - whichAreUsing: []string{NamespaceResource, DeploymentResource}, + watcher: &mockWatcher{}, + started: true, + resourcesUsing: []string{NamespaceResource, DeploymentResource}, } resourceWatchers.watchersMap[DeploymentResource] = &watcherData{ - watcher: &mockWatcher{}, - started: true, - whichAreUsing: []string{DeploymentResource}, + watcher: &mockWatcher{}, + started: true, + resourcesUsing: []string{DeploymentResource}, } resourceWatchers.lock.Unlock() @@ -343,7 +343,7 @@ func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { resourceWatchers.lock.Lock() watcher = resourceWatchers.watchersMap[NamespaceResource] require.True(t, watcher.started) - require.Equal(t, []string{DeploymentResource}, watcher.whichAreUsing) + require.Equal(t, []string{DeploymentResource}, watcher.resourcesUsing) resourceWatchers.lock.Unlock() // Stopping the deployment watcher should stop now both watchers @@ -354,11 +354,11 @@ func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { watcher = resourceWatchers.watchersMap[NamespaceResource] require.False(t, watcher.started) - require.Equal(t, []string{}, watcher.whichAreUsing) + require.Equal(t, []string{}, watcher.resourcesUsing) watcher = resourceWatchers.watchersMap[DeploymentResource] require.False(t, watcher.started) - require.Equal(t, []string{}, watcher.whichAreUsing) + require.Equal(t, []string{}, watcher.resourcesUsing) resourceWatchers.lock.Unlock() @@ 
-369,9 +369,9 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { resourceWatchers.lock.Lock() resourceWatchers.watchersMap[PodResource] = &watcherData{ - watcher: &mockWatcher{}, - started: false, - whichAreUsing: []string{PodResource}, + watcher: &mockWatcher{}, + started: false, + resourcesUsing: []string{PodResource}, } resourceWatchers.lock.Unlock() From 81bc17143ff8801d53786387835fbc75f37fa234 Mon Sep 17 00:00:00 2001 From: constanca Date: Fri, 26 Jan 2024 15:12:36 +0100 Subject: [PATCH 08/61] Use metricset to distinguish between the ones that are using which resources. Signed-off-by: constanca --- .../helper/kubernetes/state_metricset.go | 16 +- metricbeat/module/kubernetes/node/node.go | 2 +- metricbeat/module/kubernetes/pod/pod.go | 2 +- .../module/kubernetes/util/kubernetes.go | 101 ++++++----- .../module/kubernetes/util/kubernetes_test.go | 165 ++++++++++++------ 5 files changed, 186 insertions(+), 100 deletions(-) diff --git a/metricbeat/helper/kubernetes/state_metricset.go b/metricbeat/helper/kubernetes/state_metricset.go index 5519f980615..7bfeec344fc 100644 --- a/metricbeat/helper/kubernetes/state_metricset.go +++ b/metricbeat/helper/kubernetes/state_metricset.go @@ -29,8 +29,6 @@ import ( k8smod "github.com/elastic/beats/v7/metricbeat/module/kubernetes" ) -const prefix = "state_" - /* mappings stores the metrics for each metricset. The key of the map is the name of the metricset and the values are the mapping of the metricset metrics. @@ -45,7 +43,7 @@ var lock sync.RWMutex // The New method will be called after the setup of the module and before starting to fetch data func Init(name string, mapping *prometheus.MetricsMapping) { if name != util.NamespaceResource { - name = prefix + name + name = util.StateMetricsetPrefix + name } lock.Lock() mappings[name] = mapping @@ -79,16 +77,16 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { mapping := mappings[base.Name()] lock.Unlock() - resourceName := base.Name() - if resourceName != util.NamespaceResource { - resourceName = strings.ReplaceAll(resourceName, prefix, "") - } + //resourceName := base.Name() + //if resourceName != util.NamespaceResource { + // resourceName = strings.ReplaceAll(resourceName, prefix, "") + //} return &MetricSet{ BaseMetricSet: base, prometheusClient: prometheusClient, prometheusMapping: mapping, - enricher: util.NewResourceMetadataEnricher(base, resourceName, mod.GetMetricsRepo(), mod.GetResourceWatchers(), false), + enricher: util.NewResourceMetadataEnricher(base, mod.GetMetricsRepo(), mod.GetResourceWatchers(), false), mod: mod, }, nil } @@ -103,7 +101,7 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) { // for the state_namespace metricset. 
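The mapping applied below resolves a metricset name to the resource it watches by stripping the state_ prefix, with state_namespace kept as-is. A tiny sketch of that mapping, assuming the same prefix constant (illustrative code, not the patch itself):

package main

import (
	"fmt"
	"strings"
)

const stateMetricsetPrefix = "state_"

// resourceFor mirrors the mapping described above: state_pod -> pod,
// state_node -> node, while state_namespace stays untouched.
func resourceFor(metricset string) string {
	if metricset == "state_namespace" {
		return metricset
	}
	return strings.ReplaceAll(metricset, stateMetricsetPrefix, "")
}

func main() {
	for _, m := range []string{"state_pod", "state_node", "state_namespace", "pod"} {
		fmt.Printf("%s -> %s\n", m, resourceFor(m))
	}
}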
resourceName := m.BaseMetricSet.Name() if resourceName != util.NamespaceResource { - resourceName = strings.ReplaceAll(resourceName, prefix, "") + resourceName = strings.ReplaceAll(resourceName, util.StateMetricsetPrefix, "") } else { resourceName = "state_namespace" } diff --git a/metricbeat/module/kubernetes/node/node.go b/metricbeat/module/kubernetes/node/node.go index 909b9fa52b9..e862d83e92a 100644 --- a/metricbeat/module/kubernetes/node/node.go +++ b/metricbeat/module/kubernetes/node/node.go @@ -75,7 +75,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return &MetricSet{ BaseMetricSet: base, http: http, - enricher: util.NewResourceMetadataEnricher(base, util.NodeResource, mod.GetMetricsRepo(), mod.GetResourceWatchers(), false), + enricher: util.NewResourceMetadataEnricher(base, mod.GetMetricsRepo(), mod.GetResourceWatchers(), false), mod: mod, }, nil } diff --git a/metricbeat/module/kubernetes/pod/pod.go b/metricbeat/module/kubernetes/pod/pod.go index aaa4a9b1bb5..fe20641b432 100644 --- a/metricbeat/module/kubernetes/pod/pod.go +++ b/metricbeat/module/kubernetes/pod/pod.go @@ -76,7 +76,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return &MetricSet{ BaseMetricSet: base, http: http, - enricher: util.NewResourceMetadataEnricher(base, util.PodResource, mod.GetMetricsRepo(), mod.GetResourceWatchers(), true), + enricher: util.NewResourceMetadataEnricher(base, mod.GetMetricsRepo(), mod.GetResourceWatchers(), true), mod: mod, }, nil } diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 4c7f9c1b168..c04eae3ca68 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -67,11 +67,12 @@ type Enricher interface { type enricher struct { sync.RWMutex - metadata map[string]mapstr.M - index func(mapstr.M) string - resourceName string - isPod bool - config *kubernetesConfig + metadata map[string]mapstr.M + index func(mapstr.M) string + metricsetName string + resourceName string + isPod bool + config *kubernetesConfig } type nilEnricher struct{} @@ -81,9 +82,12 @@ func (*nilEnricher) Stop(*Watchers) {} func (*nilEnricher) Enrich([]mapstr.M) {} type watcherData struct { - resourcesUsing []string // list of resources using this watcher - watcher kubernetes.Watcher - started bool // true if watcher has started, false otherwise + // list of metricsets using this watcher + // metricsets are used instead of resource names to avoid conflicts between + // state_pod / pod, state_node / node, state_container / container + metricsetsUsing []string + watcher kubernetes.Watcher + started bool // true if watcher has started, false otherwise } type Watchers struct { @@ -93,6 +97,8 @@ type Watchers struct { const selector = "kubernetes" +const StateMetricsetPrefix = "state_" + const ( PodResource = "pod" ServiceResource = "service" @@ -195,6 +201,17 @@ func getExtraWatchers(resourceName string, config *kubernetesConfig) []string { } } +// getResourceName returns the name of the resource for a metricset +// Example: state_pod metricset uses pod resource +// Exception is state_namespace +func getResourceName(metricsetName string) string { + resourceName := metricsetName + if resourceName != NamespaceResource { + resourceName = strings.ReplaceAll(resourceName, StateMetricsetPrefix, "") + } + return resourceName +} + // getWatchOptions builds the kubernetes.WatchOptions{} needed for the watcher based on the config and nodeScope func getWatchOptions(config 
*kubernetesConfig, nodeScope bool, client k8sclient.Interface, log *logp.Logger) (*kubernetes.WatchOptions, error) { var err error @@ -243,52 +260,53 @@ func createWatcher( return false, nil } -// addToResourcesUsing adds resource identified by usingName to the list of resources using the watcher +// addToMetricsetsUsing adds metricset identified by metricsetUsing to the list of resources using the watcher // identified by resourceName -func addToResourcesUsing(resourceName string, usingName string, resourceWatchers *Watchers) { +func addToMetricsetsUsing(resourceName string, metricsetUsing string, resourceWatchers *Watchers) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() data, ok := resourceWatchers.watchersMap[resourceName] if ok { contains := false - for _, which := range data.resourcesUsing { - if which == usingName { + for _, which := range data.metricsetsUsing { + if which == metricsetUsing { contains = true break } } // add this resource to the list of resources using it if !contains { - data.resourcesUsing = append(data.resourcesUsing, usingName) + data.metricsetsUsing = append(data.metricsetsUsing, metricsetUsing) } } } -// removeFromResourcesUsing returns true if element was removed and new size of array. +// removeFromMetricsetsUsing returns true if element was removed and new size of array. // The cache should be locked when called. -func removeFromResourcesUsing(resourceName string, notUsingName string, resourceWatchers *Watchers) (bool, int) { +func removeFromMetricsetsUsing(resourceName string, notUsingName string, resourceWatchers *Watchers) (bool, int) { data, ok := resourceWatchers.watchersMap[resourceName] removed := false if ok { newIndex := 0 - for i, which := range data.resourcesUsing { + for i, which := range data.metricsetsUsing { if which == notUsingName { removed = true } else { - data.resourcesUsing[newIndex] = data.resourcesUsing[i] + data.metricsetsUsing[newIndex] = data.metricsetsUsing[i] newIndex++ } } - data.resourcesUsing = data.resourcesUsing[:newIndex] - return removed, len(data.resourcesUsing) + data.metricsetsUsing = data.metricsetsUsing[:newIndex] + return removed, len(data.metricsetsUsing) } return removed, 0 } -// createAllWatchers creates all the watchers required by a specific resource +// createAllWatchers creates all the watchers required by a metricset func createAllWatchers( client k8sclient.Interface, + metricsetName string, resourceName string, nodeScope bool, config *kubernetesConfig, @@ -309,11 +327,11 @@ func createAllWatchers( // If it fails, we return an error, so we can stop the extra watchers from creating. 
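Aside (not part of the patch): addToMetricsetsUsing and removeFromMetricsetsUsing amount to manual reference counting on the shared watchers. A standalone sketch of that bookkeeping, with illustrative names (registry, watcherEntry) instead of the patch's Watchers/watcherData types:

package main

import "fmt"

type watcherEntry struct {
	using []string // metricsets currently relying on this watcher
}

type registry map[string]*watcherEntry

// addUser registers a metricset as a user of the watcher for resource,
// creating the entry on first use and ignoring duplicates.
func (r registry) addUser(resource, metricset string) {
	e, ok := r[resource]
	if !ok {
		e = &watcherEntry{}
		r[resource] = e
	}
	for _, m := range e.using {
		if m == metricset {
			return
		}
	}
	e.using = append(e.using, metricset)
}

// removeUser reports whether the metricset was removed and how many users remain.
func (r registry) removeUser(resource, metricset string) (bool, int) {
	e, ok := r[resource]
	if !ok {
		return false, 0
	}
	kept := e.using[:0]
	removed := false
	for _, m := range e.using {
		if m == metricset {
			removed = true
			continue
		}
		kept = append(kept, m)
	}
	e.using = kept
	return removed, len(e.using)
}

func main() {
	r := registry{}
	r.addUser("pod", "pod")
	r.addUser("pod", "state_pod")
	_, left := r.removeUser("pod", "pod")
	fmt.Println("users left:", left) // 1: the shared watcher must keep running
	_, left = r.removeUser("pod", "state_pod")
	fmt.Println("users left:", left) // 0: now it is safe to stop the watcher
}

The real code performs the same walk while holding resourceWatchers.lock and only stops the underlying kubernetes.Watcher once the remaining count reaches zero.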
created, err := createWatcher(resourceName, res, *options, client, resourceWatchers) if err != nil { - return fmt.Errorf("error initializing Kubernetes watcher %s, required by %s: %w", resourceName, resourceName, err) + return fmt.Errorf("error initializing Kubernetes watcher %s, required by %s: %w", resourceName, metricsetName, err) } else if created { - log.Debugf("Created watcher %s successfully, created by %s.", resourceName, resourceName) + log.Debugf("Created watcher %s successfully, created by %s.", resourceName, metricsetName) } - addToResourcesUsing(resourceName, resourceName, resourceWatchers) + addToMetricsetsUsing(resourceName, metricsetName, resourceWatchers) // Create the extra watchers required by this resource extraWatchers := getExtraWatchers(resourceName, config) @@ -322,13 +340,13 @@ func createAllWatchers( if extraRes != nil { created, err = createWatcher(extra, extraRes, *options, client, resourceWatchers) if err != nil { - log.Errorf("Error initializing Kubernetes watcher %s, required by %s: %s", extra, resourceName, err) + log.Errorf("Error initializing Kubernetes watcher %s, required by %s: %s", extra, metricsetName, err) } else { if created { - log.Debugf("Created watcher %s successfully, created by %s.", extra, resourceName) + log.Debugf("Created watcher %s successfully, created by %s.", extra, metricsetName) } - // add this resource to the ones using the extra resource - addToResourcesUsing(extra, resourceName, resourceWatchers) + // add this metricset to the ones using the extra resource + addToMetricsetsUsing(extra, metricsetName, resourceWatchers) } } else { log.Errorf("Resource for name %s does not exist. Watcher cannot be created.", extra) @@ -432,11 +450,9 @@ func createMetadataGenSpecific(client k8sclient.Interface, commonConfig *conf.C, func NewResourceMetadataEnricher( base mb.BaseMetricSet, - resourceName string, metricsRepo *MetricsRepo, resourceWatchers *Watchers, nodeScope bool) Enricher { - log := logp.NewLogger(selector) config, err := GetValidatedConfig(base) @@ -459,7 +475,10 @@ func NewResourceMetadataEnricher( return &nilEnricher{} } - err = createAllWatchers(client, resourceName, nodeScope, config, log, resourceWatchers) + metricsetName := base.Name() + resourceName := getResourceName(metricsetName) + + err = createAllWatchers(client, metricsetName, resourceName, nodeScope, config, log, resourceWatchers) if err != nil { log.Errorf("Error starting the watchers: %s", err) return &nilEnricher{} @@ -547,7 +566,7 @@ func NewResourceMetadataEnricher( return join(getString(e, mb.ModuleDataKey+".namespace"), getString(e, "name")) } - enricher := buildMetadataEnricher(resourceName, resourceWatchers, config, updateFunc, deleteFunc, indexFunc) + enricher := buildMetadataEnricher(metricsetName, resourceName, resourceWatchers, config, updateFunc, deleteFunc, indexFunc) if resourceName == PodResource { enricher.isPod = true } @@ -584,7 +603,9 @@ func NewContainerMetadataEnricher( return &nilEnricher{} } - err = createAllWatchers(client, PodResource, nodeScope, config, log, resourceWatchers) + metricsetName := base.Name() + + err = createAllWatchers(client, metricsetName, PodResource, nodeScope, config, log, resourceWatchers) if err != nil { log.Errorf("Error starting the watchers: %s", err) return &nilEnricher{} @@ -670,7 +691,7 @@ func NewContainerMetadataEnricher( return join(getString(e, mb.ModuleDataKey+".namespace"), getString(e, mb.ModuleDataKey+".pod.name"), getString(e, "name")) } - enricher := buildMetadataEnricher(PodResource, resourceWatchers, 
config, updateFunc, deleteFunc, indexFunc) + enricher := buildMetadataEnricher(metricsetName, PodResource, resourceWatchers, config, updateFunc, deleteFunc, indexFunc) return enricher } @@ -726,6 +747,7 @@ func join(fields ...string) string { } func buildMetadataEnricher( + metricsetName string, resourceName string, resourceWatchers *Watchers, config *kubernetesConfig, @@ -734,10 +756,11 @@ func buildMetadataEnricher( index func(e mapstr.M) string) *enricher { enricher := enricher{ - metadata: map[string]mapstr.M{}, - index: index, - resourceName: resourceName, - config: config, + metadata: map[string]mapstr.M{}, + index: index, + resourceName: resourceName, + metricsetName: metricsetName, + config: config, } resourceWatchers.lock.Lock() @@ -799,7 +822,7 @@ func (e *enricher) Stop(resourceWatchers *Watchers) { resourceWatcher := resourceWatchers.watchersMap[e.resourceName] if resourceWatcher != nil && resourceWatcher.started { - _, size := removeFromResourcesUsing(e.resourceName, e.resourceName, resourceWatchers) + _, size := removeFromMetricsetsUsing(e.resourceName, e.metricsetName, resourceWatchers) if size == 0 { resourceWatcher.watcher.Stop() resourceWatcher.started = false @@ -810,7 +833,7 @@ func (e *enricher) Stop(resourceWatchers *Watchers) { for _, extra := range extras { extraWatcher := resourceWatchers.watchersMap[extra] if extraWatcher != nil && extraWatcher.started { - _, size := removeFromResourcesUsing(extra, e.resourceName, resourceWatchers) + _, size := removeFromMetricsetsUsing(extra, e.metricsetName, resourceWatchers) if size == 0 { extraWatcher.watcher.Stop() extraWatcher.started = false diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index fa099adfb17..19eaab3e01f 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -19,24 +19,20 @@ package util import ( "fmt" - "testing" - "time" - - "github.com/stretchr/testify/require" - v1 "k8s.io/api/core/v1" + kubernetes2 "github.com/elastic/beats/v7/libbeat/autodiscover/providers/kubernetes" + "github.com/elastic/elastic-agent-autodiscover/kubernetes/metadata" + conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/mapstr" "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" k8s "k8s.io/client-go/kubernetes" - k8sfake "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" + "testing" + "time" - kubernetes2 "github.com/elastic/beats/v7/libbeat/autodiscover/providers/kubernetes" "github.com/elastic/elastic-agent-autodiscover/kubernetes" - "github.com/elastic/elastic-agent-autodiscover/kubernetes/metadata" - conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" - "github.com/elastic/elastic-agent-libs/mapstr" + "github.com/stretchr/testify/require" + k8sfake "k8s.io/client-go/kubernetes/fake" ) func TestWatchOptions(t *testing.T) { @@ -107,7 +103,7 @@ func TestCreateWatcher(t *testing.T) { resourceWatchers.lock.Unlock() } -func TestAddToWhichAreUsing(t *testing.T) { +func TestAddToMetricsetsUsing(t *testing.T) { resourceWatchers := NewWatchers() client := k8sfake.NewSimpleClientset() @@ -128,22 +124,24 @@ func TestAddToWhichAreUsing(t *testing.T) { resourceWatchers.lock.Lock() require.NotNil(t, 
resourceWatchers.watchersMap[DeploymentResource].watcher) - require.Nil(t, resourceWatchers.watchersMap[DeploymentResource].resourcesUsing) + require.Nil(t, resourceWatchers.watchersMap[DeploymentResource].metricsetsUsing) resourceWatchers.lock.Unlock() - addToResourcesUsing(DeploymentResource, DeploymentResource, resourceWatchers) + metricsetDeployment := "state_deployment" + addToMetricsetsUsing(DeploymentResource, metricsetDeployment, resourceWatchers) resourceWatchers.lock.Lock() - require.NotNil(t, resourceWatchers.watchersMap[DeploymentResource].resourcesUsing) - require.Equal(t, []string{DeploymentResource}, resourceWatchers.watchersMap[DeploymentResource].resourcesUsing) + require.NotNil(t, resourceWatchers.watchersMap[DeploymentResource].metricsetsUsing) + require.Equal(t, []string{metricsetDeployment}, resourceWatchers.watchersMap[DeploymentResource].metricsetsUsing) resourceWatchers.lock.Unlock() - addToResourcesUsing(DeploymentResource, PodResource, resourceWatchers) + metricsetContainer := "container" + addToMetricsetsUsing(DeploymentResource, metricsetContainer, resourceWatchers) resourceWatchers.lock.Lock() - require.Equal(t, []string{DeploymentResource, PodResource}, resourceWatchers.watchersMap[DeploymentResource].resourcesUsing) + require.Equal(t, []string{metricsetDeployment, metricsetContainer}, resourceWatchers.watchersMap[DeploymentResource].metricsetsUsing) resourceWatchers.lock.Unlock() } -func TestRemoveToWhichAreUsing(t *testing.T) { +func TestRemoveFromMetricsetsUsing(t *testing.T) { resourceWatchers := NewWatchers() client := k8sfake.NewSimpleClientset() @@ -162,21 +160,23 @@ func TestRemoveToWhichAreUsing(t *testing.T) { require.True(t, created) require.NoError(t, err) - addToResourcesUsing(DeploymentResource, DeploymentResource, resourceWatchers) - addToResourcesUsing(DeploymentResource, PodResource, resourceWatchers) + metricsetDeployment := "state_deployment" + metricsetPod := "state_pod" + addToMetricsetsUsing(DeploymentResource, metricsetDeployment, resourceWatchers) + addToMetricsetsUsing(DeploymentResource, metricsetPod, resourceWatchers) resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() - removed, size := removeFromResourcesUsing(DeploymentResource, DeploymentResource, resourceWatchers) + removed, size := removeFromMetricsetsUsing(DeploymentResource, metricsetDeployment, resourceWatchers) require.True(t, removed) require.Equal(t, 1, size) - removed, size = removeFromResourcesUsing(DeploymentResource, DeploymentResource, resourceWatchers) + removed, size = removeFromMetricsetsUsing(DeploymentResource, metricsetDeployment, resourceWatchers) require.False(t, removed) require.Equal(t, 1, size) - removed, size = removeFromResourcesUsing(DeploymentResource, PodResource, resourceWatchers) + removed, size = removeFromMetricsetsUsing(DeploymentResource, metricsetPod, resourceWatchers) require.True(t, removed) require.Equal(t, 0, size) } @@ -197,15 +197,16 @@ func TestCreateAllWatchers(t *testing.T) { log := logp.NewLogger("test") // Start watchers based on a resource that does not exist should cause an error - err := createAllWatchers(client, "does-not-exist", false, config, log, resourceWatchers) + err := createAllWatchers(client, "does-not-exist", "does-not-exist", false, config, log, resourceWatchers) require.Error(t, err) resourceWatchers.lock.Lock() require.Equal(t, 0, len(resourceWatchers.watchersMap)) resourceWatchers.lock.Unlock() // Start watcher for a resource that requires other resources, should start all the watchers + metricsetPod 
:= "pod" extras := getExtraWatchers(PodResource, config) - err = createAllWatchers(client, PodResource, false, config, log, resourceWatchers) + err = createAllWatchers(client, metricsetPod, PodResource, false, config, log, resourceWatchers) require.NoError(t, err) // Check that all the required watchers are in the map @@ -242,7 +243,8 @@ func TestCreateMetaGen(t *testing.T) { require.Error(t, err) // Create the watchers necessary for the metadata generator - err = createAllWatchers(client, DeploymentResource, false, config, log, resourceWatchers) + metricsetDeployment := "state_deployment" + err = createAllWatchers(client, metricsetDeployment, DeploymentResource, false, config, log, resourceWatchers) require.NoError(t, err) // Create the generators, this time without error @@ -270,17 +272,17 @@ func TestCreateMetaGenSpecific(t *testing.T) { client := k8sfake.NewSimpleClientset() // For pod: + metricsetPod := "pod" _, err = createMetadataGenSpecific(client, commonConfig, config, PodResource, resourceWatchers) // At this point, no watchers were created require.Error(t, err) // Create the pod resource + the extras - err = createAllWatchers(client, PodResource, false, config, log, resourceWatchers) + err = createAllWatchers(client, metricsetPod, PodResource, false, config, log, resourceWatchers) require.NoError(t, err) _, err = createMetadataGenSpecific(client, commonConfig, config, PodResource, resourceWatchers) - // At this point, no watchers were created require.NoError(t, err) // For service: @@ -289,27 +291,30 @@ func TestCreateMetaGenSpecific(t *testing.T) { require.Error(t, err) // Create the service resource + the extras - err = createAllWatchers(client, ServiceResource, false, config, log, resourceWatchers) + metricsetService := "state_service" + err = createAllWatchers(client, metricsetService, ServiceResource, false, config, log, resourceWatchers) require.NoError(t, err) _, err = createMetadataGenSpecific(client, commonConfig, config, ServiceResource, resourceWatchers) - // At this point, no watchers were created require.NoError(t, err) } func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { resourceWatchers := NewWatchers() + metricsetNamespace := "state_namespace" + metricsetDeployment := "state_deployment" + resourceWatchers.lock.Lock() resourceWatchers.watchersMap[NamespaceResource] = &watcherData{ - watcher: &mockWatcher{}, - started: true, - resourcesUsing: []string{NamespaceResource, DeploymentResource}, + watcher: &mockWatcher{}, + started: false, + metricsetsUsing: []string{metricsetNamespace, metricsetDeployment}, } resourceWatchers.watchersMap[DeploymentResource] = &watcherData{ - watcher: &mockWatcher{}, - started: true, - resourcesUsing: []string{DeploymentResource}, + watcher: &mockWatcher{}, + started: true, + metricsetsUsing: []string{metricsetDeployment}, } resourceWatchers.lock.Unlock() @@ -324,54 +329,113 @@ func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { }, } - enricherNamespace := buildMetadataEnricher(NamespaceResource, resourceWatchers, config, funcs.update, funcs.delete, funcs.index) + enricherNamespace := buildMetadataEnricher(metricsetNamespace, NamespaceResource, resourceWatchers, config, + funcs.update, funcs.delete, funcs.index) resourceWatchers.lock.Lock() watcher := resourceWatchers.watchersMap[NamespaceResource] - // it was initialized with starting = true - require.True(t, watcher.started) + require.False(t, watcher.started) resourceWatchers.lock.Unlock() - // starting should not affect this result 
enricherNamespace.Start(resourceWatchers) resourceWatchers.lock.Lock() watcher = resourceWatchers.watchersMap[NamespaceResource] require.True(t, watcher.started) resourceWatchers.lock.Unlock() - // Stopping should not stop the watcher because it is still being used by DeploymentResource + // Stopping should not stop the watcher because it is still being used by deployment metricset enricherNamespace.Stop(resourceWatchers) resourceWatchers.lock.Lock() watcher = resourceWatchers.watchersMap[NamespaceResource] require.True(t, watcher.started) - require.Equal(t, []string{DeploymentResource}, watcher.resourcesUsing) + require.Equal(t, []string{metricsetDeployment}, watcher.metricsetsUsing) resourceWatchers.lock.Unlock() // Stopping the deployment watcher should stop now both watchers - enricherDeployment := buildMetadataEnricher(DeploymentResource, resourceWatchers, config, funcs.update, funcs.delete, funcs.index) + enricherDeployment := buildMetadataEnricher(metricsetDeployment, DeploymentResource, resourceWatchers, config, + funcs.update, funcs.delete, funcs.index) enricherDeployment.Stop(resourceWatchers) resourceWatchers.lock.Lock() watcher = resourceWatchers.watchersMap[NamespaceResource] require.False(t, watcher.started) - require.Equal(t, []string{}, watcher.resourcesUsing) + require.Equal(t, []string{}, watcher.metricsetsUsing) watcher = resourceWatchers.watchersMap[DeploymentResource] require.False(t, watcher.started) - require.Equal(t, []string{}, watcher.resourcesUsing) + require.Equal(t, []string{}, watcher.metricsetsUsing) + + resourceWatchers.lock.Unlock() +} + +func TestBuildMetadataEnricher_Start_Stop_SameResources(t *testing.T) { + resourceWatchers := NewWatchers() + + metricsetPod := "pod" + metricsetStatePod := "state_pod" + + resourceWatchers.lock.Lock() + resourceWatchers.watchersMap[PodResource] = &watcherData{ + watcher: &mockWatcher{}, + started: false, + metricsetsUsing: []string{metricsetStatePod, metricsetPod}, + } resourceWatchers.lock.Unlock() + funcs := mockFuncs{} + config := &kubernetesConfig{ + Namespace: "test-ns", + SyncPeriod: time.Minute, + Node: "test-node", + AddResourceMetadata: &metadata.AddResourceMetadataConfig{ + CronJob: false, + Deployment: false, + }, + } + + enricherPod := buildMetadataEnricher(metricsetPod, PodResource, resourceWatchers, config, + funcs.update, funcs.delete, funcs.index) + resourceWatchers.lock.Lock() + watcher := resourceWatchers.watchersMap[PodResource] + require.False(t, watcher.started) + resourceWatchers.lock.Unlock() + + enricherPod.Start(resourceWatchers) + resourceWatchers.lock.Lock() + watcher = resourceWatchers.watchersMap[PodResource] + require.True(t, watcher.started) + resourceWatchers.lock.Unlock() + + // Stopping should not stop the watcher because it is still being used by state_pod metricset + enricherPod.Stop(resourceWatchers) + resourceWatchers.lock.Lock() + watcher = resourceWatchers.watchersMap[PodResource] + require.True(t, watcher.started) + require.Equal(t, []string{metricsetStatePod}, watcher.metricsetsUsing) + resourceWatchers.lock.Unlock() + + // Stopping the state_pod watcher should stop pod watcher + enricherStatePod := buildMetadataEnricher(metricsetStatePod, PodResource, resourceWatchers, config, + funcs.update, funcs.delete, funcs.index) + enricherStatePod.Stop(resourceWatchers) + + resourceWatchers.lock.Lock() + watcher = resourceWatchers.watchersMap[PodResource] + require.False(t, watcher.started) + require.Equal(t, []string{}, watcher.metricsetsUsing) + resourceWatchers.lock.Unlock() } +/* func 
TestBuildMetadataEnricher_EventHandler(t *testing.T) { resourceWatchers := NewWatchers() resourceWatchers.lock.Lock() resourceWatchers.watchersMap[PodResource] = &watcherData{ - watcher: &mockWatcher{}, - started: false, - resourcesUsing: []string{PodResource}, + watcher: &mockWatcher{}, + started: false, + metricsetsUsing: []string{PodResource}, } resourceWatchers.lock.Unlock() @@ -478,6 +542,7 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { require.False(t, watcher.started) resourceWatchers.lock.Unlock() } +*/ type mockFuncs struct { updated kubernetes.Resource From 809a442caafc4846f22c094454f035a5a5350eae Mon Sep 17 00:00:00 2001 From: constanca Date: Fri, 26 Jan 2024 15:19:33 +0100 Subject: [PATCH 09/61] goimports Signed-off-by: constanca --- metricbeat/module/kubernetes/util/kubernetes_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index d516103b33e..f0cf9445c0f 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -19,16 +19,17 @@ package util import ( "fmt" + "testing" + "time" + kubernetes2 "github.com/elastic/beats/v7/libbeat/autodiscover/providers/kubernetes" "github.com/elastic/elastic-agent-autodiscover/kubernetes/metadata" conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/mapstr" "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" k8s "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" - "testing" - "time" - "github.com/elastic/elastic-agent-autodiscover/kubernetes" "github.com/elastic/elastic-agent-libs/logp" From b76e38d85a2f60c7727d759ea93a28303071f874 Mon Sep 17 00:00:00 2001 From: constanca Date: Fri, 26 Jan 2024 15:31:24 +0100 Subject: [PATCH 10/61] run mage check and make update Signed-off-by: constanca --- .../module/kubernetes/util/kubernetes_test.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index f0cf9445c0f..4e6b5add6af 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -22,19 +22,21 @@ import ( "testing" "time" - kubernetes2 "github.com/elastic/beats/v7/libbeat/autodiscover/providers/kubernetes" - "github.com/elastic/elastic-agent-autodiscover/kubernetes/metadata" - conf "github.com/elastic/elastic-agent-libs/config" - "github.com/elastic/elastic-agent-libs/mapstr" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" k8s "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" - "github.com/elastic/elastic-agent-autodiscover/kubernetes" - "github.com/elastic/elastic-agent-libs/logp" + kubernetes2 "github.com/elastic/beats/v7/libbeat/autodiscover/providers/kubernetes" + "github.com/elastic/elastic-agent-autodiscover/kubernetes/metadata" + conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/mapstr" + "github.com/stretchr/testify/require" k8sfake "k8s.io/client-go/kubernetes/fake" + + "github.com/elastic/elastic-agent-autodiscover/kubernetes" + "github.com/elastic/elastic-agent-libs/logp" ) func TestWatchOptions(t *testing.T) { From 
4cc8a47ca891b64c66a50ddb0ed6149c7e0e5d6e Mon Sep 17 00:00:00 2001 From: constanca Date: Mon, 29 Jan 2024 10:47:02 +0100 Subject: [PATCH 11/61] Pass add resource metadata instead of whole config. Signed-off-by: constanca --- .../module/kubernetes/util/kubernetes.go | 34 +++++++++++-------- .../module/kubernetes/util/kubernetes_test.go | 14 ++++---- 2 files changed, 26 insertions(+), 22 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index c04eae3ca68..c8da89e9873 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -157,7 +157,7 @@ func getResource(resourceName string) kubernetes.Resource { // getExtraWatchers returns a list of the extra resources to watch based on some resource. // The full list can be seen in https://github.com/elastic/beats/issues/37243, at Expected Watchers section. -func getExtraWatchers(resourceName string, config *kubernetesConfig) []string { +func getExtraWatchers(resourceName string, addResourceMetadata *metadata.AddResourceMetadataConfig) []string { switch resourceName { case PodResource: extra := []string{NamespaceResource, NodeResource} @@ -165,10 +165,10 @@ func getExtraWatchers(resourceName string, config *kubernetesConfig) []string { // in order to be able to retrieve 2nd layer Owner metadata like in case of: // Deployment -> Replicaset -> Pod // CronJob -> job -> Pod - if config.AddResourceMetadata != nil && config.AddResourceMetadata.Deployment { + if addResourceMetadata != nil && addResourceMetadata.Deployment { extra = append(extra, ReplicaSetResource) } - if config.AddResourceMetadata != nil && config.AddResourceMetadata.CronJob { + if addResourceMetadata != nil && addResourceMetadata.CronJob { extra = append(extra, JobResource) } return extra @@ -334,7 +334,7 @@ func createAllWatchers( addToMetricsetsUsing(resourceName, metricsetName, resourceWatchers) // Create the extra watchers required by this resource - extraWatchers := getExtraWatchers(resourceName, config) + extraWatchers := getExtraWatchers(resourceName, config.AddResourceMetadata) for _, extra := range extraWatchers { extraRes := getResource(extra) if extraRes != nil { @@ -357,13 +357,15 @@ func createAllWatchers( } // createMetadataGen creates the metadata generator for resources in general -func createMetadataGen(client k8sclient.Interface, commonConfig *conf.C, config *kubernetesConfig, resourceName string, resourceWatchers *Watchers) (*metadata.Resource, error) { +func createMetadataGen(client k8sclient.Interface, commonConfig *conf.C, addResourceMetadata *metadata.AddResourceMetadataConfig, + resourceName string, resourceWatchers *Watchers) (*metadata.Resource, error) { + // check if the resource is namespace aware namespaceAware := false if resourceName == NamespaceResource { namespaceAware = true } else { - extras := getExtraWatchers(resourceName, config) + extras := getExtraWatchers(resourceName, addResourceMetadata) for _, extra := range extras { if extra == NamespaceResource { namespaceAware = true @@ -389,7 +391,7 @@ func createMetadataGen(client k8sclient.Interface, commonConfig *conf.C, config return nil, fmt.Errorf("could not create the metadata generator, as the watcher for namespace does not exist") } - n := metadata.NewNamespaceMetadataGenerator(config.AddResourceMetadata.Namespace, + n := metadata.NewNamespaceMetadataGenerator(addResourceMetadata.Namespace, (*namespaceWatcher).watcher.Store(), client) metaGen = 
metadata.NewNamespaceAwareResourceMetadataGenerator(commonConfig, client, n) } else { @@ -400,7 +402,9 @@ func createMetadataGen(client k8sclient.Interface, commonConfig *conf.C, config } // createMetadataGenSpecific creates the metadata generator for a specific resource - pod or service -func createMetadataGenSpecific(client k8sclient.Interface, commonConfig *conf.C, config *kubernetesConfig, resourceName string, resourceWatchers *Watchers) (metadata.MetaGen, error) { +func createMetadataGenSpecific(client k8sclient.Interface, commonConfig *conf.C, addResourceMetadata *metadata.AddResourceMetadataConfig, + resourceName string, resourceWatchers *Watchers) (metadata.MetaGen, error) { + resourceWatchers.lock.RLock() defer resourceWatchers.lock.RUnlock() @@ -430,14 +434,14 @@ func createMetadataGenSpecific(client k8sclient.Interface, commonConfig *conf.C, } metaGen = metadata.GetPodMetaGen(commonConfig, (*resWatcher).watcher, nodeWatcher, namespaceWatcher, replicaSetWatcher, - jobWatcher, config.AddResourceMetadata) + jobWatcher, addResourceMetadata) return metaGen, nil } else if resourceName == ServiceResource { namespaceWatcher := resourceWatchers.watchersMap[NamespaceResource] if namespaceWatcher == nil { return nil, fmt.Errorf("could not create the metadata generator, as the watcher for namespace does not exist") } - namespaceMeta := metadata.NewNamespaceMetadataGenerator(config.AddResourceMetadata.Namespace, + namespaceMeta := metadata.NewNamespaceMetadataGenerator(addResourceMetadata.Namespace, (*namespaceWatcher).watcher.Store(), client) metaGen = metadata.NewServiceMetadataGenerator(commonConfig, (*resWatcher).watcher.Store(), namespaceMeta, client) @@ -487,9 +491,9 @@ func NewResourceMetadataEnricher( var specificMetaGen metadata.MetaGen var generalMetaGen *metadata.Resource if resourceName == ServiceResource || resourceName == PodResource { - specificMetaGen, err = createMetadataGenSpecific(client, commonConfig, config, resourceName, resourceWatchers) + specificMetaGen, err = createMetadataGenSpecific(client, commonConfig, config.AddResourceMetadata, resourceName, resourceWatchers) } else { - generalMetaGen, err = createMetadataGen(client, commonConfig, config, resourceName, resourceWatchers) + generalMetaGen, err = createMetadataGen(client, commonConfig, config.AddResourceMetadata, resourceName, resourceWatchers) } if err != nil { log.Errorf("Error trying to create the metadata generators: %s", err) @@ -611,7 +615,7 @@ func NewContainerMetadataEnricher( return &nilEnricher{} } - metaGen, err := createMetadataGenSpecific(client, commonConfig, config, PodResource, resourceWatchers) + metaGen, err := createMetadataGenSpecific(client, commonConfig, config.AddResourceMetadata, PodResource, resourceWatchers) if err != nil { log.Errorf("Error trying to create the metadata generators: %s", err) return &nilEnricher{} @@ -803,7 +807,7 @@ func (e *enricher) Start(resourceWatchers *Watchers) { } } - extras := getExtraWatchers(e.resourceName, e.config) + extras := getExtraWatchers(e.resourceName, e.config.AddResourceMetadata) for _, extra := range extras { extraWatcher := resourceWatchers.watchersMap[extra] if extraWatcher != nil && !extraWatcher.started { @@ -829,7 +833,7 @@ func (e *enricher) Stop(resourceWatchers *Watchers) { } } - extras := getExtraWatchers(e.resourceName, e.config) + extras := getExtraWatchers(e.resourceName, e.config.AddResourceMetadata) for _, extra := range extras { extraWatcher := resourceWatchers.watchersMap[extra] if extraWatcher != nil && extraWatcher.started { diff 
--git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index 4e6b5add6af..8657e66918a 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -209,7 +209,7 @@ func TestCreateAllWatchers(t *testing.T) { // Start watcher for a resource that requires other resources, should start all the watchers metricsetPod := "pod" - extras := getExtraWatchers(PodResource, config) + extras := getExtraWatchers(PodResource, config.AddResourceMetadata) err = createAllWatchers(client, metricsetPod, PodResource, false, config, log, resourceWatchers) require.NoError(t, err) @@ -242,7 +242,7 @@ func TestCreateMetaGen(t *testing.T) { } client := k8sfake.NewSimpleClientset() - _, err = createMetadataGen(client, commonConfig, config, DeploymentResource, resourceWatchers) + _, err = createMetadataGen(client, commonConfig, config.AddResourceMetadata, DeploymentResource, resourceWatchers) // At this point, no watchers were created require.Error(t, err) @@ -252,7 +252,7 @@ func TestCreateMetaGen(t *testing.T) { require.NoError(t, err) // Create the generators, this time without error - _, err = createMetadataGen(client, commonConfig, config, DeploymentResource, resourceWatchers) + _, err = createMetadataGen(client, commonConfig, config.AddResourceMetadata, DeploymentResource, resourceWatchers) require.NoError(t, err) } @@ -278,7 +278,7 @@ func TestCreateMetaGenSpecific(t *testing.T) { // For pod: metricsetPod := "pod" - _, err = createMetadataGenSpecific(client, commonConfig, config, PodResource, resourceWatchers) + _, err = createMetadataGenSpecific(client, commonConfig, config.AddResourceMetadata, PodResource, resourceWatchers) // At this point, no watchers were created require.Error(t, err) @@ -286,11 +286,11 @@ func TestCreateMetaGenSpecific(t *testing.T) { err = createAllWatchers(client, metricsetPod, PodResource, false, config, log, resourceWatchers) require.NoError(t, err) - _, err = createMetadataGenSpecific(client, commonConfig, config, PodResource, resourceWatchers) + _, err = createMetadataGenSpecific(client, commonConfig, config.AddResourceMetadata, PodResource, resourceWatchers) require.NoError(t, err) // For service: - _, err = createMetadataGenSpecific(client, commonConfig, config, ServiceResource, resourceWatchers) + _, err = createMetadataGenSpecific(client, commonConfig, config.AddResourceMetadata, ServiceResource, resourceWatchers) // At this point, no watchers were created require.Error(t, err) @@ -299,7 +299,7 @@ func TestCreateMetaGenSpecific(t *testing.T) { err = createAllWatchers(client, metricsetService, ServiceResource, false, config, log, resourceWatchers) require.NoError(t, err) - _, err = createMetadataGenSpecific(client, commonConfig, config, ServiceResource, resourceWatchers) + _, err = createMetadataGenSpecific(client, commonConfig, config.AddResourceMetadata, ServiceResource, resourceWatchers) require.NoError(t, err) } From ab475d13c0ab797136eb6892aea59b5dc62b702a Mon Sep 17 00:00:00 2001 From: constanca Date: Mon, 29 Jan 2024 13:12:54 +0100 Subject: [PATCH 12/61] Refactor watch options to include namespace only when necessary Signed-off-by: constanca --- .../module/kubernetes/util/kubernetes.go | 49 +++++++------------ .../module/kubernetes/util/kubernetes_test.go | 12 ++--- 2 files changed, 22 insertions(+), 39 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 
c8da89e9873..bd195bab050 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -217,7 +217,6 @@ func getWatchOptions(config *kubernetesConfig, nodeScope bool, client k8sclient. var err error options := kubernetes.WatchOptions{ SyncTimeout: config.SyncPeriod, - Namespace: config.Namespace, } // Watch objects in the node only @@ -236,13 +235,22 @@ func getWatchOptions(config *kubernetesConfig, nodeScope bool, client k8sclient. return &options, err } +func isNamespaced(resourceName string) bool { + if resourceName == NodeResource || resourceName == PersistentVolumeResource || resourceName == StorageClassResource || + resourceName == NamespaceResource { + return false + } + return true +} + // createWatcher creates a watcher for a specific resource func createWatcher( resourceName string, resource kubernetes.Resource, options kubernetes.WatchOptions, client k8sclient.Interface, - resourceWatchers *Watchers) (bool, error) { + resourceWatchers *Watchers, + namespace string) (bool, error) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() @@ -250,6 +258,10 @@ func createWatcher( _, ok := resourceWatchers.watchersMap[resourceName] // if it does not exist, create the watcher if !ok { + // check if we need to add namespace to the options + if isNamespaced(resourceName) { + options.Namespace = namespace + } watcher, err := kubernetes.NewNamedWatcher(resourceName, client, resource, options, nil) if err != nil { return false, err @@ -325,7 +337,7 @@ func createAllWatchers( // Create a watcher for the given resource. // If it fails, we return an error, so we can stop the extra watchers from creating. - created, err := createWatcher(resourceName, res, *options, client, resourceWatchers) + created, err := createWatcher(resourceName, res, *options, client, resourceWatchers, config.Namespace) if err != nil { return fmt.Errorf("error initializing Kubernetes watcher %s, required by %s: %w", resourceName, metricsetName, err) } else if created { @@ -338,7 +350,7 @@ func createAllWatchers( for _, extra := range extraWatchers { extraRes := getResource(extra) if extraRes != nil { - created, err = createWatcher(extra, extraRes, *options, client, resourceWatchers) + created, err = createWatcher(extra, extraRes, *options, client, resourceWatchers, config.Namespace) if err != nil { log.Errorf("Error initializing Kubernetes watcher %s, required by %s: %s", extra, metricsetName, err) } else { @@ -360,20 +372,6 @@ func createAllWatchers( func createMetadataGen(client k8sclient.Interface, commonConfig *conf.C, addResourceMetadata *metadata.AddResourceMetadataConfig, resourceName string, resourceWatchers *Watchers) (*metadata.Resource, error) { - // check if the resource is namespace aware - namespaceAware := false - if resourceName == NamespaceResource { - namespaceAware = true - } else { - extras := getExtraWatchers(resourceName, addResourceMetadata) - for _, extra := range extras { - if extra == NamespaceResource { - namespaceAware = true - break - } - } - } - resourceWatchers.lock.RLock() defer resourceWatchers.lock.RUnlock() @@ -383,20 +381,7 @@ func createMetadataGen(client k8sclient.Interface, commonConfig *conf.C, addReso return nil, fmt.Errorf("could not create the metadata generator, as the watcher for %s does not exist", resourceName) } - var metaGen *metadata.Resource - if namespaceAware { - namespaceWatcher := resourceWatchers.watchersMap[NamespaceResource] - - if namespaceWatcher == nil { - return nil, fmt.Errorf("could not 
create the metadata generator, as the watcher for namespace does not exist") - } - - n := metadata.NewNamespaceMetadataGenerator(addResourceMetadata.Namespace, - (*namespaceWatcher).watcher.Store(), client) - metaGen = metadata.NewNamespaceAwareResourceMetadataGenerator(commonConfig, client, n) - } else { - metaGen = metadata.NewResourceMetadataGenerator(commonConfig, client) - } + metaGen := metadata.NewResourceMetadataGenerator(commonConfig, client) return metaGen, nil } diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index 8657e66918a..b56ecee4344 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -52,13 +52,11 @@ func TestWatchOptions(t *testing.T) { options, err := getWatchOptions(config, false, client, log) require.NoError(t, err) require.Equal(t, options.SyncTimeout, config.SyncPeriod) - require.Equal(t, options.Namespace, config.Namespace) require.NotEqual(t, options.Node, config.Node) options, err = getWatchOptions(config, true, client, log) require.NoError(t, err) require.Equal(t, options.SyncTimeout, config.SyncPeriod) - require.Equal(t, options.Namespace, config.Namespace) require.Equal(t, options.Node, config.Node) } @@ -76,7 +74,7 @@ func TestCreateWatcher(t *testing.T) { options, err := getWatchOptions(config, false, client, log) require.NoError(t, err) - created, err := createWatcher(NamespaceResource, &kubernetes.Node{}, *options, client, resourceWatchers) + created, err := createWatcher(NamespaceResource, &kubernetes.Node{}, *options, client, resourceWatchers, config.Namespace) require.True(t, created) require.NoError(t, err) @@ -86,7 +84,7 @@ func TestCreateWatcher(t *testing.T) { require.NotNil(t, resourceWatchers.watchersMap[NamespaceResource].watcher) resourceWatchers.lock.Unlock() - created, err = createWatcher(NamespaceResource, &kubernetes.Namespace{}, *options, client, resourceWatchers) + created, err = createWatcher(NamespaceResource, &kubernetes.Namespace{}, *options, client, resourceWatchers, config.Namespace) require.False(t, created) require.NoError(t, err) @@ -96,7 +94,7 @@ func TestCreateWatcher(t *testing.T) { require.NotNil(t, resourceWatchers.watchersMap[NamespaceResource].watcher) resourceWatchers.lock.Unlock() - created, err = createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers) + created, err = createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace) require.True(t, created) require.NoError(t, err) @@ -122,7 +120,7 @@ func TestAddToMetricsetsUsing(t *testing.T) { require.NoError(t, err) // Create the new entry with watcher and nil string array first - created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers) + created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace) require.True(t, created) require.NoError(t, err) @@ -160,7 +158,7 @@ func TestRemoveFromMetricsetsUsing(t *testing.T) { require.NoError(t, err) // Create the new entry with watcher and nil string array first - created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers) + created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace) require.True(t, created) require.NoError(t, err) From 
eac4bde29c80f85fdd8e242094d25890cc193da7 Mon Sep 17 00:00:00 2001 From: constanca Date: Mon, 29 Jan 2024 15:41:48 +0100 Subject: [PATCH 13/61] Revert createMetaGen - removed namespace Signed-off-by: constanca --- metricbeat/module/kubernetes/util/kubernetes.go | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index bd195bab050..7ee353532cf 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -372,6 +372,8 @@ func createAllWatchers( func createMetadataGen(client k8sclient.Interface, commonConfig *conf.C, addResourceMetadata *metadata.AddResourceMetadataConfig, resourceName string, resourceWatchers *Watchers) (*metadata.Resource, error) { + namespaceAware := isNamespaced(resourceName) + resourceWatchers.lock.RLock() defer resourceWatchers.lock.RUnlock() @@ -381,7 +383,20 @@ func createMetadataGen(client k8sclient.Interface, commonConfig *conf.C, addReso return nil, fmt.Errorf("could not create the metadata generator, as the watcher for %s does not exist", resourceName) } - metaGen := metadata.NewResourceMetadataGenerator(commonConfig, client) + var metaGen *metadata.Resource + if namespaceAware { + namespaceWatcher := resourceWatchers.watchersMap[NamespaceResource] + + if namespaceWatcher == nil { + return nil, fmt.Errorf("could not create the metadata generator, as the watcher for namespace does not exist") + } + + n := metadata.NewNamespaceMetadataGenerator(addResourceMetadata.Namespace, + (*namespaceWatcher).watcher.Store(), client) + metaGen = metadata.NewNamespaceAwareResourceMetadataGenerator(commonConfig, client, n) + } else { + metaGen = metadata.NewResourceMetadataGenerator(commonConfig, client) + } return metaGen, nil } From 13e33a001a4e6761ebdeee5c0f66a75a66b8c6ac Mon Sep 17 00:00:00 2001 From: constanca Date: Mon, 29 Jan 2024 15:43:54 +0100 Subject: [PATCH 14/61] Revert createMetaGen - removed namespace Signed-off-by: constanca --- metricbeat/module/kubernetes/util/kubernetes.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 7ee353532cf..8b6f88d8a81 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -372,8 +372,6 @@ func createAllWatchers( func createMetadataGen(client k8sclient.Interface, commonConfig *conf.C, addResourceMetadata *metadata.AddResourceMetadataConfig, resourceName string, resourceWatchers *Watchers) (*metadata.Resource, error) { - namespaceAware := isNamespaced(resourceName) - resourceWatchers.lock.RLock() defer resourceWatchers.lock.RUnlock() @@ -384,13 +382,9 @@ func createMetadataGen(client k8sclient.Interface, commonConfig *conf.C, addReso } var metaGen *metadata.Resource - if namespaceAware { - namespaceWatcher := resourceWatchers.watchersMap[NamespaceResource] - - if namespaceWatcher == nil { - return nil, fmt.Errorf("could not create the metadata generator, as the watcher for namespace does not exist") - } + namespaceWatcher := resourceWatchers.watchersMap[NamespaceResource] + if namespaceWatcher != nil { n := metadata.NewNamespaceMetadataGenerator(addResourceMetadata.Namespace, (*namespaceWatcher).watcher.Store(), client) metaGen = metadata.NewNamespaceAwareResourceMetadataGenerator(commonConfig, client, n) From 688c406d9cdf6e897aa43c84fe3ebd99d5381642 Mon Sep 17 
00:00:00 2001 From: constanca Date: Tue, 30 Jan 2024 17:38:03 +0100 Subject: [PATCH 15/61] remove log library Signed-off-by: constanca --- metricbeat/module/kubernetes/util/kubernetes.go | 14 ++++++++------ .../module/kubernetes/util/kubernetes_test.go | 11 +++++++---- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 8b6f88d8a81..619564e8be5 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -24,7 +24,6 @@ import ( "sync" "time" - "gotest.tools/gotestsum/log" k8sclient "k8s.io/client-go/kubernetes" "k8s.io/apimachinery/pkg/api/meta" @@ -73,6 +72,7 @@ type enricher struct { resourceName string isPod bool config *kubernetesConfig + log *logp.Logger } type nilEnricher struct{} @@ -564,7 +564,7 @@ func NewResourceMetadataEnricher( return join(getString(e, mb.ModuleDataKey+".namespace"), getString(e, "name")) } - enricher := buildMetadataEnricher(metricsetName, resourceName, resourceWatchers, config, updateFunc, deleteFunc, indexFunc) + enricher := buildMetadataEnricher(metricsetName, resourceName, resourceWatchers, config, updateFunc, deleteFunc, indexFunc, log) if resourceName == PodResource { enricher.isPod = true } @@ -689,7 +689,7 @@ func NewContainerMetadataEnricher( return join(getString(e, mb.ModuleDataKey+".namespace"), getString(e, mb.ModuleDataKey+".pod.name"), getString(e, "name")) } - enricher := buildMetadataEnricher(metricsetName, PodResource, resourceWatchers, config, updateFunc, deleteFunc, indexFunc) + enricher := buildMetadataEnricher(metricsetName, PodResource, resourceWatchers, config, updateFunc, deleteFunc, indexFunc, log) return enricher } @@ -751,7 +751,8 @@ func buildMetadataEnricher( config *kubernetesConfig, update func(map[string]mapstr.M, kubernetes.Resource), delete func(map[string]mapstr.M, kubernetes.Resource), - index func(e mapstr.M) string) *enricher { + index func(e mapstr.M) string, + log *logp.Logger) *enricher { enricher := enricher{ metadata: map[string]mapstr.M{}, @@ -759,6 +760,7 @@ func buildMetadataEnricher( resourceName: resourceName, metricsetName: metricsetName, config: config, + log: log, } resourceWatchers.lock.Lock() @@ -795,7 +797,7 @@ func (e *enricher) Start(resourceWatchers *Watchers) { resourceWatcher := resourceWatchers.watchersMap[e.resourceName] if resourceWatcher != nil && !resourceWatcher.started { if err := resourceWatcher.watcher.Start(); err != nil { - log.Warnf("Error starting %s watcher: %s", e.resourceName, err) + e.log.Warnf("Error starting %s watcher: %s", e.resourceName, err) } else { resourceWatcher.started = true } @@ -806,7 +808,7 @@ func (e *enricher) Start(resourceWatchers *Watchers) { extraWatcher := resourceWatchers.watchersMap[extra] if extraWatcher != nil && !extraWatcher.started { if err := extraWatcher.watcher.Start(); err != nil { - log.Warnf("Error starting %s watcher: %s", extra, err) + e.log.Warnf("Error starting %s watcher: %s", extra, err) } else { extraWatcher.started = true } diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index b56ecee4344..2c56b47da8a 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -331,8 +331,10 @@ func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { }, } + log := logp.NewLogger(selector) + enricherNamespace := 
buildMetadataEnricher(metricsetNamespace, NamespaceResource, resourceWatchers, config, - funcs.update, funcs.delete, funcs.index) + funcs.update, funcs.delete, funcs.index, log) resourceWatchers.lock.Lock() watcher := resourceWatchers.watchersMap[NamespaceResource] require.False(t, watcher.started) @@ -354,7 +356,7 @@ func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { // Stopping the deployment watcher should stop now both watchers enricherDeployment := buildMetadataEnricher(metricsetDeployment, DeploymentResource, resourceWatchers, config, - funcs.update, funcs.delete, funcs.index) + funcs.update, funcs.delete, funcs.index, log) enricherDeployment.Stop(resourceWatchers) resourceWatchers.lock.Lock() @@ -396,8 +398,9 @@ func TestBuildMetadataEnricher_Start_Stop_SameResources(t *testing.T) { }, } + log := logp.NewLogger(selector) enricherPod := buildMetadataEnricher(metricsetPod, PodResource, resourceWatchers, config, - funcs.update, funcs.delete, funcs.index) + funcs.update, funcs.delete, funcs.index, log) resourceWatchers.lock.Lock() watcher := resourceWatchers.watchersMap[PodResource] require.False(t, watcher.started) @@ -419,7 +422,7 @@ func TestBuildMetadataEnricher_Start_Stop_SameResources(t *testing.T) { // Stopping the state_pod watcher should stop pod watcher enricherStatePod := buildMetadataEnricher(metricsetStatePod, PodResource, resourceWatchers, config, - funcs.update, funcs.delete, funcs.index) + funcs.update, funcs.delete, funcs.index, log) enricherStatePod.Stop(resourceWatchers) resourceWatchers.lock.Lock() From e9783e01efdc6dc8ff52dd30461ad84584929c17 Mon Sep 17 00:00:00 2001 From: constanca Date: Fri, 2 Feb 2024 12:19:09 +0100 Subject: [PATCH 16/61] add enrichers to watchers. Signed-off-by: constanca --- .../helper/kubernetes/state_metricset.go | 5 --- .../module/kubernetes/util/kubernetes.go | 37 ++++++++++++------- .../module/kubernetes/util/kubernetes_test.go | 11 ++++-- 3 files changed, 31 insertions(+), 22 deletions(-) diff --git a/metricbeat/helper/kubernetes/state_metricset.go b/metricbeat/helper/kubernetes/state_metricset.go index 7bfeec344fc..aad813e0099 100644 --- a/metricbeat/helper/kubernetes/state_metricset.go +++ b/metricbeat/helper/kubernetes/state_metricset.go @@ -77,11 +77,6 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { mapping := mappings[base.Name()] lock.Unlock() - //resourceName := base.Name() - //if resourceName != util.NamespaceResource { - // resourceName = strings.ReplaceAll(resourceName, prefix, "") - //} - return &MetricSet{ BaseMetricSet: base, prometheusClient: prometheusClient, diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 619564e8be5..fc608ec4b05 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -86,8 +86,11 @@ type watcherData struct { // metricsets are used instead of resource names to avoid conflicts between // state_pod / pod, state_node / node, state_container / container metricsetsUsing []string - watcher kubernetes.Watcher - started bool // true if watcher has started, false otherwise + + watcher kubernetes.Watcher + started bool // true if watcher has started, false otherwise + + enrichers []*enricher // list of enrichers using this watcher } type Watchers struct { @@ -519,7 +522,6 @@ func NewResourceMetadataEnricher( nodeStore.SetNodeMetrics(metrics) m[id] = generalMetaGen.Generate(NodeResource, r) - case *kubernetes.Deployment: m[id] = 
generalMetaGen.Generate(DeploymentResource, r) case *kubernetes.Job: @@ -754,7 +756,7 @@ func buildMetadataEnricher( index func(e mapstr.M) string, log *logp.Logger) *enricher { - enricher := enricher{ + enricher := &enricher{ metadata: map[string]mapstr.M{}, index: index, resourceName: resourceName, @@ -768,26 +770,33 @@ func buildMetadataEnricher( watcher := resourceWatchers.watchersMap[resourceName] if watcher != nil { + watcher.enrichers = append(watcher.enrichers, enricher) watcher.watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - enricher.Lock() - defer enricher.Unlock() - update(enricher.metadata, obj.(kubernetes.Resource)) + for _, enricher := range watcher.enrichers { + enricher.Lock() + update(enricher.metadata, obj.(kubernetes.Resource)) + enricher.Unlock() + } }, UpdateFunc: func(obj interface{}) { - enricher.Lock() - defer enricher.Unlock() - update(enricher.metadata, obj.(kubernetes.Resource)) + for _, enricher := range watcher.enrichers { + enricher.Lock() + update(enricher.metadata, obj.(kubernetes.Resource)) + enricher.Unlock() + } }, DeleteFunc: func(obj interface{}) { - enricher.Lock() - defer enricher.Unlock() - delete(enricher.metadata, obj.(kubernetes.Resource)) + for _, enricher := range watcher.enrichers { + enricher.Lock() + delete(enricher.metadata, obj.(kubernetes.Resource)) + enricher.Unlock() + } }, }) } - return &enricher + return enricher } func (e *enricher) Start(resourceWatchers *Watchers) { diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index 2c56b47da8a..0e926e9d025 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -22,6 +22,10 @@ import ( "testing" "time" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" k8s "k8s.io/client-go/kubernetes" @@ -432,7 +436,6 @@ func TestBuildMetadataEnricher_Start_Stop_SameResources(t *testing.T) { resourceWatchers.lock.Unlock() } -/* func TestBuildMetadataEnricher_EventHandler(t *testing.T) { resourceWatchers := NewWatchers() @@ -466,7 +469,10 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { }, } - enricher := buildMetadataEnricher(PodResource, resourceWatchers, config, funcs.update, funcs.delete, funcs.index) + metricset := "pod" + log := logp.NewLogger(selector) + + enricher := buildMetadataEnricher(metricset, PodResource, resourceWatchers, config, funcs.update, funcs.delete, funcs.index, log) resourceWatchers.lock.Lock() wData := resourceWatchers.watchersMap[PodResource] mockW := wData.watcher.(*mockWatcher) @@ -547,7 +553,6 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { require.False(t, watcher.started) resourceWatchers.lock.Unlock() } -*/ type mockFuncs struct { updated kubernetes.Resource From 8e66700b0d185534a3181eb535335373eb8f6f0b Mon Sep 17 00:00:00 2001 From: constanca Date: Mon, 5 Feb 2024 16:07:46 +0100 Subject: [PATCH 17/61] Add metadataEvents to watcher Signed-off-by: constanca --- .../module/kubernetes/util/kubernetes.go | 113 +++++++++++++----- .../module/kubernetes/util/kubernetes_test.go | 35 ++++-- 2 files changed, 108 insertions(+), 40 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index fc608ec4b05..d61d81707e1 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ 
b/metricbeat/module/kubernetes/util/kubernetes.go @@ -91,6 +91,8 @@ type watcherData struct { started bool // true if watcher has started, false otherwise enrichers []*enricher // list of enrichers using this watcher + + metadataEvents map[string]mapstr.M // resulted metadata events from the resource event handler } type Watchers struct { @@ -269,7 +271,11 @@ func createWatcher( if err != nil { return false, err } - resourceWatchers.watchersMap[resourceName] = &watcherData{watcher: watcher, started: false} + resourceWatchers.watchersMap[resourceName] = &watcherData{ + watcher: watcher, + started: false, + metadataEvents: make(map[string]mapstr.M), + } return true, nil } return false, nil @@ -497,13 +503,13 @@ func NewResourceMetadataEnricher( return &nilEnricher{} } - updateFunc := func(m map[string]mapstr.M, r kubernetes.Resource) { + updateFunc := func(r kubernetes.Resource) map[string]mapstr.M { accessor, _ := meta.Accessor(r) id := join(accessor.GetNamespace(), accessor.GetName()) switch r := r.(type) { case *kubernetes.Pod: - m[id] = specificMetaGen.Generate(r) + return map[string]mapstr.M{id: specificMetaGen.Generate(r)} case *kubernetes.Node: nodeName := r.GetObjectMeta().GetName() @@ -521,35 +527,35 @@ func NewResourceMetadataEnricher( nodeStore, _ := metricsRepo.AddNodeStore(nodeName) nodeStore.SetNodeMetrics(metrics) - m[id] = generalMetaGen.Generate(NodeResource, r) + return map[string]mapstr.M{id: generalMetaGen.Generate(NodeResource, r)} case *kubernetes.Deployment: - m[id] = generalMetaGen.Generate(DeploymentResource, r) + return map[string]mapstr.M{id: generalMetaGen.Generate(DeploymentResource, r)} case *kubernetes.Job: - m[id] = generalMetaGen.Generate(JobResource, r) + return map[string]mapstr.M{id: generalMetaGen.Generate(JobResource, r)} case *kubernetes.CronJob: - m[id] = generalMetaGen.Generate(CronJobResource, r) + return map[string]mapstr.M{id: generalMetaGen.Generate(CronJobResource, r)} case *kubernetes.Service: - m[id] = specificMetaGen.Generate(r) + return map[string]mapstr.M{id: specificMetaGen.Generate(r)} case *kubernetes.StatefulSet: - m[id] = generalMetaGen.Generate(StatefulSetResource, r) + return map[string]mapstr.M{id: generalMetaGen.Generate(StatefulSetResource, r)} case *kubernetes.Namespace: - m[id] = generalMetaGen.Generate(NamespaceResource, r) + return map[string]mapstr.M{id: generalMetaGen.Generate(NamespaceResource, r)} case *kubernetes.ReplicaSet: - m[id] = generalMetaGen.Generate(ReplicaSetResource, r) + return map[string]mapstr.M{id: generalMetaGen.Generate(ReplicaSetResource, r)} case *kubernetes.DaemonSet: - m[id] = generalMetaGen.Generate(DaemonSetResource, r) + return map[string]mapstr.M{id: generalMetaGen.Generate(DaemonSetResource, r)} case *kubernetes.PersistentVolume: - m[id] = generalMetaGen.Generate(PersistentVolumeResource, r) + return map[string]mapstr.M{id: generalMetaGen.Generate(PersistentVolumeResource, r)} case *kubernetes.PersistentVolumeClaim: - m[id] = generalMetaGen.Generate(PersistentVolumeClaimResource, r) + return map[string]mapstr.M{id: generalMetaGen.Generate(PersistentVolumeClaimResource, r)} case *kubernetes.StorageClass: - m[id] = generalMetaGen.Generate(StorageClassResource, r) + return map[string]mapstr.M{id: generalMetaGen.Generate(StorageClassResource, r)} default: - m[id] = generalMetaGen.Generate(r.GetObjectKind().GroupVersionKind().Kind, r) + return map[string]mapstr.M{id: generalMetaGen.Generate(r.GetObjectKind().GroupVersionKind().Kind, r)} } } - deleteFunc := func(m map[string]mapstr.M, r 
kubernetes.Resource) { + deleteFunc := func(r kubernetes.Resource) []string { accessor, _ := meta.Accessor(r) switch r := r.(type) { @@ -559,7 +565,7 @@ func NewResourceMetadataEnricher( } id := join(accessor.GetNamespace(), accessor.GetName()) - delete(m, id) + return []string{id} } indexFunc := func(e mapstr.M) string { @@ -617,7 +623,9 @@ func NewContainerMetadataEnricher( return &nilEnricher{} } - updateFunc := func(m map[string]mapstr.M, r kubernetes.Resource) { + updateFunc := func(r kubernetes.Resource) map[string]mapstr.M { + metadataEvents := make(map[string]mapstr.M) + pod, ok := r.(*kubernetes.Pod) if !ok { base.Logger().Debugf("Error while casting event: %s", ok) @@ -668,11 +676,14 @@ func NewContainerMetadataEnricher( id := join(pod.GetObjectMeta().GetNamespace(), pod.GetObjectMeta().GetName(), container.Name) cmeta.DeepUpdate(pmeta) - m[id] = cmeta + + metadataEvents[id] = cmeta } + return metadataEvents } - deleteFunc := func(m map[string]mapstr.M, r kubernetes.Resource) { + deleteFunc := func(r kubernetes.Resource) []string { + ids := make([]string, 0) pod, ok := r.(*kubernetes.Pod) if !ok { base.Logger().Debugf("Error while casting event: %s", ok) @@ -683,8 +694,10 @@ func NewContainerMetadataEnricher( for _, container := range append(pod.Spec.Containers, pod.Spec.InitContainers...) { id := join(pod.ObjectMeta.GetNamespace(), pod.GetObjectMeta().GetName(), container.Name) - delete(m, id) + ids = append(ids, id) } + + return ids } indexFunc := func(e mapstr.M) string { @@ -751,14 +764,14 @@ func buildMetadataEnricher( resourceName string, resourceWatchers *Watchers, config *kubernetesConfig, - update func(map[string]mapstr.M, kubernetes.Resource), - delete func(map[string]mapstr.M, kubernetes.Resource), - index func(e mapstr.M) string, + updateFunc func(kubernetes.Resource) map[string]mapstr.M, + deleteFunc func(kubernetes.Resource) []string, + indexFunc func(e mapstr.M) string, log *logp.Logger) *enricher { enricher := &enricher{ metadata: map[string]mapstr.M{}, - index: index, + index: indexFunc, resourceName: resourceName, metricsetName: metricsetName, config: config, @@ -771,25 +784,61 @@ func buildMetadataEnricher( watcher := resourceWatchers.watchersMap[resourceName] if watcher != nil { watcher.enrichers = append(watcher.enrichers, enricher) + + // Check if there are past events for this resource and update metadata if there is + for id, metadata := range watcher.metadataEvents { + enricher.metadata[id] = metadata + } + watcher.watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - for _, enricher := range watcher.enrichers { + var newMetadataEvents map[string]mapstr.M + for i, enricher := range watcher.enrichers { enricher.Lock() - update(enricher.metadata, obj.(kubernetes.Resource)) + if i == 0 { + newMetadataEvents = updateFunc(obj.(kubernetes.Resource)) + // add the new metadata to the watcher received metadata + for id, metadata := range newMetadataEvents { + watcher.metadataEvents[id] = metadata + } + } + for id, metadata := range newMetadataEvents { + enricher.metadata[id] = metadata + } enricher.Unlock() } }, UpdateFunc: func(obj interface{}) { - for _, enricher := range watcher.enrichers { + var updatedMetadataEvents map[string]mapstr.M + for i, enricher := range watcher.enrichers { enricher.Lock() - update(enricher.metadata, obj.(kubernetes.Resource)) + if i == 0 { + updatedMetadataEvents = updateFunc(obj.(kubernetes.Resource)) + // update the watcher metadata + for id, metadata := range updatedMetadataEvents { + 
watcher.metadataEvents[id] = metadata + } + } + for id, metadata := range updatedMetadataEvents { + enricher.metadata[id] = metadata + } enricher.Unlock() } }, DeleteFunc: func(obj interface{}) { - for _, enricher := range watcher.enrichers { + var ids []string + for i, enricher := range watcher.enrichers { enricher.Lock() - delete(enricher.metadata, obj.(kubernetes.Resource)) + if i == 0 { + ids = deleteFunc(obj.(kubernetes.Resource)) + // update this watcher events by removing all the metadata[id] + for _, id := range ids { + delete(watcher.metadataEvents, id) + } + } + for _, id := range ids { + delete(enricher.metadata, id) + } enricher.Unlock() } }, diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index 0e926e9d025..077a78fd3de 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -444,6 +444,7 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { watcher: &mockWatcher{}, started: false, metricsetsUsing: []string{PodResource}, + metadataEvents: make(map[string]mapstr.M), } resourceWatchers.lock.Unlock() @@ -459,6 +460,24 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { }, } + // check update function for the resulting add event + addEvent := map[string]mapstr.M{ + "enrich": { + "kubernetes": mapstr.M{ + "label": "value", + "pod": mapstr.M{ + "name": "enrich", + "uid": "mockuid", + }, + }, + "orchestrator": mapstr.M{ + "cluster": mapstr.M{ + "name": "gke-4242", + }, + }, + }, + } + config := &kubernetesConfig{ Namespace: "test-ns", SyncPeriod: time.Minute, @@ -483,12 +502,10 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { resourceWatchers.lock.Lock() watcher := resourceWatchers.watchersMap[PodResource] require.True(t, watcher.started) - resourceWatchers.lock.Unlock() - resourceWatchers.lock.Lock() - wData = resourceWatchers.watchersMap[PodResource] - mockW = wData.watcher.(*mockWatcher) + mockW = watcher.watcher.(*mockWatcher) mockW.handler.OnAdd(resource) + require.Equal(t, addEvent, watcher.metadataEvents) resourceWatchers.lock.Unlock() require.Equal(t, resource, funcs.updated) @@ -532,6 +549,7 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { wData = resourceWatchers.watchersMap[PodResource] mockW = wData.watcher.(*mockWatcher) mockW.handler.OnDelete(resource) + require.Equal(t, map[string]mapstr.M{}, watcher.metadataEvents) resourceWatchers.lock.Unlock() require.Equal(t, resource, funcs.deleted) @@ -560,7 +578,7 @@ type mockFuncs struct { indexed mapstr.M } -func (f *mockFuncs) update(m map[string]mapstr.M, obj kubernetes.Resource) { +func (f *mockFuncs) update(obj kubernetes.Resource) map[string]mapstr.M { accessor, _ := meta.Accessor(obj) f.updated = obj meta := mapstr.M{ @@ -576,13 +594,14 @@ func (f *mockFuncs) update(m map[string]mapstr.M, obj kubernetes.Resource) { kubernetes2.ShouldPut(meta, fmt.Sprintf("kubernetes.%v", k), v, logger) } kubernetes2.ShouldPut(meta, "orchestrator.cluster.name", "gke-4242", logger) - m[accessor.GetName()] = meta + id := accessor.GetName() + return map[string]mapstr.M{id: meta} } -func (f *mockFuncs) delete(m map[string]mapstr.M, obj kubernetes.Resource) { +func (f *mockFuncs) delete(obj kubernetes.Resource) []string { accessor, _ := meta.Accessor(obj) f.deleted = obj - delete(m, accessor.GetName()) + return []string{accessor.GetName()} } func (f *mockFuncs) index(m mapstr.M) string { From 0d248fffc0a86347436023aafbcfc994f26fb86e Mon Sep 17 00:00:00 
2001 From: constanca Date: Tue, 6 Feb 2024 10:52:40 +0100 Subject: [PATCH 18/61] Add metadataEvents to watcher Signed-off-by: constanca --- .../module/kubernetes/util/kubernetes.go | 73 +++++++------ .../module/kubernetes/util/kubernetes_test.go | 102 ++++++++++++++---- 2 files changed, 119 insertions(+), 56 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index d61d81707e1..e6796b94fbd 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -92,7 +92,7 @@ type watcherData struct { enrichers []*enricher // list of enrichers using this watcher - metadataEvents map[string]mapstr.M // resulted metadata events from the resource event handler + metadataObjects map[string]kubernetes.Resource // resulted metadata events from the resource event handler } type Watchers struct { @@ -272,9 +272,9 @@ func createWatcher( return false, err } resourceWatchers.watchersMap[resourceName] = &watcherData{ - watcher: watcher, - started: false, - metadataEvents: make(map[string]mapstr.M), + watcher: watcher, + started: false, + metadataObjects: make(map[string]kubernetes.Resource), } return true, nil } @@ -785,23 +785,27 @@ func buildMetadataEnricher( if watcher != nil { watcher.enrichers = append(watcher.enrichers, enricher) - // Check if there are past events for this resource and update metadata if there is - for id, metadata := range watcher.metadataEvents { - enricher.metadata[id] = metadata + // Check if there are past events for this resource and update metadata if there are + for _, obj := range watcher.metadataObjects { + newMetadataEvents := updateFunc(obj.(kubernetes.Resource)) + // add the new metadata to the watcher received metadata + for id, metadata := range newMetadataEvents { + enricher.metadata[id] = metadata + } } watcher.watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - var newMetadataEvents map[string]mapstr.M - for i, enricher := range watcher.enrichers { + // we need to calculate the id again in case it is a pod with multiple containers + r := obj.(kubernetes.Resource) + accessor, _ := meta.Accessor(r) + id := join(accessor.GetNamespace(), accessor.GetName()) + watcher.metadataObjects[id] = r + + for _, enricher := range watcher.enrichers { enricher.Lock() - if i == 0 { - newMetadataEvents = updateFunc(obj.(kubernetes.Resource)) - // add the new metadata to the watcher received metadata - for id, metadata := range newMetadataEvents { - watcher.metadataEvents[id] = metadata - } - } + newMetadataEvents := updateFunc(obj.(kubernetes.Resource)) + // add the new metadata to the watcher received metadata for id, metadata := range newMetadataEvents { enricher.metadata[id] = metadata } @@ -809,16 +813,16 @@ func buildMetadataEnricher( } }, UpdateFunc: func(obj interface{}) { - var updatedMetadataEvents map[string]mapstr.M - for i, enricher := range watcher.enrichers { + // we need to calculate the id again in case it is a pod with multiple containers + r := obj.(kubernetes.Resource) + accessor, _ := meta.Accessor(r) + id := join(accessor.GetNamespace(), accessor.GetName()) + watcher.metadataObjects[id] = r + + for _, enricher := range watcher.enrichers { enricher.Lock() - if i == 0 { - updatedMetadataEvents = updateFunc(obj.(kubernetes.Resource)) - // update the watcher metadata - for id, metadata := range updatedMetadataEvents { - watcher.metadataEvents[id] = metadata - } - } + updatedMetadataEvents := 
updateFunc(obj.(kubernetes.Resource)) + // update the watcher metadata for id, metadata := range updatedMetadataEvents { enricher.metadata[id] = metadata } @@ -826,17 +830,18 @@ func buildMetadataEnricher( } }, DeleteFunc: func(obj interface{}) { - var ids []string - for i, enricher := range watcher.enrichers { + // we need to calculate the id again in case it is a pod with multiple containers + r := obj.(kubernetes.Resource) + accessor, _ := meta.Accessor(r) + id := join(accessor.GetNamespace(), accessor.GetName()) + delete(watcher.metadataObjects, id) + + for _, enricher := range watcher.enrichers { enricher.Lock() - if i == 0 { - ids = deleteFunc(obj.(kubernetes.Resource)) - // update this watcher events by removing all the metadata[id] - for _, id := range ids { - delete(watcher.metadataEvents, id) - } - } + ids := deleteFunc(obj.(kubernetes.Resource)) + // update this watcher events by removing all the metadata[id] for _, id := range ids { + delete(watcher.metadataObjects, id) delete(enricher.metadata, id) } enricher.Unlock() diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index 077a78fd3de..122a5c095e2 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -443,8 +443,8 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { resourceWatchers.watchersMap[PodResource] = &watcherData{ watcher: &mockWatcher{}, started: false, - metricsetsUsing: []string{PodResource}, - metadataEvents: make(map[string]mapstr.M), + metricsetsUsing: []string{"pod"}, + metadataObjects: make(map[string]kubernetes.Resource), } resourceWatchers.lock.Unlock() @@ -459,24 +459,8 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { Namespace: "default", }, } - - // check update function for the resulting add event - addEvent := map[string]mapstr.M{ - "enrich": { - "kubernetes": mapstr.M{ - "label": "value", - "pod": mapstr.M{ - "name": "enrich", - "uid": "mockuid", - }, - }, - "orchestrator": mapstr.M{ - "cluster": mapstr.M{ - "name": "gke-4242", - }, - }, - }, - } + id := "default:enrich" + metadataObjects := map[string]kubernetes.Resource{id: resource} config := &kubernetesConfig{ Namespace: "test-ns", @@ -505,7 +489,7 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { mockW = watcher.watcher.(*mockWatcher) mockW.handler.OnAdd(resource) - require.Equal(t, addEvent, watcher.metadataEvents) + require.Equal(t, metadataObjects, watcher.metadataObjects) resourceWatchers.lock.Unlock() require.Equal(t, resource, funcs.updated) @@ -549,7 +533,7 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { wData = resourceWatchers.watchersMap[PodResource] mockW = wData.watcher.(*mockWatcher) mockW.handler.OnDelete(resource) - require.Equal(t, map[string]mapstr.M{}, watcher.metadataEvents) + require.Equal(t, map[string]kubernetes.Resource{}, watcher.metadataObjects) resourceWatchers.lock.Unlock() require.Equal(t, resource, funcs.deleted) @@ -572,6 +556,80 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { resourceWatchers.lock.Unlock() } +// Test if we can add metadata from past events to an enricher that is associated +// with a resource that had already triggered the handler functions +func TestBuildMetadataEnricher_EventHandler_PastObjects(t *testing.T) { + resourceWatchers := NewWatchers() + + resourceWatchers.lock.Lock() + resourceWatchers.watchersMap[PodResource] = &watcherData{ + watcher: &mockWatcher{}, + started: false, + 
metricsetsUsing: []string{"pod", "state_pod"}, + metadataObjects: make(map[string]kubernetes.Resource), + } + resourceWatchers.lock.Unlock() + + funcs := mockFuncs{} + resource1 := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + UID: types.UID("mockuid"), + Name: "enrich", + Labels: map[string]string{ + "label": "value", + }, + Namespace: "default", + }, + } + id1 := "default:enrich" + resource2 := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + UID: types.UID("mockuid2"), + Name: "enrich-2", + Labels: map[string]string{ + "label": "value", + }, + Namespace: "default-2", + }, + } + id2 := "default-2:enrich-2" + + config := &kubernetesConfig{ + Namespace: "test-ns", + SyncPeriod: time.Minute, + Node: "test-node", + AddResourceMetadata: &metadata.AddResourceMetadataConfig{ + CronJob: false, + Deployment: false, + }, + } + + log := logp.NewLogger(selector) + + enricher := buildMetadataEnricher("pod", PodResource, resourceWatchers, config, funcs.update, funcs.delete, funcs.index, log) + enricher.Start(resourceWatchers) + + resourceWatchers.lock.Lock() + + watcher := resourceWatchers.watchersMap[PodResource] + mockW := watcher.watcher.(*mockWatcher) + + mockW.handler.OnAdd(resource1) + metadataObjects := map[string]kubernetes.Resource{id1: resource1} + require.Equal(t, metadataObjects, watcher.metadataObjects) + + mockW.handler.OnUpdate(resource2) + metadataObjects[id2] = resource2 + require.Equal(t, metadataObjects, watcher.metadataObjects) + + mockW.handler.OnDelete(resource1) + delete(metadataObjects, id1) + require.Equal(t, metadataObjects, watcher.metadataObjects) + + resourceWatchers.lock.Unlock() + +} + type mockFuncs struct { updated kubernetes.Resource deleted kubernetes.Resource From 2df018836fb8ac5309994b8e3ad99174c7b06133 Mon Sep 17 00:00:00 2001 From: constanca Date: Tue, 6 Feb 2024 11:29:39 +0100 Subject: [PATCH 19/61] Add ids to metadataEvents instead of whole object Signed-off-by: constanca --- .../module/kubernetes/util/kubernetes.go | 46 ++++++++++--------- .../module/kubernetes/util/kubernetes_test.go | 25 +++++----- 2 files changed, 38 insertions(+), 33 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index e6796b94fbd..218fd7f500d 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -92,7 +92,7 @@ type watcherData struct { enrichers []*enricher // list of enrichers using this watcher - metadataObjects map[string]kubernetes.Resource // resulted metadata events from the resource event handler + metadataObjects map[string]bool // map of ids of each object received by the handler functions } type Watchers struct { @@ -274,7 +274,7 @@ func createWatcher( resourceWatchers.watchersMap[resourceName] = &watcherData{ watcher: watcher, started: false, - metadataObjects: make(map[string]kubernetes.Resource), + metadataObjects: make(map[string]bool), } return true, nil } @@ -786,21 +786,27 @@ func buildMetadataEnricher( watcher.enrichers = append(watcher.enrichers, enricher) // Check if there are past events for this resource and update metadata if there are - for _, obj := range watcher.metadataObjects { - newMetadataEvents := updateFunc(obj.(kubernetes.Resource)) - // add the new metadata to the watcher received metadata - for id, metadata := range newMetadataEvents { - enricher.metadata[id] = metadata + for key, _ := range watcher.metadataObjects { + obj, exists, err := watcher.watcher.Store().GetByKey(key) + if err != nil { + log.Errorf("Error trying to 
get the object from the store: %s", err) + } else { + if exists { + newMetadataEvents := updateFunc(obj.(kubernetes.Resource)) + // add the new metadata to the watcher received metadata + for id, metadata := range newMetadataEvents { + enricher.metadata[id] = metadata + } + } } } watcher.watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - // we need to calculate the id again in case it is a pod with multiple containers - r := obj.(kubernetes.Resource) - accessor, _ := meta.Accessor(r) - id := join(accessor.GetNamespace(), accessor.GetName()) - watcher.metadataObjects[id] = r + // Add object to the list of metadata objects of this watcher + accessor, _ := meta.Accessor(obj.(kubernetes.Resource)) + id := accessor.GetNamespace() + "/" + accessor.GetName() + watcher.metadataObjects[id] = true for _, enricher := range watcher.enrichers { enricher.Lock() @@ -813,11 +819,10 @@ func buildMetadataEnricher( } }, UpdateFunc: func(obj interface{}) { - // we need to calculate the id again in case it is a pod with multiple containers - r := obj.(kubernetes.Resource) - accessor, _ := meta.Accessor(r) - id := join(accessor.GetNamespace(), accessor.GetName()) - watcher.metadataObjects[id] = r + // Add object to the list of metadata objects of this watcher + accessor, _ := meta.Accessor(obj.(kubernetes.Resource)) + id := accessor.GetNamespace() + "/" + accessor.GetName() + watcher.metadataObjects[id] = true for _, enricher := range watcher.enrichers { enricher.Lock() @@ -830,10 +835,9 @@ func buildMetadataEnricher( } }, DeleteFunc: func(obj interface{}) { - // we need to calculate the id again in case it is a pod with multiple containers - r := obj.(kubernetes.Resource) - accessor, _ := meta.Accessor(r) - id := join(accessor.GetNamespace(), accessor.GetName()) + // Remove object from the list of metadata objects of this watcher + accessor, _ := meta.Accessor(obj.(kubernetes.Resource)) + id := accessor.GetNamespace() + "/" + accessor.GetName() delete(watcher.metadataObjects, id) for _, enricher := range watcher.enrichers { diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index 122a5c095e2..fffe1d84909 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -444,7 +444,7 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { watcher: &mockWatcher{}, started: false, metricsetsUsing: []string{"pod"}, - metadataObjects: make(map[string]kubernetes.Resource), + metadataObjects: make(map[string]bool), } resourceWatchers.lock.Unlock() @@ -459,8 +459,8 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { Namespace: "default", }, } - id := "default:enrich" - metadataObjects := map[string]kubernetes.Resource{id: resource} + id := "default/enrich" + metadataObjects := map[string]bool{id: true} config := &kubernetesConfig{ Namespace: "test-ns", @@ -533,7 +533,7 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { wData = resourceWatchers.watchersMap[PodResource] mockW = wData.watcher.(*mockWatcher) mockW.handler.OnDelete(resource) - require.Equal(t, map[string]kubernetes.Resource{}, watcher.metadataObjects) + require.Equal(t, map[string]bool{}, watcher.metadataObjects) resourceWatchers.lock.Unlock() require.Equal(t, resource, funcs.deleted) @@ -559,6 +559,8 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { // Test if we can add metadata from past events to an enricher that is associated // with 
a resource that had already triggered the handler functions func TestBuildMetadataEnricher_EventHandler_PastObjects(t *testing.T) { + log := logp.NewLogger(selector) + resourceWatchers := NewWatchers() resourceWatchers.lock.Lock() @@ -566,7 +568,7 @@ func TestBuildMetadataEnricher_EventHandler_PastObjects(t *testing.T) { watcher: &mockWatcher{}, started: false, metricsetsUsing: []string{"pod", "state_pod"}, - metadataObjects: make(map[string]kubernetes.Resource), + metadataObjects: make(map[string]bool), } resourceWatchers.lock.Unlock() @@ -581,7 +583,7 @@ func TestBuildMetadataEnricher_EventHandler_PastObjects(t *testing.T) { Namespace: "default", }, } - id1 := "default:enrich" + id1 := "default/enrich" resource2 := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ UID: types.UID("mockuid2"), @@ -592,7 +594,7 @@ func TestBuildMetadataEnricher_EventHandler_PastObjects(t *testing.T) { Namespace: "default-2", }, } - id2 := "default-2:enrich-2" + id2 := "default-2/enrich-2" config := &kubernetesConfig{ Namespace: "test-ns", @@ -604,8 +606,6 @@ func TestBuildMetadataEnricher_EventHandler_PastObjects(t *testing.T) { }, } - log := logp.NewLogger(selector) - enricher := buildMetadataEnricher("pod", PodResource, resourceWatchers, config, funcs.update, funcs.delete, funcs.index, log) enricher.Start(resourceWatchers) @@ -615,11 +615,11 @@ func TestBuildMetadataEnricher_EventHandler_PastObjects(t *testing.T) { mockW := watcher.watcher.(*mockWatcher) mockW.handler.OnAdd(resource1) - metadataObjects := map[string]kubernetes.Resource{id1: resource1} + metadataObjects := map[string]bool{id1: true} require.Equal(t, metadataObjects, watcher.metadataObjects) mockW.handler.OnUpdate(resource2) - metadataObjects[id2] = resource2 + metadataObjects[id2] = true require.Equal(t, metadataObjects, watcher.metadataObjects) mockW.handler.OnDelete(resource1) @@ -669,6 +669,7 @@ func (f *mockFuncs) index(m mapstr.M) string { type mockWatcher struct { handler kubernetes.ResourceEventHandler + store cache.Store } func (m *mockWatcher) Start() error { @@ -684,7 +685,7 @@ func (m *mockWatcher) AddEventHandler(r kubernetes.ResourceEventHandler) { } func (m *mockWatcher) Store() cache.Store { - return nil + return m.store } func (m *mockWatcher) Client() k8s.Interface { From 8d556e49a748d9f43d1dd16e4ec29a751fa7dcfb Mon Sep 17 00:00:00 2001 From: constanca Date: Tue, 6 Feb 2024 15:50:03 +0100 Subject: [PATCH 20/61] Pass metadata.MetaGen and *metadata.Resource as parameters to enricher Signed-off-by: constanca --- .../module/kubernetes/util/kubernetes.go | 58 ++++++++++++++----- .../module/kubernetes/util/kubernetes_test.go | 40 ++++++++++--- 2 files changed, 75 insertions(+), 23 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 218fd7f500d..b1d1a5862a3 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -73,6 +73,11 @@ type enricher struct { isPod bool config *kubernetesConfig log *logp.Logger + + // needed for the metadata enricher + // One of these two is always nil + specificMetaGen metadata.MetaGen + generalMetaGen *metadata.Resource } type nilEnricher struct{} @@ -503,7 +508,7 @@ func NewResourceMetadataEnricher( return &nilEnricher{} } - updateFunc := func(r kubernetes.Resource) map[string]mapstr.M { + updateFunc := func(r kubernetes.Resource, specificMetaGen metadata.MetaGen, generalMetaGen *metadata.Resource) map[string]mapstr.M { accessor, _ := meta.Accessor(r) id := 
join(accessor.GetNamespace(), accessor.GetName()) @@ -572,7 +577,17 @@ func NewResourceMetadataEnricher( return join(getString(e, mb.ModuleDataKey+".namespace"), getString(e, "name")) } - enricher := buildMetadataEnricher(metricsetName, resourceName, resourceWatchers, config, updateFunc, deleteFunc, indexFunc, log) + enricher := buildMetadataEnricher( + metricsetName, + resourceName, + resourceWatchers, + config, + specificMetaGen, + generalMetaGen, + updateFunc, + deleteFunc, + indexFunc, + log) if resourceName == PodResource { enricher.isPod = true } @@ -623,7 +638,7 @@ func NewContainerMetadataEnricher( return &nilEnricher{} } - updateFunc := func(r kubernetes.Resource) map[string]mapstr.M { + updateFunc := func(r kubernetes.Resource, metaGen metadata.MetaGen, generalMetaGen *metadata.Resource) map[string]mapstr.M { metadataEvents := make(map[string]mapstr.M) pod, ok := r.(*kubernetes.Pod) @@ -704,7 +719,18 @@ func NewContainerMetadataEnricher( return join(getString(e, mb.ModuleDataKey+".namespace"), getString(e, mb.ModuleDataKey+".pod.name"), getString(e, "name")) } - enricher := buildMetadataEnricher(metricsetName, PodResource, resourceWatchers, config, updateFunc, deleteFunc, indexFunc, log) + enricher := buildMetadataEnricher( + metricsetName, + PodResource, + resourceWatchers, + config, + metaGen, + nil, + updateFunc, + deleteFunc, + indexFunc, + log, + ) return enricher } @@ -764,18 +790,22 @@ func buildMetadataEnricher( resourceName string, resourceWatchers *Watchers, config *kubernetesConfig, - updateFunc func(kubernetes.Resource) map[string]mapstr.M, + specificMetaGen metadata.MetaGen, + generalMetaGen *metadata.Resource, + updateFunc func(kubernetes.Resource, metadata.MetaGen, *metadata.Resource) map[string]mapstr.M, deleteFunc func(kubernetes.Resource) []string, indexFunc func(e mapstr.M) string, log *logp.Logger) *enricher { enricher := &enricher{ - metadata: map[string]mapstr.M{}, - index: indexFunc, - resourceName: resourceName, - metricsetName: metricsetName, - config: config, - log: log, + metadata: map[string]mapstr.M{}, + index: indexFunc, + resourceName: resourceName, + metricsetName: metricsetName, + specificMetaGen: specificMetaGen, + generalMetaGen: generalMetaGen, + config: config, + log: log, } resourceWatchers.lock.Lock() @@ -792,7 +822,7 @@ func buildMetadataEnricher( log.Errorf("Error trying to get the object from the store: %s", err) } else { if exists { - newMetadataEvents := updateFunc(obj.(kubernetes.Resource)) + newMetadataEvents := updateFunc(obj.(kubernetes.Resource), enricher.specificMetaGen, enricher.generalMetaGen) // add the new metadata to the watcher received metadata for id, metadata := range newMetadataEvents { enricher.metadata[id] = metadata @@ -810,7 +840,7 @@ func buildMetadataEnricher( for _, enricher := range watcher.enrichers { enricher.Lock() - newMetadataEvents := updateFunc(obj.(kubernetes.Resource)) + newMetadataEvents := updateFunc(obj.(kubernetes.Resource), enricher.specificMetaGen, enricher.generalMetaGen) // add the new metadata to the watcher received metadata for id, metadata := range newMetadataEvents { enricher.metadata[id] = metadata @@ -826,7 +856,7 @@ func buildMetadataEnricher( for _, enricher := range watcher.enrichers { enricher.Lock() - updatedMetadataEvents := updateFunc(obj.(kubernetes.Resource)) + updatedMetadataEvents := updateFunc(obj.(kubernetes.Resource), enricher.specificMetaGen, enricher.generalMetaGen) // update the watcher metadata for id, metadata := range updatedMetadataEvents { enricher.metadata[id] = 
metadata diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index fffe1d84909..15bd48ac412 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -337,8 +337,18 @@ func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { log := logp.NewLogger(selector) - enricherNamespace := buildMetadataEnricher(metricsetNamespace, NamespaceResource, resourceWatchers, config, - funcs.update, funcs.delete, funcs.index, log) + enricherNamespace := buildMetadataEnricher( + metricsetNamespace, + NamespaceResource, + resourceWatchers, + config, + nil, + nil, + funcs.update, + funcs.delete, + funcs.index, + log, + ) resourceWatchers.lock.Lock() watcher := resourceWatchers.watchersMap[NamespaceResource] require.False(t, watcher.started) @@ -359,8 +369,18 @@ func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { resourceWatchers.lock.Unlock() // Stopping the deployment watcher should stop now both watchers - enricherDeployment := buildMetadataEnricher(metricsetDeployment, DeploymentResource, resourceWatchers, config, - funcs.update, funcs.delete, funcs.index, log) + enricherDeployment := buildMetadataEnricher( + metricsetDeployment, + DeploymentResource, + resourceWatchers, + config, + nil, + nil, + funcs.update, + funcs.delete, + funcs.index, + log, + ) enricherDeployment.Stop(resourceWatchers) resourceWatchers.lock.Lock() @@ -403,7 +423,7 @@ func TestBuildMetadataEnricher_Start_Stop_SameResources(t *testing.T) { } log := logp.NewLogger(selector) - enricherPod := buildMetadataEnricher(metricsetPod, PodResource, resourceWatchers, config, + enricherPod := buildMetadataEnricher(metricsetPod, PodResource, resourceWatchers, config, nil, nil, funcs.update, funcs.delete, funcs.index, log) resourceWatchers.lock.Lock() watcher := resourceWatchers.watchersMap[PodResource] @@ -425,7 +445,7 @@ func TestBuildMetadataEnricher_Start_Stop_SameResources(t *testing.T) { resourceWatchers.lock.Unlock() // Stopping the state_pod watcher should stop pod watcher - enricherStatePod := buildMetadataEnricher(metricsetStatePod, PodResource, resourceWatchers, config, + enricherStatePod := buildMetadataEnricher(metricsetStatePod, PodResource, resourceWatchers, config, nil, nil, funcs.update, funcs.delete, funcs.index, log) enricherStatePod.Stop(resourceWatchers) @@ -475,7 +495,8 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { metricset := "pod" log := logp.NewLogger(selector) - enricher := buildMetadataEnricher(metricset, PodResource, resourceWatchers, config, funcs.update, funcs.delete, funcs.index, log) + enricher := buildMetadataEnricher(metricset, PodResource, resourceWatchers, config, nil, nil, + funcs.update, funcs.delete, funcs.index, log) resourceWatchers.lock.Lock() wData := resourceWatchers.watchersMap[PodResource] mockW := wData.watcher.(*mockWatcher) @@ -606,7 +627,8 @@ func TestBuildMetadataEnricher_EventHandler_PastObjects(t *testing.T) { }, } - enricher := buildMetadataEnricher("pod", PodResource, resourceWatchers, config, funcs.update, funcs.delete, funcs.index, log) + enricher := buildMetadataEnricher("pod", PodResource, resourceWatchers, config, + nil, nil, funcs.update, funcs.delete, funcs.index, log) enricher.Start(resourceWatchers) resourceWatchers.lock.Lock() @@ -636,7 +658,7 @@ type mockFuncs struct { indexed mapstr.M } -func (f *mockFuncs) update(obj kubernetes.Resource) map[string]mapstr.M { +func (f *mockFuncs) update(obj kubernetes.Resource, 
specificMetaGen metadata.MetaGen, generalMetaGen *metadata.Resource) map[string]mapstr.M { accessor, _ := meta.Accessor(obj) f.updated = obj meta := mapstr.M{ From f1de11768d562aee17827e88f40184a5a58d2c30 Mon Sep 17 00:00:00 2001 From: constanca Date: Wed, 7 Feb 2024 13:04:44 +0100 Subject: [PATCH 21/61] - Save enrichers as map to avoid duplicates in list - Pass update and delete functions as parameters to enricher - Start dependency watchers before the main watcher Signed-off-by: constanca --- .../module/kubernetes/util/kubernetes.go | 74 ++++++++----------- .../module/kubernetes/util/kubernetes_test.go | 26 +++---- 2 files changed, 44 insertions(+), 56 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index b1d1a5862a3..87e954d99b3 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -68,16 +68,13 @@ type enricher struct { sync.RWMutex metadata map[string]mapstr.M index func(mapstr.M) string + updateFunc func(kubernetes.Resource) map[string]mapstr.M + deleteFunc func(kubernetes.Resource) []string metricsetName string resourceName string isPod bool config *kubernetesConfig log *logp.Logger - - // needed for the metadata enricher - // One of these two is always nil - specificMetaGen metadata.MetaGen - generalMetaGen *metadata.Resource } type nilEnricher struct{} @@ -87,15 +84,12 @@ func (*nilEnricher) Stop(*Watchers) {} func (*nilEnricher) Enrich([]mapstr.M) {} type watcherData struct { - // list of metricsets using this watcher - // metricsets are used instead of resource names to avoid conflicts between - // state_pod / pod, state_node / node, state_container / container - metricsetsUsing []string + metricsetsUsing []string // list of metricsets using this watcher watcher kubernetes.Watcher started bool // true if watcher has started, false otherwise - enrichers []*enricher // list of enrichers using this watcher + enrichers map[string]*enricher // map of enrichers using this watcher. 
The key is the metricset name metadataObjects map[string]bool // map of ids of each object received by the handler functions } @@ -280,6 +274,8 @@ func createWatcher( watcher: watcher, started: false, metadataObjects: make(map[string]bool), + enrichers: make(map[string]*enricher), + metricsetsUsing: make([]string, 0), } return true, nil } @@ -508,7 +504,7 @@ func NewResourceMetadataEnricher( return &nilEnricher{} } - updateFunc := func(r kubernetes.Resource, specificMetaGen metadata.MetaGen, generalMetaGen *metadata.Resource) map[string]mapstr.M { + updateFunc := func(r kubernetes.Resource) map[string]mapstr.M { accessor, _ := meta.Accessor(r) id := join(accessor.GetNamespace(), accessor.GetName()) @@ -582,8 +578,6 @@ func NewResourceMetadataEnricher( resourceName, resourceWatchers, config, - specificMetaGen, - generalMetaGen, updateFunc, deleteFunc, indexFunc, @@ -638,7 +632,7 @@ func NewContainerMetadataEnricher( return &nilEnricher{} } - updateFunc := func(r kubernetes.Resource, metaGen metadata.MetaGen, generalMetaGen *metadata.Resource) map[string]mapstr.M { + updateFunc := func(r kubernetes.Resource) map[string]mapstr.M { metadataEvents := make(map[string]mapstr.M) pod, ok := r.(*kubernetes.Pod) @@ -724,8 +718,6 @@ func NewContainerMetadataEnricher( PodResource, resourceWatchers, config, - metaGen, - nil, updateFunc, deleteFunc, indexFunc, @@ -790,22 +782,20 @@ func buildMetadataEnricher( resourceName string, resourceWatchers *Watchers, config *kubernetesConfig, - specificMetaGen metadata.MetaGen, - generalMetaGen *metadata.Resource, - updateFunc func(kubernetes.Resource, metadata.MetaGen, *metadata.Resource) map[string]mapstr.M, + updateFunc func(kubernetes.Resource) map[string]mapstr.M, deleteFunc func(kubernetes.Resource) []string, indexFunc func(e mapstr.M) string, log *logp.Logger) *enricher { enricher := &enricher{ - metadata: map[string]mapstr.M{}, - index: indexFunc, - resourceName: resourceName, - metricsetName: metricsetName, - specificMetaGen: specificMetaGen, - generalMetaGen: generalMetaGen, - config: config, - log: log, + metadata: map[string]mapstr.M{}, + index: indexFunc, + updateFunc: updateFunc, + deleteFunc: deleteFunc, + resourceName: resourceName, + metricsetName: metricsetName, + config: config, + log: log, } resourceWatchers.lock.Lock() @@ -813,7 +803,7 @@ func buildMetadataEnricher( watcher := resourceWatchers.watchersMap[resourceName] if watcher != nil { - watcher.enrichers = append(watcher.enrichers, enricher) + watcher.enrichers[metricsetName] = enricher // Check if there are past events for this resource and update metadata if there are for key, _ := range watcher.metadataObjects { @@ -822,7 +812,7 @@ func buildMetadataEnricher( log.Errorf("Error trying to get the object from the store: %s", err) } else { if exists { - newMetadataEvents := updateFunc(obj.(kubernetes.Resource), enricher.specificMetaGen, enricher.generalMetaGen) + newMetadataEvents := enricher.updateFunc(obj.(kubernetes.Resource)) // add the new metadata to the watcher received metadata for id, metadata := range newMetadataEvents { enricher.metadata[id] = metadata @@ -840,7 +830,7 @@ func buildMetadataEnricher( for _, enricher := range watcher.enrichers { enricher.Lock() - newMetadataEvents := updateFunc(obj.(kubernetes.Resource), enricher.specificMetaGen, enricher.generalMetaGen) + newMetadataEvents := enricher.updateFunc(obj.(kubernetes.Resource)) // add the new metadata to the watcher received metadata for id, metadata := range newMetadataEvents { enricher.metadata[id] = metadata @@ 
-856,7 +846,7 @@ func buildMetadataEnricher( for _, enricher := range watcher.enrichers { enricher.Lock() - updatedMetadataEvents := updateFunc(obj.(kubernetes.Resource), enricher.specificMetaGen, enricher.generalMetaGen) + updatedMetadataEvents := enricher.updateFunc(obj.(kubernetes.Resource)) // update the watcher metadata for id, metadata := range updatedMetadataEvents { enricher.metadata[id] = metadata @@ -872,10 +862,9 @@ func buildMetadataEnricher( for _, enricher := range watcher.enrichers { enricher.Lock() - ids := deleteFunc(obj.(kubernetes.Resource)) + ids := enricher.deleteFunc(obj.(kubernetes.Resource)) // update this watcher events by removing all the metadata[id] for _, id := range ids { - delete(watcher.metadataObjects, id) delete(enricher.metadata, id) } enricher.Unlock() @@ -891,15 +880,7 @@ func (e *enricher) Start(resourceWatchers *Watchers) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() - resourceWatcher := resourceWatchers.watchersMap[e.resourceName] - if resourceWatcher != nil && !resourceWatcher.started { - if err := resourceWatcher.watcher.Start(); err != nil { - e.log.Warnf("Error starting %s watcher: %s", e.resourceName, err) - } else { - resourceWatcher.started = true - } - } - + // we first need to start the dependencies extras := getExtraWatchers(e.resourceName, e.config.AddResourceMetadata) for _, extra := range extras { extraWatcher := resourceWatchers.watchersMap[extra] @@ -911,6 +892,15 @@ func (e *enricher) Start(resourceWatchers *Watchers) { } } } + + resourceWatcher := resourceWatchers.watchersMap[e.resourceName] + if resourceWatcher != nil && !resourceWatcher.started { + if err := resourceWatcher.watcher.Start(); err != nil { + e.log.Warnf("Error starting %s watcher: %s", e.resourceName, err) + } else { + resourceWatcher.started = true + } + } } func (e *enricher) Stop(resourceWatchers *Watchers) { diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index 15bd48ac412..71dae79da7d 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -130,13 +130,12 @@ func TestAddToMetricsetsUsing(t *testing.T) { resourceWatchers.lock.Lock() require.NotNil(t, resourceWatchers.watchersMap[DeploymentResource].watcher) - require.Nil(t, resourceWatchers.watchersMap[DeploymentResource].metricsetsUsing) + require.Equal(t, []string{}, resourceWatchers.watchersMap[DeploymentResource].metricsetsUsing) resourceWatchers.lock.Unlock() metricsetDeployment := "state_deployment" addToMetricsetsUsing(DeploymentResource, metricsetDeployment, resourceWatchers) resourceWatchers.lock.Lock() - require.NotNil(t, resourceWatchers.watchersMap[DeploymentResource].metricsetsUsing) require.Equal(t, []string{metricsetDeployment}, resourceWatchers.watchersMap[DeploymentResource].metricsetsUsing) resourceWatchers.lock.Unlock() @@ -316,11 +315,13 @@ func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { watcher: &mockWatcher{}, started: false, metricsetsUsing: []string{metricsetNamespace, metricsetDeployment}, + enrichers: make(map[string]*enricher), } resourceWatchers.watchersMap[DeploymentResource] = &watcherData{ watcher: &mockWatcher{}, started: true, metricsetsUsing: []string{metricsetDeployment}, + enrichers: make(map[string]*enricher), } resourceWatchers.lock.Unlock() @@ -342,8 +343,6 @@ func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { NamespaceResource, resourceWatchers, config, - nil, - nil, funcs.update, funcs.delete, 
funcs.index, @@ -374,8 +373,6 @@ func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { DeploymentResource, resourceWatchers, config, - nil, - nil, funcs.update, funcs.delete, funcs.index, @@ -394,7 +391,6 @@ func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { require.Equal(t, []string{}, watcher.metricsetsUsing) resourceWatchers.lock.Unlock() - } func TestBuildMetadataEnricher_Start_Stop_SameResources(t *testing.T) { @@ -408,6 +404,7 @@ func TestBuildMetadataEnricher_Start_Stop_SameResources(t *testing.T) { watcher: &mockWatcher{}, started: false, metricsetsUsing: []string{metricsetStatePod, metricsetPod}, + enrichers: make(map[string]*enricher), } resourceWatchers.lock.Unlock() @@ -423,7 +420,7 @@ func TestBuildMetadataEnricher_Start_Stop_SameResources(t *testing.T) { } log := logp.NewLogger(selector) - enricherPod := buildMetadataEnricher(metricsetPod, PodResource, resourceWatchers, config, nil, nil, + enricherPod := buildMetadataEnricher(metricsetPod, PodResource, resourceWatchers, config, funcs.update, funcs.delete, funcs.index, log) resourceWatchers.lock.Lock() watcher := resourceWatchers.watchersMap[PodResource] @@ -445,7 +442,7 @@ func TestBuildMetadataEnricher_Start_Stop_SameResources(t *testing.T) { resourceWatchers.lock.Unlock() // Stopping the state_pod watcher should stop pod watcher - enricherStatePod := buildMetadataEnricher(metricsetStatePod, PodResource, resourceWatchers, config, nil, nil, + enricherStatePod := buildMetadataEnricher(metricsetStatePod, PodResource, resourceWatchers, config, funcs.update, funcs.delete, funcs.index, log) enricherStatePod.Stop(resourceWatchers) @@ -465,6 +462,7 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { started: false, metricsetsUsing: []string{"pod"}, metadataObjects: make(map[string]bool), + enrichers: make(map[string]*enricher), } resourceWatchers.lock.Unlock() @@ -495,7 +493,7 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { metricset := "pod" log := logp.NewLogger(selector) - enricher := buildMetadataEnricher(metricset, PodResource, resourceWatchers, config, nil, nil, + enricher := buildMetadataEnricher(metricset, PodResource, resourceWatchers, config, funcs.update, funcs.delete, funcs.index, log) resourceWatchers.lock.Lock() wData := resourceWatchers.watchersMap[PodResource] @@ -590,6 +588,7 @@ func TestBuildMetadataEnricher_EventHandler_PastObjects(t *testing.T) { started: false, metricsetsUsing: []string{"pod", "state_pod"}, metadataObjects: make(map[string]bool), + enrichers: make(map[string]*enricher), } resourceWatchers.lock.Unlock() @@ -628,7 +627,7 @@ func TestBuildMetadataEnricher_EventHandler_PastObjects(t *testing.T) { } enricher := buildMetadataEnricher("pod", PodResource, resourceWatchers, config, - nil, nil, funcs.update, funcs.delete, funcs.index, log) + funcs.update, funcs.delete, funcs.index, log) enricher.Start(resourceWatchers) resourceWatchers.lock.Lock() @@ -658,7 +657,7 @@ type mockFuncs struct { indexed mapstr.M } -func (f *mockFuncs) update(obj kubernetes.Resource, specificMetaGen metadata.MetaGen, generalMetaGen *metadata.Resource) map[string]mapstr.M { +func (f *mockFuncs) update(obj kubernetes.Resource) map[string]mapstr.M { accessor, _ := meta.Accessor(obj) f.updated = obj meta := mapstr.M{ @@ -691,7 +690,6 @@ func (f *mockFuncs) index(m mapstr.M) string { type mockWatcher struct { handler kubernetes.ResourceEventHandler - store cache.Store } func (m *mockWatcher) Start() error { @@ -707,7 +705,7 @@ func (m *mockWatcher) AddEventHandler(r 
kubernetes.ResourceEventHandler) { } func (m *mockWatcher) Store() cache.Store { - return m.store + return nil } func (m *mockWatcher) Client() k8s.Interface { From 186032ee5656eda8eb589cda44e82d8d71c0bc55 Mon Sep 17 00:00:00 2001 From: constanca Date: Wed, 7 Feb 2024 17:00:29 +0100 Subject: [PATCH 22/61] - Remove extras : and / on the resources names Signed-off-by: constanca --- .../module/kubernetes/util/kubernetes.go | 38 +++++++++++++++---- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 87e954d99b3..384d48c7e21 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -506,7 +506,10 @@ func NewResourceMetadataEnricher( updateFunc := func(r kubernetes.Resource) map[string]mapstr.M { accessor, _ := meta.Accessor(r) - id := join(accessor.GetNamespace(), accessor.GetName()) + id := accessor.GetName() + if accessor.GetNamespace() != "" { + id = join(accessor.GetNamespace(), accessor.GetName()) + } switch r := r.(type) { case *kubernetes.Pod: @@ -565,12 +568,25 @@ func NewResourceMetadataEnricher( metricsRepo.DeleteNodeStore(nodeName) } - id := join(accessor.GetNamespace(), accessor.GetName()) + id := accessor.GetName() + if accessor.GetNamespace() != "" { + id = join(accessor.GetNamespace(), accessor.GetName()) + } return []string{id} } indexFunc := func(e mapstr.M) string { - return join(getString(e, mb.ModuleDataKey+".namespace"), getString(e, "name")) + name := getString(e, "name") + namespace := getString(e, mb.ModuleDataKey+".namespace") + id := "" + if name != "" && namespace != "" { + id = join(namespace, name) + } else if namespace != "" { + id = namespace + } else { + id = name + } + return id } enricher := buildMetadataEnricher( @@ -825,7 +841,10 @@ func buildMetadataEnricher( AddFunc: func(obj interface{}) { // Add object to the list of metadata objects of this watcher accessor, _ := meta.Accessor(obj.(kubernetes.Resource)) - id := accessor.GetNamespace() + "/" + accessor.GetName() + id := accessor.GetName() + if accessor.GetNamespace() != "" { + id = accessor.GetNamespace() + "/" + id + } watcher.metadataObjects[id] = true for _, enricher := range watcher.enrichers { @@ -841,13 +860,15 @@ func buildMetadataEnricher( UpdateFunc: func(obj interface{}) { // Add object to the list of metadata objects of this watcher accessor, _ := meta.Accessor(obj.(kubernetes.Resource)) - id := accessor.GetNamespace() + "/" + accessor.GetName() + id := accessor.GetName() + if accessor.GetNamespace() != "" { + id = accessor.GetNamespace() + "/" + id + } watcher.metadataObjects[id] = true for _, enricher := range watcher.enrichers { enricher.Lock() updatedMetadataEvents := enricher.updateFunc(obj.(kubernetes.Resource)) - // update the watcher metadata for id, metadata := range updatedMetadataEvents { enricher.metadata[id] = metadata } @@ -857,7 +878,10 @@ func buildMetadataEnricher( DeleteFunc: func(obj interface{}) { // Remove object from the list of metadata objects of this watcher accessor, _ := meta.Accessor(obj.(kubernetes.Resource)) - id := accessor.GetNamespace() + "/" + accessor.GetName() + id := accessor.GetName() + if accessor.GetNamespace() != "" { + id = accessor.GetNamespace() + "/" + id + } delete(watcher.metadataObjects, id) for _, enricher := range watcher.enrichers { From 65beebe5fc2daf9db364d1d15c36c1bbee0a729d Mon Sep 17 00:00:00 2001 From: constanca Date: Wed, 7 Feb 2024 17:28:18 +0100 Subject: [PATCH 
23/61] Reuse accessor.GetNamespace() variable Signed-off-by: constanca --- .../module/kubernetes/util/kubernetes.go | 25 +++++++++++-------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 384d48c7e21..1ed4aa5c1f9 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -507,8 +507,9 @@ func NewResourceMetadataEnricher( updateFunc := func(r kubernetes.Resource) map[string]mapstr.M { accessor, _ := meta.Accessor(r) id := accessor.GetName() - if accessor.GetNamespace() != "" { - id = join(accessor.GetNamespace(), accessor.GetName()) + namespace := accessor.GetNamespace() + if namespace != "" { + id = join(namespace, id) } switch r := r.(type) { @@ -569,8 +570,9 @@ func NewResourceMetadataEnricher( } id := accessor.GetName() - if accessor.GetNamespace() != "" { - id = join(accessor.GetNamespace(), accessor.GetName()) + namespace := accessor.GetNamespace() + if namespace != "" { + id = join(namespace, id) } return []string{id} } @@ -842,8 +844,9 @@ func buildMetadataEnricher( // Add object to the list of metadata objects of this watcher accessor, _ := meta.Accessor(obj.(kubernetes.Resource)) id := accessor.GetName() - if accessor.GetNamespace() != "" { - id = accessor.GetNamespace() + "/" + id + namespace := accessor.GetNamespace() + if namespace != "" { + id = namespace + "/" + id } watcher.metadataObjects[id] = true @@ -861,8 +864,9 @@ func buildMetadataEnricher( // Add object to the list of metadata objects of this watcher accessor, _ := meta.Accessor(obj.(kubernetes.Resource)) id := accessor.GetName() - if accessor.GetNamespace() != "" { - id = accessor.GetNamespace() + "/" + id + namespace := accessor.GetNamespace() + if namespace != "" { + id = namespace + "/" + id } watcher.metadataObjects[id] = true @@ -879,8 +883,9 @@ func buildMetadataEnricher( // Remove object from the list of metadata objects of this watcher accessor, _ := meta.Accessor(obj.(kubernetes.Resource)) id := accessor.GetName() - if accessor.GetNamespace() != "" { - id = accessor.GetNamespace() + "/" + id + namespace := accessor.GetNamespace() + if namespace != "" { + id = namespace + "/" + id } delete(watcher.metadataObjects, id) From 59a4295fc97796fb28afa08bca8403752c8d2de5 Mon Sep 17 00:00:00 2001 From: constanca Date: Thu, 8 Feb 2024 09:24:01 +0100 Subject: [PATCH 24/61] Avoid concurrent goroutine when using handler functions Signed-off-by: constanca --- .../module/kubernetes/util/kubernetes.go | 9 +++++++++ .../module/kubernetes/util/kubernetes_test.go | 20 ++++++++++++++++--- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 1ed4aa5c1f9..a25a453765c 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -841,6 +841,9 @@ func buildMetadataEnricher( watcher.watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { + resourceWatchers.lock.Lock() + defer resourceWatchers.lock.Unlock() + // Add object to the list of metadata objects of this watcher accessor, _ := meta.Accessor(obj.(kubernetes.Resource)) id := accessor.GetName() @@ -861,6 +864,9 @@ func buildMetadataEnricher( } }, UpdateFunc: func(obj interface{}) { + resourceWatchers.lock.Lock() + defer resourceWatchers.lock.Unlock() + // Add object to the list of metadata objects 
of this watcher accessor, _ := meta.Accessor(obj.(kubernetes.Resource)) id := accessor.GetName() @@ -880,6 +886,9 @@ func buildMetadataEnricher( } }, DeleteFunc: func(obj interface{}) { + resourceWatchers.lock.Lock() + defer resourceWatchers.lock.Unlock() + // Remove object from the list of metadata objects of this watcher accessor, _ := meta.Accessor(obj.(kubernetes.Resource)) id := accessor.GetName() diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index 71dae79da7d..157bcfd2232 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -505,9 +505,12 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { resourceWatchers.lock.Lock() watcher := resourceWatchers.watchersMap[PodResource] require.True(t, watcher.started) - mockW = watcher.watcher.(*mockWatcher) + resourceWatchers.lock.Unlock() + mockW.handler.OnAdd(resource) + + resourceWatchers.lock.Lock() require.Equal(t, metadataObjects, watcher.metadataObjects) resourceWatchers.lock.Unlock() @@ -551,7 +554,11 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { resourceWatchers.lock.Lock() wData = resourceWatchers.watchersMap[PodResource] mockW = wData.watcher.(*mockWatcher) + resourceWatchers.lock.Unlock() + mockW.handler.OnDelete(resource) + + resourceWatchers.lock.Lock() require.Equal(t, map[string]bool{}, watcher.metadataObjects) resourceWatchers.lock.Unlock() @@ -634,21 +641,28 @@ func TestBuildMetadataEnricher_EventHandler_PastObjects(t *testing.T) { watcher := resourceWatchers.watchersMap[PodResource] mockW := watcher.watcher.(*mockWatcher) + resourceWatchers.lock.Unlock() mockW.handler.OnAdd(resource1) + + resourceWatchers.lock.Lock() metadataObjects := map[string]bool{id1: true} require.Equal(t, metadataObjects, watcher.metadataObjects) + resourceWatchers.lock.Unlock() mockW.handler.OnUpdate(resource2) + + resourceWatchers.lock.Lock() metadataObjects[id2] = true require.Equal(t, metadataObjects, watcher.metadataObjects) + resourceWatchers.lock.Unlock() mockW.handler.OnDelete(resource1) + + resourceWatchers.lock.Lock() delete(metadataObjects, id1) require.Equal(t, metadataObjects, watcher.metadataObjects) - resourceWatchers.lock.Unlock() - } type mockFuncs struct { From abc02010de6f58be4f809c4c7cf83405008f3327 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 8 Feb 2024 10:46:09 +0100 Subject: [PATCH 25/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Michael Katsoulis --- metricbeat/module/kubernetes/util/kubernetes.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index a25a453765c..fb431880ab1 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -282,7 +282,7 @@ func createWatcher( return false, nil } -// addToMetricsetsUsing adds metricset identified by metricsetUsing to the list of resources using the watcher +// addToMetricsetsUsing adds metricset identified by metricsetUsing to the list of resources using the shared watcher // identified by resourceName func addToMetricsetsUsing(resourceName string, metricsetUsing string, resourceWatchers *Watchers) { resourceWatchers.lock.Lock() From cb77cd82031ee9b186be6d655b722694cd629c2a Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 8 Feb 2024 10:46:31 +0100 Subject: [PATCH 26/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Michael Katsoulis --- metricbeat/module/kubernetes/util/kubernetes.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index fb431880ab1..095265f8c72 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -304,7 +304,8 @@ func addToMetricsetsUsing(resourceName string, metricsetUsing string, resourceWa } } -// removeFromMetricsetsUsing returns true if element was removed and new size of array. +// removeFromMetricsetsUsing removes the metricset from the list of resources using the shared watcher. +// It returns true if element was removed and new size of array. // The cache should be locked when called. func removeFromMetricsetsUsing(resourceName string, notUsingName string, resourceWatchers *Watchers) (bool, int) { data, ok := resourceWatchers.watchersMap[resourceName] From 3a67987e1968cafe85644b6aa5d02c300ea32859 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 8 Feb 2024 10:47:06 +0100 Subject: [PATCH 27/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Michael Katsoulis --- metricbeat/module/kubernetes/util/kubernetes.go | 1 + 1 file changed, 1 insertion(+) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 095265f8c72..1ef1158786f 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -357,6 +357,7 @@ func createAllWatchers( addToMetricsetsUsing(resourceName, metricsetName, resourceWatchers) // Create the extra watchers required by this resource + // For example pod requires also namespace and node watcher and possibly replicaset and job watcher. 
extraWatchers := getExtraWatchers(resourceName, config.AddResourceMetadata) for _, extra := range extraWatchers { extraRes := getResource(extra) From f2d65a49f5da14cc9d86625ff9cde4419295088b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 8 Feb 2024 10:47:27 +0100 Subject: [PATCH 28/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Michael Katsoulis --- metricbeat/module/kubernetes/util/kubernetes.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 1ef1158786f..df16964af03 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -380,7 +380,7 @@ func createAllWatchers( return nil } -// createMetadataGen creates the metadata generator for resources in general +// createMetadataGen creates the metadata generator for resources other than pod and service func createMetadataGen(client k8sclient.Interface, commonConfig *conf.C, addResourceMetadata *metadata.AddResourceMetadataConfig, resourceName string, resourceWatchers *Watchers) (*metadata.Resource, error) { From 1d1a1425dbbb022359d2df409f7c62f0509757fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 8 Feb 2024 10:49:37 +0100 Subject: [PATCH 29/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Michael Katsoulis --- metricbeat/module/kubernetes/util/kubernetes.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index df16964af03..43e8284986e 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -458,6 +458,12 @@ func createMetadataGenSpecific(client k8sclient.Interface, commonConfig *conf.C, return metaGen, fmt.Errorf("failed to create a metadata generator for resource %s", resourceName) } +// NewResourceMetadataEnricher returns a metadata enricher for a given resource +// For the metadata enrichment, resource watchers are used which are shared between +// the different metricsets. For example for pod metricset, a pod watcher, a namespace and +// node watcher are by default needed in addition to job and replicaset watcher according +// to configuration. These watchers will be also used by other metricsets that require them +// like state_pod, state_container, node etc. 
func NewResourceMetadataEnricher( base mb.BaseMetricSet, metricsRepo *MetricsRepo, From 992400e69844fc6fff16e41536e64d6571e91b55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 8 Feb 2024 10:49:53 +0100 Subject: [PATCH 30/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Michael Katsoulis --- metricbeat/module/kubernetes/util/kubernetes.go | 1 + 1 file changed, 1 insertion(+) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 43e8284986e..d053b8c6534 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -478,6 +478,7 @@ func NewResourceMetadataEnricher( } // This type of config is needed for the metadata generator + // and includes detailed settings for metadata enrichment commonMetaConfig := metadata.Config{} if err := base.Module().UnpackConfig(&commonMetaConfig); err != nil { log.Errorf("Error initializing Kubernetes metadata enricher: %s", err) From 563cce77b61467a5a3fd0eb8e1328c3f545d2164 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 8 Feb 2024 10:50:30 +0100 Subject: [PATCH 31/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Michael Katsoulis --- metricbeat/module/kubernetes/util/kubernetes.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index d053b8c6534..c6563904578 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -513,6 +513,8 @@ func NewResourceMetadataEnricher( return &nilEnricher{} } + // updateFunc to be used as the resource watcher's add and update handler. 
+ // It is responsible of generating the metadata for a detected resource updateFunc := func(r kubernetes.Resource) map[string]mapstr.M { accessor, _ := meta.Accessor(r) id := accessor.GetName() From 91706196ab384b9053ea0792c60c48c7e64f10f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 8 Feb 2024 11:09:38 +0100 Subject: [PATCH 32/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Michael Katsoulis --- metricbeat/module/kubernetes/util/kubernetes.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index c6563904578..229f8fc39d2 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -571,6 +571,8 @@ func NewResourceMetadataEnricher( } } + // deleteFunc to be used as the resource watcher's delete handler + // If a resource deletion is detected it returns the id of the resource deleteFunc := func(r kubernetes.Resource) []string { accessor, _ := meta.Accessor(r) From 014d255fb4aed8b6ddc5f869b118a1784a7feb3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 8 Feb 2024 11:09:53 +0100 Subject: [PATCH 33/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Michael Katsoulis --- metricbeat/module/kubernetes/util/kubernetes.go | 1 + 1 file changed, 1 insertion(+) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 229f8fc39d2..e40f7523091 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -590,6 +590,7 @@ func NewResourceMetadataEnricher( return []string{id} } + // indexFunc retrieves the resource id from a given event indexFunc := func(e mapstr.M) string { name := getString(e, "name") namespace := getString(e, mb.ModuleDataKey+".namespace") From ed338f713e2ccb727a2289cf26e290597c4309a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 8 Feb 2024 11:10:07 +0100 Subject: [PATCH 34/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Michael Katsoulis --- metricbeat/module/kubernetes/util/kubernetes.go | 1 + 1 file changed, 1 insertion(+) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index e40f7523091..49da4925365 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -605,6 +605,7 @@ func NewResourceMetadataEnricher( return id } + // create a metadata enricher for this metricset enricher := buildMetadataEnricher( metricsetName, resourceName, From 10bd3c351d69d2aed44eebc5a78652627e28bd03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 8 Feb 2024 11:10:27 +0100 Subject: [PATCH 35/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Michael Katsoulis --- metricbeat/module/kubernetes/util/kubernetes.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 49da4925365..302977b483d 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ 
b/metricbeat/module/kubernetes/util/kubernetes.go @@ -810,6 +810,10 @@ func join(fields ...string) string { return strings.Join(fields, ":") } +// buildMetadataEnricher builds and returns a metadata enricher for a given metricset. +// It appends the new enricher to the watcher.enrichers map for the given resource watcher. +// It also updates the add, update and delete event handlers of the watcher in order to retrieve +// the metadata of all enrichers associated to that watcher. func buildMetadataEnricher( metricsetName string, resourceName string, From 8c05496aa835b73da197188091d7ff4d21359772 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 8 Feb 2024 11:10:43 +0100 Subject: [PATCH 36/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Michael Katsoulis --- metricbeat/module/kubernetes/util/kubernetes.go | 1 + 1 file changed, 1 insertion(+) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 302977b483d..3e04ac73242 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -986,6 +986,7 @@ func (e *enricher) Stop(resourceWatchers *Watchers) { } } +// Enrich enriches events with metadata saved in the enricher.metadata map func (e *enricher) Enrich(events []mapstr.M) { e.RLock() defer e.RUnlock() From b55176abb55276b8786fdbab930a36b8588fe94a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 8 Feb 2024 11:11:00 +0100 Subject: [PATCH 37/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Michael Katsoulis --- metricbeat/module/kubernetes/util/kubernetes.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 3e04ac73242..9a281a9da8b 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -960,6 +960,8 @@ func (e *enricher) Start(resourceWatchers *Watchers) { } } +// Stop removes the enricher's metricset as a user of the associated watchers. +// If no metricset is using the watchers anymore it stops them. func (e *enricher) Stop(resourceWatchers *Watchers) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() From d1506bab6087ebf71d98b3fd60c4529afff9cc18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 8 Feb 2024 11:11:12 +0100 Subject: [PATCH 38/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Michael Katsoulis --- metricbeat/module/kubernetes/util/kubernetes.go | 1 + 1 file changed, 1 insertion(+) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 9a281a9da8b..9ca7a4c41d6 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -950,6 +950,7 @@ func (e *enricher) Start(resourceWatchers *Watchers) { } } + // Start the main watcher if not already started. 
resourceWatcher := resourceWatchers.watchersMap[e.resourceName] if resourceWatcher != nil && !resourceWatcher.started { if err := resourceWatcher.watcher.Start(); err != nil { From 68471b875b0930715027fc783e470d0f563c760c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 8 Feb 2024 11:11:33 +0100 Subject: [PATCH 39/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Michael Katsoulis --- metricbeat/module/kubernetes/util/kubernetes.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 9ca7a4c41d6..d1690d0b366 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -937,7 +937,10 @@ func (e *enricher) Start(resourceWatchers *Watchers) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() - // we first need to start the dependencies + // Each resource may require multiple watchers. We firstly start the + // extra watchers as they are a dependency for the main resource watcher + // For example a pod watcher requires namespace and node watcher to be started + // first. extras := getExtraWatchers(e.resourceName, e.config.AddResourceMetadata) for _, extra := range extras { extraWatcher := resourceWatchers.watchersMap[extra] From 63cb59e8bdfb739055ada0e078f481970eff7f76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 8 Feb 2024 11:11:53 +0100 Subject: [PATCH 40/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Michael Katsoulis --- metricbeat/module/kubernetes/util/kubernetes.go | 1 + 1 file changed, 1 insertion(+) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index d1690d0b366..719db28db93 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -933,6 +933,7 @@ func buildMetadataEnricher( return enricher } +// Start starts all the watchers associated with a given enricher resource func (e *enricher) Start(resourceWatchers *Watchers) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() From 8be51eacad451a1f7ccf0745c406c6cfcc1f3c6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 8 Feb 2024 11:12:15 +0100 Subject: [PATCH 41/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Michael Katsoulis --- metricbeat/module/kubernetes/util/kubernetes.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 719db28db93..0be9882ef58 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -864,6 +864,8 @@ func buildMetadataEnricher( defer resourceWatchers.lock.Unlock() // Add object to the list of metadata objects of this watcher + // so it can be used by enrichers created after the event is + // triggered accessor, _ := meta.Accessor(obj.(kubernetes.Resource)) id := accessor.GetName() namespace := accessor.GetNamespace() From 7aedbe8b2b3d6fa62ed6c75842ca1ae9d0df5bf2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 8 Feb 2024 
11:12:35 +0100 Subject: [PATCH 42/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Michael Katsoulis --- metricbeat/module/kubernetes/util/kubernetes.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 0be9882ef58..37e40072461 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -858,6 +858,9 @@ func buildMetadataEnricher( } } + // AddEventHandler sets add, update and delete methods of watcher. + // Those methods are triggered when an event is detected for a + // resource creation, update or deletion. watcher.watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { resourceWatchers.lock.Lock() From a902efe715d940d41d607e884073d628dafc23a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Thu, 8 Feb 2024 11:13:00 +0100 Subject: [PATCH 43/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Michael Katsoulis --- metricbeat/module/kubernetes/util/kubernetes.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 37e40072461..755d31db10c 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -842,7 +842,11 @@ func buildMetadataEnricher( if watcher != nil { watcher.enrichers[metricsetName] = enricher - // Check if there are past events for this resource and update metadata if there are + // Check if this shared watcher has already detected resources from a previous enricher. + // In that case, for each resource, call the updateFunc of the current enricher to + // update its metadata. This is needed in cases where the watcher has already been + // notified for new/updated resources while the enricher for current metricset has not + // built yet(example is pod, state_pod metricsets). for key, _ := range watcher.metadataObjects { obj, exists, err := watcher.watcher.Store().GetByKey(key) if err != nil { From 60e6556e079f5aff487006fdb15095207c545c2c Mon Sep 17 00:00:00 2001 From: constanca Date: Thu, 8 Feb 2024 11:23:01 +0100 Subject: [PATCH 44/61] run mage check and make update Signed-off-by: constanca --- .../module/kubernetes/util/kubernetes.go | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 755d31db10c..afd2be5ad2c 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -262,7 +262,7 @@ func createWatcher( _, ok := resourceWatchers.watchersMap[resourceName] // if it does not exist, create the watcher if !ok { - // check if we need to add namespace to the options + // check if we need to add namespace to the watcher options if isNamespaced(resourceName) { options.Namespace = namespace } @@ -513,8 +513,8 @@ func NewResourceMetadataEnricher( return &nilEnricher{} } - // updateFunc to be used as the resource watcher's add and update handler. - // It is responsible of generating the metadata for a detected resource + // updateFunc to be used as the resource watcher's add and update handler. 
+ // It is responsible for generating the metadata for a detected resource updateFunc := func(r kubernetes.Resource) map[string]mapstr.M { accessor, _ := meta.Accessor(r) id := accessor.GetName() @@ -571,7 +571,7 @@ func NewResourceMetadataEnricher( } } - // deleteFunc to be used as the resource watcher's delete handler + // deleteFunc to be used as the resource watcher's delete handler // If a resource deletion is detected it returns the id of the resource deleteFunc := func(r kubernetes.Resource) []string { accessor, _ := meta.Accessor(r) @@ -590,7 +590,7 @@ func NewResourceMetadataEnricher( return []string{id} } - // indexFunc retrieves the resource id from a given event + // indexFunc retrieves the resource id from a given event indexFunc := func(e mapstr.M) string { name := getString(e, "name") namespace := getString(e, mb.ModuleDataKey+".namespace") @@ -605,7 +605,7 @@ func NewResourceMetadataEnricher( return id } - // create a metadata enricher for this metricset + // create a metadata enricher for this metricset enricher := buildMetadataEnricher( metricsetName, resourceName, @@ -846,7 +846,7 @@ func buildMetadataEnricher( // In that case, for each resource, call the updateFunc of the current enricher to // update its metadata. This is needed in cases where the watcher has already been // notified for new/updated resources while the enricher for current metricset has not - // built yet(example is pod, state_pod metricsets). + // built yet (example is pod, state_pod metricsets). for key, _ := range watcher.metadataObjects { obj, exists, err := watcher.watcher.Store().GetByKey(key) if err != nil { @@ -862,7 +862,7 @@ func buildMetadataEnricher( } } - // AddEventHandler sets add, update and delete methods of watcher. + // AddEventHandler sets add, update and delete methods of watcher. // Those methods are triggered when an event is detected for a // resource creation, update or deletion. watcher.watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{ @@ -870,7 +870,7 @@ func buildMetadataEnricher( resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() - // Add object to the list of metadata objects of this watcher + // Add object to the list of metadata objects of this watcher, // so it can be used by enrichers created after the event is // triggered accessor, _ := meta.Accessor(obj.(kubernetes.Resource)) @@ -963,7 +963,7 @@ func (e *enricher) Start(resourceWatchers *Watchers) { } } - // Start the main watcher if not already started. + // Start the main watcher if not already started. 
resourceWatcher := resourceWatchers.watchersMap[e.resourceName] if resourceWatcher != nil && !resourceWatcher.started { if err := resourceWatcher.watcher.Start(); err != nil { From adfce46031d8e6edb557a1bb9cb7dea662f467be Mon Sep 17 00:00:00 2001 From: constanca Date: Tue, 13 Feb 2024 11:18:36 +0100 Subject: [PATCH 45/61] adjust to node scope Signed-off-by: constanca --- .../module/kubernetes/util/kubernetes.go | 49 +++++++++++++------ .../module/kubernetes/util/kubernetes_test.go | 10 ++-- 2 files changed, 40 insertions(+), 19 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index afd2be5ad2c..a0d49cd26bf 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -84,14 +84,16 @@ func (*nilEnricher) Stop(*Watchers) {} func (*nilEnricher) Enrich([]mapstr.M) {} type watcherData struct { - metricsetsUsing []string // list of metricsets using this watcher - watcher kubernetes.Watcher started bool // true if watcher has started, false otherwise - enrichers map[string]*enricher // map of enrichers using this watcher. The key is the metricset name + metricsetsUsing []string // list of metricsets using this watcher + + enrichers map[string]*enricher // map of enrichers using this watcher. The key is the metricset name + metadataObjects map[string]bool // map of ids of each object received by the handler functions - metadataObjects map[string]bool // map of ids of each object received by the handler functions + nodeScope bool // whether this watcher is only for current node + needsRestart bool // whether this watcher needs a restart } type Watchers struct { @@ -254,12 +256,13 @@ func createWatcher( options kubernetes.WatchOptions, client k8sclient.Interface, resourceWatchers *Watchers, - namespace string) (bool, error) { + namespace string, + nodeScope bool) (bool, error) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() - _, ok := resourceWatchers.watchersMap[resourceName] + watcher, ok := resourceWatchers.watchersMap[resourceName] // if it does not exist, create the watcher if !ok { // check if we need to add namespace to the watcher options @@ -276,8 +279,16 @@ func createWatcher( metadataObjects: make(map[string]bool), enrichers: make(map[string]*enricher), metricsetsUsing: make([]string, 0), + needsRestart: false, + nodeScope: nodeScope, } return true, nil + } else if watcher.nodeScope != nodeScope && watcher.nodeScope { + // It might happen that the watcher already exists, but is only being used to monitor the resources + // of a single node. In that case, we need to check if we are trying to create a new watcher that will track + // the resources of multiple nodes. If it is the case, then we need to update the watcher. + watcher.nodeScope = nodeScope + watcher.needsRestart = true } return false, nil } @@ -348,7 +359,7 @@ func createAllWatchers( // Create a watcher for the given resource. // If it fails, we return an error, so we can stop the extra watchers from creating. 
- created, err := createWatcher(resourceName, res, *options, client, resourceWatchers, config.Namespace) + created, err := createWatcher(resourceName, res, *options, client, resourceWatchers, config.Namespace, nodeScope) if err != nil { return fmt.Errorf("error initializing Kubernetes watcher %s, required by %s: %w", resourceName, metricsetName, err) } else if created { @@ -362,7 +373,7 @@ func createAllWatchers( for _, extra := range extraWatchers { extraRes := getResource(extra) if extraRes != nil { - created, err = createWatcher(extra, extraRes, *options, client, resourceWatchers, config.Namespace) + created, err = createWatcher(extra, extraRes, *options, client, resourceWatchers, config.Namespace, false) if err != nil { log.Errorf("Error initializing Kubernetes watcher %s, required by %s: %s", extra, metricsetName, err) } else { @@ -963,14 +974,24 @@ func (e *enricher) Start(resourceWatchers *Watchers) { } } - // Start the main watcher if not already started. + // Start the main watcher if not already started or if a restart is needed resourceWatcher := resourceWatchers.watchersMap[e.resourceName] - if resourceWatcher != nil && !resourceWatcher.started { - if err := resourceWatcher.watcher.Start(); err != nil { - e.log.Warnf("Error starting %s watcher: %s", e.resourceName, err) - } else { - resourceWatcher.started = true + if resourceWatcher != nil { + if !resourceWatcher.started { + if err := resourceWatcher.watcher.Start(); err != nil { + e.log.Warnf("Error starting %s watcher: %s", e.resourceName, err) + } else { + resourceWatcher.started = true + } + } else if resourceWatcher.needsRestart { + resourceWatcher.watcher.Stop() + if err := resourceWatcher.watcher.Start(); err != nil { + e.log.Warnf("Error restarting %s watcher: %s", e.resourceName, err) + } else { + resourceWatcher.needsRestart = false + } } + } } diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index 157bcfd2232..8a764db99f6 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -78,7 +78,7 @@ func TestCreateWatcher(t *testing.T) { options, err := getWatchOptions(config, false, client, log) require.NoError(t, err) - created, err := createWatcher(NamespaceResource, &kubernetes.Node{}, *options, client, resourceWatchers, config.Namespace) + created, err := createWatcher(NamespaceResource, &kubernetes.Node{}, *options, client, resourceWatchers, config.Namespace, false) require.True(t, created) require.NoError(t, err) @@ -88,7 +88,7 @@ func TestCreateWatcher(t *testing.T) { require.NotNil(t, resourceWatchers.watchersMap[NamespaceResource].watcher) resourceWatchers.lock.Unlock() - created, err = createWatcher(NamespaceResource, &kubernetes.Namespace{}, *options, client, resourceWatchers, config.Namespace) + created, err = createWatcher(NamespaceResource, &kubernetes.Namespace{}, *options, client, resourceWatchers, config.Namespace, false) require.False(t, created) require.NoError(t, err) @@ -98,7 +98,7 @@ func TestCreateWatcher(t *testing.T) { require.NotNil(t, resourceWatchers.watchersMap[NamespaceResource].watcher) resourceWatchers.lock.Unlock() - created, err = createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace) + created, err = createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace, false) require.True(t, created) require.NoError(t, err) @@ -124,7 +124,7 @@ 
func TestAddToMetricsetsUsing(t *testing.T) { require.NoError(t, err) // Create the new entry with watcher and nil string array first - created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace) + created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace, false) require.True(t, created) require.NoError(t, err) @@ -161,7 +161,7 @@ func TestRemoveFromMetricsetsUsing(t *testing.T) { require.NoError(t, err) // Create the new entry with watcher and nil string array first - created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace) + created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace, false) require.True(t, created) require.NoError(t, err) From 15b673a27817b331588d15b5666167ccc9e9b63d Mon Sep 17 00:00:00 2001 From: constanca Date: Wed, 14 Feb 2024 09:04:32 +0100 Subject: [PATCH 46/61] update current watcher to restart watcher Signed-off-by: constanca --- .../module/kubernetes/util/kubernetes.go | 24 +++++++++++++------ 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index a0d49cd26bf..a5ce97a9f78 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -92,8 +92,8 @@ type watcherData struct { enrichers map[string]*enricher // map of enrichers using this watcher. The key is the metricset name metadataObjects map[string]bool // map of ids of each object received by the handler functions - nodeScope bool // whether this watcher is only for current node - needsRestart bool // whether this watcher needs a restart + nodeScope bool // whether this watcher is only for current node + restartWatcher kubernetes.Watcher // whether this watcher needs a restart } type Watchers struct { @@ -279,7 +279,7 @@ func createWatcher( metadataObjects: make(map[string]bool), enrichers: make(map[string]*enricher), metricsetsUsing: make([]string, 0), - needsRestart: false, + restartWatcher: nil, nodeScope: nodeScope, } return true, nil @@ -287,8 +287,17 @@ func createWatcher( // It might happen that the watcher already exists, but is only being used to monitor the resources // of a single node. In that case, we need to check if we are trying to create a new watcher that will track // the resources of multiple nodes. If it is the case, then we need to update the watcher. 
+ // check if we need to add namespace to the watcher options + + if isNamespaced(resourceName) { + options.Namespace = namespace + } + restartWatcher, err := kubernetes.NewNamedWatcher(resourceName, client, resource, options, nil) + if err != nil { + return false, err + } + watcher.restartWatcher = restartWatcher watcher.nodeScope = nodeScope - watcher.needsRestart = true } return false, nil } @@ -983,12 +992,13 @@ func (e *enricher) Start(resourceWatchers *Watchers) { } else { resourceWatcher.started = true } - } else if resourceWatcher.needsRestart { + } else if resourceWatcher.restartWatcher != nil { resourceWatcher.watcher.Stop() - if err := resourceWatcher.watcher.Start(); err != nil { + if err := resourceWatcher.restartWatcher.Start(); err != nil { e.log.Warnf("Error restarting %s watcher: %s", e.resourceName, err) } else { - resourceWatcher.needsRestart = false + resourceWatcher.watcher = resourceWatcher.restartWatcher + resourceWatcher.restartWatcher = nil } } From 57135c9114bb7bc19d5d841037e637508f8bb7a5 Mon Sep 17 00:00:00 2001 From: constanca Date: Wed, 14 Feb 2024 17:05:32 +0100 Subject: [PATCH 47/61] Update all watchers - current and extra - according to watcher options Signed-off-by: constanca --- .../module/kubernetes/util/kubernetes.go | 158 +++++++++--------- .../module/kubernetes/util/kubernetes_test.go | 84 +++++----- 2 files changed, 126 insertions(+), 116 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index a5ce97a9f78..2ca2d49a169 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -83,7 +83,7 @@ func (*nilEnricher) Start(*Watchers) {} func (*nilEnricher) Stop(*Watchers) {} func (*nilEnricher) Enrich([]mapstr.M) {} -type watcherData struct { +type metaWatcher struct { watcher kubernetes.Watcher started bool // true if watcher has started, false otherwise @@ -97,8 +97,8 @@ type watcherData struct { } type Watchers struct { - watchersMap map[string]*watcherData - lock sync.RWMutex + metaWatchersMap map[string]*metaWatcher + lock sync.RWMutex } const selector = "kubernetes" @@ -123,7 +123,7 @@ const ( func NewWatchers() *Watchers { watchers := &Watchers{ - watchersMap: make(map[string]*watcherData), + metaWatchersMap: make(map[string]*metaWatcher), } return watchers } @@ -256,16 +256,21 @@ func createWatcher( options kubernetes.WatchOptions, client k8sclient.Interface, resourceWatchers *Watchers, - namespace string, - nodeScope bool) (bool, error) { + namespace string) (bool, error) { + + // We need to check the node scope to decide on whether a watcher should be updated or not + nodeScope := false + if options.Node != "" { + nodeScope = true + } resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() - watcher, ok := resourceWatchers.watchersMap[resourceName] - // if it does not exist, create the watcher + resourceMetaWatchers, ok := resourceWatchers.metaWatchersMap[resourceName] + // if it does not exist, create the resourceMetaWatchers if !ok { - // check if we need to add namespace to the watcher options + // check if we need to add namespace to the resourceMetaWatchers options if isNamespaced(resourceName) { options.Namespace = namespace } @@ -273,7 +278,7 @@ func createWatcher( if err != nil { return false, err } - resourceWatchers.watchersMap[resourceName] = &watcherData{ + resourceWatchers.metaWatchersMap[resourceName] = &metaWatcher{ watcher: watcher, started: false, metadataObjects: make(map[string]bool), @@ 
-283,12 +288,11 @@ func createWatcher( nodeScope: nodeScope, } return true, nil - } else if watcher.nodeScope != nodeScope && watcher.nodeScope { - // It might happen that the watcher already exists, but is only being used to monitor the resources - // of a single node. In that case, we need to check if we are trying to create a new watcher that will track - // the resources of multiple nodes. If it is the case, then we need to update the watcher. - // check if we need to add namespace to the watcher options - + } else if resourceMetaWatchers.nodeScope != nodeScope && resourceMetaWatchers.nodeScope { + // It might happen that the resourceMetaWatchers already exists, but is only being used to monitor the resources + // of a single node. In that case, we need to check if we are trying to create a new resourceMetaWatchers that will track + // the resources of multiple nodes. If it is the case, then we need to update the resourceMetaWatchers. + // check if we need to add namespace to the resourceMetaWatchers options if isNamespaced(resourceName) { options.Namespace = namespace } @@ -296,8 +300,10 @@ func createWatcher( if err != nil { return false, err } - watcher.restartWatcher = restartWatcher - watcher.nodeScope = nodeScope + // update the handler of the restart resourceMetaWatchers to match the current resourceMetaWatchers handler + restartWatcher.AddEventHandler(resourceMetaWatchers.watcher.GetEventHandler()) + resourceMetaWatchers.restartWatcher = restartWatcher + resourceMetaWatchers.nodeScope = nodeScope } return false, nil } @@ -308,7 +314,7 @@ func addToMetricsetsUsing(resourceName string, metricsetUsing string, resourceWa resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() - data, ok := resourceWatchers.watchersMap[resourceName] + data, ok := resourceWatchers.metaWatchersMap[resourceName] if ok { contains := false for _, which := range data.metricsetsUsing { @@ -328,7 +334,7 @@ func addToMetricsetsUsing(resourceName string, metricsetUsing string, resourceWa // It returns true if element was removed and new size of array. // The cache should be locked when called. func removeFromMetricsetsUsing(resourceName string, notUsingName string, resourceWatchers *Watchers) (bool, int) { - data, ok := resourceWatchers.watchersMap[resourceName] + data, ok := resourceWatchers.metaWatchersMap[resourceName] removed := false if ok { newIndex := 0 @@ -368,7 +374,7 @@ func createAllWatchers( // Create a watcher for the given resource. // If it fails, we return an error, so we can stop the extra watchers from creating. 
- created, err := createWatcher(resourceName, res, *options, client, resourceWatchers, config.Namespace, nodeScope) + created, err := createWatcher(resourceName, res, *options, client, resourceWatchers, config.Namespace) if err != nil { return fmt.Errorf("error initializing Kubernetes watcher %s, required by %s: %w", resourceName, metricsetName, err) } else if created { @@ -382,7 +388,7 @@ func createAllWatchers( for _, extra := range extraWatchers { extraRes := getResource(extra) if extraRes != nil { - created, err = createWatcher(extra, extraRes, *options, client, resourceWatchers, config.Namespace, false) + created, err = createWatcher(extra, extraRes, *options, client, resourceWatchers, config.Namespace) if err != nil { log.Errorf("Error initializing Kubernetes watcher %s, required by %s: %s", extra, metricsetName, err) } else { @@ -407,18 +413,18 @@ func createMetadataGen(client k8sclient.Interface, commonConfig *conf.C, addReso resourceWatchers.lock.RLock() defer resourceWatchers.lock.RUnlock() - resourceWatcher := resourceWatchers.watchersMap[resourceName] + resourceMetaWatcher := resourceWatchers.metaWatchersMap[resourceName] // This should not be possible since the watchers should have been created before - if resourceWatcher == nil { + if resourceMetaWatcher == nil { return nil, fmt.Errorf("could not create the metadata generator, as the watcher for %s does not exist", resourceName) } var metaGen *metadata.Resource - namespaceWatcher := resourceWatchers.watchersMap[NamespaceResource] - if namespaceWatcher != nil { + namespaceMetaWatcher := resourceWatchers.metaWatchersMap[NamespaceResource] + if namespaceMetaWatcher != nil { n := metadata.NewNamespaceMetadataGenerator(addResourceMetadata.Namespace, - (*namespaceWatcher).watcher.Store(), client) + (*namespaceMetaWatcher).watcher.Store(), client) metaGen = metadata.NewNamespaceAwareResourceMetadataGenerator(commonConfig, client, n) } else { metaGen = metadata.NewResourceMetadataGenerator(commonConfig, client) @@ -435,41 +441,41 @@ func createMetadataGenSpecific(client k8sclient.Interface, commonConfig *conf.C, defer resourceWatchers.lock.RUnlock() // The watcher for the resource needs to exist - resWatcher := resourceWatchers.watchersMap[resourceName] - if resWatcher == nil { + resourceMetaWatcher := resourceWatchers.metaWatchersMap[resourceName] + if resourceMetaWatcher == nil { return nil, fmt.Errorf("could not create the metadata generator, as the watcher for %s does not exist", resourceName) } var metaGen metadata.MetaGen if resourceName == PodResource { var nodeWatcher kubernetes.Watcher - if watcher := resourceWatchers.watchersMap[NodeResource]; watcher != nil { - nodeWatcher = (*watcher).watcher + if nodeMetaWatcher := resourceWatchers.metaWatchersMap[NodeResource]; nodeMetaWatcher != nil { + nodeWatcher = (*nodeMetaWatcher).watcher } var namespaceWatcher kubernetes.Watcher - if watcher := resourceWatchers.watchersMap[NamespaceResource]; watcher != nil { - namespaceWatcher = (*watcher).watcher + if namespaceMetaWatcher := resourceWatchers.metaWatchersMap[NamespaceResource]; namespaceMetaWatcher != nil { + namespaceWatcher = (*namespaceMetaWatcher).watcher } var replicaSetWatcher kubernetes.Watcher - if watcher := resourceWatchers.watchersMap[ReplicaSetResource]; watcher != nil { - replicaSetWatcher = (*watcher).watcher + if replicasetMetaWatcher := resourceWatchers.metaWatchersMap[ReplicaSetResource]; replicasetMetaWatcher != nil { + replicaSetWatcher = (*replicasetMetaWatcher).watcher } var jobWatcher kubernetes.Watcher - 
if watcher := resourceWatchers.watchersMap[JobResource]; watcher != nil { - jobWatcher = (*watcher).watcher + if jobMetaWatcher := resourceWatchers.metaWatchersMap[JobResource]; jobMetaWatcher != nil { + jobWatcher = (*jobMetaWatcher).watcher } - metaGen = metadata.GetPodMetaGen(commonConfig, (*resWatcher).watcher, nodeWatcher, namespaceWatcher, replicaSetWatcher, + metaGen = metadata.GetPodMetaGen(commonConfig, (*resourceMetaWatcher).watcher, nodeWatcher, namespaceWatcher, replicaSetWatcher, jobWatcher, addResourceMetadata) return metaGen, nil } else if resourceName == ServiceResource { - namespaceWatcher := resourceWatchers.watchersMap[NamespaceResource] - if namespaceWatcher == nil { + namespaceMetaWatcher := resourceWatchers.metaWatchersMap[NamespaceResource] + if namespaceMetaWatcher == nil { return nil, fmt.Errorf("could not create the metadata generator, as the watcher for namespace does not exist") } namespaceMeta := metadata.NewNamespaceMetadataGenerator(addResourceMetadata.Namespace, - (*namespaceWatcher).watcher.Store(), client) - metaGen = metadata.NewServiceMetadataGenerator(commonConfig, (*resWatcher).watcher.Store(), + (*namespaceMetaWatcher).watcher.Store(), client) + metaGen = metadata.NewServiceMetadataGenerator(commonConfig, (*resourceMetaWatcher).watcher.Store(), namespaceMeta, client) return metaGen, nil } @@ -858,17 +864,17 @@ func buildMetadataEnricher( resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() - watcher := resourceWatchers.watchersMap[resourceName] - if watcher != nil { - watcher.enrichers[metricsetName] = enricher + resourceMetaWatcher := resourceWatchers.metaWatchersMap[resourceName] + if resourceMetaWatcher != nil { + resourceMetaWatcher.enrichers[metricsetName] = enricher // Check if this shared watcher has already detected resources from a previous enricher. // In that case, for each resource, call the updateFunc of the current enricher to // update its metadata. This is needed in cases where the watcher has already been // notified for new/updated resources while the enricher for current metricset has not // built yet (example is pod, state_pod metricsets). - for key, _ := range watcher.metadataObjects { - obj, exists, err := watcher.watcher.Store().GetByKey(key) + for key, _ := range resourceMetaWatcher.metadataObjects { + obj, exists, err := resourceMetaWatcher.watcher.Store().GetByKey(key) if err != nil { log.Errorf("Error trying to get the object from the store: %s", err) } else { @@ -885,7 +891,7 @@ func buildMetadataEnricher( // AddEventHandler sets add, update and delete methods of watcher. // Those methods are triggered when an event is detected for a // resource creation, update or deletion. 
- watcher.watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{ + resourceMetaWatcher.watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() @@ -899,9 +905,9 @@ func buildMetadataEnricher( if namespace != "" { id = namespace + "/" + id } - watcher.metadataObjects[id] = true + resourceMetaWatcher.metadataObjects[id] = true - for _, enricher := range watcher.enrichers { + for _, enricher := range resourceMetaWatcher.enrichers { enricher.Lock() newMetadataEvents := enricher.updateFunc(obj.(kubernetes.Resource)) // add the new metadata to the watcher received metadata @@ -922,9 +928,9 @@ func buildMetadataEnricher( if namespace != "" { id = namespace + "/" + id } - watcher.metadataObjects[id] = true + resourceMetaWatcher.metadataObjects[id] = true - for _, enricher := range watcher.enrichers { + for _, enricher := range resourceMetaWatcher.enrichers { enricher.Lock() updatedMetadataEvents := enricher.updateFunc(obj.(kubernetes.Resource)) for id, metadata := range updatedMetadataEvents { @@ -944,9 +950,9 @@ func buildMetadataEnricher( if namespace != "" { id = namespace + "/" + id } - delete(watcher.metadataObjects, id) + delete(resourceMetaWatcher.metadataObjects, id) - for _, enricher := range watcher.enrichers { + for _, enricher := range resourceMetaWatcher.enrichers { enricher.Lock() ids := enricher.deleteFunc(obj.(kubernetes.Resource)) // update this watcher events by removing all the metadata[id] @@ -973,32 +979,32 @@ func (e *enricher) Start(resourceWatchers *Watchers) { // first. extras := getExtraWatchers(e.resourceName, e.config.AddResourceMetadata) for _, extra := range extras { - extraWatcher := resourceWatchers.watchersMap[extra] - if extraWatcher != nil && !extraWatcher.started { - if err := extraWatcher.watcher.Start(); err != nil { + extraWatcherMeta := resourceWatchers.metaWatchersMap[extra] + if extraWatcherMeta != nil && !extraWatcherMeta.started { + if err := extraWatcherMeta.watcher.Start(); err != nil { e.log.Warnf("Error starting %s watcher: %s", extra, err) } else { - extraWatcher.started = true + extraWatcherMeta.started = true } } } // Start the main watcher if not already started or if a restart is needed - resourceWatcher := resourceWatchers.watchersMap[e.resourceName] - if resourceWatcher != nil { - if !resourceWatcher.started { - if err := resourceWatcher.watcher.Start(); err != nil { + resourceMetaWatcher := resourceWatchers.metaWatchersMap[e.resourceName] + if resourceMetaWatcher != nil { + if !resourceMetaWatcher.started { + if err := resourceMetaWatcher.watcher.Start(); err != nil { e.log.Warnf("Error starting %s watcher: %s", e.resourceName, err) } else { - resourceWatcher.started = true + resourceMetaWatcher.started = true } - } else if resourceWatcher.restartWatcher != nil { - resourceWatcher.watcher.Stop() - if err := resourceWatcher.restartWatcher.Start(); err != nil { + } else if resourceMetaWatcher.restartWatcher != nil { + resourceMetaWatcher.watcher.Stop() + if err := resourceMetaWatcher.restartWatcher.Start(); err != nil { e.log.Warnf("Error restarting %s watcher: %s", e.resourceName, err) } else { - resourceWatcher.watcher = resourceWatcher.restartWatcher - resourceWatcher.restartWatcher = nil + resourceMetaWatcher.watcher = resourceMetaWatcher.restartWatcher + resourceMetaWatcher.restartWatcher = nil } } @@ -1011,23 +1017,23 @@ func (e *enricher) Stop(resourceWatchers *Watchers) { resourceWatchers.lock.Lock() defer 
resourceWatchers.lock.Unlock() - resourceWatcher := resourceWatchers.watchersMap[e.resourceName] - if resourceWatcher != nil && resourceWatcher.started { + resourceMetaWatcher := resourceWatchers.metaWatchersMap[e.resourceName] + if resourceMetaWatcher != nil && resourceMetaWatcher.started { _, size := removeFromMetricsetsUsing(e.resourceName, e.metricsetName, resourceWatchers) if size == 0 { - resourceWatcher.watcher.Stop() - resourceWatcher.started = false + resourceMetaWatcher.watcher.Stop() + resourceMetaWatcher.started = false } } extras := getExtraWatchers(e.resourceName, e.config.AddResourceMetadata) for _, extra := range extras { - extraWatcher := resourceWatchers.watchersMap[extra] - if extraWatcher != nil && extraWatcher.started { + extraMetaWatcher := resourceWatchers.metaWatchersMap[extra] + if extraMetaWatcher != nil && extraMetaWatcher.started { _, size := removeFromMetricsetsUsing(extra, e.metricsetName, resourceWatchers) if size == 0 { - extraWatcher.watcher.Stop() - extraWatcher.started = false + extraMetaWatcher.watcher.Stop() + extraMetaWatcher.started = false } } } diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index 8a764db99f6..57bbcbdc648 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -78,34 +78,34 @@ func TestCreateWatcher(t *testing.T) { options, err := getWatchOptions(config, false, client, log) require.NoError(t, err) - created, err := createWatcher(NamespaceResource, &kubernetes.Node{}, *options, client, resourceWatchers, config.Namespace, false) + created, err := createWatcher(NamespaceResource, &kubernetes.Node{}, *options, client, resourceWatchers, config.Namespace) require.True(t, created) require.NoError(t, err) resourceWatchers.lock.Lock() - require.Equal(t, 1, len(resourceWatchers.watchersMap)) - require.NotNil(t, resourceWatchers.watchersMap[NamespaceResource]) - require.NotNil(t, resourceWatchers.watchersMap[NamespaceResource].watcher) + require.Equal(t, 1, len(resourceWatchers.metaWatchersMap)) + require.NotNil(t, resourceWatchers.metaWatchersMap[NamespaceResource]) + require.NotNil(t, resourceWatchers.metaWatchersMap[NamespaceResource].watcher) resourceWatchers.lock.Unlock() - created, err = createWatcher(NamespaceResource, &kubernetes.Namespace{}, *options, client, resourceWatchers, config.Namespace, false) + created, err = createWatcher(NamespaceResource, &kubernetes.Namespace{}, *options, client, resourceWatchers, config.Namespace) require.False(t, created) require.NoError(t, err) resourceWatchers.lock.Lock() - require.Equal(t, 1, len(resourceWatchers.watchersMap)) - require.NotNil(t, resourceWatchers.watchersMap[NamespaceResource]) - require.NotNil(t, resourceWatchers.watchersMap[NamespaceResource].watcher) + require.Equal(t, 1, len(resourceWatchers.metaWatchersMap)) + require.NotNil(t, resourceWatchers.metaWatchersMap[NamespaceResource]) + require.NotNil(t, resourceWatchers.metaWatchersMap[NamespaceResource].watcher) resourceWatchers.lock.Unlock() - created, err = createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace, false) + created, err = createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace) require.True(t, created) require.NoError(t, err) resourceWatchers.lock.Lock() - require.Equal(t, 2, len(resourceWatchers.watchersMap)) - require.NotNil(t, 
resourceWatchers.watchersMap[DeploymentResource]) - require.NotNil(t, resourceWatchers.watchersMap[NamespaceResource]) + require.Equal(t, 2, len(resourceWatchers.metaWatchersMap)) + require.NotNil(t, resourceWatchers.metaWatchersMap[DeploymentResource]) + require.NotNil(t, resourceWatchers.metaWatchersMap[NamespaceResource]) resourceWatchers.lock.Unlock() } @@ -124,25 +124,25 @@ func TestAddToMetricsetsUsing(t *testing.T) { require.NoError(t, err) // Create the new entry with watcher and nil string array first - created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace, false) + created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace) require.True(t, created) require.NoError(t, err) resourceWatchers.lock.Lock() - require.NotNil(t, resourceWatchers.watchersMap[DeploymentResource].watcher) - require.Equal(t, []string{}, resourceWatchers.watchersMap[DeploymentResource].metricsetsUsing) + require.NotNil(t, resourceWatchers.metaWatchersMap[DeploymentResource].watcher) + require.Equal(t, []string{}, resourceWatchers.metaWatchersMap[DeploymentResource].metricsetsUsing) resourceWatchers.lock.Unlock() metricsetDeployment := "state_deployment" addToMetricsetsUsing(DeploymentResource, metricsetDeployment, resourceWatchers) resourceWatchers.lock.Lock() - require.Equal(t, []string{metricsetDeployment}, resourceWatchers.watchersMap[DeploymentResource].metricsetsUsing) + require.Equal(t, []string{metricsetDeployment}, resourceWatchers.metaWatchersMap[DeploymentResource].metricsetsUsing) resourceWatchers.lock.Unlock() metricsetContainer := "container" addToMetricsetsUsing(DeploymentResource, metricsetContainer, resourceWatchers) resourceWatchers.lock.Lock() - require.Equal(t, []string{metricsetDeployment, metricsetContainer}, resourceWatchers.watchersMap[DeploymentResource].metricsetsUsing) + require.Equal(t, []string{metricsetDeployment, metricsetContainer}, resourceWatchers.metaWatchersMap[DeploymentResource].metricsetsUsing) resourceWatchers.lock.Unlock() } @@ -161,7 +161,7 @@ func TestRemoveFromMetricsetsUsing(t *testing.T) { require.NoError(t, err) // Create the new entry with watcher and nil string array first - created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace, false) + created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace) require.True(t, created) require.NoError(t, err) @@ -205,7 +205,7 @@ func TestCreateAllWatchers(t *testing.T) { err := createAllWatchers(client, "does-not-exist", "does-not-exist", false, config, log, resourceWatchers) require.Error(t, err) resourceWatchers.lock.Lock() - require.Equal(t, 0, len(resourceWatchers.watchersMap)) + require.Equal(t, 0, len(resourceWatchers.metaWatchersMap)) resourceWatchers.lock.Unlock() // Start watcher for a resource that requires other resources, should start all the watchers @@ -217,9 +217,9 @@ func TestCreateAllWatchers(t *testing.T) { // Check that all the required watchers are in the map resourceWatchers.lock.Lock() // we add 1 to the expected result to represent the resource itself - require.Equal(t, len(extras)+1, len(resourceWatchers.watchersMap)) + require.Equal(t, len(extras)+1, len(resourceWatchers.metaWatchersMap)) for _, extra := range extras { - require.NotNil(t, resourceWatchers.watchersMap[extra]) + require.NotNil(t, 
resourceWatchers.metaWatchersMap[extra]) } resourceWatchers.lock.Unlock() } @@ -311,13 +311,13 @@ func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { metricsetDeployment := "state_deployment" resourceWatchers.lock.Lock() - resourceWatchers.watchersMap[NamespaceResource] = &watcherData{ + resourceWatchers.metaWatchersMap[NamespaceResource] = &metaWatcher{ watcher: &mockWatcher{}, started: false, metricsetsUsing: []string{metricsetNamespace, metricsetDeployment}, enrichers: make(map[string]*enricher), } - resourceWatchers.watchersMap[DeploymentResource] = &watcherData{ + resourceWatchers.metaWatchersMap[DeploymentResource] = &metaWatcher{ watcher: &mockWatcher{}, started: true, metricsetsUsing: []string{metricsetDeployment}, @@ -349,20 +349,20 @@ func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { log, ) resourceWatchers.lock.Lock() - watcher := resourceWatchers.watchersMap[NamespaceResource] + watcher := resourceWatchers.metaWatchersMap[NamespaceResource] require.False(t, watcher.started) resourceWatchers.lock.Unlock() enricherNamespace.Start(resourceWatchers) resourceWatchers.lock.Lock() - watcher = resourceWatchers.watchersMap[NamespaceResource] + watcher = resourceWatchers.metaWatchersMap[NamespaceResource] require.True(t, watcher.started) resourceWatchers.lock.Unlock() // Stopping should not stop the watcher because it is still being used by deployment metricset enricherNamespace.Stop(resourceWatchers) resourceWatchers.lock.Lock() - watcher = resourceWatchers.watchersMap[NamespaceResource] + watcher = resourceWatchers.metaWatchersMap[NamespaceResource] require.True(t, watcher.started) require.Equal(t, []string{metricsetDeployment}, watcher.metricsetsUsing) resourceWatchers.lock.Unlock() @@ -381,12 +381,12 @@ func TestBuildMetadataEnricher_Start_Stop(t *testing.T) { enricherDeployment.Stop(resourceWatchers) resourceWatchers.lock.Lock() - watcher = resourceWatchers.watchersMap[NamespaceResource] + watcher = resourceWatchers.metaWatchersMap[NamespaceResource] require.False(t, watcher.started) require.Equal(t, []string{}, watcher.metricsetsUsing) - watcher = resourceWatchers.watchersMap[DeploymentResource] + watcher = resourceWatchers.metaWatchersMap[DeploymentResource] require.False(t, watcher.started) require.Equal(t, []string{}, watcher.metricsetsUsing) @@ -400,7 +400,7 @@ func TestBuildMetadataEnricher_Start_Stop_SameResources(t *testing.T) { metricsetStatePod := "state_pod" resourceWatchers.lock.Lock() - resourceWatchers.watchersMap[PodResource] = &watcherData{ + resourceWatchers.metaWatchersMap[PodResource] = &metaWatcher{ watcher: &mockWatcher{}, started: false, metricsetsUsing: []string{metricsetStatePod, metricsetPod}, @@ -423,20 +423,20 @@ func TestBuildMetadataEnricher_Start_Stop_SameResources(t *testing.T) { enricherPod := buildMetadataEnricher(metricsetPod, PodResource, resourceWatchers, config, funcs.update, funcs.delete, funcs.index, log) resourceWatchers.lock.Lock() - watcher := resourceWatchers.watchersMap[PodResource] + watcher := resourceWatchers.metaWatchersMap[PodResource] require.False(t, watcher.started) resourceWatchers.lock.Unlock() enricherPod.Start(resourceWatchers) resourceWatchers.lock.Lock() - watcher = resourceWatchers.watchersMap[PodResource] + watcher = resourceWatchers.metaWatchersMap[PodResource] require.True(t, watcher.started) resourceWatchers.lock.Unlock() // Stopping should not stop the watcher because it is still being used by state_pod metricset enricherPod.Stop(resourceWatchers) resourceWatchers.lock.Lock() - watcher = 
resourceWatchers.watchersMap[PodResource] + watcher = resourceWatchers.metaWatchersMap[PodResource] require.True(t, watcher.started) require.Equal(t, []string{metricsetStatePod}, watcher.metricsetsUsing) resourceWatchers.lock.Unlock() @@ -447,7 +447,7 @@ func TestBuildMetadataEnricher_Start_Stop_SameResources(t *testing.T) { enricherStatePod.Stop(resourceWatchers) resourceWatchers.lock.Lock() - watcher = resourceWatchers.watchersMap[PodResource] + watcher = resourceWatchers.metaWatchersMap[PodResource] require.False(t, watcher.started) require.Equal(t, []string{}, watcher.metricsetsUsing) resourceWatchers.lock.Unlock() @@ -457,7 +457,7 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { resourceWatchers := NewWatchers() resourceWatchers.lock.Lock() - resourceWatchers.watchersMap[PodResource] = &watcherData{ + resourceWatchers.metaWatchersMap[PodResource] = &metaWatcher{ watcher: &mockWatcher{}, started: false, metricsetsUsing: []string{"pod"}, @@ -496,14 +496,14 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { enricher := buildMetadataEnricher(metricset, PodResource, resourceWatchers, config, funcs.update, funcs.delete, funcs.index, log) resourceWatchers.lock.Lock() - wData := resourceWatchers.watchersMap[PodResource] + wData := resourceWatchers.metaWatchersMap[PodResource] mockW := wData.watcher.(*mockWatcher) require.NotNil(t, mockW.handler) resourceWatchers.lock.Unlock() enricher.Start(resourceWatchers) resourceWatchers.lock.Lock() - watcher := resourceWatchers.watchersMap[PodResource] + watcher := resourceWatchers.metaWatchersMap[PodResource] require.True(t, watcher.started) mockW = watcher.watcher.(*mockWatcher) resourceWatchers.lock.Unlock() @@ -552,7 +552,7 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { // Emit delete event resourceWatchers.lock.Lock() - wData = resourceWatchers.watchersMap[PodResource] + wData = resourceWatchers.metaWatchersMap[PodResource] mockW = wData.watcher.(*mockWatcher) resourceWatchers.lock.Unlock() @@ -577,7 +577,7 @@ func TestBuildMetadataEnricher_EventHandler(t *testing.T) { enricher.Stop(resourceWatchers) resourceWatchers.lock.Lock() - watcher = resourceWatchers.watchersMap[PodResource] + watcher = resourceWatchers.metaWatchersMap[PodResource] require.False(t, watcher.started) resourceWatchers.lock.Unlock() } @@ -590,7 +590,7 @@ func TestBuildMetadataEnricher_EventHandler_PastObjects(t *testing.T) { resourceWatchers := NewWatchers() resourceWatchers.lock.Lock() - resourceWatchers.watchersMap[PodResource] = &watcherData{ + resourceWatchers.metaWatchersMap[PodResource] = &metaWatcher{ watcher: &mockWatcher{}, started: false, metricsetsUsing: []string{"pod", "state_pod"}, @@ -639,7 +639,7 @@ func TestBuildMetadataEnricher_EventHandler_PastObjects(t *testing.T) { resourceWatchers.lock.Lock() - watcher := resourceWatchers.watchersMap[PodResource] + watcher := resourceWatchers.metaWatchersMap[PodResource] mockW := watcher.watcher.(*mockWatcher) resourceWatchers.lock.Unlock() @@ -718,6 +718,10 @@ func (m *mockWatcher) AddEventHandler(r kubernetes.ResourceEventHandler) { m.handler = r } +func (m *mockWatcher) GetEventHandler() kubernetes.ResourceEventHandler { + return m.handler +} + func (m *mockWatcher) Store() cache.Store { return nil } From 519e3fb16de5807e27872fb14a26102a412fa7f9 Mon Sep 17 00:00:00 2001 From: constanca Date: Mon, 19 Feb 2024 10:39:19 +0100 Subject: [PATCH 48/61] Update go.mod Signed-off-by: constanca --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git 
a/go.mod b/go.mod index 44a59eddc19..3a47f0eb6f3 100644 --- a/go.mod +++ b/go.mod @@ -201,7 +201,7 @@ require ( github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20220623125934-28468a6701b5 github.com/elastic/bayeux v1.0.5 github.com/elastic/ebpfevents v0.3.2 - github.com/elastic/elastic-agent-autodiscover v0.6.7 + github.com/elastic/elastic-agent-autodiscover v0.6.8 github.com/elastic/elastic-agent-libs v0.7.5 github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 github.com/elastic/elastic-agent-system-metrics v0.9.1 diff --git a/go.sum b/go.sum index 7e9caccc9ca..e0fa59f8403 100644 --- a/go.sum +++ b/go.sum @@ -665,8 +665,8 @@ github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3 h1:lnDkqiRFKm0rxdljqr github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3/go.mod h1:aPqzac6AYkipvp4hufTyMj5PDIphF3+At8zr7r51xjY= github.com/elastic/ebpfevents v0.3.2 h1:UJ8kW5jw2TpUR5MEMaZ1O62sK9JQ+5xTlj+YpQC6BXc= github.com/elastic/ebpfevents v0.3.2/go.mod h1:o21z5xup/9dK8u0Hg9bZRflSqqj1Zu5h2dg2hSTcUPQ= -github.com/elastic/elastic-agent-autodiscover v0.6.7 h1:+KVjltN0rPsBrU8b156gV4lOTBgG/vt0efFCFARrf3g= -github.com/elastic/elastic-agent-autodiscover v0.6.7/go.mod h1:hFeFqneS2r4jD0/QzGkrNk0YVdN0JGh7lCWdsH7zcI4= +github.com/elastic/elastic-agent-autodiscover v0.6.8 h1:BSXz+QwjZAEt08G+T3GDGl14Bh9a6zD8luNCvZut/b8= +github.com/elastic/elastic-agent-autodiscover v0.6.8/go.mod h1:hFeFqneS2r4jD0/QzGkrNk0YVdN0JGh7lCWdsH7zcI4= github.com/elastic/elastic-agent-client/v7 v7.8.0 h1:GHFzDJIWpdgI0qDk5EcqbQJGvwTsl2E2vQK3/xe+MYQ= github.com/elastic/elastic-agent-client/v7 v7.8.0/go.mod h1:ihtjqJzYiIltlRhNruaSSc0ogxIhqPD5hOMKq16cI1s= github.com/elastic/elastic-agent-libs v0.7.5 h1:4UMqB3BREvhwecYTs/L23oQp1hs/XUkcunPlmTZn5yg= From 74d51e292d4cad5cee86fb6fca8e205711bf2d30 Mon Sep 17 00:00:00 2001 From: constanca Date: Mon, 19 Feb 2024 11:00:52 +0100 Subject: [PATCH 49/61] Add geteventhandler to mockwatcher Signed-off-by: constanca --- libbeat/autodiscover/providers/kubernetes/pod_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/libbeat/autodiscover/providers/kubernetes/pod_test.go b/libbeat/autodiscover/providers/kubernetes/pod_test.go index 1718dbe0752..4cc2d8bb393 100644 --- a/libbeat/autodiscover/providers/kubernetes/pod_test.go +++ b/libbeat/autodiscover/providers/kubernetes/pod_test.go @@ -2150,6 +2150,10 @@ func (s *mockUpdaterWatcher) Store() caches.Store { func (s *mockUpdaterWatcher) AddEventHandler(kubernetes.ResourceEventHandler) { } +func (s *mockUpdaterWatcher) GetEventHandler() kubernetes.ResourceEventHandler { + return nil +} + func (s *mockUpdaterStore) List() []interface{} { return s.objects } From b51c8733dae1698252d727a343533dfab45ea469 Mon Sep 17 00:00:00 2001 From: constanca Date: Mon, 19 Feb 2024 11:07:35 +0100 Subject: [PATCH 50/61] update NOTICE.txt Signed-off-by: constanca --- NOTICE.txt | 210 ++++++++++++++++++++++++++--------------------------- 1 file changed, 105 insertions(+), 105 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index c038c7027e3..45a9f830ca3 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1,7 +1,7 @@ Elastic Beats Copyright 2014-2024 Elasticsearch BV -This product includes software developed by The Apache Software +This product includes software developed by The Apache Software Foundation (http://www.apache.org/). 
================================================================================ @@ -10862,7 +10862,7 @@ Contents of probable licence file $GOMODCACHE/github.com/digitalocean/go-libvirt Apache License ============== -_Version 2.0, January 2004_ +_Version 2.0, January 2004_ _<>_ ### Terms and Conditions for use, reproduction, and distribution @@ -11041,13 +11041,13 @@ the same “printed page” as the copyright notice for easier identification wi third-party archives. Copyright [yyyy] [name of copyright owner] - + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - + http://www.apache.org/licenses/LICENSE-2.0 - + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -12283,11 +12283,11 @@ various licenses: -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-autodiscover -Version: v0.6.7 +Version: v0.6.8 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-autodiscover@v0.6.7/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-autodiscover@v0.6.8/LICENSE: Apache License Version 2.0, January 2004 @@ -16494,7 +16494,7 @@ Mozilla Public License Version 2.0 means any form of the work other than Source Code Form. 1.7. "Larger Work" - means a work that combines Covered Software with other material, in + means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. "License" @@ -26116,7 +26116,7 @@ Contents of probable licence file $GOMODCACHE/gopkg.in/natefinch/lumberjack.v2@v The MIT License (MIT) -Copyright (c) 2014 Nate Finch +Copyright (c) 2014 Nate Finch Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -26593,13 +26593,13 @@ Contents of probable licence file $GOMODCACHE/howett.net/plist@v1.0.0/LICENSE: Copyright (c) 2013, Dustin L. Howett. All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: +modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. + list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. + and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED @@ -26613,7 +26613,7 @@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
The views and conclusions contained in the software and documentation are those -of the authors and should not be interpreted as representing official policies, +of the authors and should not be interpreted as representing official policies, either expressed or implied, of the FreeBSD Project. -------------------------------------------------------------------------------- @@ -29557,28 +29557,28 @@ Licence type (autodetected): MIT Contents of probable licence file $GOMODCACHE/github.com/!azure/go-amqp@v0.16.0/LICENSE: - MIT License - - Copyright (C) 2017 Kale Blankenship - Portions Copyright (C) Microsoft Corporation - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE + MIT License + + Copyright (C) 2017 Kale Blankenship + Portions Copyright (C) Microsoft Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE -------------------------------------------------------------------------------- @@ -31259,27 +31259,27 @@ Licence type (autodetected): MIT Contents of probable licence file $GOMODCACHE/github.com/!azure!a!d/microsoft-authentication-library-for-go@v0.9.0/LICENSE: - MIT License - - Copyright (c) Microsoft Corporation. 
- - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE -------------------------------------------------------------------------------- @@ -31587,27 +31587,27 @@ Licence type (autodetected): MIT Contents of probable licence file $GOMODCACHE/github.com/akavel/rsrc@v0.8.0/LICENSE.txt: -The MIT License (MIT) - -Copyright (c) 2013-2017 The rsrc Authors. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
+The MIT License (MIT) + +Copyright (c) 2013-2017 The rsrc Authors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. -------------------------------------------------------------------------------- @@ -38254,7 +38254,7 @@ Contents of probable licence file $GOMODCACHE/github.com/evanphx/json-patch@v4.1 Copyright (c) 2014, Evan Phoenix All rights reserved. -Redistribution and use in source and binary forms, with or without +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this @@ -38262,19 +38262,19 @@ modification, are permitted provided that the following conditions are met: * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. -* Neither the name of the Evan Phoenix nor the names of its contributors - may be used to endorse or promote products derived from this software +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. @@ -50568,22 +50568,22 @@ Contents of probable licence file $GOMODCACHE/github.com/smartystreets/assertion Copyright (c) 2016 SmartyStreets, LLC -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. NOTE: Various optional and subordinate components carry their own licensing @@ -54521,19 +54521,19 @@ Licence type (autodetected): BSD-2-Clause Contents of probable licence file $GOMODCACHE/gopkg.in/check.v1@v1.0.0-20201130134442-10cb98267c6c/LICENSE: Gocheck - A rich testing framework for Go - + Copyright (c) 2010-2013 Gustavo Niemeyer All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: +modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. + list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. + and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED From 64ac1882337c70426ed556ff289971478ab7999d Mon Sep 17 00:00:00 2001 From: constanca Date: Mon, 19 Feb 2024 11:12:42 +0100 Subject: [PATCH 51/61] update NOTICE.txt Signed-off-by: constanca --- NOTICE.txt | 206 ++++++++++++++++++++++++++--------------------------- 1 file changed, 103 insertions(+), 103 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 45a9f830ca3..764df61a934 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1,7 +1,7 @@ Elastic Beats Copyright 2014-2024 Elasticsearch BV -This product includes software developed by The Apache Software +This product includes software developed by The Apache Software Foundation (http://www.apache.org/). ================================================================================ @@ -10862,7 +10862,7 @@ Contents of probable licence file $GOMODCACHE/github.com/digitalocean/go-libvirt Apache License ============== -_Version 2.0, January 2004_ +_Version 2.0, January 2004_ _<>_ ### Terms and Conditions for use, reproduction, and distribution @@ -11041,13 +11041,13 @@ the same “printed page” as the copyright notice for easier identification wi third-party archives. Copyright [yyyy] [name of copyright owner] - + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - + http://www.apache.org/licenses/LICENSE-2.0 - + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -16494,7 +16494,7 @@ Mozilla Public License Version 2.0 means any form of the work other than Source Code Form. 1.7. "Larger Work" - means a work that combines Covered Software with other material, in + means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. "License" @@ -26116,7 +26116,7 @@ Contents of probable licence file $GOMODCACHE/gopkg.in/natefinch/lumberjack.v2@v The MIT License (MIT) -Copyright (c) 2014 Nate Finch +Copyright (c) 2014 Nate Finch Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -26593,13 +26593,13 @@ Contents of probable licence file $GOMODCACHE/howett.net/plist@v1.0.0/LICENSE: Copyright (c) 2013, Dustin L. Howett. All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: +modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. + list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. + and/or other materials provided with the distribution. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED @@ -26613,7 +26613,7 @@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those -of the authors and should not be interpreted as representing official policies, +of the authors and should not be interpreted as representing official policies, either expressed or implied, of the FreeBSD Project. -------------------------------------------------------------------------------- @@ -29557,28 +29557,28 @@ Licence type (autodetected): MIT Contents of probable licence file $GOMODCACHE/github.com/!azure/go-amqp@v0.16.0/LICENSE: - MIT License - - Copyright (C) 2017 Kale Blankenship - Portions Copyright (C) Microsoft Corporation - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE + MIT License + + Copyright (C) 2017 Kale Blankenship + Portions Copyright (C) Microsoft Corporation + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE -------------------------------------------------------------------------------- @@ -31259,27 +31259,27 @@ Licence type (autodetected): MIT Contents of probable licence file $GOMODCACHE/github.com/!azure!a!d/microsoft-authentication-library-for-go@v0.9.0/LICENSE: - MIT License - - Copyright (c) Microsoft Corporation. 
- - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE -------------------------------------------------------------------------------- @@ -31587,27 +31587,27 @@ Licence type (autodetected): MIT Contents of probable licence file $GOMODCACHE/github.com/akavel/rsrc@v0.8.0/LICENSE.txt: -The MIT License (MIT) - -Copyright (c) 2013-2017 The rsrc Authors. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
+The MIT License (MIT) + +Copyright (c) 2013-2017 The rsrc Authors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. -------------------------------------------------------------------------------- @@ -38254,7 +38254,7 @@ Contents of probable licence file $GOMODCACHE/github.com/evanphx/json-patch@v4.1 Copyright (c) 2014, Evan Phoenix All rights reserved. -Redistribution and use in source and binary forms, with or without +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this @@ -38262,19 +38262,19 @@ modification, are permitted provided that the following conditions are met: * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. -* Neither the name of the Evan Phoenix nor the names of its contributors - may be used to endorse or promote products derived from this software +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. @@ -50568,22 +50568,22 @@ Contents of probable licence file $GOMODCACHE/github.com/smartystreets/assertion Copyright (c) 2016 SmartyStreets, LLC -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. NOTE: Various optional and subordinate components carry their own licensing @@ -54521,19 +54521,19 @@ Licence type (autodetected): BSD-2-Clause Contents of probable licence file $GOMODCACHE/gopkg.in/check.v1@v1.0.0-20201130134442-10cb98267c6c/LICENSE: Gocheck - A rich testing framework for Go - + Copyright (c) 2010-2013 Gustavo Niemeyer All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: +modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. + list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. + and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED From d41e8374c8d8b1636717152f2243e40bdf35d317 Mon Sep 17 00:00:00 2001 From: MichaelKatsoulis Date: Thu, 22 Feb 2024 13:15:18 +0200 Subject: [PATCH 52/61] Never restart extra watchers --- .../module/kubernetes/util/kubernetes.go | 67 +++++++++++-------- .../module/kubernetes/util/kubernetes_test.go | 10 +-- 2 files changed, 44 insertions(+), 33 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 2ca2d49a169..f275935a33d 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -256,21 +256,28 @@ func createWatcher( options kubernetes.WatchOptions, client k8sclient.Interface, resourceWatchers *Watchers, - namespace string) (bool, error) { + namespace string, + extraWatcher bool) (bool, error) { // We need to check the node scope to decide on whether a watcher should be updated or not nodeScope := false if options.Node != "" { nodeScope = true } + // The nodescope for extra watchers node, namespace, replicaset and job should be always false + if extraWatcher { + nodeScope = false + options.Node = "" + } resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() - resourceMetaWatchers, ok := resourceWatchers.metaWatchersMap[resourceName] - // if it does not exist, create the resourceMetaWatchers + resourceMetaWatcher, ok := resourceWatchers.metaWatchersMap[resourceName] + + // if it does not exist, create the resourceMetaWatcher if !ok { - // check if we need to add namespace to the resourceMetaWatchers options + // check if we need to add namespace to the resourceMetaWatcher options if isNamespaced(resourceName) { options.Namespace = namespace } @@ -288,11 +295,11 @@ func createWatcher( nodeScope: nodeScope, } return true, nil - } else if resourceMetaWatchers.nodeScope != nodeScope && resourceMetaWatchers.nodeScope { - // It might happen that the resourceMetaWatchers already exists, but is only being used to monitor the resources - // of a single node. In that case, we need to check if we are trying to create a new resourceMetaWatchers that will track - // the resources of multiple nodes. If it is the case, then we need to update the resourceMetaWatchers. - // check if we need to add namespace to the resourceMetaWatchers options + } else if resourceMetaWatcher.nodeScope != nodeScope && resourceMetaWatcher.nodeScope { + // It might happen that the resourceMetaWatcher already exists, but is only being used to monitor the resources + // of a single node. In that case, we need to check if we are trying to create a new resourceMetaWatcher that will track + // the resources of multiple nodes. If it is the case, then we need to update the resourceMetaWatcher. 
+ // check if we need to add namespace to the resourceMetaWatcher options if isNamespaced(resourceName) { options.Namespace = namespace } @@ -301,9 +308,9 @@ func createWatcher( return false, err } // update the handler of the restart resourceMetaWatchers to match the current resourceMetaWatchers handler - restartWatcher.AddEventHandler(resourceMetaWatchers.watcher.GetEventHandler()) - resourceMetaWatchers.restartWatcher = restartWatcher - resourceMetaWatchers.nodeScope = nodeScope + restartWatcher.AddEventHandler(resourceMetaWatcher.watcher.GetEventHandler()) + resourceMetaWatcher.restartWatcher = restartWatcher + resourceMetaWatcher.nodeScope = nodeScope } return false, nil } @@ -371,10 +378,9 @@ func createAllWatchers( if err != nil { return err } - // Create a watcher for the given resource. // If it fails, we return an error, so we can stop the extra watchers from creating. - created, err := createWatcher(resourceName, res, *options, client, resourceWatchers, config.Namespace) + created, err := createWatcher(resourceName, res, *options, client, resourceWatchers, config.Namespace, false) if err != nil { return fmt.Errorf("error initializing Kubernetes watcher %s, required by %s: %w", resourceName, metricsetName, err) } else if created { @@ -388,7 +394,7 @@ func createAllWatchers( for _, extra := range extraWatchers { extraRes := getResource(extra) if extraRes != nil { - created, err = createWatcher(extra, extraRes, *options, client, resourceWatchers, config.Namespace) + created, err = createWatcher(extra, extraRes, *options, client, resourceWatchers, config.Namespace, true) if err != nil { log.Errorf("Error initializing Kubernetes watcher %s, required by %s: %s", extra, metricsetName, err) } else { @@ -439,12 +445,15 @@ func createMetadataGenSpecific(client k8sclient.Interface, commonConfig *conf.C, resourceWatchers.lock.RLock() defer resourceWatchers.lock.RUnlock() - // The watcher for the resource needs to exist resourceMetaWatcher := resourceWatchers.metaWatchersMap[resourceName] if resourceMetaWatcher == nil { return nil, fmt.Errorf("could not create the metadata generator, as the watcher for %s does not exist", resourceName) } + mainWatcher := (*resourceMetaWatcher).watcher + if (*resourceMetaWatcher).restartWatcher != nil { + mainWatcher = (*resourceMetaWatcher).restartWatcher + } var metaGen metadata.MetaGen if resourceName == PodResource { @@ -464,8 +473,7 @@ func createMetadataGenSpecific(client k8sclient.Interface, commonConfig *conf.C, if jobMetaWatcher := resourceWatchers.metaWatchersMap[JobResource]; jobMetaWatcher != nil { jobWatcher = (*jobMetaWatcher).watcher } - - metaGen = metadata.GetPodMetaGen(commonConfig, (*resourceMetaWatcher).watcher, nodeWatcher, namespaceWatcher, replicaSetWatcher, + metaGen = metadata.GetPodMetaGen(commonConfig, mainWatcher, nodeWatcher, namespaceWatcher, replicaSetWatcher, jobWatcher, addResourceMetadata) return metaGen, nil } else if resourceName == ServiceResource { @@ -520,7 +528,6 @@ func NewResourceMetadataEnricher( metricsetName := base.Name() resourceName := getResourceName(metricsetName) - err = createAllWatchers(client, metricsetName, resourceName, nodeScope, config, log, resourceWatchers) if err != nil { log.Errorf("Error starting the watchers: %s", err) @@ -989,25 +996,29 @@ func (e *enricher) Start(resourceWatchers *Watchers) { } } - // Start the main watcher if not already started or if a restart is needed + // Start the main watcher if not already started + // If there is a restart watcher, stop the old watcher if started and 
start the restart watcher resourceMetaWatcher := resourceWatchers.metaWatchersMap[e.resourceName] if resourceMetaWatcher != nil { - if !resourceMetaWatcher.started { - if err := resourceMetaWatcher.watcher.Start(); err != nil { - e.log.Warnf("Error starting %s watcher: %s", e.resourceName, err) - } else { - resourceMetaWatcher.started = true + if resourceMetaWatcher.restartWatcher != nil { + if resourceMetaWatcher.started { + resourceMetaWatcher.watcher.Stop() } - } else if resourceMetaWatcher.restartWatcher != nil { - resourceMetaWatcher.watcher.Stop() if err := resourceMetaWatcher.restartWatcher.Start(); err != nil { e.log.Warnf("Error restarting %s watcher: %s", e.resourceName, err) } else { resourceMetaWatcher.watcher = resourceMetaWatcher.restartWatcher resourceMetaWatcher.restartWatcher = nil } + } else { + if !resourceMetaWatcher.started { + if err := resourceMetaWatcher.watcher.Start(); err != nil { + e.log.Warnf("Error starting %s watcher: %s", e.resourceName, err) + } else { + resourceMetaWatcher.started = true + } + } } - } } diff --git a/metricbeat/module/kubernetes/util/kubernetes_test.go b/metricbeat/module/kubernetes/util/kubernetes_test.go index 57bbcbdc648..b4e528100a6 100644 --- a/metricbeat/module/kubernetes/util/kubernetes_test.go +++ b/metricbeat/module/kubernetes/util/kubernetes_test.go @@ -78,7 +78,7 @@ func TestCreateWatcher(t *testing.T) { options, err := getWatchOptions(config, false, client, log) require.NoError(t, err) - created, err := createWatcher(NamespaceResource, &kubernetes.Node{}, *options, client, resourceWatchers, config.Namespace) + created, err := createWatcher(NamespaceResource, &kubernetes.Node{}, *options, client, resourceWatchers, config.Namespace, false) require.True(t, created) require.NoError(t, err) @@ -88,7 +88,7 @@ func TestCreateWatcher(t *testing.T) { require.NotNil(t, resourceWatchers.metaWatchersMap[NamespaceResource].watcher) resourceWatchers.lock.Unlock() - created, err = createWatcher(NamespaceResource, &kubernetes.Namespace{}, *options, client, resourceWatchers, config.Namespace) + created, err = createWatcher(NamespaceResource, &kubernetes.Namespace{}, *options, client, resourceWatchers, config.Namespace, true) require.False(t, created) require.NoError(t, err) @@ -98,7 +98,7 @@ func TestCreateWatcher(t *testing.T) { require.NotNil(t, resourceWatchers.metaWatchersMap[NamespaceResource].watcher) resourceWatchers.lock.Unlock() - created, err = createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace) + created, err = createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace, false) require.True(t, created) require.NoError(t, err) @@ -124,7 +124,7 @@ func TestAddToMetricsetsUsing(t *testing.T) { require.NoError(t, err) // Create the new entry with watcher and nil string array first - created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace) + created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace, false) require.True(t, created) require.NoError(t, err) @@ -161,7 +161,7 @@ func TestRemoveFromMetricsetsUsing(t *testing.T) { require.NoError(t, err) // Create the new entry with watcher and nil string array first - created, err := createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace) + created, err := 
createWatcher(DeploymentResource, &kubernetes.Deployment{}, *options, client, resourceWatchers, config.Namespace, false) require.True(t, created) require.NoError(t, err) From 8fcdfc205ebe0221ed7a1046c673c9babfb5280d Mon Sep 17 00:00:00 2001 From: MichaelKatsoulis Date: Thu, 22 Feb 2024 13:24:43 +0200 Subject: [PATCH 53/61] Set resourceMetaWatcher as started --- metricbeat/module/kubernetes/util/kubernetes.go | 1 + 1 file changed, 1 insertion(+) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index f275935a33d..9d393a4d30b 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -1009,6 +1009,7 @@ func (e *enricher) Start(resourceWatchers *Watchers) { } else { resourceMetaWatcher.watcher = resourceMetaWatcher.restartWatcher resourceMetaWatcher.restartWatcher = nil + resourceMetaWatcher.started = true } } else { if !resourceMetaWatcher.started { From a7f709a153a66bdb5369128342bd21c36da6439e Mon Sep 17 00:00:00 2001 From: MichaelKatsoulis Date: Wed, 13 Mar 2024 12:38:50 +0200 Subject: [PATCH 54/61] Add a markdown with detailed explanation of metadata enrichment --- .../module/kubernetes/util/enrichers.md | 112 ++++++++++++++++++ 1 file changed, 112 insertions(+) create mode 100644 metricbeat/module/kubernetes/util/enrichers.md diff --git a/metricbeat/module/kubernetes/util/enrichers.md b/metricbeat/module/kubernetes/util/enrichers.md new file mode 100644 index 00000000000..a77995e3c42 --- /dev/null +++ b/metricbeat/module/kubernetes/util/enrichers.md @@ -0,0 +1,112 @@ +### Kubernetes metrics metadata enrichment + +- The following description is irrelevant to the metadata enrichment that happens due to the `add_kubernetes_metadata` processor and Kubernetes provider. +- The `add_kubernetes_metadata` processor is skipped in cases of Kubernetes module for metrics collection. It is only used in the case of logs collection when the Kubernetes autodiscover provider is not used. +-The Kubernetes autodiscover provider enriches with metadata mainly when it comes to log collection when it is configured. It is by default in the `container_logs` integration in the Elastic Agent. +- Metadata enrichment from the enrichers happens for all the following Kubernetes metricsets: `state_namespace`, `state_node`, `state_deployment`, `state_daemonset`, `state_replicaset`, `state_pod`, `state_container`, `state_job`, `state_cronjob`, `state_statefulset`, `state_service`, `state_persistentvolume`, `state_persistentvolumeclaim`, `state_storageclass`, `pod`, `container`, `node`. +- The reason that these metricsets trigger the metadata enrichment is because of the way they start. +- All `state_metricsets` (except `state_container`) trigger the shared [kubernetes.Init](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/state_daemonset/state_daemonset.go#L45) function when they get initialized. +- [kubernetes.Init](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/helper/kubernetes/state_metricset.go#L44) calls the [New metricsets method](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/helper/kubernetes/state_metricset.go#L80), which returns a new metricset with resource metadata enricher as part of it. 
+- Node, pod, container, and `state_container` metricsets do not trigger `kubernetes.Init`; rather, they implement their own [New method](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/node/node.go#L66) with the enricher as part of the returned metricset.
+- All of the above metricsets trigger the [NewResourceMetadataEnricher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/helper/kubernetes/state_metricset.go#L84), with the only exception of the container and `state_container` metricsets, which trigger the [NewContainerMetadataEnricher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/state_container/state_container.go#L118C23-L118C51).
+- The [NewResourceMetadataEnricher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L501) function is responsible for creating and returning a metadata enricher.
+- The enricher is responsible for the metadata enrichment. For that, resource watchers are used, which are shared between the different metricsets. For example, for the pod metricset, a pod watcher, a namespace watcher and a node watcher are needed by default, plus a job and a replicaset watcher depending on the configuration. These watchers will also be used by other metricsets that require them,
+like state_pod, state_container, node, state_job etc.
+- The shared watchers are stored in the module-level struct [resourceWatchers](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/kubernetes.go#L102). The struct is initialized when the Kubernetes module [starts](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L124).
+- Each time a watcher gets created, it is added to the struct along with some important watcher data. The [struct](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L99) consists of a metaWatchersMap whose key is the name of the resource this watcher is watching (i.e. pod or node) and whose value is a [metaWatcher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L86C6-L86C17) struct.
+- The metaWatcher struct holds all the relevant info for the specific watcher. This info includes:
+  - The `watcher` field, which refers to the actual [kubernetes.Watcher](https://github.com/elastic/elastic-agent-autodiscover/blob/17950767e42c50365ecdd842349649cd70998f22/kubernetes/watcher.go#L47) interface responsible for monitoring the specified resource.
+  - The `started` boolean, which indicates whether the watcher has started or not.
+  - The `metricsetsUsing` field, which is a slice of all the metricsets that are currently sharing this watcher. For example, a pod watcher can be shared between the pod, state_pod, container and state_container metricsets.
+  - The enrichers map. The enricher is a struct that is bound to a metricset and does the actual metadata enrichment for the events of that metricset using an update function.
The metaWatcher holds the enrichers of all the metricsets that are using this watcher, so that it knows what to do when a new watch event is triggered (e.g. a pod gets updated, the pod watcher gets triggered, and it needs to call the update function of all the enrichers it has stored).
+  - The `metadataObjects` field, which functions like a set, consisting of the ids of all the objects (resources) that this watcher has been triggered for. The id looks like `namespace_name-resource_name`. The reason this is needed is explained later.
+  - The `nodeScope` boolean indicates whether this watcher needs to monitor the specific resource only on the node where the beat/agent is running. For example, for the pod metricset, nodeScope is true, meaning that the watcher needs to watch pods only on this node because the metricset collects info only from the local kubelet. This is not the case for state_pod, where nodeScope is false.
+  - The `restartWatcher` field is generally nil, signifying that there is no need for a new, updated watcher. However, if the running watcher needs to be restarted for any reason, restartWatcher will be the new watcher. A reason that a running watcher may have to be restarted is that it was initially created by a metricset (i.e. pod) with nodeScope true and was then requested again by a metricset (state_pod) with nodeScope false. In that case the watch options need to change, so the old watcher must be stopped and the new restartWatcher must be started and take its place. More on that later.
+- The NewResourceMetadataEnricher function is called by a given metricset. Two different configurations are created: [config](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L508), holding metricset-related configuration, and [commonConfig](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L521C2-L521C14), which holds information specific to the metadata enrichment.
+- After the metricset is mapped to a specific resource (i.e. state_pod is mapped to the pod resource), [createAllWatchers](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L531) is called to create all the watchers needed for this resource.
+- createAllWatchers will first create the main watcher for that resource (i.e. for the pod resource it will be the pod watcher) and then will try to create all the extra watchers needed. [getExtraWatchers](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L393C19-L393C35) will return all the extra watchers per resource. For example, a pod resource needs node, namespace and (if configured) job and replicaset watchers.
+- For each watcher that needs to be created, [createWatcher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L383C18-L383C31) is called.
+- The [createWatcher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L253) function takes several arguments.
+  - `resourceName` is needed because it is used as the key in the resourceWatchers map where all the created watchers are stored.
- `resource` is the resource object (i.e. kubernetes.Pod{}) and is required by the [kubernetes.NewNamedWatcher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L284C19-L284C45) function which creates the actual watcher.
+  - `options` are the watch options for a specific watcher. They act like watch filters. For example, a pod watcher can be configured through the options to watch only for resources on a specific node or namespace, or to watch all pods. In our case, watchers triggered by metricsets collecting from kubelet (pod and container metricsets only) are configured with `nodeScope` true, which is then translated to an extra [option](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L229) to watch only resources of that node. On the other hand, all state_metricsets are configured with nodeScope false, as we need the watchers to collect metadata from resources across the entire cluster. For example, when the leader node collecting state_pod metrics tries to enrich a pod found in the metrics with metadata, this pod may be running on a different node than the leader. If the pod watcher were only watching pods on the leader node, it would not find any relevant metadata for that pod.
+  - `client` is the kubernetes client needed for the watcher creation.
+  - `resourceWatchers` is the store that holds the info about the created watchers and their data. resourceWatchers gets updated inside this function if a new watcher gets created.
+  - `namespace` is a configuration option that is reflected in the `options` field mentioned above. If set by the user, the watchers watch for resources only in that namespace.
+  - The `extraWatcher` boolean distinguishes the watchers created as the main watcher for a resource from the ones created as extra watchers. For example, for the pod metricset, the pod watcher is the main one and the node and namespace watchers are extra. This information is important because the extra watchers (which can only be node, namespace, job and replicaset) never have the Node option set in their watch options. We will explain more below.
+- The createWatcher function first checks if a watcher of that resource already [exists](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L276).
+- If it does not exist, it creates it and [stores](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L288C3-L288C18) it in the `resourceWatchers` struct. When creating a new metaWatcher struct, the `started` option is set to false as the watcher has not been started yet (just created). The `metadataObjects`, `enrichers` and `metricsetsUsing` fields are initialized to empty, `restartWatcher` is set to nil, and `nodeScope` is set according to the function input (except when it is an extra watcher, where it is hardcoded to false).
+- If the watcher for that resource already exists, we check if it needs to be restarted. This situation can arise in specific cases.
For instance, if a pod watcher has been created from a metricset like pod or container with nodeScope set to true (watching only from one node), and then another metricset like state_pod or state_container tries to create a pod watcher (which can happen only in the leader node), the watcher already exists. However, if we don't take any action in this scenario, the state_pod watcher would use a watcher that watches only from one node, leading to missing metadata, as described earlier. To address this, we need to update the watcher, mainly changing its watch options (removing options.Node). Unfortunately, a running watcher cannot be updated directly. Instead, we must stop it and create a new one with the correct watch options. The new restartWatcher must be identical to the old watcher, including the same handler function (more on that later), with the only difference being in the watch options. Consequently, the metaWatcher struct of the watcher gets updated with the restartWatcher. The process of stopping the old watcher and starting the new one is handled later on.
+- After the createAllWatchers function creates all the watchers that are needed and updates the resourceWatchers struct, the code flow returns to NewResourceMetadataEnricher.
+- Now, let's delve into the creation of metadata generators and handler functions. Watchers, on their own, are responsible for subscribing to the Kubernetes API and monitoring changes in the resources they are configured to watch. Their primary function is to update their internal cache or store. However, to determine what actions to take when a change occurs, we rely on the so-called event handlers.
+- Let's delve into the details of metadata generation for various resources. There is a difference between pod/service resources and all the rest.
+In more detail:
+  - For resources like pods or services, the [createMetadataGenSpecific](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L540) function is invoked to create the metadata generator. The pod metadata generator and the service metadata generator are structs defined in the [elastic-agent-autodiscover](https://github.com/elastic/elastic-agent-autodiscover/blob/e47c0f013820d394460a21b688e45de1ac7628de/kubernetes/metadata/pod.go#L40) repo; they implement a [metagen interface](https://github.com/elastic/elastic-agent-autodiscover/blob/e47c0f013820d394460a21b688e45de1ac7628de/kubernetes/metadata/metadata.go#L39) and are designed to utilize the necessary [watchers](https://github.com/elastic/elastic-agent-autodiscover/blob/e47c0f013820d394460a21b688e45de1ac7628de/kubernetes/metadata/metadata.go#L100) to collect ([Generate](https://github.com/elastic/elastic-agent-autodiscover/blob/e47c0f013820d394460a21b688e45de1ac7628de/kubernetes/metadata/pod.go#L87)) metadata for a specific resource. For example, for a pod named redis in namespace default, these generators leverage the pod watcher, collect all node metadata using the node watcher's store and all namespace metadata using the namespace watcher's store. It is important to note that these generators do not make any direct API calls for metadata collection; that work has already been done by the watchers, which update their stores with the updated resources and their metadata. The generators retrieve the metadata for the required resource from the watcher's store. The metadata generation is triggered by calling the Generate method.
All that is needed for the metadata generation is for the Generate method to be triggered.
+  - For all other resources (excluding pods or services), [createMetadataGen](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L416C6-L416C23) is called.
+  Depending on whether the specific resource is namespaced (e.g., deployment) or not (e.g., node), either [NewNamespaceAwareResourceMetadataGenerator](https://github.com/elastic/elastic-agent-autodiscover/blob/4554e51c00911209f8dfd463f52bc17f65e3f18f/kubernetes/metadata/resource.go#L62) or [NewResourceMetadataGenerator](https://github.com/elastic/elastic-agent-autodiscover/blob/4554e51c00911209f8dfd463f52bc17f65e3f18f/kubernetes/metadata/resource.go#L44C6-L44C34) is invoked. Both functions create a [Resource](https://github.com/elastic/elastic-agent-autodiscover/blob/4554e51c00911209f8dfd463f52bc17f65e3f18f/kubernetes/metadata/resource.go#L37C6-L37C14) struct that includes methods for metadata generation for a given resource kind, particularly [Generate](https://github.com/elastic/elastic-agent-autodiscover/blob/4554e51c00911209f8dfd463f52bc17f65e3f18f/kubernetes/metadata/resource.go#L78). NewNamespaceAwareResourceMetadataGenerator additionally utilizes [NewNamespaceMetadataGenerator](https://github.com/elastic/elastic-agent-autodiscover/blob/4554e51c00911209f8dfd463f52bc17f65e3f18f/kubernetes/metadata/namespace.go#L37C6-L37C14) to add namespace metadata from the relevant watcher's store. In summary, both createMetadataGenSpecific and createMetadataGen create metadata generators for each resource, implementing the Generate method to fetch all required metadata.
+- At this stage, the focus is on crafting event handlers that make use of the previously defined metadata generators. The goal is to establish a handler function assigned to a watcher. When the watcher is triggered, either due to the appearance of a new pod or the update of an existing one, the assigned handler function is executed. This handler, in turn, invokes the metadata generator associated with that specific resource, utilizing its Generate method to collect relevant metadata. The gathered metadata is then stored in a metadata map, which is part of an enricher struct bound to a metricset. To elaborate further, when a metricset later gathers metrics from kubelet or ksm, the events are enriched with metadata. This metadata is retrieved from the map within the enricher's struct. Consequently, the enricher acts as a bridge, facilitating the integration of metadata collected by the event handler into the metrics collected by the metricset.
+- In more detail, [updateFunc](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L551) is used as the resource watcher's add (i.e. a new pod appears) and update (i.e. an existing pod got a new label) handler. It is responsible for generating the metadata for a detected resource. This function is common for all kinds of resources, but the resource type is checked when the function gets called. It returns a map[string]mapstr.M{} where the key is an id in the form of [namespace:resource_name](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L556) (i.e. default:redis) in case the resource is namespaced.
If it is not, the id is just the resource's [name](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L553). The value of the map is the result of the Generate method of each resource's metadata generator. For example, it generates metadata for a [pod resource](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L561) differently than it would for a [node](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L579). In essence, this function is a versatile handler that, based on the type of resource, extracts relevant metadata using the corresponding metadata generator. The resulting map, structured with resource identifiers and their associated metadata, serves as a crucial dataset for enriching events with context when metrics are later collected.
+- [deleteFunc](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L609C2-L609C12) has a straightforward purpose: it serves as the delete handler for the resource watcher. When the function is called, it implies that a resource deletion has been detected. Upon detecting a resource deletion, the deleteFunc removes the corresponding ID key from the map. After deleting the ID key, the function returns the ID of the deleted resource. In essence, this function ensures that, upon detecting the deletion of a resource, the associated metadata is appropriately removed from the dataset, maintaining an accurate representation of the current state of resources.
+- [indexFunc](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L627C2-L627C11) is designed to extract the ID of a resource from a metricset event. For example, when the pod metricset constructs an event to be published to ES, it then needs to enrich it with metadata. This event is related to a specific pod in a specific namespace. The function parses the metricset event to extract the relevant information needed to construct the resource ID. The ID is typically constructed based on the namespace and resource name associated with the event. The ID is used to retrieve the corresponding metadata from the enricher's map.
+- After the functions are created, [buildMetadataEnricher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L642) is called to create an [enricher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L67) struct. The enricher struct is tied to a specific metricset: the one that called the NewResourceMetadataEnricher function. In summary, buildMetadataEnricher is a crucial part of the architecture, creating the enricher struct that facilitates the association of metadata with metricset events.
+- buildMetadataEnricher builds and returns a metadata enricher for a given metricset.
The enricher struct implements a [Start](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L979) method that initiates the watchers associated with the enricher (metricset), a [Stop](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L1028) method that terminates the watchers associated with the enricher and an [Enrich](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L1055C20-L1055C26) method that is responsible for enriching metricset events with metadata.. +- Relationship Between Enricher and Watcher: + - The relationship between enrichers and watchers is not one-to-one but rather many-to-one. + - For a specific resource kind (e.g., pod or node), there is only one watcher. + - However, multiple metricsets may want to use the same watcher (e.g., pod, state_pod, state_container using the pod watcher). + - The enrichers map in the [metaWatcher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L92) struct serves as a connection between watchers and multiple metricset enrichers. +- This architecture efficiently handles the coordination between watchers and metricsets, ensuring that the appropriate metadata is collected and enriched for metricset events. +- buildMetadataEnricher function also [appends](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L876) the new enricher to the watcher. +- It also updates the [add](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L902), [update](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L927) and [delete](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L949) event handlers of the watcher to retrieve the metadata of all enrichers associated with that watcher. +- Event Handler Mechanism: + - The watcher's add, update, and delete event handlers (addFunc, updateFunc, deleteFunc) are updated to handle events for multiple enrichers. + - When a new object(See obj in [line](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L902C18-L902C21)) is created or updated, these handlers gets triggered by the watcher internal mechanism. + - They iterate over the enrichers map of the watcher and trigger the [UpdateFunc](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L919) of each enricher. + - The UpdateFunc calls the Generate method of the metricset's metadata generator, obtaining the metadata map (newMetadataEvents). + - The obtained metadata are then added to the enricher's metadata [map](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L922C7-L922C25) (enricher.metadata). 
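+
+The fan-out described above can be sketched as follows (an illustrative sketch with simplified stand-in types, not the real beats structs): the watcher's add/update handler records the object id and then runs every registered enricher's update function, merging the generated metadata into that enricher's map.
+
+```go
+// Illustrative sketch of the handler fan-out; simplified types, not the actual beats code.
+package main
+
+import (
+	"fmt"
+	"sync"
+)
+
+// enricher keeps a per-metricset metadata map plus the function that generates metadata for one object.
+type enricher struct {
+	sync.Mutex
+	metadata   map[string]map[string]interface{}
+	updateFunc func(obj interface{}) map[string]map[string]interface{} // wraps the metadata generator's Generate
+}
+
+// metaWatcher associates one shared watcher with every enricher (metricset) that uses it.
+type metaWatcher struct {
+	enrichers       map[string]*enricher // keyed by metricset name
+	metadataObjects map[string]bool      // ids of objects this watcher has already seen
+}
+
+// onAddOrUpdate is what the watcher's add/update handler does: record the object id,
+// then refresh the metadata map of every registered enricher.
+func (w *metaWatcher) onAddOrUpdate(id string, obj interface{}) {
+	w.metadataObjects[id] = true
+	for _, e := range w.enrichers {
+		e.Lock()
+		for k, v := range e.updateFunc(obj) {
+			e.metadata[k] = v
+		}
+		e.Unlock()
+	}
+}
+
+func main() {
+	podEnricher := &enricher{
+		metadata: map[string]map[string]interface{}{},
+		updateFunc: func(obj interface{}) map[string]map[string]interface{} {
+			return map[string]map[string]interface{}{"default:redis": {"pod.name": obj}}
+		},
+	}
+	w := &metaWatcher{
+		enrichers:       map[string]*enricher{"pod": podEnricher},
+		metadataObjects: map[string]bool{},
+	}
+	w.onAddOrUpdate("default:redis", "redis")
+	fmt.Println(podEnricher.metadata["default:redis"])
+}
+```
+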
- Metadata Update Process:
+  - The buildMetadataEnricher function ensures that the watcher's event handlers are properly configured to update metadata for all associated enrichers.
+  - This mechanism guarantees that when an event for a specific metricset arrives, the metadata map of the corresponding enricher is up to date.
+  - The enricher's Enrich method can then utilize this metadata to enrich metricset events with the relevant information.
+- But before doing that, there is an extra [step](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L878-L896). There is a scenario where existing resources (such as pods) trigger events before the corresponding enrichers (like state_pod) are initialized. To better understand this, let's think of the following scenario:
+  - A new pod metricset gets initialized to start collecting kubelet metrics.
+  - NewResourceMetadataEnricher is called to create a new enricher for pod metadata.
+  - It creates a pod watcher.
+  - buildMetadataEnricher is called and the watcher.enrichers map includes only the pod metricset's enricher.
+  - The pod watcher event handler, when triggered, will only call the updateFunc of the one enricher it has.
+  - A new pod appears, the watcher handler function gets triggered and it executes the updateFunc of only the pod metricset's enricher.
+  - So the pod metricset's enricher's metadata map will have the metadata for all pod resources.
+  - The state_pod metricset gets initialized. This happens on the leader node. First the pod metricset starts and then state_pod.
+  - A new enricher for state_pod gets created; it uses the same existing pod watcher. Inside buildMetadataEnricher the watcher.enrichers map gets updated to include the state_pod enricher as well.
+  - So whenever a new pod appears or gets updated, both enrichers' updateFunc functions will be triggered, so both enrichers' metadata maps will be up to date.
+  - But what happens with pods that triggered a watcher event in between the pod and state_pod initialization, i.e. after the pod metricset got initialized and before state_pod got initialized?
+  - This is very common. The moment a watcher starts watching (from the pod metricset) it immediately gets notified for all pods in the cluster and executes whatever its updateFunc says; at that point, that is only the pod enricher's updateFunc.
+  - So when the watcher got updated with the state_pod enricher, the events for the existing pods had already arrived, and at that point the watcher did not call the state_pod enricher's updateFunc.
+  - The outcome is that the state_pod enricher's metadata map won't have the existing pods' metadata, because the metadata Generate method of that enricher was never called.
+  - So we need a way to handle those cases.
+- Watcher Initialization and MetadataObjects List:
+  - Each `metaWatcher` in the `metaWatchersMap` contains a crucial field called `metadataObjects`.
+  - This list is a record of all the object IDs (resource IDs) for which the watcher has been triggered.
+- Updating MetadataObjects During Events:
+  - Whenever the `AddFunc` or `UpdateFunc` of the watcher is triggered (e.g., when a new object is detected or an existing one is updated), the `metadataObjects` field is updated.
+  - The ID of the detected object is [added](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L915) to the `metadataObjects` list.
- Proactive Synchronization with Existing Enrichers:
+  - Before updating the watcher's event handlers with new enrichers in the `buildMetadataEnricher` function, the existing `metadataObjects` list is [iterated](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L883C43-L883C58).
+- Enricher UpdateFunc Execution:
+  - For each object ID in the `metadataObjects` list, the corresponding object is retrieved from the watcher's store.
+  - The `UpdateFunc` of the new enricher (the one that called buildMetadataEnricher, e.g., state_pod) is then executed for each existing object, ensuring that the metadata map of the new enricher is updated with metadata for all existing objects (see the sketch below).
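+
+A minimal sketch of this replay step (again with simplified, hypothetical types and names, not the actual beats code): when a late enricher registers with an already running watcher, the ids the watcher has seen so far are replayed through the new enricher's update function so its metadata map catches up with existing resources.
+
+```go
+// Illustrative sketch of the "replay" step; simplified types, not the actual beats code.
+package main
+
+import "fmt"
+
+type enricher struct {
+	metadata   map[string]string
+	updateFunc func(obj string) map[string]string
+}
+
+// registerEnricher sketches what buildMetadataEnricher does for a watcher that is already
+// running: before wiring the new enricher into the handlers, replay every object id the
+// watcher has already seen so the new enricher's metadata map is not missing existing resources.
+func registerEnricher(
+	store map[string]string, // stand-in for the watcher's store
+	metadataObjects map[string]bool, // ids the watcher has already been triggered for
+	newEnricher *enricher,
+) {
+	for id := range metadataObjects {
+		if obj, ok := store[id]; ok {
+			for k, v := range newEnricher.updateFunc(obj) {
+				newEnricher.metadata[k] = v
+			}
+		}
+	}
+}
+
+func main() {
+	store := map[string]string{"default:redis": "redis pod object"}
+	seen := map[string]bool{"default:redis": true}
+	statePod := &enricher{
+		metadata:   map[string]string{},
+		updateFunc: func(obj string) map[string]string { return map[string]string{"default:redis": obj} },
+	}
+	registerEnricher(store, seen, statePod)
+	fmt.Println(statePod.metadata) // existing pod metadata is now present for the late enricher
+}
+```
+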
Remember that the index method was created in the NewResourceMetadataEnricher function. +- So then the resource's metadata can be retrieved from the enricher's [metadata map](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L1060) and added to the event. Keep in mind that it was the watcher's event handler functions that had called all the relevant enrichers updateFunc to fill each enrichher's metadata map. +- Lastly, we have already explained in details the mechanism of NewResourceMetadataEnricher. However, for container and state_container metricsets [NewContainerMetadataEnricher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L659) is called instead. +- The functionality is the same as NewResourceMetadataEnricher but as the resource is known(containers), the function operates as the resource was Pod. The reason is that we first need to collect pods and then get their containers. So it will only create pod related watchers and metadata generators. +- The difference comes to the creation of the enricher's updateFunc(https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L701). In there, we need to first call pod metadata generator's [Generate](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L708) method (as it will also include the pod's containers metadata). Then we iterate the pod's [containers](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L723) and construct the [ID](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L752) of each container in the form of namespace:pod:container. +- The container metadata are also enriched a bit more with useful fields like `container.id`, `container.runtime`. + + From 21f7503d45888aad5bf42d0d2f5e30f4684f4438 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constan=C3=A7a=20Manteigas?= <113898685+constanca-m@users.noreply.github.com> Date: Mon, 25 Mar 2024 13:25:20 +0100 Subject: [PATCH 55/61] Update metricbeat/module/kubernetes/util/enrichers.md Co-authored-by: Tetiana Kravchenko --- metricbeat/module/kubernetes/util/enrichers.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metricbeat/module/kubernetes/util/enrichers.md b/metricbeat/module/kubernetes/util/enrichers.md index a77995e3c42..f523766662d 100644 --- a/metricbeat/module/kubernetes/util/enrichers.md +++ b/metricbeat/module/kubernetes/util/enrichers.md @@ -35,7 +35,7 @@ like state_pod, state_container, node, state_job etc. - `namespace` is a configuration option that reflects to the `options` field we mentioned above. If set by the user, then watchers are watching for resources only in that namespace. - `extraWatcher` boolean sets apart the watchers that are created as main watcher for a resource and the ones that are created as an extra watcher. For example for pod metricset, pod watcher is the main and node, namespace watchers are extra. This information is important, because the extra watchers (can only be node, namespace, job and replicaset) will never have the Node option set in their watch options. We will explain more. 
- The createWatcher function first checks if a watcher of that resource already [exists](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L276) -- If it does not exist it creates it and [stores](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L288C3-L288C18) it in the `resourceWatchers` struct. When creating a new metaWatcher struct, thet `started` options set to false as the watcher has not been started yet(just created). The `metadataObjects`, `enrichers` and `metricsetsUsing` initiliazied to empty, the `restartWatcher` set to nil and the `nodeScope` according to the function input(except from cases when it is an extra watcher where it is hardcoded to false). +- If it does not exist it creates it and [stores](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L288C3-L288C18) it in the `resourceWatchers` struct. When creating a new metaWatcher struct, then `started` options set to false as the watcher has not been started yet(just created). The `metadataObjects`, `enrichers` and `metricsetsUsing` initiliazied to empty, the `restartWatcher` set to nil and the `nodeScope` according to the function input(except from cases when it is an extra watcher where it is hardcoded to false). - If the watcher for that resource already exists, we check if it needs to be restarted. This situation can arise in specific cases. For instance, if a pod watcher has been created from a metricset like pod or container with nodeScope set to true (watching only from one node), and then another metricset like state_pod or state_container tries to create a pod watcher (which can happen only in the leader node), the watcher already exists. However, if we don't take any action in this scenario, the state_pod watcher would use a watcher that watches only from one node, leading to missing metadata, as described earlier. To address this, we need to update the watcher, mainly changing its watch options (removing options.Node). Unfortunately, a running watcher cannot be updated directly. Instead, we must stop it and create a new one with the correct watch options. The new restartWatcher must be identical to the old watcher, including the same handler function (more on that later), with the only difference being in the watch options. Consequently, the metaWatcher struct of the watcher gets updated with the restartWatcher. The process of stopping the old watcher and starting the new one is handled later on. - After createAllWatchers function creates all the watchers that are needed and update the resourceWatchers struct, the code flow returns to NewResourceMetadataEnricher - Now, let's delve into the creation of metadata generators and handler functions. Watchers, on their own, are responsible for subscribing to the Kubernetes API and monitoring changes in the resources they are configured to watch. Their primary function is to update their internal cache or store. However, to determine what actions to take when a change occurs, we rely on the so-called event handlers. 
From 07819ebd412d8a12905701bb6f832a814f5eca22 Mon Sep 17 00:00:00 2001 From: MichaelKatsoulis Date: Wed, 27 Mar 2024 12:42:02 +0200 Subject: [PATCH 56/61] Update code comments and remove enrichers.md --- .../module/kubernetes/util/enrichers.md | 112 ---------------- .../module/kubernetes/util/kubernetes.go | 122 +++++++++++------- 2 files changed, 77 insertions(+), 157 deletions(-) delete mode 100644 metricbeat/module/kubernetes/util/enrichers.md diff --git a/metricbeat/module/kubernetes/util/enrichers.md b/metricbeat/module/kubernetes/util/enrichers.md deleted file mode 100644 index f523766662d..00000000000 --- a/metricbeat/module/kubernetes/util/enrichers.md +++ /dev/null @@ -1,112 +0,0 @@ -### Kubernetes metrics metadata enrichment - -- The following description is irrelevant to the metadata enrichment that happens due to the `add_kubernetes_metadata` processor and Kubernetes provider. -- The `add_kubernetes_metadata` processor is skipped in cases of Kubernetes module for metrics collection. It is only used in the case of logs collection when the Kubernetes autodiscover provider is not used. --The Kubernetes autodiscover provider enriches with metadata mainly when it comes to log collection when it is configured. It is by default in the `container_logs` integration in the Elastic Agent. -- Metadata enrichment from the enrichers happens for all the following Kubernetes metricsets: `state_namespace`, `state_node`, `state_deployment`, `state_daemonset`, `state_replicaset`, `state_pod`, `state_container`, `state_job`, `state_cronjob`, `state_statefulset`, `state_service`, `state_persistentvolume`, `state_persistentvolumeclaim`, `state_storageclass`, `pod`, `container`, `node`. -- The reason that these metricsets trigger the metadata enrichment is because of the way they start. -- All `state_metricsets` (except `state_container`) trigger the shared [kubernetes.Init](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/state_daemonset/state_daemonset.go#L45) function when they get initialized. -- [kubernetes.Init](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/helper/kubernetes/state_metricset.go#L44) calls the [New metricsets method](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/helper/kubernetes/state_metricset.go#L80), which returns a new metricset with resource metadata enricher as part of it. -- Node, pod, container, and `state_container` metricsets do not trigger the `kubernetes.Init` rather they implement their own [New method](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/node/node.go#L66) with the enricher as part of the metricsets returned. -- All of the above metricsets trigger the [NewResourceMetadataEnricher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/helper/kubernetes/state_metricset.go#L84) with only the exception of the container and `state_container` metricsets which trigger the [NewContainerMetadataEnricher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/state_container/state_container.go#L118C23-L118C51). 
-- The [NewResourceMetadataEnricher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L501) is the function responsible for creating and returning a metadata enricher. -- The enricher is responsible for the metadata enrichment. For that, resource watchers are used which are shared between the different metricsets. For example for pod metricset, a pod watcher, a namespace and node watcher are by default needed in addition to job and replicaset watcher based on the configuration. These watchers will be also used by other metricsets that require them -like state_pod, state_container, node, state_job etc. -- The shared watchers are stored in the module-level struct [resourceWatchers](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/kubernetes.go#L102). The struct is initialized when the Kubernetes module [starts](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L124). -- Each time a watcher gets created it is added to the struct along with some watcher important data. The [struct](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L99) consists of a metaWatchersMap which has as key the name of the resource this watcher is watching(i.e. pod or node) and as value a [metaWatcher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L86C6-L86C17) struct. -- The metaWatcher struct holds all the relevant info for the specific watcher. These info include - - The `watcher` refers to the actual [kubernetes.Watcher](https://github.com/elastic/elastic-agent-autodiscover/blob/17950767e42c50365ecdd842349649cd70998f22/kubernetes/watcher.go#L47) interface responsible for monitoring the specified resource. - - The `started` boolean that holds the information if the watcher has started or not - - The `metricsetsUsing` field which is a slice with all the metricsets that are currently sharing this watcher. For example a pod watcher can be shared between pod, state_pod, container, state_container metricsets - - The enrichers map. The enricher is a struct that is bound to a metricset and does the actual metadata enrichment for the events of that metricset using an update function. The metaWatcher holds the information of all the enrichers of the metricsets that are using this watcher, so that it knows what to do in case a new watch event is triggered(i.e a pod gets updated. The pod watcher gets triggered. It needs to trigger the update function of all the enrichers that it has stored) - - The `metadataObjects` field which functions like a set, consisting of all the ids of the objects(resources) that this watcher has been triggered for. The id looks like `namespace_name-resource_name`. The reason this is needed will be explained later. - - `nodeScope` boolean indicates whether this watcher needs to monitor the specific resource only on the node where the beat/agent is running(For example, for pod metricset, nodescope is true meaning that the watcher needs to watch pods only in this node because the metricsets colletcs info only from local kubelet. This is not the case for state_pod where nodescope is false). 
- - The `restartWatcher` field is generally nil, signifying that there is no need for a new updated watcher.However, if the running watcher needs to be restarted for any reason, restartWatcher will be the new watcher. A reason that a running watcher may have to be restarted is in case it has been initially triggered by a metricset(i.e. pod) with nodescope true and then it was triggered again by metricsets(state_pod) with nodescope false. In that case the watch options need to change, so the old watcher must be stopped and the new restartWatcher must be started and take its place. More on that later. -- NewResourceMetadataEnricher function is called by a given metricset. Two different configurations are created. [config](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L508) holding metricset related configuration and [commonConfig](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L521C2-L521C14) which holds information specific to the metadata enrichment. -- After the metricset is mapped to a specifig resource (i.e state_pod is mapped to pod resource) [createAllWatchers](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L531) is called to create all needed watchers for this resource. -- createAllWatchers will first create the main watcher for that resource(i.e for pod resource it will be pod watcher) and then will try to create all the extra watchers needed. [getExtraWatchers](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L393C19-L393C35) will return all the extra watchers per resource. For example a pod resource needs node, namespace and (if configured) job and replicaset watchers. -- For each watcher that needs to be created [createWatcher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L383C18-L383C31) is called. -- [CreateWatcher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L253) function takes several arguments. - - `resourceName` is needed so that it will be the key in the resourceWatchers map where all the created watchers are stored. - - `resource` is the resource object(i.e Kubernetes.Pod{}) and is a requirement of [kubernetes.NewNamedWatcher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L284C19-L284C45) function which creates the actual watcher. - - `options` are the watch options for a specific watcher. They act like watching filters. For example a pod watcher can be configured through the options to watch only for resources on a specific node or namespace, or watch all pods. In our case, watchers triggered by metricsets collecting from kubelet(pod and container metricsets only) are configured with `nodescope` true which is then translated to an extra [option](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L229) to watch only resources of that node. 
On the other hand, all state_metricsets are configured with nodescope false, as we need the watchers to collect metadata from resources across the entire cluster. For example the leader node collecting state_pod metrics, when it will try to enrich a pod found in the metrics with metadata, this pod may be running on a different node than the leader. So in case the pod watcher was only watching for pods in the leader node it would not have found any relevant metadata for that pod. - - `client` is the kubernetes client needed for the watcher creation - - `resourceWatchers` is the store that holds the info about the created watchers and their data. resourceWatchers get updated inside this function if a new watcher gets created. - - `namespace` is a configuration option that reflects to the `options` field we mentioned above. If set by the user, then watchers are watching for resources only in that namespace. - - `extraWatcher` boolean sets apart the watchers that are created as main watcher for a resource and the ones that are created as an extra watcher. For example for pod metricset, pod watcher is the main and node, namespace watchers are extra. This information is important, because the extra watchers (can only be node, namespace, job and replicaset) will never have the Node option set in their watch options. We will explain more. -- The createWatcher function first checks if a watcher of that resource already [exists](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L276) -- If it does not exist it creates it and [stores](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L288C3-L288C18) it in the `resourceWatchers` struct. When creating a new metaWatcher struct, then `started` options set to false as the watcher has not been started yet(just created). The `metadataObjects`, `enrichers` and `metricsetsUsing` initiliazied to empty, the `restartWatcher` set to nil and the `nodeScope` according to the function input(except from cases when it is an extra watcher where it is hardcoded to false). -- If the watcher for that resource already exists, we check if it needs to be restarted. This situation can arise in specific cases. For instance, if a pod watcher has been created from a metricset like pod or container with nodeScope set to true (watching only from one node), and then another metricset like state_pod or state_container tries to create a pod watcher (which can happen only in the leader node), the watcher already exists. However, if we don't take any action in this scenario, the state_pod watcher would use a watcher that watches only from one node, leading to missing metadata, as described earlier. To address this, we need to update the watcher, mainly changing its watch options (removing options.Node). Unfortunately, a running watcher cannot be updated directly. Instead, we must stop it and create a new one with the correct watch options. The new restartWatcher must be identical to the old watcher, including the same handler function (more on that later), with the only difference being in the watch options. Consequently, the metaWatcher struct of the watcher gets updated with the restartWatcher. The process of stopping the old watcher and starting the new one is handled later on. 
-- After createAllWatchers function creates all the watchers that are needed and update the resourceWatchers struct, the code flow returns to NewResourceMetadataEnricher -- Now, let's delve into the creation of metadata generators and handler functions. Watchers, on their own, are responsible for subscribing to the Kubernetes API and monitoring changes in the resources they are configured to watch. Their primary function is to update their internal cache or store. However, to determine what actions to take when a change occurs, we rely on the so-called event handlers. -- Let's delve into the details of metadata generation for various resources. There is a difference between pod/service resources and all the rest. -In more details: - - For resources like pods or services, the [createMetadataGenSpecific](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L540) function is invoked to create the metadata generator. The pod metadata generator or the service metadata generator, are structs defined in [elastic-agent-autodiscover](https://github.com/elastic/elastic-agent-autodiscover/blob/e47c0f013820d394460a21b688e45de1ac7628de/kubernetes/metadata/pod.go#L40) repo and implement a [metagen interface](https://github.com/elastic/elastic-agent-autodiscover/blob/e47c0f013820d394460a21b688e45de1ac7628de/kubernetes/metadata/metadata.go#L39)and are designed to utilize the necessary [watchers](https://github.com/elastic/elastic-agent-autodiscover/blob/e47c0f013820d394460a21b688e45de1ac7628de/kubernetes/metadata/metadata.go#L100) to collect([Generate](https://github.com/elastic/elastic-agent-autodiscover/blob/e47c0f013820d394460a21b688e45de1ac7628de/kubernetes/metadata/pod.go#L87)) metadata for a specific resource. For example for pod named redis in namespace default, these generators leverage the pod watcher, collect all node metadata using the node watcher store and all namespace metadata using the namespacewatcher store. It is important to note that these generators do not make any direct API calls for metadata collection. This has been done by the watchers who update their stores with the updated resources and their metadata. The generators retrieve the metadata for the required resource from the watcher's store. The metadata generation is triggered by calling the Generate method.. All that is needed for the metadata generation, is the Generate method to be triggered. - - For all other resources (excluding pods or services), [createMetadataGen](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L416C6-L416C23) is called. - Depending on whether the specific resource is namespaced (e.g., deployment) or not (e.g., node), either[NewNamespaceAwareResourceMetadataGenerator](https://github.com/elastic/elastic-agent-autodiscover/blob/4554e51c00911209f8dfd463f52bc17f65e3f18f/kubernetes/metadata/resource.go#L62) or [NewResourceMetadataGenerator](https://github.com/elastic/elastic-agent-autodiscover/blob/4554e51c00911209f8dfd463f52bc17f65e3f18f/kubernetes/metadata/resource.go#L44C6-L44C34) is invoked. 
Both functions create a [Resource](https://github.com/elastic/elastic-agent-autodiscover/blob/4554e51c00911209f8dfd463f52bc17f65e3f18f/kubernetes/metadata/resource.go#L37C6-L37C14) struct that includes methods for metadata generation for a given resource kind, particularly the([Generate](https://github.com/elastic/elastic-agent-autodiscover/blob/4554e51c00911209f8dfd463f52bc17f65e3f18f/kubernetes/metadata/resource.go#L78)). NewNamespaceAwareResourceMetadataGenerator additionally utilizes [NewNamespaceMetadataGenerator](https://github.com/elastic/elastic-agent-autodiscover/blob/4554e51c00911209f8dfd463f52bc17f65e3f18f/kubernetes/metadata/namespace.go#L37C6-L37C14)namespace metadata from the relevant watcher's store. In summary, both createMetadataGenSpecific and createMetadataGen create metadata generators for each resource, implementing the Generate method to fetch all required metadata. -- At this stage, the focus is on crafting event handlers that make use of the previously defined metadata generators. The goal is to establish a handler function assigned to a watcher. When the watcher is triggered, either due to the appearance or update of a new pod, the assigned handler function is executed. This handler, in turn, invokes the metadata generator associated with that specific resource, utilizing its Generate method to collect relevant metadata. The gathered metadata is then stored in a metadata map, which is part of an enricher struct bound to a metricset. To elaborate further, when a metricset later gathers metrics from kubelet or ksm, the events are enriched with metadata. This metadata is retrieved from the map within the enricher's struct. Consequently, the enricher acts as a bridge, facilitating the integration of metadata collected by the event handler into the metrics collected by the metricset. -- In more details, updateFunc(https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L551) will be used as the resource watcher's add(i.e. new pod appears) and update(i.e. existing pod got a new label) handler. It is responsible for generating the metadata for a detected resource. This function is common for all kind or resources but the resource type is checked when the function get's called. It actually returns a map[string]mapstr.M{} where the key is an id in the form of [namespace:resource_name](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L556)(i.e default:redis) in case the resource is namespaced. If it is not then the id is just the resource's [name](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L553). The value of the map is the result of the Generate method of each resource's metadata generator. For example, it generates metadata for a [pod resource](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L561) differently than it would for a [node](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L579). In essence, this function is a versatile handler that, based on the type of resource, extracts relevant metadata using the corresponding metadata generator. 
The resulting map, structured with resource identifiers and their associated metadata, serves as a crucial dataset for enriching events with context when metrics are later collected. -- [deleteFunc](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L609C2-L609C12)has a straightforward purpose: it serves as the delete handler for the resource watcher. When the function is called, it implies that a resource deletion has been detected. Upon detecting a resource deletion, the deleteFunc removes the corresponding ID key from the map. After deleting the ID key, the function returns the ID of the deleted resource. In essence, this function ensures that, upon detecting the deletion of a resource, the associated metadata is appropriately removed from the dataset, maintaining an accurate representation of the current state of resources. -- [indexFunc](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L627C2-L627C11) is designed to extract the ID of a resource from a metricset event. For example, when pod metricset constructs an event to be published to ES, it then needs to enrich it with metadata. This event is related to a specific pod in a specific namespace. The function parses the metricset event to extract the relevant information needed to construct the resource ID. The ID is typically constructed based on the namespace and resource name associated with the event. The ID is used to retrieve the corresponding metadata from the enricher's map. -- After the functions are created [buildMetadataEnricher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L642) is called to create an [enricher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L67) struct. The enricher struct is tied to a specific metricset. The one metricset that called the NewResourceMetadataEnricher function. In summary, buildMetadataEnricher is a crucial part of the architecture, creating the enricher struct that facilitates the association of metadata with metricset events. -- buildMetadataEnricher builds and returns a metadata enricher for a given metricset. The enricher struct implements a [Start](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L979) method that initiates the watchers associated with the enricher (metricset), a [Stop](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L1028) method that terminates the watchers associated with the enricher and an [Enrich](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L1055C20-L1055C26) method that is responsible for enriching metricset events with metadata.. -- Relationship Between Enricher and Watcher: - - The relationship between enrichers and watchers is not one-to-one but rather many-to-one. - - For a specific resource kind (e.g., pod or node), there is only one watcher. - - However, multiple metricsets may want to use the same watcher (e.g., pod, state_pod, state_container using the pod watcher). 
- The enrichers map in the [metaWatcher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L92) struct serves as the connection between a watcher and the multiple metricset enrichers that use it.

This architecture handles the coordination between watchers and metricsets efficiently, ensuring that the appropriate metadata is collected and used to enrich metricset events.

The buildMetadataEnricher function also [appends](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L876) the new enricher to the watcher. It also updates the [add](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L902), [update](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L927) and [delete](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L949) event handlers of the watcher so that they update the metadata of all enrichers associated with that watcher.

Event Handler Mechanism:
- The watcher's add, update, and delete event handlers (addFunc, updateFunc, deleteFunc) are updated to handle events for multiple enrichers.
- When a new object (see obj in this [line](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L902C18-L902C21)) is created or updated, these handlers get triggered by the watcher's internal mechanism.
- They iterate over the enrichers map of the watcher and trigger the [UpdateFunc](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L919) of each enricher.
- The UpdateFunc calls the Generate method of the metricset's metadata generator, obtaining the metadata map (newMetadataEvents).
- The obtained metadata is then added to the enricher's metadata [map](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L922C7-L922C25) (enricher.metadata).

Metadata Update Process:
- The buildMetadataEnricher function ensures that the watcher's event handlers are properly configured to update metadata for all associated enrichers.
- This mechanism guarantees that when an event for a specific metricset arrives, the metadata map of the corresponding enricher is up to date.
- The enricher's Enrich method can then utilize this metadata to enrich metricset events with the relevant information.

But before doing that, there is an extra [step](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L878-L896). There is a scenario where existing resources (such as pods) trigger events before the corresponding enrichers (like state_pod) are initialized.
To better understand this, consider the following scenario:
- A new pod metricset gets initialized to start collecting kubelet metrics.
- NewResourceMetadataEnricher is called to create a new enricher for pod metadata.
- It creates a pod watcher.
- buildMetadataEnricher is called and the watcher.enrichers map includes only the pod metricset's enricher.
- When triggered, the pod watcher's event handler will only call the updateFunc of the one enricher it has.
- A new pod appears, the watcher's handler function gets triggered, and it executes the updateFunc of only the pod metricset's enricher.
- So the pod metricset's enricher's metadata map will have the metadata for all pod resources.
- The state_pod metricset gets initialized. This happens on the leader node: first the pod metricset starts and then state_pod.
- A new enricher for state_pod gets created; it uses the same existing pod watcher. Inside buildMetadataEnricher the watcher.enrichers map gets updated to include the state_pod enricher as well.
- So whenever a new pod appears or gets updated, both enrichers' updateFunc will be triggered, so both enrichers' metadata maps will be up to date.
- But what happens with pods that triggered a watcher event in between the pod and state_pod initialization, i.e. after the pod metricset got initialized and before state_pod did?
- This is very common. The moment a watcher starts watching (started by the pod metricset) it immediately gets notified for all pods in the cluster and executes its handlers; at that point only the pod enricher's updateFunc exists.
- So by the time the watcher got updated with the state_pod enricher, the events for the existing pods had already arrived, and the watcher never called the state_pod enricher's updateFunc for them.
- The outcome is that the state_pod enricher's metadata map won't have the existing pods' metadata, because the metadata Generate method of that enricher was never called.
- So we need a way to handle those cases.

Watcher Initialization and MetadataObjects List:
- Each `metaWatcher` in the `metaWatchersMap` contains a crucial field called `metadataObjects`.
- This list is a record of all the object IDs (resource IDs) for which the watcher has been triggered.

Updating MetadataObjects During Events:
- Whenever the `AddFunc` or `UpdateFunc` of the watcher is triggered (e.g., when a new object is detected or an existing one is updated), the `metadataObjects` field is updated.
- The ID of the detected object is [added](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L915) to the `metadataObjects` list.

Proactive Synchronization with Existing Enrichers:
- Before updating the watcher's event handlers with new enrichers in the `buildMetadataEnricher` function, the existing `metadataObjects` list is [iterated](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L883C43-L883C58).

Enricher UpdateFunc Execution:
- For each object ID in the `metadataObjects` list, the corresponding object is retrieved from the watcher's store.
- The `UpdateFunc` of the new enricher (the one that called buildMetadataEnricher, e.g., state_pod) is then executed for each existing object, ensuring that the metadata map of the new enricher is updated with metadata for all existing objects. A sketch of this catch-up step is shown right after this list.
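A minimal sketch of that catch-up step, assuming the metaWatcher and enricher types defined in this branch (the helper name and variable names are illustrative; in the branch this logic lives inline inside buildMetadataEnricher):

```go
// syncExistingObjects replays every object the shared watcher has already
// seen into a newly registered enricher. Sketch only; assumes the metaWatcher
// and enricher types (and the logp import) already present in this file.
func syncExistingObjects(w *metaWatcher, newEnricher *enricher, log *logp.Logger) {
	for key := range w.metadataObjects {
		obj, exists, err := w.watcher.Store().GetByKey(key)
		if err != nil {
			log.Errorf("Error trying to get the object from the store: %s", err)
			continue
		}
		if !exists {
			continue
		}
		// Generate metadata for the already-known object with the new
		// enricher's updateFunc and merge it into that enricher's metadata map.
		newEnricher.Lock()
		for id, m := range newEnricher.updateFunc(obj.(kubernetes.Resource)) {
			newEnricher.metadata[id] = m
		}
		newEnricher.Unlock()
	}
}
```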
This systematic approach guarantees that the metadata for existing resources, which triggered events before the initialization of certain enrichers, is correctly captured and stored in the metadata map of the new enrichers.

After the enricher gets created (during a metricset's initialization), it gets started. This happens inside the Fetch method of each [metricset](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/helper/kubernetes/state_metricset.go#L104).

The [Start](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L979) method initiates the associated watchers, beginning with any extra watchers and then the primary watcher for the metricset. For example, for the pod metricset, we first start the namespace and node watchers and then the main pod watcher. This is intentional: if the pod watcher started first and got triggered immediately (a new pod appearing), the namespace and node metadata would not be available, as the namespace and node watchers would not have started yet.

In cases where a watcher needs to be updated (e.g., its watch options change), a [restartWatcher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L1003) is employed. This is applicable to the pod watcher only. As already mentioned, this can happen on leader nodes where the pod metricset created a watcher with node scope (watching only the pods on the current node), while state_pod needs the same watcher to watch pods on all nodes. The existing watcher is stopped, and the restartWatcher takes its place, ensuring a seamless transition. The restartWatcher [replaces](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L1010) the initial watcher, and the restartWatcher field is then set to nil.

The last step of the metadata enrichment process is the actual [Enrich](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L1055) method, which is called during the generation of events by the [metricsets](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/helper/kubernetes/state_metricset.go#L119). Inside Enrich, for each event (containing metrics of a single resource, e.g. the redis pod), the enricher's [index](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L1060) method identifies the associated resource, and metadata from the enricher's map is added to the event. Remember that the index method was created in the NewResourceMetadataEnricher function. The resource's metadata can then be retrieved from the enricher's [metadata map](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L1060) and added to the event; keep in mind that it was the watcher's event handler functions that called the relevant enrichers' updateFunc to fill each enricher's metadata map. A simplified sketch of this lookup is shown below.

Lastly, we have already explained in detail the mechanism of NewResourceMetadataEnricher.
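For illustration, here is a simplified sketch of that lookup. It assumes the enricher fields defined in this branch (metadata map, index function, embedded RWMutex) and is not the exact Enrich implementation, which also handles the ECS and non-ECS event layouts:

```go
// enrichSketch is an illustrative, simplified version of the enrichment step;
// it is not the real Enrich method from this branch.
func (e *enricher) enrichSketch(events []mapstr.M) {
	e.RLock()
	defer e.RUnlock()
	for _, event := range events {
		// e.index extracts the resource identifier (e.g. namespace and name)
		// from the event; that identifier keys the stored metadata.
		if meta, ok := e.metadata[e.index(event)]; ok {
			event.DeepUpdate(meta.Clone())
		}
	}
}
```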
However, for the container and state_container metricsets, [NewContainerMetadataEnricher](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L659) is called instead.

The functionality is the same as NewResourceMetadataEnricher, but since the resource is known (containers), the function operates as if the resource were Pod. The reason is that we first need to collect pods and then get their containers, so it only creates pod-related watchers and metadata generators.

The difference lies in the creation of the enricher's [updateFunc](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L701). There, we first need to call the pod metadata generator's [Generate](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L708) method (as it also includes the pod's containers metadata). Then we iterate over the pod's [containers](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L723) and construct the [ID](https://github.com/constanca-m/beats/blob/8fcdfc205ebe0221ed7a1046c673c9babfb5280d/metricbeat/module/kubernetes/util/kubernetes.go#L752) of each container in the form of namespace:pod:container.

The container metadata is also enriched with a few more useful fields like `container.id` and `container.runtime`.

diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 9d393a4d30b..e8ad573e27c 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -84,16 +84,16 @@ func (*nilEnricher) Stop(*Watchers) {} func (*nilEnricher) Enrich([]mapstr.M) {} type metaWatcher struct { - watcher kubernetes.Watcher - started bool // true if watcher has started, false otherwise + watcher kubernetes.Watcher // watcher responsible for watching a specific resource + started bool // true if watcher has started, false otherwise - metricsetsUsing []string // list of metricsets using this watcher + metricsetsUsing []string // list of metricsets using this shared watcher(e.g. pod, container, state_pod) - enrichers map[string]*enricher // map of enrichers using this watcher. The key is the metricset name - metadataObjects map[string]bool // map of ids of each object received by the handler functions + enrichers map[string]*enricher // map of enrichers using this watcher. The key is the metricset name. Each metricset has its own enricher + metadataObjects map[string]bool // representation of a set of ids(in the form of namespace_name-resource_name) of each object received by the watcher's handler functions - nodeScope bool // whether this watcher is only for current node - restartWatcher kubernetes.Watcher // whether this watcher needs a restart + nodeScope bool // whether this watcher should watch for resources in current node or in whole cluster + restartWatcher kubernetes.Watcher // whether this watcher needs a restart.
Only relevant in leader nodes due to metricsets with different nodescope(pod, state_pod) } type Watchers struct { @@ -207,9 +207,9 @@ func getExtraWatchers(resourceName string, addResourceMetadata *metadata.AddReso } } -// getResourceName returns the name of the resource for a metricset -// Example: state_pod metricset uses pod resource -// Exception is state_namespace +// getResourceName returns the name of the resource for a metricset. +// Example: state_pod metricset uses pod resource. +// Exception is state_namespace. func getResourceName(metricsetName string) string { resourceName := metricsetName if resourceName != NamespaceResource { @@ -218,14 +218,14 @@ func getResourceName(metricsetName string) string { return resourceName } -// getWatchOptions builds the kubernetes.WatchOptions{} needed for the watcher based on the config and nodeScope +// getWatchOptions builds the kubernetes.WatchOptions{} needed for the watcher based on the config and nodeScope. func getWatchOptions(config *kubernetesConfig, nodeScope bool, client k8sclient.Interface, log *logp.Logger) (*kubernetes.WatchOptions, error) { var err error options := kubernetes.WatchOptions{ SyncTimeout: config.SyncPeriod, } - // Watch objects in the node only + // Watch objects in the node only. if nodeScope { nd := &kubernetes.DiscoverKubernetesNodeParams{ ConfigHost: config.Node, @@ -249,7 +249,12 @@ func isNamespaced(resourceName string) bool { return true } -// createWatcher creates a watcher for a specific resource +// createWatcher creates a watcher for a specific resource if not already created and stores it in the resourceWatchers map. +// resourceName is the key in the resourceWatchers map where the created watcher gets stored. +// options are the watch options for a specific watcher. +// For example a watcher can be configured through options to watch only for resources on a specific node/namespace or in whole cluster. +// resourceWatchers is the store for all created watchers. +// extraWatcher bool sets apart the watchers that are created as main watcher for a resource and the ones that are created as an extra watcher. func createWatcher( resourceName string, resource kubernetes.Resource, @@ -259,12 +264,12 @@ func createWatcher( namespace string, extraWatcher bool) (bool, error) { - // We need to check the node scope to decide on whether a watcher should be updated or not + // We need to check the node scope to decide on whether a watcher should be updated or not. nodeScope := false if options.Node != "" { nodeScope = true } - // The nodescope for extra watchers node, namespace, replicaset and job should be always false + // The nodescope for extra watchers node, namespace, replicaset and job should be always false. if extraWatcher { nodeScope = false options.Node = "" @@ -273,11 +278,12 @@ func createWatcher( resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() + // Check if a watcher for the specific resource already exists. resourceMetaWatcher, ok := resourceWatchers.metaWatchersMap[resourceName] - // if it does not exist, create the resourceMetaWatcher + // If it does not exist, create the resourceMetaWatcher. if !ok { - // check if we need to add namespace to the resourceMetaWatcher options + // Check if we need to add namespace to the watcher's options. 
if isNamespaced(resourceName) { options.Namespace = namespace } @@ -287,7 +293,7 @@ func createWatcher( } resourceWatchers.metaWatchersMap[resourceName] = &metaWatcher{ watcher: watcher, - started: false, + started: false, // not started yet metadataObjects: make(map[string]bool), enrichers: make(map[string]*enricher), metricsetsUsing: make([]string, 0), @@ -296,10 +302,13 @@ func createWatcher( } return true, nil } else if resourceMetaWatcher.nodeScope != nodeScope && resourceMetaWatcher.nodeScope { - // It might happen that the resourceMetaWatcher already exists, but is only being used to monitor the resources - // of a single node. In that case, we need to check if we are trying to create a new resourceMetaWatcher that will track - // the resources of multiple nodes. If it is the case, then we need to update the resourceMetaWatcher. - // check if we need to add namespace to the resourceMetaWatcher options + // It might happen that the watcher already exists, but is only being used to monitor the resources + // of a single node(e.g. created by pod metricset). In that case, we need to check if we are trying to create a new watcher that will track + // the resources of whole cluster(e.g. in case of state_pod metricset). + // If it is the case, then we need to update the watcher by changing its watch options (removing options.Node) + // A running watcher cannot be updated directly. Instead, we must create a new one with the correct watch options. + // The new restartWatcher must be identical to the old watcher, including the same handler function, with the only difference being the watch options. + if isNamespaced(resourceName) { options.Namespace = namespace } @@ -307,7 +316,7 @@ func createWatcher( if err != nil { return false, err } - // update the handler of the restart resourceMetaWatchers to match the current resourceMetaWatchers handler + // update the handler of the restartWatcher to match the current watcher's handler. restartWatcher.AddEventHandler(resourceMetaWatcher.watcher.GetEventHandler()) resourceMetaWatcher.restartWatcher = restartWatcher resourceMetaWatcher.nodeScope = nodeScope @@ -316,7 +325,7 @@ func createWatcher( } // addToMetricsetsUsing adds metricset identified by metricsetUsing to the list of resources using the shared watcher -// identified by resourceName +// identified by resourceName. func addToMetricsetsUsing(resourceName string, metricsetUsing string, resourceWatchers *Watchers) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() @@ -378,7 +387,8 @@ func createAllWatchers( if err != nil { return err } - // Create a watcher for the given resource. + // Create the main watcher for the given resource. + // For example pod metricset's main watcher will be pod watcher. // If it fails, we return an error, so we can stop the extra watchers from creating. created, err := createWatcher(resourceName, res, *options, client, resourceWatchers, config.Namespace, false) if err != nil { @@ -386,9 +396,10 @@ func createAllWatchers( } else if created { log.Debugf("Created watcher %s successfully, created by %s.", resourceName, metricsetName) } + // add this metricset to the ones using the watcher addToMetricsetsUsing(resourceName, metricsetName, resourceWatchers) - // Create the extra watchers required by this resource + // Create any extra watchers required by this resource // For example pod requires also namespace and node watcher and possibly replicaset and job watcher. 
extraWatchers := getExtraWatchers(resourceName, config.AddResourceMetadata) for _, extra := range extraWatchers { @@ -401,7 +412,7 @@ func createAllWatchers( if created { log.Debugf("Created watcher %s successfully, created by %s.", extra, metricsetName) } - // add this metricset to the ones using the extra resource + // add this metricset to the ones using the extra watchers addToMetricsetsUsing(extra, metricsetName, resourceWatchers) } } else { @@ -412,7 +423,8 @@ func createAllWatchers( return nil } -// createMetadataGen creates the metadata generator for resources other than pod and service +// createMetadataGen creates and returns the metadata generator for resources other than pod and service +// metaGen is a struct of type Resource and implements Generate method for metadata generation for a given resource kind. func createMetadataGen(client k8sclient.Interface, commonConfig *conf.C, addResourceMetadata *metadata.AddResourceMetadataConfig, resourceName string, resourceWatchers *Watchers) (*metadata.Resource, error) { @@ -439,7 +451,8 @@ func createMetadataGen(client k8sclient.Interface, commonConfig *conf.C, addReso return metaGen, nil } -// createMetadataGenSpecific creates the metadata generator for a specific resource - pod or service +// createMetadataGenSpecific creates and returns the metadata generator for a specific resource - pod or service +// A metaGen struct implements a MetaGen interface and is designed to utilize the necessary watchers to collect(Generate) metadata for a specific resource. func createMetadataGenSpecific(client k8sclient.Interface, commonConfig *conf.C, addResourceMetadata *metadata.AddResourceMetadataConfig, resourceName string, resourceWatchers *Watchers) (metadata.MetaGen, error) { @@ -473,6 +486,8 @@ func createMetadataGenSpecific(client k8sclient.Interface, commonConfig *conf.C, if jobMetaWatcher := resourceWatchers.metaWatchersMap[JobResource]; jobMetaWatcher != nil { jobWatcher = (*jobMetaWatcher).watcher } + // For example for pod named redis in namespace default, the generator uses the pod watcher for pod metadata, + // collects all node metadata using the node watcher's store and all namespace metadata using the namespacewatcher's store. metaGen = metadata.GetPodMetaGen(commonConfig, mainWatcher, nodeWatcher, namespaceWatcher, replicaSetWatcher, jobWatcher, addResourceMetadata) return metaGen, nil @@ -505,6 +520,7 @@ func NewResourceMetadataEnricher( nodeScope bool) Enricher { log := logp.NewLogger(selector) + // metricset configuration config, err := GetValidatedConfig(base) if err != nil { log.Info("Kubernetes metricset enriching is disabled") @@ -528,6 +544,7 @@ func NewResourceMetadataEnricher( metricsetName := base.Name() resourceName := getResourceName(metricsetName) + // Create all watchers needed for this metricset err = createAllWatchers(client, metricsetName, resourceName, nodeScope, config, log, resourceWatchers) if err != nil { log.Errorf("Error starting the watchers: %s", err) @@ -536,6 +553,8 @@ func NewResourceMetadataEnricher( var specificMetaGen metadata.MetaGen var generalMetaGen *metadata.Resource + // Create the metadata generator to be used in the watcher's event handler. + // Both specificMetaGen and generalMetaGen implement Generate method for metadata collection. 
if resourceName == ServiceResource || resourceName == PodResource { specificMetaGen, err = createMetadataGenSpecific(client, commonConfig, config.AddResourceMetadata, resourceName, resourceWatchers) } else { @@ -547,7 +566,10 @@ func NewResourceMetadataEnricher( } // updateFunc to be used as the resource watcher's add and update handler. - // It is responsible for generating the metadata for a detected resource + // The handler function is executed when a watcher is triggered(i.e. new/updated resource). + // It is responsible for generating the metadata for a detected resource by executing the metadata generator's Generate method. + // It is a common handler for all resource watchers. The kind of resource(e.g. pod or deployment) is checked inside the function. + // It returns a map of a resourse identifier(i.e namespace-resource_name) as key and the metadata as value. updateFunc := func(r kubernetes.Resource) map[string]mapstr.M { accessor, _ := meta.Accessor(r) id := accessor.GetName() @@ -604,8 +626,9 @@ func NewResourceMetadataEnricher( } } - // deleteFunc to be used as the resource watcher's delete handler - // If a resource deletion is detected it returns the id of the resource + // deleteFunc to be used as the resource watcher's delete handler. + // The deleteFunc is executed when a watcher is triggered for a resource deletion(e.g. pod deleted). + // It returns the identifier of the resource. deleteFunc := func(r kubernetes.Resource) []string { accessor, _ := meta.Accessor(r) @@ -623,7 +646,9 @@ func NewResourceMetadataEnricher( return []string{id} } - // indexFunc retrieves the resource id from a given event + // indexFunc constructs and returns the resource identifier from a given event. + // If a resource is namespaced(e.g. pod) the identifier is in the form of namespace-resource_name. + // If it is not namespaced(e.g. node) the identifier is the resource's name. indexFunc := func(e mapstr.M) string { name := getString(e, "name") namespace := getString(e, mb.ModuleDataKey+".namespace") @@ -871,16 +896,19 @@ func buildMetadataEnricher( resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() + // Check if a watcher for this resource already exists. resourceMetaWatcher := resourceWatchers.metaWatchersMap[resourceName] if resourceMetaWatcher != nil { + // Append the new enricher to watcher's enrichers map. resourceMetaWatcher.enrichers[metricsetName] = enricher - // Check if this shared watcher has already detected resources from a previous enricher. + // Check if this shared watcher has already detected resources and collected their + // metadata for another enricher. // In that case, for each resource, call the updateFunc of the current enricher to - // update its metadata. This is needed in cases where the watcher has already been + // generate its metadata. This is needed in cases where the watcher has already been // notified for new/updated resources while the enricher for current metricset has not // built yet (example is pod, state_pod metricsets). 
- for key, _ := range resourceMetaWatcher.metadataObjects { + for key := range resourceMetaWatcher.metadataObjects { obj, exists, err := resourceMetaWatcher.watcher.Store().GetByKey(key) if err != nil { log.Errorf("Error trying to get the object from the store: %s", err) @@ -903,9 +931,10 @@ func buildMetadataEnricher( resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() - // Add object to the list of metadata objects of this watcher, - // so it can be used by enrichers created after the event is - // triggered + // Add object(detected resource) to the list of metadata objects of this watcher, + // so it can be used by enrichers created after the event is triggered. + // The identifier of the object is in the form of namespace/name so that + // it can be easily fetched from watcher's store in previous step. accessor, _ := meta.Accessor(obj.(kubernetes.Resource)) id := accessor.GetName() namespace := accessor.GetNamespace() @@ -913,7 +942,7 @@ func buildMetadataEnricher( id = namespace + "/" + id } resourceMetaWatcher.metadataObjects[id] = true - + // Execute the updateFunc of each enricher associated to thos watcher. for _, enricher := range resourceMetaWatcher.enrichers { enricher.Lock() newMetadataEvents := enricher.updateFunc(obj.(kubernetes.Resource)) @@ -975,12 +1004,12 @@ func buildMetadataEnricher( return enricher } -// Start starts all the watchers associated with a given enricher resource +// Start starts all the watchers associated with a given enricher's resource. func (e *enricher) Start(resourceWatchers *Watchers) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() - // Each resource may require multiple watchers. We firstly start the + // Each resource may require multiple watchers. Firstly, we start the // extra watchers as they are a dependency for the main resource watcher // For example a pod watcher requires namespace and node watcher to be started // first. @@ -996,8 +1025,9 @@ func (e *enricher) Start(resourceWatchers *Watchers) { } } - // Start the main watcher if not already started - // If there is a restart watcher, stop the old watcher if started and start the restart watcher + // Start the main watcher if not already started. + // If there is a restart watcher, stop the old watcher if started and start the restart watcher. + // Then the restart watcher replaces the the old watcher and resourceMetaWatcher is set to nil. resourceMetaWatcher := resourceWatchers.metaWatchersMap[e.resourceName] if resourceMetaWatcher != nil { if resourceMetaWatcher.restartWatcher != nil { @@ -1024,7 +1054,7 @@ func (e *enricher) Start(resourceWatchers *Watchers) { } // Stop removes the enricher's metricset as a user of the associated watchers. -// If no metricset is using the watchers anymore it stops them. +// If no metricset is using the watchers anymore, the watcher gets stopped. func (e *enricher) Stop(resourceWatchers *Watchers) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() @@ -1052,6 +1082,8 @@ func (e *enricher) Stop(resourceWatchers *Watchers) { } // Enrich enriches events with metadata saved in the enricher.metadata map +// This method is executed whenever a new event is created and about to be published. +// The enricher's index method is used to retrieve the resource identifier from each event. 
func (e *enricher) Enrich(events []mapstr.M) { e.RLock() defer e.RUnlock() From 8fa75996ae91b2524d1f5e5a80f8b5486339a6e6 Mon Sep 17 00:00:00 2001 From: MichaelKatsoulis Date: Wed, 27 Mar 2024 13:06:06 +0200 Subject: [PATCH 57/61] Update go.mod --- go.mod | 5 +++-- go.sum | 8 ++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 786a280cefc..82fb6cf0252 100644 --- a/go.mod +++ b/go.mod @@ -2,6 +2,7 @@ module github.com/elastic/beats/v7 go 1.21 +// replace github.com/elastic/elastic-agent-autodiscover => /Users/michaliskatsoulis/go/src/github.com/elastic/elastic-agent-autodiscover require ( cloud.google.com/go/bigquery v1.55.0 cloud.google.com/go/monitoring v1.16.0 @@ -69,7 +70,7 @@ require ( github.com/dustin/go-humanize v1.0.1 github.com/eapache/go-resiliency v1.2.0 github.com/eclipse/paho.mqtt.golang v1.3.5 - github.com/elastic/elastic-agent-client/v7 v7.8.0 + github.com/elastic/elastic-agent-client/v7 v7.8.1 github.com/elastic/go-concert v0.2.0 github.com/elastic/go-libaudit/v2 v2.5.0 github.com/elastic/go-licenser v0.4.1 @@ -164,7 +165,7 @@ require ( google.golang.org/api v0.128.0 google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13 // indirect google.golang.org/grpc v1.58.3 - google.golang.org/protobuf v1.32.0 + google.golang.org/protobuf v1.33.0 gopkg.in/inf.v0 v0.9.1 gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect diff --git a/go.sum b/go.sum index 9521adc53db..f92e12e446d 100644 --- a/go.sum +++ b/go.sum @@ -667,8 +667,8 @@ github.com/elastic/ebpfevents v0.4.0 h1:M80eAeJnzvGQgU9cjJqkjFca9pjM3aq/TuZxJeom github.com/elastic/ebpfevents v0.4.0/go.mod h1:o21z5xup/9dK8u0Hg9bZRflSqqj1Zu5h2dg2hSTcUPQ= github.com/elastic/elastic-agent-autodiscover v0.6.8 h1:BSXz+QwjZAEt08G+T3GDGl14Bh9a6zD8luNCvZut/b8= github.com/elastic/elastic-agent-autodiscover v0.6.8/go.mod h1:hFeFqneS2r4jD0/QzGkrNk0YVdN0JGh7lCWdsH7zcI4= -github.com/elastic/elastic-agent-client/v7 v7.8.0 h1:GHFzDJIWpdgI0qDk5EcqbQJGvwTsl2E2vQK3/xe+MYQ= -github.com/elastic/elastic-agent-client/v7 v7.8.0/go.mod h1:ihtjqJzYiIltlRhNruaSSc0ogxIhqPD5hOMKq16cI1s= +github.com/elastic/elastic-agent-client/v7 v7.8.1 h1:J9wZc/0mUvSEok0X5iR5+n60Jgb+AWooKddb3XgPWqM= +github.com/elastic/elastic-agent-client/v7 v7.8.1/go.mod h1:axl1nkdqc84YRFkeJGD9jExKNPUrOrzf3DFo2m653nY= github.com/elastic/elastic-agent-libs v0.7.5 h1:4UMqB3BREvhwecYTs/L23oQp1hs/XUkcunPlmTZn5yg= github.com/elastic/elastic-agent-libs v0.7.5/go.mod h1:pGMj5myawdqu+xE+WKvM5FQzKQ/MonikkWOzoFTJxaU= github.com/elastic/elastic-agent-shipper-client v0.5.1-0.20230228231646-f04347b666f3 h1:sb+25XJn/JcC9/VL8HX4r4QXSUq4uTNzGS2kxOE7u1U= @@ -2674,8 +2674,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod 
h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 38d44f4eb54e32fc3fbdfd3fe0b1bd5d88f28e1e Mon Sep 17 00:00:00 2001 From: Michael Katsoulis Date: Thu, 28 Mar 2024 17:10:18 +0200 Subject: [PATCH 58/61] Update metricbeat/module/kubernetes/util/kubernetes.go Co-authored-by: Tetiana Kravchenko --- metricbeat/module/kubernetes/util/kubernetes.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index e8ad573e27c..ca3b99a8c83 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -1026,8 +1026,8 @@ func (e *enricher) Start(resourceWatchers *Watchers) { } // Start the main watcher if not already started. - // If there is a restart watcher, stop the old watcher if started and start the restart watcher. - // Then the restart watcher replaces the the old watcher and resourceMetaWatcher is set to nil. + // If there is a restartWatcher defined, stop the old watcher if started and start the restartWatcher. + // restartWatcher replaces the old watcher and resourceMetaWatcher.restartWatcher is set to nil. resourceMetaWatcher := resourceWatchers.metaWatchersMap[e.resourceName] if resourceMetaWatcher != nil { if resourceMetaWatcher.restartWatcher != nil { From 53fd63a83c62b3b516d2c8aa75bc4a64a02c9b6c Mon Sep 17 00:00:00 2001 From: constanca Date: Tue, 2 Apr 2024 15:58:36 +0200 Subject: [PATCH 59/61] Update go.mod and add comment about lock Signed-off-by: constanca --- go.mod | 1 - metricbeat/module/kubernetes/util/kubernetes.go | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/go.mod b/go.mod index e74e5d39962..745d47673b6 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,6 @@ module github.com/elastic/beats/v7 go 1.21 -// replace github.com/elastic/elastic-agent-autodiscover => /Users/michaliskatsoulis/go/src/github.com/elastic/elastic-agent-autodiscover require ( cloud.google.com/go/bigquery v1.55.0 cloud.google.com/go/monitoring v1.16.0 diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index ca3b99a8c83..78281046af5 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -325,7 +325,7 @@ func createWatcher( } // addToMetricsetsUsing adds metricset identified by metricsetUsing to the list of resources using the shared watcher -// identified by resourceName. +// identified by resourceName. The caller of this function should be holding the lock. 
func addToMetricsetsUsing(resourceName string, metricsetUsing string, resourceWatchers *Watchers) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() From b012f02c6f653fdf80dcc603bddc5b219e15d6ca Mon Sep 17 00:00:00 2001 From: constanca Date: Tue, 2 Apr 2024 16:17:56 +0200 Subject: [PATCH 60/61] Update comment about lock Signed-off-by: constanca --- metricbeat/module/kubernetes/util/kubernetes.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index 78281046af5..d89ca006f0e 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -325,7 +325,7 @@ func createWatcher( } // addToMetricsetsUsing adds metricset identified by metricsetUsing to the list of resources using the shared watcher -// identified by resourceName. The caller of this function should be holding the lock. +// identified by resourceName. The caller of this function should not be holding the lock. func addToMetricsetsUsing(resourceName string, metricsetUsing string, resourceWatchers *Watchers) { resourceWatchers.lock.Lock() defer resourceWatchers.lock.Unlock() From 92c3d5bdcad566e4e12d34bd1eb9864b37c974d2 Mon Sep 17 00:00:00 2001 From: constanca Date: Wed, 3 Apr 2024 09:23:19 +0200 Subject: [PATCH 61/61] mage check Signed-off-by: constanca --- x-pack/filebeat/input/internal/httplog/roundtripper.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x-pack/filebeat/input/internal/httplog/roundtripper.go b/x-pack/filebeat/input/internal/httplog/roundtripper.go index e8d5f8765ca..642245603f8 100644 --- a/x-pack/filebeat/input/internal/httplog/roundtripper.go +++ b/x-pack/filebeat/input/internal/httplog/roundtripper.go @@ -17,10 +17,11 @@ import ( "strconv" "time" - "github.com/elastic/elastic-agent-libs/logp" "go.uber.org/atomic" "go.uber.org/zap" "go.uber.org/zap/zapcore" + + "github.com/elastic/elastic-agent-libs/logp" ) var _ http.RoundTripper = (*LoggingRoundTripper)(nil)