From a33e63a9fe833a94b3b7d463fbfba04335bc6849 Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Fri, 6 Oct 2023 11:46:04 -0600 Subject: [PATCH] [receiver/kubeletstats] Add new CPU utilization metrics (#27276) **Description:** Adds new CPU utilization metrics with respect to pod/container CPU limits and requests. A short sketch of the ratio computation is included below the patch. **Link to tracking Issue:** Closes https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/24905 **Testing:** Added new unit tests and tested locally --- ...yaml => kubeletstats-cpu-utilization.yaml} | 6 +- .../kubeletstats-memory-utilization.yaml | 2 +- .../kubeletstatsreceiver/documentation.md | 32 +++ .../internal/kubelet/accumulator.go | 9 +- .../internal/kubelet/cpu.go | 15 +- .../internal/metadata/generated_config.go | 16 ++ .../metadata/generated_config_test.go | 8 + .../internal/metadata/generated_metrics.go | 228 ++++++++++++++++++ .../metadata/generated_metrics_test.go | 60 +++++ .../internal/metadata/metrics.go | 19 +- .../internal/metadata/testdata/config.yaml | 16 ++ receiver/kubeletstatsreceiver/metadata.yaml | 28 +++ receiver/kubeletstatsreceiver/scraper.go | 6 +- receiver/kubeletstatsreceiver/scraper_test.go | 62 ++++- .../kubeletstatsreceiver/testdata/pods.json | 4 +- 15 files changed, 476 insertions(+), 35 deletions(-) rename .chloggen/{kubeletstats-percentage-metrics.yaml => kubeletstats-cpu-utilization.yaml} (81%) diff --git a/.chloggen/kubeletstats-percentage-metrics.yaml b/.chloggen/kubeletstats-cpu-utilization.yaml similarity index 81% rename from .chloggen/kubeletstats-percentage-metrics.yaml rename to .chloggen/kubeletstats-cpu-utilization.yaml index 7b8f846878ab..9c60edfdd65e 100755 --- a/.chloggen/kubeletstats-percentage-metrics.yaml +++ b/.chloggen/kubeletstats-cpu-utilization.yaml @@ -7,15 +7,15 @@ change_type: enhancement component: kubeletstatsreceiver # A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). -note: Add new metrics for representing pod and memory consumption of pods and containers as a percentage of the defined resource limits. +note: Adds new `k8s.pod.cpu_limit_utilization`, `k8s.pod.cpu_request_utilization`, `k8s.container.cpu_limit_utilization`, and `k8s.container.cpu_request_utilization` metrics that represent the ratio of cpu used vs set limits and requests. # Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. -issues: [25835] +issues: [27276] # (Optional) One or more lines of additional information to render under the primary note. # These lines will be padded with 2 spaces and then inserted directly into the document. # Use pipe (|) for multiline entries. -subtext: These metrics represent how much of your resource limits a container or pod is consuming. +subtext: # If your change doesn't affect end users or the exported elements of any package, # you should instead start your pull request title with [chore] or use the "Skip Changelog" label. diff --git a/.chloggen/kubeletstats-memory-utilization.yaml b/.chloggen/kubeletstats-memory-utilization.yaml index 89e9788dce28..2f77ccf13784 100755 --- a/.chloggen/kubeletstats-memory-utilization.yaml +++ b/.chloggen/kubeletstats-memory-utilization.yaml @@ -7,7 +7,7 @@ change_type: enhancement component: kubeletstatsreceiver # A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
-note: Adds new `k8s.pod.memory.utilization` and `container.memory.utilization` metrics that represent the ratio of memory used vs limits set. +note: Adds new `k8s.pod.memory_limit_utilization`, `k8s.pod.memory_request_utilization`, `k8s.container.memory_limit_utilization`, and `k8s.container.memory_request_utilization` metrics that represent the ratio of memory used vs set limits and requests. # Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. issues: [25894] diff --git a/receiver/kubeletstatsreceiver/documentation.md b/receiver/kubeletstatsreceiver/documentation.md index bd2aede3283f..2b00e234a653 100644 --- a/receiver/kubeletstatsreceiver/documentation.md +++ b/receiver/kubeletstatsreceiver/documentation.md @@ -394,6 +394,22 @@ The time since the container started | ---- | ----------- | ---------- | ----------------------- | --------- | | s | Sum | Int | Cumulative | true | +### k8s.container.cpu_limit_utilization + +Container cpu utilization as a ratio of the container's limits + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + +### k8s.container.cpu_request_utilization + +Container cpu utilization as a ratio of the container's requests + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + ### k8s.container.memory_limit_utilization Container memory utilization as a ratio of the container's limits @@ -418,6 +434,22 @@ The time since the node started | ---- | ----------- | ---------- | ----------------------- | --------- | | s | Sum | Int | Cumulative | true | +### k8s.pod.cpu_limit_utilization + +Pod cpu utilization as a ratio of the pod's total container limits. If any container is missing a limit the metric is not emitted. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + +### k8s.pod.cpu_request_utilization + +Pod cpu utilization as a ratio of the pod's total container requests. If any container is missing a request the metric is not emitted. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + ### k8s.pod.memory_limit_utilization Pod memory utilization as a ratio of the pod's total container limits. If any container is missing a limit the metric is not emitted. 
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go index 53d206b73710..bf9cb0e6d48e 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go @@ -56,7 +56,7 @@ func (a *metricDataAccumulator) nodeStats(s stats.NodeStats) { currentTime := pcommon.NewTimestampFromTime(a.time) addUptimeMetric(a.mbs.NodeMetricsBuilder, metadata.NodeUptimeMetrics.Uptime, s.StartTime, currentTime) - addCPUMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeCPUMetrics, s.CPU, currentTime) + addCPUMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeCPUMetrics, s.CPU, currentTime, resources{}) addMemoryMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeMemoryMetrics, s.Memory, currentTime, resources{}) addFilesystemMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeFilesystemMetrics, s.Fs, currentTime) addNetworkMetrics(a.mbs.NodeMetricsBuilder, metadata.NodeNetworkMetrics, s.Network, currentTime) @@ -76,7 +76,7 @@ func (a *metricDataAccumulator) podStats(s stats.PodStats) { currentTime := pcommon.NewTimestampFromTime(a.time) addUptimeMetric(a.mbs.PodMetricsBuilder, metadata.PodUptimeMetrics.Uptime, s.StartTime, currentTime) - addCPUMetrics(a.mbs.PodMetricsBuilder, metadata.PodCPUMetrics, s.CPU, currentTime) + addCPUMetrics(a.mbs.PodMetricsBuilder, metadata.PodCPUMetrics, s.CPU, currentTime, a.metadata.podResources[s.PodRef.UID]) addMemoryMetrics(a.mbs.PodMetricsBuilder, metadata.PodMemoryMetrics, s.Memory, currentTime, a.metadata.podResources[s.PodRef.UID]) addFilesystemMetrics(a.mbs.PodMetricsBuilder, metadata.PodFilesystemMetrics, s.EphemeralStorage, currentTime) addNetworkMetrics(a.mbs.PodMetricsBuilder, metadata.PodNetworkMetrics, s.Network, currentTime) @@ -108,9 +108,10 @@ func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.Cont } currentTime := pcommon.NewTimestampFromTime(a.time) + resourceKey := sPod.PodRef.UID + s.Name addUptimeMetric(a.mbs.ContainerMetricsBuilder, metadata.ContainerUptimeMetrics.Uptime, s.StartTime, currentTime) - addCPUMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerCPUMetrics, s.CPU, currentTime) - addMemoryMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerMemoryMetrics, s.Memory, currentTime, a.metadata.containerResources[sPod.PodRef.UID+s.Name]) + addCPUMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerCPUMetrics, s.CPU, currentTime, a.metadata.containerResources[resourceKey]) + addMemoryMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerMemoryMetrics, s.Memory, currentTime, a.metadata.containerResources[resourceKey]) addFilesystemMetrics(a.mbs.ContainerMetricsBuilder, metadata.ContainerFilesystemMetrics, s.Rootfs, currentTime) a.m = append(a.m, a.mbs.ContainerMetricsBuilder.Emit( diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go b/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go index ad5d617efa21..00ec2e00451f 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go @@ -10,20 +10,27 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata" ) -func addCPUMetrics(mb *metadata.MetricsBuilder, cpuMetrics metadata.CPUMetrics, s *stats.CPUStats, currentTime pcommon.Timestamp) { +func addCPUMetrics(mb *metadata.MetricsBuilder, cpuMetrics metadata.CPUMetrics, s *stats.CPUStats, currentTime pcommon.Timestamp, r resources) { if
s == nil { return } - addCPUUsageMetric(mb, cpuMetrics.Utilization, s, currentTime) + addCPUUsageMetric(mb, cpuMetrics, s, currentTime, r) addCPUTimeMetric(mb, cpuMetrics.Time, s, currentTime) } -func addCPUUsageMetric(mb *metadata.MetricsBuilder, recordDataPoint metadata.RecordDoubleDataPointFunc, s *stats.CPUStats, currentTime pcommon.Timestamp) { +func addCPUUsageMetric(mb *metadata.MetricsBuilder, cpuMetrics metadata.CPUMetrics, s *stats.CPUStats, currentTime pcommon.Timestamp, r resources) { if s.UsageNanoCores == nil { return } value := float64(*s.UsageNanoCores) / 1_000_000_000 - recordDataPoint(mb, currentTime, value) + cpuMetrics.Utilization(mb, currentTime, value) + + if r.cpuLimit > 0 { + cpuMetrics.LimitUtilization(mb, currentTime, value/r.cpuLimit) + } + if r.cpuRequest > 0 { + cpuMetrics.RequestUtilization(mb, currentTime, value/r.cpuRequest) + } } func addCPUTimeMetric(mb *metadata.MetricsBuilder, recordDataPoint metadata.RecordDoubleDataPointFunc, s *stats.CPUStats, currentTime pcommon.Timestamp) { diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go index f8c68b4cee49..95e88e99d43d 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go @@ -37,6 +37,8 @@ type MetricsConfig struct { ContainerMemoryUsage MetricConfig `mapstructure:"container.memory.usage"` ContainerMemoryWorkingSet MetricConfig `mapstructure:"container.memory.working_set"` ContainerUptime MetricConfig `mapstructure:"container.uptime"` + K8sContainerCPULimitUtilization MetricConfig `mapstructure:"k8s.container.cpu_limit_utilization"` + K8sContainerCPURequestUtilization MetricConfig `mapstructure:"k8s.container.cpu_request_utilization"` K8sContainerMemoryLimitUtilization MetricConfig `mapstructure:"k8s.container.memory_limit_utilization"` K8sContainerMemoryRequestUtilization MetricConfig `mapstructure:"k8s.container.memory_request_utilization"` K8sNodeCPUTime MetricConfig `mapstructure:"k8s.node.cpu.time"` @@ -55,6 +57,8 @@ type MetricsConfig struct { K8sNodeUptime MetricConfig `mapstructure:"k8s.node.uptime"` K8sPodCPUTime MetricConfig `mapstructure:"k8s.pod.cpu.time"` K8sPodCPUUtilization MetricConfig `mapstructure:"k8s.pod.cpu.utilization"` + K8sPodCPULimitUtilization MetricConfig `mapstructure:"k8s.pod.cpu_limit_utilization"` + K8sPodCPURequestUtilization MetricConfig `mapstructure:"k8s.pod.cpu_request_utilization"` K8sPodFilesystemAvailable MetricConfig `mapstructure:"k8s.pod.filesystem.available"` K8sPodFilesystemCapacity MetricConfig `mapstructure:"k8s.pod.filesystem.capacity"` K8sPodFilesystemUsage MetricConfig `mapstructure:"k8s.pod.filesystem.usage"` @@ -114,6 +118,12 @@ func DefaultMetricsConfig() MetricsConfig { ContainerUptime: MetricConfig{ Enabled: false, }, + K8sContainerCPULimitUtilization: MetricConfig{ + Enabled: false, + }, + K8sContainerCPURequestUtilization: MetricConfig{ + Enabled: false, + }, K8sContainerMemoryLimitUtilization: MetricConfig{ Enabled: false, }, @@ -168,6 +178,12 @@ func DefaultMetricsConfig() MetricsConfig { K8sPodCPUUtilization: MetricConfig{ Enabled: true, }, + K8sPodCPULimitUtilization: MetricConfig{ + Enabled: false, + }, + K8sPodCPURequestUtilization: MetricConfig{ + Enabled: false, + }, K8sPodFilesystemAvailable: MetricConfig{ Enabled: true, }, diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go 
b/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go index 149c19b8adcf..7b1259eff5ec 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go @@ -38,6 +38,8 @@ func TestMetricsBuilderConfig(t *testing.T) { ContainerMemoryUsage: MetricConfig{Enabled: true}, ContainerMemoryWorkingSet: MetricConfig{Enabled: true}, ContainerUptime: MetricConfig{Enabled: true}, + K8sContainerCPULimitUtilization: MetricConfig{Enabled: true}, + K8sContainerCPURequestUtilization: MetricConfig{Enabled: true}, K8sContainerMemoryLimitUtilization: MetricConfig{Enabled: true}, K8sContainerMemoryRequestUtilization: MetricConfig{Enabled: true}, K8sNodeCPUTime: MetricConfig{Enabled: true}, @@ -56,6 +58,8 @@ func TestMetricsBuilderConfig(t *testing.T) { K8sNodeUptime: MetricConfig{Enabled: true}, K8sPodCPUTime: MetricConfig{Enabled: true}, K8sPodCPUUtilization: MetricConfig{Enabled: true}, + K8sPodCPULimitUtilization: MetricConfig{Enabled: true}, + K8sPodCPURequestUtilization: MetricConfig{Enabled: true}, K8sPodFilesystemAvailable: MetricConfig{Enabled: true}, K8sPodFilesystemCapacity: MetricConfig{Enabled: true}, K8sPodFilesystemUsage: MetricConfig{Enabled: true}, @@ -111,6 +115,8 @@ func TestMetricsBuilderConfig(t *testing.T) { ContainerMemoryUsage: MetricConfig{Enabled: false}, ContainerMemoryWorkingSet: MetricConfig{Enabled: false}, ContainerUptime: MetricConfig{Enabled: false}, + K8sContainerCPULimitUtilization: MetricConfig{Enabled: false}, + K8sContainerCPURequestUtilization: MetricConfig{Enabled: false}, K8sContainerMemoryLimitUtilization: MetricConfig{Enabled: false}, K8sContainerMemoryRequestUtilization: MetricConfig{Enabled: false}, K8sNodeCPUTime: MetricConfig{Enabled: false}, @@ -129,6 +135,8 @@ func TestMetricsBuilderConfig(t *testing.T) { K8sNodeUptime: MetricConfig{Enabled: false}, K8sPodCPUTime: MetricConfig{Enabled: false}, K8sPodCPUUtilization: MetricConfig{Enabled: false}, + K8sPodCPULimitUtilization: MetricConfig{Enabled: false}, + K8sPodCPURequestUtilization: MetricConfig{Enabled: false}, K8sPodFilesystemAvailable: MetricConfig{Enabled: false}, K8sPodFilesystemCapacity: MetricConfig{Enabled: false}, K8sPodFilesystemUsage: MetricConfig{Enabled: false}, diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go index aa72c769e7d3..03f94dc800bf 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go @@ -629,6 +629,104 @@ func newMetricContainerUptime(cfg MetricConfig) metricContainerUptime { return m } +type metricK8sContainerCPULimitUtilization struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills k8s.container.cpu_limit_utilization metric with initial data. 
+func (m *metricK8sContainerCPULimitUtilization) init() { + m.data.SetName("k8s.container.cpu_limit_utilization") + m.data.SetDescription("Container cpu utilization as a ratio of the container's limits") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricK8sContainerCPULimitUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricK8sContainerCPULimitUtilization) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricK8sContainerCPULimitUtilization) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricK8sContainerCPULimitUtilization(cfg MetricConfig) metricK8sContainerCPULimitUtilization { + m := metricK8sContainerCPULimitUtilization{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricK8sContainerCPURequestUtilization struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills k8s.container.cpu_request_utilization metric with initial data. +func (m *metricK8sContainerCPURequestUtilization) init() { + m.data.SetName("k8s.container.cpu_request_utilization") + m.data.SetDescription("Container cpu utilization as a ratio of the container's requests") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricK8sContainerCPURequestUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricK8sContainerCPURequestUtilization) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricK8sContainerCPURequestUtilization) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricK8sContainerCPURequestUtilization(cfg MetricConfig) metricK8sContainerCPURequestUtilization { + m := metricK8sContainerCPURequestUtilization{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricK8sContainerMemoryLimitUtilization struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
@@ -1527,6 +1625,104 @@ func newMetricK8sPodCPUUtilization(cfg MetricConfig) metricK8sPodCPUUtilization return m } +type metricK8sPodCPULimitUtilization struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills k8s.pod.cpu_limit_utilization metric with initial data. +func (m *metricK8sPodCPULimitUtilization) init() { + m.data.SetName("k8s.pod.cpu_limit_utilization") + m.data.SetDescription("Pod cpu utilization as a ratio of the pod's total container limits. If any container is missing a limit the metric is not emitted.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricK8sPodCPULimitUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricK8sPodCPULimitUtilization) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricK8sPodCPULimitUtilization) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricK8sPodCPULimitUtilization(cfg MetricConfig) metricK8sPodCPULimitUtilization { + m := metricK8sPodCPULimitUtilization{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricK8sPodCPURequestUtilization struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills k8s.pod.cpu_request_utilization metric with initial data. +func (m *metricK8sPodCPURequestUtilization) init() { + m.data.SetName("k8s.pod.cpu_request_utilization") + m.data.SetDescription("Pod cpu utilization as a ratio of the pod's total container requests. If any container is missing a request the metric is not emitted.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricK8sPodCPURequestUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricK8sPodCPURequestUtilization) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricK8sPodCPURequestUtilization) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricK8sPodCPURequestUtilization(cfg MetricConfig) metricK8sPodCPURequestUtilization { + m := metricK8sPodCPURequestUtilization{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricK8sPodFilesystemAvailable struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -2490,6 +2686,8 @@ type MetricsBuilder struct { metricContainerMemoryUsage metricContainerMemoryUsage metricContainerMemoryWorkingSet metricContainerMemoryWorkingSet metricContainerUptime metricContainerUptime + metricK8sContainerCPULimitUtilization metricK8sContainerCPULimitUtilization + metricK8sContainerCPURequestUtilization metricK8sContainerCPURequestUtilization metricK8sContainerMemoryLimitUtilization metricK8sContainerMemoryLimitUtilization metricK8sContainerMemoryRequestUtilization metricK8sContainerMemoryRequestUtilization metricK8sNodeCPUTime metricK8sNodeCPUTime @@ -2508,6 +2706,8 @@ type MetricsBuilder struct { metricK8sNodeUptime metricK8sNodeUptime metricK8sPodCPUTime metricK8sPodCPUTime metricK8sPodCPUUtilization metricK8sPodCPUUtilization + metricK8sPodCPULimitUtilization metricK8sPodCPULimitUtilization + metricK8sPodCPURequestUtilization metricK8sPodCPURequestUtilization metricK8sPodFilesystemAvailable metricK8sPodFilesystemAvailable metricK8sPodFilesystemCapacity metricK8sPodFilesystemCapacity metricK8sPodFilesystemUsage metricK8sPodFilesystemUsage @@ -2557,6 +2757,8 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting metricContainerMemoryUsage: newMetricContainerMemoryUsage(mbc.Metrics.ContainerMemoryUsage), metricContainerMemoryWorkingSet: newMetricContainerMemoryWorkingSet(mbc.Metrics.ContainerMemoryWorkingSet), metricContainerUptime: newMetricContainerUptime(mbc.Metrics.ContainerUptime), + metricK8sContainerCPULimitUtilization: newMetricK8sContainerCPULimitUtilization(mbc.Metrics.K8sContainerCPULimitUtilization), + metricK8sContainerCPURequestUtilization: newMetricK8sContainerCPURequestUtilization(mbc.Metrics.K8sContainerCPURequestUtilization), metricK8sContainerMemoryLimitUtilization: newMetricK8sContainerMemoryLimitUtilization(mbc.Metrics.K8sContainerMemoryLimitUtilization), metricK8sContainerMemoryRequestUtilization: newMetricK8sContainerMemoryRequestUtilization(mbc.Metrics.K8sContainerMemoryRequestUtilization), metricK8sNodeCPUTime: newMetricK8sNodeCPUTime(mbc.Metrics.K8sNodeCPUTime), @@ -2575,6 +2777,8 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting metricK8sNodeUptime: newMetricK8sNodeUptime(mbc.Metrics.K8sNodeUptime), metricK8sPodCPUTime: newMetricK8sPodCPUTime(mbc.Metrics.K8sPodCPUTime), metricK8sPodCPUUtilization: newMetricK8sPodCPUUtilization(mbc.Metrics.K8sPodCPUUtilization), + metricK8sPodCPULimitUtilization: newMetricK8sPodCPULimitUtilization(mbc.Metrics.K8sPodCPULimitUtilization), + metricK8sPodCPURequestUtilization: newMetricK8sPodCPURequestUtilization(mbc.Metrics.K8sPodCPURequestUtilization), metricK8sPodFilesystemAvailable: newMetricK8sPodFilesystemAvailable(mbc.Metrics.K8sPodFilesystemAvailable), metricK8sPodFilesystemCapacity: newMetricK8sPodFilesystemCapacity(mbc.Metrics.K8sPodFilesystemCapacity), metricK8sPodFilesystemUsage: 
newMetricK8sPodFilesystemUsage(mbc.Metrics.K8sPodFilesystemUsage), @@ -2667,6 +2871,8 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricContainerMemoryUsage.emit(ils.Metrics()) mb.metricContainerMemoryWorkingSet.emit(ils.Metrics()) mb.metricContainerUptime.emit(ils.Metrics()) + mb.metricK8sContainerCPULimitUtilization.emit(ils.Metrics()) + mb.metricK8sContainerCPURequestUtilization.emit(ils.Metrics()) mb.metricK8sContainerMemoryLimitUtilization.emit(ils.Metrics()) mb.metricK8sContainerMemoryRequestUtilization.emit(ils.Metrics()) mb.metricK8sNodeCPUTime.emit(ils.Metrics()) @@ -2685,6 +2891,8 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricK8sNodeUptime.emit(ils.Metrics()) mb.metricK8sPodCPUTime.emit(ils.Metrics()) mb.metricK8sPodCPUUtilization.emit(ils.Metrics()) + mb.metricK8sPodCPULimitUtilization.emit(ils.Metrics()) + mb.metricK8sPodCPURequestUtilization.emit(ils.Metrics()) mb.metricK8sPodFilesystemAvailable.emit(ils.Metrics()) mb.metricK8sPodFilesystemCapacity.emit(ils.Metrics()) mb.metricK8sPodFilesystemUsage.emit(ils.Metrics()) @@ -2784,6 +2992,16 @@ func (mb *MetricsBuilder) RecordContainerUptimeDataPoint(ts pcommon.Timestamp, v mb.metricContainerUptime.recordDataPoint(mb.startTime, ts, val) } +// RecordK8sContainerCPULimitUtilizationDataPoint adds a data point to k8s.container.cpu_limit_utilization metric. +func (mb *MetricsBuilder) RecordK8sContainerCPULimitUtilizationDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricK8sContainerCPULimitUtilization.recordDataPoint(mb.startTime, ts, val) +} + +// RecordK8sContainerCPURequestUtilizationDataPoint adds a data point to k8s.container.cpu_request_utilization metric. +func (mb *MetricsBuilder) RecordK8sContainerCPURequestUtilizationDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricK8sContainerCPURequestUtilization.recordDataPoint(mb.startTime, ts, val) +} + // RecordK8sContainerMemoryLimitUtilizationDataPoint adds a data point to k8s.container.memory_limit_utilization metric. func (mb *MetricsBuilder) RecordK8sContainerMemoryLimitUtilizationDataPoint(ts pcommon.Timestamp, val float64) { mb.metricK8sContainerMemoryLimitUtilization.recordDataPoint(mb.startTime, ts, val) @@ -2874,6 +3092,16 @@ func (mb *MetricsBuilder) RecordK8sPodCPUUtilizationDataPoint(ts pcommon.Timesta mb.metricK8sPodCPUUtilization.recordDataPoint(mb.startTime, ts, val) } +// RecordK8sPodCPULimitUtilizationDataPoint adds a data point to k8s.pod.cpu_limit_utilization metric. +func (mb *MetricsBuilder) RecordK8sPodCPULimitUtilizationDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricK8sPodCPULimitUtilization.recordDataPoint(mb.startTime, ts, val) +} + +// RecordK8sPodCPURequestUtilizationDataPoint adds a data point to k8s.pod.cpu_request_utilization metric. +func (mb *MetricsBuilder) RecordK8sPodCPURequestUtilizationDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricK8sPodCPURequestUtilization.recordDataPoint(mb.startTime, ts, val) +} + // RecordK8sPodFilesystemAvailableDataPoint adds a data point to k8s.pod.filesystem.available metric. 
func (mb *MetricsBuilder) RecordK8sPodFilesystemAvailableDataPoint(ts pcommon.Timestamp, val int64) { mb.metricK8sPodFilesystemAvailable.recordDataPoint(mb.startTime, ts, val) diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go index 11cb737ec6d2..8fbcb9a171d0 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go @@ -101,6 +101,12 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordContainerUptimeDataPoint(ts, 1) + allMetricsCount++ + mb.RecordK8sContainerCPULimitUtilizationDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordK8sContainerCPURequestUtilizationDataPoint(ts, 1) + allMetricsCount++ mb.RecordK8sContainerMemoryLimitUtilizationDataPoint(ts, 1) @@ -170,6 +176,12 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordK8sPodCPUUtilizationDataPoint(ts, 1) + allMetricsCount++ + mb.RecordK8sPodCPULimitUtilizationDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordK8sPodCPURequestUtilizationDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordK8sPodFilesystemAvailableDataPoint(ts, 1) @@ -429,6 +441,30 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "k8s.container.cpu_limit_utilization": + assert.False(t, validatedMetrics["k8s.container.cpu_limit_utilization"], "Found a duplicate in the metrics slice: k8s.container.cpu_limit_utilization") + validatedMetrics["k8s.container.cpu_limit_utilization"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Container cpu utilization as a ratio of the container's limits", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + case "k8s.container.cpu_request_utilization": + assert.False(t, validatedMetrics["k8s.container.cpu_request_utilization"], "Found a duplicate in the metrics slice: k8s.container.cpu_request_utilization") + validatedMetrics["k8s.container.cpu_request_utilization"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Container cpu utilization as a ratio of the container's requests", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) case "k8s.container.memory_limit_utilization": assert.False(t, validatedMetrics["k8s.container.memory_limit_utilization"], "Found a duplicate in the metrics slice: k8s.container.memory_limit_utilization") validatedMetrics["k8s.container.memory_limit_utilization"] = true @@ -667,6 +703,30 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) assert.Equal(t, float64(1), dp.DoubleValue()) + case 
"k8s.pod.cpu_limit_utilization": + assert.False(t, validatedMetrics["k8s.pod.cpu_limit_utilization"], "Found a duplicate in the metrics slice: k8s.pod.cpu_limit_utilization") + validatedMetrics["k8s.pod.cpu_limit_utilization"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Pod cpu utilization as a ratio of the pod's total container limits. If any container is missing a limit the metric is not emitted.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + case "k8s.pod.cpu_request_utilization": + assert.False(t, validatedMetrics["k8s.pod.cpu_request_utilization"], "Found a duplicate in the metrics slice: k8s.pod.cpu_request_utilization") + validatedMetrics["k8s.pod.cpu_request_utilization"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Pod cpu utilization as a ratio of the pod's total container requests. If any container is missing a request the metric is not emitted.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) case "k8s.pod.filesystem.available": assert.False(t, validatedMetrics["k8s.pod.filesystem.available"], "Found a duplicate in the metrics slice: k8s.pod.filesystem.available") validatedMetrics["k8s.pod.filesystem.available"] = true diff --git a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go index 196accd9411d..7aef32500de8 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go @@ -19,9 +19,10 @@ type MetricsBuilders struct { } type CPUMetrics struct { - Time RecordDoubleDataPointFunc - Utilization RecordDoubleDataPointFunc - UsagePercent RecordDoubleDataPointFunc + Time RecordDoubleDataPointFunc + Utilization RecordDoubleDataPointFunc + LimitUtilization RecordDoubleDataPointFunc + RequestUtilization RecordDoubleDataPointFunc } var NodeCPUMetrics = CPUMetrics{ @@ -30,13 +31,17 @@ var NodeCPUMetrics = CPUMetrics{ } var PodCPUMetrics = CPUMetrics{ - Time: (*MetricsBuilder).RecordK8sPodCPUTimeDataPoint, - Utilization: (*MetricsBuilder).RecordK8sPodCPUUtilizationDataPoint, + Time: (*MetricsBuilder).RecordK8sPodCPUTimeDataPoint, + Utilization: (*MetricsBuilder).RecordK8sPodCPUUtilizationDataPoint, + LimitUtilization: (*MetricsBuilder).RecordK8sPodCPULimitUtilizationDataPoint, + RequestUtilization: (*MetricsBuilder).RecordK8sPodCPURequestUtilizationDataPoint, } var ContainerCPUMetrics = CPUMetrics{ - Time: (*MetricsBuilder).RecordContainerCPUTimeDataPoint, - Utilization: (*MetricsBuilder).RecordContainerCPUUtilizationDataPoint, + Time: (*MetricsBuilder).RecordContainerCPUTimeDataPoint, + Utilization: (*MetricsBuilder).RecordContainerCPUUtilizationDataPoint, + LimitUtilization: (*MetricsBuilder).RecordK8sContainerCPULimitUtilizationDataPoint, + RequestUtilization: 
(*MetricsBuilder).RecordK8sContainerCPURequestUtilizationDataPoint, } type MemoryMetrics struct { diff --git a/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml b/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml index 35c5011d0d9b..c22b9fe2daf8 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml @@ -25,6 +25,10 @@ all_set: enabled: true container.uptime: enabled: true + k8s.container.cpu_limit_utilization: + enabled: true + k8s.container.cpu_request_utilization: + enabled: true k8s.container.memory_limit_utilization: enabled: true k8s.container.memory_request_utilization: @@ -61,6 +65,10 @@ all_set: enabled: true k8s.pod.cpu.utilization: enabled: true + k8s.pod.cpu_limit_utilization: + enabled: true + k8s.pod.cpu_request_utilization: + enabled: true k8s.pod.filesystem.available: enabled: true k8s.pod.filesystem.capacity: @@ -156,6 +164,10 @@ none_set: enabled: false container.uptime: enabled: false + k8s.container.cpu_limit_utilization: + enabled: false + k8s.container.cpu_request_utilization: + enabled: false k8s.container.memory_limit_utilization: enabled: false k8s.container.memory_request_utilization: @@ -192,6 +204,10 @@ none_set: enabled: false k8s.pod.cpu.utilization: enabled: false + k8s.pod.cpu_limit_utilization: + enabled: false + k8s.pod.cpu_request_utilization: + enabled: false k8s.pod.filesystem.available: enabled: false k8s.pod.filesystem.capacity: diff --git a/receiver/kubeletstatsreceiver/metadata.yaml b/receiver/kubeletstatsreceiver/metadata.yaml index 56fc76893419..e384819097e7 100644 --- a/receiver/kubeletstatsreceiver/metadata.yaml +++ b/receiver/kubeletstatsreceiver/metadata.yaml @@ -217,6 +217,20 @@ metrics: gauge: value_type: int attributes: [] + k8s.pod.cpu_limit_utilization: + enabled: false + description: "Pod cpu utilization as a ratio of the pod's total container limits. If any container is missing a limit the metric is not emitted." + unit: 1 + gauge: + value_type: double + attributes: [ ] + k8s.pod.cpu_request_utilization: + enabled: false + description: "Pod cpu utilization as a ratio of the pod's total container requests. If any container is missing a request the metric is not emitted." + unit: 1 + gauge: + value_type: double + attributes: [ ] k8s.pod.memory_limit_utilization: enabled: false description: "Pod memory utilization as a ratio of the pod's total container limits. If any container is missing a limit the metric is not emitted." 
@@ -337,6 +351,20 @@ metrics: gauge: value_type: int attributes: [] + k8s.container.cpu_limit_utilization: + enabled: false + description: "Container cpu utilization as a ratio of the container's limits" + unit: 1 + gauge: + value_type: double + attributes: [ ] + k8s.container.cpu_request_utilization: + enabled: false + description: "Container cpu utilization as a ratio of the container's requests" + unit: 1 + gauge: + value_type: double + attributes: [ ] k8s.container.memory_limit_utilization: enabled: false description: "Container memory utilization as a ratio of the container's limits" diff --git a/receiver/kubeletstatsreceiver/scraper.go b/receiver/kubeletstatsreceiver/scraper.go index 580bac72fa00..f6aa46bc9970 100644 --- a/receiver/kubeletstatsreceiver/scraper.go +++ b/receiver/kubeletstatsreceiver/scraper.go @@ -59,7 +59,11 @@ func newKubletScraper( ContainerMetricsBuilder: metadata.NewMetricsBuilder(metricsConfig, set), OtherMetricsBuilder: metadata.NewMetricsBuilder(metricsConfig, set), }, - needsResources: metricsConfig.Metrics.K8sPodMemoryLimitUtilization.Enabled || + needsResources: metricsConfig.Metrics.K8sPodCPULimitUtilization.Enabled || + metricsConfig.Metrics.K8sPodCPURequestUtilization.Enabled || + metricsConfig.Metrics.K8sContainerCPULimitUtilization.Enabled || + metricsConfig.Metrics.K8sContainerCPURequestUtilization.Enabled || + metricsConfig.Metrics.K8sPodMemoryLimitUtilization.Enabled || metricsConfig.Metrics.K8sPodMemoryRequestUtilization.Enabled || metricsConfig.Metrics.K8sContainerMemoryLimitUtilization.Enabled || metricsConfig.Metrics.K8sContainerMemoryRequestUtilization.Enabled, diff --git a/receiver/kubeletstatsreceiver/scraper_test.go b/receiver/kubeletstatsreceiver/scraper_test.go index 06f3c5fc720d..c991bd109a5c 100644 --- a/receiver/kubeletstatsreceiver/scraper_test.go +++ b/receiver/kubeletstatsreceiver/scraper_test.go @@ -170,6 +170,12 @@ func TestScraperWithPercentMetrics(t *testing.T) { ContainerMemoryUsage: metadata.MetricConfig{ Enabled: false, }, + K8sContainerCPULimitUtilization: metadata.MetricConfig{ + Enabled: true, + }, + K8sContainerCPURequestUtilization: metadata.MetricConfig{ + Enabled: true, + }, K8sContainerMemoryLimitUtilization: metadata.MetricConfig{ Enabled: true, }, @@ -248,6 +254,12 @@ func TestScraperWithPercentMetrics(t *testing.T) { K8sPodMemoryUsage: metadata.MetricConfig{ Enabled: false, }, + K8sPodCPULimitUtilization: metadata.MetricConfig{ + Enabled: true, + }, + K8sPodCPURequestUtilization: metadata.MetricConfig{ + Enabled: true, + }, K8sPodMemoryLimitUtilization: metadata.MetricConfig{ Enabled: true, }, @@ -291,19 +303,43 @@ func TestScraperWithPercentMetrics(t *testing.T) { md, err := r.Scrape(context.Background()) require.NoError(t, err) - require.Equal(t, 4, md.DataPointCount()) - - assert.Equal(t, "k8s.pod.memory_limit_utilization", md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Name()) - assert.True(t, md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).DoubleValue() <= 1) - assert.True(t, md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).DoubleValue() >= 0) - assert.Equal(t, "k8s.pod.memory_request_utilization", md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Name()) - assert.True(t, md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Gauge().DataPoints().At(0).DoubleValue() > 1) - - assert.Equal(t, "k8s.container.memory_limit_utilization", 
md.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(0).Name()) - assert.True(t, md.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).DoubleValue() <= 1) - assert.True(t, md.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).DoubleValue() >= 0) - assert.Equal(t, "k8s.container.memory_request_utilization", md.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(1).Name()) - assert.True(t, md.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(1).Gauge().DataPoints().At(0).DoubleValue() > 1) + require.Equal(t, 8, md.DataPointCount()) + + currentMetric := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) + assert.Equal(t, "k8s.pod.cpu_limit_utilization", currentMetric.Name()) + assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() <= 1) + assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() >= 0) + + currentMetric = md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1) + assert.Equal(t, "k8s.pod.cpu_request_utilization", currentMetric.Name()) + assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() > 1) + + currentMetric = md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(2) + assert.Equal(t, "k8s.pod.memory_limit_utilization", currentMetric.Name()) + assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() <= 1) + assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() >= 0) + + currentMetric = md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(3) + assert.Equal(t, "k8s.pod.memory_request_utilization", currentMetric.Name()) + assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() > 1) + + currentMetric = md.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(0) + assert.Equal(t, "k8s.container.cpu_limit_utilization", currentMetric.Name()) + assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() <= 1) + assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() >= 0) + + currentMetric = md.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(1) + assert.Equal(t, "k8s.container.cpu_request_utilization", currentMetric.Name()) + assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() > 1) + + currentMetric = md.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(2) + assert.Equal(t, "k8s.container.memory_limit_utilization", currentMetric.Name()) + assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() <= 1) + assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() >= 0) + + currentMetric = md.ResourceMetrics().At(1).ScopeMetrics().At(0).Metrics().At(3) + assert.Equal(t, "k8s.container.memory_request_utilization", currentMetric.Name()) + assert.True(t, currentMetric.Gauge().DataPoints().At(0).DoubleValue() > 1) } diff --git a/receiver/kubeletstatsreceiver/testdata/pods.json b/receiver/kubeletstatsreceiver/testdata/pods.json index 070f8146b370..6ae35cc2e69c 100644 --- a/receiver/kubeletstatsreceiver/testdata/pods.json +++ b/receiver/kubeletstatsreceiver/testdata/pods.json @@ -11,11 +11,11 @@ "name":"kube-scheduler", "resources": { "requests": { - "cpu": "50m", + "cpu": "2m", "memory": "10M" }, "limits": { - "cpu": "100m", + "cpu": "4m", "memory": "100M" } }
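As promised in the description, here is a minimal sketch of the ratio computation added to `addCPUUsageMetric` in `receiver/kubeletstatsreceiver/internal/kubelet/cpu.go`. This is an illustration, not the receiver code: the `resources` struct and `cpuUtilizations` helper below are stand-ins, and it assumes `cpuLimit`/`cpuRequest` carry CPU quantities in cores with 0 meaning "not set", consistent with the `> 0` guards in the patch. At the pod level the same ratios are taken against the summed container limits/requests, populated only when every container declares one, which is why the pod metrics are documented as not emitted when any container is missing a limit or request.

```go
package main

import "fmt"

// resources stands in for the receiver's internal resources struct
// (assumption: CPU quantities in cores, 0 meaning "not set").
type resources struct {
	cpuLimit   float64
	cpuRequest float64
}

// cpuUtilizations mirrors the logic added to addCPUUsageMetric: the kubelet
// reports UsageNanoCores, which is converted to cores and then divided by
// the configured limit and request. A nil result models "data point not
// recorded" (the corresponding guard failed).
func cpuUtilizations(usageNanoCores uint64, r resources) (usage float64, limitUtil, requestUtil *float64) {
	usage = float64(usageNanoCores) / 1_000_000_000
	if r.cpuLimit > 0 {
		v := usage / r.cpuLimit
		limitUtil = &v
	}
	if r.cpuRequest > 0 {
		v := usage / r.cpuRequest
		requestUtil = &v
	}
	return usage, limitUtil, requestUtil
}

func main() {
	// Hypothetical container using 3m CPU against the 4m limit and 2m request
	// from the updated testdata/pods.json: limit utilization is 0.75 and
	// request utilization is 1.5 (a request ratio may exceed 1, which the
	// scraper test asserts).
	_, limitUtil, requestUtil := cpuUtilizations(3_000_000, resources{cpuLimit: 0.004, cpuRequest: 0.002})
	fmt.Printf("limit_utilization=%.2f request_utilization=%.2f\n", *limitUtil, *requestUtil)
}
```

All four new metrics are disabled by default (see `metadata.yaml`); enabling any of them under the receiver's `metrics` config, e.g. `k8s.pod.cpu_limit_utilization: {enabled: true}`, also flips the scraper's `needsResources` flag so the pod and container limit/request metadata is collected.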