Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[k8sclusterreceiver] add k8s.pod.status_reason optional metric #24764

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 20 additions & 0 deletions .chloggen/k8sclusterreceiver-pod-status-reason.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
# Use this changelog template to create an entry for release notes.
# If your change doesn't affect end users, such as a test fix or a tooling change,
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: enhancement

# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
component: k8sclusterreceiver

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: "Add k8s.pod.status_reason optional metric"
dmitryax marked this conversation as resolved.
Show resolved Hide resolved

# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
issues: [24034]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:
18 changes: 18 additions & 0 deletions receiver/k8sclusterreceiver/documentation.md
Original file line number Diff line number Diff line change
Expand Up @@ -386,6 +386,24 @@ The usage for a particular resource with a configured limit.
| ---- | ----------- | ------ |
| resource | the name of the resource on which the quota is applied | Any Str |

## Optional Metrics

The following metrics are not emitted by default. Each of them can be enabled by applying the following configuration:

```yaml
metrics:
<metric_name>:
enabled: true
```

### k8s.pod.status_reason

Current status reason of the pod (1 - Evicted, 2 - NodeAffinity, 3 - NodeLost, 4 - Shutdown, 5 - UnexpectedAdmissionError, 6 - Unknown)

| Unit | Metric Type | Value Type |
| ---- | ----------- | ---------- |
| 1 | Gauge | Int |

## Resource Attributes

| Name | Description | Values | Enabled |
Expand Down

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,8 @@ all_set:
enabled: true
k8s.pod.phase:
enabled: true
k8s.pod.status_reason:
enabled: true
k8s.replicaset.available:
enabled: true
k8s.replicaset.desired:
Expand Down Expand Up @@ -206,6 +208,8 @@ none_set:
enabled: false
k8s.pod.phase:
enabled: false
k8s.pod.status_reason:
enabled: false
k8s.replicaset.available:
enabled: false
k8s.replicaset.desired:
Expand Down
18 changes: 18 additions & 0 deletions receiver/k8sclusterreceiver/internal/pod/pods.go
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,7 @@ func Transform(pod *corev1.Pod) *corev1.Pod {

func RecordMetrics(logger *zap.Logger, mb *metadata.MetricsBuilder, pod *corev1.Pod, ts pcommon.Timestamp) {
mb.RecordK8sPodPhaseDataPoint(ts, int64(phaseToInt(pod.Status.Phase)))
mb.RecordK8sPodStatusReasonDataPoint(ts, int64(reasonToInt(pod.Status.Reason)))
rb := mb.NewResourceBuilder()
rb.SetK8sNamespaceName(pod.Namespace)
rb.SetK8sNodeName(pod.Spec.NodeName)
Expand All @@ -83,6 +84,23 @@ func RecordMetrics(logger *zap.Logger, mb *metadata.MetricsBuilder, pod *corev1.
}
}

// reasonToInt maps a pod status reason string to the integer code reported
// by the k8s.pod.status_reason metric: 1 - Evicted, 2 - NodeAffinity,
// 3 - NodeLost, 4 - Shutdown, 5 - UnexpectedAdmissionError. Any other
// reason — including an empty one — is reported as 6 (Unknown).
func reasonToInt(reason string) int32 {
	knownReasons := []string{
		"Evicted",
		"NodeAffinity",
		"NodeLost",
		"Shutdown",
		"UnexpectedAdmissionError",
	}
	for i, known := range knownReasons {
		if reason == known {
			// Codes are 1-based in declaration order.
			return int32(i + 1)
		}
	}
	return 6
}

func phaseToInt(phase corev1.PodPhase) int32 {
switch phase {
case corev1.PodPending:
Expand Down
26 changes: 26 additions & 0 deletions receiver/k8sclusterreceiver/internal/pod/pods_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,32 @@ func TestPodAndContainerMetricsReportCPUMetrics(t *testing.T) {
)
}

// TestPodStatusReasonAndContainerMetricsReportCPUMetrics verifies that, with
// the optional k8s.pod.status_reason metric enabled, recording metrics for an
// evicted pod produces exactly the metrics captured in the
// testdata/expected_evicted.yaml golden file (timestamps and ordering ignored).
func TestPodStatusReasonAndContainerMetricsReportCPUMetrics(t *testing.T) {
	pod := testutils.NewPodWithContainer(
		"1",
		testutils.NewPodSpecWithContainer("container-name"),
		testutils.NewEvictedTerminatedPodStatusWithContainer("container-name", containerIDWithPreifx("container-id")),
	)

	// k8s.pod.status_reason is disabled by default; opt in for this test.
	cfg := metadata.DefaultMetricsBuilderConfig()
	cfg.Metrics.K8sPodStatusReason.Enabled = true

	builder := metadata.NewMetricsBuilder(cfg, receivertest.NewNopCreateSettings())
	now := pcommon.Timestamp(time.Now().UnixNano())
	RecordMetrics(zap.NewNop(), builder, pod, now)
	actual := builder.Emit()

	expected, err := golden.ReadMetrics(filepath.Join("testdata", "expected_evicted.yaml"))
	require.NoError(t, err)

	require.NoError(t, pmetrictest.CompareMetrics(expected, actual,
		pmetrictest.IgnoreTimestamp(),
		pmetrictest.IgnoreStartTimestamp(),
		pmetrictest.IgnoreResourceMetricsOrder(),
		pmetrictest.IgnoreMetricsOrder(),
		pmetrictest.IgnoreScopeMetricsOrder(),
	))
}

// containerIDWithPreifx prepends the docker runtime scheme to a raw container
// ID, matching the containerID format reported by the Kubernetes API.
// NOTE(review): the "Preifx" typo in the name is kept intentionally — other
// tests in this file reference it by this exact name.
var containerIDWithPreifx = func(rawID string) string {
	const dockerScheme = "docker://"
	return dockerScheme + rawID
}
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
resourceMetrics:
- resource:
attributes:
- key: k8s.namespace.name
value:
stringValue: test-namespace
- key: k8s.node.name
value:
stringValue: test-node
- key: k8s.pod.name
value:
stringValue: test-pod-1
- key: k8s.pod.uid
value:
stringValue: test-pod-1-uid
- key: opencensus.resourcetype
value:
stringValue: k8s
schemaUrl: https://opentelemetry.io/schemas/1.18.0
scopeMetrics:
- metrics:
- description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown)
gauge:
dataPoints:
- asInt: "4"
name: k8s.pod.phase
unit: "1"
- description: Current status reason of the pod (1 - Evicted, 2 - NodeAffinity, 3 - NodeLost, 4 - Shutdown, 5 - UnexpectedAdmissionError, 6 - Unknown)
gauge:
dataPoints:
- asInt: "1"
name: k8s.pod.status_reason
unit: "1"
scope:
name: otelcol/k8sclusterreceiver
version: latest
- resource:
attributes:
- key: container.id
value:
stringValue: container-id
- key: container.image.name
value:
stringValue: container-image-name
- key: container.image.tag
value:
stringValue: latest
- key: k8s.container.name
value:
stringValue: container-name
- key: k8s.namespace.name
value:
stringValue: test-namespace
- key: k8s.node.name
value:
stringValue: test-node
- key: k8s.pod.name
value:
stringValue: test-pod-1
- key: k8s.pod.uid
value:
stringValue: test-pod-1-uid
- key: opencensus.resourcetype
value:
stringValue: container
schemaUrl: https://opentelemetry.io/schemas/1.18.0
scopeMetrics:
- metrics:
- description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that.
gauge:
dataPoints:
- asInt: "3"
name: k8s.container.restarts
unit: "1"
- description: Whether a container has passed its readiness probe (0 for no, 1 for yes)
gauge:
dataPoints:
- asInt: "1"
name: k8s.container.ready
unit: "1"
- description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details
gauge:
dataPoints:
- asDouble: 10
name: k8s.container.cpu_request
unit: "{cpu}"
- description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details
gauge:
dataPoints:
- asDouble: 20
name: k8s.container.cpu_limit
unit: "{cpu}"
scope:
name: otelcol/k8sclusterreceiver
version: latest
Loading