Metrics: Remove ingress_upstream_latency_seconds. #11795

Merged: 1 commit, Aug 16, 2024
6 changes: 0 additions & 6 deletions docs/user-guide/monitoring.md
@@ -386,19 +386,13 @@ Prometheus metrics are exposed on port 10254.
The number of bytes sent to a client. **Deprecated**, use `nginx_ingress_controller_response_size`\
nginx var: `bytes_sent`

-* `nginx_ingress_controller_ingress_upstream_latency_seconds` Summary\
-  Upstream service latency per Ingress. **Deprecated**, use `nginx_ingress_controller_connect_duration_seconds`\
-  nginx var: `upstream_connect_time`

```
# HELP nginx_ingress_controller_bytes_sent The number of bytes sent to a client. DEPRECATED! Use nginx_ingress_controller_response_size
# TYPE nginx_ingress_controller_bytes_sent histogram
# HELP nginx_ingress_controller_connect_duration_seconds The time spent on establishing a connection with the upstream server
# TYPE nginx_ingress_controller_connect_duration_seconds histogram
# HELP nginx_ingress_controller_header_duration_seconds The time spent on receiving first header from the upstream server
# TYPE nginx_ingress_controller_header_duration_seconds histogram
-# HELP nginx_ingress_controller_ingress_upstream_latency_seconds Upstream service latency per Ingress DEPRECATED! Use nginx_ingress_controller_connect_duration_seconds
-# TYPE nginx_ingress_controller_ingress_upstream_latency_seconds summary
# HELP nginx_ingress_controller_request_duration_seconds The request processing time in milliseconds
# TYPE nginx_ingress_controller_request_duration_seconds histogram
# HELP nginx_ingress_controller_request_size The request length (including request line, header, and request body)
```
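
For anyone consuming these metrics, the practical difference between the removed summary and its histogram replacement is the shape of the exposed series: a summary publishes pre-computed `{quantile="..."}` series, while a histogram publishes `_bucket`, `_sum`, and `_count` series that are aggregated at query time. Below is a minimal client_golang sketch of the two shapes; the `demo_*` metric names, the bucket choice, and the PromQL line in the comment are illustrative, not part of the controller.

```go
package main

import (
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    reg := prometheus.NewRegistry()

    // Old shape: a summary with explicit objectives (client_golang no longer
    // provides DefObjectives, so the quantiles must be spelled out).
    // Exposes: demo_latency_summary{quantile="0.5"|"0.9"|"0.99"}, _sum, _count.
    summary := prometheus.NewSummary(prometheus.SummaryOpts{
        Name:       "demo_latency_summary",
        Help:       "Latency as a summary (pre-computed quantiles).",
        Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
    })

    // New shape: a histogram with buckets.
    // Exposes: demo_latency_seconds_bucket{le="..."}, _sum, _count.
    // Quantiles are computed at query time, e.g. (hypothetical dashboard query):
    //   histogram_quantile(0.9, rate(demo_latency_seconds_bucket[5m]))
    histogram := prometheus.NewHistogram(prometheus.HistogramOpts{
        Name:    "demo_latency_seconds",
        Help:    "Latency as a histogram (aggregatable buckets).",
        Buckets: prometheus.DefBuckets,
    })

    reg.MustRegister(summary, histogram)
    summary.Observe(0.042)
    histogram.Observe(0.042)

    http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
    log.Fatal(http.ListenAndServe(":9090", nil))
}
```

Dashboards or alerts that read the summary's pre-computed quantiles would typically move to `histogram_quantile()` over the `nginx_ingress_controller_connect_duration_seconds_bucket` series instead.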
54 changes: 4 additions & 50 deletions internal/ingress/metric/collectors/socket.go
@@ -64,11 +64,10 @@ type metricMapping map[string]prometheus.Collector
type SocketCollector struct {
prometheus.Collector

-    upstreamLatency *prometheus.SummaryVec // TODO: DEPRECATED, remove
-    connectTime     *prometheus.HistogramVec
-    headerTime      *prometheus.HistogramVec
-    requestTime     *prometheus.HistogramVec
-    responseTime    *prometheus.HistogramVec
+    connectTime  *prometheus.HistogramVec
+    headerTime   *prometheus.HistogramVec
+    requestTime  *prometheus.HistogramVec
+    responseTime *prometheus.HistogramVec

    requestLength  *prometheus.HistogramVec
    responseLength *prometheus.HistogramVec
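
With the summary field gone, the struct carries only `HistogramVec` members. For orientation, here is a hedged sketch of the kind of vector that would back the `connectTime` field, reusing the metric name and help text documented in `docs/user-guide/monitoring.md`; the label set and bucket choice are assumptions, since the actual construction is not part of this hunk.

```go
package collectors

import "github.com/prometheus/client_golang/prometheus"

// Sketch only: a HistogramVec of the shape that could back connectTime.
// Name and Help match the documented metric; labels and buckets are illustrative.
var connectTimeSketch = prometheus.NewHistogramVec(
    prometheus.HistogramOpts{
        Namespace: "nginx_ingress_controller", // stands in for PrometheusNamespace
        Name:      "connect_duration_seconds",
        Help:      "The time spent on establishing a connection with the upstream server",
        Buckets:   prometheus.DefBuckets, // presumably derived from the HistogramBuckets argument of NewSocketCollector
    },
    []string{"namespace", "ingress", "service", "canary"}, // illustrative label set
)
```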
@@ -98,10 +97,6 @@ var requestTags = []string{
"canary",
}

-// DefObjectives was removed in https://github.com/prometheus/client_golang/pull/262
-// updating the library to latest version changed the output of the metrics
-var defObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}

// NewSocketCollector creates a new SocketCollector instance using
// the ingress watch namespace and class used by the controller
func NewSocketCollector(pod, namespace, class string, metricsPerHost, reportStatusClasses bool, buckets HistogramBuckets, excludeMetrics []string) (*SocketCollector, error) {
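
The `defObjectives` table removed above existed only because client_golang dropped its `DefObjectives` default (the linked PR), after which a Summary declared without explicit `Objectives` publishes no quantile series at all; with the last summary gone, the table is dead code. A small, self-contained illustration of that default behaviour; the demo metric name is made up:

```go
package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    // Without explicit Objectives, a Summary exposes only _sum and _count,
    // no quantile series. That is why the collector carried its own
    // defObjectives map for the summary, and why the histogram replacement
    // needs no such table.
    s := prometheus.NewSummary(prometheus.SummaryOpts{
        Name: "demo_plain_summary", // illustrative name, not a controller metric
        Help: "A summary left with the (empty) default objectives.",
    })
    s.Observe(0.042)

    reg := prometheus.NewRegistry()
    reg.MustRegister(s)

    mfs, err := reg.Gather()
    if err != nil {
        panic(err)
    }
    for _, mf := range mfs {
        // Prints an empty quantile list: "demo_plain_summary []".
        fmt.Println(mf.GetName(), mf.GetMetric()[0].GetSummary().GetQuantile())
    }
}
```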
@@ -248,19 +243,6 @@ func NewSocketCollector(pod, namespace, class string, metricsPerHost, reportStat
em,
mm,
),

-        upstreamLatency: summaryMetric(
-            &prometheus.SummaryOpts{
-                Name:        "ingress_upstream_latency_seconds",
-                Help:        "DEPRECATED Upstream service latency per Ingress",
-                Namespace:   PrometheusNamespace,
-                ConstLabels: constLabels,
-                Objectives:  defObjectives,
-            },
-            []string{"ingress", "namespace", "service", "canary"},
-            em,
-            mm,
-        ),
}

sc.metricMapping = mm
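
The `sc.metricMapping = mm` assignment is what ties the registered vectors together: the collector's `Describe` and `Collect` duties can then be fulfilled by fanning out over that map, which is the usual pattern for a collector built this way. A minimal sketch of that pattern, with an illustrative type rather than the controller's exact implementation:

```go
package collectors

import "github.com/prometheus/client_golang/prometheus"

// Sketch: delegate Describe/Collect to every metric recorded in the mapping.
type mappedCollector struct {
    metricMapping map[string]prometheus.Collector
}

func (c *mappedCollector) Describe(ch chan<- *prometheus.Desc) {
    for _, m := range c.metricMapping {
        m.Describe(ch)
    }
}

func (c *mappedCollector) Collect(ch chan<- prometheus.Metric) {
    for _, m := range c.metricMapping {
        m.Collect(ch)
    }
}
```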
@@ -275,18 +257,6 @@ func containsMetric(excludeMetrics map[string]struct{}, name string) bool {
return false
}

-func summaryMetric(opts *prometheus.SummaryOpts, requestTags []string, excludeMetrics map[string]struct{}, metricMapping metricMapping) *prometheus.SummaryVec {
-    if containsMetric(excludeMetrics, opts.Name) {
-        return nil
-    }
-    m := prometheus.NewSummaryVec(
-        *opts,
-        requestTags,
-    )
-    metricMapping[prometheus.BuildFQName(PrometheusNamespace, "", opts.Name)] = m
-    return m
-}

func counterMetric(opts *prometheus.CounterOpts, requestTags []string, excludeMetrics map[string]struct{}, metricMapping metricMapping) *prometheus.CounterVec {
if containsMetric(excludeMetrics, opts.Name) {
return nil
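
With `summaryMetric` deleted, the remaining vectors are presumably built by sibling helpers of the `counterMetric` shown above, following the same exclude-list and `metricMapping` bookkeeping. A sketch of what a histogram-flavoured helper of that shape could look like; the function name and the literal namespace string are assumptions standing in for identifiers not shown in this hunk.

```go
package collectors

import "github.com/prometheus/client_golang/prometheus"

// Sketch: the same shape as the removed summaryMetric and the counterMetric
// above, but producing a HistogramVec.
func histogramMetric(opts *prometheus.HistogramOpts, requestTags []string,
    excludeMetrics map[string]struct{}, mapping map[string]prometheus.Collector) *prometheus.HistogramVec {

    if _, found := excludeMetrics[opts.Name]; found { // containsMetric in the real code
        return nil
    }
    m := prometheus.NewHistogramVec(*opts, requestTags)
    mapping[prometheus.BuildFQName("nginx_ingress_controller", "", opts.Name)] = m
    return m
}
```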
@@ -358,13 +328,6 @@ func (sc *SocketCollector) handleMessage(msg []byte) {
collectorLabels["host"] = stats.Host
}

-        latencyLabels := prometheus.Labels{
-            "namespace": stats.Namespace,
-            "ingress":   stats.Ingress,
-            "service":   stats.Service,
-            "canary":    stats.Canary,
-        }

if sc.requests != nil {
requestsMetric, err := sc.requests.GetMetricWith(collectorLabels)
if err != nil {
@@ -383,15 +346,6 @@ func (sc *SocketCollector) handleMessage(msg []byte) {
connectTimeMetric.Observe(stats.Latency)
}
}

-        if sc.upstreamLatency != nil {
-            latencyMetric, err := sc.upstreamLatency.GetMetricWith(latencyLabels)
-            if err != nil {
-                klog.ErrorS(err, "Error fetching latency metric")
-            } else {
-                latencyMetric.Observe(stats.Latency)
-            }
-        }
}

if stats.HeaderTime != -1 && sc.headerTime != nil {
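
On the hot path, nothing about recording latency changes except that the value is now observed only once: `handleMessage` resolves child metrics with `GetMetricWith(collectorLabels)` and `connectTimeMetric.Observe(stats.Latency)` as shown above, so dropping the summary simply removes the second, `latencyLabels`-keyed observation of the same value. A condensed, self-contained sketch of the surviving flow; field names beyond those visible in the diff, and the reduced label set, are assumptions.

```go
package collectors

import (
    "github.com/prometheus/client_golang/prometheus"
    "k8s.io/klog/v2"
)

// socketData is a trimmed stand-in for the stats decoded in handleMessage;
// only fields visible in this diff are used.
type socketData struct {
    Namespace string
    Ingress   string
    Service   string
    Canary    string
    Host      string
    Latency   float64 // nginx $upstream_connect_time; -1 when absent
}

// observeConnectTime mirrors the surviving code path: build the shared label
// set, resolve the child histogram, observe once. The label keys must match
// the ones the HistogramVec was declared with; the controller's collectorLabels
// map carries additional request labels, plus "host" when metricsPerHost is on.
func observeConnectTime(connectTime *prometheus.HistogramVec, stats socketData, metricsPerHost bool) {
    if connectTime == nil || stats.Latency == -1 {
        return
    }

    labels := prometheus.Labels{
        "namespace": stats.Namespace,
        "ingress":   stats.Ingress,
        "service":   stats.Service,
        "canary":    stats.Canary,
    }
    if metricsPerHost {
        labels["host"] = stats.Host
    }

    connectTimeMetric, err := connectTime.GetMetricWith(labels)
    if err != nil {
        klog.ErrorS(err, "Error fetching connect duration metric")
        return
    }
    connectTimeMetric.Observe(stats.Latency)
}
```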