Reset Prometheus request tags on conf reload #2728

Closed
6 changes: 3 additions & 3 deletions cmd/nginx/main.go
@@ -41,7 +41,7 @@ import (
"k8s.io/ingress-nginx/internal/file"
"k8s.io/ingress-nginx/internal/ingress/annotations/class"
"k8s.io/ingress-nginx/internal/ingress/controller"
"k8s.io/ingress-nginx/internal/ingress/metric/collector"
metricCollector "k8s.io/ingress-nginx/internal/ingress/metric/collector"
"k8s.io/ingress-nginx/internal/k8s"
"k8s.io/ingress-nginx/internal/net/ssl"
"k8s.io/ingress-nginx/version"
@@ -127,13 +127,13 @@ func main() {
mux := http.NewServeMux()
go registerHandlers(conf.EnableProfiling, conf.ListenPorts.Health, ngx, mux)

err = collector.InitNGINXStatusCollector(conf.Namespace, class.IngressClass, conf.ListenPorts.Status)
err = metricCollector.InitNGINXStatusCollector(conf.Namespace, class.IngressClass, conf.ListenPorts.Status)

if err != nil {
glog.Fatalf("Error creating metric collector: %v", err)
}

err = collector.NewInstance(conf.Namespace, class.IngressClass)
err = metricCollector.InitSocketCollector(conf.Namespace, class.IngressClass)
if err != nil {
glog.Fatalf("Error creating unix socket server: %v", err)
}
3 changes: 3 additions & 0 deletions internal/ingress/controller/controller.go
@@ -36,6 +36,7 @@ import (
"k8s.io/ingress-nginx/internal/ingress/annotations/healthcheck"
"k8s.io/ingress-nginx/internal/ingress/annotations/proxy"
ngx_config "k8s.io/ingress-nginx/internal/ingress/controller/config"
metricCollector "k8s.io/ingress-nginx/internal/ingress/metric/collector"
"k8s.io/ingress-nginx/internal/k8s"
)

@@ -176,6 +177,8 @@ func (n *NGINXController) syncIngress(interface{}) error {
return err
}

metricCollector.CleanOldBackendMetrics()

glog.Infof("Backend successfully reloaded.")
ConfigSuccess(true)
IncReloadCount()
198 changes: 116 additions & 82 deletions internal/ingress/metric/collector/collector.go
@@ -63,9 +63,16 @@ type SocketCollector struct {
ingressClass string
}

// NewInstance creates a new SocketCollector instance
func NewInstance(ns string, class string) error {
sc := SocketCollector{}
// Our program-wide socketCollector instance
var currentSocketCollector *SocketCollector

// InitSocketCollector launches the lua socket metric collector
func InitSocketCollector(ns string, class string) error {
if currentSocketCollector != nil {
return nil
}

currentSocketCollector = &SocketCollector{}

ns = strings.Replace(ns, "-", "_", -1)

@@ -74,87 +74,14 @@ func NewInstance(ns string, class string) error {
return err
}

sc.listener = listener
sc.ns = ns
sc.ingressClass = class

requestTags := []string{"host", "status", "protocol", "method", "path", "upstream_name", "upstream_ip", "upstream_status", "namespace", "ingress", "service"}
collectorTags := []string{"namespace", "ingress_class"}

sc.upstreamResponseTime = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "upstream_response_time_seconds",
Help: "The time spent on receiving the response from the upstream server",
Namespace: ns,
},
requestTags,
)

sc.requestTime = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "request_duration_seconds",
Help: "The request processing time in seconds",
Namespace: ns,
},
requestTags,
)

sc.requestLength = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "request_length_bytes",
Help: "The request length (including request line, header, and request body)",
Namespace: ns,
Buckets: prometheus.LinearBuckets(10, 10, 10), // 10 buckets, each 10 bytes wide.
},
requestTags,
)

sc.requests = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "requests",
Help: "The total number of client requests.",
Namespace: ns,
},
collectorTags,
)

sc.bytesSent = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "bytes_sent",
Help: "The the number of bytes sent to a client",
Namespace: ns,
Buckets: prometheus.ExponentialBuckets(10, 10, 7), // 7 buckets, exponential factor of 10.
},
requestTags,
)

sc.collectorSuccess = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "collector_last_run_successful",
Help: "Whether the last collector run was successful (success = 1, failure = 0).",
Namespace: ns,
},
collectorTags,
)

sc.collectorSuccessTime = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "collector_last_run_successful_timestamp_seconds",
Help: "Timestamp of the last successful collector run",
Namespace: ns,
},
collectorTags,
)
currentSocketCollector.listener = listener
currentSocketCollector.ns = ns
currentSocketCollector.ingressClass = class

prometheus.MustRegister(sc.upstreamResponseTime)
prometheus.MustRegister(sc.requestTime)
prometheus.MustRegister(sc.requestLength)
prometheus.MustRegister(sc.requests)
prometheus.MustRegister(sc.bytesSent)
prometheus.MustRegister(sc.collectorSuccess)
prometheus.MustRegister(sc.collectorSuccessTime)
currentSocketCollector.initRequestMetricStore()
currentSocketCollector.initCollectorMetricStore()

go sc.Run()
go currentSocketCollector.Run()

return nil
}
@@ -273,6 +207,106 @@ func (sc *SocketCollector) Run() {
}
}

// CleanOldBackendMetrics resets the request metrics cache: the request metrics grow the cardinality
// of the "upstream_ip" tag on every scale-up/scale-down, rolling upgrade, etc. We need to flush the
// Prometheus client's "tag" cache to avoid a memory leak and to prevent Prometheus from storing
// "constant" metrics for upstreams that no longer exist.
func CleanOldBackendMetrics() {
currentSocketCollector.initRequestMetricStore()
}

func (sc *SocketCollector) initRequestMetricStore() {
if sc.upstreamResponseTime != nil {
prometheus.Unregister(sc.upstreamResponseTime)
}
if sc.requestTime != nil {
prometheus.Unregister(sc.requestTime)
}
if sc.requestLength != nil {
prometheus.Unregister(sc.requestLength)
}
if sc.bytesSent != nil {
prometheus.Unregister(sc.bytesSent)
}

requestTags := []string{"host", "status", "protocol", "method", "path", "upstream_name", "upstream_ip", "upstream_status", "namespace", "ingress", "service"}

sc.upstreamResponseTime = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "upstream_response_time_seconds",
Help: "The time spent on receiving the response from the upstream server",
Namespace: sc.ns,
},
requestTags,
)

sc.requestTime = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "request_duration_seconds",
Help: "The request processing time in seconds",
Namespace: sc.ns,
},
requestTags,
)

sc.requestLength = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "request_length_bytes",
Help: "The request length (including request line, header, and request body)",
Namespace: sc.ns,
Buckets: prometheus.LinearBuckets(10, 10, 10), // 10 buckets, each 10 bytes wide.
},
requestTags,
)

sc.bytesSent = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "bytes_sent",
Help: "The the number of bytes sent to a client",
Namespace: sc.ns,
Buckets: prometheus.ExponentialBuckets(10, 10, 7), // 7 buckets, exponential factor of 10.
},
requestTags,
)

prometheus.MustRegister(sc.upstreamResponseTime, sc.requestTime, sc.requestLength, sc.bytesSent)
}

func (sc *SocketCollector) initCollectorMetricStore() {
collectorTags := []string{"namespace", "ingress_class"}

sc.requests = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "requests",
Help: "The total number of client requests.",
Namespace: sc.ns,
},
collectorTags,
)

sc.collectorSuccess = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "collector_last_run_successful",
Help: "Whether the last collector run was successful (success = 1, failure = 0).",
Namespace: sc.ns,
},
collectorTags,
)

sc.collectorSuccessTime = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "collector_last_run_successful_timestamp_seconds",
Help: "Timestamp of the last successful collector run",
Namespace: sc.ns,
},
collectorTags,
)

prometheus.MustRegister(sc.requests)
prometheus.MustRegister(sc.collectorSuccess)
prometheus.MustRegister(sc.collectorSuccessTime)
}

const packetSize = 1024 * 65

// handleMessages process the content received in a network connection
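
The heart of this change is that initRequestMetricStore throws away the previously registered HistogramVec collectors and registers fresh, empty ones, which clears every label combination the Prometheus client library has accumulated. The sketch below is a minimal, standalone illustration of that Unregister/re-register pattern; it is not part of this diff, and the "demo" namespace, the single "upstream_ip" label, and the seriesCount helper are invented for the example.

// cardinality_reset_demo.go
//
// A minimal, standalone sketch (not part of the PR) showing why dropping a
// collector and registering a fresh one resets label cardinality. The metric
// and label names are illustrative only.
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// newRequestHistogram builds a histogram keyed by an upstream IP label,
// mimicking (in spirit) the request metrics registered by the collector.
func newRequestHistogram() *prometheus.HistogramVec {
	return prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "demo",
			Name:      "request_duration_seconds",
			Help:      "Request processing time, labeled by upstream IP (demo only).",
		},
		[]string{"upstream_ip"},
	)
}

// seriesCount returns how many label combinations the default registry
// currently exposes for the given metric family.
func seriesCount(name string) int {
	families, err := prometheus.DefaultGatherer.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range families {
		if mf.GetName() == name {
			return len(mf.GetMetric())
		}
	}
	return 0
}

func main() {
	hist := newRequestHistogram()
	prometheus.MustRegister(hist)

	// Simulate traffic to two upstream pods; each distinct IP becomes a new series.
	hist.WithLabelValues("10.0.0.1").Observe(0.05)
	hist.WithLabelValues("10.0.0.2").Observe(0.07)
	fmt.Println("series before reset:", seriesCount("demo_request_duration_seconds")) // 2

	// The reset: drop the old collector and register a brand-new, empty one.
	// This is the same Unregister/MustRegister dance initRequestMetricStore performs.
	prometheus.Unregister(hist)
	hist = newRequestHistogram()
	prometheus.MustRegister(hist)
	fmt.Println("series after reset:", seriesCount("demo_request_duration_seconds")) // 0

	// Only upstreams seen after the reset reappear.
	hist.WithLabelValues("10.0.0.3").Observe(0.04)
	fmt.Println("series after new traffic:", seriesCount("demo_request_duration_seconds")) // 1
}

Because a HistogramVec only exposes series for label combinations that have actually been observed, the freshly registered vector starts empty, so series for upstream IPs that disappeared during a reload are no longer reported on the next scrape.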