Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

PMM-9870: Fix collstats indexSizes metrics (#953)

Merged
Merged 9 commits on Nov 14, 2024
13 changes: 3 additions & 10 deletions exporter/collstats_collector.go
Original file line number Diff line number Diff line change
Expand Up @@ -99,23 +99,16 @@ func (d *collstatsCollector) collect(ch chan<- prometheus.Metric) {

aggregation := bson.D{
{
Key: "$collStats", Value: bson.M{
Key: "$collStats",
Value: bson.M{
// TODO: PMM-9568 : Add support to handle histogram metrics
"latencyStats": bson.M{"histograms": false},
"storageStats": bson.M{"scale": 1},
},
},
}
project := bson.D{
{
Key: "$project", Value: bson.M{
"storageStats.wiredTiger": 0,
"storageStats.indexDetails": 0,
},
},
}

cursor, err := client.Database(database).Collection(collection).Aggregate(d.ctx, mongo.Pipeline{aggregation, project})
cursor, err := client.Database(database).Collection(collection).Aggregate(d.ctx, mongo.Pipeline{aggregation})
if err != nil {
logger.Errorf("cannot get $collstats cursor for collection %s.%s: %s", database, collection, err)

Expand Down
15 changes: 11 additions & 4 deletions exporter/collstats_collector_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,21 +53,27 @@ func TestCollStatsCollector(t *testing.T) {
ti := labelsGetterMock{}

collection := []string{"testdb.testcol_00", "testdb.testcol_01", "testdb.testcol_02"}
c := newCollectionStatsCollector(ctx, client, logrus.New(), false, false, ti, collection)
logger := logrus.New()
c := newCollectionStatsCollector(ctx, client, logger, false, false, ti, collection)

// The last \n at the end of this string is important
expected := strings.NewReader(`
# HELP mongodb_collstats_latencyStats_commands_latency collstats.latencyStats.commands.
# HELP mongodb_collstats_latencyStats_commands_latency collstats.latencyStats.commands.latency
# TYPE mongodb_collstats_latencyStats_commands_latency untyped
mongodb_collstats_latencyStats_commands_latency{collection="testcol_00",database="testdb"} 0
mongodb_collstats_latencyStats_commands_latency{collection="testcol_01",database="testdb"} 0
mongodb_collstats_latencyStats_commands_latency{collection="testcol_02",database="testdb"} 0
# HELP mongodb_collstats_latencyStats_transactions_ops collstats.latencyStats.transactions.
# HELP mongodb_collstats_latencyStats_transactions_ops collstats.latencyStats.transactions.ops
# TYPE mongodb_collstats_latencyStats_transactions_ops untyped
mongodb_collstats_latencyStats_transactions_ops{collection="testcol_00",database="testdb"} 0
mongodb_collstats_latencyStats_transactions_ops{collection="testcol_01",database="testdb"} 0
mongodb_collstats_latencyStats_transactions_ops{collection="testcol_02",database="testdb"} 0
# HELP mongodb_collstats_storageStats_capped collstats.storageStats.
# HELP mongodb_collstats_storageStats_indexSizes collstats.storageStats.indexSizes
# TYPE mongodb_collstats_storageStats_indexSizes untyped
mongodb_collstats_storageStats_indexSizes{collection="testcol_00",database="testdb",index_name="_id_"} 4096
mongodb_collstats_storageStats_indexSizes{collection="testcol_01",database="testdb",index_name="_id_"} 4096
mongodb_collstats_storageStats_indexSizes{collection="testcol_02",database="testdb",index_name="_id_"} 4096
# HELP mongodb_collstats_storageStats_capped collstats.storageStats.capped
# TYPE mongodb_collstats_storageStats_capped untyped
mongodb_collstats_storageStats_capped{collection="testcol_00",database="testdb"} 0
mongodb_collstats_storageStats_capped{collection="testcol_01",database="testdb"} 0
Expand All @@ -81,6 +87,7 @@ mongodb_collstats_storageStats_capped{collection="testcol_02",database="testdb"}
filter := []string{
"mongodb_collstats_latencyStats_commands_latency",
"mongodb_collstats_storageStats_capped",
"mongodb_collstats_storageStats_indexSizes",
"mongodb_collstats_latencyStats_transactions_ops",
}
err := testutil.CollectAndCompare(c, expected, filter...)
Expand Down
26 changes: 14 additions & 12 deletions exporter/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,10 @@ func listCollections(ctx context.Context, client *mongo.Client, database string,
//
// - exclude: List of databases to be excluded. Useful to ignore system databases.
func databases(ctx context.Context, client *mongo.Client, filterInNamespaces []string, exclude []string) ([]string, error) {
opts := &options.ListDatabasesOptions{NameOnly: pointer.ToBool(true), AuthorizedDatabases: pointer.ToBool(true)}
opts := &options.ListDatabasesOptions{
NameOnly: pointer.ToBool(true),
AuthorizedDatabases: pointer.ToBool(true),
}

filter := bson.D{}

Expand All @@ -100,35 +103,34 @@ func databases(ctx context.Context, client *mongo.Client, filterInNamespaces []s
}

func makeExcludeFilter(exclude []string) *primitive.E {
filterExpressions := []bson.D{}
if len(exclude) == 0 {
return nil
}

filterExpressions := make([]bson.D, 0, len(exclude))
for _, dbname := range exclude {
filterExpressions = append(filterExpressions,
bson.D{{Key: "name", Value: bson.D{{Key: "$ne", Value: dbname}}}},
)
}

if len(filterExpressions) == 0 {
return nil
}

return &primitive.E{Key: "$and", Value: filterExpressions}
}

func makeDBsFilter(filterInNamespaces []string) *primitive.E {
filterExpressions := []bson.D{}

nss := removeEmptyStrings(filterInNamespaces)
if len(nss) == 0 {
return nil
}

filterExpressions := make([]bson.D, 0, len(nss))
for _, namespace := range nss {
parts := strings.Split(namespace, ".")
filterExpressions = append(filterExpressions,
bson.D{{Key: "name", Value: bson.D{{Key: "$eq", Value: parts[0]}}}},
)
}

if len(filterExpressions) == 0 {
return nil
}

return &primitive.E{Key: "$or", Value: filterExpressions}
}

Expand Down
9 changes: 5 additions & 4 deletions exporter/dbstats_collector_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,15 +58,16 @@ func TestDBStatsCollector(t *testing.T) {

ti := labelsGetterMock{}

c := newDBStatsCollector(ctx, client, logrus.New(), false, ti, []string{dbName}, false)
logger := logrus.New()
c := newDBStatsCollector(ctx, client, logger, false, ti, []string{dbName}, false)
expected := strings.NewReader(`
# HELP mongodb_dbstats_collections dbstats.
# HELP mongodb_dbstats_collections dbstats.collections
# TYPE mongodb_dbstats_collections untyped
mongodb_dbstats_collections{database="testdb"} 3
# HELP mongodb_dbstats_indexes dbstats.
# HELP mongodb_dbstats_indexes dbstats.indexes
# TYPE mongodb_dbstats_indexes untyped
mongodb_dbstats_indexes{database="testdb"} 3
# HELP mongodb_dbstats_objects dbstats.
# HELP mongodb_dbstats_objects dbstats.objects
# TYPE mongodb_dbstats_objects untyped
mongodb_dbstats_objects{database="testdb"} 30` + "\n")

Expand Down
5 changes: 3 additions & 2 deletions exporter/diagnostic_data_collector_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,10 +58,10 @@ func TestDiagnosticDataCollector(t *testing.T) {

// The last \n at the end of this string is important
expectedString := fmt.Sprintf(`
# HELP mongodb_oplog_stats_wt_btree_fixed_record_size %s.btree.
# HELP mongodb_oplog_stats_wt_btree_fixed_record_size %s.btree.fixed-record size
# TYPE mongodb_oplog_stats_wt_btree_fixed_record_size untyped
mongodb_oplog_stats_wt_btree_fixed_record_size 0
# HELP mongodb_oplog_stats_wt_transaction_update_conflicts %s.transaction.
# HELP mongodb_oplog_stats_wt_transaction_update_conflicts %s.transaction.update conflicts
# TYPE mongodb_oplog_stats_wt_transaction_update_conflicts untyped
mongodb_oplog_stats_wt_transaction_update_conflicts 0`, prefix, prefix)
expected := strings.NewReader(expectedString + "\n")
Expand Down Expand Up @@ -211,6 +211,7 @@ func TestAllDiagnosticDataCollectorMetrics(t *testing.T) {
client := tu.DefaultTestClient(ctx, t)

logger := logrus.New()
logger.SetLevel(logrus.DebugLevel)
ti := newTopologyInfo(ctx, client, logger)

dbBuildInfo, err := retrieveMongoDBBuildInfo(ctx, client, logger.WithField("component", "test"))
Expand Down
4 changes: 2 additions & 2 deletions exporter/indexstats_collector_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ func TestIndexStatsCollector(t *testing.T) {

// The last \n at the end of this string is important
expected := strings.NewReader(`
# HELP mongodb_indexstats_accesses_ops indexstats.accesses.
# HELP mongodb_indexstats_accesses_ops indexstats.accesses.ops
# TYPE mongodb_indexstats_accesses_ops untyped
mongodb_indexstats_accesses_ops{collection="testcol_00",database="testdb",key_name="_id_"} 0
mongodb_indexstats_accesses_ops{collection="testcol_00",database="testdb",key_name="idx_01"} 0
Expand Down Expand Up @@ -118,7 +118,7 @@ func TestDescendingIndexOverride(t *testing.T) {

// The last \n at the end of this string is important
expected := strings.NewReader(`
# HELP mongodb_indexstats_accesses_ops indexstats.accesses.
# HELP mongodb_indexstats_accesses_ops indexstats.accesses.ops
# TYPE mongodb_indexstats_accesses_ops untyped
mongodb_indexstats_accesses_ops{collection="testcol_00",database="testdb",key_name="_id_"} 0
mongodb_indexstats_accesses_ops{collection="testcol_00",database="testdb",key_name="f1_1"} 0
Expand Down
109 changes: 84 additions & 25 deletions exporter/metrics.go
Original file line number Diff line number Diff line change
Expand Up @@ -93,24 +93,68 @@ var (
// mongodb_ss_opcounters{legacy_op_type="command"} 67923
//
nodeToPDMetrics = map[string]string{
"collStats.storageStats.indexDetails.": "index_name",
"globalLock.activeQueue.": "count_type",
"globalLock.locks.": "lock_type",
"serverStatus.asserts.": "assert_type",
"serverStatus.connections.": "conn_type",
"serverStatus.globalLock.currentQueue.": "count_type",
"serverStatus.metrics.commands.": "cmd_name",
"serverStatus.metrics.cursor.open.": "csr_type",
"serverStatus.metrics.document.": "doc_op_type",
"serverStatus.opLatencies.": "op_type",
"serverStatus.opReadConcernCounters.": "concern_type",
"serverStatus.opcounters.": "legacy_op_type",
"serverStatus.opcountersRepl.": "legacy_op_type",
"serverStatus.transactions.commitTypes.": "commit_type",
"serverStatus.wiredTiger.concurrentTransactions.": "txn_rw_type",
"serverStatus.queues.execution.": "txn_rw_type",
"serverStatus.wiredTiger.perf.": "perf_bucket",
"systemMetrics.disks.": "device_name",
"collStats.storageStats.indexDetails.": "index_name",
"globalLock.activeQueue.": "count_type",
"globalLock.locks.": "lock_type",
"serverStatus.asserts.": "assert_type",
"serverStatus.connections.": "conn_type",
"serverStatus.globalLock.currentQueue.": "count_type",
"serverStatus.metrics.commands.": "cmd_name",
"serverStatus.metrics.cursor.open.": "csr_type",
"serverStatus.metrics.document.": "doc_op_type",
"serverStatus.opLatencies.": "op_type",
"serverStatus.opReadConcernCounters.": "concern_type",
"serverStatus.opcounters.": "legacy_op_type",
"serverStatus.opcountersRepl.": "legacy_op_type",
"serverStatus.transactions.commitTypes.": "commit_type",
"serverStatus.wiredTiger.concurrentTransactions.": "txn_rw_type",
"serverStatus.queues.execution.": "txn_rw_type",
"serverStatus.wiredTiger.perf.": "perf_bucket",
"systemMetrics.disks.": "device_name",
"collstats.storageStats.indexSizes.": "index_name",
"config.transactions.stats.storageStats.indexSizes.": "index_name",
"config.image_collection.stats.storageStats.indexSizes.": "index_name",
}

// This map is used to add labels to some specific metrics.
// The difference from the case above that it works with middle nodes in the structure.
// For example, the fields under the storageStats.indexDetails. structure have this
// signature:
//
// "storageStats": primitive.M{
// "indexDetails": primitive.M{
// "_id_": primitive.M{
// "LSM": primitive.M{
// "bloom filter false positives": int32(0),
// "bloom filter hits": int32(0),
// "bloom filter misses": int32(0),
// ...
// },
// "block-manager": primitive.M{
// "allocations requiring file extension": int32(0),
// ...
// },
// ...
// },
// "name_1": primitive.M{
// ...
// },
// ...
// },
// },
//
// Applying the renaming rules, storageStats will become storageStats but instead of having metrics
// with the form storageStats.indexDetails.<index_name>.<metric_name> where index_name is each one of
// the fields inside the structure (_id_, name_1, etc), those keys will become labels for the same
// metric name. The label name is defined as the value for each metric name in the map and the value
// the label will have is the field name in the structure. Example.
//
// mongodb_storageStats_indexDetails_index_name_LSM_bloom_filter_false_positives{index_name="_id_"} 0
keyNodesToLabels = map[string]string{
"storageStats.indexDetails.": "index_name",
"config.image_collection.stats.storageStats.indexDetails.": "index_name",
"config.transactions.stats.storageStats.indexDetails.": "index_name",
"collstats.storageStats.indexDetails.": "index_name",
}

// Regular expressions used to make the metric name Prometheus-compatible
Expand Down Expand Up @@ -236,8 +280,11 @@ func rawToPrometheusMetric(rm *rawMetric) (prometheus.Metric, error) {
// by prometheus. For first level metrics, there is no prefix so we should use the metric name or
// the help would be empty.
func metricHelp(prefix, name string) string {
if _, ok := nodeToPDMetrics[prefix]; ok {
return strings.TrimSuffix(prefix, ".")
}
if prefix != "" {
return prefix
return prefix + name
}

return name
Expand All @@ -251,17 +298,29 @@ func makeMetrics(prefix string, m bson.M, labels map[string]string, compatibleMo
}

for k, val := range m {
nextPrefix := prefix + k

l := make(map[string]string)
if label, ok := keyNodesToLabels[prefix]; ok {
for k, v := range labels {
l[k] = v
}
l[label] = k
nextPrefix = prefix + label
} else {
l = labels
}
switch v := val.(type) {
case bson.M:
res = append(res, makeMetrics(prefix+k, v, labels, compatibleMode)...)
res = append(res, makeMetrics(nextPrefix, v, l, compatibleMode)...)
case map[string]interface{}:
res = append(res, makeMetrics(prefix+k, v, labels, compatibleMode)...)
res = append(res, makeMetrics(nextPrefix, v, l, compatibleMode)...)
case primitive.A:
res = append(res, processSlice(prefix, k, v, labels, compatibleMode)...)
res = append(res, processSlice(nextPrefix, v, l, compatibleMode)...)
case []interface{}:
continue
default:
rm, err := makeRawMetric(prefix, k, v, labels)
rm, err := makeRawMetric(prefix, k, v, l)
if err != nil {
invalidMetric := prometheus.NewInvalidMetric(prometheus.NewInvalidDesc(err), err)
res = append(res, invalidMetric)
Expand Down Expand Up @@ -302,7 +361,7 @@ func makeMetrics(prefix string, m bson.M, labels map[string]string, compatibleMo

// Extract maps from arrays. Only some structures like replicasets have arrays of members
// and each member is represented by a map[string]interface{}.
func processSlice(prefix, k string, v []interface{}, commonLabels map[string]string, compatibleMode bool) []prometheus.Metric {
func processSlice(prefix string, v []interface{}, commonLabels map[string]string, compatibleMode bool) []prometheus.Metric {
metrics := make([]prometheus.Metric, 0)
labels := make(map[string]string)
for name, value := range commonLabels {
Expand Down Expand Up @@ -332,7 +391,7 @@ func processSlice(prefix, k string, v []interface{}, commonLabels map[string]str
labels["member_idx"] = host
}

metrics = append(metrics, makeMetrics(prefix+k, s, labels, compatibleMode)...)
metrics = append(metrics, makeMetrics(prefix, s, labels, compatibleMode)...)
}

return metrics
Expand Down
2 changes: 1 addition & 1 deletion exporter/profile_status_collector_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ func TestProfileCollector(t *testing.T) {
c := newProfileCollector(ctx, client, logrus.New(), false, ti, 30)

expected := strings.NewReader(`
# HELP mongodb_profile_slow_query_count profile_slow_query.
# HELP mongodb_profile_slow_query_count profile_slow_query.count
# TYPE mongodb_profile_slow_query_count counter
mongodb_profile_slow_query_count{database="admin"} 0
mongodb_profile_slow_query_count{database="config"} 0
Expand Down
2 changes: 1 addition & 1 deletion exporter/replset_config_collector_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ func TestReplsetConfigCollector(t *testing.T) {

// The last \n at the end of this string is important
expected := strings.NewReader(`
# HELP mongodb_rs_cfg_protocolVersion rs_cfg.
# HELP mongodb_rs_cfg_protocolVersion rs_cfg.protocolVersion
# TYPE mongodb_rs_cfg_protocolVersion untyped
mongodb_rs_cfg_protocolVersion 1` + "\n")
// Filter metrics for 2 reasons:
Expand Down
12 changes: 10 additions & 2 deletions main.go
Original file line number Diff line number Diff line change
Expand Up @@ -151,11 +151,19 @@ func buildExporter(opts GlobalFlags, uri string, log *logrus.Logger) *exporter.E
nodeName = uriParsed.Host
}

collStatsNamespaces := []string{}
if opts.CollStatsNamespaces != "" {
collStatsNamespaces = strings.Split(opts.CollStatsNamespaces, ",")
}
indexStatsCollections := []string{}
if opts.IndexStatsCollections != "" {
indexStatsCollections = strings.Split(opts.IndexStatsCollections, ",")
}
exporterOpts := &exporter.Opts{
CollStatsNamespaces: strings.Split(opts.CollStatsNamespaces, ","),
CollStatsNamespaces: collStatsNamespaces,
CompatibleMode: opts.CompatibleMode,
DiscoveringMode: opts.DiscoveringMode,
IndexStatsCollections: strings.Split(opts.IndexStatsCollections, ","),
IndexStatsCollections: indexStatsCollections,
Logger: log,
URI: uri,
NodeName: nodeName,
Expand Down
Loading