Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Remove PData prefix/suffix from prometheus internal struct names #9767

Merged
merged 2 commits into from
May 6, 2022
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ type kv struct {
Key, Value string
}

func distPointPdata(ts pcommon.Timestamp, bounds []float64, counts []uint64) *pmetric.HistogramDataPoint {
func distPoint(ts pcommon.Timestamp, bounds []float64, counts []uint64) *pmetric.HistogramDataPoint {
hdp := pmetric.NewHistogramDataPoint()
hdp.SetExplicitBounds(bounds)
hdp.SetBucketCounts(counts)
Expand All @@ -42,7 +42,7 @@ func distPointPdata(ts pcommon.Timestamp, bounds []float64, counts []uint64) *pm
return &hdp
}

func cumulativeDistMetricPdata(name string, kvp []*kv, startTs pcommon.Timestamp, points ...*pmetric.HistogramDataPoint) *pmetric.Metric {
func cumulativeDistMetric(name string, kvp []*kv, startTs pcommon.Timestamp, points ...*pmetric.HistogramDataPoint) *pmetric.Metric {
metric := pmetric.NewMetric()
metric.SetName(name)
metric.SetDataType(pmetric.MetricDataTypeHistogram)
Expand All @@ -63,15 +63,15 @@ func cumulativeDistMetricPdata(name string, kvp []*kv, startTs pcommon.Timestamp
return &metric
}

func doublePointPdata(ts pcommon.Timestamp, value float64) *pmetric.NumberDataPoint {
func doublePoint(ts pcommon.Timestamp, value float64) *pmetric.NumberDataPoint {
ndp := pmetric.NewNumberDataPoint()
ndp.SetTimestamp(ts)
ndp.SetDoubleVal(value)

return &ndp
}

func gaugeMetricPdata(name string, kvp []*kv, startTs pcommon.Timestamp, points ...*pmetric.NumberDataPoint) *pmetric.Metric {
func gaugeMetric(name string, kvp []*kv, startTs pcommon.Timestamp, points ...*pmetric.NumberDataPoint) *pmetric.Metric {
metric := pmetric.NewMetric()
metric.SetName(name)
metric.SetDataType(pmetric.MetricDataTypeGauge)
Expand All @@ -89,7 +89,7 @@ func gaugeMetricPdata(name string, kvp []*kv, startTs pcommon.Timestamp, points
return &metric
}

func summaryPointPdata(ts pcommon.Timestamp, count uint64, sum float64, quantiles, values []float64) *pmetric.SummaryDataPoint {
func summaryPoint(ts pcommon.Timestamp, count uint64, sum float64, quantiles, values []float64) *pmetric.SummaryDataPoint {
sdp := pmetric.NewSummaryDataPoint()
sdp.SetTimestamp(ts)
sdp.SetCount(count)
Expand All @@ -103,7 +103,7 @@ func summaryPointPdata(ts pcommon.Timestamp, count uint64, sum float64, quantile
return &sdp
}

func summaryMetricPdata(name string, kvp []*kv, startTs pcommon.Timestamp, points ...*pmetric.SummaryDataPoint) *pmetric.Metric {
func summaryMetric(name string, kvp []*kv, startTs pcommon.Timestamp, points ...*pmetric.SummaryDataPoint) *pmetric.Metric {
metric := pmetric.NewMetric()
metric.SetName(name)
metric.SetDataType(pmetric.MetricDataTypeSummary)
Expand All @@ -121,7 +121,7 @@ func summaryMetricPdata(name string, kvp []*kv, startTs pcommon.Timestamp, point
return &metric
}

func sumMetricPdata(name string, kvp []*kv, startTs pcommon.Timestamp, points ...*pmetric.NumberDataPoint) *pmetric.Metric {
func sumMetric(name string, kvp []*kv, startTs pcommon.Timestamp, points ...*pmetric.NumberDataPoint) *pmetric.Metric {
metric := pmetric.NewMetric()
metric.SetName(name)
metric.SetDataType(pmetric.MetricDataTypeSum)
Expand Down
8 changes: 4 additions & 4 deletions receiver/prometheusreceiver/internal/ocastore.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ type OcaStore struct {
ctx context.Context

sink consumer.Metrics
jobsMap *JobsMapPdata
jobsMap *JobsMap
useStartTimeMetric bool
startTimeMetricRegex string
receiverID config.ComponentID
Expand All @@ -51,9 +51,9 @@ func NewOcaStore(
startTimeMetricRegex string,
receiverID config.ComponentID,
externalLabels labels.Labels) *OcaStore {
var jobsMap *JobsMapPdata
var jobsMap *JobsMap
if !useStartTimeMetric {
jobsMap = NewJobsMapPdata(gcInterval)
jobsMap = NewJobsMap(gcInterval)
}
return &OcaStore{
ctx: ctx,
Expand All @@ -68,7 +68,7 @@ func NewOcaStore(
}

func (o *OcaStore) Appender(ctx context.Context) storage.Appender {
return newTransactionPdata(
return newTransaction(
ctx,
&txConfig{
jobsMap: o.jobsMap,
Expand Down
70 changes: 35 additions & 35 deletions receiver/prometheusreceiver/internal/otlp_metricfamily.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,9 +28,9 @@ import (
"go.uber.org/zap"
)

type metricFamilyPdata struct {
type metricFamily struct {
mtype pmetric.MetricDataType
groups map[string]*metricGroupPdata
groups map[string]*metricGroup
name string
mc MetadataCache
droppedTimeseries int
Expand All @@ -40,11 +40,11 @@ type metricFamilyPdata struct {
groupOrders map[string]int
}

// metricGroupPdata, represents a single metric of a metric family. for example a histogram metric is usually represent by
// metricGroup represents a single metric of a metric family. For example, a histogram metric is usually represented by
// a couple of complexValue data points (buckets and count/sum). A group of a metric family always shares the same set of tags. For
// simple types like counter and gauge, each data point is a group of itself.
type metricGroupPdata struct {
family *metricFamilyPdata
type metricGroup struct {
family *metricFamily
ts int64
ls labels.Labels
count float64
Expand All @@ -57,16 +57,16 @@ type metricGroupPdata struct {

var pdataStaleFlags = pmetric.NewMetricDataPointFlags(pmetric.MetricDataPointFlagNoRecordedValue)

func newMetricFamilyPdata(metricName string, mc MetadataCache, logger *zap.Logger) *metricFamilyPdata {
func newMetricFamily(metricName string, mc MetadataCache, logger *zap.Logger) *metricFamily {
metadata, familyName := metadataForMetric(metricName, mc)
mtype := convToPdataMetricType(metadata.Type)
mtype := convToMetricType(metadata.Type)
if mtype == pmetric.MetricDataTypeNone {
logger.Debug(fmt.Sprintf("Unknown-typed metric : %s %+v", metricName, metadata))
}

return &metricFamilyPdata{
return &metricFamily{
mtype: mtype,
groups: make(map[string]*metricGroupPdata),
groups: make(map[string]*metricGroup),
name: familyName,
mc: mc,
droppedTimeseries: 0,
Expand All @@ -80,9 +80,9 @@ func newMetricFamilyPdata(metricName string, mc MetadataCache, logger *zap.Logge
// updateLabelKeys is used to store all the label keys of the same metric family in observed order. Since the prometheus
// receiver removes any label with an empty value before feeding it to an appender, in order to figure out all the labels
// from the same metric family we need to keep track of every label that has ever been observed.
func (mf *metricFamilyPdata) updateLabelKeys(ls labels.Labels) {
func (mf *metricFamily) updateLabelKeys(ls labels.Labels) {
for _, l := range ls {
if isUsefulLabelPdata(mf.mtype, l.Name) {
if isUsefulLabel(mf.mtype, l.Name) {
if _, ok := mf.labelKeys[l.Name]; !ok {
mf.labelKeys[l.Name] = true
// use insertion sort to maintain order
Expand All @@ -97,8 +97,8 @@ func (mf *metricFamilyPdata) updateLabelKeys(ls labels.Labels) {
}

// includesMetric returns true if the metric is part of the family
func (mf *metricFamilyPdata) includesMetric(metricName string) bool {
if mf.isCumulativeTypePdata() {
func (mf *metricFamily) includesMetric(metricName string) bool {
if mf.isCumulativeType() {
// If it is a merged family type, then it should match the
// family name when suffixes are trimmed.
return normalizeMetricName(metricName) == mf.name
Expand All @@ -108,26 +108,26 @@ func (mf *metricFamilyPdata) includesMetric(metricName string) bool {
return metricName == mf.name
}

func (mf *metricFamilyPdata) getGroupKey(ls labels.Labels) string {
func (mf *metricFamily) getGroupKey(ls labels.Labels) string {
mf.updateLabelKeys(ls)
return dpgSignature(mf.labelKeysOrdered, ls)
}

func (mg *metricGroupPdata) sortPoints() {
func (mg *metricGroup) sortPoints() {
sort.Slice(mg.complexValue, func(i, j int) bool {
return mg.complexValue[i].boundary < mg.complexValue[j].boundary
})
}

func (mg *metricGroupPdata) toDistributionPoint(orderedLabelKeys []string, dest *pmetric.HistogramDataPointSlice) bool {
func (mg *metricGroup) toDistributionPoint(orderedLabelKeys []string, dest *pmetric.HistogramDataPointSlice) bool {
if !mg.hasCount || len(mg.complexValue) == 0 {
return false
}

mg.sortPoints()

// for OCAgent Proto, the bounds won't include +inf
// TODO: (@odeke-em) should we also check OpenTelemetry Pdata for bucket bounds?
// TODO: (@odeke-em) should we also check OpenTelemetry for bucket bounds?
codeboten marked this conversation as resolved.
Show resolved Hide resolved
bounds := make([]float64, len(mg.complexValue)-1)
bucketCounts := make([]uint64, len(mg.complexValue))

Expand Down Expand Up @@ -164,11 +164,11 @@ func (mg *metricGroupPdata) toDistributionPoint(orderedLabelKeys []string, dest

// The timestamp MUST be retrieved in milliseconds and converted to nanoseconds.
tsNanos := pdataTimestampFromMs(mg.ts)
if mg.family.isCumulativeTypePdata() {
if mg.family.isCumulativeType() {
point.SetStartTimestamp(tsNanos) // metrics_adjuster adjusts the startTimestamp to the initial scrape timestamp
}
point.SetTimestamp(tsNanos)
populateAttributesPdata(orderedLabelKeys, mg.ls, point.Attributes())
populateAttributes(orderedLabelKeys, mg.ls, point.Attributes())

return true
}
Expand All @@ -178,7 +178,7 @@ func pdataTimestampFromMs(timeAtMs int64) pcommon.Timestamp {
return pcommon.NewTimestampFromTime(time.Unix(secs, ns))
}

func (mg *metricGroupPdata) toSummaryPoint(orderedLabelKeys []string, dest *pmetric.SummaryDataPointSlice) bool {
func (mg *metricGroup) toSummaryPoint(orderedLabelKeys []string, dest *pmetric.SummaryDataPointSlice) bool {
// count is expected to be provided; however, it can be missing in the following two cases:
// 1. the data is corrupted
// 2. it was ignored by the startValue evaluation
Expand Down Expand Up @@ -216,19 +216,19 @@ func (mg *metricGroupPdata) toSummaryPoint(orderedLabelKeys []string, dest *pmet
// The timestamp MUST be retrieved in milliseconds and converted to nanoseconds.
tsNanos := pdataTimestampFromMs(mg.ts)
point.SetTimestamp(tsNanos)
if mg.family.isCumulativeTypePdata() {
if mg.family.isCumulativeType() {
point.SetStartTimestamp(tsNanos) // metrics_adjuster adjusts the startTimestamp to the initial scrape timestamp
}
populateAttributesPdata(orderedLabelKeys, mg.ls, point.Attributes())
populateAttributes(orderedLabelKeys, mg.ls, point.Attributes())

return true
}

func (mg *metricGroupPdata) toNumberDataPoint(orderedLabelKeys []string, dest *pmetric.NumberDataPointSlice) bool {
func (mg *metricGroup) toNumberDataPoint(orderedLabelKeys []string, dest *pmetric.NumberDataPointSlice) bool {
var startTsNanos pcommon.Timestamp
tsNanos := pdataTimestampFromMs(mg.ts)
// gauge/undefined types have no start time.
if mg.family.isCumulativeTypePdata() {
if mg.family.isCumulativeType() {
startTsNanos = tsNanos // metrics_adjuster adjusts the startTimestamp to the initial scrape timestamp
}

Expand All @@ -240,12 +240,12 @@ func (mg *metricGroupPdata) toNumberDataPoint(orderedLabelKeys []string, dest *p
} else {
point.SetDoubleVal(mg.value)
}
populateAttributesPdata(orderedLabelKeys, mg.ls, point.Attributes())
populateAttributes(orderedLabelKeys, mg.ls, point.Attributes())

return true
}

func populateAttributesPdata(orderedKeys []string, ls labels.Labels, dest pcommon.Map) {
func populateAttributes(orderedKeys []string, ls labels.Labels, dest pcommon.Map) {
src := ls.Map()
for _, key := range orderedKeys {
if src[key] == "" {
Expand All @@ -257,18 +257,18 @@ func populateAttributesPdata(orderedKeys []string, ls labels.Labels, dest pcommo
}

// Purposefully being referenced to avoid lint warnings about being "unused".
var _ = (*metricFamilyPdata)(nil).updateLabelKeys
var _ = (*metricFamily)(nil).updateLabelKeys

func (mf *metricFamilyPdata) isCumulativeTypePdata() bool {
func (mf *metricFamily) isCumulativeType() bool {
return mf.mtype == pmetric.MetricDataTypeSum ||
mf.mtype == pmetric.MetricDataTypeHistogram ||
mf.mtype == pmetric.MetricDataTypeSummary
}

func (mf *metricFamilyPdata) loadMetricGroupOrCreate(groupKey string, ls labels.Labels, ts int64) *metricGroupPdata {
func (mf *metricFamily) loadMetricGroupOrCreate(groupKey string, ls labels.Labels, ts int64) *metricGroup {
mg, ok := mf.groups[groupKey]
if !ok {
mg = &metricGroupPdata{
mg = &metricGroup{
family: mf,
ts: ts,
ls: ls,
Expand All @@ -281,7 +281,7 @@ func (mf *metricFamilyPdata) loadMetricGroupOrCreate(groupKey string, ls labels.
return mg
}

func (mf *metricFamilyPdata) Add(metricName string, ls labels.Labels, t int64, v float64) error {
func (mf *metricFamily) Add(metricName string, ls labels.Labels, t int64, v float64) error {
groupKey := mf.getGroupKey(ls)
mg := mf.loadMetricGroupOrCreate(groupKey, ls, t)
switch mf.mtype {
Expand All @@ -297,7 +297,7 @@ func (mf *metricFamilyPdata) Add(metricName string, ls labels.Labels, t int64, v
mg.count = v
mg.hasCount = true
default:
boundary, err := getBoundaryPdata(mf.mtype, ls)
boundary, err := getBoundary(mf.mtype, ls)
if err != nil {
mf.droppedTimeseries++
return err
Expand All @@ -312,15 +312,15 @@ func (mf *metricFamilyPdata) Add(metricName string, ls labels.Labels, t int64, v
}

// getGroups returns the groups in insertion order.
func (mf *metricFamilyPdata) getGroups() []*metricGroupPdata {
groups := make([]*metricGroupPdata, len(mf.groupOrders))
func (mf *metricFamily) getGroups() []*metricGroup {
groups := make([]*metricGroup, len(mf.groupOrders))
for k, v := range mf.groupOrders {
groups[v] = mf.groups[k]
}
return groups
}

func (mf *metricFamilyPdata) ToMetricPdata(metrics *pmetric.MetricSlice) (int, int) {
func (mf *metricFamily) ToMetric(metrics *pmetric.MetricSlice) (int, int) {
metric := pmetric.NewMetric()
metric.SetDataType(mf.mtype)
metric.SetName(mf.name)
Expand Down
12 changes: 6 additions & 6 deletions receiver/prometheusreceiver/internal/otlp_metricfamily_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,7 @@ func TestMetricGroupData_toDistributionUnitTest(t *testing.T) {
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
mp := newMetricFamilyPdata(tt.metricName, mc, zap.NewNop())
mp := newMetricFamily(tt.metricName, mc, zap.NewNop())
for _, tv := range tt.scrapes {
var lbls labels.Labels
if tv.extraLabel.Name != "" {
Expand All @@ -178,7 +178,7 @@ func TestMetricGroupData_toDistributionUnitTest(t *testing.T) {
require.NotNil(t, mp.groups[groupKey], "Expecting the groupKey to have a value given key:: "+groupKey)

sl := pmetric.NewMetricSlice()
mp.ToMetricPdata(&sl)
mp.ToMetric(&sl)

require.Equal(t, 1, sl.Len(), "Exactly one metric expected")
metric := sl.At(0)
Expand Down Expand Up @@ -378,7 +378,7 @@ func TestMetricGroupData_toSummaryUnitTest(t *testing.T) {
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
mp := newMetricFamilyPdata(tt.name, mc, zap.NewNop())
mp := newMetricFamily(tt.name, mc, zap.NewNop())
for _, lbs := range tt.labelsScrapes {
for _, scrape := range lbs.scrapes {
require.NoError(t, mp.Add(scrape.metric, lbs.labels.Copy(), scrape.at, scrape.value))
Expand All @@ -394,7 +394,7 @@ func TestMetricGroupData_toSummaryUnitTest(t *testing.T) {
require.NotNil(t, mp.groups[groupKey], "Expecting the groupKey to have a value given key:: "+groupKey)

sl := pmetric.NewMetricSlice()
mp.ToMetricPdata(&sl)
mp.ToMetric(&sl)

require.Equal(t, 1, sl.Len(), "Exactly one metric expected")
metric := sl.At(0)
Expand Down Expand Up @@ -467,7 +467,7 @@ func TestMetricGroupData_toNumberDataUnitTest(t *testing.T) {
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
mp := newMetricFamilyPdata(tt.metricKind, mc, zap.NewNop())
mp := newMetricFamily(tt.metricKind, mc, zap.NewNop())
for _, tv := range tt.scrapes {
require.NoError(t, mp.Add(tv.metric, tt.labels.Copy(), tv.at, tv.value))
}
Expand All @@ -477,7 +477,7 @@ func TestMetricGroupData_toNumberDataUnitTest(t *testing.T) {
require.NotNil(t, mp.groups[groupKey], "Expecting the groupKey to have a value given key:: "+groupKey)

sl := pmetric.NewMetricSlice()
mp.ToMetricPdata(&sl)
mp.ToMetric(&sl)

require.Equal(t, 1, sl.Len(), "Exactly one metric expected")
metric := sl.At(0)
Expand Down
Loading