-
Notifications
You must be signed in to change notification settings - Fork 851
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Add adaptable circular buffer implementation for ExponentialCounter. #4087
Changes from 11 commits
c650018
0ed0b6b
dd011bf
110d897
42ce93b
6fef5df
c2effc5
d31a643
242e01f
040655a
ffde91a
f07dba5
49785d6
5e5a391
2abffed
0315f68
4019280
c1439e0
7e2e1c9
bd8a251
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -8,7 +8,7 @@ | |
import io.opentelemetry.sdk.internal.PrimitiveLongList; | ||
import io.opentelemetry.sdk.metrics.data.ExponentialHistogramBuckets; | ||
import io.opentelemetry.sdk.metrics.internal.state.ExponentialCounter; | ||
import io.opentelemetry.sdk.metrics.internal.state.MapCounter; | ||
import io.opentelemetry.sdk.metrics.internal.state.ExponentialCounterFactory; | ||
import java.util.Collections; | ||
import java.util.List; | ||
import javax.annotation.Nonnull; | ||
|
@@ -23,27 +23,37 @@ | |
*/ | ||
final class DoubleExponentialHistogramBuckets implements ExponentialHistogramBuckets { | ||
|
||
public static final int MAX_SCALE = 20; | ||
|
||
private static final int MAX_BUCKETS = MapCounter.MAX_SIZE; | ||
|
||
private final ExponentialCounterFactory counterFactory; | ||
private ExponentialCounter counts; | ||
private BucketMapper bucketMapper; | ||
private int scale; | ||
|
||
DoubleExponentialHistogramBuckets() { | ||
this.counts = new MapCounter(); | ||
this.bucketMapper = new LogarithmMapper(MAX_SCALE); | ||
this.scale = MAX_SCALE; | ||
DoubleExponentialHistogramBuckets( | ||
int scale, int maxBuckets, ExponentialCounterFactory counterFactory) { | ||
this.counterFactory = counterFactory; | ||
this.counts = counterFactory.newCounter(maxBuckets); | ||
this.bucketMapper = new LogarithmMapper(scale); | ||
this.scale = scale; | ||
} | ||
|
||
// For copying | ||
DoubleExponentialHistogramBuckets(DoubleExponentialHistogramBuckets buckets) { | ||
this.counts = new MapCounter(buckets.counts); // copy counts | ||
this.counterFactory = buckets.counterFactory; | ||
this.counts = counterFactory.copy(buckets.counts); | ||
this.bucketMapper = new LogarithmMapper(buckets.scale); | ||
this.scale = buckets.scale; | ||
} | ||
|
||
/** Returns a copy of this bucket. */ | ||
DoubleExponentialHistogramBuckets copy() { | ||
return new DoubleExponentialHistogramBuckets(this); | ||
} | ||
|
||
/** Resets all counters in this bucket set to zero, but preserves scale. */ | ||
public void clear() { | ||
this.counts.clear(); | ||
} | ||
|
||
boolean record(double value) { | ||
if (value == 0.0) { | ||
// Guarded by caller. If passed 0 it would be a bug in the SDK. | ||
|
@@ -55,6 +65,12 @@ boolean record(double value) { | |
|
||
@Override | ||
public int getOffset() { | ||
// We need to unify the behavior of empty buckets. | ||
// Unfortunately, getIndexStart is not meaningful for empty counters, so we default to | ||
// returning 0 for offset and an empty list. | ||
if (counts.isEmpty()) { | ||
return 0; | ||
} | ||
return counts.getIndexStart(); | ||
} | ||
|
||
|
@@ -74,6 +90,9 @@ public List<Long> getBucketCounts() { | |
|
||
@Override | ||
public long getTotalCount() { | ||
if (counts.isEmpty()) { | ||
return 0; | ||
} | ||
long totalCount = 0; | ||
for (int i = counts.getIndexStart(); i <= counts.getIndexEnd(); i++) { | ||
totalCount += counts.get(i); | ||
|
@@ -90,7 +109,11 @@ void downscale(int by) { | |
} | ||
|
||
if (!counts.isEmpty()) { | ||
ExponentialCounter newCounts = new MapCounter(); | ||
// We want to preserve other optimisations here as well, e.g. integer size. | ||
// Instead of creating a new counter, we copy the existing one (for bucket size | ||
// optimisations), and clear the values before writing the new ones. | ||
ExponentialCounter newCounts = counterFactory.copy(counts); | ||
newCounts.clear(); | ||
|
||
for (int i = counts.getIndexStart(); i <= counts.getIndexEnd(); i++) { | ||
long count = counts.get(i); | ||
|
@@ -117,7 +140,7 @@ void downscale(int by) { | |
*/ | ||
static DoubleExponentialHistogramBuckets diff( | ||
DoubleExponentialHistogramBuckets a, DoubleExponentialHistogramBuckets b) { | ||
DoubleExponentialHistogramBuckets copy = new DoubleExponentialHistogramBuckets(a); | ||
DoubleExponentialHistogramBuckets copy = a.copy(); | ||
copy.mergeWith(b, /* additive= */ false); | ||
return copy; | ||
} | ||
|
@@ -133,11 +156,11 @@ static DoubleExponentialHistogramBuckets diff( | |
static DoubleExponentialHistogramBuckets merge( | ||
DoubleExponentialHistogramBuckets a, DoubleExponentialHistogramBuckets b) { | ||
if (b.counts.isEmpty()) { | ||
return new DoubleExponentialHistogramBuckets(a); | ||
return a; | ||
} else if (a.counts.isEmpty()) { | ||
return new DoubleExponentialHistogramBuckets(b); | ||
return b; | ||
} | ||
DoubleExponentialHistogramBuckets copy = new DoubleExponentialHistogramBuckets(a); | ||
DoubleExponentialHistogramBuckets copy = a.copy(); | ||
copy.mergeWith(b, /* additive= */ true); | ||
return copy; | ||
} | ||
|
@@ -218,7 +241,7 @@ int getScaleReduction(double value) { | |
int getScaleReduction(long newStart, long newEnd) { | ||
int scaleReduction = 0; | ||
|
||
while (newEnd - newStart + 1 > MAX_BUCKETS) { | ||
while (newEnd - newStart + 1 > counts.getMaxSize()) { | ||
newStart >>= 1; | ||
newEnd >>= 1; | ||
scaleReduction++; | ||
|
@@ -234,19 +257,44 @@ public boolean equals(@Nullable Object obj) { | |
DoubleExponentialHistogramBuckets other = (DoubleExponentialHistogramBuckets) obj; | ||
// Don't need to compare getTotalCount() because equivalent bucket counts | ||
// imply equivalent overall count. | ||
return getBucketCounts().equals(other.getBucketCounts()) | ||
&& this.getOffset() == other.getOffset() | ||
&& this.scale == other.scale; | ||
return this.scale == other.scale && sameBucketCounts(other); | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Why have you omitted the offset from edit: oh I see, you are comparing across an extended window of both buckets. I suppose two bucket sets can have different offset but represent the same measurements. The buckets with the lower offset must have all zeroes up until the first non-zero bucket on the other bucket set in order for this to be the case. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Exactly the case. Additionally the other bucket needs to have ONLY zeros after the first bucket set ends. We could fix this in the There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Definitely worth a comment in here, either way. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Yeah agreed, could use a comment. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Great point. I had documented the other method, but in-context, it wasn't very good. Added here. |
||
} | ||
|
||
/** | ||
* Tests if two bucket counts are equivalent semantically. | ||
* | ||
* <p>Semantic equivalence means: | ||
* | ||
* <ul> | ||
* <li>All counts are stored between indexStart/indexEnd. | ||
* <li>Offset does NOT need to be the same | ||
* </ul> | ||
*/ | ||
private boolean sameBucketCounts(DoubleExponentialHistogramBuckets other) { | ||
int min = Math.min(this.counts.getIndexStart(), other.counts.getIndexStart()); | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Can you confirm this logic? Shouldn't it be something like It looks like we could refer to an index that is below There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Yes, actually that's the point. What can happen right now (with The previous MapCounter implementation actually did GC so any count that reached zero was removed from the Map. Unfortunately, that's not really feasible in the circular buffer implementation. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This seems to be what you are saying:
Makes sense to me. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Ah, I didn't notice the change from throwing to returning 0 for those indices. Can you confirm there is a unit test for this case? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Added. |
||
int max = Math.max(this.counts.getIndexEnd(), other.counts.getIndexEnd()); | ||
for (int idx = min; idx <= max; idx++) { | ||
if (this.counts.get(idx) != other.counts.get(idx)) { | ||
return false; | ||
} | ||
} | ||
return true; | ||
} | ||
|
||
@Override | ||
public int hashCode() { | ||
int hash = 1; | ||
hash *= 1000003; | ||
hash ^= getOffset(); | ||
hash *= 1000003; | ||
hash ^= getBucketCounts().hashCode(); | ||
hash *= 1000003; | ||
// We need a new algorithm here that lines up w/ equals, so we only use non-zero counts. | ||
for (int idx = this.counts.getIndexStart(); idx <= this.counts.getIndexEnd(); idx++) { | ||
long count = this.counts.get(idx); | ||
if (count != 0) { | ||
hash ^= idx; | ||
hash *= 1000003; | ||
hash = (int) (hash ^ count); | ||
hash *= 1000003; | ||
} | ||
} | ||
hash ^= scale; | ||
// Don't need to hash getTotalCount() because equivalent bucket | ||
// counts imply equivalent overall count. | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,36 @@ | ||
/* | ||
* Copyright The OpenTelemetry Authors | ||
* SPDX-License-Identifier: Apache-2.0 | ||
*/ | ||
|
||
package io.opentelemetry.sdk.metrics.internal.aggregator; | ||
|
||
import io.opentelemetry.sdk.metrics.internal.state.ExponentialCounterFactory; | ||
|
||
/** The configuration for how to create exponential histogram buckets. */ | ||
final class ExponentialBucketStrategy { | ||
/** Starting scale of exponential buckets. */ | ||
private final int scale; | ||
/** The maximum number of buckets that will be used for positive or negative recordings. */ | ||
private final int maxBuckets; | ||
/** The mechanism of constructing and copying buckets. */ | ||
private final ExponentialCounterFactory counterFactory; | ||
|
||
private ExponentialBucketStrategy( | ||
int scale, int maxBuckets, ExponentialCounterFactory counterFactory) { | ||
this.scale = scale; | ||
this.maxBuckets = maxBuckets; | ||
this.counterFactory = counterFactory; | ||
} | ||
|
||
/** Constructs fresh new buckets with default settings. */ | ||
DoubleExponentialHistogramBuckets newBuckets() { | ||
return new DoubleExponentialHistogramBuckets(scale, maxBuckets, counterFactory); | ||
} | ||
|
||
/** Create a new strategy for generating Exponential Buckets. */ | ||
static ExponentialBucketStrategy newStrategy( | ||
int scale, int maxBuckets, ExponentialCounterFactory counterFactory) { | ||
return new ExponentialBucketStrategy(scale, maxBuckets, counterFactory); | ||
} | ||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
What do you foresee the instrument / user API looking like for this? Something like
new ExponentialHistogramInstrument(maxbuckets, startingScale)
?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
There's a discussion on the specification right now, but I'll give you my thoughts: