diff --git a/sdk/metrics/src/jmh/java/io/opentelemetry/sdk/metrics/internal/aggregator/HistogramAggregationParam.java b/sdk/metrics/src/jmh/java/io/opentelemetry/sdk/metrics/internal/aggregator/HistogramAggregationParam.java index e76f94c4143..b62222af3c5 100644 --- a/sdk/metrics/src/jmh/java/io/opentelemetry/sdk/metrics/internal/aggregator/HistogramAggregationParam.java +++ b/sdk/metrics/src/jmh/java/io/opentelemetry/sdk/metrics/internal/aggregator/HistogramAggregationParam.java @@ -6,6 +6,7 @@ package io.opentelemetry.sdk.metrics.internal.aggregator; import io.opentelemetry.sdk.metrics.exemplar.ExemplarReservoir; +import io.opentelemetry.sdk.metrics.internal.state.ExponentialCounterFactory; import java.util.Collections; /** The types of histogram aggregation to benchmark. */ @@ -20,7 +21,20 @@ public enum HistogramAggregationParam { new DoubleHistogramAggregator( ExplicitBucketHistogramUtils.createBoundaryArray(Collections.emptyList()), ExemplarReservoir::noSamples)), - EXPONENTIAL(new DoubleExponentialHistogramAggregator(ExemplarReservoir::noSamples)); + EXPONENTIAL_SMALL_CIRCULAR_BUFFER( + new DoubleExponentialHistogramAggregator( + ExemplarReservoir::noSamples, + ExponentialBucketStrategy.newStrategy( + 20, 20, ExponentialCounterFactory.circularBufferCounter()))), + EXPONENTIAL_CIRCULAR_BUFFER( + new DoubleExponentialHistogramAggregator( + ExemplarReservoir::noSamples, + ExponentialBucketStrategy.newStrategy( + 20, 320, ExponentialCounterFactory.circularBufferCounter()))), + EXPONENTIAL_MAP_COUNTER( + new DoubleExponentialHistogramAggregator( + ExemplarReservoir::noSamples, + ExponentialBucketStrategy.newStrategy(20, 320, ExponentialCounterFactory.mapCounter()))); private final Aggregator aggregator; diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleExponentialHistogramAggregator.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleExponentialHistogramAggregator.java index 
4c0c8ea3f77..c417aace6a0 100644 --- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleExponentialHistogramAggregator.java +++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleExponentialHistogramAggregator.java @@ -13,6 +13,7 @@ import io.opentelemetry.sdk.metrics.data.MetricData; import io.opentelemetry.sdk.metrics.exemplar.ExemplarReservoir; import io.opentelemetry.sdk.metrics.internal.descriptor.MetricDescriptor; +import io.opentelemetry.sdk.metrics.internal.state.ExponentialCounterFactory; import io.opentelemetry.sdk.resources.Resource; import java.util.List; import java.util.Map; @@ -22,14 +23,24 @@ final class DoubleExponentialHistogramAggregator implements Aggregator { private final Supplier reservoirSupplier; + private final ExponentialBucketStrategy bucketStrategy; DoubleExponentialHistogramAggregator(Supplier reservoirSupplier) { + this( + reservoirSupplier, + ExponentialBucketStrategy.newStrategy( + 20, 320, ExponentialCounterFactory.circularBufferCounter())); + } + + DoubleExponentialHistogramAggregator( + Supplier reservoirSupplier, ExponentialBucketStrategy bucketStrategy) { this.reservoirSupplier = reservoirSupplier; + this.bucketStrategy = bucketStrategy; } @Override public AggregatorHandle createHandle() { - return new Handle(reservoirSupplier.get()); + return new Handle(reservoirSupplier.get(), this.bucketStrategy); } /** @@ -132,20 +143,19 @@ public MetricData toMetricData( } static final class Handle extends AggregatorHandle { - - private int scale; - private DoubleExponentialHistogramBuckets positiveBuckets; - private DoubleExponentialHistogramBuckets negativeBuckets; + private final ExponentialBucketStrategy bucketStrategy; + private final DoubleExponentialHistogramBuckets positiveBuckets; + private final DoubleExponentialHistogramBuckets negativeBuckets; private long zeroCount; private double sum; - Handle(ExemplarReservoir reservoir) { + Handle(ExemplarReservoir reservoir, 
ExponentialBucketStrategy bucketStrategy) { super(reservoir); this.sum = 0; this.zeroCount = 0; - this.scale = DoubleExponentialHistogramBuckets.MAX_SCALE; - this.positiveBuckets = new DoubleExponentialHistogramBuckets(); - this.negativeBuckets = new DoubleExponentialHistogramBuckets(); + this.bucketStrategy = bucketStrategy; + this.positiveBuckets = this.bucketStrategy.newBuckets(); + this.negativeBuckets = this.bucketStrategy.newBuckets(); } @Override @@ -153,11 +163,16 @@ protected synchronized ExponentialHistogramAccumulation doAccumulateThenReset( List exemplars) { ExponentialHistogramAccumulation acc = ExponentialHistogramAccumulation.create( - scale, sum, positiveBuckets, negativeBuckets, zeroCount, exemplars); + this.positiveBuckets.getScale(), + sum, + positiveBuckets.copy(), + negativeBuckets.copy(), + zeroCount, + exemplars); this.sum = 0; this.zeroCount = 0; - this.positiveBuckets = new DoubleExponentialHistogramBuckets(); - this.negativeBuckets = new DoubleExponentialHistogramBuckets(); + this.positiveBuckets.clear(); + this.negativeBuckets.clear(); return acc; } @@ -180,6 +195,8 @@ protected synchronized void doRecordDouble(double value) { // Record; If recording fails, calculate scale reduction and scale down to fit new value. // 2nd attempt at recording should work with new scale DoubleExponentialHistogramBuckets buckets = (c > 0) ? positiveBuckets : negativeBuckets; + // TODO: We should experiment with downscale on demand during sync execution and only + // unifying scale factor between positive/negative at collection time (doAccumulate). if (!buckets.record(value)) { // getScaleReduction() used with downScale() will scale down as required to record value, // fit inside max allowed buckets, and make sure index can be represented by int. 
@@ -196,7 +213,6 @@ protected void doRecordLong(long value) { void downScale(int by) { positiveBuckets.downscale(by); negativeBuckets.downscale(by); - this.scale -= by; } } } diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleExponentialHistogramBuckets.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleExponentialHistogramBuckets.java index 353109904e8..e6dbe1305a6 100644 --- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleExponentialHistogramBuckets.java +++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleExponentialHistogramBuckets.java @@ -8,7 +8,7 @@ import io.opentelemetry.sdk.internal.PrimitiveLongList; import io.opentelemetry.sdk.metrics.data.ExponentialHistogramBuckets; import io.opentelemetry.sdk.metrics.internal.state.ExponentialCounter; -import io.opentelemetry.sdk.metrics.internal.state.MapCounter; +import io.opentelemetry.sdk.metrics.internal.state.ExponentialCounterFactory; import java.util.Collections; import java.util.List; import javax.annotation.Nonnull; @@ -23,27 +23,37 @@ */ final class DoubleExponentialHistogramBuckets implements ExponentialHistogramBuckets { - public static final int MAX_SCALE = 20; - - private static final int MAX_BUCKETS = MapCounter.MAX_SIZE; - + private final ExponentialCounterFactory counterFactory; private ExponentialCounter counts; private BucketMapper bucketMapper; private int scale; - DoubleExponentialHistogramBuckets() { - this.counts = new MapCounter(); - this.bucketMapper = new LogarithmMapper(MAX_SCALE); - this.scale = MAX_SCALE; + DoubleExponentialHistogramBuckets( + int scale, int maxBuckets, ExponentialCounterFactory counterFactory) { + this.counterFactory = counterFactory; + this.counts = counterFactory.newCounter(maxBuckets); + this.bucketMapper = new LogarithmMapper(scale); + this.scale = scale; } // For copying 
DoubleExponentialHistogramBuckets(DoubleExponentialHistogramBuckets buckets) { - this.counts = new MapCounter(buckets.counts); // copy counts + this.counterFactory = buckets.counterFactory; + this.counts = counterFactory.copy(buckets.counts); this.bucketMapper = new LogarithmMapper(buckets.scale); this.scale = buckets.scale; } + /** Returns a copy of this bucket. */ + DoubleExponentialHistogramBuckets copy() { + return new DoubleExponentialHistogramBuckets(this); + } + + /** Resets all counters in this bucket set to zero, but preserves scale. */ + public void clear() { + this.counts.clear(); + } + boolean record(double value) { if (value == 0.0) { // Guarded by caller. If passed 0 it would be a bug in the SDK. @@ -55,6 +65,12 @@ boolean record(double value) { @Override public int getOffset() { + // We need to unify the behavior of empty buckets. + // Unfortunately, getIndexStart is not meaningful for empty counters, so we default to + // returning 0 for offset and an empty list. + if (counts.isEmpty()) { + return 0; + } return counts.getIndexStart(); } @@ -74,6 +90,9 @@ public List getBucketCounts() { @Override public long getTotalCount() { + if (counts.isEmpty()) { + return 0; + } long totalCount = 0; for (int i = counts.getIndexStart(); i <= counts.getIndexEnd(); i++) { totalCount += counts.get(i); @@ -90,7 +109,11 @@ void downscale(int by) { } if (!counts.isEmpty()) { - ExponentialCounter newCounts = new MapCounter(); + // We want to preserve other optimisations here as well, e.g. integer size. + // Instead of creating a new counter, we copy the existing one (for bucket size + // optimisations), and clear the values before writing the new ones. 
+ ExponentialCounter newCounts = counterFactory.copy(counts); + newCounts.clear(); for (int i = counts.getIndexStart(); i <= counts.getIndexEnd(); i++) { long count = counts.get(i); @@ -117,7 +140,7 @@ void downscale(int by) { */ static DoubleExponentialHistogramBuckets diff( DoubleExponentialHistogramBuckets a, DoubleExponentialHistogramBuckets b) { - DoubleExponentialHistogramBuckets copy = new DoubleExponentialHistogramBuckets(a); + DoubleExponentialHistogramBuckets copy = a.copy(); copy.mergeWith(b, /* additive= */ false); return copy; } @@ -133,11 +156,11 @@ static DoubleExponentialHistogramBuckets diff( static DoubleExponentialHistogramBuckets merge( DoubleExponentialHistogramBuckets a, DoubleExponentialHistogramBuckets b) { if (b.counts.isEmpty()) { - return new DoubleExponentialHistogramBuckets(a); + return a; } else if (a.counts.isEmpty()) { - return new DoubleExponentialHistogramBuckets(b); + return b; } - DoubleExponentialHistogramBuckets copy = new DoubleExponentialHistogramBuckets(a); + DoubleExponentialHistogramBuckets copy = a.copy(); copy.mergeWith(b, /* additive= */ true); return copy; } @@ -218,7 +241,7 @@ int getScaleReduction(double value) { int getScaleReduction(long newStart, long newEnd) { int scaleReduction = 0; - while (newEnd - newStart + 1 > MAX_BUCKETS) { + while (newEnd - newStart + 1 > counts.getMaxSize()) { newStart >>= 1; newEnd >>= 1; scaleReduction++; @@ -234,19 +257,48 @@ public boolean equals(@Nullable Object obj) { DoubleExponentialHistogramBuckets other = (DoubleExponentialHistogramBuckets) obj; // Don't need to compare getTotalCount() because equivalent bucket counts // imply equivalent overall count. - return getBucketCounts().equals(other.getBucketCounts()) - && this.getOffset() == other.getOffset() - && this.scale == other.scale; + // Additionally, we compare the "semantics" of bucket counts, that is + // it's ok for getOffset() to diverge as long as the populated counts remain + // the same. 
This is because we don't "normalize" buckets after doing + // difference/subtraction operations. + return this.scale == other.scale && sameBucketCounts(other); + } + + /** + * Tests if two bucket counts are equivalent semantically. + * + *

<p>Semantic equivalence means:
+   *
+   * <ul>
+   *   <li>All counts are stored between indexStart/indexEnd.
+   *   <li>Offset does NOT need to be the same
+   * </ul>
+ */ + private boolean sameBucketCounts(DoubleExponentialHistogramBuckets other) { + int min = Math.min(this.counts.getIndexStart(), other.counts.getIndexStart()); + int max = Math.max(this.counts.getIndexEnd(), other.counts.getIndexEnd()); + for (int idx = min; idx <= max; idx++) { + if (this.counts.get(idx) != other.counts.get(idx)) { + return false; + } + } + return true; } @Override public int hashCode() { int hash = 1; hash *= 1000003; - hash ^= getOffset(); - hash *= 1000003; - hash ^= getBucketCounts().hashCode(); - hash *= 1000003; + // We need a new algorithm here that lines up w/ equals, so we only use non-zero counts. + for (int idx = this.counts.getIndexStart(); idx <= this.counts.getIndexEnd(); idx++) { + long count = this.counts.get(idx); + if (count != 0) { + hash ^= idx; + hash *= 1000003; + hash = (int) (hash ^ count); + hash *= 1000003; + } + } hash ^= scale; // Don't need to hash getTotalCount() because equivalent bucket // counts imply equivalent overall count. diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/aggregator/ExponentialBucketStrategy.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/aggregator/ExponentialBucketStrategy.java new file mode 100644 index 00000000000..9af45e99155 --- /dev/null +++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/aggregator/ExponentialBucketStrategy.java @@ -0,0 +1,36 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.sdk.metrics.internal.aggregator; + +import io.opentelemetry.sdk.metrics.internal.state.ExponentialCounterFactory; + +/** The configuration for how to create exponential histogram buckets. */ +final class ExponentialBucketStrategy { + /** Starting scale of exponential buckets. */ + private final int scale; + /** The maximum number of buckets that will be used for positive or negative recordings. 
*/ + private final int maxBuckets; + /** The mechanism of constructing and copying buckets. */ + private final ExponentialCounterFactory counterFactory; + + private ExponentialBucketStrategy( + int scale, int maxBuckets, ExponentialCounterFactory counterFactory) { + this.scale = scale; + this.maxBuckets = maxBuckets; + this.counterFactory = counterFactory; + } + + /** Constructs fresh new buckets with default settings. */ + DoubleExponentialHistogramBuckets newBuckets() { + return new DoubleExponentialHistogramBuckets(scale, maxBuckets, counterFactory); + } + + /** Create a new strategy for generating Exponential Buckets. */ + static ExponentialBucketStrategy newStrategy( + int scale, int maxBuckets, ExponentialCounterFactory counterFactory) { + return new ExponentialBucketStrategy(scale, maxBuckets, counterFactory); + } +} diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/AdaptingCircularBufferCounter.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/AdaptingCircularBufferCounter.java new file mode 100644 index 00000000000..4eda81f9ec6 --- /dev/null +++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/AdaptingCircularBufferCounter.java @@ -0,0 +1,140 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.sdk.metrics.internal.state; + +/** + * A circle-buffer-backed exponential counter. + * + *

<p>The first recorded value becomes the 'baseIndex'. Going backwards leads to start/stop index.
+ *
+ * <p>This expands the start/end index as it sees values.
+ *
+ * <p>This class is NOT thread-safe. It is expected to be behind a synchronized incrementer.
+ *
+ * <p>
This class is internal and is hence not for public use. Its APIs are unstable and can change + * at any time + */ +public class AdaptingCircularBufferCounter implements ExponentialCounter { + private static final int NULL_INDEX = Integer.MIN_VALUE; + private int endIndex = NULL_INDEX; + private int startIndex = NULL_INDEX; + private int baseIndex = NULL_INDEX; + private final AdaptingIntegerArray backing; + + /** Constructs a circular buffer that will hold at most {@code maxSize} buckets. */ + public AdaptingCircularBufferCounter(int maxSize) { + this.backing = new AdaptingIntegerArray(maxSize); + } + + /** (Deep)-Copies the values from another exponential counter. */ + public AdaptingCircularBufferCounter(ExponentialCounter toCopy) { + // If toCopy is an AdaptingCircularBuffer, just do a copy of the underlying array + // and baseIndex. + if (toCopy instanceof AdaptingCircularBufferCounter) { + this.backing = ((AdaptingCircularBufferCounter) toCopy).backing.copy(); + this.startIndex = toCopy.getIndexStart(); + this.endIndex = toCopy.getIndexEnd(); + this.baseIndex = ((AdaptingCircularBufferCounter) toCopy).baseIndex; + } else { + // Copy values from some other implementation of ExponentialCounter. 
+ this.backing = new AdaptingIntegerArray(toCopy.getMaxSize()); + this.startIndex = NULL_INDEX; + this.baseIndex = NULL_INDEX; + this.endIndex = NULL_INDEX; + for (int i = toCopy.getIndexStart(); i <= toCopy.getIndexEnd(); i++) { + long val = toCopy.get(i); + this.increment(i, val); + } + } + } + + @Override + public int getIndexStart() { + return startIndex; + } + + @Override + public int getIndexEnd() { + return endIndex; + } + + @Override + public boolean increment(int index, long delta) { + if (baseIndex == NULL_INDEX) { + startIndex = index; + endIndex = index; + baseIndex = index; + backing.increment(0, delta); + return true; + } + + if (index > endIndex) { + // Move end, check max size + if (index - startIndex + 1 > backing.length()) { + return false; + } + endIndex = index; + } else if (index < startIndex) { + // Move end, check max size + if (endIndex - index + 1 > backing.length()) { + return false; + } + startIndex = index; + } + int realIdx = toBufferIndex(index); + backing.increment(realIdx, delta); + return true; + } + + @Override + public long get(int index) { + if (index < startIndex || index > endIndex) { + return 0; + } + return backing.get(toBufferIndex(index)); + } + + @Override + public boolean isEmpty() { + return baseIndex == NULL_INDEX; + } + + @Override + public int getMaxSize() { + return backing.length(); + } + + @Override + public void clear() { + this.backing.clear(); + this.baseIndex = NULL_INDEX; + this.endIndex = NULL_INDEX; + this.startIndex = NULL_INDEX; + } + + private int toBufferIndex(int index) { + // Figure out the index relative to the start of the circular buffer. 
+ int result = index - baseIndex; + if (result >= backing.length()) { + result -= backing.length(); + } else if (result < 0) { + result += backing.length(); + } + return result; + } + + @Override + public String toString() { + StringBuilder result = new StringBuilder("{"); + for (int i = startIndex; i <= endIndex && startIndex != NULL_INDEX; i++) { + if (i != startIndex) { + result.append(','); + } + result.append(i).append('=').append(get(i)); + } + return result.append("}").toString(); + } +} diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/AdaptingIntegerArray.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/AdaptingIntegerArray.java new file mode 100644 index 00000000000..37ece0b3837 --- /dev/null +++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/AdaptingIntegerArray.java @@ -0,0 +1,219 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.sdk.metrics.internal.state; + +import java.util.Arrays; +import javax.annotation.Nullable; + +/** + * An integer array that automatically expands its memory consumption (via copy/allocation) when + * reaching limits. This assumes counts remain low, to lower memory overhead. + * + *

<p>This class is NOT thread-safe. It is expected to be behind a synchronized incrementer.
+ *
+ * <p>Instances start by attempting to store one-byte per-cell in the integer array. As values grow,
+ * this will automatically instantiate the next-size integer array (byte => short => int => long)
+ * and copy over values into the larger array. This class expects most usage to remain within the
+ * byte boundary (e.g. cell values < 128).
+ *
+ * <p>This class lives in the (very) hot path of metric recording. As such, we do "fun" things, like
+ * switch on markers and assume non-null based on presence of the markers, as such we suppress
+ * NullAway as it can't understand/express this level of guarantee.
+ *
+ * <p>Implementations MUST preserve the following:
+ *
+ * <ul>
+ *   <li>If cellSize == BYTE then byteBacking is not null
+ *   <li>If cellSize == SHORT then shortBacking is not null
+ *   <li>If cellSize == INT then intBacking is not null
+ *   <li>If cellSize == LONG then longBacking is not null
+ * </ul>
+ *
+ * <p>
This class is internal and is hence not for public use. Its APIs are unstable and can change + * at any time. + */ +public class AdaptingIntegerArray { + @Nullable private byte[] byteBacking; + @Nullable private short[] shortBacking; + @Nullable private int[] intBacking; + @Nullable private long[] longBacking; + + /** Possible sizes of backing arrays. */ + private enum ArrayCellSize { + BYTE, + SHORT, + INT, + LONG + } + /** The current byte size of integer cells in this array. */ + private ArrayCellSize cellSize; + + /** Construct an adapting integer array of a given size. */ + public AdaptingIntegerArray(int size) { + this.cellSize = ArrayCellSize.BYTE; + this.byteBacking = new byte[size]; + } + + /** Creates deep copy of another adapting integer array. */ + @SuppressWarnings("NullAway") + private AdaptingIntegerArray(AdaptingIntegerArray toCopy) { + this.cellSize = toCopy.cellSize; + switch (cellSize) { + case BYTE: + this.byteBacking = Arrays.copyOf(toCopy.byteBacking, toCopy.byteBacking.length); + break; + case SHORT: + this.shortBacking = Arrays.copyOf(toCopy.shortBacking, toCopy.shortBacking.length); + break; + case INT: + this.intBacking = Arrays.copyOf(toCopy.intBacking, toCopy.intBacking.length); + break; + case LONG: + this.longBacking = Arrays.copyOf(toCopy.longBacking, toCopy.longBacking.length); + break; + } + } + + /** Returns a deep-copy of this array, preserving cell size. */ + public AdaptingIntegerArray copy() { + return new AdaptingIntegerArray(this); + } + + /** Add {@code count} to the value stored at {@code index}. 
*/ + @SuppressWarnings("NullAway") + public void increment(int idx, long count) { + // TODO - prevent bad index + long result; + switch (cellSize) { + case BYTE: + result = byteBacking[idx] + count; + if (result > Byte.MAX_VALUE) { + // Resize + add + resizeToShort(); + increment(idx, count); + return; + } + byteBacking[idx] = (byte) result; + return; + case SHORT: + result = shortBacking[idx] + count; + if (result > Short.MAX_VALUE) { + resizeToInt(); + increment(idx, count); + return; + } + shortBacking[idx] = (short) result; + return; + case INT: + result = intBacking[idx] + count; + if (result > Integer.MAX_VALUE) { + resizeToLong(); + increment(idx, count); + return; + } + intBacking[idx] = (int) result; + return; + case LONG: + longBacking[idx] = longBacking[idx] + count; + return; + } + } + + /** Grab the value stored at {@code index}. */ + @SuppressWarnings("NullAway") + public long get(int index) { + long value = 0; + switch (this.cellSize) { + case BYTE: + value = this.byteBacking[index]; + break; + case SHORT: + value = this.shortBacking[index]; + break; + case INT: + value = this.intBacking[index]; + break; + case LONG: + value = this.longBacking[index]; + break; + } + return value; + } + + /** Return the length of this integer array. */ + @SuppressWarnings("NullAway") + public int length() { + int length = 0; + switch (this.cellSize) { + case BYTE: + length = this.byteBacking.length; + break; + case SHORT: + length = this.shortBacking.length; + break; + case INT: + length = this.intBacking.length; + break; + case LONG: + length = this.longBacking.length; + break; + } + return length; + } + /** Zeroes out all counts in this array. 
*/ + @SuppressWarnings("NullAway") + public void clear() { + switch (this.cellSize) { + case BYTE: + Arrays.fill(this.byteBacking, (byte) 0); + break; + case SHORT: + Arrays.fill(this.shortBacking, (short) 0); + break; + case INT: + Arrays.fill(this.intBacking, 0); + break; + case LONG: + Arrays.fill(this.longBacking, 0L); + break; + } + } + + /** Convert from byte => short backing array. */ + @SuppressWarnings("NullAway") + private void resizeToShort() { + short[] shortBacking = new short[this.byteBacking.length]; + for (int i = 0; i < this.byteBacking.length; i++) { + shortBacking[i] = this.byteBacking[i]; + } + this.cellSize = ArrayCellSize.SHORT; + this.shortBacking = shortBacking; + this.byteBacking = null; + } + + /** Convert from short => int backing array. */ + @SuppressWarnings("NullAway") + private void resizeToInt() { + int[] intBacking = new int[this.shortBacking.length]; + for (int i = 0; i < this.shortBacking.length; i++) { + intBacking[i] = this.shortBacking[i]; + } + this.cellSize = ArrayCellSize.INT; + this.intBacking = intBacking; + this.shortBacking = null; + } + /** convert from int => long backing array. 
*/ + @SuppressWarnings("NullAway") + private void resizeToLong() { + long[] longBacking = new long[this.intBacking.length]; + for (int i = 0; i < this.intBacking.length; i++) { + longBacking[i] = this.intBacking[i]; + } + this.cellSize = ArrayCellSize.LONG; + this.longBacking = longBacking; + this.intBacking = null; + } +} diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/ExponentialCounter.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/ExponentialCounter.java index 84c424c554f..4b89f53ef2d 100644 --- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/ExponentialCounter.java +++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/ExponentialCounter.java @@ -15,6 +15,8 @@ public interface ExponentialCounter { /** * The first index with a recording. May be negative. * + *
<p>
Note: the returned value is not meaningful when isEmpty returns true. + * * @return the first index with a recording. */ int getIndexStart(); @@ -22,6 +24,8 @@ public interface ExponentialCounter { /** * The last index with a recording. May be negative. * + *
<p>
Note: the returned value is not meaningful when isEmpty returns true. + * * @return The last index with a recording. */ int getIndexEnd(); @@ -38,7 +42,7 @@ public interface ExponentialCounter { /** * Get the number of recordings for the given index. * - * @return the number of recordings for the index. + * @return the number of recordings for the index, or 0 if the index is out of bounds. */ long get(int index); @@ -48,4 +52,10 @@ public interface ExponentialCounter { * @return true if no recordings, false if at least one recording. */ boolean isEmpty(); + + /** Returns the maximum number of buckets allowed in this counter. */ + int getMaxSize(); + + /** Resets all bucket counts to zero and resets index start/end tracking. */ + void clear(); } diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/ExponentialCounterFactory.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/ExponentialCounterFactory.java new file mode 100644 index 00000000000..9c649eed748 --- /dev/null +++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/ExponentialCounterFactory.java @@ -0,0 +1,64 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.sdk.metrics.internal.state; + +/** + * Interface for constructing backing data structure for exponential histogram buckets. + * + *
<p>
This class is internal and is hence not for public use. Its APIs are unstable and can change + * at any time. + */ +public interface ExponentialCounterFactory { + /** + * Constructs a new {@link io.opentelemetry.sdk.metrics.internal.state.ExponentialCounter} with + * maximum bucket size. + * + * @param maxSize The maximum number of buckets allowed in the counter. + */ + ExponentialCounter newCounter(int maxSize); + + /** Returns a deep-copy of an ExponentialCounter. */ + ExponentialCounter copy(ExponentialCounter other); + + /** Constructs exponential counters using {@link MapCounter}. */ + static ExponentialCounterFactory mapCounter() { + return new ExponentialCounterFactory() { + @Override + public ExponentialCounter newCounter(int maxSize) { + return new MapCounter(maxSize); + } + + @Override + public ExponentialCounter copy(ExponentialCounter other) { + return new MapCounter(other); + } + + @Override + public String toString() { + return "mapCounter"; + } + }; + } + /** Constructs exponential counters using {@link AdaptingCircularBufferCounter}. 
*/ + static ExponentialCounterFactory circularBufferCounter() { + return new ExponentialCounterFactory() { + @Override + public ExponentialCounter newCounter(int maxSize) { + return new AdaptingCircularBufferCounter(maxSize); + } + + @Override + public ExponentialCounter copy(ExponentialCounter other) { + return new AdaptingCircularBufferCounter(other); + } + + @Override + public String toString() { + return "circularBufferCounter"; + } + }; + } +} diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/MapCounter.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/MapCounter.java index 149f8f2db2c..eb1575e14b1 100644 --- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/MapCounter.java +++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/MapCounter.java @@ -18,18 +18,17 @@ * at any time */ public class MapCounter implements ExponentialCounter { - - public static final int MAX_SIZE = 320; - private static final int NULL_INDEX = Integer.MIN_VALUE; + private final int maxSize; private final Map backing; private int indexStart; private int indexEnd; /** Instantiate a MapCounter. */ - public MapCounter() { - this.backing = new ConcurrentHashMap<>((int) Math.ceil(MAX_SIZE / 0.75) + 1); + public MapCounter(int maxSize) { + this.maxSize = maxSize; + this.backing = new ConcurrentHashMap<>((int) Math.ceil(maxSize / 0.75) + 1); this.indexEnd = NULL_INDEX; this.indexStart = NULL_INDEX; } @@ -40,7 +39,8 @@ public MapCounter() { * @param otherCounter another exponential counter to make a deep copy of. 
*/ public MapCounter(ExponentialCounter otherCounter) { - this.backing = new ConcurrentHashMap<>((int) Math.ceil(MAX_SIZE / 0.75) + 1); + this.maxSize = otherCounter.getMaxSize(); + this.backing = new ConcurrentHashMap<>((int) Math.ceil(maxSize / 0.75) + 1); this.indexStart = otherCounter.getIndexStart(); this.indexEnd = otherCounter.getIndexEnd(); @@ -74,12 +74,12 @@ public boolean increment(int index, long delta) { // Extend window if possible. if it would exceed maxSize, then return false. if (index > indexEnd) { - if (index - indexStart + 1 > MAX_SIZE) { + if (index - indexStart + 1 > maxSize) { return false; } indexEnd = index; } else if (index < indexStart) { - if (indexEnd - index + 1 > MAX_SIZE) { + if (indexEnd - index + 1 > maxSize) { return false; } indexStart = index; @@ -92,7 +92,7 @@ public boolean increment(int index, long delta) { @Override public long get(int index) { if (index < indexStart || index > indexEnd) { - throw new IndexOutOfBoundsException(String.format("Index %d out of range.", index)); + return 0; } AtomicLong result = backing.get(index); if (result == null) { @@ -106,6 +106,18 @@ public boolean isEmpty() { return backing.isEmpty(); } + @Override + public int getMaxSize() { + return maxSize; + } + + @Override + public void clear() { + this.backing.clear(); + this.indexStart = NULL_INDEX; + this.indexEnd = NULL_INDEX; + } + private synchronized void doIncrement(int index, long delta) { long prevValue = backing.computeIfAbsent(index, k -> new AtomicLong(0)).getAndAdd(delta); diff --git a/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/aggregator/AggregatorHandleTest.java b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/aggregator/AggregatorHandleTest.java index 8617f601627..a0b5eadb4cf 100644 --- a/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/aggregator/AggregatorHandleTest.java +++ b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/aggregator/AggregatorHandleTest.java @@ 
-25,7 +25,7 @@ import org.mockito.junit.jupiter.MockitoExtension; @ExtendWith(MockitoExtension.class) -public class AggregatorHandleTest { +class AggregatorHandleTest { @Mock ExemplarReservoir reservoir; diff --git a/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleExponentialHistogramAggregatorTest.java b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleExponentialHistogramAggregatorTest.java index 9f9741de5d5..0d2358a8606 100644 --- a/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleExponentialHistogramAggregatorTest.java +++ b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleExponentialHistogramAggregatorTest.java @@ -37,7 +37,7 @@ import org.mockito.junit.jupiter.MockitoExtension; @ExtendWith(MockitoExtension.class) -public class DoubleExponentialHistogramAggregatorTest { +class DoubleExponentialHistogramAggregatorTest { @Mock ExemplarReservoir reservoir; @@ -178,6 +178,9 @@ void diffAccumulation() { getTestAccumulation(previousExemplars, 0, 1, -1); // Assure most recent exemplars are kept + // Note: This test relies on implementation details of ExponentialCounter, specifically it + // assumes that an Array of all zeros is the same as an empty counter array for negative + // buckets. 
assertThat(aggregator.diff(previousAccumulation, nextAccumulation)) .isEqualTo(getTestAccumulation(exemplars, 0, 1)); } diff --git a/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleExponentialHistogramBucketsTest.java b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleExponentialHistogramBucketsTest.java index 2f9c73e3c85..68454206175 100644 --- a/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleExponentialHistogramBucketsTest.java +++ b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleExponentialHistogramBucketsTest.java @@ -10,36 +10,49 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; +import io.opentelemetry.sdk.metrics.internal.state.ExponentialCounterFactory; import java.util.Arrays; import java.util.Collections; -import org.junit.jupiter.api.Test; +import java.util.stream.Stream; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; /** * These are extra test cases for buckets. Much of this class is already tested via more complex * test cases at {@link DoubleExponentialHistogramAggregatorTest}. */ -public class DoubleExponentialHistogramBucketsTest { +class DoubleExponentialHistogramBucketsTest { - @Test - void testRecordSimple() { + static Stream bucketStrategies() { + return Stream.of( + ExponentialBucketStrategy.newStrategy(20, 320, ExponentialCounterFactory.mapCounter()), + ExponentialBucketStrategy.newStrategy( + 20, 320, ExponentialCounterFactory.circularBufferCounter())); + } + + @ParameterizedTest + @MethodSource("bucketStrategies") + void testRecordSimple(ExponentialBucketStrategy buckets) { // Can only effectively test recording of one value here due to downscaling required. // More complex recording/downscaling operations are tested in the aggregator. 
- DoubleExponentialHistogramBuckets b = new DoubleExponentialHistogramBuckets(); + DoubleExponentialHistogramBuckets b = buckets.newBuckets(); b.record(1); b.record(1); b.record(1); assertThat(b).hasTotalCount(3).hasCounts(Collections.singletonList(3L)); } - @Test - void testRecordShouldError() { - DoubleExponentialHistogramBuckets b = new DoubleExponentialHistogramBuckets(); + @ParameterizedTest + @MethodSource("bucketStrategies") + void testRecordShouldError(ExponentialBucketStrategy buckets) { + DoubleExponentialHistogramBuckets b = buckets.newBuckets(); assertThatThrownBy(() -> b.record(0)).isInstanceOf(IllegalStateException.class); } - @Test - void testDownscale() { - DoubleExponentialHistogramBuckets b = new DoubleExponentialHistogramBuckets(); + @ParameterizedTest + @MethodSource("bucketStrategies") + void testDownscale(ExponentialBucketStrategy buckets) { + DoubleExponentialHistogramBuckets b = buckets.newBuckets(); b.downscale(20); // scale of zero is easy to reason with without a calculator b.record(1); b.record(2); @@ -48,38 +61,64 @@ void testDownscale() { assertThat(b).hasTotalCount(3).hasCounts(Arrays.asList(1L, 1L, 1L)).hasOffset(0); } - @Test - void testDownscaleShouldError() { - DoubleExponentialHistogramBuckets b = new DoubleExponentialHistogramBuckets(); + @ParameterizedTest + @MethodSource("bucketStrategies") + void testDownscaleShouldError(ExponentialBucketStrategy buckets) { + DoubleExponentialHistogramBuckets b = buckets.newBuckets(); assertThatThrownBy(() -> b.downscale(-1)).isInstanceOf(IllegalStateException.class); } - @Test - void testEqualsAndHashCode() { - DoubleExponentialHistogramBuckets a = new DoubleExponentialHistogramBuckets(); - DoubleExponentialHistogramBuckets b = new DoubleExponentialHistogramBuckets(); + @ParameterizedTest + @MethodSource("bucketStrategies") + void testEqualsAndHashCode(ExponentialBucketStrategy buckets) { + DoubleExponentialHistogramBuckets a = buckets.newBuckets(); + DoubleExponentialHistogramBuckets b = 
buckets.newBuckets(); - assertNotEquals(a, null); + assertThat(a).isNotNull(); assertEquals(a, b); assertEquals(b, a); - assertEquals(a.hashCode(), b.hashCode()); + assertThat(a).hasSameHashCodeAs(b); a.record(1); assertNotEquals(a, b); assertNotEquals(b, a); - assertNotEquals(a.hashCode(), b.hashCode()); + assertThat(a).doesNotHaveSameHashCodeAs(b); b.record(1); assertEquals(a, b); assertEquals(b, a); - assertEquals(a.hashCode(), b.hashCode()); + assertThat(a).hasSameHashCodeAs(b); + + // Now we start to play with altering offset, but having same effective counts. + DoubleExponentialHistogramBuckets empty = buckets.newBuckets(); + empty.downscale(20); + DoubleExponentialHistogramBuckets c = buckets.newBuckets(); + c.downscale(20); + assertThat(c.record(1)).isTrue(); + // Record can fail if scale is not set correctly. + assertThat(c.record(3)).isTrue(); + assertThat(c.getTotalCount()).isEqualTo(2); + + DoubleExponentialHistogramBuckets resultCc = DoubleExponentialHistogramBuckets.diff(c, c); + assertThat(c).isNotEqualTo(resultCc); + assertEquals(resultCc, empty); + assertThat(resultCc).hasSameHashCodeAs(empty); + + DoubleExponentialHistogramBuckets d = buckets.newBuckets(); + d.record(1); + // Downscale d to be the same as C but do NOT record the value 3. + d.downscale(20); + DoubleExponentialHistogramBuckets resultCd = DoubleExponentialHistogramBuckets.diff(c, d); + assertThat(c).isNotEqualTo(d); + assertThat(resultCd).isNotEqualTo(empty); } - @Test - void testToString() { + @ParameterizedTest + @MethodSource("bucketStrategies") + void testToString(ExponentialBucketStrategy buckets) { // Note this test may break once difference implementations for counts are developed since // the counts may have different toStrings(). 
- DoubleExponentialHistogramBuckets b = new DoubleExponentialHistogramBuckets(); + DoubleExponentialHistogramBuckets b = buckets.newBuckets(); b.record(1); assertThat(b.toString()) .isEqualTo("DoubleExponentialHistogramBuckets{scale: 20, offset: 0, counts: {0=1} }"); diff --git a/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleHistogramAggregatorTest.java b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleHistogramAggregatorTest.java index 15ba146260e..723cf13c01f 100644 --- a/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleHistogramAggregatorTest.java +++ b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/aggregator/DoubleHistogramAggregatorTest.java @@ -33,7 +33,7 @@ import org.mockito.junit.jupiter.MockitoExtension; @ExtendWith(MockitoExtension.class) -public class DoubleHistogramAggregatorTest { +class DoubleHistogramAggregatorTest { @Mock ExemplarReservoir reservoir; diff --git a/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/state/AdaptingIntegerArrayTest.java b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/state/AdaptingIntegerArrayTest.java new file mode 100644 index 00000000000..f168f2abaae --- /dev/null +++ b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/state/AdaptingIntegerArrayTest.java @@ -0,0 +1,85 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.sdk.metrics.internal.state; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.stream.Stream; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +class AdaptingIntegerArrayTest { + + // Set of values that require specific sized arrays to hold them. 
+ static Stream interestingValues() { + return Stream.of( + 1L, // Fits in byte + Byte.MAX_VALUE + 1L, // Fits in Short + Short.MAX_VALUE + 1L, // Fits in Integer + Integer.MAX_VALUE + 1L // Fits in Long + ); + } + + @ParameterizedTest + @MethodSource("interestingValues") + void testSize(long value) { + AdaptingIntegerArray counter = new AdaptingIntegerArray(10); + assertThat(counter.length()).isEqualTo(10); + // Force array to change size, make sure size is the same. + counter.increment(0, value); + assertThat(counter.length()).isEqualTo(10); + } + + @ParameterizedTest + @MethodSource("interestingValues") + void testIncrementAndGet(long value) { + AdaptingIntegerArray counter = new AdaptingIntegerArray(10); + for (int idx = 0; idx < 10; idx += 1) { + assertThat(counter.get(idx)).isEqualTo(0); + counter.increment(idx, 1); + assertThat(counter.get(idx)).isEqualTo(1); + counter.increment(idx, value); + assertThat(counter.get(idx)).isEqualTo(value + 1); + } + } + + @Test + void testHandlesLongValues() { + AdaptingIntegerArray counter = new AdaptingIntegerArray(1); + assertThat(counter.get(0)).isEqualTo(0); + long expected = Integer.MAX_VALUE + 1L; + counter.increment(0, expected); + assertThat(counter.get(0)).isEqualTo(expected); + } + + @ParameterizedTest + @MethodSource("interestingValues") + void testCopy(long value) { + AdaptingIntegerArray counter = new AdaptingIntegerArray(1); + counter.increment(0, value); + assertThat(counter.get(0)).isEqualTo(value); + + AdaptingIntegerArray copy = counter.copy(); + assertThat(copy.get(0)).isEqualTo(value); + + counter.increment(0, 1); + assertThat(counter.get(0)).isEqualTo(value + 1); + assertThat(copy.get(0)).isEqualTo(value); + } + + @ParameterizedTest + @MethodSource("interestingValues") + void testClear(long value) { + AdaptingIntegerArray counter = new AdaptingIntegerArray(1); + counter.increment(0, value); + assertThat(counter.get(0)).isEqualTo(value); + + counter.clear(); + counter.increment(0, 1); + 
assertThat(counter.get(0)).isEqualTo(1); + } +} diff --git a/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/state/ExponentialCounterTest.java b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/state/ExponentialCounterTest.java new file mode 100644 index 00000000000..618b43a392e --- /dev/null +++ b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/state/ExponentialCounterTest.java @@ -0,0 +1,127 @@ +/* + * Copyright The OpenTelemetry Authors + * SPDX-License-Identifier: Apache-2.0 + */ + +package io.opentelemetry.sdk.metrics.internal.state; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.stream.Stream; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +public class ExponentialCounterTest { + + static Stream counterProviders() { + return Stream.of( + ExponentialCounterFactory.mapCounter(), ExponentialCounterFactory.circularBufferCounter()); + } + + @ParameterizedTest + @MethodSource("counterProviders") + void returnsZeroOutsidePopulatedRange(ExponentialCounterFactory counterFactory) { + ExponentialCounter counter = counterFactory.newCounter(10); + assertThat(counter.get(0)).isEqualTo(0); + assertThat(counter.get(100)).isEqualTo(0); + counter.increment(2, 1); + counter.increment(99, 1); + assertThat(counter.get(0)).isEqualTo(0); + assertThat(counter.get(100)).isEqualTo(0); + } + + @ParameterizedTest + @MethodSource("counterProviders") + void expandLower(ExponentialCounterFactory counterFactory) { + ExponentialCounter counter = counterFactory.newCounter(320); + assertThat(counter.increment(10, 1)).isTrue(); + // Add BEFORE the initial seed (array index 0) and make sure we wrap around the data structure. 
+ assertThat(counter.increment(0, 1)).isTrue(); + assertThat(counter.get(10)).isEqualTo(1); + assertThat(counter.get(0)).isEqualTo(1); + assertThat(counter.getIndexStart()).as("index start").isEqualTo(0); + assertThat(counter.getIndexEnd()).as("index end").isEqualTo(10); + // Add AFTER initial entry and just push back end. + assertThat(counter.increment(20, 1)).isTrue(); + assertThat(counter.get(20)).isEqualTo(1); + assertThat(counter.get(10)).isEqualTo(1); + assertThat(counter.get(0)).isEqualTo(1); + assertThat(counter.getIndexStart()).isEqualTo(0); + assertThat(counter.getIndexEnd()).isEqualTo(20); + } + + @ParameterizedTest + @MethodSource("counterProviders") + void shouldFailAtLimit(ExponentialCounterFactory counterFactory) { + ExponentialCounter counter = counterFactory.newCounter(320); + assertThat(counter.increment(0, 1)).isTrue(); + assertThat(counter.increment(319, 1)).isTrue(); + // Check state + assertThat(counter.getIndexStart()).as("index start").isEqualTo(0); + assertThat(counter.getIndexEnd()).as("index start").isEqualTo(319); + assertThat(counter.get(0)).as("counter[0]").isEqualTo(1); + assertThat(counter.get(319)).as("counter[319]").isEqualTo(1); + // Adding over the maximum # of buckets + assertThat(counter.increment(3000, 1)).isFalse(); + } + + @ParameterizedTest + @MethodSource("counterProviders") + void shouldCopyCounters(ExponentialCounterFactory counterFactory) { + ExponentialCounter counter = counterFactory.newCounter(2); + assertThat(counter.increment(2, 1)).isTrue(); + assertThat(counter.increment(1, 1)).isTrue(); + assertThat(counter.increment(3, 1)).isFalse(); + + ExponentialCounter copy = counterFactory.copy(counter); + assertThat(counter.get(2)).as("counter[2]").isEqualTo(1); + assertThat(copy.get(2)).as("copy[2]").isEqualTo(1); + assertThat(copy.getMaxSize()).isEqualTo(counter.getMaxSize()); + assertThat(copy.getIndexStart()).isEqualTo(counter.getIndexStart()); + assertThat(copy.getIndexEnd()).isEqualTo(counter.getIndexEnd()); + // 
Mutate copy and make sure original is unchanged. + assertThat(copy.increment(2, 1)).isTrue(); + assertThat(copy.get(2)).as("copy[2]").isEqualTo(2); + assertThat(counter.get(2)).as("counter[2]").isEqualTo(1); + } + + @ParameterizedTest + @MethodSource("counterProviders") + void shouldCopyMapCounters(ExponentialCounterFactory counterFactory) { + ExponentialCounter counter = ExponentialCounterFactory.mapCounter().newCounter(2); + assertThat(counter.increment(2, 1)).isTrue(); + assertThat(counter.increment(1, 1)).isTrue(); + assertThat(counter.increment(3, 1)).isFalse(); + + ExponentialCounter copy = counterFactory.copy(counter); + assertThat(counter.get(2)).as("counter[2]").isEqualTo(1); + assertThat(copy.get(2)).as("copy[2]").isEqualTo(1); + assertThat(copy.getMaxSize()).isEqualTo(counter.getMaxSize()); + assertThat(copy.getIndexStart()).isEqualTo(counter.getIndexStart()); + assertThat(copy.getIndexEnd()).isEqualTo(counter.getIndexEnd()); + // Mutate copy and make sure original is unchanged. + assertThat(copy.increment(2, 1)).isTrue(); + assertThat(copy.get(2)).as("copy[2]").isEqualTo(2); + assertThat(counter.get(2)).as("counter[2]").isEqualTo(1); + } + + @ParameterizedTest + @MethodSource("counterProviders") + void shouldCopyCircularBufferCounters(ExponentialCounterFactory counterFactory) { + ExponentialCounter counter = ExponentialCounterFactory.circularBufferCounter().newCounter(2); + assertThat(counter.increment(2, 1)).isTrue(); + assertThat(counter.increment(1, 1)).isTrue(); + assertThat(counter.increment(3, 1)).isFalse(); + + ExponentialCounter copy = counterFactory.copy(counter); + assertThat(counter.get(2)).as("counter[2]").isEqualTo(1); + assertThat(copy.get(2)).as("copy[2]").isEqualTo(1); + assertThat(copy.getMaxSize()).isEqualTo(counter.getMaxSize()); + assertThat(copy.getIndexStart()).isEqualTo(counter.getIndexStart()); + assertThat(copy.getIndexEnd()).isEqualTo(counter.getIndexEnd()); + // Mutate copy and make sure original is unchanged. 
+ assertThat(copy.increment(2, 1)).isTrue(); + assertThat(copy.get(2)).as("copy[2]").isEqualTo(2); + assertThat(counter.get(2)).as("counter[2]").isEqualTo(1); + } +}