From cca98af45619d31795b7b3e0f66872e20660a9cc Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Tue, 30 Apr 2024 18:50:41 +0200 Subject: [PATCH] Optimise terms aggregations for single value fields (#107930) This commit optimises terms aggregations for single value fields. --- docs/changelog/107930.yaml | 5 + .../countedterms/CountedTermsAggregator.java | 44 ++++++--- .../bucket/prefix/IpPrefixAggregator.java | 94 ++++++++++--------- .../bucket/terms/LongRareTermsAggregator.java | 62 +++++++----- .../terms/MapStringTermsAggregator.java | 36 ++++++- .../bucket/terms/NumericTermsAggregator.java | 51 ++++++---- .../terms/StringRareTermsAggregator.java | 65 ++++++++----- 7 files changed, 242 insertions(+), 115 deletions(-) create mode 100644 docs/changelog/107930.yaml diff --git a/docs/changelog/107930.yaml b/docs/changelog/107930.yaml new file mode 100644 index 0000000000000..90af5c55b8604 --- /dev/null +++ b/docs/changelog/107930.yaml @@ -0,0 +1,5 @@ +pr: 107930 +summary: Optimise terms aggregations for single value fields +area: Aggregations +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java index ba59026fbc12a..736b1c0c0c249 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java @@ -8,6 +8,8 @@ package org.elasticsearch.search.aggregations.bucket.countedterms; +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.BytesRef; import org.elasticsearch.core.Releasables; @@ -63,27 +65,47 @@ class CountedTermsAggregator extends TermsAggregator { @Override public 
LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, LeafBucketCollector sub) throws IOException { - SortedSetDocValues ords = valuesSource.ordinalsValues(aggCtx.getLeafReaderContext()); + final SortedSetDocValues ords = valuesSource.ordinalsValues(aggCtx.getLeafReaderContext()); + final SortedDocValues singleton = DocValues.unwrapSingleton(ords); + return singleton != null ? getLeafCollector(singleton, sub) : getLeafCollector(ords, sub); + } + + private LeafBucketCollector getLeafCollector(SortedSetDocValues ords, LeafBucketCollector sub) { return new LeafBucketCollectorBase(sub, ords) { @Override public void collect(int doc, long owningBucketOrd) throws IOException { - if (ords.advanceExact(doc) == false) { - return; - } - for (long ord = ords.nextOrd(); ord != NO_MORE_ORDS; ord = ords.nextOrd()) { - long bucketOrdinal = bucketOrds.add(owningBucketOrd, ords.lookupOrd(ord)); - if (bucketOrdinal < 0) { // already seen - bucketOrdinal = -1 - bucketOrdinal; - collectExistingBucket(sub, doc, bucketOrdinal); - } else { - collectBucket(sub, doc, bucketOrdinal); + if (ords.advanceExact(doc)) { + for (long ord = ords.nextOrd(); ord != NO_MORE_ORDS; ord = ords.nextOrd()) { + collectOrdinal(bucketOrds.add(owningBucketOrd, ords.lookupOrd(ord)), doc, sub); } } } }; } + private LeafBucketCollector getLeafCollector(SortedDocValues ords, LeafBucketCollector sub) { + return new LeafBucketCollectorBase(sub, ords) { + + @Override + public void collect(int doc, long owningBucketOrd) throws IOException { + if (ords.advanceExact(doc)) { + collectOrdinal(bucketOrds.add(owningBucketOrd, ords.lookupOrd(ords.ordValue())), doc, sub); + } + + } + }; + } + + private void collectOrdinal(long bucketOrdinal, int doc, LeafBucketCollector sub) throws IOException { + if (bucketOrdinal < 0) { // already seen + bucketOrdinal = -1 - bucketOrdinal; + collectExistingBucket(sub, doc, bucketOrdinal); + } else { + collectBucket(sub, doc, bucketOrdinal); + } + } + @Override public 
InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { StringTerms.Bucket[][] topBucketsPerOrd = new StringTerms.Bucket[owningBucketOrds.length][]; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregator.java index ec95052f5c3f5..9b3d141c9c332 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregator.java @@ -8,9 +8,11 @@ package org.elasticsearch.search.aggregations.bucket.prefix; +import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.core.Releasables; +import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.search.aggregations.AggregationErrors; import org.elasticsearch.search.aggregations.AggregationExecutionContext; @@ -97,56 +99,62 @@ public IpPrefixAggregator( @Override protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, LeafBucketCollector sub) throws IOException { - return new IpPrefixLeafCollector(sub, config.getValuesSource().bytesValues(aggCtx.getLeafReaderContext()), ipPrefix); + final SortedBinaryDocValues values = config.getValuesSource().bytesValues(aggCtx.getLeafReaderContext()); + final BinaryDocValues singleton = FieldData.unwrapSingleton(values); + return singleton != null ? 
getLeafCollector(singleton, sub) : getLeafCollector(values, sub); } - private class IpPrefixLeafCollector extends LeafBucketCollectorBase { - private final IpPrefix ipPrefix; - private final LeafBucketCollector sub; - private final SortedBinaryDocValues values; - - IpPrefixLeafCollector(final LeafBucketCollector sub, final SortedBinaryDocValues values, final IpPrefix ipPrefix) { - super(sub, values); - this.sub = sub; - this.values = values; - this.ipPrefix = ipPrefix; - } - - @Override - public void collect(int doc, long owningBucketOrd) throws IOException { - BytesRef previousSubnet = null; - BytesRef subnet = new BytesRef(new byte[ipPrefix.netmask.length]); - BytesRef ipAddress; - if (values.advanceExact(doc)) { - int valuesCount = values.docValueCount(); - - for (int i = 0; i < valuesCount; ++i) { - ipAddress = values.nextValue(); - maskIpAddress(ipAddress, ipPrefix.netmask, subnet); - if (previousSubnet != null && subnet.bytesEquals(previousSubnet)) { - continue; + private LeafBucketCollector getLeafCollector(SortedBinaryDocValues values, LeafBucketCollector sub) { + + return new LeafBucketCollectorBase(sub, values) { + @Override + public void collect(int doc, long owningBucketOrd) throws IOException { + if (values.advanceExact(doc)) { + BytesRef previousSubnet = null; + for (int i = 0; i < values.docValueCount(); ++i) { + final BytesRef subnet = new BytesRef(new byte[ipPrefix.netmask.length]); + maskIpAddress(values.nextValue(), ipPrefix.netmask, subnet); + if (previousSubnet != null && subnet.bytesEquals(previousSubnet)) { + continue; + } + addBucketOrd(bucketOrds.add(owningBucketOrd, subnet), doc, sub); + previousSubnet = subnet; } - long bucketOrd = bucketOrds.add(owningBucketOrd, subnet); - if (bucketOrd < 0) { - bucketOrd = -1 - bucketOrd; - collectExistingBucket(sub, doc, bucketOrd); - } else { - collectBucket(sub, doc, bucketOrd); - } - previousSubnet = subnet; } } - } + }; + } - private static void maskIpAddress(final BytesRef ipAddress, final 
BytesRef subnetMask, final BytesRef subnet) { - assert ipAddress.length == 16 : "Invalid length for ip address [" + ipAddress.length + "] expected 16 bytes"; - // NOTE: IPv4 addresses are encoded as 16-bytes. As a result, we use an - // offset (12) to apply the subnet to the last 4 bytes (byes 12, 13, 14, 15) - // if the subnet mask is just a 4-bytes subnet mask. - int offset = subnetMask.length == 4 ? 12 : 0; - for (int i = 0; i < subnetMask.length; ++i) { - subnet.bytes[i] = (byte) (ipAddress.bytes[i + offset] & subnetMask.bytes[i]); + private LeafBucketCollector getLeafCollector(BinaryDocValues values, LeafBucketCollector sub) { + final BytesRef subnet = new BytesRef(new byte[ipPrefix.netmask.length]); + return new LeafBucketCollectorBase(sub, values) { + @Override + public void collect(int doc, long owningBucketOrd) throws IOException { + if (values.advanceExact(doc)) { + maskIpAddress(values.binaryValue(), ipPrefix.netmask, subnet); + addBucketOrd(bucketOrds.add(owningBucketOrd, subnet), doc, sub); + } } + }; + } + + private void addBucketOrd(long bucketOrd, int doc, LeafBucketCollector sub) throws IOException { + if (bucketOrd < 0) { + bucketOrd = -1 - bucketOrd; + collectExistingBucket(sub, doc, bucketOrd); + } else { + collectBucket(sub, doc, bucketOrd); + } + } + + private static void maskIpAddress(final BytesRef ipAddress, final BytesRef subnetMask, final BytesRef subnet) { + assert ipAddress.length == 16 : "Invalid length for ip address [" + ipAddress.length + "] expected 16 bytes"; + // NOTE: IPv4 addresses are encoded as 16-bytes. As a result, we use an + // offset (12) to apply the subnet to the last 4 bytes (bytes 12, 13, 14, 15) + // if the subnet mask is just a 4-bytes subnet mask. + int offset = subnetMask.length == 4 ? 
12 : 0; + for (int i = 0; i < subnetMask.length; ++i) { + subnet.bytes[i] = (byte) (ipAddress.bytes[i + offset] & subnetMask.bytes[i]); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTermsAggregator.java index 9bbc3809c0f6d..4e5c0e344420d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTermsAggregator.java @@ -7,7 +7,9 @@ */ package org.elasticsearch.search.aggregations.bucket.terms; +import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedNumericDocValues; import org.elasticsearch.common.util.LongHash; import org.elasticsearch.common.util.SetBackedScalingCuckooFilter; @@ -66,36 +68,54 @@ protected static SortedNumericDocValues getValues(ValuesSource.Numeric valuesSou @Override public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, LeafBucketCollector sub) throws IOException { - SortedNumericDocValues values = getValues(valuesSource, aggCtx.getLeafReaderContext()); + final SortedNumericDocValues values = getValues(valuesSource, aggCtx.getLeafReaderContext()); + final NumericDocValues singleton = DocValues.unwrapSingleton(values); + return singleton != null ? 
getLeafCollector(singleton, sub) : getLeafCollector(values, sub); + } + + private LeafBucketCollector getLeafCollector(SortedNumericDocValues values, LeafBucketCollector sub) { return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int docId, long owningBucketOrd) throws IOException { - if (false == values.advanceExact(docId)) { - return; - } - int valuesCount = values.docValueCount(); - long previous = Long.MAX_VALUE; - for (int i = 0; i < valuesCount; ++i) { - long val = values.nextValue(); - if (i == 0 && previous == val) { - continue; - } - previous = val; - if (filter != null && false == filter.accept(val)) { - continue; - } - long bucketOrdinal = bucketOrds.add(owningBucketOrd, val); - if (bucketOrdinal < 0) { // already seen - bucketOrdinal = -1 - bucketOrdinal; - collectExistingBucket(sub, docId, bucketOrdinal); - } else { - collectBucket(sub, docId, bucketOrdinal); + if (values.advanceExact(docId)) { + long previous = Long.MAX_VALUE; + for (int i = 0; i < values.docValueCount(); ++i) { + long val = values.nextValue(); + if (i == 0 && previous == val) { + continue; + } + collectValue(val, docId, owningBucketOrd, sub); + previous = val; } } } }; } + private LeafBucketCollector getLeafCollector(NumericDocValues values, LeafBucketCollector sub) { + return new LeafBucketCollectorBase(sub, values) { + @Override + public void collect(int docId, long owningBucketOrd) throws IOException { + if (values.advanceExact(docId)) { + collectValue(values.longValue(), docId, owningBucketOrd, sub); + } + } + }; + } + + private void collectValue(long val, int docId, long owningBucketOrd, LeafBucketCollector sub) throws IOException { + if (filter == null || filter.accept(val)) { + long bucketOrdinal = bucketOrds.add(owningBucketOrd, val); + if (bucketOrdinal < 0) { // already seen + bucketOrdinal = -1 - bucketOrdinal; + collectExistingBucket(sub, docId, bucketOrdinal); + } else { + collectBucket(sub, docId, bucketOrdinal); + } + } + + } + @Override 
public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { /* diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java index 66ecdeb1a87bd..9cea884667325 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.search.aggregations.bucket.terms; +import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.BytesRef; @@ -16,6 +17,7 @@ import org.elasticsearch.common.util.ObjectArrayPriorityQueue; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; +import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationExecutionContext; @@ -204,7 +206,19 @@ public LeafBucketCollector getLeafCollector( LongConsumer addRequestCircuitBreakerBytes, CollectConsumer consumer ) throws IOException { - SortedBinaryDocValues values = valuesSourceConfig.getValuesSource().bytesValues(ctx); + final SortedBinaryDocValues values = valuesSourceConfig.getValuesSource().bytesValues(ctx); + final BinaryDocValues singleton = FieldData.unwrapSingleton(values); + return singleton != null + ? 
getLeafCollector(includeExclude, singleton, sub, consumer) + : getLeafCollector(includeExclude, values, sub, consumer); + } + + private LeafBucketCollector getLeafCollector( + IncludeExclude.StringFilter includeExclude, + SortedBinaryDocValues values, + LeafBucketCollector sub, + CollectConsumer consumer + ) { return new LeafBucketCollectorBase(sub, values) { final BytesRefBuilder previous = new BytesRefBuilder(); @@ -233,6 +247,26 @@ public void collect(int doc, long owningBucketOrd) throws IOException { }; } + private LeafBucketCollector getLeafCollector( + IncludeExclude.StringFilter includeExclude, + BinaryDocValues values, + LeafBucketCollector sub, + CollectConsumer consumer + ) { + return new LeafBucketCollectorBase(sub, values) { + + @Override + public void collect(int doc, long owningBucketOrd) throws IOException { + if (values.advanceExact(doc)) { + BytesRef bytes = values.binaryValue(); + if (includeExclude == null || includeExclude.accept(bytes)) { + consumer.accept(sub, doc, owningBucketOrd, bytes); + } + } + } + }; + } + @Override public void close() {} } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java index cce5140a36af7..a438a78a2efcd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java @@ -7,7 +7,9 @@ */ package org.elasticsearch.search.aggregations.bucket.terms; +import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.NumericUtils; @@ -86,33 +88,50 @@ public ScoreMode scoreMode() { @Override public 
LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, LeafBucketCollector sub) throws IOException { - SortedNumericDocValues values = resultStrategy.getValues(aggCtx.getLeafReaderContext()); - return resultStrategy.wrapCollector(new LeafBucketCollectorBase(sub, values) { + final SortedNumericDocValues values = resultStrategy.getValues(aggCtx.getLeafReaderContext()); + final NumericDocValues singleton = DocValues.unwrapSingleton(values); + return resultStrategy.wrapCollector(singleton != null ? getLeafCollector(singleton, sub) : getLeafCollector(values, sub)); + } + + private LeafBucketCollector getLeafCollector(SortedNumericDocValues values, LeafBucketCollector sub) { + return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int doc, long owningBucketOrd) throws IOException { if (values.advanceExact(doc)) { - int valuesCount = values.docValueCount(); - long previous = Long.MAX_VALUE; - for (int i = 0; i < valuesCount; ++i) { + for (int i = 0; i < values.docValueCount(); ++i) { long val = values.nextValue(); if (previous != val || i == 0) { - if ((longFilter == null) || (longFilter.accept(val))) { - long bucketOrdinal = bucketOrds.add(owningBucketOrd, val); - if (bucketOrdinal < 0) { // already seen - bucketOrdinal = -1 - bucketOrdinal; - collectExistingBucket(sub, doc, bucketOrdinal); - } else { - collectBucket(sub, doc, bucketOrdinal); - } - } - + collectValue(val, doc, owningBucketOrd, sub); previous = val; } } } } - }); + }; + } + + private LeafBucketCollector getLeafCollector(NumericDocValues values, LeafBucketCollector sub) { + return new LeafBucketCollectorBase(sub, values) { + @Override + public void collect(int doc, long owningBucketOrd) throws IOException { + if (values.advanceExact(doc)) { + collectValue(values.longValue(), doc, owningBucketOrd, sub); + } + } + }; + } + + private void collectValue(long val, int doc, long owningBucketOrd, LeafBucketCollector sub) throws IOException { + if (longFilter == null || 
longFilter.accept(val)) { + long bucketOrdinal = bucketOrds.add(owningBucketOrd, val); + if (bucketOrdinal < 0) { // already seen + bucketOrdinal = -1 - bucketOrdinal; + collectExistingBucket(sub, doc, bucketOrdinal); + } else { + collectBucket(sub, doc, bucketOrdinal); + } + } } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java index 186ef8a9107b6..d9e064f32494b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java @@ -7,11 +7,13 @@ */ package org.elasticsearch.search.aggregations.bucket.terms; +import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.common.util.BytesRefHash; import org.elasticsearch.common.util.SetBackedScalingCuckooFilter; import org.elasticsearch.core.Releasables; +import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationExecutionContext; @@ -64,40 +66,57 @@ public class StringRareTermsAggregator extends AbstractRareTermsAggregator { @Override public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, final LeafBucketCollector sub) throws IOException { final SortedBinaryDocValues values = valuesSource.bytesValues(aggCtx.getLeafReaderContext()); + final BinaryDocValues singleton = FieldData.unwrapSingleton(values); + return singleton != null ? 
getLeafCollector(singleton, sub) : getLeafCollector(values, sub); + } + + private LeafBucketCollector getLeafCollector(SortedBinaryDocValues values, LeafBucketCollector sub) { return new LeafBucketCollectorBase(sub, values) { final BytesRefBuilder previous = new BytesRefBuilder(); @Override public void collect(int docId, long owningBucketOrd) throws IOException { - if (false == values.advanceExact(docId)) { - return; - } - int valuesCount = values.docValueCount(); - previous.clear(); - - // SortedBinaryDocValues don't guarantee uniqueness so we - // need to take care of dups - for (int i = 0; i < valuesCount; ++i) { - BytesRef bytes = values.nextValue(); - if (filter != null && false == filter.accept(bytes)) { - continue; - } - if (i > 0 && previous.get().equals(bytes)) { - continue; - } - previous.copyBytes(bytes); - long bucketOrdinal = bucketOrds.add(owningBucketOrd, bytes); - if (bucketOrdinal < 0) { // already seen - bucketOrdinal = -1 - bucketOrdinal; - collectExistingBucket(sub, docId, bucketOrdinal); - } else { - collectBucket(sub, docId, bucketOrdinal); + if (values.advanceExact(docId)) { + previous.clear(); + // SortedBinaryDocValues don't guarantee uniqueness so we + // need to take care of dups + for (int i = 0; i < values.docValueCount(); ++i) { + BytesRef bytes = values.nextValue(); + if (i > 0 && previous.get().equals(bytes)) { + continue; + } + collectValue(bytes, docId, owningBucketOrd, sub); + previous.copyBytes(bytes); } } + } }; } + private LeafBucketCollector getLeafCollector(BinaryDocValues values, LeafBucketCollector sub) { + return new LeafBucketCollectorBase(sub, values) { + @Override + public void collect(int docId, long owningBucketOrd) throws IOException { + if (values.advanceExact(docId)) { + collectValue(values.binaryValue(), docId, owningBucketOrd, sub); + } + } + }; + } + + private void collectValue(BytesRef val, int doc, long owningBucketOrd, LeafBucketCollector sub) throws IOException { + if (filter == null || filter.accept(val)) { 
+ long bucketOrdinal = bucketOrds.add(owningBucketOrd, val); + if (bucketOrdinal < 0) { // already seen + bucketOrdinal = -1 - bucketOrdinal; + collectExistingBucket(sub, doc, bucketOrdinal); + } else { + collectBucket(sub, doc, bucketOrdinal); + } + } + } + @Override public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { /*