Skip to content

Commit

Permalink
Renamed FastLongHash to ReorganizingLongHash and updated the default initial capacity
Browse files Browse the repository at this point in the history

Signed-off-by: Ketan Verma <[email protected]>
  • Loading branch information
ketanv3 committed Jun 16, 2023
1 parent 4e86707 commit bc6f294
Show file tree
Hide file tree
Showing 4 changed files with 21 additions and 17 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
import org.opensearch.common.lease.Releasable;
import org.opensearch.core.common.lease.Releasable;

import java.util.Random;
import java.util.concurrent.TimeUnit;
Expand Down Expand Up @@ -48,7 +48,7 @@ public void add(Blackhole bh, HashTableOptions tableOpts, WorkloadOptions worklo
@State(Scope.Benchmark)
public static class HashTableOptions {

@Param({ "LongHash", "FastLongHash" })
@Param({ "LongHash", "ReorganizingLongHash" })
public String type;

@Param({ "1" })
Expand All @@ -65,8 +65,8 @@ public void setup() {
case "LongHash":
supplier = this::newLongHash;
break;
case "FastLongHash":
supplier = this::newFastLongHash;
case "ReorganizingLongHash":
supplier = this::newReorganizingLongHash;
break;
default:
throw new IllegalArgumentException("invalid hash table type: " + type);
Expand All @@ -93,9 +93,13 @@ public void close() {
};
}

private HashTable newFastLongHash() {
private HashTable newReorganizingLongHash() {
return new HashTable() {
private final FastLongHash table = new FastLongHash(initialCapacity, loadFactor, BigArrays.NON_RECYCLING_INSTANCE);
private final ReorganizingLongHash table = new ReorganizingLongHash(
initialCapacity,
loadFactor,
BigArrays.NON_RECYCLING_INSTANCE
);

@Override
public long add(long key) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
package org.opensearch.common.util;

import org.apache.lucene.util.hppc.BitMixer;
import org.opensearch.common.lease.Releasable;
import org.opensearch.core.common.lease.Releasable;

/**
* Specialized hash table implementation that maps a (primitive) long to long.
Expand All @@ -25,9 +25,9 @@
*
* @opensearch.internal
*/
public class FastLongHash implements Releasable {
public class ReorganizingLongHash implements Releasable {
private static final long MAX_CAPACITY = 1L << 32;
private static final long DEFAULT_INITIAL_CAPACITY = 1;
private static final long DEFAULT_INITIAL_CAPACITY = 32;
private static final float DEFAULT_LOAD_FACTOR = 0.6f;

/**
Expand Down Expand Up @@ -99,11 +99,11 @@ public class FastLongHash implements Releasable {
private static final long MASK_PSL = 0x7FFF000000000000L; // extract PSL
private static final long INCR_PSL = 0x0001000000000000L; // increment PSL by one

public FastLongHash(final BigArrays bigArrays) {
public ReorganizingLongHash(final BigArrays bigArrays) {
this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, bigArrays);
}

public FastLongHash(long initialCapacity, final float loadFactor, final BigArrays bigArrays) {
public ReorganizingLongHash(final long initialCapacity, final float loadFactor, final BigArrays bigArrays) {
assert initialCapacity > 0 : "initial capacity must be greater than 0";
assert loadFactor > 0 && loadFactor < 1 : "load factor must be between 0 and 1";

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@
package org.opensearch.search.aggregations.bucket.terms;

import org.opensearch.common.util.BigArrays;
import org.opensearch.common.util.FastLongHash;
import org.opensearch.common.util.ReorganizingLongHash;
import org.opensearch.common.util.LongLongHash;
import org.opensearch.core.common.lease.Releasable;
import org.opensearch.search.aggregations.CardinalityUpperBound;
Expand Down Expand Up @@ -148,10 +148,10 @@ public long value() {
* @opensearch.internal
*/
public static class FromSingle extends LongKeyedBucketOrds {
private final FastLongHash ords;
private final ReorganizingLongHash ords;

public FromSingle(BigArrays bigArrays) {
ords = new FastLongHash(bigArrays);
ords = new ReorganizingLongHash(bigArrays);
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,13 +13,13 @@
import java.util.HashMap;
import java.util.Map;

public class FastLongHashTests extends OpenSearchTestCase {
public class ReorganizingLongHashTests extends OpenSearchTestCase {

public void testFuzzy() {
Map<Long, Long> reference = new HashMap<>();

try (
FastLongHash h = new FastLongHash(
ReorganizingLongHash h = new ReorganizingLongHash(
randomIntBetween(1, 100), // random capacity
0.6f + randomFloat() * 0.39f, // random load factor to verify collision resolution
BigArrays.NON_RECYCLING_INSTANCE
Expand Down Expand Up @@ -61,7 +61,7 @@ public void testFuzzy() {
}

public void testRearrangement() {
try (FastLongHash h = new FastLongHash(4, 0.6f, BigArrays.NON_RECYCLING_INSTANCE) {
try (ReorganizingLongHash h = new ReorganizingLongHash(4, 0.6f, BigArrays.NON_RECYCLING_INSTANCE) {
/**
* Overriding with an "identity" hash function to make it easier to reason about the placement
* of values in the hash table. The backing array of the hash table will have a size (8),
Expand Down

0 comments on commit bc6f294

Please sign in to comment.