Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Oak shared config #196

Merged
merged 9 commits into from
Jan 5, 2022
Merged
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@


public class OakBenchHash extends BenchOakMap {
private NativeMemoryAllocator ma;
private OakHashMap<BenchKey, BenchValue> oakHash;

public OakBenchHash(KeyGenerator keyGen, ValueGenerator valueGen) {
Expand All @@ -26,10 +25,6 @@ public OakBenchHash(KeyGenerator keyGen, ValueGenerator valueGen) {
/** {@inheritDoc} **/
@Override
public void init() {
ma = new NativeMemoryAllocator(OAK_MAX_OFF_MEMORY);
if (Parameters.confDetailedStats) {
ma.collectStats();
}
OakMapBuilder<BenchKey, BenchValue> builder = new OakMapBuilder<>(keyGen, keyGen, valueGen, minKey)
// 2048 * 8 = 16384 (2^14) entries in each chunk, each entry takes 24 bytes, each chunk requires
// approximately 393216 bytes ~= 393KB ~= 0.4 MB
Expand All @@ -40,7 +35,7 @@ public void init() {
// 2^28 * 24 = 6442450944 bytes ~= 6442451 KB ~= 6442 MB ~= 6.5 GB
.setPreallocHashChunksNum(Parameters.confSmallFootprint ? FirstLevelHashArray.HASH_CHUNK_NUM_DEFAULT
: FirstLevelHashArray.HASH_CHUNK_NUM_DEFAULT * 16)
.setMemoryAllocator(ma);
.setMemoryCapacity(OAK_MAX_OFF_MEMORY);
// capable to keep 2^28 keys
oakHash = builder.buildHashMap();
}
Expand All @@ -49,7 +44,6 @@ public void init() {
@Override
public void close() {
super.close();
ma = null;
oakHash = null;
}

Expand All @@ -76,14 +70,4 @@ public boolean ascendOak(BenchKey from, int length, Blackhole blackhole) {
// Descending iteration over a hash-based map is not meaningful (no key ordering);
// explicitly unsupported until hash iterators are implemented.
public boolean descendOak(BenchKey from, int length, Blackhole blackhole) {
throw new UnsupportedOperationException("ALL ITERATORS ARE NOT YET SUPPORTED FOR HASH");
}

/** {@inheritDoc} **/
@Override
public void printMemStats() {
NativeMemoryAllocator.Stats stats = ma.getStats();
System.out.printf("\tReleased buffers: \t\t%d\n", stats.releasedBuffers);
System.out.printf("\tReleased bytes: \t\t%d\n", stats.releasedBytes);
System.out.printf("\tReclaimed buffers: \t\t%d\n", stats.reclaimedBuffers);
System.out.printf("\tReclaimed bytes: \t\t%d\n", stats.reclaimedBytes);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,10 @@
import com.yahoo.oak.synchrobench.contention.abstractions.BenchValue;
import com.yahoo.oak.synchrobench.contention.abstractions.KeyGenerator;
import com.yahoo.oak.synchrobench.contention.abstractions.ValueGenerator;
import com.yahoo.oak.synchrobench.contention.benchmark.Parameters;
import com.yahoo.oak.synchrobench.maps.BenchOakMap;
import org.openjdk.jmh.infra.Blackhole;

public class OakBenchMap extends BenchOakMap {
private NativeMemoryAllocator ma;
private OakMap<BenchKey, BenchValue> oak;

public OakBenchMap(KeyGenerator keyGen, ValueGenerator valueGen) {
Expand All @@ -25,21 +23,16 @@ public OakBenchMap(KeyGenerator keyGen, ValueGenerator valueGen) {
/** {@inheritDoc} **/
@Override
public void init() {
ma = new NativeMemoryAllocator(OAK_MAX_OFF_MEMORY);
if (Parameters.confDetailedStats) {
ma.collectStats();
}
OakMapBuilder<BenchKey, BenchValue> builder = new OakMapBuilder<>(keyGen, keyGen, valueGen, minKey)
.setOrderedChunkMaxItems(OrderedChunk.ORDERED_CHUNK_MAX_ITEMS_DEFAULT)
.setMemoryAllocator(ma);
.setMemoryCapacity(OAK_MAX_OFF_MEMORY);
oak = builder.buildOrderedMap();
}

/** {@inheritDoc} **/
@Override
public void close() {
super.close();
ma = null;
oak = null;
}

Expand Down Expand Up @@ -80,14 +73,4 @@ public boolean descendOak(BenchKey from, int length, Blackhole blackhole) {

return result;
}

/** {@inheritDoc} **/
@Override
public void printMemStats() {
NativeMemoryAllocator.Stats stats = ma.getStats();
System.out.printf("\tReleased buffers: \t\t%d\n", stats.releasedBuffers);
System.out.printf("\tReleased bytes: \t\t%d\n", stats.releasedBytes);
System.out.printf("\tReclaimed buffers: \t\t%d\n", stats.reclaimedBuffers);
System.out.printf("\tReclaimed bytes: \t\t%d\n", stats.reclaimedBytes);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -107,10 +107,4 @@ public interface CompositionalMap {
// Default implementation: returns Float.NaN to signal that this map
// implementation does not report its off-heap (non-heap) allocation size.
default float nonHeapAllocatedGB() {
return Float.NaN;
}

/**
* Prints memory statistics for debugging.
*/
default void printMemStats() {
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -210,10 +210,6 @@ public void iteration(int iteration) throws Exception {
if (!isWarmup) {
stats[iteration] = s;
s.printStats();

if (Parameters.confDetailedStats) {
oakBench.printMemStats();
}
}
} finally {
// Release the benchmark resources.
Expand Down
18 changes: 12 additions & 6 deletions core/src/main/java/com/yahoo/oak/BasicChunk.java
Original file line number Diff line number Diff line change
Expand Up @@ -21,27 +21,34 @@ enum State {


/*-------------- Members --------------*/
// to compare serilized and object keys
protected final OakSharedConfig<K, V> config;

// to compare serialized and object keys
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Isn't comparator part of the config?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Each instance holds its own references to the "config items" it needs.
I implemented it that way to avoid another pointer dereference each time we access these elements.
In addition, the config object is stored here to be passed to the child Chunk instance.

protected OakComparator<K> comparator;

// in split/compact process, represents parent of split (can be null!)
private final AtomicReference<BasicChunk<K, V>> creator;
// chunk can be in the following states: normal, frozen or infant(has a creator)
private final AtomicReference<State> state;
private final AtomicReference<Rebalancer<K, V>> rebalancer;
private final AtomicInteger pendingOps;
private final int maxItems;
protected final int maxItems;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why not to use getMaxItems()?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It seems more natural to me to access internal final fields directly. I don't see why we need a function call here.
For the same reason, we access externalSize and statistics directly.

protected AtomicInteger externalSize; // for updating oak's size (reference to one global per Oak size)
protected final Statistics statistics;

/*-------------- Constructors and creators --------------*/
/**
* This constructor is only used internally to instantiate a BasicChunk without a creator and a state.
* The caller should set the creator and state before returning the BasicChunk to the user.
*
* @param config shared configuration
* @param maxItems maximal capacity
*/
protected BasicChunk(int maxItems, AtomicInteger externalSize, OakComparator<K> comparator) {
protected BasicChunk(OakSharedConfig<K, V> config, int maxItems) {
this.config = config;
this.maxItems = maxItems;
this.externalSize = externalSize;
this.comparator = comparator;
this.externalSize = config.size;
this.comparator = config.comparator;
this.creator = new AtomicReference<>(null);
this.state = new AtomicReference<>(State.NORMAL);
this.pendingOps = new AtomicInteger();
Expand All @@ -55,7 +62,6 @@ protected BasicChunk(int maxItems, AtomicInteger externalSize, OakComparator<K>
protected void updateBasicChild(BasicChunk<K, V> child) {
child.creator.set(this);
child.state.set(State.INFANT);
return;
}
/*---------------Abstract Read methods -----------------------*/
abstract void readKey(ThreadContext ctx);
Expand Down
32 changes: 15 additions & 17 deletions core/src/main/java/com/yahoo/oak/EntryArray.java
Original file line number Diff line number Diff line change
Expand Up @@ -58,40 +58,38 @@ public class EntryArray<K, V> {
protected static final int VALUE_REF_OFFSET = 1;
static final int INVALID_ENTRY_INDEX = -1;

final MemoryManager valuesMemoryManager;
final MemoryManager keysMemoryManager;
final MemoryManager valuesMemoryManager;

// for writing the keys into the off-heap
liran-funaro marked this conversation as resolved.
Show resolved Hide resolved
final OakSerializer<K> keySerializer;
final OakSerializer<V> valueSerializer;

private final long[] entries; // array is initialized to 0 - this is important!
private final int fields; // # of primitive fields in each item of entries array

final int entriesCapacity; // number of entries (not longs) to be maximally held

// Counts number of entries inserted & not deleted. Pay attention that not all entries (counted
// in number of entries) are finally are finally considered existing by the OrderedChunk above
// in number of entries) are finally considered existing by the OrderedChunk above
liran-funaro marked this conversation as resolved.
Show resolved Hide resolved
// and participating in holding the "real" KV-mappings, the "real" are counted in OrderedChunk
protected final AtomicInteger numOfEntries;

// for writing the keys into the off-heap
final OakSerializer<K> keySerializer;
final OakSerializer<V> valueSerializer;

/**
* Create a new instance
* @param vMM for values off-heap allocations and releases
* @param kMM off-heap allocations and releases for keys
* @param config shared configuration
* @param additionalFieldCount number of additional fields
* @param entriesCapacity how many entries should this instance keep at maximum
* @param keySerializer used to serialize the key when written to off-heap
*/
EntryArray(MemoryManager vMM, MemoryManager kMM, int additionalFieldCount, int entriesCapacity,
OakSerializer<K> keySerializer,
OakSerializer<V> valueSerializer) {
this.valuesMemoryManager = vMM;
this.keysMemoryManager = kMM;
EntryArray(OakSharedConfig<K, V> config, int additionalFieldCount, int entriesCapacity) {
this.keysMemoryManager = config.keysMemoryManager;
this.valuesMemoryManager = config.valuesMemoryManager;
this.keySerializer = config.keySerializer;
this.valueSerializer = config.valueSerializer;

this.fields = additionalFieldCount + 2; // +2 for key and value references that always exist
this.entries = new long[entriesCapacity * this.fields];
this.numOfEntries = new AtomicInteger(0);
this.entriesCapacity = entriesCapacity;
this.keySerializer = keySerializer;
this.valueSerializer = valueSerializer;
}

/**
Expand Down
12 changes: 4 additions & 8 deletions core/src/main/java/com/yahoo/oak/EntryHashSet.java
Original file line number Diff line number Diff line change
Expand Up @@ -86,16 +86,12 @@ class EntryHashSet<K, V> extends EntryArray<K, V> {

/*----------------- Constructor -------------------*/
/**
* Create a new EntryHashSet
* @param vMM for values off-heap allocations and releases
* @param kMM off-heap allocations and releases for keys
* @param config shared configuration
* @param entriesCapacity how many entries should this EntryOrderedSet keep at maximum
* @param keySerializer used to serialize the key when written to off-heap
*/
EntryHashSet(MemoryManager vMM, MemoryManager kMM, int entriesCapacity, OakSerializer<K> keySerializer,
OakSerializer<V> valueSerializer, OakComparator<K> comparator) {
super(vMM, kMM, ADDITIONAL_FIELDS, entriesCapacity, keySerializer, valueSerializer);
this.comparator = comparator;
EntryHashSet(OakSharedConfig<K, V> config, int entriesCapacity) {
super(config, ADDITIONAL_FIELDS, entriesCapacity);
// keep a direct reference to the shared comparator to avoid an extra
// dereference through config on every key comparison
this.comparator = config.comparator;
}

/*---------- Private methods for managing key hash entry field -------------*/
Expand Down
11 changes: 4 additions & 7 deletions core/src/main/java/com/yahoo/oak/EntryOrderedSet.java
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ class EntryOrderedSet<K, V> extends EntryArray<K, V> {
private static final int ADDITIONAL_FIELDS = 1; // # of primitive fields in each item of entries array

// location of the first (head) node
private AtomicInteger headEntryIndex = new AtomicInteger(INVALID_ENTRY_INDEX);
private final AtomicInteger headEntryIndex = new AtomicInteger(INVALID_ENTRY_INDEX);

// points to next free index of entry array, counted in "entries" and not in integers
private final AtomicInteger nextFreeIndex;
Expand All @@ -73,14 +73,11 @@ class EntryOrderedSet<K, V> extends EntryArray<K, V> {

/**
* Create a new EntryOrderedSet
* @param vMM for values off-heap allocations and releases
* @param kMM off-heap allocations and releases for keys
* @param config shared configuration
* @param entriesCapacity how many entries should this EntryOrderedSet keep at maximum
* @param keySerializer used to serialize the key when written to off-heap
*/
EntryOrderedSet(MemoryManager vMM, MemoryManager kMM, int entriesCapacity, OakSerializer<K> keySerializer,
OakSerializer<V> valueSerializer) {
super(vMM, kMM, ADDITIONAL_FIELDS, entriesCapacity, keySerializer, valueSerializer);
EntryOrderedSet(OakSharedConfig<K, V> config, int entriesCapacity) {
super(config, ADDITIONAL_FIELDS, entriesCapacity);
// next free slot in the entries array, counted in entries (not longs)
this.nextFreeIndex = new AtomicInteger( 0);
}

Expand Down
11 changes: 5 additions & 6 deletions core/src/main/java/com/yahoo/oak/FirstLevelHashArray.java
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,8 @@ class FirstLevelHashArray<K, V> {
* num of pointers are going to point to another chunk and so on.
* Other creation patterns (like all the hash array entries referencing the same chunk) are possible.
* Majority of the parameters are needed for chunk creation.
* @param msbForFirstLevelHash number of most significant bits to be used to calculate the
* @param config shared configuration
* @param msbForFirstLevelHash number of most significant bits to be used to calculate the
* index in the hash array. It also affects directly the size of the
* array. It is going to be incorporated inside hashIndexCodec and
* transferred to chunks.
Expand All @@ -51,9 +52,8 @@ class FirstLevelHashArray<K, V> {
* different hashIndexCodecs are created. If FirstLevelHashArray size changes,
hashIndexCodec is changed, but not necessarily hashIndexCodecForChunk, and vice versa
*/
FirstLevelHashArray(int msbForFirstLevelHash, int lsbForSecondLevelHash, AtomicInteger externalSize,
MemoryManager vMM, MemoryManager kMM, OakComparator<K> comparator,
OakSerializer<K> keySerializer, OakSerializer<V> valueSerializer, int multipleReferenceNum) {
FirstLevelHashArray(OakSharedConfig<K, V> config,
int msbForFirstLevelHash, int lsbForSecondLevelHash, int multipleReferenceNum) {

// the key hash (int) separation between MSB for first level and LSB for second level,
// to be used by hash array and all chunks, until resize
Expand All @@ -79,8 +79,7 @@ class FirstLevelHashArray<K, V> {
// initiate chunks
for (int i = 0; i < chunks.length(); i++) {
if (currentSameRefer == multipleReferenceNum) {
c = new HashChunk<>(chunkSize, externalSize, vMM, kMM, comparator,
keySerializer, valueSerializer, hashIndexCodecForChunk);
c = new HashChunk<>(config, chunkSize, hashIndexCodecForChunk);
}
this.chunks.lazySet(i, c);
currentSameRefer--;
Expand Down
24 changes: 7 additions & 17 deletions core/src/main/java/com/yahoo/oak/HashChunk.java
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,6 @@

package com.yahoo.oak;

import java.util.concurrent.atomic.AtomicInteger;

class HashChunk<K, V> extends BasicChunk<K, V> {
// defaults
public static final int HASH_CHUNK_MAX_ITEMS_DEFAULT = 2048; //2^11
Expand All @@ -26,7 +24,7 @@ class HashChunk<K, V> extends BasicChunk<K, V> {
/**
* This constructor is only used when creating the first ever chunk/chunks (without a creator).
* The caller might set the creator before returning the HashChunk to the user.
*
* @param config shared configuration
* @param maxItems is the size of the entries array (not all the entries are going to be in use)
IMPORTANT!: it should preferably be a power of two;
otherwise, the remaining entries are going to be wasted
Expand All @@ -40,28 +38,20 @@ class HashChunk<K, V> extends BasicChunk<K, V> {
* @param hashIndexCodec the codec initiated with the right amount of the least significant bits,
* to be used to get index from the key hash
*/
HashChunk(int maxItems, AtomicInteger externalSize, MemoryManager vMM, MemoryManager kMM,
OakComparator<K> comparator, OakSerializer<K> keySerializer,
OakSerializer<V> valueSerializer, UnionCodec hashIndexCodec) {

super(maxItems, externalSize, comparator);
assert Math.pow( 2, hashIndexCodec.getFirstBitSize() ) <= maxItems ;
HashChunk(OakSharedConfig<K, V> config, int maxItems, UnionCodec hashIndexCodec) {
super(config, maxItems);
assert Math.pow( 2, hashIndexCodec.getFirstBitSize() ) <= maxItems;

this.hashIndexCodec = hashIndexCodec;
this.entryHashSet = // must be called after setSecondLevelBitsThreshold
new EntryHashSet<>(vMM, kMM, getMaxItems(), keySerializer, valueSerializer,
comparator);
// must be called after setSecondLevelBitsThreshold
liran-funaro marked this conversation as resolved.
Show resolved Hide resolved
this.entryHashSet = new EntryHashSet<>(config, maxItems);
}

/**
* Create a child HashChunk where this HashChunk object as its creator.
*/
HashChunk<K, V> createChild(UnionCodec hashIndexCodec) {
HashChunk<K, V> child =
new HashChunk<>(getMaxItems(), externalSize,
entryHashSet.valuesMemoryManager, entryHashSet.keysMemoryManager,
comparator, entryHashSet.keySerializer, entryHashSet.valueSerializer,
hashIndexCodec);
HashChunk<K, V> child = new HashChunk<>(config, maxItems, hashIndexCodec);
updateBasicChild(child);
return child;
}
Expand Down
Loading