Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Oak shared config #196

Merged
merged 9 commits into from
Jan 5, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@


public class OakBenchHash extends BenchOakMap {
private NativeMemoryAllocator ma;
private OakHashMap<BenchKey, BenchValue> oakHash;

public OakBenchHash(KeyGenerator keyGen, ValueGenerator valueGen) {
Expand All @@ -26,10 +25,6 @@ public OakBenchHash(KeyGenerator keyGen, ValueGenerator valueGen) {
/** {@inheritDoc} **/
@Override
public void init() {
ma = new NativeMemoryAllocator(OAK_MAX_OFF_MEMORY);
if (Parameters.confDetailedStats) {
ma.collectStats();
}
OakMapBuilder<BenchKey, BenchValue> builder = new OakMapBuilder<>(keyGen, keyGen, valueGen, minKey)
// 2048 * 8 = 16384 (2^14) entries in each chunk, each entry takes 24 bytes, each chunk requires
// approximately 393216 bytes ~= 393KB ~= 0.4 MB
Expand All @@ -40,7 +35,7 @@ public void init() {
// 2^28 * 24 = 6442450944 bytes ~= 6442451 KB ~= 6442 MB ~= 6.5 GB
.setPreallocHashChunksNum(Parameters.confSmallFootprint ? FirstLevelHashArray.HASH_CHUNK_NUM_DEFAULT
: FirstLevelHashArray.HASH_CHUNK_NUM_DEFAULT * 16)
.setMemoryAllocator(ma);
.setMemoryCapacity(OAK_MAX_OFF_MEMORY);
// capable to keep 2^28 keys
oakHash = builder.buildHashMap();
}
Expand All @@ -49,7 +44,6 @@ public void init() {
@Override
public void close() {
super.close();
// Drop references to the off-heap allocator and the hash map so they can
// be reclaimed between benchmark iterations; init() re-creates them.
ma = null;
oakHash = null;
}

Expand All @@ -76,14 +70,4 @@ public boolean ascendOak(BenchKey from, int length, Blackhole blackhole) {
public boolean descendOak(BenchKey from, int length, Blackhole blackhole) {
// Ordered (descending) iteration is not implemented for the hash variant;
// callers must not request range scans on OakBenchHash.
throw new UnsupportedOperationException("ALL ITERATORS ARE NOT YET SUPPORTED FOR HASH");
}

/** {@inheritDoc} **/
@Override
public void printMemStats() {
// Snapshot the allocator's release/reclaim counters and dump them to stdout.
// Only meaningful when stats collection was enabled in init().
NativeMemoryAllocator.Stats stats = ma.getStats();
System.out.printf("\tReleased buffers: \t\t%d\n", stats.releasedBuffers);
System.out.printf("\tReleased bytes: \t\t%d\n", stats.releasedBytes);
System.out.printf("\tReclaimed buffers: \t\t%d\n", stats.reclaimedBuffers);
System.out.printf("\tReclaimed bytes: \t\t%d\n", stats.reclaimedBytes);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,10 @@
import com.yahoo.oak.synchrobench.contention.abstractions.BenchValue;
import com.yahoo.oak.synchrobench.contention.abstractions.KeyGenerator;
import com.yahoo.oak.synchrobench.contention.abstractions.ValueGenerator;
import com.yahoo.oak.synchrobench.contention.benchmark.Parameters;
import com.yahoo.oak.synchrobench.maps.BenchOakMap;
import org.openjdk.jmh.infra.Blackhole;

public class OakBenchMap extends BenchOakMap {
private NativeMemoryAllocator ma;
private OakMap<BenchKey, BenchValue> oak;

public OakBenchMap(KeyGenerator keyGen, ValueGenerator valueGen) {
Expand All @@ -25,21 +23,16 @@ public OakBenchMap(KeyGenerator keyGen, ValueGenerator valueGen) {
/** {@inheritDoc} **/
@Override
public void init() {
ma = new NativeMemoryAllocator(OAK_MAX_OFF_MEMORY);
if (Parameters.confDetailedStats) {
ma.collectStats();
}
OakMapBuilder<BenchKey, BenchValue> builder = new OakMapBuilder<>(keyGen, keyGen, valueGen, minKey)
.setOrderedChunkMaxItems(OrderedChunk.ORDERED_CHUNK_MAX_ITEMS_DEFAULT)
.setMemoryAllocator(ma);
.setMemoryCapacity(OAK_MAX_OFF_MEMORY);
oak = builder.buildOrderedMap();
}

/** {@inheritDoc} **/
@Override
public void close() {
super.close();
// Drop references to the off-heap allocator and the ordered map so they
// can be reclaimed between benchmark iterations; init() re-creates them.
ma = null;
oak = null;
}

Expand Down Expand Up @@ -80,14 +73,4 @@ public boolean descendOak(BenchKey from, int length, Blackhole blackhole) {

return result;
}

/** {@inheritDoc} **/
@Override
public void printMemStats() {
// Snapshot the allocator's release/reclaim counters and dump them to stdout.
// Only meaningful when stats collection was enabled in init().
NativeMemoryAllocator.Stats stats = ma.getStats();
System.out.printf("\tReleased buffers: \t\t%d\n", stats.releasedBuffers);
System.out.printf("\tReleased bytes: \t\t%d\n", stats.releasedBytes);
System.out.printf("\tReclaimed buffers: \t\t%d\n", stats.reclaimedBuffers);
System.out.printf("\tReclaimed bytes: \t\t%d\n", stats.reclaimedBytes);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -107,10 +107,4 @@ public interface CompositionalMap {
/**
 * Reports the amount of non-heap (off-heap) memory allocated by the map, in GB.
 * The default implementation does not track off-heap usage and returns
 * {@link Float#NaN} to signal that the statistic is unavailable.
 */
default float nonHeapAllocatedGB() {
return Float.NaN;
}

/**
 * Prints memory statistics for debugging.
 * The default implementation is a no-op, for map implementations that do
 * not collect memory statistics.
 */
default void printMemStats() {
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -210,10 +210,6 @@ public void iteration(int iteration) throws Exception {
if (!isWarmup) {
stats[iteration] = s;
s.printStats();

if (Parameters.confDetailedStats) {
oakBench.printMemStats();
}
}
} finally {
// Release the benchmark resources.
Expand Down
16 changes: 8 additions & 8 deletions core/src/main/java/com/yahoo/oak/BasicChunk.java
Original file line number Diff line number Diff line change
Expand Up @@ -21,27 +21,28 @@ enum State {


/*-------------- Members --------------*/
// to compare serialized and object keys
protected OakComparator<K> comparator;
protected final OakSharedConfig<K, V> config;

// in split/compact process, represents parent of split (can be null!)
private final AtomicReference<BasicChunk<K, V>> creator;
// chunk can be in the following states: normal, frozen or infant(has a creator)
private final AtomicReference<State> state;
private final AtomicReference<Rebalancer<K, V>> rebalancer;
private final AtomicInteger pendingOps;
private final int maxItems;
protected AtomicInteger externalSize; // for updating oak's size (reference to one global per Oak size)
protected final int maxItems;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why not to use getMaxItems()?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It seems more natural to me to access internal final fields directly. I don't see why we need a function call here.
For the same reason, we access externalSize and statistics directly.

protected final Statistics statistics;

/*-------------- Constructors and creators --------------*/
/**
* This constructor is only used internally to instantiate a BasicChunk without a creator and a state.
* The caller should set the creator and state before returning the BasicChunk to the user.
*
* @param config shared configuration
* @param maxItems maximal capacity
*/
protected BasicChunk(int maxItems, AtomicInteger externalSize, OakComparator<K> comparator) {
protected BasicChunk(OakSharedConfig<K, V> config, int maxItems) {
this.config = config;
this.maxItems = maxItems;
this.externalSize = externalSize;
this.comparator = comparator;
this.creator = new AtomicReference<>(null);
this.state = new AtomicReference<>(State.NORMAL);
this.pendingOps = new AtomicInteger();
Expand All @@ -55,7 +56,6 @@ protected BasicChunk(int maxItems, AtomicInteger externalSize, OakComparator<K>
/**
 * Links a newly created child chunk back to this chunk (its creator) and
 * marks the child as an infant, i.e., not yet published as a normal chunk.
 *
 * @param child the child chunk produced by this chunk (e.g., during split/rebalance)
 */
protected void updateBasicChild(BasicChunk<K, V> child) {
child.creator.set(this);
child.state.set(State.INFANT);
// NOTE(review): removed a redundant trailing `return;` — it is dead weight
// at the end of a void method and changes nothing.
}
/*---------------Abstract Read methods -----------------------*/
abstract void readKey(ThreadContext ctx);
Expand Down
45 changes: 17 additions & 28 deletions core/src/main/java/com/yahoo/oak/EntryArray.java
Original file line number Diff line number Diff line change
Expand Up @@ -58,40 +58,29 @@ public class EntryArray<K, V> {
protected static final int VALUE_REF_OFFSET = 1;
static final int INVALID_ENTRY_INDEX = -1;

final MemoryManager valuesMemoryManager;
final MemoryManager keysMemoryManager;
final OakSharedConfig<K, V> config;

private final long[] entries; // array is initialized to 0 - this is important!
private final int fields; // # of primitive fields in each item of entries array

final int entriesCapacity; // number of entries (not longs) to be maximally held

// Counts number of entries inserted & not deleted. Pay attention that not all entries (counted
// in number of entries) are finally considered existing by the OrderedChunk above
// and participating in holding the "real" KV-mappings, the "real" are counted in OrderedChunk
// in number of entries) are finally considered existing by the Chunk above
// and participating in holding the "real" KV-mappings, the "real" are counted in Chunk
protected final AtomicInteger numOfEntries;

// for writing the keys into the off-heap
final OakSerializer<K> keySerializer;
final OakSerializer<V> valueSerializer;

/**
* Create a new instance
* @param vMM for values off-heap allocations and releases
* @param kMM off-heap allocations and releases for keys
* @param config shared configuration
* @param additionalFieldCount number of additional fields
* @param entriesCapacity how many entries should this instance keep at maximum
* @param keySerializer used to serialize the key when written to off-heap
*/
EntryArray(MemoryManager vMM, MemoryManager kMM, int additionalFieldCount, int entriesCapacity,
OakSerializer<K> keySerializer,
OakSerializer<V> valueSerializer) {
this.valuesMemoryManager = vMM;
this.keysMemoryManager = kMM;
EntryArray(OakSharedConfig<K, V> config, int additionalFieldCount, int entriesCapacity) {
this.config = config;
this.fields = additionalFieldCount + 2; // +2 for key and value references that always exist
this.entries = new long[entriesCapacity * this.fields];
this.numOfEntries = new AtomicInteger(0);
this.entriesCapacity = entriesCapacity;
this.keySerializer = keySerializer;
this.valueSerializer = valueSerializer;
}

/**
Expand Down Expand Up @@ -269,7 +258,7 @@ protected void copyEntriesFrom(EntryArray<K, V> other, int srcEntryIdx, int dest
* */
boolean isValueRefValidAndNotDeleted(int ei) {
long valRef = getValueReference(ei);
return valuesMemoryManager.isReferenceValidAndNotDeleted(valRef);
return config.valuesMemoryManager.isReferenceValidAndNotDeleted(valRef);
}

/**
Expand Down Expand Up @@ -381,7 +370,7 @@ void readKey(ThreadContext ctx) {
void readValue(ThreadContext ctx) {
readValue(ctx.value, ctx.entryIndex);
ctx.entryState = getValueState(ctx.value);
assert valuesMemoryManager.isReferenceConsistent(ctx.value.getSlice().getReference());
assert config.valuesMemoryManager.isReferenceConsistent(ctx.value.getSlice().getReference());
}

/**
Expand All @@ -395,13 +384,13 @@ private EntryState getValueState(ValueBuffer value) {
// remove: (1)off-heap delete bit, (2)reference deleted
// middle state: off-heap header marked deleted, but valid reference

if (!valuesMemoryManager.isReferenceValid(value.getSlice().getReference())) {
if (!config.valuesMemoryManager.isReferenceValid(value.getSlice().getReference())) {
// if there is no value associated with given key,
// the value of this entry was never yet allocated
return EntryState.UNKNOWN;
}

if (valuesMemoryManager.isReferenceDeleted(value.getSlice().getReference())) {
if (config.valuesMemoryManager.isReferenceDeleted(value.getSlice().getReference())) {
// if value is valid the reference can still be deleted
return EntryState.DELETED;
}
Expand All @@ -421,10 +410,10 @@ private EntryState getValueState(ValueBuffer value) {
* @param keyBuffer the off-heap KeyBuffer to update with the new allocation
*/
void writeKey(K key, KeyBuffer keyBuffer) {
int keySize = keySerializer.calculateSize(key);
int keySize = config.keySerializer.calculateSize(key);
keyBuffer.getSlice().allocate(keySize, false);
assert keyBuffer.isAssociated();
ScopedWriteBuffer.serialize(keyBuffer.getSlice(), key, keySerializer);
ScopedWriteBuffer.serialize(keyBuffer.getSlice(), key, config.keySerializer);
}

/**
Expand All @@ -440,14 +429,14 @@ void writeKey(K key, KeyBuffer keyBuffer) {
void allocateValue(ThreadContext ctx, V value, boolean writeForMove) {

// the length of the given value plus its header
int valueDataSize = valueSerializer.calculateSize(value);
int valueDataSize = config.valueSerializer.calculateSize(value);

// The allocated slice includes all the needed information for further access,
// the reference is set in the slice as part of the allocation
ctx.newValue.getSlice().allocate(valueDataSize, writeForMove);
ctx.isNewValueForMove = writeForMove;

ScopedWriteBuffer.serialize(ctx.newValue.getSlice(), value, valueSerializer);
ScopedWriteBuffer.serialize(ctx.newValue.getSlice(), value, config.valueSerializer);
}

/**
Expand All @@ -466,7 +455,7 @@ ValueUtils.ValueResult writeValueCommit(ThreadContext ctx) {

long oldValueReference = ctx.value.getSlice().getReference();
long newValueReference = ctx.newValue.getSlice().getReference();
assert valuesMemoryManager.isReferenceValid(newValueReference);
assert config.valuesMemoryManager.isReferenceValid(newValueReference);

if (!casValueReference(ctx.entryIndex, oldValueReference, newValueReference)) {
return ValueUtils.ValueResult.FALSE;
Expand Down
31 changes: 12 additions & 19 deletions core/src/main/java/com/yahoo/oak/EntryHashSet.java
Original file line number Diff line number Diff line change
Expand Up @@ -75,8 +75,6 @@ class EntryHashSet<K, V> extends EntryArray<K, V> {
// number of entries candidates to try in case of collision
private final AtomicInteger collisionChainLength = new AtomicInteger(DEFAULT_COLLISION_CHAIN_LENGTH);

private final OakComparator<K> comparator;

// use union codec to encode key hash integer (first) with its update counter (second)
private static final UnionCodec HASH_CODEC = new UnionCodec(
KEY_HASH_BITS, // bits# to represent key hash as any integer
Expand All @@ -86,16 +84,11 @@ class EntryHashSet<K, V> extends EntryArray<K, V> {

/*----------------- Constructor -------------------*/
/**
* Create a new EntryHashSet
* @param vMM for values off-heap allocations and releases
* @param kMM off-heap allocations and releases for keys
* @param config shared configuration
* @param entriesCapacity how many entries should this EntryOrderedSet keep at maximum
* @param keySerializer used to serialize the key when written to off-heap
*/
EntryHashSet(MemoryManager vMM, MemoryManager kMM, int entriesCapacity, OakSerializer<K> keySerializer,
OakSerializer<V> valueSerializer, OakComparator<K> comparator) {
super(vMM, kMM, ADDITIONAL_FIELDS, entriesCapacity, keySerializer, valueSerializer);
this.comparator = comparator;
EntryHashSet(OakSharedConfig<K, V> config, int entriesCapacity) {
super(config, ADDITIONAL_FIELDS, entriesCapacity);
}

/*---------- Private methods for managing key hash entry field -------------*/
Expand Down Expand Up @@ -198,7 +191,7 @@ private boolean isKeyAndEntryKeyEqual(KeyBuffer tempKeyBuff, K key, int idx, int
}

assert tempKeyBuff.isAssociated();
return (0 == comparator.compareKeyAndSerializedKey(key, tempKeyBuff));
return (0 == config.comparator.compareKeyAndSerializedKey(key, tempKeyBuff));
}

/* Check the state of the entry in `idx`
Expand All @@ -213,7 +206,7 @@ private EntryState getEntryState(ThreadContext ctx, int idx, K key, int keyHash)
boolean valueReadResult = false;

// optimization, as invalid key reference should be the most frequent case
if (getKeyReference(idx) == keysMemoryManager.getInvalidReference()) {
if (getKeyReference(idx) == config.keysMemoryManager.getInvalidReference()) {
return EntryState.UNKNOWN;
}

Expand Down Expand Up @@ -346,7 +339,7 @@ boolean lookUpForGetOnly(ThreadContext ctx, K key, int idx, int keyHash) {
ctx.entryIndex = (idx + i) % entriesCapacity; // check the entry candidate, cyclic increase
ctx.keyHashAndUpdateCnt = getKeyHashAndUpdateCounter(ctx.entryIndex);
long keyReference = getKeyReference(ctx.entryIndex);
if (keyReference == keysMemoryManager.getInvalidReference()) {
if (keyReference == config.keysMemoryManager.getInvalidReference()) {
return false; // there is no such a key and there is no need to look forward
}

Expand Down Expand Up @@ -608,13 +601,13 @@ boolean deleteValueFinish(ThreadContext ctx) {
// The marking the delete bit happens only when no lock is taken, otherwise busy waits
// if the version is already different result is RETRY, if already deleted - FALSE
// we continue anyway, therefore disregard the logicalDelete() output
boolean isKeyReferenceDeleted = keysMemoryManager.isReferenceDeleted(expectedKeyReference);
boolean isKeyReferenceDeleted = config.keysMemoryManager.isReferenceDeleted(expectedKeyReference);
if (!isKeyReferenceDeleted) {
ctx.key.s.logicalDelete();
}

// Value's reference codec prepares the reference to be used after value is deleted
long newValueReference = valuesMemoryManager.alterReferenceForDelete(expectedValueReference);
long newValueReference = config.valuesMemoryManager.alterReferenceForDelete(expectedValueReference);
// Scenario:
// 1. The value's slice is marked as deleted off-heap and the thread that started
// deleteValueFinish falls asleep.
Expand All @@ -624,23 +617,23 @@ boolean deleteValueFinish(ThreadContext ctx) {
// This is ABA problem and resolved via always changing deleted variation of the reference
// Also value's off-heap slice is released to memory manager only after deleteValueFinish
// is done.
if (!valuesMemoryManager.isReferenceDeleted(expectedValueReference)) {
if (!config.valuesMemoryManager.isReferenceDeleted(expectedValueReference)) {
if (casEntryFieldLong(ctx.entryIndex, VALUE_REF_OFFSET, expectedValueReference,
newValueReference)) {
// the deletion of the value and its release should be successful only once and for one
// thread, therefore the reference and slice should be still valid here
assert valuesMemoryManager.isReferenceConsistent(getValueReference(ctx.entryIndex));
assert config.valuesMemoryManager.isReferenceConsistent(getValueReference(ctx.entryIndex));
assert ctx.value.isAssociated();
ctx.value.getSlice().release();
ctx.value.invalidate();
}
}
// mark key reference as deleted, if needed
if (!isKeyReferenceDeleted) {
long newKeyReference = keysMemoryManager.alterReferenceForDelete(expectedKeyReference);
long newKeyReference = config.keysMemoryManager.alterReferenceForDelete(expectedKeyReference);
if (casEntryFieldLong(ctx.entryIndex, KEY_REF_OFFSET, expectedKeyReference,
newKeyReference)) {
assert keysMemoryManager.isReferenceConsistent(getKeyReference(ctx.entryIndex));
assert config.keysMemoryManager.isReferenceConsistent(getKeyReference(ctx.entryIndex));
ctx.key.getSlice().release();
ctx.key.invalidate();
}
Expand Down
Loading