From 40e2d74775f53f798195570090afdf7cc95f1642 Mon Sep 17 00:00:00 2001
From: Ben Manes <ben.manes@gmail.com>
Date: Sat, 21 Dec 2024 12:38:46 -0800
Subject: [PATCH] Remove sun.misc.Unsafe leftovers in the test and benchmark code

- Removed the usage from a unit test that requires a predictable
  ThreadLocalRandom result. This now uses reflection, and the runner must
  explicitly open the module for access.
- Removed it from a benchmark comparing table lookup mechanisms, as the
  cache currently uses the VarHandle approach.
- Removed JDK 7's ConcurrentHashMap, as it was only useful for judging the
  performance benefit of the Java 8 rewrite when setting baseline
  expectations during the library's initial development.
---
 .github/workflows/analysis.yml                |    2 +-
 caffeine/build.gradle.kts                     |    7 +-
 .../caffeine/SlotLookupBenchmark.java         |   33 -
 .../benmanes/caffeine/cache/CacheType.java    |    7 -
 .../cache/impl/ConcurrentHashMapV7.java       | 1776 -----------------
 .../caffeine/cache/BoundedLocalCacheTest.java |    4 +
 .../github/benmanes/caffeine/cache/Reset.java |   28 +-
 .../caffeine/cache/TimerWheelTest.java        |    1 -
 .../testing/CaffeineCacheFromContext.java     |    2 -
 gradle/config/spotbugs/exclude.xml            |   10 +-
 .../java-library.caffeine.gradle.kts          |    4 +-
 11 files changed, 35 insertions(+), 1839 deletions(-)
 delete mode 100644 caffeine/src/jmh/java/com/github/benmanes/caffeine/cache/impl/ConcurrentHashMapV7.java

diff --git a/.github/workflows/analysis.yml b/.github/workflows/analysis.yml
index 4ca2fc52fe..2bb936fba5 100644
--- a/.github/workflows/analysis.yml
+++ b/.github/workflows/analysis.yml
@@ -23,7 +23,7 @@ jobs:
   forbiddenApis:
     runs-on: ubuntu-latest
     env:
-      JAVA_VERSION: 22
+      JAVA_VERSION: 23
     steps:
       - name: Harden Runner
         uses: step-security/harden-runner@0080882f6c36860b6ba35c610c98ce87d4e2f26f # v2.10.2
diff --git a/caffeine/build.gradle.kts b/caffeine/build.gradle.kts
index f017d05c73..b64c0a1ce2 100644
--- a/caffeine/build.gradle.kts
+++ b/caffeine/build.gradle.kts
@@ -255,11 +255,11 @@ tasks.named("forbiddenApisJavaPoet").configure {
 }
 
 tasks.named("forbiddenApisTest").configure {
-  bundledSignatures.add("jdk-deprecated-18")
+  bundledSignatures.addAll(listOf("jdk-deprecated-18", "jdk-unsafe"))
 }
 
 tasks.named("forbiddenApisJmh").configure {
-  bundledSignatures.addAll(listOf("jdk-deprecated-18", "jdk-reflection"))
+  bundledSignatures.addAll(listOf("jdk-deprecated-18", "jdk-reflection", "jdk-unsafe"))
 }
 
 tasks.register("memoryOverhead") {
@@ -314,7 +314,8 @@ for (scenario in Scenario.all()) {
     } else {
       parallel = "methods"
       excludeGroups("slow", "isolated", "lincheck")
-      jvmArgs("-XX:+UseG1GC", "-XX:+ParallelRefProcEnabled")
+      jvmArgs("-XX:+UseG1GC", "-XX:+ParallelRefProcEnabled",
+        "--add-opens", "java.base/java.lang=ALL-UNNAMED")
       threadCount = max(6, Runtime.getRuntime().availableProcessors() - 1)
     }
   }
diff --git a/caffeine/src/jmh/java/com/github/benmanes/caffeine/SlotLookupBenchmark.java b/caffeine/src/jmh/java/com/github/benmanes/caffeine/SlotLookupBenchmark.java
index 9313bbd6d6..700e76f52e 100644
--- a/caffeine/src/jmh/java/com/github/benmanes/caffeine/SlotLookupBenchmark.java
+++ b/caffeine/src/jmh/java/com/github/benmanes/caffeine/SlotLookupBenchmark.java
@@ -20,7 +20,6 @@
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.stream.LongStream;
 
-import org.jctools.util.UnsafeAccess;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.Scope;
 import org.openjdk.jmh.annotations.Setup;
@@ -55,7 +54,6 @@ public class SlotLookupBenchmark {
   static final VarHandle PROBE;
 
   ThreadLocal<Integer> threadLocal;
-  long probeOffset;
   long[] array;
 
   @Setup
@@ -75,11 +73,6 @@ public void setupBinarySearch() {
     array = LongStream.range(0, ARENA_SIZE).toArray();
   }
 
-  @Setup
-  public void setupStriped64() {
-    probeOffset = UnsafeAccess.fieldOffset(Thread.class, "threadLocalRandomProbe");
-  }
-
   @Benchmark
   public int threadLocal() {
     // Emulates holding the arena slot in a thread-local
@@ -113,32 +106,6 @@ private static int mix32(int x) {
     return (x >>> 16) ^ x;
   }
 
-  @Benchmark
-  public long striped64_unsafe(Blackhole blackhole) {
-    // Emulates finding the arena slot by reusing the thread-local random seed (j.u.c.a.Striped64)
-    int hash = getProbe_unsafe();
-    if (hash == 0) {
-      blackhole.consume(ThreadLocalRandom.current()); // force initialization
-      hash = getProbe_unsafe();
-    }
-    advanceProbe_unsafe(hash);
-    int index = selectSlot(hash);
-    return array[index];
-  }
-
-  @SuppressWarnings("SunApi")
-  private int getProbe_unsafe() {
-    return UnsafeAccess.UNSAFE.getInt(Thread.currentThread(), probeOffset);
-  }
-
-  @SuppressWarnings("SunApi")
-  private void advanceProbe_unsafe(int probe) {
-    probe ^= probe << 13; // xorshift
-    probe ^= probe >>> 17;
-    probe ^= probe << 5;
-    UnsafeAccess.UNSAFE.putInt(Thread.currentThread(), probeOffset, probe);
-  }
-
   @Benchmark
   public long striped64_varHandle(Blackhole blackhole) {
     // Emulates finding the arena slot by reusing the thread-local random seed (j.u.c.a.Striped64)
diff --git a/caffeine/src/jmh/java/com/github/benmanes/caffeine/cache/CacheType.java b/caffeine/src/jmh/java/com/github/benmanes/caffeine/cache/CacheType.java
index c58dec70c8..26c08e6e14 100644
--- a/caffeine/src/jmh/java/com/github/benmanes/caffeine/cache/CacheType.java
+++ b/caffeine/src/jmh/java/com/github/benmanes/caffeine/cache/CacheType.java
@@ -23,7 +23,6 @@
 import com.github.benmanes.caffeine.cache.impl.Cache2k;
 import com.github.benmanes.caffeine.cache.impl.CaffeineCache;
 import com.github.benmanes.caffeine.cache.impl.CoherenceCache;
-import com.github.benmanes.caffeine.cache.impl.ConcurrentHashMapV7;
 import com.github.benmanes.caffeine.cache.impl.ConcurrentMapCache;
 import com.github.benmanes.caffeine.cache.impl.Ehcache3;
 import com.github.benmanes.caffeine.cache.impl.GuavaCache;
@@ -47,12 +46,6 @@ public enum CacheType {
 
   /* --------------- Unbounded --------------- */
 
-  ConcurrentHashMapV7 { // see OpenJDK/7u40-b43
-    @Override public <K, V> BasicCache<K, V> create(int maximumSize) {
-      return new ConcurrentMapCache<>(
-          new ConcurrentHashMapV7<>(maximumSize, 0.75f, CONCURRENCY_LEVEL));
-    }
-  },
   ConcurrentHashMap {
     @Override public <K, V> BasicCache<K, V> create(int maximumSize) {
       return new ConcurrentMapCache<>(new ConcurrentHashMap<>(maximumSize));
diff --git a/caffeine/src/jmh/java/com/github/benmanes/caffeine/cache/impl/ConcurrentHashMapV7.java b/caffeine/src/jmh/java/com/github/benmanes/caffeine/cache/impl/ConcurrentHashMapV7.java
deleted file mode 100644
index 08dea3bd51..0000000000
--- a/caffeine/src/jmh/java/com/github/benmanes/caffeine/cache/impl/ConcurrentHashMapV7.java
+++ /dev/null
@@ -1,1776 +0,0 @@
-/*
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/* - * This file is available under and governed by the GNU General Public - * License version 2 only, as published by the Free Software Foundation. - * However, the following notice accompanied the original version of this - * file: - * - * Written by Doug Lea with assistance from members of JCP JSR-166 - * Expert Group and released to the public domain, as explained at - * http://creativecommons.org/publicdomain/zero/1.0/ - */ -package com.github.benmanes.caffeine.cache.impl; - -import static org.jctools.util.UnsafeAccess.UNSAFE; - -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.util.AbstractCollection; -import java.util.AbstractMap; -import java.util.AbstractSet; -import java.util.Collection; -import java.util.ConcurrentModificationException; -import java.util.Enumeration; -import java.util.HashMap; -import java.util.Hashtable; -import java.util.Iterator; -import java.util.Map; -import java.util.NoSuchElementException; -import java.util.Set; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.ThreadLocalRandom; -import java.util.concurrent.locks.ReentrantLock; - -/** - * A hash table supporting full concurrency of retrievals and - * adjustable expected concurrency for updates. This class obeys the - * same functional specification as {@link java.util.Hashtable}, and - * includes versions of methods corresponding to each method of - * Hashtable. However, even though all operations are - * thread-safe, retrieval operations do not entail locking, - * and there is not any support for locking the entire table - * in a way that prevents all access. This class is fully - * interoperable with Hashtable in programs that rely on its - * thread safety but not on its synchronization details. - * - *
<p>
Retrieval operations (including get) generally do not - * block, so may overlap with update operations (including - * put and remove). Retrievals reflect the results - * of the most recently completed update operations holding - * upon their onset. For aggregate operations such as putAll - * and clear, concurrent retrievals may reflect insertion or - * removal of only some entries. Similarly, Iterators and - * Enumerations return elements reflecting the state of the hash table - * at some point at or since the creation of the iterator/enumeration. - * They do not throw {@link ConcurrentModificationException}. - * However, iterators are designed to be used by only one thread at a time. - * - *
<p>
The allowed concurrency among update operations is guided by - * the optional concurrencyLevel constructor argument - * (default 16), which is used as a hint for internal sizing. The - * table is internally partitioned to try to permit the indicated - * number of concurrent updates without contention. Because placement - * in hash tables is essentially random, the actual concurrency will - * vary. Ideally, you should choose a value to accommodate as many - * threads as will ever concurrently modify the table. Using a - * significantly higher value than you need can waste space and time, - * and a significantly lower value can lead to thread contention. But - * overestimates and underestimates within an order of magnitude do - * not usually have much noticeable impact. A value of one is - * appropriate when it is known that only one thread will modify and - * all others will only read. Also, resizing this or any other kind of - * hash table is a relatively slow operation, so, when possible, it is - * a good idea to provide estimates of expected table sizes in - * constructors. - * - *
<p>
This class and its views and iterators implement all of the - * optional methods of the {@link Map} and {@link Iterator} - * interfaces. - * - *
<p>
Like {@link Hashtable} but unlike {@link HashMap}, this class - * does not allow null to be used as a key or value. - * - *
<p>
This class is a member of the - * - * Java Collections Framework. - * - * @since 1.5 - * @author Doug Lea - * @param the type of keys maintained by this map - * @param the type of mapped values - */ -@SuppressWarnings({"all", "CheckReturnValue", "deprecation", "JdkObsolete", "LessThanOrEqualTo", - "rawtypes", "serial", "unchecked", "UnnecessaryParentheses", "UnusedNestedClass", - "UnusedVariable", "YodaCondition"}) -public class ConcurrentHashMapV7 extends AbstractMap - implements ConcurrentMap, Serializable { - private static final long serialVersionUID = 7249069246763182397L; - - /* - * The basic strategy is to subdivide the table among Segments, - * each of which itself is a concurrently readable hash table. To - * reduce footprint, all but one segments are constructed only - * when first needed (see ensureSegment). To maintain visibility - * in the presence of lazy construction, accesses to segments as - * well as elements of segment's table must use volatile access, - * which is done via Unsafe within methods segmentAt etc - * below. These provide the functionality of AtomicReferenceArrays - * but reduce the levels of indirection. Additionally, - * volatile-writes of table elements and entry "next" fields - * within locked operations use the cheaper "lazySet" forms of - * writes (via putOrderedObject) because these writes are always - * followed by lock releases that maintain sequential consistency - * of table updates. - * - * Historical note: The previous version of this class relied - * heavily on "final" fields, which avoided some volatile reads at - * the expense of a large initial footprint. Some remnants of - * that design (including forced construction of segment 0) exist - * to ensure serialization compatibility. - */ - - /* ---------------- Constants -------------- */ - - /** - * The default initial capacity for this table, - * used when not otherwise specified in a constructor. - */ - static final int DEFAULT_INITIAL_CAPACITY = 16; - - /** - * The default load factor for this table, used when not - * otherwise specified in a constructor. - */ - static final float DEFAULT_LOAD_FACTOR = 0.75f; - - /** - * The default concurrency level for this table, used when not - * otherwise specified in a constructor. - */ - static final int DEFAULT_CONCURRENCY_LEVEL = 16; - - /** - * The maximum capacity, used if a higher value is implicitly - * specified by either of the constructors with arguments. MUST - * be a power of two <= 1<<30 to ensure that entries are indexable - * using ints. - */ - static final int MAXIMUM_CAPACITY = 1 << 30; - - /** - * The minimum capacity for per-segment tables. Must be a power - * of two, at least two to avoid immediate resizing on next use - * after lazy construction. - */ - static final int MIN_SEGMENT_TABLE_CAPACITY = 2; - - /** - * The maximum number of segments to allow; used to bound - * constructor arguments. Must be power of two less than 1 << 24. - */ - static final int MAX_SEGMENTS = 1 << 16; // slightly conservative - - /** - * Number of unsynchronized retries in size and containsValue - * methods before resorting to locking. This is used to avoid - * unbounded retries if tables undergo continuous modification - * which would make it impossible to obtain an accurate result. - */ - static final int RETRIES_BEFORE_LOCK = 2; - - /* ---------------- Fields -------------- */ - - /** - * holds values which can't be initialized until after VM is booted. - */ - private static class Holder { - - /** - * Enable alternative hashing of String keys? - * - *
<p>
Unlike the other hash map implementations we do not implement a - * threshold for regulating whether alternative hashing is used for - * String keys. Alternative hashing is either enabled for all instances - * or disabled for all instances. - */ - static final boolean ALTERNATIVE_HASHING; - - static { - // Use the "threshold" system property even though our threshold - // behaviour is "ON" or "OFF". -// String altThreshold = java.security.AccessController.doPrivileged( -// new sun.security.action.GetPropertyAction( -// "jdk.map.althashing.threshold")); - - int threshold; - try { -// threshold = (null != altThreshold) -// ? Integer.parseInt(altThreshold) -// : Integer.MAX_VALUE; - threshold = Integer.MAX_VALUE; - - // disable alternative hashing if -1 - if (threshold == -1) { - threshold = Integer.MAX_VALUE; - } - - if (threshold < 0) { - throw new IllegalArgumentException("value must be positive integer."); - } - } catch(IllegalArgumentException failed) { - throw new Error("Illegal value for 'jdk.map.althashing.threshold'", failed); - } - ALTERNATIVE_HASHING = threshold <= MAXIMUM_CAPACITY; - } - } - - /** - * A randomizing value associated with this instance that is applied to - * hash code of keys to make hash collisions harder to find. - */ - private transient final int hashSeed = randomHashSeed(this); - - private static int randomHashSeed(ConcurrentHashMapV7 instance) { - return 0; - } - - /** - * Return a non-zero 32-bit pseudo random value. The {@code instance} object - * may be used as part of the value. - * - * @param instance an object to use if desired in choosing value. - * @return a non-zero 32-bit pseudo random value. - */ - public static int randomHashSeed(Object instance) { - int seed = ThreadLocalRandom.current().nextInt(); - - // force to non-zero. - return (0 != seed) ? seed : 1; - } - - /** - * Mask value for indexing into segments. The upper bits of a - * key's hash code are used to choose the segment. - */ - final int segmentMask; - - /** - * Shift value for indexing within segments. - */ - final int segmentShift; - - /** - * The segments, each of which is a specialized hash table. - */ - final Segment[] segments; - - transient Set keySet; - transient Set> entrySet; - transient Collection values; - - /** - * ConcurrentHashMap list entry. Note that this is never exported - * out as a user-visible Map.Entry. - */ - static final class HashEntry { - final int hash; - final K key; - volatile V value; - volatile HashEntry next; - - HashEntry(int hash, K key, V value, HashEntry next) { - this.hash = hash; - this.key = key; - this.value = value; - this.next = next; - } - - /** - * Sets next field with volatile write semantics. (See above - * about use of putOrderedObject.) - */ - final void setNext(HashEntry n) { - UNSAFE.putOrderedObject(this, nextOffset, n); - } - - // Unsafe mechanics - static final long nextOffset; - static { - try { - Class k = HashEntry.class; - nextOffset = UNSAFE.objectFieldOffset - (k.getDeclaredField("next")); - } catch (Exception e) { - throw new Error(e); - } - } - } - - /** - * Gets the ith element of given table (if nonnull) with volatile - * read semantics. Note: This is manually integrated into a few - * performance-sensitive methods to reduce call overhead. - */ - @SuppressWarnings("unchecked") - static final HashEntry entryAt(HashEntry[] tab, int i) { - return (tab == null) ? 
null : - (HashEntry) UNSAFE.getObjectVolatile - (tab, ((long)i << TSHIFT) + TBASE); - } - - /** - * Sets the ith element of given table, with volatile write - * semantics. (See above about use of putOrderedObject.) - */ - static final void setEntryAt(HashEntry[] tab, int i, - HashEntry e) { - UNSAFE.putOrderedObject(tab, ((long)i << TSHIFT) + TBASE, e); - } - - /** - * Applies a supplemental hash function to a given hashCode, which - * defends against poor quality hash functions. This is critical - * because ConcurrentHashMap uses power-of-two length hash tables, - * that otherwise encounter collisions for hashCodes that do not - * differ in lower or upper bits. - */ - private int hash(Object k) { - int h = hashSeed; - -// if ((0 != h) && (k instanceof String)) { -// return sun.misc.Hashing.stringHash32((String) k); -// } - - h ^= k.hashCode(); - - // Spread bits to regularize both segment and index locations, - // using variant of single-word Wang/Jenkins hash. - h += (h << 15) ^ 0xffffcd7d; - h ^= (h >>> 10); - h += (h << 3); - h ^= (h >>> 6); - h += (h << 2) + (h << 14); - return h ^ (h >>> 16); - } - - /** - * Segments are specialized versions of hash tables. This - * subclasses from ReentrantLock opportunistically, just to - * simplify some locking and avoid separate construction. - */ - static final class Segment extends ReentrantLock implements Serializable { - /* - * Segments maintain a table of entry lists that are always - * kept in a consistent state, so can be read (via volatile - * reads of segments and tables) without locking. This - * requires replicating nodes when necessary during table - * resizing, so the old lists can be traversed by readers - * still using old version of table. - * - * This class defines only mutative methods requiring locking. - * Except as noted, the methods of this class perform the - * per-segment versions of ConcurrentHashMap methods. (Other - * methods are integrated directly into ConcurrentHashMap - * methods.) These mutative methods use a form of controlled - * spinning on contention via methods scanAndLock and - * scanAndLockForPut. These intersperse tryLocks with - * traversals to locate nodes. The main benefit is to absorb - * cache misses (which are very common for hash tables) while - * obtaining locks so that traversal is faster once - * acquired. We do not actually use the found nodes since they - * must be re-acquired under lock anyway to ensure sequential - * consistency of updates (and in any case may be undetectably - * stale), but they will normally be much faster to re-locate. - * Also, scanAndLockForPut speculatively creates a fresh node - * to use in put if no node is found. - */ - - private static final long serialVersionUID = 2249069246763182397L; - - /** - * The maximum number of times to tryLock in a prescan before - * possibly blocking on acquire in preparation for a locked - * segment operation. On multiprocessors, using a bounded - * number of retries maintains cache acquired while locating - * nodes. - */ - static final int MAX_SCAN_RETRIES = - Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1; - - /** - * The per-segment table. Elements are accessed via - * entryAt/setEntryAt providing volatile semantics. - */ - transient volatile HashEntry[] table; - - /** - * The number of elements. Accessed only either within locks - * or among other volatile reads that maintain visibility. - */ - transient int count; - - /** - * The total number of mutative operations in this segment. 
- * Even though this may overflows 32 bits, it provides - * sufficient accuracy for stability checks in CHM isEmpty() - * and size() methods. Accessed only either within locks or - * among other volatile reads that maintain visibility. - */ - transient int modCount; - - /** - * The table is rehashed when its size exceeds this threshold. - * (The value of this field is always (int)(capacity * - * loadFactor).) - */ - transient int threshold; - - /** - * The load factor for the hash table. Even though this value - * is same for all segments, it is replicated to avoid needing - * links to outer object. - * @serial - */ - final float loadFactor; - - Segment(float lf, int threshold, HashEntry[] tab) { - this.loadFactor = lf; - this.threshold = threshold; - this.table = tab; - } - - final V put(K key, int hash, V value, boolean onlyIfAbsent) { - HashEntry node = tryLock() ? null : - scanAndLockForPut(key, hash, value); - V oldValue; - try { - HashEntry[] tab = table; - int index = (tab.length - 1) & hash; - HashEntry first = entryAt(tab, index); - for (HashEntry e = first;;) { - if (e != null) { - K k; - if ((k = e.key) == key || - (e.hash == hash && key.equals(k))) { - oldValue = e.value; - if (!onlyIfAbsent) { - e.value = value; - ++modCount; - } - break; - } - e = e.next; - } - else { - if (node != null) { - node.setNext(first); - } else { - node = new HashEntry(hash, key, value, first); - } - int c = count + 1; - if (c > threshold && tab.length < MAXIMUM_CAPACITY) { - rehash(node); - } else { - setEntryAt(tab, index, node); - } - ++modCount; - count = c; - oldValue = null; - break; - } - } - } finally { - unlock(); - } - return oldValue; - } - - /** - * Doubles size of table and repacks entries, also adding the - * given node to new table - */ - @SuppressWarnings("unchecked") - private void rehash(HashEntry node) { - /* - * Reclassify nodes in each list to new table. Because we - * are using power-of-two expansion, the elements from - * each bin must either stay at same index, or move with a - * power of two offset. We eliminate unnecessary node - * creation by catching cases where old nodes can be - * reused because their next fields won't change. - * Statistically, at the default threshold, only about - * one-sixth of them need cloning when a table - * doubles. The nodes they replace will be garbage - * collectable as soon as they are no longer referenced by - * any reader thread that may be in the midst of - * concurrently traversing table. Entry accesses use plain - * array indexing because they are followed by volatile - * table write. 
- */ - HashEntry[] oldTable = table; - int oldCapacity = oldTable.length; - int newCapacity = oldCapacity << 1; - threshold = (int)(newCapacity * loadFactor); - HashEntry[] newTable = - new HashEntry[newCapacity]; - int sizeMask = newCapacity - 1; - for (int i = 0; i < oldCapacity ; i++) { - HashEntry e = oldTable[i]; - if (e != null) { - HashEntry next = e.next; - int idx = e.hash & sizeMask; - if (next == null) { - newTable[idx] = e; - } else { // Reuse consecutive sequence at same slot - HashEntry lastRun = e; - int lastIdx = idx; - for (HashEntry last = next; - last != null; - last = last.next) { - int k = last.hash & sizeMask; - if (k != lastIdx) { - lastIdx = k; - lastRun = last; - } - } - newTable[lastIdx] = lastRun; - // Clone remaining nodes - for (HashEntry p = e; p != lastRun; p = p.next) { - V v = p.value; - int h = p.hash; - int k = h & sizeMask; - HashEntry n = newTable[k]; - newTable[k] = new HashEntry(h, p.key, v, n); - } - } - } - } - int nodeIndex = node.hash & sizeMask; // add the new node - node.setNext(newTable[nodeIndex]); - newTable[nodeIndex] = node; - table = newTable; - } - - /** - * Scans for a node containing given key while trying to - * acquire lock, creating and returning one if not found. Upon - * return, guarantees that lock is held. UNlike in most - * methods, calls to method equals are not screened: Since - * traversal speed doesn't matter, we might as well help warm - * up the associated code and accesses as well. - * - * @return a new node if key not found, else null - */ - private HashEntry scanAndLockForPut(K key, int hash, V value) { - HashEntry first = entryForHash(this, hash); - HashEntry e = first; - HashEntry node = null; - int retries = -1; // negative while locating node - while (!tryLock()) { - HashEntry f; // to recheck first below - if (retries < 0) { - if (e == null) { - if (node == null) { - node = new HashEntry(hash, key, value, null); - } - retries = 0; - } - else if (key.equals(e.key)) { - retries = 0; - } else { - e = e.next; - } - } - else if (++retries > MAX_SCAN_RETRIES) { - lock(); - break; - } - else if ((retries & 1) == 0 && - (f = entryForHash(this, hash)) != first) { - e = first = f; // re-traverse if entry changed - retries = -1; - } - } - return node; - } - - /** - * Scans for a node containing the given key while trying to - * acquire lock for a remove or replace operation. Upon - * return, guarantees that lock is held. Note that we must - * lock even if the key is not found, to ensure sequential - * consistency of updates. - */ - private void scanAndLock(Object key, int hash) { - // similar to but simpler than scanAndLockForPut - HashEntry first = entryForHash(this, hash); - HashEntry e = first; - int retries = -1; - while (!tryLock()) { - HashEntry f; - if (retries < 0) { - if (e == null || key.equals(e.key)) { - retries = 0; - } else { - e = e.next; - } - } - else if (++retries > MAX_SCAN_RETRIES) { - lock(); - break; - } - else if ((retries & 1) == 0 && - (f = entryForHash(this, hash)) != first) { - e = first = f; - retries = -1; - } - } - } - - /** - * Remove; match on key only if value null, else match both. 
- */ - final V remove(Object key, int hash, Object value) { - if (!tryLock()) { - scanAndLock(key, hash); - } - V oldValue = null; - try { - HashEntry[] tab = table; - int index = (tab.length - 1) & hash; - HashEntry e = entryAt(tab, index); - HashEntry pred = null; - while (e != null) { - K k; - HashEntry next = e.next; - if ((k = e.key) == key || - (e.hash == hash && key.equals(k))) { - V v = e.value; - if (value == null || value == v || value.equals(v)) { - if (pred == null) { - setEntryAt(tab, index, next); - } else { - pred.setNext(next); - } - ++modCount; - --count; - oldValue = v; - } - break; - } - pred = e; - e = next; - } - } finally { - unlock(); - } - return oldValue; - } - - final boolean replace(K key, int hash, V oldValue, V newValue) { - if (!tryLock()) { - scanAndLock(key, hash); - } - boolean replaced = false; - try { - HashEntry e; - for (e = entryForHash(this, hash); e != null; e = e.next) { - K k; - if ((k = e.key) == key || - (e.hash == hash && key.equals(k))) { - if (oldValue.equals(e.value)) { - e.value = newValue; - ++modCount; - replaced = true; - } - break; - } - } - } finally { - unlock(); - } - return replaced; - } - - final V replace(K key, int hash, V value) { - if (!tryLock()) { - scanAndLock(key, hash); - } - V oldValue = null; - try { - HashEntry e; - for (e = entryForHash(this, hash); e != null; e = e.next) { - K k; - if ((k = e.key) == key || - (e.hash == hash && key.equals(k))) { - oldValue = e.value; - e.value = value; - ++modCount; - break; - } - } - } finally { - unlock(); - } - return oldValue; - } - - final void clear() { - lock(); - try { - HashEntry[] tab = table; - for (int i = 0; i < tab.length ; i++) { - setEntryAt(tab, i, null); - } - ++modCount; - count = 0; - } finally { - unlock(); - } - } - } - - // Accessing segments - - /** - * Gets the jth element of given segment array (if nonnull) with - * volatile element access semantics via Unsafe. (The null check - * can trigger harmlessly only during deserialization.) Note: - * because each element of segments array is set only once (using - * fully ordered writes), some performance-sensitive methods rely - * on this method only as a recheck upon null reads. - */ - @SuppressWarnings("unchecked") - static final Segment segmentAt(Segment[] ss, int j) { - long u = (j << SSHIFT) + SBASE; - return ss == null ? null : - (Segment) UNSAFE.getObjectVolatile(ss, u); - } - - /** - * Returns the segment for the given index, creating it and - * recording in segment table (via CAS) if not already present. 
- * - * @param k the index - * @return the segment - */ - @SuppressWarnings("unchecked") - private Segment ensureSegment(int k) { - final Segment[] ss = this.segments; - long u = (k << SSHIFT) + SBASE; // raw offset - Segment seg; - if ((seg = (Segment)UNSAFE.getObjectVolatile(ss, u)) == null) { - Segment proto = ss[0]; // use segment 0 as prototype - int cap = proto.table.length; - float lf = proto.loadFactor; - int threshold = (int)(cap * lf); - HashEntry[] tab = new HashEntry[cap]; - if ((seg = (Segment)UNSAFE.getObjectVolatile(ss, u)) - == null) { // recheck - Segment s = new Segment(lf, threshold, tab); - while ((seg = (Segment)UNSAFE.getObjectVolatile(ss, u)) - == null) { - if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s)) { - break; - } - } - } - } - return seg; - } - - // Hash-based segment and entry accesses - - /** - * Get the segment for the given hash - */ - @SuppressWarnings("unchecked") - private Segment segmentForHash(int h) { - long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE; - return (Segment) UNSAFE.getObjectVolatile(segments, u); - } - - /** - * Gets the table entry for the given segment and hash - */ - @SuppressWarnings("unchecked") - static final HashEntry entryForHash(Segment seg, int h) { - HashEntry[] tab; - return (seg == null || (tab = seg.table) == null) ? null : - (HashEntry) UNSAFE.getObjectVolatile - (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE); - } - - /* ---------------- Public operations -------------- */ - - /** - * Creates a new, empty map with the specified initial - * capacity, load factor and concurrency level. - * - * @param initialCapacity the initial capacity. The implementation - * performs internal sizing to accommodate this many elements. - * @param loadFactor the load factor threshold, used to control resizing. - * Resizing may be performed when the average number of elements per - * bin exceeds this threshold. - * @param concurrencyLevel the estimated number of concurrently - * updating threads. The implementation performs internal sizing - * to try to accommodate this many threads. - * @throws IllegalArgumentException if the initial capacity is - * negative or the load factor or concurrencyLevel are - * nonpositive. - */ - @SuppressWarnings("unchecked") - public ConcurrentHashMapV7(int initialCapacity, - float loadFactor, int concurrencyLevel) { - if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0) { - throw new IllegalArgumentException(); - } - if (concurrencyLevel > MAX_SEGMENTS) { - concurrencyLevel = MAX_SEGMENTS; - } - // Find power-of-two sizes best matching arguments - int sshift = 0; - int ssize = 1; - while (ssize < concurrencyLevel) { - ++sshift; - ssize <<= 1; - } - this.segmentShift = 32 - sshift; - this.segmentMask = ssize - 1; - if (initialCapacity > MAXIMUM_CAPACITY) { - initialCapacity = MAXIMUM_CAPACITY; - } - int c = initialCapacity / ssize; - if (c * ssize < initialCapacity) { - ++c; - } - int cap = MIN_SEGMENT_TABLE_CAPACITY; - while (cap < c) { - cap <<= 1; - } - // create segments and segments[0] - Segment s0 = - new Segment(loadFactor, (int)(cap * loadFactor), - new HashEntry[cap]); - Segment[] ss = new Segment[ssize]; - UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0] - this.segments = ss; - } - - /** - * Creates a new, empty map with the specified initial capacity - * and load factor and with the default concurrencyLevel (16). - * - * @param initialCapacity The implementation performs internal - * sizing to accommodate this many elements. 
- * @param loadFactor the load factor threshold, used to control resizing. - * Resizing may be performed when the average number of elements per - * bin exceeds this threshold. - * @throws IllegalArgumentException if the initial capacity of - * elements is negative or the load factor is nonpositive - * - * @since 1.6 - */ - public ConcurrentHashMapV7(int initialCapacity, float loadFactor) { - this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL); - } - - /** - * Creates a new, empty map with the specified initial capacity, - * and with default load factor (0.75) and concurrencyLevel (16). - * - * @param initialCapacity the initial capacity. The implementation - * performs internal sizing to accommodate this many elements. - * @throws IllegalArgumentException if the initial capacity of - * elements is negative. - */ - public ConcurrentHashMapV7(int initialCapacity) { - this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); - } - - /** - * Creates a new, empty map with a default initial capacity (16), - * load factor (0.75) and concurrencyLevel (16). - */ - public ConcurrentHashMapV7() { - this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); - } - - /** - * Creates a new map with the same mappings as the given map. - * The map is created with a capacity of 1.5 times the number - * of mappings in the given map or 16 (whichever is greater), - * and a default load factor (0.75) and concurrencyLevel (16). - * - * @param m the map - */ - @SuppressWarnings("this-escape") - public ConcurrentHashMapV7(Map m) { - this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, - DEFAULT_INITIAL_CAPACITY), - DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); - putAll(m); - } - - /** - * Returns true if this map contains no key-value mappings. - * - * @return true if this map contains no key-value mappings - */ - @Override - public boolean isEmpty() { - /* - * Sum per-segment modCounts to avoid mis-reporting when - * elements are concurrently added and removed in one segment - * while checking another, in which case the table was never - * actually empty at any point. (The sum ensures accuracy up - * through at least 1<<31 per-segment modifications before - * recheck.) Methods size() and containsValue() use similar - * constructions for stability checks. - */ - long sum = 0L; - final Segment[] segments = this.segments; - for (int j = 0; j < segments.length; ++j) { - Segment seg = segmentAt(segments, j); - if (seg != null) { - if (seg.count != 0) { - return false; - } - sum += seg.modCount; - } - } - if (sum != 0L) { // recheck unless no modifications - for (int j = 0; j < segments.length; ++j) { - Segment seg = segmentAt(segments, j); - if (seg != null) { - if (seg.count != 0) { - return false; - } - sum -= seg.modCount; - } - } - if (sum != 0L) { - return false; - } - } - return true; - } - - /** - * Returns the number of key-value mappings in this map. If the - * map contains more than Integer.MAX_VALUE elements, returns - * Integer.MAX_VALUE. - * - * @return the number of key-value mappings in this map - */ - @Override - public int size() { - // Try a few times to get accurate count. On failure due to - // continuous async changes in table, resort to locking. 
- final Segment[] segments = this.segments; - int size; - boolean overflow; // true if size overflows 32 bits - long sum; // sum of modCounts - long last = 0L; // previous sum - int retries = -1; // first iteration isn't retry - try { - for (;;) { - if (retries++ == RETRIES_BEFORE_LOCK) { - for (int j = 0; j < segments.length; ++j) - { - ensureSegment(j).lock(); // force creation - } - } - sum = 0L; - size = 0; - overflow = false; - for (int j = 0; j < segments.length; ++j) { - Segment seg = segmentAt(segments, j); - if (seg != null) { - sum += seg.modCount; - int c = seg.count; - if (c < 0 || (size += c) < 0) { - overflow = true; - } - } - } - if (sum == last) { - break; - } - last = sum; - } - } finally { - if (retries > RETRIES_BEFORE_LOCK) { - for (int j = 0; j < segments.length; ++j) { - segmentAt(segments, j).unlock(); - } - } - } - return overflow ? Integer.MAX_VALUE : size; - } - - /** - * Returns the value to which the specified key is mapped, - * or {@code null} if this map contains no mapping for the key. - * - *
<p>
More formally, if this map contains a mapping from a key - * {@code k} to a value {@code v} such that {@code key.equals(k)}, - * then this method returns {@code v}; otherwise it returns - * {@code null}. (There can be at most one such mapping.) - * - * @throws NullPointerException if the specified key is null - */ - @Override - public V get(Object key) { - Segment s; // manually integrate access methods to reduce overhead - HashEntry[] tab; - int h = hash(key); - long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE; - if ((s = (Segment)UNSAFE.getObjectVolatile(segments, u)) != null && - (tab = s.table) != null) { - for (HashEntry e = (HashEntry) UNSAFE.getObjectVolatile - (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE); - e != null; e = e.next) { - K k; - if ((k = e.key) == key || (e.hash == h && key.equals(k))) { - return e.value; - } - } - } - return null; - } - - /** - * Tests if the specified object is a key in this table. - * - * @param key possible key - * @return true if and only if the specified object - * is a key in this table, as determined by the - * equals method; false otherwise. - * @throws NullPointerException if the specified key is null - */ - @Override - @SuppressWarnings("unchecked") - public boolean containsKey(Object key) { - Segment s; // same as get() except no need for volatile value read - HashEntry[] tab; - int h = hash(key); - long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE; - if ((s = (Segment)UNSAFE.getObjectVolatile(segments, u)) != null && - (tab = s.table) != null) { - for (HashEntry e = (HashEntry) UNSAFE.getObjectVolatile - (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE); - e != null; e = e.next) { - K k; - if ((k = e.key) == key || (e.hash == h && key.equals(k))) { - return true; - } - } - } - return false; - } - - /** - * Returns true if this map maps one or more keys to the - * specified value. Note: This method requires a full internal - * traversal of the hash table, and so is much slower than - * method containsKey. - * - * @param value value whose presence in this map is to be tested - * @return true if this map maps one or more keys to the - * specified value - * @throws NullPointerException if the specified value is null - */ - @Override - public boolean containsValue(Object value) { - // Same idea as size() - if (value == null) { - throw new NullPointerException(); - } - final Segment[] segments = this.segments; - boolean found = false; - long last = 0; - int retries = -1; - try { - outer: for (;;) { - if (retries++ == RETRIES_BEFORE_LOCK) { - for (int j = 0; j < segments.length; ++j) - { - ensureSegment(j).lock(); // force creation - } - } - long hashSum = 0L; - int sum = 0; - for (int j = 0; j < segments.length; ++j) { - HashEntry[] tab; - Segment seg = segmentAt(segments, j); - if (seg != null && (tab = seg.table) != null) { - for (int i = 0 ; i < tab.length; i++) { - HashEntry e; - for (e = entryAt(tab, i); e != null; e = e.next) { - V v = e.value; - if (v != null && value.equals(v)) { - found = true; - break outer; - } - } - } - sum += seg.modCount; - } - } - if (retries > 0 && sum == last) { - break; - } - last = sum; - } - } finally { - if (retries > RETRIES_BEFORE_LOCK) { - for (int j = 0; j < segments.length; ++j) { - segmentAt(segments, j).unlock(); - } - } - } - return found; - } - - /** - * Legacy method testing if some key maps into the specified value - * in this table. 
This method is identical in functionality to - * {@link #containsValue}, and exists solely to ensure - * full compatibility with class {@link java.util.Hashtable}, - * which supported this method prior to introduction of the - * Java Collections framework. - - * @param value a value to search for - * @return true if and only if some key maps to the - * value argument in this table as - * determined by the equals method; - * false otherwise - * @throws NullPointerException if the specified value is null - */ - public boolean contains(Object value) { - return containsValue(value); - } - - /** - * Maps the specified key to the specified value in this table. - * Neither the key nor the value can be null. - * - *
<p>
The value can be retrieved by calling the get method - * with a key that is equal to the original key. - * - * @param key key with which the specified value is to be associated - * @param value value to be associated with the specified key - * @return the previous value associated with key, or - * null if there was no mapping for key - * @throws NullPointerException if the specified key or value is null - */ - @Override - @SuppressWarnings("unchecked") - public V put(K key, V value) { - Segment s; - if (value == null) { - throw new NullPointerException(); - } - int hash = hash(key); - int j = (hash >>> segmentShift) & segmentMask; - if ((s = (Segment)UNSAFE.getObject // nonvolatile; recheck - (segments, (j << SSHIFT) + SBASE)) == null) { - s = ensureSegment(j); - } - return s.put(key, hash, value, false); - } - - /** - * {@inheritDoc} - * - * @return the previous value associated with the specified key, - * or null if there was no mapping for the key - * @throws NullPointerException if the specified key or value is null - */ - @Override - @SuppressWarnings("unchecked") - public V putIfAbsent(K key, V value) { - Segment s; - if (value == null) { - throw new NullPointerException(); - } - int hash = hash(key); - int j = (hash >>> segmentShift) & segmentMask; - if ((s = (Segment)UNSAFE.getObject - (segments, (j << SSHIFT) + SBASE)) == null) { - s = ensureSegment(j); - } - return s.put(key, hash, value, true); - } - - /** - * Copies all of the mappings from the specified map to this one. - * These mappings replace any mappings that this map had for any of the - * keys currently in the specified map. - * - * @param m mappings to be stored in this map - */ - @Override - public void putAll(Map m) { - for (Map.Entry e : m.entrySet()) { - put(e.getKey(), e.getValue()); - } - } - - /** - * Removes the key (and its corresponding value) from this map. - * This method does nothing if the key is not in the map. - * - * @param key the key that needs to be removed - * @return the previous value associated with key, or - * null if there was no mapping for key - * @throws NullPointerException if the specified key is null - */ - @Override - public V remove(Object key) { - int hash = hash(key); - Segment s = segmentForHash(hash); - return s == null ? null : s.remove(key, hash, null); - } - - /** - * {@inheritDoc} - * - * @throws NullPointerException if the specified key is null - */ - @Override - public boolean remove(Object key, Object value) { - int hash = hash(key); - Segment s; - return value != null && (s = segmentForHash(hash)) != null && - s.remove(key, hash, value) != null; - } - - /** - * {@inheritDoc} - * - * @throws NullPointerException if any of the arguments are null - */ - @Override - public boolean replace(K key, V oldValue, V newValue) { - int hash = hash(key); - if (oldValue == null || newValue == null) { - throw new NullPointerException(); - } - Segment s = segmentForHash(hash); - return s != null && s.replace(key, hash, oldValue, newValue); - } - - /** - * {@inheritDoc} - * - * @return the previous value associated with the specified key, - * or null if there was no mapping for the key - * @throws NullPointerException if the specified key or value is null - */ - @Override - public V replace(K key, V value) { - int hash = hash(key); - if (value == null) { - throw new NullPointerException(); - } - Segment s = segmentForHash(hash); - return s == null ? null : s.replace(key, hash, value); - } - - /** - * Removes all of the mappings from this map. 
- */ - @Override - public void clear() { - final Segment[] segments = this.segments; - for (int j = 0; j < segments.length; ++j) { - Segment s = segmentAt(segments, j); - if (s != null) { - s.clear(); - } - } - } - - /** - * Returns a {@link Set} view of the keys contained in this map. - * The set is backed by the map, so changes to the map are - * reflected in the set, and vice-versa. The set supports element - * removal, which removes the corresponding mapping from this map, - * via the Iterator.remove, Set.remove, - * removeAll, retainAll, and clear - * operations. It does not support the add or - * addAll operations. - * - *
<p>
The view's iterator is a "weakly consistent" iterator - * that will never throw {@link ConcurrentModificationException}, - * and guarantees to traverse elements as they existed upon - * construction of the iterator, and may (but is not guaranteed to) - * reflect any modifications subsequent to construction. - */ - @Override - public Set keySet() { - Set ks = keySet; - return (ks != null) ? ks : (keySet = new KeySet()); - } - - /** - * Returns a {@link Collection} view of the values contained in this map. - * The collection is backed by the map, so changes to the map are - * reflected in the collection, and vice-versa. The collection - * supports element removal, which removes the corresponding - * mapping from this map, via the Iterator.remove, - * Collection.remove, removeAll, - * retainAll, and clear operations. It does not - * support the add or addAll operations. - * - *
<p>
The view's iterator is a "weakly consistent" iterator - * that will never throw {@link ConcurrentModificationException}, - * and guarantees to traverse elements as they existed upon - * construction of the iterator, and may (but is not guaranteed to) - * reflect any modifications subsequent to construction. - */ - @Override - public Collection values() { - Collection vs = values; - return (vs != null) ? vs : (values = new Values()); - } - - /** - * Returns a {@link Set} view of the mappings contained in this map. - * The set is backed by the map, so changes to the map are - * reflected in the set, and vice-versa. The set supports element - * removal, which removes the corresponding mapping from the map, - * via the Iterator.remove, Set.remove, - * removeAll, retainAll, and clear - * operations. It does not support the add or - * addAll operations. - * - *
<p>
The view's iterator is a "weakly consistent" iterator - * that will never throw {@link ConcurrentModificationException}, - * and guarantees to traverse elements as they existed upon - * construction of the iterator, and may (but is not guaranteed to) - * reflect any modifications subsequent to construction. - */ - @Override - public Set> entrySet() { - Set> es = entrySet; - return (es != null) ? es : (entrySet = new EntrySet()); - } - - /** - * Returns an enumeration of the keys in this table. - * - * @return an enumeration of the keys in this table - * @see #keySet() - */ - public Enumeration keys() { - return new KeyIterator(); - } - - /** - * Returns an enumeration of the values in this table. - * - * @return an enumeration of the values in this table - * @see #values() - */ - public Enumeration elements() { - return new ValueIterator(); - } - - /* ---------------- Iterator Support -------------- */ - - abstract class HashIterator { - int nextSegmentIndex; - int nextTableIndex; - HashEntry[] currentTable; - HashEntry nextEntry; - HashEntry lastReturned; - - HashIterator() { - nextSegmentIndex = segments.length - 1; - nextTableIndex = -1; - advance(); - } - - /** - * Set nextEntry to first node of next non-empty table - * (in backwards order, to simplify checks). - */ - final void advance() { - for (;;) { - if (nextTableIndex >= 0) { - if ((nextEntry = entryAt(currentTable, - nextTableIndex--)) != null) { - break; - } - } - else if (nextSegmentIndex >= 0) { - Segment seg = segmentAt(segments, nextSegmentIndex--); - if (seg != null && (currentTable = seg.table) != null) { - nextTableIndex = currentTable.length - 1; - } - } else { - break; - } - } - } - - final HashEntry nextEntry() { - HashEntry e = nextEntry; - if (e == null) { - throw new NoSuchElementException(); - } - lastReturned = e; // cannot assign until after null check - if ((nextEntry = e.next) == null) { - advance(); - } - return e; - } - - public final boolean hasNext() { return nextEntry != null; } - public final boolean hasMoreElements() { return nextEntry != null; } - - public final void remove() { - if (lastReturned == null) { - throw new IllegalStateException(); - } - ConcurrentHashMapV7.this.remove(lastReturned.key); - lastReturned = null; - } - } - - final class KeyIterator - extends HashIterator - implements Iterator, Enumeration - { - @Override - public final K next() { return super.nextEntry().key; } - @Override - public final K nextElement() { return super.nextEntry().key; } - } - - final class ValueIterator - extends HashIterator - implements Iterator, Enumeration - { - @Override - public final V next() { return super.nextEntry().value; } - @Override - public final V nextElement() { return super.nextEntry().value; } - } - - /** - * Custom Entry class used by EntryIterator.next(), that relays - * setValue changes to the underlying map. - */ - final class WriteThroughEntry - extends AbstractMap.SimpleEntry - { - WriteThroughEntry(K k, V v) { - super(k,v); - } - - /** - * Set our entry's value and write through to the map. The - * value to return is somewhat arbitrary here. Since a - * WriteThroughEntry does not necessarily track asynchronous - * changes, the most recent "previous" value could be - * different from what we return (or could even have been - * removed in which case the put will re-establish). We do not - * and cannot guarantee more. 
- */ - @Override - public V setValue(V value) { - if (value == null) { - throw new NullPointerException(); - } - V v = super.setValue(value); - ConcurrentHashMapV7.this.put(getKey(), value); - return v; - } - } - - final class EntryIterator - extends HashIterator - implements Iterator> - { - @Override - public Map.Entry next() { - HashEntry e = super.nextEntry(); - return new WriteThroughEntry(e.key, e.value); - } - } - - final class KeySet extends AbstractSet { - @Override - public Iterator iterator() { - return new KeyIterator(); - } - @Override - public int size() { - return ConcurrentHashMapV7.this.size(); - } - @Override - public boolean isEmpty() { - return ConcurrentHashMapV7.this.isEmpty(); - } - @Override - public boolean contains(Object o) { - return ConcurrentHashMapV7.this.containsKey(o); - } - @Override - public boolean remove(Object o) { - return ConcurrentHashMapV7.this.remove(o) != null; - } - @Override - public void clear() { - ConcurrentHashMapV7.this.clear(); - } - } - - final class Values extends AbstractCollection { - @Override - public Iterator iterator() { - return new ValueIterator(); - } - @Override - public int size() { - return ConcurrentHashMapV7.this.size(); - } - @Override - public boolean isEmpty() { - return ConcurrentHashMapV7.this.isEmpty(); - } - @Override - public boolean contains(Object o) { - return ConcurrentHashMapV7.this.containsValue(o); - } - @Override - public void clear() { - ConcurrentHashMapV7.this.clear(); - } - } - - final class EntrySet extends AbstractSet> { - @Override - public Iterator> iterator() { - return new EntryIterator(); - } - @Override - public boolean contains(Object o) { - if (!(o instanceof Map.Entry)) { - return false; - } - Map.Entry e = (Map.Entry)o; - V v = ConcurrentHashMapV7.this.get(e.getKey()); - return v != null && v.equals(e.getValue()); - } - @Override - public boolean remove(Object o) { - if (!(o instanceof Map.Entry)) { - return false; - } - Map.Entry e = (Map.Entry)o; - return ConcurrentHashMapV7.this.remove(e.getKey(), e.getValue()); - } - @Override - public int size() { - return ConcurrentHashMapV7.this.size(); - } - @Override - public boolean isEmpty() { - return ConcurrentHashMapV7.this.isEmpty(); - } - @Override - public void clear() { - ConcurrentHashMapV7.this.clear(); - } - } - - /* ---------------- Serialization Support -------------- */ - - /** - * Save the state of the ConcurrentHashMap instance to a - * stream (i.e., serialize it). - * @param s the stream - * @serialData - * the key (Object) and value (Object) - * for each key-value mapping, followed by a null pair. - * The key-value mappings are emitted in no particular order. - */ - private void writeObject(java.io.ObjectOutputStream s) throws IOException { - // force all segments for serialization compatibility - for (int k = 0; k < segments.length; ++k) { - ensureSegment(k); - } - s.defaultWriteObject(); - - final Segment[] segments = this.segments; - for (int k = 0; k < segments.length; ++k) { - Segment seg = segmentAt(segments, k); - seg.lock(); - try { - HashEntry[] tab = seg.table; - for (int i = 0; i < tab.length; ++i) { - HashEntry e; - for (e = entryAt(tab, i); e != null; e = e.next) { - s.writeObject(e.key); - s.writeObject(e.value); - } - } - } finally { - seg.unlock(); - } - } - s.writeObject(null); - s.writeObject(null); - } - - /** - * Reconstitute the ConcurrentHashMap instance from a - * stream (i.e., deserialize it). 
- * @param s the stream - */ - @SuppressWarnings("unchecked") - private void readObject(java.io.ObjectInputStream s) - throws IOException, ClassNotFoundException { - // Don't call defaultReadObject() - ObjectInputStream.GetField oisFields = s.readFields(); - final Segment[] oisSegments = (Segment[])oisFields.get("segments", null); - - final int ssize = oisSegments.length; - if (ssize < 1 || ssize > MAX_SEGMENTS - || (ssize & (ssize-1)) != 0 ) { - throw new java.io.InvalidObjectException("Bad number of segments:" - + ssize); - } - int sshift = 0, ssizeTmp = ssize; - while (ssizeTmp > 1) { - ++sshift; - ssizeTmp >>>= 1; - } - UNSAFE.putIntVolatile(this, SEGSHIFT_OFFSET, 32 - sshift); - UNSAFE.putIntVolatile(this, SEGMASK_OFFSET, ssize - 1); - UNSAFE.putObjectVolatile(this, SEGMENTS_OFFSET, oisSegments); - - // set hashMask - UNSAFE.putIntVolatile(this, HASHSEED_OFFSET, randomHashSeed(this)); - - // Re-initialize segments to be minimally sized, and let grow. - int cap = MIN_SEGMENT_TABLE_CAPACITY; - final Segment[] segments = this.segments; - for (int k = 0; k < segments.length; ++k) { - Segment seg = segments[k]; - if (seg != null) { - seg.threshold = (int)(cap * seg.loadFactor); - seg.table = new HashEntry[cap]; - } - } - - // Read the keys and values, and put the mappings in the table - for (;;) { - K key = (K) s.readObject(); - V value = (V) s.readObject(); - if (key == null) { - break; - } - put(key, value); - } - } - - // Unsafe mechanics - private static final long SBASE; - private static final int SSHIFT; - private static final long TBASE; - private static final int TSHIFT; - private static final long HASHSEED_OFFSET; - private static final long SEGSHIFT_OFFSET; - private static final long SEGMASK_OFFSET; - private static final long SEGMENTS_OFFSET; - - static { - int ss, ts; - try { - Class tc = HashEntry[].class; - Class sc = Segment[].class; - TBASE = UNSAFE.arrayBaseOffset(tc); - SBASE = UNSAFE.arrayBaseOffset(sc); - ts = UNSAFE.arrayIndexScale(tc); - ss = UNSAFE.arrayIndexScale(sc); - HASHSEED_OFFSET = UNSAFE.objectFieldOffset( - ConcurrentHashMapV7.class.getDeclaredField("hashSeed")); - SEGSHIFT_OFFSET = UNSAFE.objectFieldOffset( - ConcurrentHashMapV7.class.getDeclaredField("segmentShift")); - SEGMASK_OFFSET = UNSAFE.objectFieldOffset( - ConcurrentHashMapV7.class.getDeclaredField("segmentMask")); - SEGMENTS_OFFSET = UNSAFE.objectFieldOffset( - ConcurrentHashMapV7.class.getDeclaredField("segments")); - } catch (Exception e) { - throw new Error(e); - } - if ((ss & (ss-1)) != 0 || (ts & (ts-1)) != 0) { - throw new Error("data type scale not a power of two"); - } - SSHIFT = 31 - Integer.numberOfLeadingZeros(ss); - TSHIFT = 31 - Integer.numberOfLeadingZeros(ts); - } - - public static int murmur3_32(int[] data) { - return murmur3_32(0, data, 0, data.length); - } - - public static int murmur3_32(int seed, int[] data) { - return murmur3_32(seed, data, 0, data.length); - } - - public static int murmur3_32(int seed, int[] data, int offset, int len) { - int h1 = seed; - - int off = offset; - int end = offset + len; - - // body - while (off < end) { - int k1 = data[off++]; - - k1 *= 0xcc9e2d51; - k1 = Integer.rotateLeft(k1, 15); - k1 *= 0x1b873593; - - h1 ^= k1; - h1 = Integer.rotateLeft(h1, 13); - h1 = h1 * 5 + 0xe6546b64; - } - - // tail (always empty, as body is always 32-bit chunks) - - // finalization - - h1 ^= len * (Integer.SIZE / Byte.SIZE); - - // finalization mix force all bits of a hash block to avalanche - h1 ^= h1 >>> 16; - h1 *= 0x85ebca6b; - h1 ^= h1 >>> 13; - h1 *= 
-        h1 ^= h1 >>> 16;
-
-        return h1;
-    }
-}
diff --git a/caffeine/src/test/java/com/github/benmanes/caffeine/cache/BoundedLocalCacheTest.java b/caffeine/src/test/java/com/github/benmanes/caffeine/cache/BoundedLocalCacheTest.java
index d196cedc32..1047479153 100644
--- a/caffeine/src/test/java/com/github/benmanes/caffeine/cache/BoundedLocalCacheTest.java
+++ b/caffeine/src/test/java/com/github/benmanes/caffeine/cache/BoundedLocalCacheTest.java
@@ -1061,6 +1061,10 @@ public void evict_zeroWeight_victim(BoundedLocalCache<Int, Int> cache, CacheCont
   @Test(dataProvider = "caches")
   @CacheSpec(population = Population.EMPTY, maximumSize = Maximum.FULL)
   public void evict_admit(BoundedLocalCache<Int, Int> cache, CacheContext context) {
+    // This test requires that the JVM is run with --add-opens java.base/java.lang=ALL-UNNAMED
+    // so that the internal random fields can be overridden
+    Reset.resetThreadLocalRandom();
+
     cache.frequencySketch().ensureCapacity(context.maximumSize());
     Int candidate = Int.valueOf(0);
     Int victim = Int.valueOf(1);
diff --git a/caffeine/src/test/java/com/github/benmanes/caffeine/cache/Reset.java b/caffeine/src/test/java/com/github/benmanes/caffeine/cache/Reset.java
index 10f96afc53..ec88aaee2b 100644
--- a/caffeine/src/test/java/com/github/benmanes/caffeine/cache/Reset.java
+++ b/caffeine/src/test/java/com/github/benmanes/caffeine/cache/Reset.java
@@ -15,7 +15,8 @@
  */
 package com.github.benmanes.caffeine.cache;
 
-import org.jctools.util.UnsafeAccess;
+import java.lang.reflect.Field;
+import java.lang.reflect.InaccessibleObjectException;
 
 import com.github.benmanes.caffeine.cache.LocalAsyncCache.AbstractCacheView;
 
@@ -25,22 +26,27 @@
  * @author ben.manes@gmail.com (Ben Manes)
  */
 public final class Reset {
-  static final long PROBE = UnsafeAccess.fieldOffset(Thread.class, "threadLocalRandomProbe");
-  static final long SEED = UnsafeAccess.fieldOffset(Thread.class, "threadLocalRandomSeed");
-  static final int RANDOM_SEED = 1_033_096_058;
-  static final int RANDOM_PROBE = 0x9e3779b9;
+  private static final int RANDOM_SEED = 1_033_096_058;
+  private static final int RANDOM_PROBE = 0x9e3779b9;
 
   private Reset() {}
 
   /** Forces the eviction jitter to be predictable. */
+  @SuppressWarnings("PMD.AvoidAccessibilityAlteration")
   public static void resetThreadLocalRandom() {
-    setThreadLocalRandom(RANDOM_PROBE, RANDOM_SEED);
-  }
+    try {
+      Field probe = Thread.class.getDeclaredField("threadLocalRandomProbe");
+      Field seed = Thread.class.getDeclaredField("threadLocalRandomSeed");
+      probe.setAccessible(true);
+      seed.setAccessible(true);
 
-  @SuppressWarnings("SunApi")
-  public static void setThreadLocalRandom(int probe, int seed) {
-    UnsafeAccess.UNSAFE.putInt(Thread.currentThread(), PROBE, probe);
-    UnsafeAccess.UNSAFE.putLong(Thread.currentThread(), SEED, seed);
+      probe.setInt(Thread.currentThread(), RANDOM_PROBE);
+      seed.setLong(Thread.currentThread(), RANDOM_SEED);
+    } catch (InaccessibleObjectException e) {
+      throw new AssertionError("Requires --add-opens java.base/java.lang=ALL-UNNAMED", e);
+    } catch (NoSuchFieldException | IllegalAccessException e) {
+      throw new AssertionError("Failed to set ThreadLocalRandom fields", e);
+    }
   }
 
   /** Clears the internal state of the cache and becomes unusable.
    */
diff --git a/caffeine/src/test/java/com/github/benmanes/caffeine/cache/TimerWheelTest.java b/caffeine/src/test/java/com/github/benmanes/caffeine/cache/TimerWheelTest.java
index 76342abddd..99ac348f83 100644
--- a/caffeine/src/test/java/com/github/benmanes/caffeine/cache/TimerWheelTest.java
+++ b/caffeine/src/test/java/com/github/benmanes/caffeine/cache/TimerWheelTest.java
@@ -83,7 +83,6 @@ public final class TimerWheelTest {
 
   @BeforeMethod
   public void beforeMethod() {
-    Reset.setThreadLocalRandom(random.nextInt(), random.nextInt());
     timerWheel = new TimerWheel<>();
   }
 
diff --git a/caffeine/src/test/java/com/github/benmanes/caffeine/cache/testing/CaffeineCacheFromContext.java b/caffeine/src/test/java/com/github/benmanes/caffeine/cache/testing/CaffeineCacheFromContext.java
index fecea631d5..9c2cb7d9c4 100644
--- a/caffeine/src/test/java/com/github/benmanes/caffeine/cache/testing/CaffeineCacheFromContext.java
+++ b/caffeine/src/test/java/com/github/benmanes/caffeine/cache/testing/CaffeineCacheFromContext.java
@@ -19,7 +19,6 @@
 
 import com.github.benmanes.caffeine.cache.Cache;
 import com.github.benmanes.caffeine.cache.Caffeine;
-import com.github.benmanes.caffeine.cache.Reset;
 import com.github.benmanes.caffeine.cache.Ticker;
 import com.github.benmanes.caffeine.cache.testing.CacheSpec.CacheExecutor;
 import com.github.benmanes.caffeine.cache.testing.CacheSpec.CacheExpiry;
@@ -113,7 +112,6 @@ public static <K, V> Cache<K, V> newCaffeineCache(CacheContext context) {
 
     @SuppressWarnings("unchecked")
     var castedCache = (Cache<K, V>) context.cache;
-    Reset.resetThreadLocalRandom();
     return castedCache;
   }
 }
diff --git a/gradle/config/spotbugs/exclude.xml b/gradle/config/spotbugs/exclude.xml
index 45442f1632..545e62444c 100644
--- a/gradle/config/spotbugs/exclude.xml
+++ b/gradle/config/spotbugs/exclude.xml
@@ -296,9 +296,6 @@
-
-
-
@@ -537,6 +534,13 @@
+
+
+
+
+
diff --git a/gradle/plugins/src/main/kotlin/lifecycle/java-library.caffeine.gradle.kts b/gradle/plugins/src/main/kotlin/lifecycle/java-library.caffeine.gradle.kts
index 440f9c230e..1934546c74 100644
--- a/gradle/plugins/src/main/kotlin/lifecycle/java-library.caffeine.gradle.kts
+++ b/gradle/plugins/src/main/kotlin/lifecycle/java-library.caffeine.gradle.kts
@@ -40,8 +40,8 @@ tasks.withType<JavaCompile>().configureEach {
   options.apply {
     javaModuleVersion = provider { version as String }
    compilerArgs.addAll(listOf("-Xlint:all", "-Xlint:-auxiliaryclass", "-Xlint:-classfile",
-      "-Xlint:-exports", "-Xlint:-processing", "-Xlint:-removal", "-Xlint:-requires-automatic",
-      "-parameters", "-Xmaxerrs", "500", "-Xmaxwarns", "500"))
+      "-Xlint:-exports", "-Xlint:-processing", "-Xlint:-requires-automatic", "-parameters",
+      "-Xmaxerrs", "500", "-Xmaxwarns", "500"))
     if (isCI()) {
      compilerArgs.add("-Werror")
     }
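
Note (illustrative, not part of the patch): the striped64_varHandle benchmark retained above resolves the probe with a VarHandle rather than sun.misc.Unsafe. A minimal self-contained sketch of that approach follows, assuming the JVM is started with --add-opens java.base/java.lang=ALL-UNNAMED (the same flag the test configuration now adds); the class ProbeSketch and its method names are hypothetical and do not appear in this change.

import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;
import java.util.concurrent.ThreadLocalRandom;

public final class ProbeSketch {
  // Resolves Thread.threadLocalRandomProbe via a private lookup; this only
  // succeeds when java.base/java.lang is opened to the caller's module
  // (e.g. --add-opens java.base/java.lang=ALL-UNNAMED)
  private static final VarHandle PROBE;

  static {
    try {
      var lookup = MethodHandles.privateLookupIn(Thread.class, MethodHandles.lookup());
      PROBE = lookup.findVarHandle(Thread.class, "threadLocalRandomProbe", int.class);
    } catch (ReflectiveOperationException e) {
      throw new ExceptionInInitializerError(e);
    }
  }

  /** Returns the current thread's probe value, forcing initialization if unset. */
  public static int getProbe() {
    int probe = (int) PROBE.get(Thread.currentThread());
    if (probe == 0) {
      ThreadLocalRandom.current(); // initializes the probe as a side effect
      probe = (int) PROBE.get(Thread.currentThread());
    }
    return probe;
  }

  /** Advances the probe using the xorshift from java.util.concurrent.atomic.Striped64. */
  public static void advanceProbe(int probe) {
    probe ^= probe << 13; // xorshift
    probe ^= probe >>> 17;
    probe ^= probe << 5;
    PROBE.set(Thread.currentThread(), probe);
  }

  private ProbeSketch() {}
}

A caller pairs these the way the benchmark does: read the probe, fall back to initializing ThreadLocalRandom when it is zero, use the hash to select a slot, and then advance the probe for the next access.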