最近有点时间,翻了翻ConcurrentHashMap的源码学习了一下,对我自己认为比较重要的一些方法进行了学习,添加了一些必要的注释,拿出来与园子的小伙伴分享一下,有说的不对的地方,还请各位批评指正,欢迎交流。
话不多说,上源码:
package cn.com.wwh.concurrent;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.*;
import java.util.*;
import java.io.Serializable;
import java.io.IOException;
/**
* A hash table supporting full concurrency of retrievals and adjustable
* expected concurrency for updates. This class obeys the same functional
* specification as {@link java.util.Hashtable}, and includes versions of
* methods corresponding to each method of <tt>Hashtable</tt>. However, even
* though all operations are thread-safe, retrieval operations do <em>not</em>
* entail locking, and there is <em>not</em> any support for locking the entire
* table in a way that prevents all access. This class is fully interoperable
* with <tt>Hashtable</tt> in programs that rely on its thread safety but not on
* its synchronization details.
*
* <p>
* Retrieval operations (including <tt>get</tt>) generally do not block, so may
* overlap with update operations (including <tt>put</tt> and <tt>remove</tt>).
* Retrievals reflect the results of the most recently <em>completed</em> update
* operations holding upon their onset. For aggregate operations such as
* <tt>putAll</tt> and <tt>clear</tt>, concurrent retrievals may reflect
* insertion or removal of only some entries. Similarly, Iterators and
* Enumerations return elements reflecting the state of the hash table at some
* point at or since the creation of the iterator/enumeration. They do
* <em>not</em> throw {@link ConcurrentModificationException}. However,
* iterators are designed to be used by only one thread at a time.
*
* <p>
* The allowed concurrency among update operations is guided by the optional
* <tt>concurrencyLevel</tt> constructor argument (default <tt>16</tt>), which
* is used as a hint for internal sizing. The table is internally partitioned to
* try to permit the indicated number of concurrent updates without contention.
* Because placement in hash tables is essentially random, the actual
* concurrency will vary. Ideally, you should choose a value to accommodate as
* many threads as will ever concurrently modify the table. Using a
* significantly higher value than you need can waste space and time, and a
* significantly lower value can lead to thread contention. But overestimates
* and underestimates within an order of magnitude do not usually have much
* noticeable impact. A value of one is appropriate when it is known that only
* one thread will modify and all others will only read. Also, resizing this or
* any other kind of hash table is a relatively slow operation, so, when
* possible, it is a good idea to provide estimates of expected table sizes in
* constructors.
*
* <p>
* This class and its views and iterators implement all of the <em>optional</em>
* methods of the {@link Map} and {@link Iterator} interfaces.
*
* <p>
* Like {@link Hashtable} but unlike {@link HashMap}, this class does
* <em>not</em> allow <tt>null</tt> to be used as a key or value.
*
* <p>
* This class is a member of the
* <a href="{@docRoot}/../technotes/guides/collections/index.html"> Java
* Collections Framework</a>.
*
* @since 1.5
* @author Doug Lea
* @param <K> the type of keys maintained by this map
* @param <V> the type of mapped values
*/
public class ConcurrentHashMap<K, V> extends AbstractMap<K, V> implements ConcurrentMap<K, V>, Serializable {
private static final long serialVersionUID = 7249069246763182397L;
/*
* The basic strategy is to subdivide the table among Segments, each of which
* itself is a concurrently readable hash table. To reduce footprint, all but
* one segments are constructed only when first needed (see ensureSegment). To
* maintain visibility in the presence of lazy construction, accesses to
* segments as well as elements of segment's table must use volatile access,
* which is done via Unsafe within methods segmentAt etc below. These provide
* the functionality of AtomicReferenceArrays but reduce the levels of
* indirection. Additionally, volatile-writes of table elements and entry "next"
* fields within locked operations use the cheaper "lazySet" forms of writes
* (via putOrderedObject) because these writes are always followed by lock
* releases that maintain sequential consistency of table updates.
*
* Historical note: The previous version of this class relied heavily on "final"
* fields, which avoided some volatile reads at the expense of a large initial
* footprint. Some remnants of that design (including forced construction of
* segment 0) exist to ensure serialization compatibility.
*/
/* ---------------- Constants -------------- */
/**
* The default initial capacity for this table, used when not otherwise
* specified in a constructor.
*/
static final int DEFAULT_INITIAL_CAPACITY = 16;
/**
* The default load factor for this table, used when not otherwise specified in
* a constructor.
*/
static final float DEFAULT_LOAD_FACTOR = 0.75f;
/**
* The default concurrency level for this table, used when not otherwise
* specified in a constructor.
*/
static final int DEFAULT_CONCURRENCY_LEVEL = 16;
/**
* The maximum capacity, used if a higher value is implicitly specified by
* either of the constructors with arguments. MUST be a power of two <= 1<<30 to
* ensure that entries are indexable using ints.
*/
static final int MAXIMUM_CAPACITY = 1 << 30;
/**
* The minimum capacity for per-segment tables. Must be a power of two, at least
* two to avoid immediate resizing on next use after lazy construction.
*/
static final int MIN_SEGMENT_TABLE_CAPACITY = 2;
/**
* The maximum number of segments to allow; used to bound constructor arguments.
* Must be power of two less than 1 << 24.
*/
static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
/**
* Number of unsynchronized retries in size and containsValue methods before
* resorting to locking. This is used to avoid unbounded retries if tables
* undergo continuous modification which would make it impossible to obtain an
* accurate result.
*/
static final int RETRIES_BEFORE_LOCK = 2;
/* ---------------- Fields -------------- */
/**
 * Holds values which can't be initialized until after VM is booted.
 */
private static class Holder {
/**
 * Enable alternative hashing of String keys?
 *
 * <p>
 * Unlike the other hash map implementations we do not implement a threshold for
 * regulating whether alternative hashing is used for String keys. Alternative
 * hashing is either enabled for all instances or disabled for all instances.
 */
static final boolean ALTERNATIVE_HASHING;
static {
// Use the "threshold" system property even though our threshold
// behaviour is "ON" or "OFF".
String altThreshold = java.security.AccessController
.doPrivileged(new sun.security.action.GetPropertyAction("jdk.map.althashing.threshold"));
int threshold;
try {
// Absent property means "never" (threshold effectively infinite).
threshold = (null != altThreshold) ? Integer.parseInt(altThreshold) : Integer.MAX_VALUE;
// disable alternative hashing if -1
if (threshold == -1) {
threshold = Integer.MAX_VALUE;
}
if (threshold < 0) {
throw new IllegalArgumentException("value must be positive integer.");
}
} catch (IllegalArgumentException failed) {
throw new Error("Illegal value for 'jdk.map.althashing.threshold'", failed);
}
// ON exactly when the configured threshold is at or below the maximum
// possible table capacity; otherwise it can never trigger.
ALTERNATIVE_HASHING = threshold <= MAXIMUM_CAPACITY;
}
}
/**
 * A randomizing value associated with this instance that is applied to hash
 * code of keys to make hash collisions harder to find.
 */
private transient final int hashSeed = randomHashSeed(this);
/**
 * Returns a per-instance random seed when the VM is booted and alternative
 * hashing is enabled (see Holder.ALTERNATIVE_HASHING); otherwise 0, which
 * makes hash() fall through to the plain supplemental hash.
 */
private static int randomHashSeed(ConcurrentHashMap instance) {
if (sun.misc.VM.isBooted() && Holder.ALTERNATIVE_HASHING) {
return sun.misc.Hashing.randomHashSeed(instance);
}
return 0;
}
/**
* Mask value for indexing into segments. The upper bits of a key's hash code
* are used to choose the segment.
*/
final int segmentMask;
/**
* Shift value for indexing within segments.
*/
final int segmentShift;
/**
* The segments, each of which is a specialized hash table.
*/
final Segment<K, V>[] segments;
transient Set<K> keySet;
transient Set<Map.Entry<K, V>> entrySet;
transient Collection<V> values;
/**
 * ConcurrentHashMap list entry. Note that this is never exported out as a
 * user-visible Map.Entry.
 */
static final class HashEntry<K, V> {
final int hash; // cached hash of key, fixed at construction
final K key;
volatile V value; // volatile so get() can read without locking
volatile HashEntry<K, V> next; // volatile link to next node in the chain
HashEntry(int hash, K key, V value, HashEntry<K, V> next) {
this.hash = hash;
this.key = key;
this.value = value;
this.next = next;
}
/**
 * Sets next field with volatile write semantics. (See above about use of
 * putOrderedObject.)
 */
final void setNext(HashEntry<K, V> n) {
UNSAFE.putOrderedObject(this, nextOffset, n);
}
// Unsafe mechanics: cache the field offset of "next" so setNext can
// perform the cheaper ordered (lazySet-style) write.
static final sun.misc.Unsafe UNSAFE;
static final long nextOffset;
static {
try {
UNSAFE = sun.misc.Unsafe.getUnsafe();
Class k = HashEntry.class;
nextOffset = UNSAFE.objectFieldOffset(k.getDeclaredField("next"));
} catch (Exception e) {
throw new Error(e);
}
}
}
/**
 * Gets the ith element of given table (if nonnull) with volatile read
 * semantics. Note: This is manually integrated into a few performance-sensitive
 * methods to reduce call overhead.
 */
@SuppressWarnings("unchecked")
static final <K, V> HashEntry<K, V> entryAt(HashEntry<K, V>[] tab, int i) {
    // Volatile read of the head node of bucket i.
    // Bug fix: the original computed "i << TSHIFT + TBASE", which parses as
    // "i << (TSHIFT + TBASE)" because '+' binds tighter than '<<', yielding
    // a bogus byte offset. The correct element offset is
    // ((long) i << TSHIFT) + TBASE; widening to long also avoids int
    // overflow of the offset arithmetic.
    return (tab == null) ? null
            : (HashEntry<K, V>) UNSAFE.getObjectVolatile(tab, ((long) i << TSHIFT) + TBASE);
}
/**
 * Sets the ith element of given table, with volatile write semantics. (See
 * above about use of putOrderedObject.)
 */
static final <K, V> void setEntryAt(HashEntry<K, V>[] tab, int i, HashEntry<K, V> e) {
    // Widen the index to long before shifting so the computed byte offset
    // cannot overflow int, matching the offset arithmetic used elsewhere.
    UNSAFE.putOrderedObject(tab, ((long) i << TSHIFT) + TBASE, e);
}
/**
 * Applies a supplemental hash function to a given hashCode, which defends
 * against poor quality hash functions. This is critical because
 * ConcurrentHashMap uses power-of-two length hash tables, that otherwise
 * encounter collisions for hashCodes that do not differ in lower or upper bits.
 */
private int hash(Object k) {
int h = hashSeed;
// When alternative hashing is active (nonzero seed), String keys use the
// dedicated hash32 to defeat engineered collisions.
if ((0 != h) && (k instanceof String)) {
return sun.misc.Hashing.stringHash32((String) k);
}
// Note: k.hashCode() throws NullPointerException for a null key, which
// enforces this map's null-hostile key contract.
h ^= k.hashCode();
// Spread bits to regularize both segment and index locations,
// using variant of single-word Wang/Jenkins hash.
h += (h << 15) ^ 0xffffcd7d;
h ^= (h >>> 10);
h += (h << 3);
h ^= (h >>> 6);
h += (h << 2) + (h << 14);
return h ^ (h >>> 16);
}
/**
* Segments are specialized versions of hash tables. This subclasses from
* ReentrantLock opportunistically, just to simplify some locking and avoid
* separate construction.
*/
static final class Segment<K, V> extends ReentrantLock implements Serializable {
/*
* Segments maintain a table of entry lists that are always kept in a consistent
* state, so can be read (via volatile reads of segments and tables) without
* locking. This requires replicating nodes when necessary during table
* resizing, so the old lists can be traversed by readers still using old
* version of table.
*
* This class defines only mutative methods requiring locking. Except as noted,
* the methods of this class perform the per-segment versions of
* ConcurrentHashMap methods. (Other methods are integrated directly into
* ConcurrentHashMap methods.) These mutative methods use a form of controlled
* spinning on contention via methods scanAndLock and scanAndLockForPut. These
* intersperse tryLocks with traversals to locate nodes. The main benefit is to
* absorb cache misses (which are very common for hash tables) while obtaining
* locks so that traversal is faster once acquired. We do not actually use the
* found nodes since they must be re-acquired under lock anyway to ensure
* sequential consistency of updates (and in any case may be undetectably
* stale), but they will normally be much faster to re-locate. Also,
* scanAndLockForPut speculatively creates a fresh node to use in put if no node
* is found.
*/
private static final long serialVersionUID = 2249069246763182397L;
/**
 * The maximum number of times to tryLock in a prescan before possibly blocking
 * on acquire in preparation for a locked segment operation. On multiprocessors,
 * using a bounded number of retries maintains cache acquired while locating
 * nodes.
 */
static final int MAX_SCAN_RETRIES = Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
/**
 * The per-segment table. Elements are accessed via entryAt/setEntryAt providing
 * volatile semantics.
 */
transient volatile HashEntry<K, V>[] table;
/**
 * The number of elements. Accessed only either within locks or among other
 * volatile reads that maintain visibility.
 */
transient int count;// number of key-value nodes currently in this segment
/**
 * The total number of mutative operations in this segment. Even though this may
 * overflows 32 bits, it provides sufficient accuracy for stability checks in
 * CHM isEmpty() and size() methods. Accessed only either within locks or among
 * other volatile reads that maintain visibility.
 */
transient int modCount;// number of structural write operations performed
/**
 * The table is rehashed when its size exceeds this threshold. (The value of
 * this field is always <tt>(int)(capacity *
 * loadFactor)</tt>.)
 */
transient int threshold;// resize threshold: capacity * loadFactor
/**
 * The load factor for the hash table. Even though this value is same for all
 * segments, it is replicated to avoid needing links to outer object.
 *
 * @serial
 */
final float loadFactor;// load factor, replicated per segment
Segment(float lf, int threshold, HashEntry<K, V>[] tab) {
this.loadFactor = lf;
this.threshold = threshold;
this.table = tab;
}
/**
 * Per-segment put. Called with the key's full hash; returns the previous
 * value, or null if the key was absent. When onlyIfAbsent is true an
 * existing mapping is left untouched (putIfAbsent semantics).
 */
final V put(K key, int hash, V value, boolean onlyIfAbsent) {
/* Acquire the segment lock; if tryLock fails, spin/scan (and possibly
 * pre-create the node) in scanAndLockForPut before blocking. */
HashEntry<K, V> node = tryLock() ? null : scanAndLockForPut(key, hash, value);
V oldValue = null;
try {
HashEntry<K, V>[] tab = table;
int index = (tab.length - 1) & hash;
/* Head node of the bucket this hash maps to. */
HashEntry<K, V> first = entryAt(tab, index);
for (HashEntry<K, V> e = first;;) {
if (e != null) {
K k;
if ((k = e.key) == key || (e.hash == hash && key.equals(k))) {
// Key already present: overwrite unless onlyIfAbsent.
oldValue = e.value;
if (!onlyIfAbsent) {
e.value = value;
++modCount;
}
break;
}
e = e.next;
} else {
/* Key not found. If a node was speculatively created while
 * acquiring the lock, link it in as the new bucket head. */
if (node != null) {
node.next = first;
} else {
/* Otherwise create the new head node now. */
node = new HashEntry<K, V>(hash, key, value, first);
}
int c = count + 1;
/* Resize this segment's table if the new count exceeds the
 * threshold (and the table can still grow). */
if (c > threshold && tab.length < MAXIMUM_CAPACITY) {
rehash(node);
} else {
setEntryAt(tab, index, node);
}
++modCount;
count = c;
oldValue = null;
break;
}
}
} finally {
unlock();
}
return oldValue;
}
/**
 * Doubles size of table and repacks entries, also adding the given node to new
 * table.
 *
 * Because the capacity is always a power of two and doubles on resize, a
 * node at old index m can land at only one of two new indices: m itself,
 * or m + oldCapacity (the new index uses exactly one extra hash bit). So
 * each old chain splits across at most two new chains.
 */
@SuppressWarnings("unchecked")
private void rehash(HashEntry<K, V> node) {
HashEntry<K, V>[] oldTable = table;
int oldCapacity = oldTable.length;
int newCapacity = oldCapacity << 1;
threshold = (int) (newCapacity * loadFactor);
HashEntry<K, V>[] newTable = new HashEntry[newCapacity];
int sizeMask = newCapacity - 1;
for (int i = 0; i < oldCapacity; i++) {
HashEntry<K, V> e = oldTable[i];
if (e != null) {
HashEntry<K, V> next = e.next;
int idx = e.hash & sizeMask;
/* Single node on this chain: move it over directly. */
if (next == null) {
newTable[idx] = e;
} else {
HashEntry<K, V> lastRun = e;
int lastIdx = idx;
/* Find lastRun: the last node at which the new index changes.
 * Every node from lastRun to the end of the chain maps to the
 * same new index (lastIdx), so that whole suffix can be reused
 * in the new table without cloning any nodes. */
for (HashEntry<K, V> last = next; last != null; last = last.next) {
int k = last.hash & sizeMask;
if (k != lastIdx) {
lastIdx = k;
lastRun = last;
}
}
/* As noted above, an old chain splits across at most two new
 * chains (indices m and m + oldCapacity), so relinking the
 * lastRun suffix wholesale cannot mix in nodes that belong to
 * a different bucket. lastIdx may equal the old index. */
newTable[lastIdx] = lastRun;
/* Clone each node before lastRun and head-insert it into its
 * new bucket. Cloning (rather than relinking) keeps the old
 * chains intact for concurrent readers still using oldTable. */
for (HashEntry<K, V> p = e; p != lastRun; p = p.next) {
V v = p.value;
int h = p.hash;
int k = h & sizeMask;
HashEntry<K, V> n = newTable[k];
newTable[k] = new HashEntry<K, V>(h, p.key, v, n);
}
}
}
}
/* Finally link the triggering node in as head of its bucket and publish
 * the new table (volatile write). */
int nodeIndex = node.hash & sizeMask;
node.setNext(newTable[nodeIndex]);
newTable[nodeIndex] = node;
table = newTable;
}
/**
 * Scans for a node containing given key while trying to acquire lock, creating
 * and returning one if not found. Upon return, guarantees that lock is held.
 * Unlike in most methods, calls to method equals are not screened: Since
 * traversal speed doesn't matter, we might as well help warm up the associated
 * code and accesses as well.
 *
 * @return a new node if key not found, else null
 */
private HashEntry<K, V> scanAndLockForPut(K key, int hash, V value) {
    HashEntry<K, V> first = entryForHash(this, hash);
    HashEntry<K, V> e = first;
    HashEntry<K, V> node = null;
    int retries = -1; // negative while locating node
    // Spin on tryLock, doing useful traversal work meanwhile (warming
    // caches) instead of burning CPU.
    while (!tryLock()) {
        HashEntry<K, V> f; // to recheck first below
        if (retries < 0) {
            if (e == null) {
                // End of chain reached without finding the key:
                // speculatively create the node that put() will link in.
                if (node == null)
                    node = new HashEntry<K, V>(hash, key, value, null);
                // Bug fix: retries must be set to 0 whenever the scan
                // completes, not only on the pass that creates the node;
                // otherwise a restart (retries = -1 below) followed by a
                // miss would never begin counting toward lock().
                retries = 0;
            } else if (key.equals(e.key)) {
                retries = 0;
            } else {
                e = e.next;
            }
        } else if (++retries > MAX_SCAN_RETRIES) {
            // Bug fix: the spin bound here is MAX_SCAN_RETRIES (sized by
            // CPU count), not RETRIES_BEFORE_LOCK (which bounds the
            // lock-free passes of size()/containsValue()). With the wrong
            // constant (2) the prescan was pointless. Past the bound,
            // block on the lock.
            lock();
            break;
        } else if ((retries & 1) == 0 && (f = entryForHash(this, hash)) != first) {
            // Bucket head changed while spinning (concurrent write):
            // restart the traversal from the new head.
            e = first = f;
            retries = -1;
        }
    }
    // null when the lock was acquired on the first try, or before the
    // chain scan completed.
    return node;
}
/**
 * Scans for a node containing the given key while trying to acquire lock for a
 * remove or replace operation. Upon return, guarantees that lock is held. Note
 * that we must lock even if the key is not found, to ensure sequential
 * consistency of updates.
 */
private void scanAndLock(Object key, int hash) {
    // similar to but simpler than scanAndLockForPut
    HashEntry<K, V> first = entryForHash(this, hash);
    HashEntry<K, V> e = first;
    int retries = -1;
    // Spin on tryLock, traversing the chain meanwhile to warm caches.
    while (!tryLock()) {
        HashEntry<K, V> f;
        if (retries < 0) {
            // Bug fix: only advance e when the scan continues. The
            // original executed "e = e.next" unconditionally, which threw
            // NullPointerException as soon as the end of the chain
            // (e == null) was reached while still spinning.
            if (e == null || key.equals(e.key))
                retries = 0;
            else
                e = e.next;
        } else if (++retries > MAX_SCAN_RETRIES) {
            // Spin bound exceeded: block on the lock.
            lock();
            break;
        } else if ((retries & 1) == 0 && (f = entryForHash(this, hash)) != first) {
            // Bucket head changed concurrently: restart the traversal.
            e = first = f;
            retries = -1;
        }
    }
}
/**
 * Remove; match on key only if value null, else match both.
 */
final V remove(Object key, int hash, Object value) {
    V oldValue = null;
    if (!tryLock())
        scanAndLock(key, hash);
    try {
        HashEntry<K, V>[] tab = table;
        int index = (tab.length - 1) & hash;
        /* current node */
        HashEntry<K, V> e = entryAt(tab, index);
        /* predecessor of e; null while e is the bucket head */
        HashEntry<K, V> prev = null;
        while (e != null) {
            K k;
            HashEntry<K, V> next = e.next;
            if ((k = e.key) == key || ((e.hash == hash) && k.equals(key))) {
                V v = e.value;
                // When value is null only the key must match; otherwise
                // the stored value must also match.
                // Bug fix: the original tested "value.equals(value)"
                // (trivially true), which made remove(key, value) delete
                // the entry regardless of its current value. The
                // comparison must be against the stored value v.
                if (value == null || value == v || value.equals(v)) {
                    if (prev == null)
                        setEntryAt(tab, index, next); // unlink the head
                    else
                        prev.setNext(next); // unlink an interior node
                    ++modCount;
                    --count;
                    oldValue = v;
                }
                break;
            }
            prev = e;
            e = next;
        }
    } finally {
        unlock();
    }
    return oldValue;
}
/**
 * Conditional replace: swaps in newValue only when the key is present and
 * its current value equals oldValue.
 *
 * @return true if the value was replaced
 */
final boolean replace(K key, int hash, V oldValue, V newValue) {
    if (!tryLock()) {
        // Lock not immediately available: spin a bounded number of times
        // (scanning the chain meanwhile), then block.
        scanAndLock(key, hash);
    }
    boolean replaced = false;
    try {
        HashEntry<K, V> e = entryForHash(this, hash);
        while (e != null) {
            K k = e.key;
            if (k == key || (e.hash == hash && key.equals(k))) {
                // Key found; replace only on a value match.
                if (oldValue.equals(e.value)) {
                    e.value = newValue;
                    ++modCount;
                    replaced = true;
                }
                break;
            }
            e = e.next;
        }
    } finally {
        unlock();
    }
    return replaced;
}
/**
 * Unconditional replace: if the key is present, installs the new value and
 * returns the previous one; otherwise returns null and changes nothing.
 */
final V replace(K key, int hash, V value) {
    if (!tryLock())
        scanAndLock(key, hash);
    V previous = null;
    try {
        HashEntry<K, V> e = entryForHash(this, hash);
        while (e != null) {
            K k = e.key;
            if (k == key || (e.hash == hash && key.equals(k))) {
                previous = e.value;
                e.value = value;
                ++modCount;
                break;
            }
            e = e.next;
        }
    } finally {
        unlock();
    }
    return previous;
}
/**
 * Removes every entry from this segment. Performed entirely under the
 * segment lock; concurrent readers observe either the old chain or an
 * empty bucket.
 */
final void clear() {
    lock();
    try {
        HashEntry<K, V>[] tab = table;
        // Null out each bucket head with an ordered write.
        for (int i = tab.length - 1; i >= 0; i--) {
            setEntryAt(tab, i, null);
        }
        ++modCount;
        count = 0;
    } finally {
        unlock();
    }
}
}
// Accessing segments
/**
 * Gets the jth element of given segment array (if nonnull) with volatile
 * element access semantics via Unsafe. (The null check can trigger harmlessly
 * only during deserialization.) Note: because each element of segments array is
 * set only once (using fully ordered writes), some performance-sensitive
 * methods rely on this method only as a recheck upon null reads.
 */
@SuppressWarnings("unchecked")
static final <K, V> Segment<K, V> segmentAt(Segment<K, V>[] ss, int j) {
// Byte offset of element j within the segments array.
long u = (j << SSHIFT) + SBASE;
return ss == null ? null : (Segment<K, V>) UNSAFE.getObjectVolatile(ss, u);
}
/**
 * Returns the segment for the given index, creating it and recording in segment
 * table (via CAS) if not already present.
 *
 * @param k the index
 * @return the segment
 */
@SuppressWarnings("unchecked")
private Segment<K, V> ensureSegment(int k) {
/* Lazily create the segment at index k if it is still absent. */
Segment<K, V>[] ss = this.segments;
long u = (k << SSHIFT) + SBASE;
Segment<K, V> seg;
if ((seg = (Segment<K, V>) UNSAFE.getObjectVolatile(ss, u)) == null) {
/* Use segment[0] (always constructed eagerly) as the prototype for
 * the new segment's capacity, load factor and threshold. */
Segment<K, V> proto = ss[0];
int len = proto.table.length;
float lf = proto.loadFactor;
int threshold = (int) (len * lf);
HashEntry<K, V>[] tab = new HashEntry[len];
// Recheck after the (possibly slow) sizing work: another thread may
// have installed the segment in the meantime.
if ((seg = (Segment<K, V>) UNSAFE.getObjectVolatile(ss, u)) == null) {
Segment<K, V> segment = new Segment<>(lf, threshold, tab);
// CAS loop: exactly one thread installs the segment; losers re-read
// the winner's instance.
while ((seg = (Segment<K, V>) UNSAFE.getObjectVolatile(ss, u)) == null) {
if (UNSAFE.compareAndSwapObject(ss, u, null, seg = segment)) {
break;
}
}
}
}
return seg;
}
// Hash-based segment and entry accesses
/**
 * Get the segment for the given hash
 */
@SuppressWarnings("unchecked")
private Segment<K, V> segmentForHash(int h) {
/* The upper hash bits (h >>> segmentShift, masked by segmentMask) select
 * the slot in segments[]; u is that slot's byte offset. */
long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
return (Segment<K, V>) UNSAFE.getObjectVolatile(segments, u);
}
/**
 * Gets the table entry for the given segment and hash
 */
@SuppressWarnings("unchecked")
static final <K, V> HashEntry<K, V> entryForHash(Segment<K, V> seg, int h) {
    // Volatile read of the head node of the chain that hash h maps to
    // within the given segment's table.
    HashEntry<K, V>[] tab;
    // Bug fix: the volatile read must target the table array (tab), not
    // the Segment object (seg) — the second argument is a byte offset
    // into an array. The index is widened to long before shifting to
    // avoid int overflow of the offset arithmetic.
    return (seg == null || (tab = seg.table) == null) ? null
            : (HashEntry<K, V>) UNSAFE.getObjectVolatile(tab, ((long) ((tab.length - 1) & h) << TSHIFT) + TBASE);
}
/* ---------------- Public operations -------------- */
/**
 * Creates a new, empty map with the specified initial capacity, load factor and
 * concurrency level.
 *
 * @param initialCapacity the initial capacity. The implementation performs
 *                        internal sizing to accommodate this many elements.
 * @param loadFactor      the load factor threshold, used to control resizing.
 *                        Resizing may be performed when the average number of
 *                        elements per bin exceeds this threshold.
 * @param concurrencyLevel the estimated number of concurrently updating
 *                        threads. The implementation performs internal sizing
 *                        to try to accommodate this many threads.
 * @throws IllegalArgumentException if the initial capacity is negative or the
 *                                  load factor or concurrencyLevel are
 *                                  nonpositive.
 */
@SuppressWarnings("unchecked")
public ConcurrentHashMap(int initialCapacity, float loadFactor, int concurrencyLevel) {
if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
throw new IllegalArgumentException();
if (concurrencyLevel > MAX_SEGMENTS)
concurrencyLevel = MAX_SEGMENTS;
// Find power-of-two sizes best matching arguments:
// ssize = smallest power of two >= concurrencyLevel, sshift = log2(ssize).
int sshift = 0;
int ssize = 1;
while (ssize < concurrencyLevel) {
++sshift;
ssize <<= 1;
}
/* Note: segmentShift and segmentMask are declared final; segment choice
 * uses the top sshift bits of the hash (h >>> segmentShift). */
this.segmentShift = 32 - sshift;
this.segmentMask = ssize - 1;
if (initialCapacity > MAXIMUM_CAPACITY)
initialCapacity = MAXIMUM_CAPACITY;
// c = per-segment capacity needed to hold initialCapacity, rounded up.
int c = initialCapacity / ssize;
if (c * ssize < initialCapacity)
++c;
// cap = smallest permitted power of two >= c.
int cap = MIN_SEGMENT_TABLE_CAPACITY;
while (cap < c)
cap <<= 1;
// create segments and segments[0]
Segment<K, V> s0 = new Segment<K, V>(loadFactor, (int) (cap * loadFactor),
(HashEntry<K, V>[]) new HashEntry[cap]);
Segment<K, V>[] ss = (Segment<K, V>[]) new Segment[ssize];
UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0]
this.segments = ss;
}
/**
 * Creates a new, empty map with the specified initial capacity and load factor
 * and with the default concurrencyLevel (16).
 *
 * @param initialCapacity The implementation performs internal sizing to
 *                        accommodate this many elements.
 * @param loadFactor      the load factor threshold, used to control resizing.
 *                        Resizing may be performed when the average number of
 *                        elements per bin exceeds this threshold.
 * @throws IllegalArgumentException if the initial capacity of elements is
 *                                  negative or the load factor is nonpositive
 *
 * @since 1.6
 */
public ConcurrentHashMap(int initialCapacity, float loadFactor) {
this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL);
}
/**
 * Creates a new, empty map with the specified initial capacity, and with
 * default load factor (0.75) and concurrencyLevel (16).
 *
 * @param initialCapacity the initial capacity. The implementation performs
 *                        internal sizing to accommodate this many elements.
 * @throws IllegalArgumentException if the initial capacity of elements is
 *                                  negative.
 */
public ConcurrentHashMap(int initialCapacity) {
this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
}
/**
 * Creates a new, empty map with a default initial capacity (16), load factor
 * (0.75) and concurrencyLevel (16).
 */
public ConcurrentHashMap() {
this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
}
/**
 * Creates a new map with the same mappings as the given map. The map is created
 * with a capacity of 1.5 times the number of mappings in the given map or 16
 * (whichever is greater), and a default load factor (0.75) and concurrencyLevel
 * (16).
 *
 * @param m the map
 */
public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
// Size for m's mappings up front to avoid immediate rehashing in putAll.
this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, DEFAULT_INITIAL_CAPACITY), DEFAULT_LOAD_FACTOR,
DEFAULT_CONCURRENCY_LEVEL);
putAll(m);
}
/**
 * Returns <tt>true</tt> if this map contains no key-value mappings.
 *
 * @return <tt>true</tt> if this map contains no key-value mappings
 */
public boolean isEmpty() {
/*
 * Sum per-segment modCounts to avoid mis-reporting when elements are
 * concurrently added and removed in one segment while checking another, in
 * which case the table was never actually empty at any point. (The sum ensures
 * accuracy up through at least 1<<31 per-segment modifications before recheck.)
 * Methods size() and containsValue() use similar constructions for stability
 * checks.
 */
/*
 * Overall approach: two lock-free passes over segments[].
 * First pass: if any segment has a nonzero count, return false immediately;
 * otherwise accumulate each segment's modCount into sum.
 * If sum is 0 after the first pass, no writes have ever happened and the
 * map is empty. Otherwise make a second pass: again return false on any
 * nonzero count, and this time subtract each modCount from sum. If sum is
 * nonzero at the end, a write occurred between the two passes, so the map
 * cannot be reported empty.
 */
long sum = 0L;
final Segment<K, V>[] segments = this.segments;
for (int i = 0; i < segments.length; i++) {
Segment<K, V> seg = segmentAt(segments, i);
if (seg != null) {
if (seg.count != 0) {
return false;
}
sum += seg.modCount;
}
}
if (sum != 0L) {
for (int i = 0; i < segments.length; i++) {
Segment<K, V> seg = segmentAt(segments, i);
if (seg != null) {
if (seg.count != 0) {
return false;
}
sum -= seg.modCount;
}
}
if (sum != 0) {
return false;
}
}
return true;
}
/**
 * Returns the number of key-value mappings in this map. If the map contains
 * more than <tt>Integer.MAX_VALUE</tt> elements, returns
 * <tt>Integer.MAX_VALUE</tt>.
 *
 * @return the number of key-value mappings in this map
 */
public int size() {
/* Writes may proceed concurrently in other segments while counting, so
 * the result is only a snapshot valid for some moment during the call. */
final Segment<K, V>[] segments = this.segments;
int size;// accumulated element count for this pass
long sum;// sum of per-segment modCounts for this pass
long last = 0L;// modCount sum from the previous pass
int retries = -1;// first pass is a "free" try (incremented before use)
boolean overflow;// true if size exceeded Integer.MAX_VALUE
try {
/*
 * Overall approach: attempt a lock-free count first. If two consecutive
 * passes observe the same total modCount, the count is stable and is
 * returned. After RETRIES_BEFORE_LOCK unstable passes, lock every
 * segment and count under full mutual exclusion.
 */
for (;;) {
/* Retry budget exhausted: lock all segments before counting. */
if (retries++ == RETRIES_BEFORE_LOCK) {
for (int i = 0; i < segments.length; i++) {
segmentAt(segments, i).lock();
}
}
sum = 0L;
size = 0;
overflow = false;
for (int i = 0; i < segments.length; i++) {
Segment<K, V> seg = segmentAt(segments, i);
if (seg != null) {
sum += seg.modCount;
int c = seg.count;
/* Detect int overflow of the running total. */
if (c < 0 || (size += c) < 0) {
overflow = true;
}
}
}
/* Identical modCount sums on consecutive passes mean no writes happened
 * in between, so the computed size is consistent; otherwise retry
 * (eventually under locks, per the budget above). */
if (sum == last) {
break;
}
last = sum;
}
} finally {
if (retries > RETRIES_BEFORE_LOCK) {
for (int i = 0; i < segments.length; i++) {
segmentAt(segments, i).unlock();
}
}
}
return overflow ? Integer.MAX_VALUE : size;
}
/**
 * Returns the value to which the specified key is mapped, or {@code null} if
 * this map contains no mapping for the key.
 *
 * <p>
 * More formally, if this map contains a mapping from a key {@code k} to a value
 * {@code v} such that {@code key.equals(k)}, then this method returns
 * {@code v}; otherwise it returns {@code null}. (There can be at most one such
 * mapping.)
 *
 * @throws NullPointerException if the specified key is null
 */
@SuppressWarnings("unchecked")
public V get(Object key) {
    Segment<K, V> seg;
    HashEntry<K, V>[] tab;
    int h = hash(key);
    // Byte offset of this hash's slot in segments[] (upper hash bits
    // select the segment).
    long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
    if ((seg = (Segment<K, V>) UNSAFE.getObjectVolatile(segments, u)) != null && (tab = seg.table) != null) {
        // Bug fix: the bucket head must be volatile-read from the table
        // array (tab), not from the Segment object (seg) — the computed
        // value is an array byte offset. The index is widened to long to
        // avoid int overflow.
        for (HashEntry<K, V> e = (HashEntry<K, V>) UNSAFE.getObjectVolatile(tab,
                ((long) ((tab.length - 1) & h) << TSHIFT) + TBASE); e != null; e = e.next) {
            K k;
            if ((k = e.key) == key || (e.hash == h && key.equals(k))) {
                return e.value;
            }
        }
    }
    return null;
}
/**
 * Tests if the specified object is a key in this table.
 *
 * @param key possible key
 * @return <tt>true</tt> if and only if the specified object is a key in this
 *         table, as determined by the <tt>equals</tt> method; <tt>false</tt>
 *         otherwise.
 * @throws NullPointerException if the specified key is null
 */
@SuppressWarnings("unchecked")
public boolean containsKey(Object key) {
    Segment<K, V> seg;
    HashEntry<K, V>[] tab;
    int h = hash(key);
    // Fix: use the unsigned shift (>>>) when selecting the segment, as
    // get() does. The signed form (>>) only happened to work because
    // segmentMask discards the sign-extended high bits.
    long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
    if ((seg = (Segment<K, V>) UNSAFE.getObjectVolatile(segments, u)) != null && (tab = seg.table) != null) {
        // Byte offset of the bucket head within tab; widened to long to
        // avoid int overflow of the offset arithmetic.
        long ut = ((long) ((tab.length - 1) & h) << TSHIFT) + TBASE;
        for (HashEntry<K, V> e = (HashEntry<K, V>) UNSAFE.getObjectVolatile(tab, ut); e != null; e = e.next) {
            K k;
            if ((k = e.key) == key || (e.hash == h && key.equals(k))) {
                return true;
            }
        }
    }
    return false;
}
/**
 * Returns <tt>true</tt> if this map maps one or more keys to the specified
 * value. Note: This method requires a full internal traversal of the hash
 * table, and so is much slower than method <tt>containsKey</tt>.
 *
 * @param value value whose presence in this map is to be tested
 * @return <tt>true</tt> if this map maps one or more keys to the specified
 *         value
 * @throws NullPointerException if the specified value is null
 */
public boolean containsValue(Object value) {
    /* Same retry strategy as size(): lock-free passes until the modCount
     * sum is stable, then lock every segment. See size() for details. */
    if (value == null) {
        throw new NullPointerException();
    }
    final Segment<K, V>[] segments = this.segments;
    boolean found = false;
    long last = 0;
    int retries = -1;
    try {
        /* A successful match jumps out of both loops to this label. */
        outer: for (;;) {
            if (retries++ == RETRIES_BEFORE_LOCK) {
                for (int i = 0; i < segments.length; i++) {
                    segmentAt(segments, i).lock();
                }
            }
            /* Accumulates per-segment modCounts for the stability check. */
            long sum = 0L;
            for (int i = 0; i < segments.length; i++) {
                Segment<K, V> seg = segmentAt(segments, i);
                HashEntry<K, V>[] tab;
                if (seg != null && (tab = seg.table) != null) {
                    for (int j = 0; j < tab.length; j++) {
                        HashEntry<K, V> e;
                        for (e = entryAt(tab, j); e != null; e = e.next) {
                            V v = e.value;
                            if (v != null && value.equals(v)) {
                                found = true;
                                break outer;
                            }
                        }
                    }
                    // Bug fix: read modCount only for non-null segments.
                    // The original performed "sum += seg.modCount" outside
                    // this null check, throwing NullPointerException for
                    // any lazily-constructed (still null) segment slot.
                    sum += seg.modCount;
                }
            }
            if (retries > 0 && sum == last) {
                break;
            }
            last = sum;
        }
    } finally {
        if (retries > RETRIES_BEFORE_LOCK) {
            for (int i = 0; i < segments.length; i++) {
                segmentAt(segments, i).unlock();
            }
        }
    }
    return found;
}
/**
* Legacy method testing if some key maps into the specified value in this
* table. This method is identical in functionality to {@link #containsValue},
* and exists solely to ensure full compatibility with class
* {@link java.util.Hashtable}, which supported this method prior to
* introduction of the Java Collections framework.
*
* @param value a value to search for
* @return <tt>true</tt> if and only if some key maps to the <tt>value</tt>
* argument in this table as determined by the <tt>equals</tt> method;
* <tt>false</tt> otherwise
* @throws NullPointerException if the specified value is null
*/
public boolean contains(Object value) {
    // Hashtable-era alias retained for compatibility; identical to
    // containsValue (so it also throws NPE on a null argument).
    return containsValue(value);
}
/**
* Maps the specified key to the specified value in this table. Neither the key
* nor the value can be null.
*
* <p>
* The value can be retrieved by calling the <tt>get</tt> method with a key that
* is equal to the original key.
*
* @param key key with which the specified value is to be associated
* @param value value to be associated with the specified key
* @return the previous value associated with <tt>key</tt>, or <tt>null</tt> if
* there was no mapping for <tt>key</tt>
* @throws NullPointerException if the specified key or value is null
*/
@SuppressWarnings("unchecked")
public V put(K key, V value) {
    Segment<K, V> s;
    // Null values are rejected up front; a null key fails inside hash(key)
    // when its hashCode() is taken.
    if (value == null) {
        throw new NullPointerException();
    }
    int hash = hash(key);
    // The top bits of the spread hash select the segment index.
    int j = (hash >>> segmentShift) & segmentMask;
    // Read segments[j] via Unsafe: (j << SSHIFT) + SBASE is the byte offset
    // of element j. If the segment has not been created yet, create it
    // (ensureSegment performs the race-safe creation).
    if ((s = (Segment<K, V>) UNSAFE.getObject(segments, (j << SSHIFT) + SBASE)) == null) {
        s = ensureSegment(j);
    }
    // Delegate to the segment; onlyIfAbsent == false: existing mappings
    // are overwritten and the previous value is returned.
    return s.put(key, hash, value, false);
}
/**
* {@inheritDoc}
*
* @return the previous value associated with the specified key, or
* <tt>null</tt> if there was no mapping for the key
* @throws NullPointerException if the specified key or value is null
*/
@SuppressWarnings("unchecked")
public V putIfAbsent(K key, V value) {
    Segment<K, V> s;
    // Same structure as put(); null value rejected, null key NPEs in hash().
    if (value == null)
        throw new NullPointerException();
    int hash = hash(key);
    // Top hash bits select the segment index.
    int j = (hash >>> segmentShift) & segmentMask;
    // Unsafe read of segments[j]; create the segment lazily if absent.
    if ((s = (Segment<K, V>) UNSAFE.getObject(segments, (j << SSHIFT) + SBASE)) == null)
        s = ensureSegment(j);
    // onlyIfAbsent == true: keep an existing mapping, returning its value.
    return s.put(key, hash, value, true);
}
/**
* Copies all of the mappings from the specified map to this one. These mappings
* replace any mappings that this map had for any of the keys currently in the
* specified map.
*
* @param m mappings to be stored in this map
*/
public void putAll(Map<? extends K, ? extends V> m) {
    // Not atomic as a whole: each mapping is inserted with an independent
    // put(), so concurrent readers may observe a partial copy.
    for (Map.Entry<? extends K, ? extends V> entry : m.entrySet()) {
        put(entry.getKey(), entry.getValue());
    }
}
/**
* Removes the key (and its corresponding value) from this map. This method does
* nothing if the key is not in the map.
*
* @param key the key that needs to be removed
* @return the previous value associated with <tt>key</tt>, or <tt>null</tt> if
* there was no mapping for <tt>key</tt>
* @throws NullPointerException if the specified key is null
*/
public V remove(Object key) {
    // Locate the segment for this key's hash; if it was never created,
    // the key cannot be present.
    int hash = hash(key);
    Segment<K, V> seg = segmentForHash(hash);
    if (seg == null) {
        return null;
    }
    return seg.remove(key, hash, null);
}
/**
* {@inheritDoc}
*
* @throws NullPointerException if the specified key is null
*/
public boolean remove(Object key, Object value) {
    // hash(key) runs first, so a null key throws NPE regardless of value.
    int hash = hash(key);
    if (value == null) {
        // A null value can never be mapped, so there is nothing to remove.
        return false;
    }
    Segment<K, V> seg = segmentForHash(hash);
    if (seg == null) {
        return false;
    }
    // Conditional removal: only removes when the current value matches.
    return seg.remove(key, hash, value) != null;
}
/**
* {@inheritDoc}
*
* @throws NullPointerException if any of the arguments are null
*/
public boolean replace(K key, V oldValue, V newValue) {
    // hash(key) runs first, preserving NPE-on-null-key behavior.
    int hash = hash(key);
    if (oldValue == null || newValue == null) {
        throw new NullPointerException();
    }
    Segment<K, V> seg = segmentForHash(hash);
    if (seg == null) {
        // No segment, no mapping — nothing to replace.
        return false;
    }
    return seg.replace(key, hash, oldValue, newValue);
}
/**
* {@inheritDoc}
*
* @return the previous value associated with the specified key, or
* <tt>null</tt> if there was no mapping for the key
* @throws NullPointerException if the specified key or value is null
*/
public V replace(K key, V value) {
    // hash(key) runs first, preserving NPE-on-null-key behavior.
    int hash = hash(key);
    if (value == null) {
        throw new NullPointerException();
    }
    Segment<K, V> seg = segmentForHash(hash);
    if (seg != null) {
        return seg.replace(key, hash, value);
    }
    // Segment never created: key absent, nothing replaced.
    return null;
}
/**
* Removes all of the mappings from this map.
*/
public void clear() {
    // Clear each existing segment in turn; segments are locked one at a
    // time, so the operation is not atomic across the whole map.
    final Segment<K, V>[] segs = this.segments;
    for (int i = 0; i < segs.length; i++) {
        Segment<K, V> seg = segmentAt(segs, i);
        if (seg != null) {
            seg.clear();
        }
    }
}
/**
* Returns a {@link Set} view of the keys contained in this map. The set is
* backed by the map, so changes to the map are reflected in the set, and
* vice-versa. The set supports element removal, which removes the corresponding
* mapping from this map, via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
* <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations. It
* does not support the <tt>add</tt> or <tt>addAll</tt> operations.
*
* <p>
* The view's <tt>iterator</tt> is a "weakly consistent" iterator that will
* never throw {@link ConcurrentModificationException}, and guarantees to
* traverse elements as they existed upon construction of the iterator, and may
* (but is not guaranteed to) reflect any modifications subsequent to
* construction.
*/
public Set<K> keySet() {
    // Lazily create and cache the key view. A race may create two KeySet
    // instances, which is harmless: the view is stateless.
    Set<K> existing = keySet;
    if (existing == null) {
        existing = new KeySet();
        keySet = existing;
    }
    return existing;
}
/**
* Returns a {@link Collection} view of the values contained in this map. The
* collection is backed by the map, so changes to the map are reflected in the
* collection, and vice-versa. The collection supports element removal, which
* removes the corresponding mapping from this map, via the
* <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>, <tt>removeAll</tt>,
* <tt>retainAll</tt>, and <tt>clear</tt> operations. It does not support the
* <tt>add</tt> or <tt>addAll</tt> operations.
*
* <p>
* The view's <tt>iterator</tt> is a "weakly consistent" iterator that will
* never throw {@link ConcurrentModificationException}, and guarantees to
* traverse elements as they existed upon construction of the iterator, and may
* (but is not guaranteed to) reflect any modifications subsequent to
* construction.
*/
public Collection<V> values() {
    // Lazily create and cache the value view; a racy duplicate creation
    // is harmless since the view carries no state.
    Collection<V> existing = values;
    if (existing == null) {
        existing = new Values();
        values = existing;
    }
    return existing;
}
/**
* Returns a {@link Set} view of the mappings contained in this map. The set is
* backed by the map, so changes to the map are reflected in the set, and
* vice-versa. The set supports element removal, which removes the corresponding
* mapping from the map, via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
* <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations. It
* does not support the <tt>add</tt> or <tt>addAll</tt> operations.
*
* <p>
* The view's <tt>iterator</tt> is a "weakly consistent" iterator that will
* never throw {@link ConcurrentModificationException}, and guarantees to
* traverse elements as they existed upon construction of the iterator, and may
* (but is not guaranteed to) reflect any modifications subsequent to
* construction.
*/
public Set<Map.Entry<K, V>> entrySet() {
    // Lazily create and cache the entry view; a racy duplicate creation
    // is harmless since the view carries no state.
    Set<Map.Entry<K, V>> existing = entrySet;
    if (existing == null) {
        existing = new EntrySet();
        entrySet = existing;
    }
    return existing;
}
/**
* Returns an enumeration of the keys in this table.
*
* @return an enumeration of the keys in this table
* @see #keySet()
*/
public Enumeration<K> keys() {
    // Legacy Hashtable-style enumeration; backed by the same weakly
    // consistent iterator as keySet().iterator().
    return new KeyIterator();
}
/**
* Returns an enumeration of the values in this table.
*
* @return an enumeration of the values in this table
* @see #values()
*/
public Enumeration<V> elements() {
    // Legacy Hashtable-style enumeration; backed by the same weakly
    // consistent iterator as values().iterator().
    return new ValueIterator();
}
/* ---------------- Iterator Support -------------- */
/**
 * Base class for all iterators/enumerations over this map. Traverses
 * segments, then buckets within each segment, in reverse index order.
 * Weakly consistent: never throws ConcurrentModificationException.
 */
abstract class HashIterator {
    // Index of the next segment to visit (walked backwards from the end).
    int nextSegmentIndex;
    // Index of the next bucket in currentTable (walked backwards);
    // -1 means the current table is exhausted.
    int nextTableIndex;
    // Bucket array of the segment currently being traversed.
    HashEntry<K, V>[] currentTable;
    // Next entry to hand out; null once iteration is finished.
    HashEntry<K, V> nextEntry;
    // Most recently returned entry; target of remove().
    HashEntry<K, V> lastReturned;
    HashIterator() {
        nextSegmentIndex = segments.length - 1;
        nextTableIndex = -1;
        advance();
    }
    /**
     * Set nextEntry to first node of next non-empty table (in backwards order, to
     * simplify checks).
     */
    final void advance() {
        for (;;) {
            if (nextTableIndex >= 0) {
                // Still buckets left in the current table: take the head of
                // the next non-empty chain.
                if ((nextEntry = entryAt(currentTable, nextTableIndex--)) != null)
                    break;
            } else if (nextSegmentIndex >= 0) {
                // Current table exhausted: move to the next segment that
                // exists and has a table.
                Segment<K, V> seg = segmentAt(segments, nextSegmentIndex--);
                if (seg != null && (currentTable = seg.table) != null)
                    nextTableIndex = currentTable.length - 1;
            } else
                // No segments left: nextEntry stays null -> hasNext() false.
                break;
        }
    }
    // Returns the next entry, following the chain first and only calling
    // advance() when the current chain ends.
    final HashEntry<K, V> nextEntry() {
        HashEntry<K, V> e = nextEntry;
        if (e == null)
            throw new NoSuchElementException();
        lastReturned = e; // cannot assign until after null check
        if ((nextEntry = e.next) == null)
            advance();
        return e;
    }
    public final boolean hasNext() {
        return nextEntry != null;
    }
    // Enumeration counterpart of hasNext().
    public final boolean hasMoreElements() {
        return nextEntry != null;
    }
    // Removes the last returned entry from the map (delegates to the map's
    // own remove, so it is safe under concurrency).
    public final void remove() {
        if (lastReturned == null)
            throw new IllegalStateException();
        ConcurrentHashMap.this.remove(lastReturned.key);
        lastReturned = null;
    }
}
// Iterator over keys; also serves as the legacy keys() Enumeration.
final class KeyIterator extends HashIterator implements Iterator<K>, Enumeration<K> {
    public final K next() {
        HashEntry<K, V> entry = nextEntry();
        return entry.key;
    }
    public final K nextElement() {
        return next();
    }
}
// Iterator over values; also serves as the legacy elements() Enumeration.
final class ValueIterator extends HashIterator implements Iterator<V>, Enumeration<V> {
    public final V next() {
        HashEntry<K, V> entry = nextEntry();
        return entry.value;
    }
    public final V nextElement() {
        return next();
    }
}
/**
* Custom Entry class used by EntryIterator.next(), that relays setValue changes
* to the underlying map.
*/
/**
 * Entry implementation handed out by EntryIterator.next(): a snapshot of
 * a key/value pair whose setValue also writes through to the backing map.
 */
final class WriteThroughEntry extends AbstractMap.SimpleEntry<K, V> {
    WriteThroughEntry(K k, V v) {
        super(k, v);
    }
    /**
     * Updates this entry's value and writes the change through to the
     * enclosing map. The returned "previous" value is best-effort only:
     * this entry does not track concurrent map changes, so the map may
     * have held a different value (or none) at the moment of the put.
     */
    public V setValue(V value) {
        if (value == null) {
            throw new NullPointerException();
        }
        V previous = super.setValue(value);
        ConcurrentHashMap.this.put(getKey(), value);
        return previous;
    }
}
// Iterator over entries; each call materializes a WriteThroughEntry so
// that setValue on the returned entry updates the map.
final class EntryIterator extends HashIterator implements Iterator<Entry<K, V>> {
    public Map.Entry<K, V> next() {
        HashEntry<K, V> entry = nextEntry();
        return new WriteThroughEntry(entry.key, entry.value);
    }
}
// Set view of the map's keys; every operation delegates to the enclosing
// map, so removals write through and additions are unsupported
// (AbstractSet.add throws).
final class KeySet extends AbstractSet<K> {
    public int size() {
        return ConcurrentHashMap.this.size();
    }
    public boolean isEmpty() {
        return ConcurrentHashMap.this.isEmpty();
    }
    public boolean contains(Object o) {
        return ConcurrentHashMap.this.containsKey(o);
    }
    public Iterator<K> iterator() {
        return new KeyIterator();
    }
    public boolean remove(Object o) {
        Object previous = ConcurrentHashMap.this.remove(o);
        return previous != null;
    }
    public void clear() {
        ConcurrentHashMap.this.clear();
    }
}
// Collection view of the map's values; every operation delegates to the
// enclosing map. Element removal is supported only via the iterator;
// add/addAll are unsupported (AbstractCollection throws).
final class Values extends AbstractCollection<V> {
    public int size() {
        return ConcurrentHashMap.this.size();
    }
    public boolean isEmpty() {
        return ConcurrentHashMap.this.isEmpty();
    }
    public boolean contains(Object o) {
        return ConcurrentHashMap.this.containsValue(o);
    }
    public Iterator<V> iterator() {
        return new ValueIterator();
    }
    public void clear() {
        ConcurrentHashMap.this.clear();
    }
}
// Set view of the map's entries; membership and removal are defined in
// terms of the enclosing map's current state, and mutations write through.
final class EntrySet extends AbstractSet<Map.Entry<K, V>> {
    public int size() {
        return ConcurrentHashMap.this.size();
    }
    public boolean isEmpty() {
        return ConcurrentHashMap.this.isEmpty();
    }
    public boolean contains(Object o) {
        if (o instanceof Map.Entry) {
            Map.Entry<?, ?> entry = (Map.Entry<?, ?>) o;
            V mapped = ConcurrentHashMap.this.get(entry.getKey());
            // Present only if the map currently holds an equal value.
            return mapped != null && mapped.equals(entry.getValue());
        }
        return false;
    }
    public Iterator<Map.Entry<K, V>> iterator() {
        return new EntryIterator();
    }
    public boolean remove(Object o) {
        if (o instanceof Map.Entry) {
            Map.Entry<?, ?> entry = (Map.Entry<?, ?>) o;
            // Conditional remove: only when both key and value match.
            return ConcurrentHashMap.this.remove(entry.getKey(), entry.getValue());
        }
        return false;
    }
    public void clear() {
        ConcurrentHashMap.this.clear();
    }
}
/* ---------------- Serialization Support -------------- */
/**
* Save the state of the <tt>ConcurrentHashMap</tt> instance to a stream (i.e.,
* serialize it).
*
* @param s the stream
* @serialData the key (Object) and value (Object) for each key-value mapping,
* followed by a null pair. The key-value mappings are emitted in no
* particular order.
*/
private void writeObject(java.io.ObjectOutputStream s) throws IOException {
    // force all segments for serialization compatibility
    for (int k = 0; k < segments.length; ++k)
        ensureSegment(k);
    // Write the non-transient fields first.
    s.defaultWriteObject();
    final Segment<K, V>[] segments = this.segments;
    for (int k = 0; k < segments.length; ++k) {
        Segment<K, V> seg = segmentAt(segments, k);
        // Lock each segment while its entries are written so that a
        // consistent snapshot of that segment is serialized.
        seg.lock();
        try {
            HashEntry<K, V>[] tab = seg.table;
            for (int i = 0; i < tab.length; ++i) {
                HashEntry<K, V> e;
                // Emit every key/value pair in every bucket chain.
                for (e = entryAt(tab, i); e != null; e = e.next) {
                    s.writeObject(e.key);
                    s.writeObject(e.value);
                }
            }
        } finally {
            seg.unlock();
        }
    }
    // Trailing null/null pair marks the end of the mappings for readObject.
    s.writeObject(null);
    s.writeObject(null);
}
/**
* Reconstitute the <tt>ConcurrentHashMap</tt> instance from a stream (i.e.,
* deserialize it).
*
* @param s the stream
*/
@SuppressWarnings("unchecked")
private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException {
    s.defaultReadObject();
    // Re-randomize the per-instance hash seed (written via Unsafe because
    // the field is final/volatile-published).
    UNSAFE.putIntVolatile(this, HASHSEED_OFFSET, randomHashSeed(this));
    // Re-initialize segments to be minimally sized, and let grow.
    int cap = MIN_SEGMENT_TABLE_CAPACITY;
    final Segment<K, V>[] segments = this.segments;
    for (int k = 0; k < segments.length; ++k) {
        Segment<K, V> seg = segments[k];
        if (seg != null) {
            // Reset each deserialized segment to an empty minimal table;
            // entries are re-inserted below and tables grow as needed.
            seg.threshold = (int) (cap * seg.loadFactor);
            seg.table = (HashEntry<K, V>[]) new HashEntry[cap];
        }
    }
    // Read the keys and values, and put the mappings in the table
    for (;;) {
        K key = (K) s.readObject();
        V value = (V) s.readObject();
        // The null/null pair written by writeObject terminates the stream.
        if (key == null)
            break;
        put(key, value);
    }
}
// Unsafe mechanics
// Unsafe mechanics: base offsets and per-element shifts used to address
// Segment[] and HashEntry[] elements directly via sun.misc.Unsafe.
private static final sun.misc.Unsafe UNSAFE;
private static final long SBASE;          // byte offset of Segment[0]
private static final int SSHIFT;          // log2 of Segment[] element size
private static final long TBASE;          // byte offset of HashEntry[0]
private static final int TSHIFT;          // log2 of HashEntry[] element size
private static final long HASHSEED_OFFSET; // field offset of hashSeed
static {
    int ss, ts;
    try {
        UNSAFE = sun.misc.Unsafe.getUnsafe();
        Class tc = HashEntry[].class;
        Class sc = Segment[].class;
        // Byte offset of element 0 within each array object
        // (i.e. the size of the array header).
        TBASE = UNSAFE.arrayBaseOffset(tc);
        SBASE = UNSAFE.arrayBaseOffset(sc);
        // Size in bytes of each array element (reference size).
        ts = UNSAFE.arrayIndexScale(tc);
        ss = UNSAFE.arrayIndexScale(sc);
        HASHSEED_OFFSET = UNSAFE.objectFieldOffset(ConcurrentHashMap.class.getDeclaredField("hashSeed"));
    } catch (Exception e) {
        throw new Error(e);
    }
    // Addressing element i as base + (i << shift) only works when the
    // element size is a power of two.
    if ((ss & (ss - 1)) != 0 || (ts & (ts - 1)) != 0)
        throw new Error("data type scale not a power of two");
    // Express the element size as a power-of-two shift,
    // e.g. ss == 16 -> SSHIFT == 4.
    SSHIFT = 31 - Integer.numberOfLeadingZeros(ss);
    TSHIFT = 31 - Integer.numberOfLeadingZeros(ts);
}
}