ConcurrentDictionary的数据结构主要由Tables和Node组成,其中Tables包括桶(Node,节点)数组、局部锁(Local lock)、每个锁保护的元素数量(PerLock)。Node包含用户实际操作的key和value、为实现链表数据结构而持有的下一个节点(Next Node)的引用、当前节点key的原始(未取正)散列值,以及其它一些标识。
/// <summary>
/// An immutable snapshot of the dictionary's internal state: the bucket
/// array, the striped locks, and the per-lock element counts. Bundling
/// them in one object lets the whole state be swapped atomically by
/// replacing a single reference.
/// </summary>
private class Tables
{
    /// <summary>
    /// Bucket array; each bucket is the head of a singly-linked list of Nodes.
    /// </summary>
    internal readonly Node[] m_buckets;

    /// <summary>
    /// Lock array; each lock guards a stripe (subset) of the buckets.
    /// </summary>
    internal readonly object[] m_locks;

    /// <summary>
    /// Number of elements guarded by each lock (indexed by lock number).
    /// </summary>
    internal volatile int[] m_countPerLock;

    /// <summary>
    /// The equality comparer used for keys.
    /// </summary>
    internal readonly IEqualityComparer<TKey> m_comparer;

    internal Tables(Node[] buckets, object[] locks, int[] countPerLock, IEqualityComparer<TKey> comparer)
    {
        m_buckets = buckets;
        m_locks = locks;
        m_countPerLock = countPerLock;
        m_comparer = comparer;
    }
}

/// <summary>
/// A node in a bucket's singly-linked list, holding one key/value pair.
/// </summary>
private class Node
{
    internal TKey m_key;
    internal TValue m_value;
    // volatile so lock-free readers never observe a partially-published link
    internal volatile Node m_next;
    // the key's original (not yet made non-negative) hash code, cached to
    // avoid recomputing it during lookups and resizes
    internal int m_hashcode;

    internal Node(TKey key, TValue value, int hashcode, Node next)
    {
        m_key = key;
        m_value = value;
        m_next = next;
        m_hashcode = hashcode;
    }
}
当new一个ConcurrentDictionary时,默认调用无参构造函数,给定默认的并发数量(Environment.ProcessorCount)、默认的键比较器、默认的容量(桶的初始容量,为31),该容量是经过权衡得到的,且不能被较小的素数整除。之后再处理容量与并发数的关系、容量与锁的关系以及每个锁的最大元素数。最后将桶、锁对象、每个锁保护的元素数量封装在一个对象中,并初始化。
// Initializes a new, empty instance of the ConcurrentDictionary class with
// the default concurrency level, the default initial capacity, and the
// default equality comparer for the key type.
public ConcurrentDictionary() :
    this(DefaultConcurrencyLevel, DEFAULT_CAPACITY, true, EqualityComparer<TKey>.Default) { }

/// <summary>
/// The worker constructor that the public constructors ultimately delegate to.
/// </summary>
/// <param name="concurrencyLevel">The estimated number of threads that will update the dictionary concurrently.</param>
/// <param name="capacity">The initial number of buckets.</param>
/// <param name="growLockArray">Whether the striped-lock array may grow dynamically.</param>
/// <param name="comparer">The equality comparer to use for keys.</param>
internal ConcurrentDictionary(int concurrencyLevel, int capacity, bool growLockArray, IEqualityComparer<TKey> comparer)
{
    // Validate arguments in the order callers observe the exceptions.
    if (concurrencyLevel < 1)
    {
        throw new ArgumentOutOfRangeException("concurrencyLevel", GetResource("ConcurrentDictionary_ConcurrencyLevelMustBePositive"));
    }
    if (capacity < 0)
    {
        throw new ArgumentOutOfRangeException("capacity", GetResource("ConcurrentDictionary_CapacityMustNotBeNegative"));
    }
    if (comparer == null) throw new ArgumentNullException("comparer");

    // A capacity below the concurrency level would leave some locks with
    // no buckets to guard (wasted locks), so raise it to match.
    if (capacity < concurrencyLevel)
    {
        capacity = concurrencyLevel;
    }

    // One lock object per expected concurrent writer.
    object[] lockObjects = new object[concurrencyLevel];
    for (int lockIndex = 0; lockIndex < concurrencyLevel; lockIndex++)
    {
        lockObjects[lockIndex] = new object();
    }

    // Per-lock element counters, plus the bucket array itself (each bucket
    // is the head of a singly-linked Node list).
    int[] elementsPerLock = new int[concurrencyLevel];
    Node[] bucketArray = new Node[capacity];

    // Publish buckets, locks and counters together as one snapshot object,
    // so the dictionary's entire internal state can be swapped atomically.
    m_tables = new Tables(bucketArray, lockObjects, elementsPerLock, comparer);

    // Whether the striped-lock array is allowed to grow on resize.
    m_growLockArray = growLockArray;

    // Budget: the (estimated) maximum number of elements one lock may guard
    // before a resize is triggered — total buckets split evenly across locks.
    m_budget = bucketArray.Length / lockObjects.Length;
}
当调用TryAdd时,实际调用的是内部私有方法TryAddInternal(多个公开API共用的工作方法)。如果存在key,则始终返回false,如果updateIfExists为true,则更新value;如果不存在key,则始终返回true,并且添加value。详细解读见代码。
/// <summary>
/// Attempts to add the specified key and value to the dictionary.
/// </summary>
/// <param name="key">The key of the element to add.</param>
/// <param name="value">The value of the element to add. May be null for reference types.</param>
/// <returns>true if the key/value pair was added; otherwise false.</returns>
/// Exceptions:
// T:System.ArgumentNullException:
//   key is null.
// T:System.OverflowException:
//   The dictionary already contains the maximum number of elements (System.Int32.MaxValue).
public bool TryAdd(TKey key, TValue value)
{
    if (key == null) throw new ArgumentNullException("key");
    TValue dummy;
    return TryAddInternal(key, value, false, true, out dummy);
}

/// <summary>
/// Shared worker method for add and update operations.
/// If the key exists: always returns false, and updates the value only when updateIfExists is true.
/// If the key does not exist: always returns true and inserts the value.
/// </summary>
[SuppressMessage("Microsoft.Concurrency", "CA8001", Justification = "Reviewed for thread safety")]
private bool TryAddInternal(TKey key, TValue value, bool updateIfExists, bool acquireLock, out TValue resultingValue)
{
    while (true)
    {
        // Bucket index and lock (stripe) index for this key.
        int bucketNo, lockNo;
        int hashcode;

        Tables tables = m_tables;
        IEqualityComparer<TKey> comparer = tables.m_comparer;
        hashcode = comparer.GetHashCode(key);

        // Map the hash code to a bucket index and a lock index.
        GetBucketAndLockNo(hashcode, out bucketNo, out lockNo, tables.m_buckets.Length, tables.m_locks.Length);

        bool resizeDesired = false;
        bool lockTaken = false;
#if FEATURE_RANDOMIZED_STRING_HASHING
#if !FEATURE_CORECLR
        bool resizeDueToCollisions = false;
#endif // !FEATURE_CORECLR
#endif

        try
        {
            if (acquireLock)
                // Acquire only the lock (lockNo) guarding this key's stripe.
                // Different keys hash to different stripes, so different
                // threads usually contend on different lock objects.
                Monitor.Enter(tables.m_locks[lockNo], ref lockTaken);

            // If the tables were swapped (e.g. by a resize) before the lock
            // was taken, we may have locked the wrong stripe — retry against
            // the fresh tables.
            if (tables != m_tables)
            {
                continue;
            }

#if FEATURE_RANDOMIZED_STRING_HASHING
#if !FEATURE_CORECLR
            int collisionCount = 0;
#endif // !FEATURE_CORECLR
#endif

            // Try to find this key in the bucket
            Node prev = null;
            for (Node node = tables.m_buckets[bucketNo]; node != null; node = node.m_next)
            {
                Assert((prev == null && node == tables.m_buckets[bucketNo]) || prev.m_next == node);
                // The key already exists.
                if (comparer.Equals(node.m_key, key))
                {
                    // Update the stored value only when the caller asked for it.
                    if (updateIfExists)
                    {
                        // If writes of TValue are atomic, assign in place.
                        if (s_isValueWriteAtomic)
                        {
                            node.m_value = value;
                        }
                        // Otherwise replace the whole node, so that lock-free
                        // readers — which may be running concurrently — never
                        // observe a torn (partially written) value.
                        else
                        {
                            // The new node inherits the old node's successor.
                            Node newNode = new Node(node.m_key, value, hashcode, node.m_next);
                            if (prev == null)
                            {
                                tables.m_buckets[bucketNo] = newNode;
                            }
                            else
                            {
                                // Splice the new node in after prev, completing
                                // the old-for-new node swap in the list.
                                prev.m_next = newNode;
                            }
                        }
                        resultingValue = value;
                    }
                    else
                    {
                        resultingValue = node.m_value;
                    }
                    return false;
                }
                // When the loop ends, prev is the last node (node.m_next == null).
                prev = node;

#if FEATURE_RANDOMIZED_STRING_HASHING
#if !FEATURE_CORECLR
                collisionCount++;
#endif // !FEATURE_CORECLR
#endif
            }

#if FEATURE_RANDOMIZED_STRING_HASHING
#if !FEATURE_CORECLR
            if (collisionCount > HashHelpers.HashCollisionThreshold && HashHelpers.IsWellKnownEqualityComparer(comparer))
            {
                resizeDesired = true;
                resizeDueToCollisions = true;
            }
#endif // !FEATURE_CORECLR
#endif

            // Insert the new pair at the head of the bucket using a volatile
            // write, so lock-free readers see a fully initialized node.
            Volatile.Write<Node>(ref tables.m_buckets[bucketNo], new Node(key, value, hashcode, tables.m_buckets[bucketNo]));
            checked
            {
                // Bump the element count guarded by lock lockNo; 'checked'
                // raises OverflowException instead of silently wrapping.
                tables.m_countPerLock[lockNo]++;
            }

            //
            // If the number of elements guarded by this lock has exceeded the budget, resize the bucket table.
            // It is also possible that GrowTable will increase the budget but won't resize the bucket table.
            // That happens if the bucket table is found to be poorly utilized due to a bad hash function.
            //
            if (tables.m_countPerLock[lockNo] > m_budget)
            {
                resizeDesired = true;
            }
        }
        finally
        {
            if (lockTaken)
                // Release the stripe lock acquired above.
                Monitor.Exit(tables.m_locks[lockNo]);
        }

        //
        // The fact that we got here means that we just performed an insertion. If necessary, we will grow the table.
        //
        // Concurrency notes:
        // - Notice that we are not holding any locks at when calling GrowTable. This is necessary to prevent deadlocks.
        // - As a result, it is possible that GrowTable will be called unnecessarily. But, GrowTable will obtain lock 0
        //   and then verify that the table we passed to it as the argument is still the current table.
        //
        if (resizeDesired)
        {
#if FEATURE_RANDOMIZED_STRING_HASHING
#if !FEATURE_CORECLR
            if (resizeDueToCollisions)
            {
                GrowTable(tables, (IEqualityComparer<TKey>)HashHelpers.GetRandomizedEqualityComparer(comparer), true, m_keyRehashCount);
            }
            else
#endif // !FEATURE_CORECLR
            {
                GrowTable(tables, tables.m_comparer, false, m_keyRehashCount);
            }
#else
            GrowTable(tables, tables.m_comparer, false, m_keyRehashCount);
#endif
        }

        resultingValue = value;
        return true;
    }
}