Code version: Go 1.12.7

The relevant code lives in src/runtime/map.go

Structure diagram

Constant definitions

Number of key/value pairs each bucket holds

// Maximum number of key/value pairs a bucket can hold.
// A bucket holds at most 1<<3 = 8 key/value pairs.
bucketCntBits = 3
bucketCnt = 1 << bucketCntBits

Load factor

// Maximum average load of a bucket that triggers growth is 6.5.
// Represent as loadFactorNum/loadFactDen, to allow integer math.
// The factor that decides when to grow. Per the upstream comment, 6.5 was
// chosen after testing: when count/buckets reaches 6.5 (buckets roughly 80%
// full), the map grows, doubling the number of buckets.
loadFactorNum = 13
loadFactorDen = 2
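
To make the integer math concrete, here is a minimal self-contained sketch (overLoaded is a made-up helper mirroring the runtime's overLoadFactor, shown later): with B = 5 there are 32 buckets, so growth triggers once the count exceeds 13 * (32 / 2) = 208, an average load of 6.5.

package main

import "fmt"

// Sketch of the load-factor check, using the same integer math:
// loadFactorNum/loadFactorDen = 13/2 = 6.5.
func overLoaded(count int, B uint8) bool {
	bucketCnt := 8
	buckets := uintptr(1) << B
	return count > bucketCnt && uintptr(count) > 13*(buckets/2)
}

func main() {
	// With B = 5 (32 buckets) the map grows once the count exceeds 13*(32/2) = 208.
	fmt.Println(overLoaded(208, 5)) // false
	fmt.Println(overLoaded(209, 5)) // true
}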

Maximum key/value size; anything larger is stored indirectly via a pointer

// Maximum key or value size to keep inline (instead of mallocing per element).
// Must fit in a uint8.
// Fast versions cannot handle big values - the cutoff size for
// fast versions in cmd/compile/internal/gc/walk.go must be at most this value.
// Keys or values larger than this (in bytes) are stored as pointers.
maxKeySize = 128
maxValueSize = 128

The bmap definition declares only part of the bucket layout; the rest is reached via offsets

// data offset should be the size of the bmap struct, but needs to be
// aligned correctly. For amd64p32 this means 64-bit alignment
// even though pointers are 32 bit.
// Offset of the data area: bmap declares only tophash, and the keys, values
// and overflow pointer that follow are located via offsets. The struct trick
// below yields the right alignment on both 32-bit and 64-bit machines.
dataOffset = unsafe.Offsetof(struct {
b bmap
v int64
}{}.v)
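
To make the offset arithmetic tangible, here is a hedged sketch with a hand-written bucket struct (for a hypothetical map[int64]int64; the runtime builds this layout dynamically) showing how the i-th key and value are reached from dataOffset:

package main

import (
	"fmt"
	"unsafe"
)

const bucketCnt = 8

// Sketch only: a bucket laid out the way the runtime does it --
// tophash array, then 8 keys, then 8 values, then the overflow pointer.
type bucket struct {
	tophash  [bucketCnt]uint8
	keys     [bucketCnt]int64
	values   [bucketCnt]int64
	overflow unsafe.Pointer
}

func main() {
	var b bucket
	dataOffset := unsafe.Offsetof(b.keys) // start of the key area
	keysize := unsafe.Sizeof(b.keys[0])
	valsize := unsafe.Sizeof(b.values[0])
	i := uintptr(3)
	// The same pointer arithmetic the runtime uses:
	k := unsafe.Pointer(uintptr(unsafe.Pointer(&b)) + dataOffset + i*keysize)
	v := unsafe.Pointer(uintptr(unsafe.Pointer(&b)) + dataOffset + bucketCnt*keysize + i*valsize)
	fmt.Println(k == unsafe.Pointer(&b.keys[3]))   // true
	fmt.Println(v == unsafe.Pointer(&b.values[3])) // true
}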

tophash states

// Possible tophash values. We reserve a few possibilities for special marks.
// Each bucket (including its overflow buckets, if any) will have either all or none of its
// entries in the evacuated* states (except during the evacuate() method, which only happens
// during map writes and thus no one else can observe the map during that time).
// These record the state of each cell in a bucket.
// The cell is empty, and no non-empty cells follow it.
emptyRest = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
// The cell is empty.
emptyOne = 1 // this cell is empty
// The entry is valid and was evacuated to the first half of the larger table,
// i.e. the same index it would keep in a same-size grow.
evacuatedX = 2 // key/value is valid. Entry has been evacuated to first half of larger table.
// The entry is valid and was evacuated to the second half of the larger table.
evacuatedY = 3 // same as above, but evacuated to second half of larger table.
// The cell is empty; its data has been evacuated into the new hmap.
evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
// The minimum tophash of a normally filled cell, since 0-4 are reserved for states.
minTopHash = 5 // minimum tophash for a normal filled cell.

Iterator flags

// flags
// two iterator flags
iterator = 1 // there may be an iterator using buckets
oldIterator = 2 // there may be an iterator using oldbuckets

Write flag, used to detect concurrent reads and writes

// writing flag
hashWriting = 4 // a goroutine is writing to the map

Growth flag, distinguishing a same-size grow (rehash) from a doubling grow

// marks a same-size grow
sameSizeGrow = 8 // the current map growth is to a new map of the same size

A sentinel bucket ID used by the iterator to mark that no bucket re-check is needed

// a sentinel value used by the iterator
// sentinel bucket ID for iterator checks
noCheck = 1<<(8*sys.PtrSize) - 1

Zero value definition

const maxZero = 1024 // must match value in cmd/compile/internal/gc/walk.go
var zeroVal [maxZero]byte
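
This is the buffer lookups return a pointer into when a key is missing; a trivial usage example (nothing here beyond standard map behavior):

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1}
	v := m["missing"] // mapaccess1 returns a pointer into zeroVal, so v is 0
	fmt.Println(v, len(m)) // 0 1 -- the failed lookup inserts nothing
}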

Type definitions

// A header for a Go map.
// The core of a Go map.
type hmap struct {
count int // number of elements in the map
flags uint8 // state flags
B uint8 // log2 of the bucket count (the table holds 2^B buckets)
noverflow uint16 // (approximate) number of overflow buckets
hash0 uint32 // hash seed
buckets unsafe.Pointer // address of the bucket array
oldbuckets unsafe.Pointer // address of the previous bucket array, non-nil while growing
nevacuate uintptr // evacuation progress counter
extra *mapextra // optional fields that not every map needs
}

type mapextra struct {
overflow *[]*bmap // pointer to the slice of overflow bucket pointers
oldoverflow *[]*bmap // pointer to the slice of old overflow bucket pointers
nextOverflow *bmap // pointer to the next free preallocated overflow bucket
}

// Bucket data structure
type bmap struct {
// tophash records the high 8 bits of each stored key's hash.
// Lookups compare tophash first, and only compare keys on a tophash match.
tophash [bucketCnt]uint8 // each bucket holds at most 8 entries
// The 8 keys and then the 8 values follow,
// with an overflow pointer at the very end.
// Followed by bucketCnt keys and then bucketCnt values. // key1, key2, ... then value1, value2, ...
// Followed by an overflow pointer. // ovf, the overflow bucket pointer

}

// A hash iteration structure.
// If you modify hiter, also change cmd/compile/internal/gc/reflect.go to indicate
// the layout of this structure.
// Map iterator.
type hiter struct {
key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go).
value unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
t *maptype
h *hmap
buckets unsafe.Pointer // bucket ptr at hash_iter initialization time
bptr *bmap // current bucket
overflow *[]*bmap // keeps overflow buckets of hmap.buckets alive
oldoverflow *[]*bmap // keeps overflow buckets of hmap.oldbuckets alive
startBucket uintptr // bucket iteration started at
offset uint8 // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
wrapped bool // already wrapped around from end of bucket array to beginning
B uint8
i uint8
bucket uintptr
checkBucket uintptr
}

// evacDst is an evacuation destination.
// Used during evacuation.
type evacDst struct {
b *bmap // current destination bucket
i int // key/value index into b
k unsafe.Pointer // current start address of the keys in b
v unsafe.Pointer // current start address of the values in b
}

Function definitions

Utility functions

// bucketShift returns 1<<b, optimized for code generation.
// Computes the bucket count.
func bucketShift(b uint8) uintptr {
if sys.GoarchAmd64|sys.GoarchAmd64p32|sys.Goarch386 != 0 {
b &= sys.PtrSize*8 - 1 // help x86 archs remove shift overflow checks
}
return uintptr(1) << b
}

// bucketMask returns 1<<b - 1, optimized for code generation.
// Computes the bucket mask for &-indexing. When mapping a hash to a bucket,
// Go rounds the bucket count up to a power of two; with m = 2^b we have
// n % m == n & (m-1), so a cheap AND replaces the expensive mod.
func bucketMask(b uint8) uintptr {
return bucketShift(b) - 1
}

// tophash calculates the tophash value for hash.
// Computes the tophash.
func tophash(hash uintptr) uint8 {
top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
// Note: values below minTopHash (5) get bumped up by 5,
// because 0-4 are reserved marker values.
top += minTopHash
}
return top
}

// Reports whether this bucket has been evacuated. Buckets are migrated as a
// whole, so checking slot 0 suffices.
func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.bucketsize)))
return evacuated(b)
}

func evacuated(b *bmap) bool {
h := b.tophash[0]
return h > emptyOne && h < minTopHash
}

// Computes the bmap that the overflow pointer refers to, via offset.
func (b *bmap) overflow(t *maptype) *bmap {
return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
}

// Sets where the overflow pointer points.
func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
}

// Returns the start address of the keys.
func (b *bmap) keys() unsafe.Pointer {
return add(unsafe.Pointer(b), dataOffset)
}

// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
// Reports whether the load factor is exceeded.
func overLoadFactor(count int, B uint8) bool {
return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}

// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// Note that most of these overflow buckets must be in sparse use;
// if use was dense, then we'd have already triggered regular map growth.
// Reports whether the overflow chains hold too many buckets. Per the comments,
// 15 is an empirical cap, so the threshold is at most 1<<15 = 32768.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
// If the threshold is too low, we do extraneous work.
// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
// "too many" means (approximately) as many overflow buckets as regular buckets.
// See incrnoverflow for more details.
if B > 15 {
B = 15
}
// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
return noverflow >= uint16(1)<<(B&15)
}

// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets.
// This is used to trigger same-size map growth.
// See also tooManyOverflowBuckets.
// To keep hmap small, noverflow is a uint16.
// When there are few buckets, noverflow is an exact count.
// When there are many buckets, noverflow is an approximate count.
// Increments the overflow-bucket count, with one special twist: noverflow is
// a uint16 (max 2^16), so once h.B >= 16 the counter is incremented only with
// probability 1/2^(B-15), making it an approximation when buckets are numerous.
func (h *hmap) incrnoverflow() {
// We trigger same-size map growth if there are
// as many overflow buckets as buckets.
// We need to be able to count to 1<<h.B.
if h.B < 16 {
h.noverflow++
return
}
// Increment with probability 1/(1<<(h.B-15)).
// When we reach 1<<15 - 1, we will have approximately
// as many overflow buckets as buckets.
mask := uint32(1)<<(h.B-15) - 1
// Example: if h.B == 18, then mask == 7,
// and fastrand & 7 == 0 with probability 1/8.
if fastrand()&mask == 0 {
h.noverflow++
}
}

// growing reports whether h is growing. The growth may be to the same size or bigger.
// Reports whether we are growing; checking that oldbuckets is non-nil suffices.
func (h *hmap) growing() bool {
return h.oldbuckets != nil
}

// sameSizeGrow reports whether the current growth is to a map of the same size.
// Reports whether this is a same-size grow.
func (h *hmap) sameSizeGrow() bool {
return h.flags&sameSizeGrow != 0
}

// noldbuckets calculates the number of buckets prior to the current map growth.
// Computes the old bucket count. A non-same-size grow doubles the table each
// time, so the old table used B-1, i.e. half the buckets.
func (h *hmap) noldbuckets() uintptr {
oldB := h.B
if !h.sameSizeGrow() {
oldB--
}
return bucketShift(oldB)
}

// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
// Computes the mask for &-indexing into the old buckets.
func (h *hmap) oldbucketmask() uintptr {
return h.noldbuckets() - 1
}

// Reports whether t can be used as a map key.
func ismapkey(t *_type) bool {
return t.alg.hash != nil
}

// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
// Reports whether the cell's tophash marks it as empty.
func isEmpty(x uint8) bool {
return x <= emptyOne
}
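
A small standalone demo of the mask and tophash arithmetic above (the hash value is made up; on a 64-bit platform sys.PtrSize*8-8 is 56):

package main

import "fmt"

const minTopHash = 5

func tophash(hash uint64) uint8 {
	top := uint8(hash >> 56) // high 8 bits on a 64-bit platform
	if top < minTopHash {
		top += minTopHash // 0-4 are reserved marker values
	}
	return top
}

func main() {
	var B uint8 = 5
	mask := uint64(1)<<B - 1              // bucketMask: 0b11111
	hash := uint64(0x03ad000000000027)    // made-up hash value
	fmt.Println(hash&mask, tophash(hash)) // bucket 7, tophash 8 (0x03 bumped by 5)
}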

Core functions

Creating a map: a := make(map[string]interface{})

Creation proceeds as follows:

  1. If an hmap struct was already passed in, reuse it; otherwise allocate a new one.
  2. From the element count hint, determine the bucket count parameter B (the map gets 2^B buckets); see the sketch after this list.
  3. If B == 0, return h right away: no bucket memory is allocated yet; it is allocated lazily later.
  4. Allocate the memory block. If B >= 4, i.e. 16 or more buckets, 2^(B-4) extra buckets (1/16 of the count) are added, the size is rounded up to the allocator's size class, and the bucket count is recomputed from the aligned size. The block is initialized as N buckets: the first 2^B serve as the table's regular buckets, the rest as overflow buckets. An overflow bucket is one hung off a full bucket's ovf pointer to store further elements (similar to chaining for hash-collision resolution).
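
A hedged sketch of step 2 (pickB is a made-up helper mirroring the overLoadFactor loop in makemap below): B is the smallest exponent that keeps hint elements under the 6.5 average load.

package main

import "fmt"

// pickB chooses the smallest B such that hint elements stay under the
// 6.5 average load factor -- the same loop shape makemap uses.
func pickB(hint int) uint8 {
	B := uint8(0)
	for hint > 8 && uintptr(hint) > 13*((uintptr(1)<<B)/2) {
		B++
	}
	return B
}

func main() {
	fmt.Println(pickB(0), pickB(8), pickB(9), pickB(100)) // 0 0 1 4
}
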
// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, bucket pointed to can be used as the first bucket.
// Creates a new map.
func makemap(t *maptype, hint int, h *hmap) *hmap {
mem, overflow := math.MulUintptr(uintptr(hint), t.bucket.size)
if overflow || mem > maxAlloc {
hint = 0
}

// initialize Hmap
if h == nil {
h = new(hmap) // if the hmap pointer is nil, allocate the hmap struct
}
h.hash0 = fastrand() // initialize the hash seed

// Find the size parameter B which will hold the requested # of elements.
// For hint < 0 overLoadFactor returns false since hint < bucketCnt.
B := uint8(0)
for overLoadFactor(hint, B) {
// determine the bucket count parameter from the element count
B++
}
h.B = B

// allocate initial hash table
// if B == 0, the buckets field is allocated lazily later (in mapassign)
// If hint is large zeroing this memory could take a while.
// When B is 0 (no meaningful size hint), no memory is allocated here;
// it is allocated lazily later.
if h.B != 0 {
var nextOverflow *bmap
h.buckets, nextOverflow = makeBucketArray(t, h.B, nil) // allocate the bucket memory block, see below
if nextOverflow != nil {
// preallocated overflow buckets exist: point the extra field's nextOverflow at them
h.extra = new(mapextra)
h.extra.nextOverflow = nextOverflow
}
}

return h
}

// makeBucketArray initializes a backing array for map buckets.
// 1<<b is the minimum number of buckets to allocate.
// dirtyalloc should either be nil or a bucket array previously
// allocated by makeBucketArray with the same t and b parameters.
// If dirtyalloc is nil a new backing array will be alloced and
// otherwise dirtyalloc will be cleared and reused as backing array.
// Creates the bucket array. t is the map type, b the log2 bucket count;
// if dirtyalloc is nil, new memory is allocated, otherwise that block is
// cleared and reused.
func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
base := bucketShift(b)
nbuckets := base
// For small b, overflow buckets are unlikely.
// Avoid the overhead of the calculation.
if b >= 4 {
// Add on the estimated number of overflow buckets
// required to insert the median number of elements
// used with this value of b.
// If b >= 4 (at least 16 buckets), add 2^(b-4) extra buckets, i.e. 1/16
// of the bucket count, as preallocated overflow space.
// Preallocated overflow buckets are used first; once they run out, new
// ones are malloc'ed and chained on. Note that Go maps never shrink:
// memory only grows, and deleting every key in an overflow bucket does
// not release it.
nbuckets += bucketShift(b - 4)
sz := t.bucket.size * nbuckets
up := roundupsize(sz) // round up to the allocator's size class
if up != sz {
nbuckets = up / t.bucket.size
}
}

// nothing to reuse, so allocate a fresh array
if dirtyalloc == nil {
buckets = newarray(t.bucket, int(nbuckets))
} else {
// dirtyalloc was previously generated by
// the above newarray(t.bucket, int(nbuckets))
// but may not be empty.
buckets = dirtyalloc
size := t.bucket.size * nbuckets
if t.bucket.kind&kindNoPointers == 0 {
memclrHasPointers(buckets, size)
} else {
memclrNoHeapPointers(buckets, size)
}
}

if base != nbuckets {
// The block holds more than 2^b buckets; the buckets past the first 2^b
// are set up as overflow buckets.
// We preallocated some overflow buckets.
// To keep the overhead of tracking these overflow buckets to a minimum,
// we use the convention that if a preallocated overflow bucket's overflow
// pointer is nil, then there are more available by bumping the pointer.
// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
// start address of the overflow buckets
nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
// position of the last overflow bucket in the block
last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
// Point the last overflow bucket's ovf pointer at the start of the block.
// This is used later to tell whether the block still has free overflow buckets.
last.setoverflow(t, (*bmap)(buckets))
}
return buckets, nextOverflow
}

Lookups: a := m["a"], a, ok := m["a"], for k, v := range m

Map lookup proceeds as follows:

  1. The hash algorithm, seeded with the hash seed, turns the key into a hash value hashKey.
  2. hashKey & (1<<B - 1) yields the key's bucket number; offsetting the base pointer by that number yields the bucket pointer b.
  3. Check whether the hmap is growing. If it is, run step 2 against the old table to get the old bucket pointer oldb; if that bucket has not been evacuated yet, set b = oldb (see the sketch after this list).
  4. Take the tophash, i.e. the high 8 bits of the hash (values below 5 are bumped by 5). Whenever a cell's tophash matches, fetch the key and compare; on a match, fetch and return the value, otherwise keep scanning.
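
A tiny sketch of the step 3 fallback, with a made-up hash: during a doubling grow from B=2 to B=3, the old index is obtained by masking down one more power of two.

package main

import "fmt"

func main() {
	hash := uintptr(0x2e) // made-up hash, low bits 0b101110
	newMask := uintptr(1)<<3 - 1
	oldMask := newMask >> 1 // "mask down one more power of two"
	// The key now maps to bucket 6, but lived in old bucket 2.
	fmt.Println(hash&newMask, hash&oldMask) // 6 2
}
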
// The names are straightforward: 1 returns the value, 2 returns value+bool,
// K returns key+value. The implementations all work the same way.

// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the value type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
// mapaccess1 returns a pointer to h[key]; it never returns nil, and if the key
// is absent it returns a reference to the zero object instead.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc()
pc := funcPC(mapaccess1)
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
if msanenabled && h != nil {
msanread(key, t.key.size)
}
if h == nil || h.count == 0 {
// handle keys of a non-comparable type
if t.hashMightPanic() {
t.key.alg.hash(key, 0) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0])
}
if h.flags&hashWriting != 0 {
// flags tracks the map's state; if several goroutines are modifying the
// hmap at the same time, throw here
throw("concurrent map read and map write")
}
alg := t.key.alg // alg holds the maptype's hash functions
hash := alg.hash(key, uintptr(h.hash0)) // hash the key, always with the same seed (the hash0 field)
m := bucketMask(h.B) // compute the mask
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) // hash&m is hash mod 2^B, giving the key's bucket number; offsetting the base pointer yields the bucket pointer
if c := h.oldbuckets; c != nil {
// the map is growing
if !h.sameSizeGrow() {
// not a same-size grow, i.e. a doubling grow, so the old table's mask is m>>1
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
// get the bucket pointer within the old table
oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
if !evacuated(oldb) {
// the old bucket has not been evacuated yet, so the data still lives in
// the old table; point b at the old bucket
b = oldb
}
}
top := tophash(hash) // take the tophash (the high 8 bits of the hash)
bucketloop:
// walk the chain: bucket -> overflow bucket -> overflow bucket -> nil
for ; b != nil; b = b.overflow(t) {
// walk the cells within the bucket
for i := uintptr(0); i < bucketCnt; i++ {
if b.tophash[i] != top {
if b.tophash[i] == emptyRest {
// tophash differs and no non-empty cells follow: bail out
break bucketloop
}
continue
}
// fetch the key
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey() {
// the key is stored as a pointer; fetch the real data
k = *((*unsafe.Pointer)(k))
}
if alg.equal(key, k) { // compare the keys
// fetch the value
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue() {
// the value is stored as a pointer; fetch the real data
v = *((*unsafe.Pointer)(v))
}
return v
}
}
}
// not found: return the zero value
return unsafe.Pointer(&zeroVal[0])
}

// The functions below mirror the one above; only the return values differ.
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc()
pc := funcPC(mapaccess2)
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
if msanenabled && h != nil {
msanread(key, t.key.size)
}
if h == nil || h.count == 0 {
if t.hashMightPanic() {
t.key.alg.hash(key, 0) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0]), false
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() {
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
if !evacuated(oldb) {
b = oldb
}
}
top := tophash(hash)
bucketloop:
for ; b != nil; b = b.overflow(t) {
for i := uintptr(0); i < bucketCnt; i++ {
if b.tophash[i] != top {
if b.tophash[i] == emptyRest {
break bucketloop
}
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
if alg.equal(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue() {
v = *((*unsafe.Pointer)(v))
}
return v, true
}
}
}
return unsafe.Pointer(&zeroVal[0]), false
}

// returns both key and value. Used by map iterator
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
if h == nil || h.count == 0 {
return nil, nil
}
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() {
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
if !evacuated(oldb) {
b = oldb
}
}
top := tophash(hash)
bucketloop:
for ; b != nil; b = b.overflow(t) {
for i := uintptr(0); i < bucketCnt; i++ {
if b.tophash[i] != top {
if b.tophash[i] == emptyRest {
break bucketloop
}
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
if alg.equal(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue() {
v = *((*unsafe.Pointer)(v))
}
return k, v
}
}
}
return nil, nil
}
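
For orientation, the three entry points correspond to the three source-level lookup forms; the compiler picks the right call:

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1}
	v := m["a"]      // compiles to a mapaccess1 call
	v2, ok := m["b"] // compiles to a mapaccess2 call
	for k, kv := range m { // the iterator may call mapaccessK if the map grew mid-loop
		fmt.Println(k, kv)
	}
	fmt.Println(v, v2, ok) // 1 0 false
}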


// Iterator traversal
// mapiterinit initializes the hiter struct used for ranging over maps.
// The hiter struct pointed to by 'it' is allocated on the stack
// by the compilers order pass or on the heap by reflect_mapiterinit.
// Both need to have zeroed hiter since the struct contains pointers.
// Iterator initialization.
func mapiterinit(t *maptype, h *hmap, it *hiter) {
if raceenabled && h != nil {
callerpc := getcallerpc()
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
}

if h == nil || h.count == 0 {
return
}

if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
throw("hash_iter size incorrect") // see cmd/compile/internal/gc/reflect.go
}
it.t = t
it.h = h

// grab snapshot of bucket state
it.B = h.B
it.buckets = h.buckets
if t.bucket.kind&kindNoPointers != 0 {
// Allocate the current slice and remember pointers to both current and old.
// This preserves all relevant overflow buckets alive even if
// the table grows and/or overflow buckets are added to the table
// while we are iterating.
h.createOverflow()
it.overflow = h.extra.overflow
it.oldoverflow = h.extra.oldoverflow
}

// decide where to start
// A random value picks the start point... which is why iterating a map
// yields a different order each time.
r := uintptr(fastrand())
// 3 bits of intra-bucket offset plus B bits of bucket index; once B exceeds
// 31-3, fastrand's 32 random bits are not enough, so another 32 are mixed in.
if h.B > 31-bucketCntBits {
r += uintptr(fastrand()) << 31
}
it.startBucket = r & bucketMask(h.B)
// randomly derive a start offset within the bucket
it.offset = uint8(r >> h.B & (bucketCnt - 1))

// iterator state
it.bucket = it.startBucket

// Remember we have an iterator.
// Can run concurrently with another mapiterinit().
if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
// mark that an iterator exists
atomic.Or8(&h.flags, iterator|oldIterator)
}

mapiternext(it)
}

func mapiternext(it *hiter) {
h := it.h
if raceenabled {
callerpc := getcallerpc()
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
}
if h.flags&hashWriting != 0 {
throw("concurrent map iteration and map write")
}
t := it.t
bucket := it.bucket
b := it.bptr
i := it.i
checkBucket := it.checkBucket
alg := t.key.alg

next:
if b == nil {
if bucket == it.startBucket && it.wrapped {
// end of iteration
it.key = nil
it.value = nil
return
}
//There are five cases here:
//1. no grow between the start and the end of iteration
//2. no grow when iteration starts, same-size grow during it, it.B == h.B
//3. no grow when iteration starts, doubling grow during it, it.B != h.B
//4. already in a same-size grow when iteration starts, it.B == h.B
//5. already in a doubling grow when iteration starts, it.B == h.B
//The logic below makes sure b always points at buckets that actually hold
//the data: the new buckets if the bucket has been evacuated,
//the old buckets if it has not.
if h.growing() && it.B == h.B {
// Cases 4 and 5 take this branch, and case 2 does too once its grow starts,
// so this condition alone cannot tell a same-size grow apart; the extra
// checkBucket value is used for that decision later.
// If the bucket has already been evacuated, checkBucket becomes noCheck
// (nothing to re-check); otherwise it is set to the current bucket.
// Iterator was started in the middle of a grow, and the grow isn't done yet.
// If the bucket we're looking at hasn't been filled in yet (i.e. the old
// bucket hasn't been evacuated) then we need to iterate through the old
// bucket and only return the ones that will be migrated to this bucket.
oldbucket := bucket & it.h.oldbucketmask()
b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
if !evacuated(b) {
checkBucket = bucket
} else {
// already evacuated: read this bucket from the new table
b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
checkBucket = noCheck
}
} else {
// Cases 1 and 3 take this branch, and case 2 does too before its grow starts.
b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
checkBucket = noCheck
}
bucket++
if bucket == bucketShift(it.B) {
bucket = 0
it.wrapped = true
}
i = 0
}
for ; i < bucketCnt; i++ {
offi := (i + it.offset) & (bucketCnt - 1)
if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
// skip cells that are empty or whose data has moved out
// TODO: emptyRest is hard to use here, as we start iterating
// in the middle of a bucket. It's feasible, just tricky.
continue
}
k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))

if checkBucket != noCheck && !h.sameSizeGrow() {
// This is the case where iteration started before a doubling grow evacuated this old bucket: keys destined for the other new bucket must be skipped.
// Special case: iterator was started during a grow to a larger size
// and the grow is not done yet. We're working on a bucket whose
// oldbucket has not been evacuated yet. Or at least, it wasn't
// evacuated when we started the bucket. So we're iterating
// through the oldbucket, skipping any keys that will go
// to the other new bucket (each oldbucket expands to two
// buckets during a grow).
//this handles the special case that NaN != NaN
if t.reflexivekey() || alg.equal(k, k) {
// If the item in the oldbucket is not destined for
// the current new bucket in the iteration, skip it.
hash := alg.hash(k, uintptr(h.hash0))
if hash&bucketMask(it.B) != checkBucket {
continue
}
} else {
// Hash isn't repeatable if k != k (NaNs). We need a
// repeatable and randomish choice of which direction
// to send NaNs during evacuation. We'll use the low
// bit of tophash to decide which way NaNs go.
// NOTE: this case is why we need two evacuate tophash
// values, evacuatedX and evacuatedY, that differ in
// their low bit.
//One use of distinguishing evacuatedX=2 from evacuatedY=3: filtering out the
//duplicates NaNs could otherwise produce during iteration.
//During evacuation, a NaN in the old bucket goes left or right pseudo-randomly
//based on (tophash & 1); note that its tophash in the new bucket is re-randomized.
//During iteration, the old cell's state (evacuatedX=2 or evacuatedY=3) records
//which way it went:
//evacuatedX means tophash&1 == 0,
//evacuatedY means tophash&1 == 1.
//Whether we are currently visiting the left or the right half is given by the
//top bit, checkBucket>>(it.B-1).
//So the code below skips a NaN whose evacuation side does not match the side
//being visited.
if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
continue
}
}
}
if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
!(t.reflexivekey() || alg.equal(k, k)) {
// This is the golden data, we can return it.
// OR
// key!=key, so the entry can't be deleted or updated, so we can just return it.
// That's lucky for us because when key!=key we can't look it up successfully.
it.key = k
if t.indirectvalue() {
v = *((*unsafe.Pointer)(v))
}
it.value = v
} else {
// the entry has been evacuated: the hash table grew after the iterator started
// The hash table has grown since the iterator was started.
// The golden data for this key is now somewhere else.
// Check the current hash table for the data.
// This code handles the case where the key
// has been deleted, updated, or deleted and reinserted.
// NOTE: we need to regrab the key as it has potentially been
// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
// Handles the key having been deleted, updated, or deleted and re-inserted,
// by re-fetching the key and value.
rk, rv := mapaccessK(t, h, k)
if rk == nil {
continue // key has been deleted
}
it.key = rk
it.value = rv
}
it.bucket = bucket
if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
it.bptr = b
}
it.i = i + 1
it.checkBucket = checkBucket
return
}
// done with this bucket; continue with its overflow chain
b = b.overflow(t)
i = 0
goto next
}
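
A quick demonstration of the random start point chosen in mapiterinit: two range loops over the same map may well print different orders (the exact output varies from run to run).

package main

import "fmt"

func main() {
	m := map[int]bool{1: true, 2: true, 3: true, 4: true, 5: true}
	// mapiterinit picks a random startBucket and intra-bucket offset,
	// so the two loops below often print different orders.
	for k := range m {
		fmt.Print(k, " ")
	}
	fmt.Println()
	for k := range m {
		fmt.Print(k, " ")
	}
	fmt.Println()
}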

Map assignment: m["a"] = 1

// Like mapaccess, but allocates a slot for the key if it is not present in the map.
// If the key is absent, a cell is allocated for it; if present, its value slot is returned for update.
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if h == nil {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
callerpc := getcallerpc()
pc := funcPC(mapassign)
racewritepc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
if msanenabled {
msanread(key, t.key.size)
}
if h.flags&hashWriting != 0 { // guard against concurrent writes from multiple goroutines
throw("concurrent map writes")
}
//compute the key's hash
alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))

// Set hashWriting after calling alg.hash, since alg.hash may panic,
// in which case we have not actually done a write.
// set the hashWriting flag, akin to taking a lock
h.flags ^= hashWriting

if h.buckets == nil {
// no buckets yet: allocate the first one lazily
h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
}

again:
//compute the bucket index
bucket := hash & bucketMask(h.B)
if h.growing() {
//if growing, do a chunk of evacuation work first; see the growth section below
growWork(t, h, bucket)
}
//get the bucket via the offset
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
top := tophash(hash)

var inserti *uint8
var insertk unsafe.Pointer
var val unsafe.Pointer
bucketloop:
for {
for i := uintptr(0); i < bucketCnt; i++ {
// while tophash differs, remember the first free cell we see
if b.tophash[i] != top {
if isEmpty(b.tophash[i]) && inserti == nil {
inserti = &b.tophash[i]
insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
}
if b.tophash[i] == emptyRest {
break bucketloop
}
continue
}
// tophash matches: compare the actual keys
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
if !alg.equal(key, k) {
continue
}
// already have a mapping for key. Update it.
// the key already exists: overwrite the stored key if the key type requires it
if t.needkeyupdate() {
typedmemmove(t.key, k, key)
}
val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
goto done
}
// move on to the next overflow bucket
ovf := b.overflow(t)
if ovf == nil {
break
}
b = ovf
}

// Did not find mapping for key. Allocate new cell & add entry.

// If we hit the max load factor or we have too many overflow buckets,
// and we're not already in the middle of growing, start growing.
//grow
if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
hashGrow(t, h)
goto again // Growing the table invalidates everything, so try again
}

// no free cell found anywhere: allocate a new overflow bucket
if inserti == nil {
// all current buckets are full, allocate a new one.
newb := h.newoverflow(t, b)
inserti = &newb.tophash[0]
insertk = add(unsafe.Pointer(newb), dataOffset)
val = add(insertk, bucketCnt*uintptr(t.keysize))
}

// store new key/value at insert position
if t.indirectkey() {
kmem := newobject(t.key)
*(*unsafe.Pointer)(insertk) = kmem
insertk = kmem
}
if t.indirectvalue() {
vmem := newobject(t.elem)
*(*unsafe.Pointer)(val) = vmem
}
// copy the new key into the insert position
typedmemmove(t.key, insertk, key)
// store the tophash
*inserti = top
h.count++

done:
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
if t.indirectvalue() {
val = *((*unsafe.Pointer)(val))
}
return val
}
//The actual value store is emitted by the compiler as an extra MOV after the call,
//so the reflect path has to perform the store itself.
//go:linkname reflect_mapassign reflect.mapassign
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
p := mapassign(t, h, key)
typedmemmove(t.elem, p, val) // this performs the actual value store
}
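
A usage sketch: going through package reflect exercises this path, since reflect_mapassign has to do the typedmemmove itself.

package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]int{}
	mv := reflect.ValueOf(m)
	// SetMapIndex eventually reaches reflect_mapassign: mapassign returns
	// the slot pointer and typedmemmove copies the value into it.
	mv.SetMapIndex(reflect.ValueOf("a"), reflect.ValueOf(1))
	fmt.Println(m["a"]) // 1
}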

Map deletion

func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if raceenabled && h != nil {
callerpc := getcallerpc()
pc := funcPC(mapdelete)
racewritepc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.key, key, callerpc, pc)
}
if msanenabled && h != nil {
msanread(key, t.key.size)
}
if h == nil || h.count == 0 {
if t.hashMightPanic() {
t.key.alg.hash(key, 0) // see issue 23734
}
return
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}

alg := t.key.alg
hash := alg.hash(key, uintptr(h.hash0))

// Set hashWriting after calling alg.hash, since alg.hash may panic,
// in which case we have not actually done a write (delete).
h.flags ^= hashWriting

bucket := hash & bucketMask(h.B)
if h.growing() {
growWork(t, h, bucket)
}
b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
bOrig := b
top := tophash(hash)
search:
for ; b != nil; b = b.overflow(t) {
for i := uintptr(0); i < bucketCnt; i++ {
if b.tophash[i] != top {
if b.tophash[i] == emptyRest {
break search
}
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
k2 := k
if t.indirectkey() {
k2 = *((*unsafe.Pointer)(k2))
}
if !alg.equal(key, k2) {
continue
}
// The logic up to this point matches lookup.
// Only clear key if there are pointers in it.
// Once found, nil out the key if it is stored as (or contains) a pointer.
if t.indirectkey() {
*(*unsafe.Pointer)(k) = nil
} else if t.key.kind&kindNoPointers == 0 {
memclrHasPointers(k, t.key.size)
}
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue() {
*(*unsafe.Pointer)(v) = nil
} else if t.elem.kind&kindNoPointers == 0 {
memclrHasPointers(v, t.elem.size)
} else {
memclrNoHeapPointers(v, t.elem.size)
}
//mark the tophash cell as emptyOne
b.tophash[i] = emptyOne
// If the bucket now ends in a bunch of emptyOne states,
// change those to emptyRest states.
// It would be nice to make this a separate function, but
// for loops are not currently inlineable.
if i == bucketCnt-1 {
//At the last cell of the bucket, look at the overflow chain: if an overflow
//bucket follows and its first cell is not emptyRest, non-empty cells may
//still follow, so skip the emptyRest back-propagation below.
if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
goto notLast
}
} else {
if b.tophash[i+1] != emptyRest {
goto notLast
}
}
for {
//walk backwards, turning trailing emptyOne cells into emptyRest
b.tophash[i] = emptyRest
if i == 0 {
if b == bOrig {
//reached the head of the initial bucket: we're done
break // beginning of initial bucket, we're done.
}
// Find previous bucket, continue at its last entry.
//find the preceding bucket and continue at its last entry
c := b
for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
}
i = bucketCnt - 1
} else {
//step back one cell within the current bucket
i--
}
if b.tophash[i] != emptyOne {
break
}
}
notLast:
//one element deleted, so decrement the count
h.count--
break search
}
}

if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
}

Growth

Data is not migrated in one shot but incrementally (buckets are moved during inserts and deletes), which amortizes the cost of growing. To keep a never-touched bucket from stalling the grow forever, migration also proceeds in order: each write, after evacuating the bucket it touches, evacuates the next not-yet-moved bucket as well, so in the worst case n writes are guaranteed to finish migrating a map of size n.
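
A hedged sketch (made-up numbers) of the x/y decision used in evacuate below: during a doubling grow, the hash bit that just became significant (newbit) decides whether a key stays at its old index (x) or moves to old index + newbit (y).

package main

import "fmt"

func main() {
	// Doubling grow from B=2 (4 old buckets) to B=3: newbit == 4.
	oldbucket := uintptr(2)
	newbit := uintptr(4)
	for _, hash := range []uintptr{0x06, 0x0a} { // both map to old bucket 2
		if hash&newbit != 0 {
			fmt.Println(hash, "-> y, new bucket", oldbucket+newbit) // 6 -> y, bucket 6
		} else {
			fmt.Println(hash, "-> x, new bucket", oldbucket) // 10 -> x, bucket 2
		}
	}
}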

func growWork(t *maptype, h *hmap, bucket uintptr) {
// make sure we evacuate the oldbucket corresponding
// to the bucket we're about to use
//the actual evacuation
evacuate(t, h, bucket&h.oldbucketmask())

// evacuate one more oldbucket to make progress on growing
if h.growing() {
//while growing, evacuate one more bucket to make progress
evacuate(t, h, h.nevacuate)
}
}

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
// get the old bucket to evacuate
b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
// newbit is the number of old buckets
newbit := h.noldbuckets()
if !evacuated(b) {
// TODO: reuse overflow buckets instead of using new ones, if there
// is no iterator using the old buckets. (If !oldIterator.)

// xy contains the x and y (low and high) evacuation destinations.
// x is the destination at the key's old index; y is the destination in the
// doubled table's upper half.
// e.g. B=2 grows to B=3 and the bucket being evacuated is 2, so newbit=4:
// h.oldbuckets 0 1 2 3
// h.buckets    0 1 2 3 4 5 6 7 ...
//                  x       y
var xy [2]evacDst
//x: the destination at the same index in the new table
x := &xy[0]
x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
x.k = add(unsafe.Pointer(x.b), dataOffset)
x.v = add(x.k, bucketCnt*uintptr(t.keysize))

if !h.sameSizeGrow() {
// Only calculate y pointers if we're growing bigger.
// Otherwise GC can see bad pointers.
// y (the upper-half destination) is only computed for a doubling grow
y := &xy[1]
y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
y.k = add(unsafe.Pointer(y.b), dataOffset)
y.v = add(y.k, bucketCnt*uintptr(t.keysize))
}

for ; b != nil; b = b.overflow(t) {
//start addresses of the keys and values
k := add(unsafe.Pointer(b), dataOffset)
v := add(k, bucketCnt*uintptr(t.keysize))
for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
//walk every key/value
top := b.tophash[i]
if isEmpty(top) {
// empty cell: mark it evacuatedEmpty
b.tophash[i] = evacuatedEmpty
continue
}
if top < minTopHash {
//top should always be >= minTopHash here
throw("bad map state")
}
k2 := k
if t.indirectkey() {
//fetch the actual key
k2 = *((*unsafe.Pointer)(k2))
}
var useY uint8
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/value to bucket x or bucket y).
hash := t.key.alg.hash(k2, uintptr(h.hash0))
if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.alg.equal(k2, k2) {
// If key != key (NaNs), then the hash could be (and probably
// will be) entirely different from the old hash. Moreover,
// it isn't reproducible. Reproducibility is required in the
// presence of iterators, as our evacuation decision must
// match whatever decision the iterator made.
// Fortunately, we have the freedom to send these keys either
// way. Also, tophash is meaningless for these kinds of keys.
// We let the low bit of tophash drive the evacuation decision.
// We recompute a new random tophash for the next level so
// these keys will get evenly distributed across all buckets
// after multiple grows.
// With an iterator present and key != key (NaNs and the like), the low bit
// of the old tophash decides the side, and a fresh random tophash is computed.
useY = top & 1
top = tophash(hash)
} else {
if hash&newbit != 0 {
//the newly significant hash bit decides the half, spreading keys evenly across the two destination buckets
useY = 1
}
}
}

if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
throw("bad evacuatedN")
}

b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
//pick the evacuation destination
dst := &xy[useY] // evacuation destination

if dst.i == bucketCnt {
//destination bucket is full: chain on a new overflow bucket
dst.b = h.newoverflow(t, dst.b)
dst.i = 0
dst.k = add(unsafe.Pointer(dst.b), dataOffset)
dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
}
//dst.i&(bucketCnt-1) is dst.i%bucketCnt
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
if t.indirectkey() {
*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
} else {
typedmemmove(t.key, dst.k, k) // copy value
}
if t.indirectvalue() {
*(*unsafe.Pointer)(dst.v) = *(*unsafe.Pointer)(v)
} else {
typedmemmove(t.elem, dst.v, v)
}
dst.i++
// These updates might push these pointers past the end of the
// key or value arrays. That's ok, as we have the overflow pointer
// at the end of the bucket to protect against pointing past the
// end of the bucket.
// advance the k and v pointers by one slot
dst.k = add(dst.k, uintptr(t.keysize))
dst.v = add(dst.v, uintptr(t.valuesize))
}
}
// Unlink the overflow buckets & clear key/value to help GC.
if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
//no iterator holds oldbuckets, so clear the old memory to help GC
b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
// Preserve b.tophash because the evacuation
// state is maintained there.
ptr := add(b, dataOffset)
n := uintptr(t.bucketsize) - dataOffset
memclrHasPointers(ptr, n)
}
}

if oldbucket == h.nevacuate {
//this bucket was the next one due for evacuation; advance the progress counter
advanceEvacuationMark(h, t, newbit)
}
}

func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
h.nevacuate++
// Experiments suggest that 1024 is overkill by at least an order of magnitude.
// Put it in there as a safeguard anyway, to ensure O(1) behavior.
stop := h.nevacuate + 1024
if stop > newbit {
stop = newbit
}
for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
h.nevacuate++
}
if h.nevacuate == newbit { // newbit == # of oldbuckets
// newbit equals the number of old buckets; equality means evacuation is complete
// Growing is all done. Free old main bucket array.
h.oldbuckets = nil
// Can discard old overflow buckets as well.
// If they are still referenced by an iterator,
// then the iterator holds a pointers to the slice.
if h.extra != nil {
h.extra.oldoverflow = nil
}
h.flags &^= sameSizeGrow
}
}