// Source file: src/runtime/map.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/elem pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index). To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times). When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.

import (
	"runtime/internal/atomic"
	"runtime/internal/math"
	"runtime/internal/sys"
	"unsafe"
)

const (
	// Maximum number of key/elem pairs a bucket can hold.
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits

	// Maximum average load of a bucket that triggers growth is 6.5.
	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
	loadFactorNum = 13
	loadFactorDen = 2

	// Maximum key or elem size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	// Fast versions cannot handle big elems - the cutoff size for
	// fast versions in cmd/compile/internal/gc/walk.go must be at most this elem.
	maxKeySize  = 128
	maxElemSize = 128

	// data offset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. We reserve a few possibilities for
	// special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
	emptyOne       = 1 // this cell is empty
	evacuatedX     = 2 // key/elem is valid.  Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
	minTopHash     = 5 // minimum tophash for a normal filled cell.

	// flags
	iterator     = 1 // there may be an iterator using buckets
	oldIterator  = 2 // there may be an iterator using oldbuckets
	hashWriting  = 4 // a goroutine is writing to the map
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*sys.PtrSize) - 1
)
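
// Illustrative note, not part of the original file: loadFactorNum/loadFactorDen
// encodes a maximum average load of 6.5 entries per 8-slot bucket. For
// example, with B=5 (32 buckets) growth triggers once the count exceeds
// 13*(32/2) = 208 entries, which is exactly 6.5*32.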

// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
func isEmpty(x uint8) bool {
	return x <= emptyOne
}

// A header for a Go map.
type hmap struct {
	// Note: the format of the hmap is also encoded in cmd/compile/internal/gc/reflect.go.
	// Make sure this stays in sync with the compiler's definition.
	count     int // # live cells == size of map.  Must be first (used by len() builtin)
	flags     uint8
	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
	hash0     uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	extra *mapextra // optional fields
}

// mapextra holds fields that are not present on all maps.
type mapextra struct {
	// If both key and elem do not contain pointers and are inline, then we mark bucket
	// type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
	// overflow and oldoverflow are only used if key and elem do not contain pointers.
	// overflow contains overflow buckets for hmap.buckets.
	// oldoverflow contains overflow buckets for hmap.oldbuckets.
	// The indirection allows to store a pointer to the slice in hiter.
	overflow    *[]*bmap
	oldoverflow *[]*bmap

	// nextOverflow holds a pointer to a free overflow bucket.
	nextOverflow *bmap
}

// A bucket for a Go map.
type bmap struct {
	// tophash generally contains the top byte of the hash value
	// for each key in this bucket. If tophash[0] < minTopHash,
	// tophash[0] is a bucket evacuation state instead.
	tophash [bucketCnt]uint8
	// Followed by bucketCnt keys and then bucketCnt elems.
	// NOTE: packing all the keys together and then all the elems together makes the
	// code a bit more complicated than alternating key/elem/key/elem/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}
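
// Illustrative note, not part of the original file: for a map[int64]int8,
// alternating key/elem pairs would need 7 bytes of padding after each elem;
// the grouped layout described above is, in memory,
//
//	tophash [8]uint8 | 8 keys | 8 elems | overflow pointer
//
// which needs no padding between entries.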

// A hash iteration structure.
// If you modify hiter, also change cmd/compile/internal/gc/reflect.go to indicate
// the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/internal/gc/range.go).
	elem        unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
	// Masking the shift amount allows overflow checks to be elided.
	return uintptr(1) << (b & (sys.PtrSize*8 - 1))
}

// bucketMask returns 1<<b - 1, optimized for code generation.
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}

// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}

func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > emptyOne && h < minTopHash
}
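
// exampleTophash is an illustrative sketch, not part of the original file.
// tophash keeps the top byte of the hash; raw values below minTopHash are
// bumped up so they can never collide with the sentinel states
// (emptyRest..evacuatedEmpty) stored in the same tophash array.
func exampleTophash() {
	hash := uintptr(0x03) << (sys.PtrSize*8 - 8) // top byte of the hash is 3
	if tophash(hash) != 3+minTopHash {
		throw("example: tophash entered the sentinel range")
	}
}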

func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
}

func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
}

func (b *bmap) keys() unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset)
}

// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets.
// This is used to trigger same-size map growth.
// See also tooManyOverflowBuckets.
// To keep hmap small, noverflow is a uint16.
// When there are few buckets, noverflow is an exact count.
// When there are many buckets, noverflow is an approximate count.
func (h *hmap) incrnoverflow() {
	// We trigger same-size map growth if there are
	// as many overflow buckets as buckets.
	// We need to be able to count to 1<<h.B.
	if h.B < 16 {
		h.noverflow++
		return
	}
	// Increment with probability 1/(1<<(h.B-15)).
	// When we reach 1<<15 - 1, we will have approximately
	// as many overflow buckets as buckets.
	mask := uint32(1)<<(h.B-15) - 1
	// Example: if h.B == 18, then mask == 7,
	// and fastrand & 7 == 0 with probability 1/8.
	if fastrand()&mask == 0 {
		h.noverflow++
	}
}

func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
	var ovf *bmap
	if h.extra != nil && h.extra.nextOverflow != nil {
		// We have preallocated overflow buckets available.
		// See makeBucketArray for more details.
		ovf = h.extra.nextOverflow
		if ovf.overflow(t) == nil {
			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.bucketsize)))
		} else {
			// This is the last preallocated overflow bucket.
			// Reset the overflow pointer on this bucket,
			// which was set to a non-nil sentinel value.
			ovf.setoverflow(t, nil)
			h.extra.nextOverflow = nil
		}
	} else {
		ovf = (*bmap)(newobject(t.bucket))
	}
	h.incrnoverflow()
	if t.bucket.ptrdata == 0 {
		h.createOverflow()
		*h.extra.overflow = append(*h.extra.overflow, ovf)
	}
	b.setoverflow(t, ovf)
	return ovf
}

func (h *hmap) createOverflow() {
	if h.extra == nil {
		h.extra = new(mapextra)
	}
	if h.extra.overflow == nil {
		h.extra.overflow = new([]*bmap)
	}
}

func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	if int64(int(hint)) != hint {
		hint = 0
	}
	return makemap(t, int(hint), h)
}

// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most bucketCnt
// at compile time and the map needs to be allocated on the heap.
func makemap_small() *hmap {
	h := new(hmap)
	h.hash0 = fastrand()
	return h
}

// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, bucket pointed to can be used as the first bucket.
func makemap(t *maptype, hint int, h *hmap) *hmap {
	mem, overflow := math.MulUintptr(uintptr(hint), t.bucket.size)
	if overflow || mem > maxAlloc {
		hint = 0
	}

	// initialize Hmap
	if h == nil {
		h = new(hmap)
	}
	h.hash0 = fastrand()

	// Find the size parameter B which will hold the requested # of elements.
	// For hint < 0 overLoadFactor returns false since hint < bucketCnt.
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	h.B = B

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large zeroing this memory could take a while.
	if h.B != 0 {
		var nextOverflow *bmap
		h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
		if nextOverflow != nil {
			h.extra = new(mapextra)
			h.extra.nextOverflow = nextOverflow
		}
	}

	return h
}
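
// chooseBExample is an illustrative sketch, not part of the original file.
// It mirrors the sizing loop in makemap: for make(map[K]V, 100) it settles
// on B=4, since 16 buckets hold up to 13*(16/2) = 104 entries without
// exceeding the load factor.
func chooseBExample(hint int) uint8 {
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	return B // chooseBExample(100) == 4
}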

// makeBucketArray initializes a backing array for map buckets.
// 1<<b is the minimum number of buckets to allocate.
// dirtyalloc should either be nil or a bucket array previously
// allocated by makeBucketArray with the same t and b parameters.
// If dirtyalloc is nil a new backing array will be alloced and
// otherwise dirtyalloc will be cleared and reused as backing array.
func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
	base := bucketShift(b)
	nbuckets := base
	// For small b, overflow buckets are unlikely.
	// Avoid the overhead of the calculation.
	if b >= 4 {
		// Add on the estimated number of overflow buckets
		// required to insert the median number of elements
		// used with this value of b.
		nbuckets += bucketShift(b - 4)
		sz := t.bucket.size * nbuckets
		up := roundupsize(sz)
		if up != sz {
			nbuckets = up / t.bucket.size
		}
	}

	if dirtyalloc == nil {
		buckets = newarray(t.bucket, int(nbuckets))
	} else {
		// dirtyalloc was previously generated by
		// the above newarray(t.bucket, int(nbuckets))
		// but may not be empty.
		buckets = dirtyalloc
		size := t.bucket.size * nbuckets
		if t.bucket.ptrdata != 0 {
			memclrHasPointers(buckets, size)
		} else {
			memclrNoHeapPointers(buckets, size)
		}
	}

	if base != nbuckets {
		// We preallocated some overflow buckets.
		// To keep the overhead of tracking these overflow buckets to a minimum,
		// we use the convention that if a preallocated overflow bucket's overflow
		// pointer is nil, then there are more available by bumping the pointer.
		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
		last.setoverflow(t, (*bmap)(buckets))
	}
	return buckets, nextOverflow
}

// mapaccess1 returns a pointer to h[key].  Never returns nil, instead
// it will return a reference to the zero object for the elem type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		if t.hashMightPanic() {
			t.key.alg.hash(key, 0) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				if t.indirectelem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		if t.hashMightPanic() {
			t.key.alg.hash(key, 0) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				if t.indirectelem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e, true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
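
// Illustrative note, not part of the original file: the compiler lowers the
// two forms of a map read onto the functions above, roughly v := m[k] onto
// mapaccess1 and v, ok := m[k] onto mapaccess2. A miss returns a pointer to
// zeroVal rather than nil, which is how a missing key reads as the zero value.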

// returns both key and elem. Used by map iterator
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				if t.indirectelem() {
					e = *((*unsafe.Pointer)(e))
				}
				return k, e
			}
		}
	}
	return nil, nil
}

func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero
	}
	return e
}

func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero, false
	}
	return e, true
}

// Like mapaccess, but allocates a slot for the key if it is not present in the map.
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		pc := funcPC(mapassign)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled {
		msanread(key, t.key.size)
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))

	// Set hashWriting after calling alg.hash, since alg.hash may panic,
	// in which case we have not actually done a write.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var inserti *uint8
	var insertk unsafe.Pointer
	var elem unsafe.Pointer
bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if !alg.equal(key, k) {
				continue
			}
			// already have a mapping for key. Update it.
			if t.needkeyupdate() {
				typedmemmove(t.key, k, key)
			}
			elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		elem = add(insertk, bucketCnt*uintptr(t.keysize))
	}

	// store new key/elem at insert position
	if t.indirectkey() {
		kmem := newobject(t.key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.indirectelem() {
		vmem := newobject(t.elem)
		*(*unsafe.Pointer)(elem) = vmem
	}
	typedmemmove(t.key, insertk, key)
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	if t.indirectelem() {
		elem = *((*unsafe.Pointer)(elem))
	}
	return elem
}
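
// Illustrative note, not part of the original file: an assignment m[k] = v is
// compiled as a call to mapassign followed by a store of v through the
// returned pointer. mapassign only guarantees a slot for the key; it never
// writes the element itself.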

func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		if t.hashMightPanic() {
			t.key.alg.hash(key, 0) // see issue 23734
		}
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))

	// Set hashWriting after calling alg.hash, since alg.hash may panic,
	// in which case we have not actually done a write (delete).
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break search
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey() {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.indirectkey() {
				*(*unsafe.Pointer)(k) = nil
			} else if t.key.ptrdata != 0 {
				memclrHasPointers(k, t.key.size)
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
			if t.indirectelem() {
				*(*unsafe.Pointer)(e) = nil
			} else if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			// It would be nice to make this a separate function, but
			// for loops are not currently inlineable.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}
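
// Illustrative note, not part of the original file: the backward loop above
// turns a trailing run of emptyOne cells into emptyRest. If a bucket's
// tophash ends as [h0, h1, emptyOne, ..., emptyOne] after a delete, the run
// becomes emptyRest, letting later lookups stop at the first emptyRest
// instead of scanning the rest of the bucket and its overflow chain.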

// mapiterinit initializes the hiter struct used for ranging over maps.
// The hiter struct pointed to by 'it' is allocated on the stack
// by the compilers order pass or on the heap by reflect_mapiterinit.
// Both need to have zeroed hiter since the struct contains pointers.
func mapiterinit(t *maptype, h *hmap, it *hiter) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
	}

	if h == nil || h.count == 0 {
		return
	}

	if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
		throw("hash_iter size incorrect") // see cmd/compile/internal/gc/reflect.go
	}
	it.t = t
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if t.bucket.ptrdata == 0 {
		// Allocate the current slice and remember pointers to both current and old.
		// This preserves all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = h.extra.overflow
		it.oldoverflow = h.extra.oldoverflow
	}

	// decide where to start
	r := uintptr(fastrand())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand()) << 31
	}
	it.startBucket = r & bucketMask(h.B)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))

	// iterator state
	it.bucket = it.startBucket

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomic.Or8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}
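
// Illustrative note, not part of the original file: because startBucket and
// offset are seeded from fastrand above, two consecutive range loops over the
// same unmodified map generally visit keys in different orders; iteration
// order is deliberately not a stable property of Go maps.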

func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map iteration and map write")
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket
	alg := t.key.alg

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.elem = nil
			return
		}
		if h.growing() && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & it.h.oldbucketmask()
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == bucketShift(it.B) {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
			// TODO: emptyRest is hard to use here, as we start iterating
			// in the middle of a bucket. It's feasible, just tricky.
			continue
		}
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
		if t.indirectkey() {
			k = *((*unsafe.Pointer)(k))
		}
		e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.elemsize))
		if checkBucket != noCheck && !h.sameSizeGrow() {
			// Special case: iterator was started during a grow to a larger size
			// and the grow is not done yet. We're working on a bucket whose
			// oldbucket has not been evacuated yet. Or at least, it wasn't
			// evacuated when we started the bucket. So we're iterating
			// through the oldbucket, skipping any keys that will go
			// to the other new bucket (each oldbucket expands to two
			// buckets during a grow).
			if t.reflexivekey() || alg.equal(k, k) {
				// If the item in the oldbucket is not destined for
				// the current new bucket in the iteration, skip it.
				hash := alg.hash(k, uintptr(h.hash0))
				if hash&bucketMask(it.B) != checkBucket {
					continue
				}
			} else {
				// Hash isn't repeatable if k != k (NaNs).  We need a
				// repeatable and randomish choice of which direction
				// to send NaNs during evacuation. We'll use the low
				// bit of tophash to decide which way NaNs go.
				// NOTE: this case is why we need two evacuate tophash
				// values, evacuatedX and evacuatedY, that differ in
				// their low bit.
				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
					continue
				}
			}
		}
		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
			!(t.reflexivekey() || alg.equal(k, k)) {
			// This is the golden data, we can return it.
			// OR
			// key!=key, so the entry can't be deleted or updated, so we can just return it.
			// That's lucky for us because when key!=key we can't look it up successfully.
			it.key = k
			if t.indirectelem() {
				e = *((*unsafe.Pointer)(e))
			}
			it.elem = e
		} else {
			// The hash table has grown since the iterator was started.
			// The golden data for this key is now somewhere else.
			// Check the current hash table for the data.
			// This code handles the case where the key
			// has been deleted, updated, or deleted and reinserted.
			// NOTE: we need to regrab the key as it has potentially been
			// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
			rk, re := mapaccessK(t, h, k)
			if rk == nil {
				continue // key has been deleted
			}
			it.key = rk
			it.elem = re
		}
		it.bucket = bucket
		if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
			it.bptr = b
		}
		it.i = i + 1
		it.checkBucket = checkBucket
		return
	}
	b = b.overflow(t)
	i = 0
	goto next
}

// mapclear deletes all keys from a map.
func mapclear(t *maptype, h *hmap) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapclear)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
	}

	if h == nil || h.count == 0 {
		return
	}

	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	h.flags ^= hashWriting

	h.flags &^= sameSizeGrow
	h.oldbuckets = nil
	h.nevacuate = 0
	h.noverflow = 0
	h.count = 0

	// Keep the mapextra allocation but clear any extra information.
	if h.extra != nil {
		*h.extra = mapextra{}
	}

	// makeBucketArray clears the memory pointed to by h.buckets
	// and recovers any overflow buckets by generating them
	// as if h.buckets was newly alloced.
	_, nextOverflow := makeBucketArray(t, h.B, h.buckets)
	if nextOverflow != nil {
		// If overflow buckets are created then h.extra
		// will have been allocated during initial bucket creation.
		h.extra.nextOverflow = nextOverflow
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func hashGrow(t *maptype, h *hmap) {
	// If we've hit the load factor, get bigger.
	// Otherwise, there are too many overflow buckets,
	// so keep the same number of buckets and "grow" laterally.
	bigger := uint8(1)
	if !overLoadFactor(h.count+1, h.B) {
		bigger = 0
		h.flags |= sameSizeGrow
	}
	oldbuckets := h.buckets
	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)

	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B += bigger
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0
	h.noverflow = 0

	if h.extra != nil && h.extra.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.extra.oldoverflow != nil {
			throw("oldoverflow is not nil")
		}
		h.extra.oldoverflow = h.extra.overflow
		h.extra.overflow = nil
	}
	if nextOverflow != nil {
		if h.extra == nil {
			h.extra = new(mapextra)
		}
		h.extra.nextOverflow = nextOverflow
	}

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}

// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool {
	return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}

// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// Note that most of these overflow buckets must be in sparse use;
// if use was dense, then we'd have already triggered regular map growth.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	// If the threshold is too low, we do extraneous work.
	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
	// "too many" means (approximately) as many overflow buckets as regular buckets.
	// See incrnoverflow for more details.
	if B > 15 {
		B = 15
	}
	// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
	return noverflow >= uint16(1)<<(B&15)
}
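
// growthThresholdExample is an illustrative sketch, not part of the original
// file. It exercises the two growth triggers used by mapassign: with B=5
// (32 buckets), overLoadFactor fires past 13*(32/2) = 208 entries, and
// tooManyOverflowBuckets fires at 1<<5 = 32 overflow buckets.
func growthThresholdExample() (bool, bool) {
	return overLoadFactor(209, 5), tooManyOverflowBuckets(32, 5) // both true
}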

// growing reports whether h is growing. The growth may be to the same size or bigger.
func (h *hmap) growing() bool {
	return h.oldbuckets != nil
}

// sameSizeGrow reports whether the current growth is to a map of the same size.
func (h *hmap) sameSizeGrow() bool {
	return h.flags&sameSizeGrow != 0
}

// noldbuckets calculates the number of buckets prior to the current map growth.
func (h *hmap) noldbuckets() uintptr {
	oldB := h.B
	if !h.sameSizeGrow() {
		oldB--
	}
	return bucketShift(oldB)
}

// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
func (h *hmap) oldbucketmask() uintptr {
	return h.noldbuckets() - 1
}

func growWork(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate(t, h, h.nevacuate)
	}
}

func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.bucketsize)))
	return evacuated(b)
}

// evacDst is an evacuation destination.
type evacDst struct {
	b *bmap          // current destination bucket
	i int            // key/elem index into b
	k unsafe.Pointer // pointer to current key storage
	e unsafe.Pointer // pointer to current elem storage
}

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*uintptr(t.keysize))

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*uintptr(t.keysize))
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*uintptr(t.keysize))
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.keysize)), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.indirectkey() {
					k2 = *((*unsafe.Pointer)(k2))
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.key.alg.hash(k2, uintptr(h.hash0))
					if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.alg.equal(k2, k2) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash. Moreover,
						// it isn't reproducible. Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way. Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						useY = top & 1
						top = tophash(hash)
					} else {
						if hash&newbit != 0 {
							useY = 1
						}
					}
				}

				if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
					throw("bad evacuatedN")
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*uintptr(t.keysize))
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
				if t.indirectkey() {
					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
				} else {
					typedmemmove(t.key, dst.k, k) // copy elem
				}
				if t.indirectelem() {
					*(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
				} else {
					typedmemmove(t.elem, dst.e, e)
				}
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, uintptr(t.keysize))
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
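
// Illustrative note, not part of the original file: when a map grows from
// B=2 (4 buckets) to B=3 (8 buckets), newbit == 4, so a key in old bucket 1
// evacuates to new bucket 1 (X part, hash&4 == 0) or to new bucket
// 1+4 == 5 (Y part, hash&4 != 0); each old bucket splits into exactly two.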

func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
	h.nevacuate++
	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
	stop := h.nevacuate + 1024
	if stop > newbit {
		stop = newbit
	}
	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
		h.nevacuate++
	}
	if h.nevacuate == newbit { // newbit == # of oldbuckets
		// Growing is all done. Free old main bucket array.
		h.oldbuckets = nil
		// Can discard old overflow buckets as well.
		// If they are still referenced by an iterator,
		// then the iterator holds a pointers to the slice.
		if h.extra != nil {
			h.extra.oldoverflow = nil
		}
		h.flags &^= sameSizeGrow
	}
}

func ismapkey(t *_type) bool {
	return t.alg.hash != nil
}

// Reflect stubs. Called from ../reflect/asm_*.s

//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype, cap int) *hmap {
	// Check invariants and reflects math.
	if !ismapkey(t.key) {
		throw("runtime.reflect_makemap: unsupported map key type")
	}
	if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
		t.key.size <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.size)) {
		throw("key size wrong")
	}
	if t.elem.size > maxElemSize && (!t.indirectelem() || t.elemsize != uint8(sys.PtrSize)) ||
		t.elem.size <= maxElemSize && (t.indirectelem() || t.elemsize != uint8(t.elem.size)) {
		throw("elem size wrong")
	}
	if t.key.align > bucketCnt {
		throw("key align too big")
	}
	if t.elem.align > bucketCnt {
		throw("elem align too big")
	}
	if t.key.size%uintptr(t.key.align) != 0 {
		throw("key size not a multiple of key align")
	}
	if t.elem.size%uintptr(t.elem.align) != 0 {
		throw("elem size not a multiple of elem align")
	}
	if bucketCnt < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.key.align) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.elem.align) != 0 {
		throw("need padding in bucket (elem)")
	}

	return makemap(t, cap, nil)
}

//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	elem, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}

//go:linkname reflect_mapassign reflect.mapassign
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
	p := mapassign(t, h, key)
	typedmemmove(t.elem, p, elem)
}

//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
	it := new(hiter)
	mapiterinit(t, h, it)
	return it
}

//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

//go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

//go:linkname reflect_mapiterelem reflect.mapiterelem
func reflect_mapiterelem(it *hiter) unsafe.Pointer {
	return it.elem
}

//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}

//go:linkname reflectlite_maplen internal/reflectlite.maplen
func reflectlite_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}

//go:linkname reflect_ismapkey reflect.ismapkey
func reflect_ismapkey(t *_type) bool {
	return ismapkey(t)
}

const maxZero = 1024 // must match value in cmd/compile/internal/gc/walk.go
var zeroVal [maxZero]byte