Source file src/runtime/mbitmap.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: type and heap bitmaps.
//
// Stack, data, and bss bitmaps
//
// Stack frames and global variables in the data and bss sections are
// described by 1-bit bitmaps in which 0 means uninteresting and 1 means
// live pointer to be visited during GC. The pointers in these sections
// are always fully initialized.
//
// Heap bitmap
//
// The heap bitmap comprises 2 bits for each pointer-sized word in the heap,
// stored in the heapArena metadata backing each heap arena.
// That is, if ha is the heapArena for the arena starting at start,
// then ha.bitmap[0] holds the 2-bit entries for the four words start
// through start+3*ptrSize, ha.bitmap[1] holds the entries for
// start+4*ptrSize through start+7*ptrSize, and so on.
//
// In each 2-bit entry, the lower bit holds the same information as in the 1-bit
// bitmaps: 1 means pointer, 0 means scalar.
// The meaning of the high bit depends on the position of the word
// being described in its allocated object. In all words *except* the
// second word, the high bit indicates that the object is still being
// described. In these words, if a bit pair with a high bit 0 is
// encountered, the low bit can also be assumed to be 0, and the object
// description is over. This 00 is called the ``dead'' encoding: it
// signals that the rest of the words in the object are uninteresting
// to the garbage collector.
//
// In the second word, the high bit is the GC ``checkmarked'' bit (see below).
//
// The 2-bit entries are split when written into the byte, so that the top half
// of the byte contains 4 high bits and the bottom half contains 4 low (pointer)
// bits.
//
// The code makes use of the fact that the zero value for a heap
// bitmap means scalar/dead. This property must be preserved when
// modifying the encoding.
//
// Checkmarks
//
// In a concurrent garbage collector, one worries about failing to mark
// a live object due to mutations without write barriers or bugs in the
// collector implementation. As a sanity check, the GC has a 'checkmark'
// mode that retraverses the object graph with the world stopped, to make
// sure that everything that should be marked is marked.
// In the heap bitmap, the high bit of the 2-bit entry for the second
// word of the object holds the checkmark bit. When not in checkmark
// mode, this bit is set to 1.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	bitPointer = 1 << 0
	bitScan    = 1 << 4

	heapBitsShift      = 1     // shift offset between successive bitPointer or bitScan entries
	wordsPerBitmapByte = 8 / 2 // heap words described by one bitmap byte

	// all scan/pointer bits in a byte
	bitScanAll    = bitScan | bitScan<<heapBitsShift | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
	bitPointerAll = bitPointer | bitPointer<<heapBitsShift | bitPointer<<(2*heapBitsShift) | bitPointer<<(3*heapBitsShift)
)

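// The following is editorial illustration, not part of the original source:
// one bitmap byte describes four heap words, with the four pointer bits in
// the low nibble and the four scan bits in the high nibble. For example,
// 0xf1 (binary 1111 0001) describes four words that all still need scanning,
// of which only word 0 holds a pointer. A minimal decoder sketch:
//
//	func decode(b uint8, word uintptr) (isPointer, scan bool) {
//		s := word & 3 // position of the word within its 4-word group
//		return b>>s&bitPointer != 0, b>>s&bitScan != 0
//	}
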
// addb returns the byte pointer p+n.
//go:nowritebarrier
//go:nosplit
func addb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
}

// subtractb returns the byte pointer p-n.
//go:nowritebarrier
//go:nosplit
func subtractb(p *byte, n uintptr) *byte {
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
}

// add1 returns the byte pointer p+1.
//go:nowritebarrier
//go:nosplit
func add1(p *byte) *byte {
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
}

// subtract1 returns the byte pointer p-1.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nowritebarrier
//go:nosplit
func subtract1(p *byte) *byte {
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
}

// heapBits provides access to the bitmap bits for a single heap word.
// The methods on heapBits take value receivers so that the compiler
// can more easily inline calls to those methods and registerize the
// struct fields independently.
type heapBits struct {
	bitp  *uint8
	shift uint32
	arena uint32 // Index of heap arena containing bitp
	last  *uint8 // Last byte arena's bitmap
}

// Make the compiler check that heapBits.arena is large enough to hold
// the maximum arena frame number.
var _ = heapBits{arena: (1<<heapAddrBits)/heapArenaBytes - 1}

// markBits provides access to the mark bit for an object in the heap.
// bytep points to the byte holding the mark bit.
// mask is a byte with a single bit set that can be &ed with *bytep
// to see if the bit has been set.
// *m.bytep&m.mask != 0 indicates the mark bit is set.
// index can be used along with span information to generate
// the address of the object in the heap.
// We maintain one set of mark bits for allocation purposes and one
// for marking purposes.
type markBits struct {
	bytep *uint8
	mask  uint8
	index uintptr
}

//go:nosplit
func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
	bytep, mask := s.allocBits.bitp(allocBitIndex)
	return markBits{bytep, mask, allocBitIndex}
}

// refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
// and negates them so that ctz (count trailing zeros) instructions
// can be used. It then places these 8 bytes into the cached 64 bit
// s.allocCache.
func (s *mspan) refillAllocCache(whichByte uintptr) {
	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
	aCache := uint64(0)
	aCache |= uint64(bytes[0])
	aCache |= uint64(bytes[1]) << (1 * 8)
	aCache |= uint64(bytes[2]) << (2 * 8)
	aCache |= uint64(bytes[3]) << (3 * 8)
	aCache |= uint64(bytes[4]) << (4 * 8)
	aCache |= uint64(bytes[5]) << (5 * 8)
	aCache |= uint64(bytes[6]) << (6 * 8)
	aCache |= uint64(bytes[7]) << (7 * 8)
	s.allocCache = ^aCache
}

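// Editorial note (not in the original source): refillAllocCache is a
// manually unrolled little-endian load. Outside the runtime (which cannot
// import encoding/binary here) the equivalent would be roughly:
//
//	raw := binary.LittleEndian.Uint64(bytes[:])
//	s.allocCache = ^raw // invert so a 1 bit means "free"; sys.Ctz64 then finds the next free slot
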
// nextFreeIndex returns the index of the next free object in the span at
// or after s.freeindex.
// There are hardware instructions that can be used to make this faster
// if profiling warrants it.
func (s *mspan) nextFreeIndex() uintptr {
	sfreeindex := s.freeindex
	snelems := s.nelems
	if sfreeindex == snelems {
		return sfreeindex
	}
	if sfreeindex > snelems {
		throw("s.freeindex > s.nelems")
	}

	aCache := s.allocCache

	bitIndex := sys.Ctz64(aCache)
	for bitIndex == 64 {
		// Move index to start of next cached bits.
		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
		if sfreeindex >= snelems {
			s.freeindex = snelems
			return snelems
		}
		whichByte := sfreeindex / 8
		// Refill s.allocCache with the bytes at whichByte.
		s.refillAllocCache(whichByte)
		aCache = s.allocCache
		bitIndex = sys.Ctz64(aCache)
		// Nothing was available in the cached bits,
		// so grab the next 8 bytes and try again.
	}
	result := sfreeindex + uintptr(bitIndex)
	if result >= snelems {
		s.freeindex = snelems
		return snelems
	}

	s.allocCache >>= uint(bitIndex + 1)
	sfreeindex = result + 1

	if sfreeindex%64 == 0 && sfreeindex != snelems {
		// We just incremented s.freeindex so it isn't 0.
		// As each 1 in s.allocCache was encountered and used for allocation
		// it was shifted away. At this point s.allocCache contains all 0s.
		// Refill s.allocCache so that it corresponds
		// to the bits at s.allocBits starting at s.freeindex.
		whichByte := sfreeindex / 8
		s.refillAllocCache(whichByte)
	}
	s.freeindex = sfreeindex
	return result
}

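// Editorial worked example (not in the original source): if s.freeindex is
// 16 and s.allocCache ends in ...11010, then Ctz64 returns 1 (bit 0 is an
// allocated slot, bit 1 is free), so nextFreeIndex returns object index 17,
// shifts the cache right by 2, and leaves s.freeindex at 18.
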
245
246 func (s *mspan) isFree(index uintptr) bool {
247 if index < s.freeindex {
248 return false
249 }
250 bytep, mask := s.allocBits.bitp(index)
251 return *bytep&mask == 0
252 }
253
func (s *mspan) objIndex(p uintptr) uintptr {
	byteOffset := p - s.base()
	if byteOffset == 0 {
		return 0
	}
	if s.baseMask != 0 {
		// s.baseMask is non-0, elemsize is a power of two, so shift by s.divShift
		return byteOffset >> s.divShift
	}
	return uintptr(((uint64(byteOffset) >> s.divShift) * uint64(s.divMul)) >> s.divShift2)
}

// markBitsForAddr returns the markBits for the object containing p.
func markBitsForAddr(p uintptr) markBits {
	s := spanOf(p)
	objIndex := s.objIndex(p)
	return s.markBitsForIndex(objIndex)
}

func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
	bytep, mask := s.gcmarkBits.bitp(objIndex)
	return markBits{bytep, mask, objIndex}
}

func (s *mspan) markBitsForBase() markBits {
	return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0}
}

// isMarked reports whether mark bit m is set.
func (m markBits) isMarked() bool {
	return *m.bytep&m.mask != 0
}

// setMarked sets the marked bit in the markbits, atomically.
func (m markBits) setMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.Or8(m.bytep, m.mask)
}

// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
func (m markBits) setMarkedNonAtomic() {
	*m.bytep |= m.mask
}

// clearMarked clears the marked bit in the markbits, atomically.
func (m markBits) clearMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.And8(m.bytep, ^m.mask)
}

// markBitsForSpan returns the markBits for the span base address base.
func markBitsForSpan(base uintptr) (mbits markBits) {
	mbits = markBitsForAddr(base)
	if mbits.mask != 1 {
		throw("markBitsForSpan: unaligned start")
	}
	return mbits
}

// advance advances the markBits to the next object in the span.
func (m *markBits) advance() {
	if m.mask == 1<<7 {
		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
		m.mask = 1
	} else {
		m.mask = m.mask << 1
	}
	m.index++
}

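// Editorial sketch (not in the original source): sweep-style code walks a
// span's mark bits by pairing markBitsForBase with advance, roughly:
//
//	mbits := s.markBitsForBase()
//	for i := uintptr(0); i < s.nelems; i++ {
//		if !mbits.isMarked() {
//			// object i was not reached during marking
//		}
//		mbits.advance()
//	}
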
// heapBitsForAddr returns the heapBits for the address addr.
// The caller must ensure addr is in an allocated span.
// In particular, be careful not to point past the end of an object.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func heapBitsForAddr(addr uintptr) (h heapBits) {
	// 2 bits per word, 4 pairs per byte, and a factor of 4 from the
	// pointer-size words per byte give the indexing below.
	arena := arenaIndex(addr)
	ha := mheap_.arenas[arena.l1()][arena.l2()]
	// The compiler uses a load for nil checking ha, but in this
	// case we'll almost never hit that cache line again, so it
	// makes more sense to do a value check.
	if ha == nil {
		// addr is not in the heap. Return nil heapBits, which
		// we expect to crash in the caller.
		return
	}
	h.bitp = &ha.bitmap[(addr/(sys.PtrSize*4))%heapArenaBitmapBytes]
	h.shift = uint32((addr / sys.PtrSize) & 3)
	h.arena = uint32(arena)
	h.last = &ha.bitmap[len(ha.bitmap)-1]
	return
}

// findObject returns the base address for the heap object containing
// the address p, the object's span, and the index of the object in s.
// If p does not point into a heap object, it returns base == 0.
//
// If p is an invalid heap pointer and debug.invalidptr != 0,
// findObject panics.
//
// refBase and refOff optionally give the base address of the object
// in which the pointer p was found and the byte offset at which it
// was found. These are used for error reporting.
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
	s = spanOf(p)
	// If p is a bad pointer, it may not be in s's bounds.
	if s == nil || p < s.base() || p >= s.limit || s.state != mSpanInUse {
		if s == nil || s.state == mSpanManual {
			// If s is nil, the virtual address has never been part of the heap.
			// This pointer may be to some mmap'd region, so we allow it.
			// Pointers into stacks are also ok, the runtime manages these explicitly.
			return
		}

		// The following ensures that we are rigorous about what data
		// structures hold valid pointers.
		if debug.invalidptr != 0 {
			// Typically this indicates an incorrect use
			// of unsafe or cgo to store a bad pointer in
			// the Go heap. It may also indicate a runtime
			// bug, or garbage in uninitialized memory that
			// the GC mistook for a pointer.
			printlock()
			print("runtime: pointer ", hex(p))
			if s.state != mSpanInUse {
				print(" to unallocated span")
			} else {
				print(" to unused region of span")
			}
			print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", s.state, "\n")
			if refBase != 0 {
				print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
				gcDumpObject("object", refBase, refOff)
			}
			getg().m.traceback = 2
			throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
		}
		return
	}
	// If this span holds objects of a power-of-2 size, just mask off the bits
	// to the interior of the object. Otherwise use the size to get the base.
	if s.baseMask != 0 {
		// optimize for power of 2 sized objects.
		base = s.base()
		base = base + (p-base)&uintptr(s.baseMask)
		objIndex = (base - s.base()) >> s.divShift
		// base = p & s.baseMask is faster for small spans,
		// but doesn't work for large spans.
		// Overall, it's faster to use the more general computation above.
	} else {
		base = s.base()
		if p-base >= s.elemsize {
			// n := (p - base) / s.elemsize, computed by
			// division by multiplication.
			objIndex = uintptr(p-base) >> s.divShift * uintptr(s.divMul) >> s.divShift2
			base += objIndex * s.elemsize
		}
	}
	return
}

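// Editorial worked example (not in the original source): for a span of
// 48-byte objects based at 0x1000, findObject(0x1065, 0, 0) computes
// objIndex = 2 (offset 0x65 = 101 bytes, and 101/48 = 2 via the
// divMul/divShift trick) and returns base = 0x1000 + 2*48 = 0x1060.
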
// next returns the heapBits describing the next pointer-sized word in memory.
// That is, if h describes address p, h.next() describes p+ptrSize.
// Note that next does not modify h. The caller must record the result.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) next() heapBits {
	if h.shift < 3*heapBitsShift {
		h.shift += heapBitsShift
	} else if h.bitp != h.last {
		h.bitp, h.shift = add1(h.bitp), 0
	} else {
		// Move to the next arena.
		return h.nextArena()
	}
	return h
}

// nextArena advances h to the beginning of the next heap arena.
//
// This is a slow-path helper to next. gc's inliner knows that
// heapBits.next can be inlined even though it calls this. This is
// marked noinline so it doesn't get inlined into next and cause next
// to be too big to inline.
//
//go:nosplit
//go:noinline
func (h heapBits) nextArena() heapBits {
	h.arena++
	ai := arenaIdx(h.arena)
	l2 := mheap_.arenas[ai.l1()]
	if l2 == nil {
		// We just passed the end of the object, which
		// was also the end of the heap. Poison h. It
		// should never be dereferenced at this point.
		return heapBits{}
	}
	ha := l2[ai.l2()]
	if ha == nil {
		return heapBits{}
	}
	h.bitp, h.shift = &ha.bitmap[0], 0
	h.last = &ha.bitmap[len(ha.bitmap)-1]
	return h
}

// forward returns the heapBits describing n pointer-sized words ahead of h in memory.
// That is, if h describes address p, h.forward(n) describes p+n*ptrSize.
// h.forward(1) is equivalent to h.next(), just slower.
// Note that forward does not modify h. The caller must record the result.
func (h heapBits) forward(n uintptr) heapBits {
	n += uintptr(h.shift) / heapBitsShift
	nbitp := uintptr(unsafe.Pointer(h.bitp)) + n/4
	h.shift = uint32(n%4) * heapBitsShift
	if nbitp <= uintptr(unsafe.Pointer(h.last)) {
		h.bitp = (*uint8)(unsafe.Pointer(nbitp))
		return h
	}

	// We're in a new heap arena.
	past := nbitp - (uintptr(unsafe.Pointer(h.last)) + 1)
	h.arena += 1 + uint32(past/heapArenaBitmapBytes)
	ai := arenaIdx(h.arena)
	if l2 := mheap_.arenas[ai.l1()]; l2 != nil && l2[ai.l2()] != nil {
		a := l2[ai.l2()]
		h.bitp = &a.bitmap[past%heapArenaBitmapBytes]
		h.last = &a.bitmap[len(a.bitmap)-1]
	} else {
		h.bitp, h.last = nil, nil
	}
	return h
}

// forwardOrBoundary is like forward, but stops at boundaries between
// contiguous sections of the bitmap. It returns the number of words
// advanced over, which will be <= n.
func (h heapBits) forwardOrBoundary(n uintptr) (heapBits, uintptr) {
	maxn := 4 * ((uintptr(unsafe.Pointer(h.last)) + 1) - uintptr(unsafe.Pointer(h.bitp)))
	if n > maxn {
		n = maxn
	}
	return h.forward(n), n
}

// bits returns the heap bits for the current word.
// The result includes in its higher bits the bits for subsequent words
// described by the same bitmap byte.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) bits() uint32 {
	// The (shift & 31) eliminates a test and conditional branch
	// from the generated code.
	return uint32(*h.bitp) >> (h.shift & 31)
}

// morePointers reports whether the scan bit is set for this word, that is,
// whether this word or a later word in the object may contain a pointer.
// h must not describe the second word of the object, where the high bit
// is the checkmark bit instead.
func (h heapBits) morePointers() bool {
	return h.bits()&bitScan != 0
}

// isPointer reports whether the heap bits describe a pointer word.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) isPointer() bool {
	return h.bits()&bitPointer != 0
}

// isCheckmarked reports whether the heap bits have the checkmarked bit set.
// It must be told how large the object at h is, because the encoding of the
// checkmark bit varies by size.
// h must describe the initial word of the object.
func (h heapBits) isCheckmarked(size uintptr) bool {
	if size == sys.PtrSize {
		return (*h.bitp>>h.shift)&bitPointer != 0
	}
	// All multiword objects are 2-word aligned,
	// so we know that the initial word's 2-bit pair
	// and the second word's 2-bit pair are in the
	// same heap bitmap byte, *h.bitp.
	return (*h.bitp>>(heapBitsShift+h.shift))&bitScan != 0
}

// setCheckmarked sets the checkmarked bit.
// It must be told how large the object at h is, because the encoding of the
// checkmark bit varies by size.
// h must describe the initial word of the object.
func (h heapBits) setCheckmarked(size uintptr) {
	if size == sys.PtrSize {
		atomic.Or8(h.bitp, bitPointer<<h.shift)
		return
	}
	atomic.Or8(h.bitp, bitScan<<(heapBitsShift+h.shift))
}

// bulkBarrierPreWrite executes a write barrier
// for every pointer slot in the memory range [src, src+size),
// using pointer/scalar information from [dst, dst+size).
// This executes the write barriers necessary before a memmove.
// src, dst, and size must be pointer-aligned.
// The range [dst, dst+size) must lie within a single object.
// It does not perform the actual writes.
//
// As a special case, src == 0 indicates that this is being used for a
// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
// barrier.
//
// Callers should call bulkBarrierPreWrite immediately before
// calling memmove(dst, src, size). This function is marked nosplit
// to avoid being preempted; the GC must not stop the goroutine
// between the memmove and the execution of the barriers.
//
// The pointer bitmap is not maintained for allocations containing
// no pointers at all; any caller of bulkBarrierPreWrite must first
// make sure the underlying allocation contains pointers, usually
// by checking typ.ptrdata.
//
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr) {
	if (dst|src|size)&(sys.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.needed {
		return
	}
	if s := spanOf(dst); s == nil {
		// If dst is a global, use the data or BSS bitmaps to
		// execute write barriers.
		for _, datap := range activeModules() {
			if datap.data <= dst && dst < datap.edata {
				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
				return
			}
		}
		for _, datap := range activeModules() {
			if datap.bss <= dst && dst < datap.ebss {
				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
				return
			}
		}
		return
	} else if s.state != mSpanInUse || dst < s.base() || s.limit <= dst {
		// dst was heap memory at some point, but isn't now.
		// It can't be a global. It must be either our stack,
		// or in the case of direct channel sends, it could be
		// another stack. Either way, no new GC bits are needed
		// and we can ignore the write.
		//
		// If dst is on our stack, the compiler already emits
		// barriers for pointer writes as needed.
		return
	}

	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst)
	if src == 0 {
		for i := uintptr(0); i < size; i += sys.PtrSize {
			if h.isPointer() {
				dstx := (*uintptr)(unsafe.Pointer(dst + i))
				if !buf.putFast(*dstx, 0) {
					wbBufFlush(nil, 0)
				}
			}
			h = h.next()
		}
	} else {
		for i := uintptr(0); i < size; i += sys.PtrSize {
			if h.isPointer() {
				dstx := (*uintptr)(unsafe.Pointer(dst + i))
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				if !buf.putFast(*dstx, *srcx) {
					wbBufFlush(nil, 0)
				}
			}
			h = h.next()
		}
	}
}

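// Editorial sketch (not in the original source): callers such as
// typedmemmove pair this barrier with the actual copy, roughly:
//
//	if writeBarrier.needed && typ.ptrdata != 0 {
//		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.ptrdata)
//	}
//	memmove(dst, src, typ.size)
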
// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
// does not execute write barriers for [dst, dst+size).
//
// In addition to the requirements of bulkBarrierPreWrite
// callers need to ensure [dst, dst+size) is zeroed.
//
// This is used for special cases where e.g. dst was just
// created and zeroed with malloc.
//go:nosplit
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
	if (dst|src|size)&(sys.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.needed {
		return
	}
	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst)
	for i := uintptr(0); i < size; i += sys.PtrSize {
		if h.isPointer() {
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			if !buf.putFast(0, *srcx) {
				wbBufFlush(nil, 0)
			}
		}
		h = h.next()
	}
}

// bulkBarrierBitmap executes write barriers for copying from [src,
// src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
// assumed to start maskOffset bytes into the data covered by the
// bitmap in bits (which may not be a multiple of 8).
//
// This is used by bulkBarrierPreWrite for writes to data and BSS.
//
//go:nosplit
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
	word := maskOffset / sys.PtrSize
	bits = addb(bits, word/8)
	mask := uint8(1) << (word % 8)

	buf := &getg().m.p.ptr().wbBuf
	for i := uintptr(0); i < size; i += sys.PtrSize {
		if mask == 0 {
			bits = addb(bits, 1)
			if *bits == 0 {
				// Skip 8 words (the loop increment does the 8th).
				i += 7 * sys.PtrSize
				continue
			}
			mask = 1
		}
		if *bits&mask != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			if src == 0 {
				if !buf.putFast(*dstx, 0) {
					wbBufFlush(nil, 0)
				}
			} else {
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				if !buf.putFast(*dstx, *srcx) {
					wbBufFlush(nil, 0)
				}
			}
		}
		mask <<= 1
	}
}

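// Editorial worked example (not in the original source): on a 64-bit
// system with maskOffset = 72, word = 72/8 = 9, so the barrier starts at
// bitmap byte 9/8 = 1 with mask 1<<(9%8) = 0x02. Each pointer-sized word
// advances the mask one bit, rolling over into the next bitmap byte after
// bit 7, and a fully zero bitmap byte lets the loop skip 8 words at once.
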
// typeBitsBulkBarrier executes a write barrier for every
// pointer that would be copied from [src, src+size) to [dst,
// dst+size) by a memmove using the type bitmap to locate those
// pointer slots.
//
// The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
// dst, src, and size must be pointer-aligned.
// The type typ must have a plain bitmap, not a GC program.
// The only use of this function is in channel sends, and the
// 64 kB channel element limit takes care of this.
//
// Must not be preempted because it typically runs right before memmove,
// and the GC must observe them as an atomic action.
//
//go:nosplit
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
	if typ == nil {
		throw("runtime: typeBitsBulkBarrier without type")
	}
	if typ.size != size {
		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.size, " but memory size", size)
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if typ.kind&kindGCProg != 0 {
		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " with GC prog")
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if !writeBarrier.needed {
		return
	}
	ptrmask := typ.gcdata
	buf := &getg().m.p.ptr().wbBuf
	var bits uint32
	for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
		if i&(sys.PtrSize*8-1) == 0 {
			bits = uint32(*ptrmask)
			ptrmask = addb(ptrmask, 1)
		} else {
			bits = bits >> 1
		}
		if bits&1 != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			if !buf.putFast(*dstx, *srcx) {
				wbBufFlush(nil, 0)
			}
		}
	}
}

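// Editorial sketch (not in the original source): typ.gcdata packs one bit
// per pointer-sized word of the type, least significant bit first, so a
// standalone test for word i of a type would look like:
//
//	bit := (*addb(typ.gcdata, i/8) >> (i % 8)) & 1 // 1 means word i is a pointer
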
// The methods operating on spans all require that h has been returned
// by heapBitsForAddr and that size, n, total are the span layout description
// returned by the mspan's layout method.
// If total > size*n, it means that there is extra leftover memory in the span,
// usually due to rounding.
//
// TODO(rsc): Perhaps introduce a different heapBitsSpan type.

// initSpan initializes the heap bitmap for a span.
// It clears all checkmark bits.
// If this is a span of pointer-sized objects, it initializes all
// words to pointer/scan.
// Otherwise, it initializes all words to scalar/dead.
func (h heapBits) initSpan(s *mspan) {
	size, n, total := s.layout()

	// Init the markbit structures.
	s.freeindex = 0
	s.allocCache = ^uint64(0) // all 1s indicating all free.
	s.nelems = n
	s.allocBits = nil
	s.gcmarkBits = nil
	s.gcmarkBits = newMarkBits(s.nelems)
	s.allocBits = newAllocBits(s.nelems)

	// Clear bits corresponding to objects.
	nw := total / sys.PtrSize
	if nw%wordsPerBitmapByte != 0 {
		throw("initSpan: unaligned length")
	}
	if h.shift != 0 {
		throw("initSpan: unaligned base")
	}
	for nw > 0 {
		hNext, anw := h.forwardOrBoundary(nw)
		nbyte := anw / wordsPerBitmapByte
		if sys.PtrSize == 8 && size == sys.PtrSize {
			bitp := h.bitp
			for i := uintptr(0); i < nbyte; i++ {
				*bitp = bitPointerAll | bitScanAll
				bitp = add1(bitp)
			}
		} else {
			memclrNoHeapPointers(unsafe.Pointer(h.bitp), nbyte)
		}
		h = hNext
		nw -= anw
	}
}

// initCheckmarkSpan initializes a span for being checkmarked.
// It clears the checkmark bits, which are set to 1 in normal operation.
func (h heapBits) initCheckmarkSpan(size, n, total uintptr) {
	// The ptrSize == 8 is a compile-time constant false on 32-bit and eliminates this code entirely.
	if sys.PtrSize == 8 && size == sys.PtrSize {
		// Checkmark bit is type bit, bottom bit of every 2-bit entry.
		// Only possible on 64-bit systems, since minimum size is 8.
		// Must clear type bit (checkmark bit) of every word.
		// The type bit is the lower of every two-bit pair.
		for i := uintptr(0); i < n; i += wordsPerBitmapByte {
			*h.bitp &^= bitPointerAll
			h = h.forward(wordsPerBitmapByte)
		}
		return
	}
	for i := uintptr(0); i < n; i++ {
		*h.bitp &^= bitScan << (heapBitsShift + h.shift)
		h = h.forward(size / sys.PtrSize)
	}
}

// clearCheckmarkSpan undoes all the checkmarking in a span.
// The actual checkmark bits are ignored, so the only work to do
// is to fix the pointer bits. (Pointer bits are ignored by scanobject
// but consulted by typedmemmove.)
func (h heapBits) clearCheckmarkSpan(size, n, total uintptr) {
	// The ptrSize == 8 is a compile-time constant false on 32-bit and eliminates this code entirely.
	if sys.PtrSize == 8 && size == sys.PtrSize {
		// Checkmark bit is type bit, bottom bit of every 2-bit entry.
		// Only possible on 64-bit systems, since minimum size is 8.
		// Must restore the type bit (pointer bit) of every word.
		// The type bit is the lower of every two-bit pair.
		for i := uintptr(0); i < n; i += wordsPerBitmapByte {
			*h.bitp |= bitPointerAll
			h = h.forward(wordsPerBitmapByte)
		}
	}
}

// oneBitCount is indexed by byte and produces the
// number of one bits in that byte. For example 128 has 1 one bit.
var oneBitCount = [256]uint8{
	0, 1, 1, 2, 1, 2, 2, 3,
	1, 2, 2, 3, 2, 3, 3, 4,
	1, 2, 2, 3, 2, 3, 3, 4,
	2, 3, 3, 4, 3, 4, 4, 5,
	1, 2, 2, 3, 2, 3, 3, 4,
	2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5,
	3, 4, 4, 5, 4, 5, 5, 6,
	1, 2, 2, 3, 2, 3, 3, 4,
	2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5,
	3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5,
	3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6,
	4, 5, 5, 6, 5, 6, 6, 7,
	1, 2, 2, 3, 2, 3, 3, 4,
	2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5,
	3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5,
	3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6,
	4, 5, 5, 6, 5, 6, 6, 7,
	2, 3, 3, 4, 3, 4, 4, 5,
	3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6,
	4, 5, 5, 6, 5, 6, 6, 7,
	3, 4, 4, 5, 4, 5, 5, 6,
	4, 5, 5, 6, 5, 6, 6, 7,
	4, 5, 5, 6, 5, 6, 6, 7,
	5, 6, 6, 7, 6, 7, 7, 8}

// countAlloc returns the number of objects allocated in span s by
// scanning the mark bitmap.
func (s *mspan) countAlloc() int {
	count := 0
	maxIndex := s.nelems / 8
	for i := uintptr(0); i < maxIndex; i++ {
		mrkBits := *s.gcmarkBits.bytep(i)
		count += int(oneBitCount[mrkBits])
	}
	if bitsInLastByte := s.nelems % 8; bitsInLastByte != 0 {
		mrkBits := *s.gcmarkBits.bytep(maxIndex)
		mask := uint8((1 << bitsInLastByte) - 1)
		bits := mrkBits & mask
		count += int(oneBitCount[bits])
	}
	return count
}

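// Editorial note (not in the original source): oneBitCount is an 8-bit
// popcount table. Outside the runtime the same count could be written
// with math/bits, which this file avoids importing:
//
//	count += bits.OnesCount8(mrkBits)
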
// heapBitsSetType records that the new allocation [x, x+size)
// holds in [x, x+dataSize) one or more values of type typ.
// (The number of values is given by dataSize / typ.size.)
// If dataSize < size, the fragment [x+dataSize, x+size) is
// recorded as non-pointer data.
// It is known that the type has pointers somewhere;
// malloc does not call heapBitsSetType when there are no pointers.
//
// There can only be one allocation from a given span active at a time,
// and the bitmap for a span always falls on byte boundaries,
// so there are no write-write races for access to the heap bitmap.
// Hence, heapBitsSetType can access the bitmap without atomics.
//
// There can be read-write races between heapBitsSetType and things
// that read the heap bitmap like scanobject. However, since
// heapBitsSetType is only used for objects that have not yet been
// made reachable, readers will ignore bits being modified by this
// function. This does mean this function cannot transiently modify
// bits that belong to neighboring objects. Also, on weakly-ordered
// machines, callers must execute a store/store (publication) barrier
// between calling this function and making the object reachable.
func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
	const doubleCheck = false // slow but helpful; enable to test modifications to this code

	// dataSize is the portion of the allocation that holds values of typ;
	// the tail [x+dataSize, x+size) is extra space from rounding the
	// allocation up to a size class and contains no pointers.

	if sys.PtrSize == 8 && size == sys.PtrSize {
		// It's one word and it has pointers, it must be a pointer.
		// Since all allocated one-word objects are pointers
		// (non-pointers are aggregated into tinySize allocations),
		// initSpan sets the pointer bits for us. Nothing to do here.
		if doubleCheck {
			h := heapBitsForAddr(x)
			if !h.isPointer() {
				throw("heapBitsSetType: pointer bit missing")
			}
			if !h.morePointers() {
				throw("heapBitsSetType: scan bit missing")
			}
		}
		return
	}

	h := heapBitsForAddr(x)
	ptrmask := typ.gcdata // start of 1-bit pointer mask (or GC program, handled below)

	// The bitmap entries for a 2-word object occupy only half a byte,
	// shared with a neighboring object, so updates must be careful to
	// preserve the neighbor's bits. The cases below update the byte
	// with a single write, which also keeps 32-bit systems race-free.
	if size == 2*sys.PtrSize {
		if typ.size == sys.PtrSize {
			// We're allocating a block big enough to hold two pointers.
			// On 64-bit, that means the actual object must be two pointers,
			// or else we'd have used the one-pointer-sized block.
			// On 32-bit, however, this is the 8-byte block, the smallest one.
			// So it could be that we're allocating one pointer and this was
			// just the smallest block available. Distinguish by checking dataSize.
			// (In general the number of instances of typ being allocated is
			// dataSize/typ.size.)
			if sys.PtrSize == 4 && dataSize == sys.PtrSize {
				// 1 pointer object. On 32-bit machines clear the bit for the
				// unused second word.
				*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
				*h.bitp |= (bitPointer | bitScan) << h.shift
			} else {
				// 2-element slice of pointer.
				*h.bitp |= (bitPointer | bitScan | bitPointer<<heapBitsShift) << h.shift
			}
			return
		}
		// Otherwise typ.size must be 2*sys.PtrSize,
		// and typ.kind&kindGCProg == 0.
		if doubleCheck {
			if typ.size != 2*sys.PtrSize || typ.kind&kindGCProg != 0 {
				print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
				throw("heapBitsSetType")
			}
		}
		b := uint32(*ptrmask)
		hb := (b & 3) | bitScan
		// bitPointer == 1, bitScan is 1 << 4, heapBitsShift is 1.
		// 110011 is shifted h.shift and complemented.
		// This clears out the bits that are about to be
		// ored into *h.hbitp in the next instructions.
		*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
		*h.bitp |= uint8(hb << h.shift)
		return
	}

	outOfPlace := false
	if arenaIndex(x+size-1) != arenaIdx(h.arena) || (doubleCheck && fastrand()%2 == 0) {
		// This object spans heap arenas, so the bitmap may be
		// discontiguous. Unroll it into the object instead
		// and then copy it out.
		//
		// In doubleCheck mode, we randomly take this path
		// anyway to stress test the bitmap copying path.
		outOfPlace = true
		h.bitp = (*uint8)(unsafe.Pointer(x))
		h.last = nil
	}

	var (
		// Ptrmask input.
		p     *byte   // last ptrmask byte read
		b     uintptr // ptrmask bits already loaded
		nb    uintptr // number of bits in b at next read
		endp  *byte   // final ptrmask byte to read (then repeat)
		endnb uintptr // number of valid bits in *endp
		pbits uintptr // alternate source of bits

		// Heap bitmap output.
		w     uintptr // words processed
		nw    uintptr // number of words to process
		hbitp *byte   // next heap bitmap byte to write
		hb    uintptr // bits being prepared for *hbitp
	)

	hbitp = h.bitp

	// Handle GC program. Delayed until this part of the code
	// so that we can use the same double-check mechanism as in
	// the 1-bit case. Nothing above could have encountered GC
	// programs: the cases above are all too small.
	if typ.kind&kindGCProg != 0 {
		heapBitsSetTypeGCProg(h, typ.ptrdata, typ.size, dataSize, size, addb(typ.gcdata, 4))
		if doubleCheck {
			// Double-check the heap bits written by GC program
			// by running the GC program to create a 1-bit pointer mask
			// and then jumping to the double-check code below.
			// This doesn't catch bugs shared between the 1-bit and 4-bit
			// GC program execution, but it does catch mistakes specific
			// to just one of those and bugs in heapBitsSetTypeGCProg's
			// implementation of arrays.
			lock(&debugPtrmask.lock)
			if debugPtrmask.data == nil {
				debugPtrmask.data = (*byte)(persistentalloc(1<<20, 1, &memstats.other_sys))
			}
			ptrmask = debugPtrmask.data
			runGCProg(addb(typ.gcdata, 4), nil, ptrmask, 1)
		}
		goto Phase4
	}

	// Note about sizes:
	//
	// typ.size is the number of bytes in the object, and typ.ptrdata
	// is the number of bytes in the prefix of the object that contains
	// pointers. That is, the final typ.size - typ.ptrdata bytes
	// contain no pointers. This allows optimization of a common
	// pattern where an object has a small header of pointers followed
	// by a large amount of pointer-free data.
	//
	// The code below sets up the loop so that the leading ptrmask
	// bits are loaded into b, with nb valid bits. For an array of typ
	// (typ.size < dataSize), the mask must repeat for each element:
	// a short mask is preloaded and replicated into the register
	// pbits, while a longer mask is reread from memory via p/endp.
	p = ptrmask
	if typ.size < dataSize {
		// Filling in bits for an array of typ.
		// Set up for repetition of ptrmask during main loop.
		// Note that ptrmask describes typ.ptrdata, not typ.size,
		// which may be less but must be a multiple of the ptrmask size.
		const maxBits = sys.PtrSize*8 - 7
		if typ.ptrdata/sys.PtrSize <= maxBits {
			// Entire ptrmask fits in uintptr with room for a byte fragment.
			// Load into pbits and never read the ptrmask again.
			// This is especially important when the ptrmask has
			// fewer bits than a full uintptr, because the
			// main loop will need to repeat those bits,
			// which is easier to do from a register
			// than by rereading memory.
			nb = typ.ptrdata / sys.PtrSize
			for i := uintptr(0); i < nb; i += 8 {
				b |= uintptr(*p) << i
				p = add1(p)
			}
			nb = typ.size / sys.PtrSize

			// Replicate ptrmask to fill entire pbits uintptr.
			// Doubling and truncating is fewer steps than
			// iterating by nb each time. (nb could be 1.)
			// Since we loaded typ.ptrdata/sys.PtrSize bits
			// but are pretending to have typ.size/sys.PtrSize,
			// there might be no replication necessary/possible.
			pbits = b
			endnb = nb
			if nb+nb <= maxBits {
				for endnb <= sys.PtrSize*8 {
					pbits |= pbits << endnb
					endnb += endnb
				}
				// Truncate to a multiple of original ptrmask.
				// Because nb+nb <= maxBits, nb fits in a byte.
				// Byte division is cheaper than uintptr division.
				endnb = uintptr(maxBits/byte(nb)) * nb
				pbits &= 1<<endnb - 1
				b = pbits
				nb = endnb
			}

			// Clear p and endp as sentinel for using pbits.
			// Checked during Phase 2 loop.
			p = nil
			endp = nil
		} else {
			// Ptrmask is larger. Read it multiple times.
			n := (typ.ptrdata/sys.PtrSize+7)/8 - 1
			endp = addb(ptrmask, n)
			endnb = typ.size/sys.PtrSize - n*8
		}
	}
	if p != nil {
		b = uintptr(*p)
		p = add1(p)
		nb = 8
	}

	if typ.size == dataSize {
		// Single entry: can stop once we reach the non-pointer data.
		nw = typ.ptrdata / sys.PtrSize
	} else {
		// Repeated instances of typ in an array.
		// Have to process first N-1 entries in full, but can stop
		// once we reach the non-pointer data in the final entry.
		nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / sys.PtrSize
	}
	if nw == 0 {
		// No pointers! Caller was supposed to check.
		println("runtime: invalid type ", typ.string())
		throw("heapBitsSetType: called with non-pointer type")
		return
	}
	if nw < 2 {
		// Must write at least 2 words, because the "no scan"
		// encoding doesn't take effect until the third word.
		nw = 2
	}

	// Phase 1: Special case for leading byte (shift==0) or half-byte (shift==2).
	// The leading byte is special because it contains the bits for word 1,
	// which does not have the scan bit set.
	// The leading half-byte is special because it's a half a byte,
	// so we have to be careful with the bits already there.
	switch {
	default:
		throw("heapBitsSetType: unexpected shift")

	case h.shift == 0:
		// Ptrmask and heap bitmap are aligned.
		// Handle first byte of bitmap specially.
		//
		// The first byte we write out covers the first four
		// words of the object. The scan/dead bit on the first
		// word must be set to scan since there are pointers
		// somewhere in the object. The scan/dead bit on the
		// second word is the checkmark, so we don't set it.
		// In all following words, we set the scan bit to
		// indicate that the object continues to the next 2-bit
		// entry in the bitmap.
		hb = b & bitPointerAll
		hb |= bitScan | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
		if w += 4; w >= nw {
			goto Phase3
		}
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		b >>= 4
		nb -= 4

	case sys.PtrSize == 8 && h.shift == 2:
		// Ptrmask and heap bitmap are misaligned.
		// The bits for the first two words are in a byte shared
		// with another object, so we must be careful with the bits
		// already there.
		// We took care of 1-word and 2-word objects above,
		// so this is at least a 6-word object.
		hb = (b & (bitPointer | bitPointer<<heapBitsShift)) << (2 * heapBitsShift)
		// This is not noscan, so set the scan bit in the
		// first word.
		hb |= bitScan << (2 * heapBitsShift)
		b >>= 2
		nb -= 2
		// Note: no bitScan for second word because that's
		// the checkmark.
		*hbitp &^= uint8((bitPointer | bitScan | (bitPointer << heapBitsShift)) << (2 * heapBitsShift))
		*hbitp |= uint8(hb)
		hbitp = add1(hbitp)
		if w += 2; w >= nw {
			// We know that there is more data, because we handled 2-word objects above.
			// This must be at least a 6-word object. If we're out of pointer words,
			// mark no scan in next bitmap byte and finish.
			hb = 0
			w += 4
			goto Phase3
		}
	}

	// Phase 2: Full bytes in bitmap, up to but not including write to last byte (full or partial) in bitmap.
	// The loop computes the bits for that last write but does not execute the write;
	// it leaves the bits in hb for processing by Phase 3.
	// To avoid repeated adjustment of nb, we subtract out the 4 bits we're going to
	// use in the first half of the loop right now, and then we only adjust nb explicitly
	// if the 8 bits used by each iteration isn't balanced by 8 bits loaded mid-loop.
	nb -= 4
	for {
		// Emit bitmap byte.
		// b has at least nb+4 bits, with one exception:
		// if w+4 >= nw, then b has only nw-w bits,
		// but we'll stop at the break and then truncate
		// appropriately in Phase 3.
		hb = b & bitPointerAll
		hb |= bitScanAll
		if w += 4; w >= nw {
			break
		}
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		b >>= 4

		// Load more bits. b has nb right now.
		if p != endp {
			// Fast path: keep reading from ptrmask.
			// nb unmodified: we just loaded 8 bits,
			// and the next iteration will consume 8 bits,
			// leaving us with the same nb the next time we're here.
			if nb < 8 {
				b |= uintptr(*p) << nb
				p = add1(p)
			} else {
				// Reduce the effect of the next iteration's 8-bit
				// consumption by pretending we loaded and used 8 bits.
				nb -= 8
			}
		} else if p == nil {
			// Almost as fast path: track bit count and refill from pbits.
			// For short repetitions.
			if nb < 8 {
				b |= pbits << nb
				nb += endnb
			}
			nb -= 8 // for next iteration
		} else {
			// Slow path: reached end of ptrmask.
			// Process final partial byte and rewind to start.
			b |= uintptr(*p) << nb
			nb += endnb
			if nb < 8 {
				b |= uintptr(*ptrmask) << nb
				p = add1(ptrmask)
			} else {
				nb -= 8
				p = ptrmask
			}
		}

		// Emit bitmap byte.
		hb = b & bitPointerAll
		hb |= bitScanAll
		if w += 4; w >= nw {
			break
		}
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		b >>= 4
	}

Phase3:
	// Phase 3: Write last byte or partial byte and zero the rest of the bitmap entries.
	if w > nw {
		// Counting the 4 entries in hb not yet flushed to memory,
		// there are more entries than possible pointer slots.
		// Discard the excess entries (can't be more than 3).
		mask := uintptr(1)<<(4-(w-nw)) - 1
		hb &= mask | mask<<4 // apply mask to both pointer bits and scan bits
	}

	// Change nw from counting possibly-pointer words to total words in allocation.
	nw = size / sys.PtrSize

	// Write whole bitmap bytes.
	// The first is hb, the rest are zero.
	if w <= nw {
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		hb = 0 // for possible final half-byte below
		for w += 4; w <= nw; w += 4 {
			*hbitp = 0
			hbitp = add1(hbitp)
		}
	}

	// Write final partial bitmap byte if any.
	// We know w > nw, or else we'd still be in the loop above.
	// It can be bigger only due to the 4 entries in hb that it counts.
	// If w == nw+4 then there's nothing left to do: we wrote all nw entries
	// and can discard the 4 sitting in hb.
	// But if w == nw+2, we need to write the first two in hb.
	// The byte is shared with the next object, so be careful with
	// existing bits.
	if w == nw+2 {
		*hbitp = *hbitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | uint8(hb)
	}

Phase4:
	// Phase 4: Copy unrolled bitmap to per-arena bitmaps, if necessary.
	if outOfPlace {
		// TODO: We could probably make this faster by
		// handling [x+dataSize, x+size) specially.
		h := heapBitsForAddr(x)

		// cnw is the number of heap words, or bit pairs
		// remaining (like nw above).
		cnw := size / sys.PtrSize
		src := (*uint8)(unsafe.Pointer(x))
		// We know the first and last byte of the bitmap are
		// not the same, but it's still possible for small
		// objects to span arenas, so the first byte may share
		// bitmap bits with a neighboring object.
		//
		// Handle the first byte specially if it's shared. See
		// Phase 1 for why this is the only special case we need.
		if doubleCheck {
			if !(h.shift == 0 || (sys.PtrSize == 8 && h.shift == 2)) {
				print("x=", x, " size=", size, " cnw=", h.shift, "\n")
				throw("bad start shift")
			}
		}
		if sys.PtrSize == 8 && h.shift == 2 {
			*h.bitp = *h.bitp&^((bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift)<<(2*heapBitsShift)) | *src
			h = h.next().next()
			cnw -= 2
			src = addb(src, 1)
		}
		// We're now byte aligned. Copy out to per-arena
		// bitmaps until the last byte (which may again be
		// partial).
		for cnw >= 4 {
			// This loop processes four words at a time,
			// so round cnw down accordingly.
			hNext, words := h.forwardOrBoundary(cnw / 4 * 4)

			// n is the number of bitmap bytes to copy.
			n := words / 4
			memmove(unsafe.Pointer(h.bitp), unsafe.Pointer(src), n)
			cnw -= words
			h = hNext
			src = addb(src, n)
		}
		if doubleCheck && h.shift != 0 {
			print("cnw=", cnw, " h.shift=", h.shift, "\n")
			throw("bad shift after block copy")
		}
		// Handle the last byte if it's shared.
		if cnw == 2 {
			*h.bitp = *h.bitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | *src
			src = addb(src, 1)
			h = h.next().next()
		}
		if doubleCheck {
			if uintptr(unsafe.Pointer(src)) > x+size {
				throw("copy exceeded object size")
			}
			if !(cnw == 0 || cnw == 2) {
				print("x=", x, " size=", size, " cnw=", cnw, "\n")
				throw("bad number of remaining words")
			}
			// Set up hbitp so doubleCheck code below can check it.
			hbitp = h.bitp
		}
		// Zero the object where we wrote the bitmap.
		memclrNoHeapPointers(unsafe.Pointer(x), uintptr(unsafe.Pointer(src))-x)
	}

	// Double check the bitmap bits that were just written.
	if doubleCheck {
		// x+size may not point to the heap, so back up one
		// word and then advance it the way we do above.
		end := heapBitsForAddr(x + size - sys.PtrSize).next()
		endAI := arenaIdx(end.arena)
		if !outOfPlace && (end.bitp == nil || (end.shift == 0 && end.bitp == &mheap_.arenas[endAI.l1()][endAI.l2()].bitmap[0])) {
			// The unrolling code above walks hbitp just
			// past the bitmap without moving to the next
			// arena. Synthesize this for end.bitp.
			end.arena--
			endAI = arenaIdx(end.arena)
			end.bitp = addb(&mheap_.arenas[endAI.l1()][endAI.l2()].bitmap[0], heapArenaBitmapBytes)
			end.last = nil
		}
		if typ.kind&kindGCProg == 0 && (hbitp != end.bitp || (w == nw+2) != (end.shift == 2)) {
			println("ended at wrong bitmap byte for", typ.string(), "x", dataSize/typ.size)
			print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
			print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
			h0 := heapBitsForAddr(x)
			print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
			print("ended at hbitp=", hbitp, " but next starts at bitp=", end.bitp, " shift=", end.shift, "\n")
			throw("bad heapBitsSetType")
		}

		// Double-check that bits to be used look like type bitmap.
		h := heapBitsForAddr(x)
		nptr := typ.ptrdata / sys.PtrSize
		ndata := typ.size / sys.PtrSize
		count := dataSize / typ.size
		totalptr := ((count-1)*typ.size + typ.ptrdata) / sys.PtrSize
		for i := uintptr(0); i < size/sys.PtrSize; i++ {
			j := i % ndata
			var have, want uint8
			have = (*h.bitp >> h.shift) & (bitPointer | bitScan)
			if i >= totalptr {
				want = 0 // dead marker
				if typ.kind&kindGCProg != 0 && i < (totalptr+3)/4*4 {
					want = bitScan
				}
			} else {
				if j < nptr && (*addb(ptrmask, j/8)>>(j%8))&1 != 0 {
					want |= bitPointer
				}
				if i != 1 {
					want |= bitScan
				} else {
					have &^= bitScan // checkmark bit; ignore
				}
			}
			if have != want {
				println("mismatch writing bits for", typ.string(), "x", dataSize/typ.size)
				print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
				print("kindGCProg=", typ.kind&kindGCProg != 0, " outOfPlace=", outOfPlace, "\n")
				print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
				h0 := heapBitsForAddr(x)
				print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
				print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
				print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
				println("at word", i, "offset", i*sys.PtrSize, "have", hex(have), "want", hex(want))
				if typ.kind&kindGCProg != 0 {
					println("GC program:")
					dumpGCProg(addb(typ.gcdata, 4))
				}
				throw("bad heapBitsSetType")
			}
			h = h.next()
		}
		if ptrmask == debugPtrmask.data {
			unlock(&debugPtrmask.lock)
		}
	}
}

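// Editorial worked example (not in the original source): allocating a
// struct{ p *int; x int } on a 64-bit system (typ.size = 16,
// typ.ptrdata = 8) in a 16-byte size class takes the size == 2*sys.PtrSize
// fast path above: ptrmask is the single byte 0b01, so hb = (1 & 3) |
// bitScan = 0x11, and only the two bit pairs at h.shift in the shared
// bitmap byte are rewritten.
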
var debugPtrmask struct {
	lock mutex
	data *byte
}

// heapBitsSetTypeGCProg implements heapBitsSetType using a GC program.
// progSize is the size of the memory described by the program.
// elemSize is the size of the element that the GC program describes (a prefix of).
// dataSize is the total size of the intended data, a multiple of elemSize.
// allocSize is the total size of the allocated memory.
//
// GC programs are only used for large allocations.
// heapBitsSetType requires that allocSize is a multiple of 4 words,
// so that the relevant bitmap bytes are not shared with surrounding
// objects.
func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
	if sys.PtrSize == 8 && allocSize%(4*sys.PtrSize) != 0 {
		// Alignment will be wrong.
		throw("heapBitsSetTypeGCProg: small allocation")
	}
	var totalBits uintptr
	if elemSize == dataSize {
		totalBits = runGCProg(prog, nil, h.bitp, 2)
		if totalBits*sys.PtrSize != progSize {
			println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
			throw("heapBitsSetTypeGCProg: unexpected bit count")
		}
	} else {
		count := dataSize / elemSize

		// Piece together program trailer to run after prog that does:
		//	literal(0)
		//	repeat(1, elemSize-progSize-1) // zeros to fill element size
		//	repeat(elemSize, count-1) // repeat that element for count
		// This zero-pads the data remaining in the first element and then
		// repeats that first element to fill the array.
		var trailer [40]byte // 3 varints (max 10 each) + some bytes
		i := 0
		if n := elemSize/sys.PtrSize - progSize/sys.PtrSize; n > 0 {
			// literal(0)
			trailer[i] = 0x01
			i++
			trailer[i] = 0
			i++
			if n > 1 {
				// repeat(1, n-1)
				trailer[i] = 0x81
				i++
				n--
				for ; n >= 0x80; n >>= 7 {
					trailer[i] = byte(n | 0x80)
					i++
				}
				trailer[i] = byte(n)
				i++
			}
		}
		// repeat(elemSize/ptrSize, count-1)
		trailer[i] = 0x80
		i++
		n := elemSize / sys.PtrSize
		for ; n >= 0x80; n >>= 7 {
			trailer[i] = byte(n | 0x80)
			i++
		}
		trailer[i] = byte(n)
		i++
		n = count - 1
		for ; n >= 0x80; n >>= 7 {
			trailer[i] = byte(n | 0x80)
			i++
		}
		trailer[i] = byte(n)
		i++
		trailer[i] = 0
		i++

		runGCProg(prog, &trailer[0], h.bitp, 2)

		// Even though we filled in the full array just now,
		// record that we only filled in up to the ptrdata of the
		// last element. This will cause the code below to
		// memclr the dead section of the final array element,
		// so that scanobject can stop early in the final element.
		totalBits = (elemSize*(count-1) + progSize) / sys.PtrSize
	}
	endProg := unsafe.Pointer(addb(h.bitp, (totalBits+3)/4))
	endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/sys.PtrSize/wordsPerBitmapByte))
	memclrNoHeapPointers(endProg, uintptr(endAlloc)-uintptr(endProg))
}

// progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
// size is the size of the region described by prog, in bytes.
// The resulting bitvector will have no more than size/sys.PtrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector {
	n := (size/sys.PtrSize + 7) / 8
	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
	x[len(x)-1] = 0xa1 // overflow check sentinel
	n = runGCProg(prog, nil, &x[0], 1)
	if x[len(x)-1] != 0xa1 {
		throw("progToPointerMask: overflow")
	}
	return bitvector{int32(n), &x[0]}
}

// GC type programs
//
// For large types, the type information is encoded as a GC program
// rather than an expanded 1-bit mask, to save space in the binary.
// A program is a bytecode for a simple machine that writes out a
// pointer bitmap, using these instructions (see also dumpGCProg):
//
//	00000000: stop
//	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
//	10000000 n c: repeat the previous n bits c times; n and c are varints
//	1nnnnnnn c: repeat the previous n bits c times; c is a varint

// runGCProg executes the GC program prog, and then trailer if non-nil,
// writing to dst with entries of the given size.
// If size == 1, dst is a 1-bit pointer mask laid out moving forward from dst.
// If size == 2, dst is the 2-bit heap bitmap, and each byte written covers
// four heap words: the low nibble holds the pointer bits and the scan bits
// are set for every entry written.
// runGCProg returns the number of 1- or 2-bit entries written to memory.
func runGCProg(prog, trailer, dst *byte, size int) uintptr {
	dstStart := dst

	// Bits waiting to be written to memory.
	var bits uintptr
	var nbits uintptr

	p := prog
Run:
	for {
		// Flush accumulated full bytes.
		// The rest of the loop assumes that nbits <= 7.
		for ; nbits >= 8; nbits -= 8 {
			if size == 1 {
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			} else {
				v := bits&bitPointerAll | bitScanAll
				*dst = uint8(v)
				dst = add1(dst)
				bits >>= 4
				v = bits&bitPointerAll | bitScanAll
				*dst = uint8(v)
				dst = add1(dst)
				bits >>= 4
			}
		}

		// Process one instruction.
		inst := uintptr(*p)
		p = add1(p)
		n := inst & 0x7F
		if inst&0x80 == 0 {
			// Literal bits; n == 0 means end of program.
			if n == 0 {
				// Program is over; continue in trailer if present.
				if trailer != nil {
					p = trailer
					trailer = nil
					continue
				}
				break Run
			}
			nbyte := n / 8
			for i := uintptr(0); i < nbyte; i++ {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				if size == 1 {
					*dst = uint8(bits)
					dst = add1(dst)
					bits >>= 8
				} else {
					v := bits&0xf | bitScanAll
					*dst = uint8(v)
					dst = add1(dst)
					bits >>= 4
					v = bits&0xf | bitScanAll
					*dst = uint8(v)
					dst = add1(dst)
					bits >>= 4
				}
			}
			if n %= 8; n > 0 {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				nbits += n
			}
			continue Run
		}

		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
		if n == 0 {
			for off := uint(0); ; off += 7 {
				x := uintptr(*p)
				p = add1(p)
				n |= (x & 0x7F) << off
				if x&0x80 == 0 {
					break
				}
			}
		}

		// Count is encoded in a varint in the next bytes.
		c := uintptr(0)
		for off := uint(0); ; off += 7 {
			x := uintptr(*p)
			p = add1(p)
			c |= (x & 0x7F) << off
			if x&0x80 == 0 {
				break
			}
		}
		c *= n // now total number of bits to copy

		// If the number of bits being repeated is small, load them
		// into a register and use that register for the possibly
		// many repetitions of that pattern, instead of doing
		// repeated reads from memory.
		// Handling fewer than 8 bits here makes the general loop simpler.
		// The cutoff is sys.PtrSize*8 - 7 to guarantee that when we add
		// the pattern to a bit buffer holding at most 7 bits (a partial
		// byte) it will not overflow.
		src := dst
		const maxBits = sys.PtrSize*8 - 7
		if n <= maxBits {
			// Start with bits in output buffer.
			pattern := bits
			npattern := nbits

			// If we need more bits, fetch them from memory.
			if size == 1 {
				src = subtract1(src)
				for npattern < n {
					pattern <<= 8
					pattern |= uintptr(*src)
					src = subtract1(src)
					npattern += 8
				}
			} else {
				src = subtract1(src)
				for npattern < n {
					pattern <<= 4
					pattern |= uintptr(*src) & 0xf
					src = subtract1(src)
					npattern += 4
				}
			}

			// We started with the whole bit output buffer,
			// and then we loaded bits from whole bytes.
			// We could have loaded more bits than necessary.
			// Discard the extra.
			if npattern > n {
				pattern >>= npattern - n
				npattern = n
			}

			// Replicate pattern to at most maxBits.
			if npattern == 1 {
				// One bit being repeated.
				// If the bit is 1, make the pattern all 1s.
				// If the bit is 0, the pattern is all 0s,
				// so we can claim that the stored pattern is any number of 0s.
				if pattern == 1 {
					pattern = 1<<maxBits - 1
					npattern = maxBits
				} else {
					npattern = c
				}
			} else {
				b := pattern
				nb := npattern
				if nb+nb <= maxBits {
					// Double pattern until the whole uintptr is filled.
					for nb <= sys.PtrSize*8 {
						b |= b << nb
						nb += nb
					}
					// Trim away incomplete copy of original pattern in high bits.
					// TODO(rsc): Replace with table lookup or loop on systems without division?
					nb = maxBits / npattern * npattern
					b &= 1<<nb - 1
					pattern = b
					npattern = nb
				}
			}

			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
			// Since pattern contains >8 bits, there will be full bytes to flush
			// on each iteration.
			for ; c >= npattern; c -= npattern {
				bits |= pattern << nbits
				nbits += npattern
				if size == 1 {
					for nbits >= 8 {
						*dst = uint8(bits)
						dst = add1(dst)
						bits >>= 8
						nbits -= 8
					}
				} else {
					for nbits >= 4 {
						*dst = uint8(bits&0xf | bitScanAll)
						dst = add1(dst)
						bits >>= 4
						nbits -= 4
					}
				}
			}

			// Add final fragment to bit buffer.
			if c > 0 {
				pattern &= 1<<c - 1
				bits |= pattern << nbits
				nbits += c
			}
			continue Run
		}

		// Repeat; n too large to fit in a register.
		// Since nbits <= 7, we know the first few bytes of repeated data
		// are already written to memory.
		off := n - nbits // n > nbits because n > maxBits and nbits <= 7
		if size == 1 {
			// Leading src fragment.
			src = subtractb(src, (off+7)/8)
			if frag := off & 7; frag != 0 {
				bits |= uintptr(*src) >> (8 - frag) << nbits
				src = add1(src)
				nbits += frag
				c -= frag
			}
			// Main loop: load one byte, write another.
			// The bits are rotating through the bit buffer.
			for i := c / 8; i > 0; i-- {
				bits |= uintptr(*src) << nbits
				src = add1(src)
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			}
			// Final src fragment.
			if c %= 8; c > 0 {
				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
				nbits += c
			}
		} else {
			// Leading src fragment.
			src = subtractb(src, (off+3)/4)
			if frag := off & 3; frag != 0 {
				bits |= (uintptr(*src) & 0xf) >> (4 - frag) << nbits
				src = add1(src)
				nbits += frag
				c -= frag
			}
			// Main loop: load one byte, write another.
			// The bits are rotating through the bit buffer.
			for i := c / 4; i > 0; i-- {
				bits |= (uintptr(*src) & 0xf) << nbits
				src = add1(src)
				*dst = uint8(bits&0xf | bitScanAll)
				dst = add1(dst)
				bits >>= 4
			}
			// Final src fragment.
			if c %= 4; c > 0 {
				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
				nbits += c
			}
		}
	}

	// Write any final bits out, using full-byte writes, even for the final byte.
	var totalBits uintptr
	if size == 1 {
		totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
		nbits += -nbits & 7
		for ; nbits > 0; nbits -= 8 {
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}
	} else {
		totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*4 + nbits
		nbits += -nbits & 3
		for ; nbits > 0; nbits -= 4 {
			v := bits&0xf | bitScanAll
			*dst = uint8(v)
			dst = add1(dst)
			bits >>= 4
		}
	}
	return totalBits
}

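// Editorial worked example (not in the original source), using the
// instruction encoding documented above runGCProg: an array of 1000
// pointers ([1000]*byte) can be described by the 6-byte program
//
//	0x01 0x01      // emit 1 literal bit: 1 (a pointer word)
//	0x81 0xe7 0x07 // repeat the previous 1 bit 999 times (999 = 7<<7 | 0x67)
//	0x00           // stop
//
// which runGCProg unrolls into 1000 consecutive pointer bits.
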
// materializeGCProg allocates space for the (1-bit) pointer bitmask
// for an object of size ptrdata, then fills that space with the
// pointer mask produced by running the GC program prog.
// The bitmask starts at s.startAddr.
// The result must be deallocated with dematerializeGCProg.
func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
	s := mheap_.allocManual((ptrdata/(8*sys.PtrSize)+pageSize-1)/pageSize, &memstats.gc_sys)
	runGCProg(addb(prog, 4), nil, (*byte)(unsafe.Pointer(s.startAddr)), 1)
	return s
}

func dematerializeGCProg(s *mspan) {
	mheap_.freeManual(s, &memstats.gc_sys)
}

// dumpGCProg disassembles the GC program prog to standard output,
// for debugging.
func dumpGCProg(p *byte) {
	nptr := 0
	for {
		x := *p
		p = add1(p)
		if x == 0 {
			print("\t", nptr, " end\n")
			break
		}
		if x&0x80 == 0 {
			print("\t", nptr, " lit ", x, ":")
			n := int(x+7) / 8
			for i := 0; i < n; i++ {
				print(" ", hex(*p))
				p = add1(p)
			}
			print("\n")
			nptr += int(x)
		} else {
			nbit := int(x &^ 0x80)
			if nbit == 0 {
				for nb := uint(0); ; nb += 7 {
					x := *p
					p = add1(p)
					nbit |= int(x&0x7f) << nb
					if x&0x80 == 0 {
						break
					}
				}
			}
			count := 0
			for nb := uint(0); ; nb += 7 {
				x := *p
				p = add1(p)
				count |= int(x&0x7f) << nb
				if x&0x80 == 0 {
					break
				}
			}
			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
			nptr += nbit * count
		}
	}
}

// Testing.

// getgcmaskcb is a callback used by getgcmask with gentraceback to
// find the stack frame that contains the target stack pointer.
func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
	target := (*stkframe)(ctxt)
	if frame.sp <= target.sp && target.sp < frame.varp {
		*target = *frame
		return false
	}
	return true
}

// gcbits returns the GC type info for x, for testing.
// The result is the bitmap entries (0 or 1), one entry per byte.
//go:linkname reflect_gcbits reflect.gcbits
func reflect_gcbits(x interface{}) []byte {
	ret := getgcmask(x)
	typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
	nptr := typ.ptrdata / sys.PtrSize
	for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
		ret = ret[:len(ret)-1]
	}
	return ret
}

// getgcmask returns the GC type info for the object pointed to by ep,
// for testing. The result has one entry (0 or 1) per pointer-sized word.
func getgcmask(ep interface{}) (mask []byte) {
	e := *efaceOf(&ep)
	p := e.data
	t := e._type
	// data or bss
	for _, datap := range activeModules() {
		// data
		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
			bitmap := datap.gcdatamask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/sys.PtrSize)
			for i := uintptr(0); i < n; i += sys.PtrSize {
				off := (uintptr(p) + i - datap.data) / sys.PtrSize
				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}

		// bss
		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
			bitmap := datap.gcbssmask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/sys.PtrSize)
			for i := uintptr(0); i < n; i += sys.PtrSize {
				off := (uintptr(p) + i - datap.bss) / sys.PtrSize
				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}
	}

	// heap
	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
		hbits := heapBitsForAddr(base)
		n := s.elemsize
		mask = make([]byte, n/sys.PtrSize)
		for i := uintptr(0); i < n; i += sys.PtrSize {
			if hbits.isPointer() {
				mask[i/sys.PtrSize] = 1
			}
			if i != 1*sys.PtrSize && !hbits.morePointers() {
				mask = mask[:i/sys.PtrSize]
				break
			}
			hbits = hbits.next()
		}
		return
	}

	// stack
	if _g_ := getg(); _g_.m.curg.stack.lo <= uintptr(p) && uintptr(p) < _g_.m.curg.stack.hi {
		var frame stkframe
		frame.sp = uintptr(p)
		_g_ := getg()
		gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
		if frame.fn.valid() {
			locals, _, _ := getStackMap(&frame, nil, false)
			if locals.n == 0 {
				return
			}
			size := uintptr(locals.n) * sys.PtrSize
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/sys.PtrSize)
			for i := uintptr(0); i < n; i += sys.PtrSize {
				off := (uintptr(p) + i - frame.varp + size) / sys.PtrSize
				mask[i/sys.PtrSize] = locals.ptrbit(off)
			}
		}
		return
	}

	// otherwise, not something the GC knows about.
	// possibly read-only data, like malloc(0).
	// must not have pointers
	return
}

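// Editorial usage sketch (not in the original source): tests reach
// getgcmask through reflect.gcbits (linknamed above). A hypothetical
// exported wrapper would behave like:
//
//	func GCBits(x interface{}) []byte { return getgcmask(x) } // hypothetical name
//
//	// On 64-bit: GCBits(new(struct{ p *int; x int })) yields [1 0] from the
//	// heap bitmap, which reflect_gcbits then trims to [1].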