...
Source file src/runtime/mgcsweepbuf.go
1
2
3
4
5 package runtime
6
7 import (
8 "internal/cpu"
9 "runtime/internal/atomic"
10 "runtime/internal/sys"
11 "unsafe"
12 )
13
14
15
16
17
// A gcSweepBuf is a set of *mspans.
//
// Spans are stored in a two-level structure: a growable "spine" array
// of pointers to fixed-size blocks, each block holding
// gcSweepBlockEntries span slots. push appends and pop removes in
// LIFO order through the shared index counter.
//
// NOTE(review): push is lock-free except for spine growth, while pop
// reads the spine and block slots without atomics — this looks safe
// for concurrent pushes *or* concurrent pops, but not both at once.
// Confirm the intended contract against callers.
type gcSweepBuf struct {
	// spineLock guards spine growth and new-block publication in
	// push. spine points to an array of *gcSweepBlock and is read
	// atomically by lock-free paths; spineLen (number of published
	// blocks) is accessed atomically; spineCap (capacity of the
	// spine array) is only touched under spineLock.
	spineLock mutex
	spine     unsafe.Pointer // *[N]*gcSweepBlock, accessed atomically
	spineLen  uintptr        // count of published blocks, accessed atomically
	spineCap  uintptr        // spine array capacity, guarded by spineLock

	// index is the first unused slot in the logical concatenation of
	// all blocks. It is always accessed atomically (Xadd/Load).
	index uint32
}
44
const (
	// gcSweepBlockEntries is the number of *mspan slots in one
	// gcSweepBlock (512 pointers = 4KB per block on 64-bit).
	gcSweepBlockEntries = 512
	// gcSweepBufInitSpineCap is the spine capacity allocated on the
	// first growth (see push); subsequent growths double it.
	gcSweepBufInitSpineCap = 256
)
49
// A gcSweepBlock is one fixed-size chunk of a gcSweepBuf: a flat
// array of span pointers. Blocks are allocated off-heap via
// persistentalloc in push.
type gcSweepBlock struct {
	spans [gcSweepBlockEntries]*mspan
}
53
54
55
// push adds span s to buffer b. push is safe to call concurrently
// with other push operations (slots are claimed with an atomic add
// and blocks are published under spineLock).
// NOTE(review): it does not appear safe to call concurrently with
// pop, which reads spine/slots non-atomically — confirm against
// callers.
func (b *gcSweepBuf) push(s *mspan) {
	// Obtain our slot. Xadd makes concurrent pushers claim distinct
	// cursors; top selects the block, bottom the slot within it.
	cursor := uintptr(atomic.Xadd(&b.index, +1) - 1)
	top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries

	// Do we need to add a block?
	spineLen := atomic.Loaduintptr(&b.spineLen)
	var block *gcSweepBlock
retry:
	if top < spineLen {
		// Common case: the block already exists. Load the spine
		// pointer and the block pointer atomically so we observe a
		// fully published block from a concurrent grower.
		spine := atomic.Loadp(unsafe.Pointer(&b.spine))
		blockp := add(spine, sys.PtrSize*top)
		block = (*gcSweepBlock)(atomic.Loadp(blockp))
	} else {
		// Our block hasn't been published yet. Add it (and possibly
		// grow the spine) under the lock.
		lock(&b.spineLock)
		// spineLen cannot change while we hold the lock, but it may
		// have changed while we were waiting for it.
		spineLen = atomic.Loaduintptr(&b.spineLen)
		if top < spineLen {
			// Another pusher published our block while we waited.
			unlock(&b.spineLock)
			goto retry
		}

		if spineLen == b.spineCap {
			// Grow the spine, doubling its capacity (starting from
			// gcSweepBufInitSpineCap on first use).
			newCap := b.spineCap * 2
			if newCap == 0 {
				newCap = gcSweepBufInitSpineCap
			}
			newSpine := persistentalloc(newCap*sys.PtrSize, cpu.CacheLineSize, &memstats.gc_sys)
			if b.spineCap != 0 {
				// Copy the existing block pointers into the new
				// spine.
				memmove(newSpine, b.spine, b.spineCap*sys.PtrSize)
			}
			// StorepNoWB: the spine is persistentalloc'd (off-heap),
			// so no write barrier is needed. The atomic store
			// publishes the new spine to lock-free readers.
			atomic.StorepNoWB(unsafe.Pointer(&b.spine), newSpine)
			b.spineCap = newCap
			// NOTE(review): the old spine array is not freed here —
			// concurrent readers may still hold it; presumably the
			// persistentalloc memory is intentionally retained.
			// Confirm.
		}

		// Allocate and publish the new block. The block pointer must
		// be stored before spineLen is bumped so that any reader
		// that observes the new length also observes the block.
		block = (*gcSweepBlock)(persistentalloc(unsafe.Sizeof(gcSweepBlock{}), cpu.CacheLineSize, &memstats.gc_sys))
		blockp := add(b.spine, sys.PtrSize*top)
		// StorepNoWB: off-heap destination, no write barrier needed.
		atomic.StorepNoWB(blockp, unsafe.Pointer(block))
		atomic.Storeuintptr(&b.spineLen, spineLen+1)
		unlock(&b.spineLock)
	}

	// We have a block. Insert the span. This store is not atomic;
	// readers (see block) tolerate nil entries in claimed slots.
	block.spans[bottom] = s
}
117
118
119
120
// pop removes and returns a span from buffer b, or nil if b is empty.
// Slots are claimed with an atomic decrement, so concurrent pops take
// distinct entries.
// NOTE(review): pop reads b.spine and the block slot without atomics,
// so it does not appear safe to call concurrently with push — confirm
// against callers.
func (b *gcSweepBuf) pop() *mspan {
	cursor := atomic.Xadd(&b.index, -1)
	if int32(cursor) < 0 {
		// The buffer was empty: the uint32 wrapped below zero, which
		// the signed reinterpretation detects. Undo the decrement.
		atomic.Xadd(&b.index, +1)
		return nil
	}

	// There are no concurrent spine growths to synchronize with here
	// (growth happens only in push), so plain, non-atomic loads of
	// the spine and block pointer suffice.
	top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries
	blockp := (**gcSweepBlock)(add(b.spine, sys.PtrSize*uintptr(top)))
	block := *blockp
	s := block.spans[bottom]
	// Clear the slot so the buffer doesn't keep the span reachable.
	block.spans[bottom] = nil
	return s
}
138
139
140
141
142
143
144
145 func (b *gcSweepBuf) numBlocks() int {
146 return int((atomic.Load(&b.index) + gcSweepBlockEntries - 1) / gcSweepBlockEntries)
147 }
148
149
150
// block returns the spans in the i'th block of buffer b. The result
// may contain nil-free prefixes only after trailing nils are trimmed;
// interior slots can still be nil if a concurrent pusher has claimed
// a slot but not yet stored its span, so callers must skip nils.
func (b *gcSweepBuf) block(i int) []*mspan {
	// Perform the bounds check against the atomically loaded length
	// before touching the spine; push publishes spineLen only after
	// the corresponding block pointer, so i < spineLen implies the
	// block is visible.
	if i < 0 || uintptr(i) >= atomic.Loaduintptr(&b.spineLen) {
		throw("block index out of range")
	}

	// Get block i, loading spine and block pointer atomically to
	// synchronize with a concurrent spine growth in push.
	spine := atomic.Loadp(unsafe.Pointer(&b.spine))
	blockp := add(spine, sys.PtrSize*uintptr(i))
	block := (*gcSweepBlock)(atomic.Loadp(blockp))

	// Snapshot index once and slice accordingly: blocks strictly
	// below top are (logically) full; the block containing the index
	// is only valid up to bottom.
	cursor := uintptr(atomic.Load(&b.index))
	top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries
	var spans []*mspan
	if uintptr(i) < top {
		spans = block.spans[:]
	} else {
		spans = block.spans[:bottom]
	}

	// A pusher may have claimed a slot (bumping index) without having
	// written the span yet, leaving nils at the tail. Trim them.
	for len(spans) > 0 && spans[len(spans)-1] == nil {
		spans = spans[:len(spans)-1]
	}
	return spans
}
180
View as plain text