Source file src/runtime/mcentral.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The mcentral doesn't actually contain the list of free objects; the mspan
// does. Each mcentral is two lists of mspans: those with free objects
// (c->nonempty) and those that are completely allocated (c->empty).
package runtime

import "runtime/internal/atomic"
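
// Sweep generation, as documented in mheap.go: mheap_.sweepgen increases
// by 2 on every GC cycle. For a span s, s.sweepgen == sg-2 means the span
// needs sweeping, sg-1 means it is currently being swept, and sg means it
// is swept and ready to use; sg+1 and sg+3 mark spans that are cached in
// an mcache (unswept and swept, respectively). The code below manipulates
// these states directly.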

// Central list of free objects of a given size.
//
//go:notinheap
type mcentral struct {
	lock      mutex
	spanclass spanClass
	nonempty  mSpanList // list of spans with a free object, ie a nonempty free list
	empty     mSpanList // list of spans with no free objects (or cached in an mcache)

	// nmalloc is the cumulative count of objects allocated from
	// this mcentral, assuming all spans in mcaches are
	// fully allocated. Written atomically, read under STW.
	nmalloc uint64
}

// Initialize a single central free list.
func (c *mcentral) init(spc spanClass) {
	c.spanclass = spc
	c.nonempty.init()
	c.empty.init()
}

// Allocate a span to use in an mcache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)
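	// (deductSweepCredit implements proportional sweeping: before taking
	// a new span, the allocator must first pay for sweeping enough pages
	// to keep sweep progress ahead of allocation.)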

	lock(&c.lock)
	traceDone := false
	if trace.enabled {
		traceGCSweepStart()
	}
	sg := mheap_.sweepgen
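	// sg is a snapshot of the current sweep generation; the comparisons
	// below classify each span against it (see the legend above).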
retry:
	var s *mspan
	for s = c.nonempty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// Won the race to sweep this span: move it to the empty
			// list (it is about to be cached) and sweep it outside
			// the lock.
			c.nonempty.remove(s)
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			goto havespan
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		c.nonempty.remove(s)
		c.empty.insertBack(s)
		unlock(&c.lock)
		goto havespan
	}

	for s = c.empty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			c.empty.remove(s)
			// swept spans are at the end of the list
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			freeIndex := s.nextFreeIndex()
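			// nextFreeIndex returns s.nelems when the span has no
			// free objects, so a smaller index means sweeping
			// recovered some space.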
			if freeIndex != s.nelems {
				s.freeindex = freeIndex
				goto havespan
			}
			lock(&c.lock)
			// the span is still empty after sweep
			// it is already in the empty list, so just retry
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	if trace.enabled {
		traceGCSweepDone()
		traceDone = true
	}
	unlock(&c.lock)

	// Replenish central list if empty.
	s = c.grow()
	if s == nil {
		return nil
	}
	lock(&c.lock)
	c.empty.insertBack(s)
	unlock(&c.lock)

	// At this point s is a non-empty span, queued at the end of the empty
	// list, and c is unlocked.
havespan:
	if trace.enabled && !traceDone {
		traceGCSweepDone()
	}
	n := int(s.nelems) - int(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	// Assume all objects from this span will be allocated in the
	// mcache. If it gets uncached, we'll adjust this.
	atomic.Xadd64(&c.nmalloc, int64(n))
	usedBytes := uintptr(s.allocCount) * s.elemsize
	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
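	// heap_live now counts every free slot on s as live, as if the mcache
	// had already handed them out; uncacheSpan undoes this for any slots
	// that go unused.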
	if trace.enabled {
		// heap_live changed.
		traceHeapAlloc()
	}
	if gcBlackenEnabled != 0 {
		// heap_live changed.
		gcController.revise()
	}
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
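	// allocCache caches the complement of a 64-bit window of s.allocBits
	// starting at a 64-object boundary, so a set bit marks a free slot;
	// allocBits is byte-indexed, hence the division by 8.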
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64

	return s
}

// Return span from an mcache.
func (c *mcentral) uncacheSpan(s *mspan) {
	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	sg := mheap_.sweepgen
	stale := s.sweepgen == sg+1
	if stale {
		// Span was cached before sweep began. It's our
		// responsibility to sweep it.
		//
		// Set sweepgen to indicate it's not cached but needs
		// sweeping and can't be allocated from. sweep will
		// set s.sweepgen to indicate s is swept.
		atomic.Store(&s.sweepgen, sg-1)
	} else {
		// Indicate that s is no longer cached.
		atomic.Store(&s.sweepgen, sg)
	}

	n := int(s.nelems) - int(s.allocCount)
	if n > 0 {
		// cacheSpan updated alloc assuming all objects on s
		// were going to be allocated. Adjust for any that
		// weren't. We must do this before potentially
		// sweeping the span.
		atomic.Xadd64(&c.nmalloc, -int64(n))

		lock(&c.lock)
		c.empty.remove(s)
		c.nonempty.insert(s)
		if !stale {
			// cacheSpan conservatively counted
			// unallocated slots in heap_live. Undo this.
			//
			// If this span was cached before sweep, then
			// heap_live was totally recomputed since
			// caching this span, so we don't do this for
			// stale spans.
			atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
		}
		unlock(&c.lock)
	}

	if stale {
		// Now that s is in the right mcentral list, we can
		// sweep it.
		s.sweep(false)
	}
}

// freeSpan updates c and s after sweeping s.
// It sets s's sweepgen to the latest generation,
// and, based on the number of free objects in s,
// moves s to the appropriate list of c or returns it
// to the heap.
// freeSpan reports whether s was returned to the heap.
// If preserve=true, it does not move s (the caller
// must take care of it).
func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {
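	// (sweepgen values sg+1 and sg+3 are the two "cached in an mcache"
	// states; a cached span must go through uncacheSpan before it can be
	// freed here.)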
	if sg := mheap_.sweepgen; s.sweepgen == sg+1 || s.sweepgen == sg+3 {
		throw("freeSpan given cached span")
	}
	s.needzero = 1

	if preserve {
		// preserve is set only when called from (un)cacheSpan above,
		// the span must be in the empty list.
		if !s.inList() {
			throw("can't preserve unlinked span")
		}
		atomic.Store(&s.sweepgen, mheap_.sweepgen)
		return false
	}

	lock(&c.lock)

	// Move to nonempty if necessary.
	if wasempty {
		c.empty.remove(s)
		c.nonempty.insert(s)
	}

	// delay updating sweepgen until here. This is the signal that
	// the span may be used in an mcache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	atomic.Store(&s.sweepgen, mheap_.sweepgen)

	if s.allocCount != 0 {
		unlock(&c.lock)
		return false
	}

	// s has no allocated objects left; return its pages to the heap.
	c.nonempty.remove(s)
	unlock(&c.lock)
	mheap_.freeSpan(s, false)
	return true
}

// grow allocates a new empty span on the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
	size := uintptr(class_to_size[c.spanclass.sizeclass()])

	s := mheap_.alloc(npages, c.spanclass, false, true)
	if s == nil {
		return nil
	}

	// Use division by multiplication and shifts to quickly compute:
	// n := (npages << _PageShift) / size
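	// (s.divMul, s.divShift, and s.divShift2 are per-size-class magic
	// constants precomputed so this division needs no hardware divide.)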
	n := (npages << _PageShift) >> s.divShift * uintptr(s.divMul) >> s.divShift2
	s.limit = s.base() + size*n
	heapBitsForAddr(s.base()).initSpan(s)
	return s
}