Source file src/runtime/mgcsweep.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: sweeping.

// The sweeper consists of two different algorithms:
//
// * The object reclaimer finds and frees unmarked slots in spans. It
//   can free a whole span if none of the objects are marked, but that
//   isn't its goal. This can be driven either synchronously by
//   mcentral.cacheSpan for mcentral spans, or asynchronously by
//   sweepone from the list of all in-use spans in mheap_.sweepSpans.
//
// * The span reclaimer looks for spans that contain no marked objects
//   and frees whole spans. This is a separate algorithm because
//   freeing whole spans is the hardest task for the object reclaimer,
//   but is critical when allocating new spans. The entry point for
//   this is mheap_.reclaim and it's driven by a sequential scan of
//   the page marks bitmap in the heap arenas.
//
// Both algorithms ultimately call mspan.sweep, which sweeps a single
// heap span.
package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

var sweep sweepdata

// State of background sweep.
type sweepdata struct {
	lock    mutex
	g       *g
	parked  bool
	started bool

	nbgsweep    uint32
	npausesweep uint32
}
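
// For reference, the sweep generation (sweepgen) protocol that the
// code below relies on is documented on mspan in mheap.go; in outline,
// relative to mheap_.sweepgen (h.sweepgen below), which is incremented
// by 2 after every GC:
//
//	sweepgen == h.sweepgen - 2: the span needs sweeping
//	sweepgen == h.sweepgen - 1: the span is currently being swept
//	sweepgen == h.sweepgen:     the span is swept and ready to use
//	sweepgen == h.sweepgen + 1: the span was cached before sweep began
//	                            and is still cached; it needs sweeping
//	sweepgen == h.sweepgen + 3: the span was swept and then cached and
//	                            is still cached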

// finishsweep_m ensures that all spans are swept.
//
// The world must be stopped. This ensures there are no sweeps in
// progress.
func finishsweep_m() {
	// Sweeping must be complete before marking commences, so
	// sweep any unswept spans. If this is a concurrent GC, there
	// shouldn't be any spans left to sweep, so this should finish
	// instantly. If GC was forced before the concurrent sweep
	// finished, there may be spans to sweep.
	for sweepone() != ^uintptr(0) {
		sweep.npausesweep++
	}

	nextMarkBitArenaEpoch()
}

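// bgsweep runs in the background sweeper goroutine, which is started
// by gcenable (see mgc.go) during initialization. It sweeps spans
// concurrently with the mutator, calling Gosched after each span so
// that it runs at low priority, and parks on sweep.lock when there is
// nothing left to sweep. The channel send tells gcenable that setup
// is done.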
func bgsweep(c chan int) {
	sweep.g = getg()

	lock(&sweep.lock)
	sweep.parked = true
	c <- 1
	goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)

	for {
		for sweepone() != ^uintptr(0) {
			sweep.nbgsweep++
			Gosched()
		}
		for freeSomeWbufs(true) {
			Gosched()
		}
		lock(&sweep.lock)
		if !isSweepDone() {
			// This can happen if a GC runs between
			// sweepone returning ^0 above
			// and the lock being acquired.
			unlock(&sweep.lock)
			continue
		}
		sweep.parked = true
		goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
	}
}

// sweepone sweeps some unswept heap span and returns the number of
// pages returned to the heap, or ^uintptr(0) if there was nothing to
// sweep.
func sweepone() uintptr {
	_g_ := getg()
	sweepRatio := mheap_.sweepPagesPerByte // For debugging

	// increment locks to ensure that the goroutine is not preempted
	// in the middle of sweep thus leaving the span in an inconsistent state for next GC
	_g_.m.locks++
	if atomic.Load(&mheap_.sweepdone) != 0 {
		_g_.m.locks--
		return ^uintptr(0)
	}
	atomic.Xadd(&mheap_.sweepers, +1)

	// Find a span to sweep.
	var s *mspan
	sg := mheap_.sweepgen
	for {
		s = mheap_.sweepSpans[1-sg/2%2].pop()
		if s == nil {
			atomic.Store(&mheap_.sweepdone, 1)
			break
		}
		if s.state != mSpanInUse {
			// This can happen if direct sweeping already
			// swept this span, but in that case the sweep
			// generation should always be up-to-date.
			if !(s.sweepgen == sg || s.sweepgen == sg+3) {
				print("runtime: bad span s.state=", s.state, " s.sweepgen=", s.sweepgen, " sweepgen=", sg, "\n")
				throw("non in-use span in unswept list")
			}
			continue
		}
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			break
		}
	}

	// Sweep the span we found.
	npages := ^uintptr(0)
	if s != nil {
		npages = s.npages
		if s.sweep(false) {
			// Whole span was freed. Count it toward the
			// page reclaimer credit since these pages can
			// now be used for span allocation.
			atomic.Xadduintptr(&mheap_.reclaimCredit, npages)
		} else {
			// Span is still in-use, so this returned no
			// pages to the heap and the span needs to move
			// to the swept in-use list.
			npages = 0
		}
	}

	// Decrement the number of active sweepers and if this is the
	// last one print trace information.
	if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepdone) != 0 {
		if debug.gcpacertrace > 0 {
			print("pacer: sweep done at heap size ", memstats.heap_live>>20, "MB; allocated ", (memstats.heap_live-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept, " pages at ", sweepRatio, " pages/byte\n")
		}
	}
	_g_.m.locks--
	return npages
}

// isSweepDone reports whether all spans are swept or currently being
// swept.
//
// Note that this condition may transition from false to true at any
// time as the sweeper runs. It may transition from true to false if a
// GC runs; to prevent that the caller must be non-preemptible or must
// somehow block GC progress.
func isSweepDone() bool {
	return mheap_.sweepdone != 0
}

// Returns only when span s has been swept.
func (s *mspan) ensureSwept() {
	// Caller must disable preemption.
	// Otherwise when this function returns the span can become unswept again
	// (if GC is triggered on another goroutine).
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("mspan.ensureSwept: m is not locked")
	}

	sg := mheap_.sweepgen
	spangen := atomic.Load(&s.sweepgen)
	if spangen == sg || spangen == sg+3 {
		return
	}
	// The caller must be sure that the span is a mSpanInUse span.
	if atomic.Cas(&s.sweepgen, sg-2, sg-1) {
		s.sweep(false)
		return
	}
	// Unfortunate condition: someone else is sweeping the span and
	// we have no efficient way to wait for them, so spin until the
	// sweep generation shows the span as swept.
	for {
		spangen := atomic.Load(&s.sweepgen)
		if spangen == sg || spangen == sg+3 {
			break
		}
		osyield()
	}
}

// Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to heap.
// If preserve=true, don't return it to heap nor relink in mcentral lists;
// caller takes care of it.
func (s *mspan) sweep(preserve bool) bool {
	// It's critical that we enter this function with preemption disabled,
	// GC must not start while we are in the middle of this function.
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("mspan.sweep: m is not locked")
	}
	sweepgen := mheap_.sweepgen
	if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state")
	}

	if trace.enabled {
		traceGCSweepSpan(s.npages * _PageSize)
	}

	atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))

	spc := s.spanclass
	size := s.elemsize
	res := false

	c := _g_.m.mcache
	freeToHeap := false

	// The allocBits indicate which unmarked objects don't need to be
	// processed since they were free at the end of the last GC cycle
	// and were not allocated since then.
	// If the allocBits index is >= s.freeindex and the bit
	// is not marked then the object remains unallocated
	// since the last GC.
	// This situation is analogous to being on a freelist.

	// Unlink & free special records for any objects we're about to free.
	// Two complications here:
	// 1. An object can have both finalizer and profile special records.
	//    In such case we need to queue finalizer for execution,
	//    mark the object as live and preserve the profile special.
	// 2. A tiny object can have several finalizers setup for different offsets.
	//    If such object is not marked, we need to queue all finalizers at once.
	// Both 1 and 2 are possible at the same time.
	specialp := &s.specials
	special := *specialp
	for special != nil {
		// A finalizer can be set for an inner byte of an object, find object beginning.
		objIndex := uintptr(special.offset) / size
		p := s.base() + objIndex*size
		mbits := s.markBitsForIndex(objIndex)
		if !mbits.isMarked() {
			// This object is not marked and has at least one special record.
			// Pass 1: see if it has at least one finalizer.
			hasFin := false
			endOffset := p - s.base() + size
			for tmp := special; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
				if tmp.kind == _KindSpecialFinalizer {
					// Stop freeing of object if it has a finalizer.
					mbits.setMarkedNonAtomic()
					hasFin = true
					break
				}
			}
			// Pass 2: queue all finalizers _or_ handle profile record.
			for special != nil && uintptr(special.offset) < endOffset {
				// Find the exact byte for which the special was setup
				// (as opposed to object beginning).
				p := s.base() + uintptr(special.offset)
				if special.kind == _KindSpecialFinalizer || !hasFin {
					// Splice out special record.
					y := special
					special = special.next
					*specialp = special
					freespecial(y, unsafe.Pointer(p), size)
				} else {
					// This is profile record, but the object has finalizers (so kept alive).
					// Keep special record.
					specialp = &special.next
					special = *specialp
				}
			}
		} else {
			// object is still live: keep special record
			specialp = &special.next
			special = *specialp
		}
	}

	if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled {
		// Find all newly freed objects. This doesn't have to be
		// efficient; allocfreetrace has massive overhead.
		mbits := s.markBitsForBase()
		abits := s.allocBitsForIndex(0)
		for i := uintptr(0); i < s.nelems; i++ {
			if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
				x := s.base() + i*s.elemsize
				if debug.allocfreetrace != 0 {
					tracefree(unsafe.Pointer(x), size)
				}
				if debug.clobberfree != 0 {
					clobberfree(unsafe.Pointer(x), size)
				}
				if raceenabled {
					racefree(unsafe.Pointer(x), size)
				}
				if msanenabled {
					msanfree(unsafe.Pointer(x), size)
				}
			}
			mbits.advance()
			abits.advance()
		}
	}

	// Count the objects still allocated (marked) in this span; the
	// difference from the previous allocCount is the number freed
	// by this sweep.
	nalloc := uint16(s.countAlloc())
	if spc.sizeclass() == 0 && nalloc == 0 {
		// The whole large object span is free; it will be
		// returned to the heap below.
		s.needzero = 1
		freeToHeap = true
	}
	nfreed := s.allocCount - nalloc
	if nalloc > s.allocCount {
		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
		throw("sweep increased allocation count")
	}

	s.allocCount = nalloc
	wasempty := s.nextFreeIndex() == s.nelems
	s.freeindex = 0 // reset allocation index to start of span.
	if trace.enabled {
		getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
	}

	// gcmarkBits becomes the allocBits.
	// get a fresh cleared gcmarkBits in preparation for next GC
	s.allocBits = s.gcmarkBits
	s.gcmarkBits = newMarkBits(s.nelems)

	// Initialize alloc bits cache.
	s.refillAllocCache(0)

	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
	// because of the potential for a concurrent free/SetFinalizer.
	// But we need to set it before we make the span available for allocation
	// (return it to heap or mcentral), because allocation code assumes that a
	// span is already swept if available for allocation.
	if freeToHeap || nfreed == 0 {
		// The span must be in our exclusive ownership until we update sweepgen,
		// check for potential races.
		if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
			print("mspan.sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
			throw("mspan.sweep: bad span state after sweep")
		}
		// Serialization point.
		// At this point the mark bits are cleared and allocation ready
		// to go so release the span.
		atomic.Store(&s.sweepgen, sweepgen)
	}

	if nfreed > 0 && spc.sizeclass() != 0 {
		c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed)
		res = mheap_.central[spc].mcentral.freeSpan(s, preserve, wasempty)
		// mcentral.freeSpan updates sweepgen.
	} else if freeToHeap {
		// Free large object span to heap.
		//
		// Under debug.efence the span is "faulted out" with sysFault
		// instead of being returned for reuse, so that any later
		// access through a dangling pointer crashes immediately.
		if debug.efence > 0 {
			s.limit = 0 // prevent mlookup from finding this span
			sysFault(unsafe.Pointer(s.base()), size)
		} else {
			mheap_.freeSpan(s, true)
		}
		c.local_nlargefree++
		c.local_largefree += size
		res = true
	}
	if !res {
		// The span has been swept and is still in-use, so put
		// it on the swept in-use list.
		mheap_.sweepSpans[sweepgen/2%2].push(s)
	}
	return res
}

// deductSweepCredit deducts sweep credit for allocating a span of
// size spanBytes. This must be performed *before* the span is
// allocated to ensure the system has enough credit. If necessary, it
// performs sweeping to prevent going in to debt. If the caller will
// also sweep pages (e.g., for a large allocation), it can pass a
// non-zero callerSweepPages to leave that many pages unswept.
//
// deductSweepCredit makes a worst-case assumption that all spanBytes
// bytes of the ultimately allocated span will be available for object
// allocation.
//
// deductSweepCredit is the core of the "proportional sweep" system.
// It uses statistics gathered by the garbage collector to perform
// enough sweeping so that all pages are swept during the concurrent
// sweep phase between GC cycles.
//
// mheap_ must NOT be locked.
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Proportional sweep is done or disabled.
		return
	}

	if trace.enabled {
		traceGCSweepStart()
	}

retry:
	sweptBasis := atomic.Load64(&mheap_.pagesSweptBasis)

	// Fix debt if necessary.
	newHeapLive := uintptr(atomic.Load64(&memstats.heap_live)-mheap_.sweepHeapLiveBasis) + spanBytes
	pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
	for pagesTarget > int64(atomic.Load64(&mheap_.pagesSwept)-sweptBasis) {
		if sweepone() == ^uintptr(0) {
			mheap_.sweepPagesPerByte = 0
			break
		}
		if atomic.Load64(&mheap_.pagesSweptBasis) != sweptBasis {
			// Sweep pacing changed. Recompute debt.
			goto retry
		}
	}

	if trace.enabled {
		traceGCSweepDone()
	}
}
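
// For illustration only (the numbers here are made up): with
// sweepPagesPerByte = 0.005, heap_live-sweepHeapLiveBasis = 1<<20,
// spanBytes = 8192 and callerSweepPages = 0, pagesTarget above is
// int64(0.005*1056768) = 5283, so the loop keeps calling sweepone
// until pagesSwept is at least 5283 pages ahead of pagesSweptBasis.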

// clobberfree sets the memory content at x to bad content, for debugging purposes.
func clobberfree(x unsafe.Pointer, size uintptr) {
	// size (span.elemsize) is always a multiple of 4.
	for i := uintptr(0); i < size; i += 4 {
		*(*uint32)(add(x, i)) = 0xdeadbeef
	}
}