
Source file src/runtime/mgcsweepbuf.go

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/cpu"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// A gcSweepBuf is a set of *mspans.
//
// gcSweepBuf is safe for concurrent push operations *or* concurrent
// pop operations, but not both simultaneously.
type gcSweepBuf struct {
	// A gcSweepBuf is a two-level data structure consisting of a
	// growable spine that points to fixed-sized blocks. The spine
	// can be accessed without locks, but adding a block or
	// growing it requires taking the spine lock.
	//
	// Because each mspan covers at least 8K of heap and takes at
	// most 8 bytes in the gcSweepBuf, the growth of the spine is
	// quite limited.
	//
	// The spine and all blocks are allocated off-heap, which
	// allows this to be used in the memory manager and avoids the
	// need for write barriers on all of these. We never release
	// this memory because there could be concurrent lock-free
	// access and we're likely to reuse it anyway. (In principle,
	// we could do this during STW.)

	spineLock mutex
	spine     unsafe.Pointer // *[N]*gcSweepBlock, accessed atomically
	spineLen  uintptr        // Spine array length, accessed atomically
	spineCap  uintptr        // Spine array cap, accessed under lock

	// index is the first unused slot in the logical concatenation
	// of all blocks. It is accessed atomically.
	index uint32
}

const (
	gcSweepBlockEntries    = 512 // 4KB on 64-bit
	gcSweepBufInitSpineCap = 256 // Enough for 1GB heap on 64-bit
)

type gcSweepBlock struct {
	spans [gcSweepBlockEntries]*mspan
}
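
// The helper below is an illustrative sketch and is NOT part of the
// original file. It only restates the indexing scheme used by push and
// pop: a logical slot index ("cursor") is split into a spine index and
// an offset within that block. For example, cursor 1000 lands in block
// 1000/512 = 1 at offset 1000%512 = 488. The sizing comments above
// follow from the same constants: a block is 512 entries * 8 bytes =
// 4KB on 64-bit, and the initial spine of 256 blocks holds 256*512 =
// 131072 spans, which at a minimum of 8K of heap per span covers 1GB.
func gcSweepBufSlot(cursor uintptr) (top, bottom uintptr) {
	return cursor / gcSweepBlockEntries, cursor % gcSweepBlockEntries
}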

// push adds span s to buffer b. push is safe to call concurrently
// with other push operations, but NOT to call concurrently with pop.
func (b *gcSweepBuf) push(s *mspan) {
	// Obtain our slot.
	cursor := uintptr(atomic.Xadd(&b.index, +1) - 1)
	top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries

	// Do we need to add a block?
	spineLen := atomic.Loaduintptr(&b.spineLen)
	var block *gcSweepBlock
retry:
	if top < spineLen {
		spine := atomic.Loadp(unsafe.Pointer(&b.spine))
		blockp := add(spine, sys.PtrSize*top)
		block = (*gcSweepBlock)(atomic.Loadp(blockp))
	} else {
		// Add a new block to the spine, potentially growing
		// the spine.
		lock(&b.spineLock)
		// spineLen cannot change until we release the lock,
		// but may have changed while we were waiting.
		spineLen = atomic.Loaduintptr(&b.spineLen)
		if top < spineLen {
			unlock(&b.spineLock)
			goto retry
		}

		if spineLen == b.spineCap {
			// Grow the spine.
			newCap := b.spineCap * 2
			if newCap == 0 {
				newCap = gcSweepBufInitSpineCap
			}
			newSpine := persistentalloc(newCap*sys.PtrSize, cpu.CacheLineSize, &memstats.gc_sys)
			if b.spineCap != 0 {
				// Blocks are allocated off-heap, so
				// no write barriers.
				memmove(newSpine, b.spine, b.spineCap*sys.PtrSize)
			}
			// Spine is allocated off-heap, so no write barrier.
			atomic.StorepNoWB(unsafe.Pointer(&b.spine), newSpine)
			b.spineCap = newCap
			// We can't immediately free the old spine
			// since a concurrent push with a lower index
			// could still be reading from it. We let it
			// leak because even a 1TB heap would waste
			// less than 2MB of memory on old spines. If
			// this is a problem, we could free old spines
			// during STW.
		}

		// Allocate a new block and add it to the spine.
		block = (*gcSweepBlock)(persistentalloc(unsafe.Sizeof(gcSweepBlock{}), cpu.CacheLineSize, &memstats.gc_sys))
		blockp := add(b.spine, sys.PtrSize*top)
		// Blocks are allocated off-heap, so no write barrier.
		atomic.StorepNoWB(blockp, unsafe.Pointer(block))
		atomic.Storeuintptr(&b.spineLen, spineLen+1)
		unlock(&b.spineLock)
	}

	// We have a block. Insert the span.
	block.spans[bottom] = s
}
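
// gcSweepBufPushAll is a hypothetical wrapper, not part of the original
// file; it is only meant to illustrate the documented contract of push.
// Each call to push reserves a distinct slot with atomic.Xadd, so this
// function may run on several goroutines at once against the same
// buffer, provided no pop is running concurrently.
func gcSweepBufPushAll(buf *gcSweepBuf, spans []*mspan) {
	for _, s := range spans {
		buf.push(s)
	}
}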

// pop removes and returns a span from buffer b, or nil if b is empty.
// pop is safe to call concurrently with other pop operations, but NOT
// to call concurrently with push.
func (b *gcSweepBuf) pop() *mspan {
	cursor := atomic.Xadd(&b.index, -1)
	if int32(cursor) < 0 {
		atomic.Xadd(&b.index, +1)
		return nil
	}

	// There are no concurrent spine or block modifications during
	// pop, so we can omit the atomics.
	top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries
	blockp := (**gcSweepBlock)(add(b.spine, sys.PtrSize*uintptr(top)))
	block := *blockp
	s := block.spans[bottom]
	// Clear the pointer for block(i).
	block.spans[bottom] = nil
	return s
}
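
// gcSweepBufDrain is a hypothetical sketch, not part of the original
// file; it shows the pop side of the contract. Because push hands out
// increasing slots and pop takes the highest filled slot, repeated pops
// return spans in roughly LIFO order, and pop returns nil once the
// buffer is empty. Concurrent pops are safe, but no push may run at the
// same time.
func gcSweepBufDrain(buf *gcSweepBuf, fn func(*mspan)) {
	for s := buf.pop(); s != nil; s = buf.pop() {
		fn(s)
	}
}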

// numBlocks returns the number of blocks in buffer b. numBlocks is
// safe to call concurrently with any other operation. Spans that have
// been pushed prior to the call to numBlocks are guaranteed to appear
// in some block in the range [0, numBlocks()), assuming there are no
// intervening pops. Spans that are pushed after the call may also
// appear in these blocks.
func (b *gcSweepBuf) numBlocks() int {
	return int((atomic.Load(&b.index) + gcSweepBlockEntries - 1) / gcSweepBlockEntries)
}
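
// gcSweepBufCeilBlocks is an illustrative helper, not part of the
// original file; it reproduces the rounding that numBlocks applies to
// the index. For example, an index of 1000 occupies blocks 0 and 1, and
// (1000 + 512 - 1) / 512 = 2, so two blocks are reported.
func gcSweepBufCeilBlocks(index uint32) int {
	return int((index + gcSweepBlockEntries - 1) / gcSweepBlockEntries)
}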

// block returns the spans in the i'th block of buffer b. block is
// safe to call concurrently with push.
func (b *gcSweepBuf) block(i int) []*mspan {
	// Perform bounds check before loading spine address since
	// push ensures the allocated length is at least spineLen.
	if i < 0 || uintptr(i) >= atomic.Loaduintptr(&b.spineLen) {
		throw("block index out of range")
	}

	// Get block i.
	spine := atomic.Loadp(unsafe.Pointer(&b.spine))
	blockp := add(spine, sys.PtrSize*uintptr(i))
	block := (*gcSweepBlock)(atomic.Loadp(blockp))

	// Slice the block if necessary.
	cursor := uintptr(atomic.Load(&b.index))
	top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries
	var spans []*mspan
	if uintptr(i) < top {
		spans = block.spans[:]
	} else {
		spans = block.spans[:bottom]
	}

	// push may have reserved a slot but not filled it yet, so
	// trim away unused entries.
	for len(spans) > 0 && spans[len(spans)-1] == nil {
		spans = spans[:len(spans)-1]
	}
	return spans
}
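
// gcSweepBufForEach is a hypothetical sketch, not part of the original
// file; it combines numBlocks and block in the way their comments
// describe. Every span pushed before the numBlocks call is visited, the
// walk may run concurrently with push (but not pop), and slots that a
// concurrent push has reserved but not yet filled are skipped because
// they still read as nil.
func gcSweepBufForEach(buf *gcSweepBuf, fn func(*mspan)) {
	n := buf.numBlocks()
	for i := 0; i < n; i++ {
		for _, s := range buf.block(i) {
			if s != nil {
				fn(s)
			}
		}
	}
}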
