...

Source file src/runtime/mcentral.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The mcentral doesn't actually contain the list of free objects; the mspan does.
// Each mcentral is two lists of mspans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).
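//
// Roughly speaking, mcentral sits between the per-P mcaches and the page
// heap: cacheSpan hands whole spans to an mcache, uncacheSpan takes them
// back, and grow obtains fresh spans from mheap_ when both lists run dry.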

package runtime

import "runtime/internal/atomic"

// Central list of free objects of a given size.
//
//go:notinheap
type mcentral struct {
	lock      mutex
	spanclass spanClass
	nonempty  mSpanList // list of spans with a free object, ie a nonempty free list
	empty     mSpanList // list of spans with no free objects (or cached in an mcache)

	// nmalloc is the cumulative count of objects allocated from
	// this mcentral, assuming all spans in mcaches are
	// fully-allocated. Written atomically, read under STW.
	nmalloc uint64
}
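
// There is one mcentral per span class: spanclass packs the size class and a
// noscan bit (roughly sizeclass<<1|noscan), and the mcentral instances live
// in mheap_.central, padded so different size classes stay on separate
// cache lines.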

// Initialize a single central free list.
func (c *mcentral) init(spc spanClass) {
	c.spanclass = spc
	c.nonempty.init()
	c.empty.init()
}

// Allocate a span to use in an mcache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	lock(&c.lock)
	traceDone := false
	if trace.enabled {
		traceGCSweepStart()
	}
	sg := mheap_.sweepgen
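	// The checks below follow the sweep generation encoding documented in
	// mheap.go: sweepgen == sg-2 means the span still needs sweeping,
	// sg-1 means it is currently being swept, and sg means it is swept and
	// ready to use (sg+1 and sg+3 mark spans cached in an mcache before and
	// after sweep began, respectively).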
retry:
	var s *mspan
	for s = c.nonempty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			c.nonempty.remove(s)
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			goto havespan
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		c.nonempty.remove(s)
		c.empty.insertBack(s)
		unlock(&c.lock)
		goto havespan
	}
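
	// Note that a span handed to an mcache is always moved to c.empty first:
	// from mcentral's point of view a cached span has no available objects
	// until uncacheSpan returns it.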

	for s = c.empty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			c.empty.remove(s)
			// swept spans are at the end of the list
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			freeIndex := s.nextFreeIndex()
			if freeIndex != s.nelems {
				s.freeindex = freeIndex
				goto havespan
			}
			lock(&c.lock)
			// the span is still empty after sweep
			// it is already in the empty list, so just retry
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	if trace.enabled {
		traceGCSweepDone()
		traceDone = true
	}
	unlock(&c.lock)

	// Replenish central list if empty.
	s = c.grow()
	if s == nil {
		return nil
	}
	lock(&c.lock)
	c.empty.insertBack(s)
	unlock(&c.lock)

	// At this point s is a non-empty span, queued at the end of the empty list,
	// c is unlocked.
havespan:
	if trace.enabled && !traceDone {
		traceGCSweepDone()
	}
	n := int(s.nelems) - int(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	// Assume all objects from this span will be allocated in the
	// mcache. If it gets uncached, we'll adjust this.
	atomic.Xadd64(&c.nmalloc, int64(n))
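	// heap_live is likewise credited as if every remaining slot in the span
	// were already allocated; uncacheSpan subtracts back whatever was not
	// actually used.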
	usedBytes := uintptr(s.allocCount) * s.elemsize
	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
	if trace.enabled {
		// heap_live changed.
		traceHeapAlloc()
	}
	if gcBlackenEnabled != 0 {
		// heap_live changed.
		gcController.revise()
	}
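
	// s.allocCache caches a 64-object window of s.allocBits (inverted, so
	// set bits mark free slots). refillAllocCache takes a byte offset into
	// allocBits, and each byte covers 8 objects, so round freeindex down to
	// a multiple of 64 objects and convert to bytes.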
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64

	return s
}
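
// As a rough sketch of the caller's side (see mcache.go), an mcache refills
// its slot for a span class with something like:
//
//	s := mheap_.central[spc].mcentral.cacheSpan()
//	if s == nil {
//		throw("out of memory")
//	}
//	c.alloc[spc] = s
//
// where c is the *mcache and spc the spanClass being refilled.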

// Return span from an mcache.
func (c *mcentral) uncacheSpan(s *mspan) {
	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	sg := mheap_.sweepgen
	stale := s.sweepgen == sg+1
	if stale {
		// Span was cached before sweep began. It's our
		// responsibility to sweep it.
		//
		// Set sweepgen to indicate it's not cached but needs
		// sweeping and can't be allocated from. sweep will
		// set s.sweepgen to indicate s is swept.
		atomic.Store(&s.sweepgen, sg-1)
	} else {
		// Indicate that s is no longer cached.
		atomic.Store(&s.sweepgen, sg)
	}

	n := int(s.nelems) - int(s.allocCount)
	if n > 0 {
		// cacheSpan updated alloc assuming all objects on s
		// were going to be allocated. Adjust for any that
		// weren't. We must do this before potentially
		// sweeping the span.
		atomic.Xadd64(&c.nmalloc, -int64(n))

		lock(&c.lock)
		c.empty.remove(s)
		c.nonempty.insert(s)
		if !stale {
			// mCentral_CacheSpan conservatively counted
			// unallocated slots in heap_live. Undo this.
			//
			// If this span was cached before sweep, then
			// heap_live was totally recomputed since
			// caching this span, so we don't do this for
			// stale spans.
			atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
		}
		unlock(&c.lock)
	}

	if stale {
		// Now that s is in the right mcentral list, we can
		// sweep it.
		s.sweep(false)
	}
}
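
// uncacheSpan is reached when an mcache gives a span back, typically because
// mcache.refill is replacing an exhausted span or because the mcache is being
// flushed for GC.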

// freeSpan updates c and s after sweeping s.
// It sets s's sweepgen to the latest generation,
// and, based on the number of free objects in s,
// moves s to the appropriate list of c or returns it
// to the heap.
// freeSpan reports whether s was returned to the heap.
// If preserve=true, it does not move s (the caller
// must take care of it).
func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {
	if sg := mheap_.sweepgen; s.sweepgen == sg+1 || s.sweepgen == sg+3 {
		throw("freeSpan given cached span")
	}
	s.needzero = 1

	if preserve {
		// preserve is set only when called from (un)cacheSpan above,
		// the span must be in the empty list.
		if !s.inList() {
			throw("can't preserve unlinked span")
		}
		atomic.Store(&s.sweepgen, mheap_.sweepgen)
		return false
	}

	lock(&c.lock)

	// Move to nonempty if necessary.
	if wasempty {
		c.empty.remove(s)
		c.nonempty.insert(s)
	}

	// delay updating sweepgen until here. This is the signal that
	// the span may be used in an mcache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	atomic.Store(&s.sweepgen, mheap_.sweepgen)

	if s.allocCount != 0 {
		unlock(&c.lock)
		return false
	}

	c.nonempty.remove(s)
	unlock(&c.lock)
	mheap_.freeSpan(s, false)
	return true
}
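
// Broadly, freeSpan is called from mspan.sweep once sweeping has freed
// objects in a small-object span; a true result means the whole span went
// back to the page heap and the sweeper must not touch it again.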

// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
	size := uintptr(class_to_size[c.spanclass.sizeclass()])

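	// The trailing arguments to mheap_.alloc request a small-object span
	// (large=false) backed by zeroed memory (needzero=true).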
	s := mheap_.alloc(npages, c.spanclass, false, true)
	if s == nil {
		return nil
	}

	// Use division by multiplication and shifts to quickly compute:
	// n := (npages << _PageShift) / size
	n := (npages << _PageShift) >> s.divShift * uintptr(s.divMul) >> s.divShift2
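	// divShift, divMul, and divShift2 are per-size-class magic constants
	// (generated by mksizeclasses.go) chosen so that the shift/multiply/shift
	// sequence above equals the exact integer division it replaces.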
	s.limit = s.base() + size*n
	heapBitsForAddr(s.base()).initSpan(s)
	return s
}
