...

Source file src/runtime/mcache.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// Per-thread (in Go, per-P) cache for small objects.
// No locking needed because it is per-thread (per-P).
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
//
//go:notinheap
type mcache struct {
	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	next_sample uintptr // trigger heap sample after allocating this many bytes
	local_scan  uintptr // bytes of scannable heap allocated

	// Allocator cache for tiny objects w/o pointers.
	// See "Tiny allocator" comment in malloc.go.

	// tiny points to the beginning of the current tiny block, or
	// nil if there is no current tiny block.
	//
	// tiny is a heap pointer. Since mcache is in non-GC'd memory,
	// we handle it by clearing it in releaseAll during mark
	// termination.
	tiny             uintptr
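	// tinyoffset is the offset of the next free byte in the current
	// tiny block (see the tiny-allocation path of mallocgc in malloc.go).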
	tinyoffset       uintptr
	local_tinyallocs uintptr // number of tiny allocs not counted in other stats

	// The rest is not accessed on every malloc.

	alloc [numSpanClasses]*mspan // spans to allocate from, indexed by spanClass

	stackcache [_NumStackOrders]stackfreelist

	// Local allocator stats, flushed during GC.
	local_largefree  uintptr                  // bytes freed for large objects (>maxsmallsize)
	local_nlargefree uintptr                  // number of frees for large objects (>maxsmallsize)
	local_nsmallfree [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize)

	// flushGen indicates the sweepgen during which this mcache
	// was last flushed. If flushGen != mheap_.sweepgen, the spans
	// in this mcache are stale and need to be flushed so they
	// can be swept. This is done in acquirep.
	flushGen uint32
}

// A gclink is a node in a linked list of blocks, like mlink,
// but it is opaque to the garbage collector.
// The GC does not trace the pointers during collection,
// and the compiler does not emit write barriers for assignments
// of gclinkptr values. Code should store references to gclinks
// as gclinkptr, not as *gclink.
type gclink struct {
	next gclinkptr
}

// A gclinkptr is a pointer to a gclink, but it is opaque
// to the garbage collector.
type gclinkptr uintptr

// ptr returns the *gclink form of p.
// The result should be used for accessing fields, not stored
// in other data structures.
func (p gclinkptr) ptr() *gclink {
	return (*gclink)(unsafe.Pointer(p))
}

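// A stackfreelist heads a linked list of free stacks chained through
// gclinkptrs, so the list is invisible to the garbage collector.
// Manipulation goes through gclinkptr: pop by reading list and following
// list.ptr().next, push by setting x.ptr().next to the old head and
// storing x back into list.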
type stackfreelist struct {
	list gclinkptr // linked list of free stacks
	size uintptr   // total size of stacks in list
}

// dummy mspan that contains no free objects.
var emptymspan mspan

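// allocmcache allocates a new mcache from mheap_.cachealloc under the
// heap lock and initializes it. Every span class starts out pointing at
// emptymspan, so the first allocation of each class triggers a refill
// from the corresponding mcentral, and flushGen is set to the current
// sweepgen so the new cache is not considered stale.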
func allocmcache() *mcache {
	var c *mcache
	systemstack(func() {
		lock(&mheap_.lock)
		c = (*mcache)(mheap_.cachealloc.alloc())
		c.flushGen = mheap_.sweepgen
		unlock(&mheap_.lock)
	})
	for i := range c.alloc {
		c.alloc[i] = &emptymspan
	}
	c.next_sample = nextSample()
	return c
}

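// freemcache releases all resources held by c and returns the mcache
// itself to mheap_.cachealloc. Cached spans go back to their mcentrals
// via releaseAll, the stack cache is flushed, and the local statistics
// are folded into the global ones before the memory is freed.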
func freemcache(c *mcache) {
	systemstack(func() {
		c.releaseAll()
		stackcache_clear(c)

		// NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
		// with the stealing of gcworkbufs during garbage collection to avoid
		// a race where the workbuf is double-freed.
		// gcworkbuffree(c.gcworkbuf)

		lock(&mheap_.lock)
		purgecachedstats(c)
		mheap_.cachealloc.free(unsafe.Pointer(c))
		unlock(&mheap_.lock)
	})
}

// refill acquires a new span of span class spc for c. This span will
// have at least one free object. The current span in c must be full.
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
func (c *mcache) refill(spc spanClass) {
	// Return the current cached span to the central lists.
	s := c.alloc[spc]

	if uintptr(s.allocCount) != s.nelems {
		throw("refill of span with free space remaining")
	}
	if s != &emptymspan {
		// Mark this span as no longer cached.
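		// A cached span has sweepgen == mheap_.sweepgen+3, meaning
		// "swept and then cached and still cached"; resetting it to
		// mheap_.sweepgen marks it as swept and no longer cached.
		// See the sweepgen comment in mheap.go.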
		if s.sweepgen != mheap_.sweepgen+3 {
			throw("bad sweepgen in refill")
		}
		atomic.Store(&s.sweepgen, mheap_.sweepgen)
	}

	// Get a new cached span from the central lists.
	s = mheap_.central[spc].mcentral.cacheSpan()
	if s == nil {
		throw("out of memory")
	}

	if uintptr(s.allocCount) == s.nelems {
		throw("span has no free space")
	}

	// Indicate that this span is cached and prevent asynchronous
	// sweeping in the next sweep phase.
	s.sweepgen = mheap_.sweepgen + 3

	c.alloc[spc] = s
}

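// releaseAll returns every cached span to its mcentral and drops the
// current tiny block, so the tiny allocator holds no heap pointers
// across mark termination.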
func (c *mcache) releaseAll() {
	for i := range c.alloc {
		s := c.alloc[i]
		if s != &emptymspan {
			mheap_.central[i].mcentral.uncacheSpan(s)
			c.alloc[i] = &emptymspan
		}
	}
	// Clear tinyalloc pool.
	c.tiny = 0
	c.tinyoffset = 0
}

// prepareForSweep flushes c if the system has entered a new sweep phase
// since c was populated. This must happen between the sweep phase
// starting and the first allocation from c.
func (c *mcache) prepareForSweep() {
	// Alternatively, instead of making sure we do this on every P
	// between starting the world and allocating on that P, we
	// could leave allocate-black on, allow allocation to continue
	// as usual, use a ragged barrier at the beginning of sweep to
	// ensure all cached spans are swept, and then disable
	// allocate-black. However, with this approach it's difficult
	// to avoid spilling mark bits into the *next* GC cycle.
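	//
	// Since sweepgen advances by 2 on each GC cycle, flushGen should be
	// either sg (already flushed during this cycle) or sg-2 (not flushed
	// since the previous cycle); anything else indicates a missed flush.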
	sg := mheap_.sweepgen
	if c.flushGen == sg {
		return
	} else if c.flushGen != sg-2 {
		println("bad flushGen", c.flushGen, "in prepareForSweep; sweepgen", sg)
		throw("bad flushGen")
	}
	c.releaseAll()
	stackcache_clear(c)
	atomic.Store(&c.flushGen, mheap_.sweepgen) // Synchronizes with gcStart
}
