
Source file src/runtime/stack.go

     1	// Copyright 2013 The Go Authors. All rights reserved.
     2	// Use of this source code is governed by a BSD-style
     3	// license that can be found in the LICENSE file.
     4	
     5	package runtime
     6	
     7	import (
     8		"runtime/internal/atomic"
     9		"runtime/internal/sys"
    10		"unsafe"
    11	)
    12	
    13	/*
    14	Stack layout parameters.
     15	These constants are mirrored in cmd/internal/objabi for use by the linker's nosplit check.
    16	
    17	The per-goroutine g->stackguard is set to point StackGuard bytes
    18	above the bottom of the stack.  Each function compares its stack
    19	pointer against g->stackguard to check for overflow.  To cut one
    20	instruction from the check sequence for functions with tiny frames,
    21	the stack is allowed to protrude StackSmall bytes below the stack
    22	guard.  Functions with large frames don't bother with the check and
    23	always call morestack.  The sequences are (for amd64, others are
    24	similar):
    25	
    26		guard = g->stackguard
    27		frame = function's stack frame size
    28		argsize = size of function arguments (call + return)
    29	
    30		stack frame size <= StackSmall:
    31			CMPQ guard, SP
    32			JHI 3(PC)
    33			MOVQ m->morearg, $(argsize << 32)
    34			CALL morestack(SB)
    35	
     36		stack frame size > StackSmall but < StackBig:
    37			LEAQ (frame-StackSmall)(SP), R0
    38			CMPQ guard, R0
    39			JHI 3(PC)
    40			MOVQ m->morearg, $(argsize << 32)
    41			CALL morestack(SB)
    42	
    43		stack frame size >= StackBig:
    44			MOVQ m->morearg, $((argsize << 32) | frame)
    45			CALL morestack(SB)
    46	
    47	The bottom StackGuard - StackSmall bytes are important: there has
    48	to be enough room to execute functions that refuse to check for
    49	stack overflow, either because they need to be adjacent to the
    50	actual caller's frame (deferproc) or because they handle the imminent
    51	stack overflow (morestack).
    52	
    53	For example, deferproc might call malloc, which does one of the
    54	above checks (without allocating a full frame), which might trigger
    55	a call to morestack.  This sequence needs to fit in the bottom
    56	section of the stack.  On amd64, morestack's frame is 40 bytes, and
    57	deferproc's frame is 56 bytes.  That fits well within the
    58	StackGuard - StackSmall bytes at the bottom.
     59	The linker explores all possible call traces involving non-splitting
     60	functions to make sure that this limit cannot be violated.
    61	*/
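	// For a concrete illustration using the constants below (StackSmall = 128,
	// StackBig = 4096): a function with a 96-byte frame uses the first
	// sequence, one with a 1024-byte frame uses the second, and one with an
	// 8192-byte frame uses the third, calling morestack unconditionally.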
    62	
    63	const (
    64		// StackSystem is a number of additional bytes to add
    65		// to each stack below the usual guard area for OS-specific
    66		// purposes like signal handling. Used on Windows, Plan 9,
    67		// and iOS because they do not use a separate stack.
    68		_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024 + sys.GoosDarwin*sys.GoarchArm64*1024
    69	
    70		// The minimum size of stack used by Go code
    71		_StackMin = 2048
    72	
    73		// The minimum stack size to allocate.
    74		// The hackery here rounds FixedStack0 up to a power of 2.
    75		_FixedStack0 = _StackMin + _StackSystem
    76		_FixedStack1 = _FixedStack0 - 1
    77		_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
    78		_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
    79		_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
    80		_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
    81		_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
    82		_FixedStack  = _FixedStack6 + 1
    83	
    84		// Functions that need frames bigger than this use an extra
    85		// instruction to do the stack split check, to avoid overflow
    86		// in case SP - framesize wraps below zero.
    87		// This value can be no bigger than the size of the unmapped
    88		// space at zero.
    89		_StackBig = 4096
    90	
    91		// The stack guard is a pointer this many bytes above the
    92		// bottom of the stack.
    93		_StackGuard = 880*sys.StackGuardMultiplier + _StackSystem
    94	
    95		// After a stack split check the SP is allowed to be this
    96		// many bytes below the stack guard. This saves an instruction
    97		// in the checking sequence for tiny frames.
    98		_StackSmall = 128
    99	
   100		// The maximum number of bytes that a chain of NOSPLIT
   101		// functions can use.
   102		_StackLimit = _StackGuard - _StackSystem - _StackSmall
   103	)
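	// On a typical linux/amd64 build, where _StackSystem is 0 and
	// sys.StackGuardMultiplier is 1, these work out to _FixedStack = 2048,
	// _StackGuard = 880, and _StackLimit = 880 - 0 - 128 = 752 bytes of
	// headroom for nosplit call chains; other configurations scale the
	// guard through _StackSystem and StackGuardMultiplier.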
   104	
   105	const (
   106		// stackDebug == 0: no logging
   107		//            == 1: logging of per-stack operations
   108		//            == 2: logging of per-frame operations
   109		//            == 3: logging of per-word updates
   110		//            == 4: logging of per-word reads
   111		stackDebug       = 0
   112		stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
   113		stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
   114		stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
   115		stackNoCache     = 0 // disable per-P small stack caches
   116	
   117		// check the BP links during traceback.
   118		debugCheckBP = false
   119	)
   120	
   121	const (
   122		uintptrMask = 1<<(8*sys.PtrSize) - 1
   123	
   124		// Goroutine preemption request.
   125		// Stored into g->stackguard0 to cause split stack check failure.
   126		// Must be greater than any real sp.
   127		// 0xfffffade in hex.
   128		stackPreempt = uintptrMask & -1314
   129	
   130		// Thread is forking.
   131		// Stored into g->stackguard0 to cause split stack check failure.
   132		// Must be greater than any real sp.
   133		stackFork = uintptrMask & -1234
   134	)
   135	
   136	// Global pool of spans that have free stacks.
   137	// Stacks are assigned an order according to size.
   138	//     order = log_2(size/FixedStack)
   139	// There is a free list for each order.
   140	// TODO: one lock per order?
   141	var stackpool [_NumStackOrders]mSpanList
   142	var stackpoolmu mutex
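	// For example, with _FixedStack = 2048 and _NumStackOrders = 4 (the
	// usual values on 64-bit Linux), the pool holds one free list each for
	// 2 KiB, 4 KiB, 8 KiB, and 16 KiB stacks.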
   143	
   144	// Global pool of large stack spans.
   145	var stackLarge struct {
   146		lock mutex
   147		free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
   148	}
   149	
   150	func stackinit() {
   151		if _StackCacheSize&_PageMask != 0 {
   152			throw("cache size must be a multiple of page size")
   153		}
   154		for i := range stackpool {
   155			stackpool[i].init()
   156		}
   157		for i := range stackLarge.free {
   158			stackLarge.free[i].init()
   159		}
   160	}
   161	
   162	// stacklog2 returns ⌊log_2(n)⌋.
   163	func stacklog2(n uintptr) int {
   164		log2 := 0
   165		for n > 1 {
   166			n >>= 1
   167			log2++
   168		}
   169		return log2
   170	}
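	// For example, stacklog2(1) == 0, stacklog2(8) == 3, and stacklog2(9) == 3:
	// the result is rounded down, which is fine because stack spans and stack
	// sizes are kept at powers of 2.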
   171	
   172	// Allocates a stack from the free pool. Must be called with
   173	// stackpoolmu held.
   174	func stackpoolalloc(order uint8) gclinkptr {
   175		list := &stackpool[order]
   176		s := list.first
   177		if s == nil {
   178			// no free stacks. Allocate another span worth.
   179			s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
   180			if s == nil {
   181				throw("out of memory")
   182			}
   183			if s.allocCount != 0 {
   184				throw("bad allocCount")
   185			}
   186			if s.manualFreeList.ptr() != nil {
   187				throw("bad manualFreeList")
   188			}
   189			osStackAlloc(s)
   190			s.elemsize = _FixedStack << order
   191			for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
   192				x := gclinkptr(s.base() + i)
   193				x.ptr().next = s.manualFreeList
   194				s.manualFreeList = x
   195			}
   196			list.insert(s)
   197		}
   198		x := s.manualFreeList
   199		if x.ptr() == nil {
   200			throw("span has no free stacks")
   201		}
   202		s.manualFreeList = x.ptr().next
   203		s.allocCount++
   204		if s.manualFreeList.ptr() == nil {
   205			// all stacks in s are allocated.
   206			list.remove(s)
   207		}
   208		return x
   209	}
   210	
   211	// Adds stack x to the free pool. Must be called with stackpoolmu held.
   212	func stackpoolfree(x gclinkptr, order uint8) {
   213		s := spanOfUnchecked(uintptr(x))
   214		if s.state != mSpanManual {
   215			throw("freeing stack not in a stack span")
   216		}
   217		if s.manualFreeList.ptr() == nil {
   218			// s will now have a free stack
   219			stackpool[order].insert(s)
   220		}
   221		x.ptr().next = s.manualFreeList
   222		s.manualFreeList = x
   223		s.allocCount--
   224		if gcphase == _GCoff && s.allocCount == 0 {
   225			// Span is completely free. Return it to the heap
   226			// immediately if we're sweeping.
   227			//
   228			// If GC is active, we delay the free until the end of
   229			// GC to avoid the following type of situation:
   230			//
   231			// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
   232			// 2) The stack that pointer points to is copied
   233			// 3) The old stack is freed
   234			// 4) The containing span is marked free
   235			// 5) GC attempts to mark the SudoG.elem pointer. The
   236			//    marking fails because the pointer looks like a
   237			//    pointer into a free span.
   238			//
   239			// By not freeing, we prevent step #4 until GC is done.
   240			stackpool[order].remove(s)
   241			s.manualFreeList = 0
   242			osStackFree(s)
   243			mheap_.freeManual(s, &memstats.stacks_inuse)
   244		}
   245	}
   246	
   247	// stackcacherefill/stackcacherelease implement a global pool of stack segments.
   248	// The pool is required to prevent unlimited growth of per-thread caches.
   249	//
   250	//go:systemstack
   251	func stackcacherefill(c *mcache, order uint8) {
   252		if stackDebug >= 1 {
   253			print("stackcacherefill order=", order, "\n")
   254		}
   255	
   256		// Grab some stacks from the global cache.
   257		// Grab half of the allowed capacity (to prevent thrashing).
   258		var list gclinkptr
   259		var size uintptr
   260		lock(&stackpoolmu)
   261		for size < _StackCacheSize/2 {
   262			x := stackpoolalloc(order)
   263			x.ptr().next = list
   264			list = x
   265			size += _FixedStack << order
   266		}
   267		unlock(&stackpoolmu)
   268		c.stackcache[order].list = list
   269		c.stackcache[order].size = size
   270	}
   271	
   272	//go:systemstack
   273	func stackcacherelease(c *mcache, order uint8) {
   274		if stackDebug >= 1 {
   275			print("stackcacherelease order=", order, "\n")
   276		}
   277		x := c.stackcache[order].list
   278		size := c.stackcache[order].size
   279		lock(&stackpoolmu)
   280		for size > _StackCacheSize/2 {
   281			y := x.ptr().next
   282			stackpoolfree(x, order)
   283			x = y
   284			size -= _FixedStack << order
   285		}
   286		unlock(&stackpoolmu)
   287		c.stackcache[order].list = x
   288		c.stackcache[order].size = size
   289	}
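	// Together these form a simple hysteresis, assuming the usual
	// _StackCacheSize of 32 KiB (see malloc.go): stackcacherefill tops an
	// empty per-P list up to about 16 KiB of free stacks, and
	// stackcacherelease drains the cache back to about 16 KiB once stackfree
	// has grown it past 32 KiB, keeping each per-P cache bounded without
	// thrashing on every alloc/free.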
   290	
   291	//go:systemstack
   292	func stackcache_clear(c *mcache) {
   293		if stackDebug >= 1 {
   294			print("stackcache clear\n")
   295		}
   296		lock(&stackpoolmu)
   297		for order := uint8(0); order < _NumStackOrders; order++ {
   298			x := c.stackcache[order].list
   299			for x.ptr() != nil {
   300				y := x.ptr().next
   301				stackpoolfree(x, order)
   302				x = y
   303			}
   304			c.stackcache[order].list = 0
   305			c.stackcache[order].size = 0
   306		}
   307		unlock(&stackpoolmu)
   308	}
   309	
   310	// stackalloc allocates an n byte stack.
   311	//
   312	// stackalloc must run on the system stack because it uses per-P
   313	// resources and must not split the stack.
   314	//
   315	//go:systemstack
   316	func stackalloc(n uint32) stack {
   317		// Stackalloc must be called on scheduler stack, so that we
   318		// never try to grow the stack during the code that stackalloc runs.
   319		// Doing so would cause a deadlock (issue 1547).
   320		thisg := getg()
   321		if thisg != thisg.m.g0 {
   322			throw("stackalloc not on scheduler stack")
   323		}
   324		if n&(n-1) != 0 {
   325			throw("stack size not a power of 2")
   326		}
   327		if stackDebug >= 1 {
   328			print("stackalloc ", n, "\n")
   329		}
   330	
   331		if debug.efence != 0 || stackFromSystem != 0 {
   332			n = uint32(round(uintptr(n), physPageSize))
   333			v := sysAlloc(uintptr(n), &memstats.stacks_sys)
   334			if v == nil {
   335				throw("out of memory (stackalloc)")
   336			}
   337			return stack{uintptr(v), uintptr(v) + uintptr(n)}
   338		}
   339	
   340		// Small stacks are allocated with a fixed-size free-list allocator.
   341		// If we need a stack of a bigger size, we fall back on allocating
   342		// a dedicated span.
   343		var v unsafe.Pointer
   344		if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
   345			order := uint8(0)
   346			n2 := n
   347			for n2 > _FixedStack {
   348				order++
   349				n2 >>= 1
   350			}
   351			var x gclinkptr
   352			c := thisg.m.mcache
   353			if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" {
   354				// c == nil can happen in the guts of exitsyscall or
   355				// procresize. Just get a stack from the global pool.
   356				// Also don't touch stackcache during gc
   357				// as it's flushed concurrently.
   358				lock(&stackpoolmu)
   359				x = stackpoolalloc(order)
   360				unlock(&stackpoolmu)
   361			} else {
   362				x = c.stackcache[order].list
   363				if x.ptr() == nil {
   364					stackcacherefill(c, order)
   365					x = c.stackcache[order].list
   366				}
   367				c.stackcache[order].list = x.ptr().next
   368				c.stackcache[order].size -= uintptr(n)
   369			}
   370			v = unsafe.Pointer(x)
   371		} else {
   372			var s *mspan
   373			npage := uintptr(n) >> _PageShift
   374			log2npage := stacklog2(npage)
   375	
   376			// Try to get a stack from the large stack cache.
   377			lock(&stackLarge.lock)
   378			if !stackLarge.free[log2npage].isEmpty() {
   379				s = stackLarge.free[log2npage].first
   380				stackLarge.free[log2npage].remove(s)
   381			}
   382			unlock(&stackLarge.lock)
   383	
   384			if s == nil {
   385				// Allocate a new stack from the heap.
   386				s = mheap_.allocManual(npage, &memstats.stacks_inuse)
   387				if s == nil {
   388					throw("out of memory")
   389				}
   390				osStackAlloc(s)
   391				s.elemsize = uintptr(n)
   392			}
   393			v = unsafe.Pointer(s.base())
   394		}
   395	
   396		if raceenabled {
   397			racemalloc(v, uintptr(n))
   398		}
   399		if msanenabled {
   400			msanmalloc(v, uintptr(n))
   401		}
   402		if stackDebug >= 1 {
   403			print("  allocated ", v, "\n")
   404		}
   405		return stack{uintptr(v), uintptr(v) + uintptr(n)}
   406	}
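	// As a rough sketch of the two paths, assuming the usual 64-bit Linux
	// values (_FixedStack = 2048, _NumStackOrders = 4, _StackCacheSize = 32 KiB,
	// 8 KiB runtime pages): a 4 KiB request is order 1 and comes from the
	// per-P cache or stackpool, while a 64 KiB request takes the large path
	// with npage = 8 and log2npage = 3 in stackLarge.free.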
   407	
   408	// stackfree frees an n byte stack allocation at stk.
   409	//
   410	// stackfree must run on the system stack because it uses per-P
   411	// resources and must not split the stack.
   412	//
   413	//go:systemstack
   414	func stackfree(stk stack) {
   415		gp := getg()
   416		v := unsafe.Pointer(stk.lo)
   417		n := stk.hi - stk.lo
   418		if n&(n-1) != 0 {
   419			throw("stack not a power of 2")
   420		}
   421		if stk.lo+n < stk.hi {
   422			throw("bad stack size")
   423		}
   424		if stackDebug >= 1 {
   425			println("stackfree", v, n)
   426			memclrNoHeapPointers(v, n) // for testing, clobber stack data
   427		}
   428		if debug.efence != 0 || stackFromSystem != 0 {
   429			if debug.efence != 0 || stackFaultOnFree != 0 {
   430				sysFault(v, n)
   431			} else {
   432				sysFree(v, n, &memstats.stacks_sys)
   433			}
   434			return
   435		}
   436		if msanenabled {
   437			msanfree(v, n)
   438		}
   439		if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
   440			order := uint8(0)
   441			n2 := n
   442			for n2 > _FixedStack {
   443				order++
   444				n2 >>= 1
   445			}
   446			x := gclinkptr(v)
   447			c := gp.m.mcache
   448			if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" {
   449				lock(&stackpoolmu)
   450				stackpoolfree(x, order)
   451				unlock(&stackpoolmu)
   452			} else {
   453				if c.stackcache[order].size >= _StackCacheSize {
   454					stackcacherelease(c, order)
   455				}
   456				x.ptr().next = c.stackcache[order].list
   457				c.stackcache[order].list = x
   458				c.stackcache[order].size += n
   459			}
   460		} else {
   461			s := spanOfUnchecked(uintptr(v))
   462			if s.state != mSpanManual {
   463				println(hex(s.base()), v)
   464				throw("bad span state")
   465			}
   466			if gcphase == _GCoff {
   467				// Free the stack immediately if we're
   468				// sweeping.
   469				osStackFree(s)
   470				mheap_.freeManual(s, &memstats.stacks_inuse)
   471			} else {
   472				// If the GC is running, we can't return a
   473				// stack span to the heap because it could be
   474				// reused as a heap span, and this state
   475				// change would race with GC. Add it to the
   476				// large stack cache instead.
   477				log2npage := stacklog2(s.npages)
   478				lock(&stackLarge.lock)
   479				stackLarge.free[log2npage].insert(s)
   480				unlock(&stackLarge.lock)
   481			}
   482		}
   483	}
   484	
   485	var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
   486	
   487	var ptrnames = []string{
   488		0: "scalar",
   489		1: "ptr",
   490	}
   491	
   492	// Stack frame layout
   493	//
   494	// (x86)
   495	// +------------------+
   496	// | args from caller |
   497	// +------------------+ <- frame->argp
   498	// |  return address  |
   499	// +------------------+
   500	// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
   501	// +------------------+ <- frame->varp
   502	// |     locals       |
   503	// +------------------+
   504	// |  args to callee  |
   505	// +------------------+ <- frame->sp
   506	//
   507	// (arm)
   508	// +------------------+
   509	// | args from caller |
   510	// +------------------+ <- frame->argp
   511	// | caller's retaddr |
   512	// +------------------+ <- frame->varp
   513	// |     locals       |
   514	// +------------------+
   515	// |  args to callee  |
   516	// +------------------+
   517	// |  return address  |
   518	// +------------------+ <- frame->sp
   519	
   520	type adjustinfo struct {
   521		old   stack
   522		delta uintptr // ptr distance from old to new stack (newbase - oldbase)
   523		cache pcvalueCache
   524	
   525		// sghi is the highest sudog.elem on the stack.
   526		sghi uintptr
   527	}
   528	
   529	// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
   530	// If so, it rewrites *vpp to point into the new stack.
   531	func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
   532		pp := (*uintptr)(vpp)
   533		p := *pp
   534		if stackDebug >= 4 {
   535			print("        ", pp, ":", hex(p), "\n")
   536		}
   537		if adjinfo.old.lo <= p && p < adjinfo.old.hi {
   538			*pp = p + adjinfo.delta
   539			if stackDebug >= 3 {
   540				print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
   541			}
   542		}
   543	}
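	// For example, suppose copystack moves a goroutine from an old stack
	// [0xc000100000, 0xc000102000) to a larger one [0xc000200000, 0xc000204000)
	// (hypothetical addresses); delta is new.hi - old.hi = 0x102000, so a slot
	// holding 0xc000101800 is rewritten to 0xc000203800, while a slot holding
	// a heap or globals address outside the old range is left untouched.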
   544	
   545	// Information from the compiler about the layout of stack frames.
   546	type bitvector struct {
   547		n        int32 // # of bits
   548		bytedata *uint8
   549	}
   550	
   551	// ptrbit returns the i'th bit in bv.
   552	// ptrbit is less efficient than iterating directly over bitvector bits,
   553	// and should only be used in non-performance-critical code.
   554	// See adjustpointers for an example of a high-efficiency walk of a bitvector.
   555	func (bv *bitvector) ptrbit(i uintptr) uint8 {
   556		b := *(addb(bv.bytedata, i/8))
   557		return (b >> (i % 8)) & 1
   558	}
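	// For example, ptrbit(11) loads byte 11/8 = 1 of bv.bytedata and extracts
	// bit 11%8 = 3, the liveness bit for the twelfth pointer-sized word that
	// the bitvector describes.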
   559	
   560	// bv describes the memory starting at address scanp.
   561	// Adjust any pointers contained therein.
   562	func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
   563		minp := adjinfo.old.lo
   564		maxp := adjinfo.old.hi
   565		delta := adjinfo.delta
   566		num := uintptr(bv.n)
   567		// If this frame might contain channel receive slots, use CAS
   568		// to adjust pointers. If the slot hasn't been received into
   569		// yet, it may contain stack pointers and a concurrent send
   570		// could race with adjusting those pointers. (The sent value
   571		// itself can never contain stack pointers.)
   572		useCAS := uintptr(scanp) < adjinfo.sghi
   573		for i := uintptr(0); i < num; i += 8 {
   574			if stackDebug >= 4 {
   575				for j := uintptr(0); j < 8; j++ {
   576					print("        ", add(scanp, (i+j)*sys.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*sys.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
   577				}
   578			}
   579			b := *(addb(bv.bytedata, i/8))
   580			for b != 0 {
   581				j := uintptr(sys.Ctz8(b))
   582				b &= b - 1
   583				pp := (*uintptr)(add(scanp, (i+j)*sys.PtrSize))
   584			retry:
   585				p := *pp
   586				if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
   587					// Looks like a junk value in a pointer slot.
   588					// Live analysis wrong?
   589					getg().m.traceback = 2
   590					print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
   591					throw("invalid pointer found on stack")
   592				}
   593				if minp <= p && p < maxp {
   594					if stackDebug >= 3 {
   595						print("adjust ptr ", hex(p), " ", funcname(f), "\n")
   596					}
   597					if useCAS {
   598						ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
   599						if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
   600							goto retry
   601						}
   602					} else {
   603						*pp = p + delta
   604					}
   605				}
   606			}
   607		}
   608	}
   609	
   610	// Note: the argument/return area is adjusted by the callee.
   611	func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
   612		adjinfo := (*adjustinfo)(arg)
   613		if frame.continpc == 0 {
   614			// Frame is dead.
   615			return true
   616		}
   617		f := frame.fn
   618		if stackDebug >= 2 {
   619			print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
   620		}
   621		if f.funcID == funcID_systemstack_switch {
    622		// A special routine at the bottom of the stack of a goroutine that does a systemstack call.
   623			// We will allow it to be copied even though we don't
   624			// have full GC info for it (because it is written in asm).
   625			return true
   626		}
   627	
   628		locals, args, objs := getStackMap(frame, &adjinfo.cache, true)
   629	
   630		// Adjust local variables if stack frame has been allocated.
   631		if locals.n > 0 {
   632			size := uintptr(locals.n) * sys.PtrSize
   633			adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
   634		}
   635	
   636		// Adjust saved base pointer if there is one.
   637		if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
   638			if !framepointer_enabled {
   639				print("runtime: found space for saved base pointer, but no framepointer experiment\n")
   640				print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
   641				throw("bad frame layout")
   642			}
   643			if stackDebug >= 3 {
   644				print("      saved bp\n")
   645			}
   646			if debugCheckBP {
   647				// Frame pointers should always point to the next higher frame on
   648				// the Go stack (or be nil, for the top frame on the stack).
   649				bp := *(*uintptr)(unsafe.Pointer(frame.varp))
   650				if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
   651					println("runtime: found invalid frame pointer")
   652					print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
   653					throw("bad frame pointer")
   654				}
   655			}
   656			adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
   657		}
   658	
   659		// Adjust arguments.
   660		if args.n > 0 {
   661			if stackDebug >= 3 {
   662				print("      args\n")
   663			}
   664			adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
   665		}
   666	
   667		// Adjust pointers in all stack objects (whether they are live or not).
   668		// See comments in mgcmark.go:scanframeworker.
   669		if frame.varp != 0 {
   670			for _, obj := range objs {
   671				off := obj.off
   672				base := frame.varp // locals base pointer
   673				if off >= 0 {
   674					base = frame.argp // arguments and return values base pointer
   675				}
   676				p := base + uintptr(off)
   677				if p < frame.sp {
   678					// Object hasn't been allocated in the frame yet.
   679					// (Happens when the stack bounds check fails and
   680					// we call into morestack.)
   681					continue
   682				}
   683				t := obj.typ
   684				gcdata := t.gcdata
   685				var s *mspan
   686				if t.kind&kindGCProg != 0 {
   687					// See comments in mgcmark.go:scanstack
   688					s = materializeGCProg(t.ptrdata, gcdata)
   689					gcdata = (*byte)(unsafe.Pointer(s.startAddr))
   690				}
   691				for i := uintptr(0); i < t.ptrdata; i += sys.PtrSize {
   692					if *addb(gcdata, i/(8*sys.PtrSize))>>(i/sys.PtrSize&7)&1 != 0 {
   693						adjustpointer(adjinfo, unsafe.Pointer(p+i))
   694					}
   695				}
   696				if s != nil {
   697					dematerializeGCProg(s)
   698				}
   699			}
   700		}
   701	
   702		return true
   703	}
   704	
   705	func adjustctxt(gp *g, adjinfo *adjustinfo) {
   706		adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
   707		if !framepointer_enabled {
   708			return
   709		}
   710		if debugCheckBP {
   711			bp := gp.sched.bp
   712			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
   713				println("runtime: found invalid top frame pointer")
   714				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
   715				throw("bad top frame pointer")
   716			}
   717		}
   718		adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
   719	}
   720	
   721	func adjustdefers(gp *g, adjinfo *adjustinfo) {
   722		// Adjust pointers in the Defer structs.
   723		// We need to do this first because we need to adjust the
   724		// defer.link fields so we always work on the new stack.
   725		adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
   726		for d := gp._defer; d != nil; d = d.link {
   727			adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
   728			adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
   729			adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
   730			adjustpointer(adjinfo, unsafe.Pointer(&d.link))
   731		}
   732	
   733		// Adjust defer argument blocks the same way we adjust active stack frames.
   734		// Note: this code is after the loop above, so that if a defer record is
   735		// stack allocated, we work on the copy in the new stack.
   736		tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
   737	}
   738	
   739	func adjustpanics(gp *g, adjinfo *adjustinfo) {
   740		// Panics are on stack and already adjusted.
   741		// Update pointer to head of list in G.
   742		adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
   743	}
   744	
   745	func adjustsudogs(gp *g, adjinfo *adjustinfo) {
   746		// the data elements pointed to by a SudoG structure
   747		// might be in the stack.
   748		for s := gp.waiting; s != nil; s = s.waitlink {
   749			adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
   750		}
   751	}
   752	
   753	func fillstack(stk stack, b byte) {
   754		for p := stk.lo; p < stk.hi; p++ {
   755			*(*byte)(unsafe.Pointer(p)) = b
   756		}
   757	}
   758	
   759	func findsghi(gp *g, stk stack) uintptr {
   760		var sghi uintptr
   761		for sg := gp.waiting; sg != nil; sg = sg.waitlink {
   762			p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
   763			if stk.lo <= p && p < stk.hi && p > sghi {
   764				sghi = p
   765			}
   766		}
   767		return sghi
   768	}
   769	
   770	// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
   771	// stack they refer to while synchronizing with concurrent channel
   772	// operations. It returns the number of bytes of stack copied.
   773	func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
   774		if gp.waiting == nil {
   775			return 0
   776		}
   777	
   778		// Lock channels to prevent concurrent send/receive.
   779		// It's important that we *only* do this for async
   780		// copystack; otherwise, gp may be in the middle of
   781		// putting itself on wait queues and this would
   782		// self-deadlock.
   783		var lastc *hchan
   784		for sg := gp.waiting; sg != nil; sg = sg.waitlink {
   785			if sg.c != lastc {
   786				lock(&sg.c.lock)
   787			}
   788			lastc = sg.c
   789		}
   790	
   791		// Adjust sudogs.
   792		adjustsudogs(gp, adjinfo)
   793	
    794		// Copy the part of the stack the sudogs point into
   795		// while holding the lock to prevent races on
   796		// send/receive slots.
   797		var sgsize uintptr
   798		if adjinfo.sghi != 0 {
   799			oldBot := adjinfo.old.hi - used
   800			newBot := oldBot + adjinfo.delta
   801			sgsize = adjinfo.sghi - oldBot
   802			memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
   803		}
   804	
   805		// Unlock channels.
   806		lastc = nil
   807		for sg := gp.waiting; sg != nil; sg = sg.waitlink {
   808			if sg.c != lastc {
   809				unlock(&sg.c.lock)
   810			}
   811			lastc = sg.c
   812		}
   813	
   814		return sgsize
   815	}
   816	
   817	// Copies gp's stack to a new stack of a different size.
   818	// Caller must have changed gp status to Gcopystack.
   819	//
   820	// If sync is true, this is a self-triggered stack growth and, in
   821	// particular, no other G may be writing to gp's stack (e.g., via a
   822	// channel operation). If sync is false, copystack protects against
   823	// concurrent channel operations.
   824	func copystack(gp *g, newsize uintptr, sync bool) {
   825		if gp.syscallsp != 0 {
   826			throw("stack growth not allowed in system call")
   827		}
   828		old := gp.stack
   829		if old.lo == 0 {
   830			throw("nil stackbase")
   831		}
   832		used := old.hi - gp.sched.sp
   833	
   834		// allocate new stack
   835		new := stackalloc(uint32(newsize))
   836		if stackPoisonCopy != 0 {
   837			fillstack(new, 0xfd)
   838		}
   839		if stackDebug >= 1 {
   840			print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
   841		}
   842	
   843		// Compute adjustment.
   844		var adjinfo adjustinfo
   845		adjinfo.old = old
   846		adjinfo.delta = new.hi - old.hi
   847	
   848		// Adjust sudogs, synchronizing with channel ops if necessary.
   849		ncopy := used
   850		if sync {
   851			adjustsudogs(gp, &adjinfo)
   852		} else {
    853		// sudogs can point into the stack. During concurrent
   854			// shrinking, these areas may be written to. Find the
   855			// highest such pointer so we can handle everything
   856			// there and below carefully. (This shouldn't be far
   857			// from the bottom of the stack, so there's little
   858			// cost in handling everything below it carefully.)
   859			adjinfo.sghi = findsghi(gp, old)
   860	
   861			// Synchronize with channel ops and copy the part of
   862			// the stack they may interact with.
   863			ncopy -= syncadjustsudogs(gp, used, &adjinfo)
   864		}
   865	
   866		// Copy the stack (or the rest of it) to the new location
   867		memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)
   868	
   869		// Adjust remaining structures that have pointers into stacks.
   870		// We have to do most of these before we traceback the new
   871		// stack because gentraceback uses them.
   872		adjustctxt(gp, &adjinfo)
   873		adjustdefers(gp, &adjinfo)
   874		adjustpanics(gp, &adjinfo)
   875		if adjinfo.sghi != 0 {
   876			adjinfo.sghi += adjinfo.delta
   877		}
   878	
   879		// Swap out old stack for new one
   880		gp.stack = new
   881		gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
   882		gp.sched.sp = new.hi - used
   883		gp.stktopsp += adjinfo.delta
   884	
   885		// Adjust pointers in the new stack.
   886		gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)
   887	
   888		// free old stack
   889		if stackPoisonCopy != 0 {
   890			fillstack(old, 0xfc)
   891		}
   892		stackfree(old)
   893	}
   894	
   895	// round x up to a power of 2.
    896	// round2 rounds x up to a power of 2.
   897		s := uint(0)
   898		for 1<<s < x {
   899			s++
   900		}
   901		return 1 << s
   902	}
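	// For example, round2(1) == 1, round2(3) == 4, and round2(2600) == 4096.
	// Callers such as malg (proc.go) round requested sizes this way so that
	// stackalloc's power-of-2 requirement always holds.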
   903	
   904	// Called from runtime·morestack when more stack is needed.
   905	// Allocate larger stack and relocate to new stack.
   906	// Stack growth is multiplicative, for constant amortized cost.
   907	//
   908	// g->atomicstatus will be Grunning or Gscanrunning upon entry.
   909	// If the GC is trying to stop this g then it will set preemptscan to true.
   910	//
   911	// This must be nowritebarrierrec because it can be called as part of
   912	// stack growth from other nowritebarrierrec functions, but the
   913	// compiler doesn't check this.
   914	//
   915	//go:nowritebarrierrec
   916	func newstack() {
   917		thisg := getg()
    918		// TODO: double-check all uses of gp; none of them should be getg().
   919		if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
   920			throw("stack growth after fork")
   921		}
   922		if thisg.m.morebuf.g.ptr() != thisg.m.curg {
   923			print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
   924			morebuf := thisg.m.morebuf
   925			traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
   926			throw("runtime: wrong goroutine in newstack")
   927		}
   928	
   929		gp := thisg.m.curg
   930	
   931		if thisg.m.curg.throwsplit {
   932			// Update syscallsp, syscallpc in case traceback uses them.
   933			morebuf := thisg.m.morebuf
   934			gp.syscallsp = morebuf.sp
   935			gp.syscallpc = morebuf.pc
   936			pcname, pcoff := "(unknown)", uintptr(0)
   937			f := findfunc(gp.sched.pc)
   938			if f.valid() {
   939				pcname = funcname(f)
   940				pcoff = gp.sched.pc - f.entry
   941			}
   942			print("runtime: newstack at ", pcname, "+", hex(pcoff),
   943				" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
   944				"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
   945				"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
   946	
   947			thisg.m.traceback = 2 // Include runtime frames
   948			traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
   949			throw("runtime: stack split at bad time")
   950		}
   951	
   952		morebuf := thisg.m.morebuf
   953		thisg.m.morebuf.pc = 0
   954		thisg.m.morebuf.lr = 0
   955		thisg.m.morebuf.sp = 0
   956		thisg.m.morebuf.g = 0
   957	
   958		// NOTE: stackguard0 may change underfoot, if another thread
   959		// is about to try to preempt gp. Read it just once and use that same
   960		// value now and below.
   961		preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt
   962	
   963		// Be conservative about where we preempt.
   964		// We are interested in preempting user Go code, not runtime code.
   965		// If we're holding locks, mallocing, or preemption is disabled, don't
   966		// preempt.
   967		// This check is very early in newstack so that even the status change
   968		// from Grunning to Gwaiting and back doesn't happen in this case.
   969		// That status change by itself can be viewed as a small preemption,
   970		// because the GC might change Gwaiting to Gscanwaiting, and then
   971		// this goroutine has to wait for the GC to finish before continuing.
   972		// If the GC is in some way dependent on this goroutine (for example,
   973		// it needs a lock held by the goroutine), that small preemption turns
   974		// into a real deadlock.
   975		if preempt {
   976			if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
   977				// Let the goroutine keep running for now.
   978				// gp->preempt is set, so it will be preempted next time.
   979				gp.stackguard0 = gp.stack.lo + _StackGuard
   980				gogo(&gp.sched) // never return
   981			}
   982		}
   983	
   984		if gp.stack.lo == 0 {
   985			throw("missing stack in newstack")
   986		}
   987		sp := gp.sched.sp
   988		if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 || sys.ArchFamily == sys.WASM {
   989			// The call to morestack cost a word.
   990			sp -= sys.PtrSize
   991		}
   992		if stackDebug >= 1 || sp < gp.stack.lo {
   993			print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
   994				"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
   995				"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
   996		}
   997		if sp < gp.stack.lo {
   998			print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
   999			print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
  1000			throw("runtime: split stack overflow")
  1001		}
  1002	
  1003		if preempt {
  1004			if gp == thisg.m.g0 {
  1005				throw("runtime: preempt g0")
  1006			}
  1007			if thisg.m.p == 0 && thisg.m.locks == 0 {
  1008				throw("runtime: g is running but p is not")
  1009			}
  1010			// Synchronize with scang.
  1011			casgstatus(gp, _Grunning, _Gwaiting)
  1012			if gp.preemptscan {
  1013				for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
  1014					// Likely to be racing with the GC as
  1015					// it sees a _Gwaiting and does the
  1016					// stack scan. If so, gcworkdone will
  1017					// be set and gcphasework will simply
  1018					// return.
  1019				}
  1020				if !gp.gcscandone {
  1021					// gcw is safe because we're on the
  1022					// system stack.
  1023					gcw := &gp.m.p.ptr().gcw
  1024					scanstack(gp, gcw)
  1025					gp.gcscandone = true
  1026				}
  1027				gp.preemptscan = false
  1028				gp.preempt = false
  1029				casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
  1030				// This clears gcscanvalid.
  1031				casgstatus(gp, _Gwaiting, _Grunning)
  1032				gp.stackguard0 = gp.stack.lo + _StackGuard
  1033				gogo(&gp.sched) // never return
  1034			}
  1035	
  1036			// Act like goroutine called runtime.Gosched.
  1037			casgstatus(gp, _Gwaiting, _Grunning)
  1038			gopreempt_m(gp) // never return
  1039		}
  1040	
  1041		// Allocate a bigger segment and move the stack.
  1042		oldsize := gp.stack.hi - gp.stack.lo
  1043		newsize := oldsize * 2
  1044		if newsize > maxstacksize {
  1045			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
  1046			throw("stack overflow")
  1047		}
  1048	
  1049		// The goroutine must be executing in order to call newstack,
  1050		// so it must be Grunning (or Gscanrunning).
  1051		casgstatus(gp, _Grunning, _Gcopystack)
  1052	
  1053		// The concurrent GC will not scan the stack while we are doing the copy since
  1054		// the gp is in a Gcopystack status.
  1055		copystack(gp, newsize, true)
  1056		if stackDebug >= 1 {
  1057			print("stack grow done\n")
  1058		}
  1059		casgstatus(gp, _Gcopystack, _Grunning)
  1060		gogo(&gp.sched)
  1061	}
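	// Because newstack always doubles, a goroutine that eventually needs an
	// n-byte stack starts at _StackMin and is copied only O(log n) times
	// (2 KiB -> 4 KiB -> 8 KiB -> ...), so the total bytes copied are
	// proportional to n and the amortized cost per byte of stack used is
	// constant, as the comment above promises.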
  1062	
  1063	//go:nosplit
  1064	func nilfunc() {
  1065		*(*uint8)(nil) = 0
  1066	}
  1067	
  1068	// adjust Gobuf as if it executed a call to fn
  1069	// and then did an immediate gosave.
  1070	func gostartcallfn(gobuf *gobuf, fv *funcval) {
  1071		var fn unsafe.Pointer
  1072		if fv != nil {
  1073			fn = unsafe.Pointer(fv.fn)
  1074		} else {
  1075			fn = unsafe.Pointer(funcPC(nilfunc))
  1076		}
  1077		gostartcall(gobuf, fn, unsafe.Pointer(fv))
  1078	}
  1079	
  1080	// Maybe shrink the stack being used by gp.
  1081	// Called at garbage collection time.
  1082	// gp must be stopped, but the world need not be.
  1083	func shrinkstack(gp *g) {
  1084		gstatus := readgstatus(gp)
  1085		if gp.stack.lo == 0 {
  1086			throw("missing stack in shrinkstack")
  1087		}
  1088		if gstatus&_Gscan == 0 {
  1089			throw("bad status in shrinkstack")
  1090		}
  1091	
  1092		if debug.gcshrinkstackoff > 0 {
  1093			return
  1094		}
  1095		f := findfunc(gp.startpc)
  1096		if f.valid() && f.funcID == funcID_gcBgMarkWorker {
  1097			// We're not allowed to shrink the gcBgMarkWorker
  1098			// stack (see gcBgMarkWorker for explanation).
  1099			return
  1100		}
  1101	
  1102		oldsize := gp.stack.hi - gp.stack.lo
  1103		newsize := oldsize / 2
  1104		// Don't shrink the allocation below the minimum-sized stack
  1105		// allocation.
  1106		if newsize < _FixedStack {
  1107			return
  1108		}
  1109		// Compute how much of the stack is currently in use and only
  1110		// shrink the stack if gp is using less than a quarter of its
  1111		// current stack. The currently used stack includes everything
  1112		// down to the SP plus the stack guard space that ensures
  1113		// there's room for nosplit functions.
  1114		avail := gp.stack.hi - gp.stack.lo
  1115		if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
  1116			return
  1117		}
  1118	
  1119		// We can't copy the stack if we're in a syscall.
  1120		// The syscall might have pointers into the stack.
  1121		if gp.syscallsp != 0 {
  1122			return
  1123		}
  1124		if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
  1125			return
  1126		}
  1127	
  1128		if stackDebug > 0 {
  1129			print("shrinking stack ", oldsize, "->", newsize, "\n")
  1130		}
  1131	
  1132		copystack(gp, newsize, false)
  1133	}
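	// As a concrete reading of the quarter rule above, for an 8 KiB stack:
	// used = (hi - sched.sp) + _StackLimit must come to less than 2 KiB for
	// the shrink to 4 KiB to happen, so with _StackLimit = 752 on a typical
	// linux/amd64 build the goroutine must have well under 1.5 KiB of live
	// frames; otherwise the stack is left alone until a later GC.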
  1134	
  1135	// freeStackSpans frees unused stack spans at the end of GC.
  1136	func freeStackSpans() {
  1137		lock(&stackpoolmu)
  1138	
  1139		// Scan stack pools for empty stack spans.
  1140		for order := range stackpool {
  1141			list := &stackpool[order]
  1142			for s := list.first; s != nil; {
  1143				next := s.next
  1144				if s.allocCount == 0 {
  1145					list.remove(s)
  1146					s.manualFreeList = 0
  1147					osStackFree(s)
  1148					mheap_.freeManual(s, &memstats.stacks_inuse)
  1149				}
  1150				s = next
  1151			}
  1152		}
  1153	
  1154		unlock(&stackpoolmu)
  1155	
  1156		// Free large stack spans.
  1157		lock(&stackLarge.lock)
  1158		for i := range stackLarge.free {
  1159			for s := stackLarge.free[i].first; s != nil; {
  1160				next := s.next
  1161				stackLarge.free[i].remove(s)
  1162				osStackFree(s)
  1163				mheap_.freeManual(s, &memstats.stacks_inuse)
  1164				s = next
  1165			}
  1166		}
  1167		unlock(&stackLarge.lock)
  1168	}
  1169	
  1170	// getStackMap returns the locals and arguments live pointer maps, and
  1171	// stack object list for frame.
  1172	func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) {
  1173		targetpc := frame.continpc
  1174		if targetpc == 0 {
  1175			// Frame is dead. Return empty bitvectors.
  1176			return
  1177		}
  1178	
  1179		f := frame.fn
  1180		pcdata := int32(-1)
  1181		if targetpc != f.entry {
  1182			// Back up to the CALL. If we're at the function entry
  1183			// point, we want to use the entry map (-1), even if
  1184			// the first instruction of the function changes the
  1185			// stack map.
  1186			targetpc--
  1187			pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
  1188		}
  1189		if pcdata == -1 {
  1190			// We do not have a valid pcdata value but there might be a
  1191			// stackmap for this function. It is likely that we are looking
   1192		// at the function prologue; assume so and hope for the best.
  1193			pcdata = 0
  1194		}
  1195	
  1196		// Local variables.
  1197		size := frame.varp - frame.sp
  1198		var minsize uintptr
  1199		switch sys.ArchFamily {
  1200		case sys.ARM64:
  1201			minsize = sys.SpAlign
  1202		default:
  1203			minsize = sys.MinFrameSize
  1204		}
  1205		if size > minsize {
  1206			var stkmap *stackmap
  1207			stackid := pcdata
  1208			if f.funcID != funcID_debugCallV1 {
  1209				stkmap = (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
  1210			} else {
  1211				// debugCallV1's stack map is the register map
  1212				// at its call site.
  1213				callerPC := frame.lr
  1214				caller := findfunc(callerPC)
  1215				if !caller.valid() {
  1216					println("runtime: debugCallV1 called by unknown caller", hex(callerPC))
  1217					throw("bad debugCallV1")
  1218				}
  1219				stackid = int32(-1)
  1220				if callerPC != caller.entry {
  1221					callerPC--
  1222					stackid = pcdatavalue(caller, _PCDATA_RegMapIndex, callerPC, cache)
  1223				}
  1224				if stackid == -1 {
  1225					stackid = 0 // in prologue
  1226				}
  1227				stkmap = (*stackmap)(funcdata(caller, _FUNCDATA_RegPointerMaps))
  1228			}
  1229			if stkmap == nil || stkmap.n <= 0 {
  1230				print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
  1231				throw("missing stackmap")
  1232			}
  1233			// If nbit == 0, there's no work to do.
  1234			if stkmap.nbit > 0 {
  1235				if stackid < 0 || stackid >= stkmap.n {
  1236					// don't know where we are
  1237					print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
  1238					throw("bad symbol table")
  1239				}
  1240				locals = stackmapdata(stkmap, stackid)
  1241				if stackDebug >= 3 && debug {
  1242					print("      locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
  1243				}
  1244			} else if stackDebug >= 3 && debug {
  1245				print("      no locals to adjust\n")
  1246			}
  1247		}
  1248	
  1249		// Arguments.
  1250		if frame.arglen > 0 {
  1251			if frame.argmap != nil {
  1252				// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
  1253				// In this case, arglen specifies how much of the args section is actually live.
  1254				// (It could be either all the args + results, or just the args.)
  1255				args = *frame.argmap
  1256				n := int32(frame.arglen / sys.PtrSize)
  1257				if n < args.n {
  1258					args.n = n // Don't use more of the arguments than arglen.
  1259				}
  1260			} else {
  1261				stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
  1262				if stackmap == nil || stackmap.n <= 0 {
  1263					print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
  1264					throw("missing stackmap")
  1265				}
  1266				if pcdata < 0 || pcdata >= stackmap.n {
  1267					// don't know where we are
  1268					print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
  1269					throw("bad symbol table")
  1270				}
  1271				if stackmap.nbit > 0 {
  1272					args = stackmapdata(stackmap, pcdata)
  1273				}
  1274			}
  1275		}
  1276	
  1277		// stack objects.
  1278		p := funcdata(f, _FUNCDATA_StackObjects)
  1279		if p != nil {
  1280			n := *(*uintptr)(p)
  1281			p = add(p, sys.PtrSize)
  1282			*(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)}
  1283			// Note: the noescape above is needed to keep
  1284			// getStackMap from "leaking param content:
  1285			// frame".  That leak propagates up to getgcmask, then
  1286			// GCMask, then verifyGCInfo, which converts the stack
  1287			// gcinfo tests into heap gcinfo tests :(
  1288		}
  1289	
  1290		return
  1291	}
  1292	
  1293	// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
  1294	// This record must match the generator code in cmd/compile/internal/gc/ssa.go:emitStackObjects.
  1295	type stackObjectRecord struct {
  1296		// offset in frame
  1297		// if negative, offset from varp
  1298		// if non-negative, offset from argp
  1299		off int
  1300		typ *_type
  1301	}
  1302	
  1303	// This is exported as ABI0 via linkname so obj can call it.
  1304	//
  1305	//go:nosplit
  1306	//go:linkname morestackc
  1307	func morestackc() {
  1308		throw("attempt to execute system stack code on user stack")
  1309	}
  1310	
