...

Source file src/runtime/panic.go

     1	// Copyright 2014 The Go Authors. All rights reserved.
     2	// Use of this source code is governed by a BSD-style
     3	// license that can be found in the LICENSE file.
     4	
     5	package runtime
     6	
     7	import (
     8		"runtime/internal/atomic"
     9		"runtime/internal/sys"
    10		"unsafe"
    11	)
    12	
// Check to make sure we can really generate a panic. If the panic
// was generated from the runtime, or from inside malloc, then convert
// to a throw of msg.
// pc should be the program counter of the compiler-generated code that
// triggered this panic.
func panicCheck1(pc uintptr, msg string) {
	// A panic whose caller is in the runtime package indicates a runtime
	// bug, so escalate it to an unrecoverable throw.
	if sys.GoarchWasm == 0 && hasPrefix(funcname(findfunc(pc)), "runtime.") {
		// Note: wasm can't tail call, so we can't get the original caller's pc.
		throw(msg)
	}
	// TODO: is this redundant? How could we be in malloc
	// but not in the runtime? runtime/internal/*, maybe?
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		// panic allocates; panicking inside malloc would recurse, so throw.
		throw(msg)
	}
}
    30	
// Same as above, but calling from the runtime is allowed.
//
// Using this function is necessary for any panic that may be
// generated by runtime.sigpanic, since those are always called by the
// runtime.
func panicCheck2(err string) {
	// panic allocates, so to avoid recursive malloc, turn panics
	// during malloc into throws.
	// gp or gp.m may presumably be nil very early in startup or in odd
	// signal contexts, hence the defensive nil checks — TODO confirm.
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(err)
	}
}
    44	
    45	// Many of the following panic entry-points turn into throws when they
    46	// happen in various runtime contexts. These should never happen in
    47	// the runtime, and if they do, they indicate a serious issue and
    48	// should not be caught by user code.
    49	//
    50	// The panic{Index,Slice,divide,shift} functions are called by
    51	// code generated by the compiler for out of bounds index expressions,
    52	// out of bounds slice expressions, division by zero, and shift by negative.
    53	// The panicdivide (again), panicoverflow, panicfloat, and panicmem
    54	// functions are called by the signal handler when a signal occurs
    55	// indicating the respective problem.
    56	//
    57	// Since panic{Index,Slice,shift} are never called directly, and
    58	// since the runtime package should never have an out of bounds slice
    59	// or array reference or negative shift, if we see those functions called from the
    60	// runtime package we turn the panic into a throw. That will dump the
    61	// entire runtime stack for easier debugging.
    62	//
    63	// The entry points called by the signal handler will be called from
    64	// runtime.sigpanic, so we can't disallow calls from the runtime to
    65	// these (they always look like they're called from the runtime).
    66	// Hence, for these, we just check for clearly bad runtime conditions.
    67	//
    68	// The panic{Index,Slice} functions are implemented in assembly and tail call
    69	// to the goPanic{Index,Slice} functions below. This is done so we can use
    70	// a space-minimal register calling convention.
    71	
// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
// goPanicIndex reports a signed index x out of range for length y.
func goPanicIndex(x int, y int) {
	panicCheck1(getcallerpc(), "index out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
}
// goPanicIndexU is the unsigned-index variant of goPanicIndex.
func goPanicIndexU(x uint, y int) {
	panicCheck1(getcallerpc(), "index out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
}
    81	
// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
// Alen variants compare against len(s); Acap variants against cap(s).
// The trailing-U variants take an unsigned x.
func goPanicSliceAlen(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
}
func goPanicSliceAlenU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
}
func goPanicSliceAcap(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
}
func goPanicSliceAcapU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
}
    99	
// failures in the comparisons for s[x:y], 0 <= x <= y
// "B" is the low ("begin") bound of a two-index slice expression.
func goPanicSliceB(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
}
// goPanicSliceBU is the unsigned-x variant of goPanicSliceB.
func goPanicSliceBU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
}
   109	
// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
// Three-index slice expressions: Alen compares against len(s), Acap
// against cap(s); trailing-U variants take an unsigned x.
func goPanicSlice3Alen(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3AlenU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3Acap(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
}
func goPanicSlice3AcapU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
}
   127	
// failures in the comparisons for s[:x:y], 0 <= x <= y
// "B" is the middle index of a three-index slice expression.
func goPanicSlice3B(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
}
// goPanicSlice3BU is the unsigned-x variant of goPanicSlice3B.
func goPanicSlice3BU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
}
   137	
// failures in the comparisons for s[x:y:], 0 <= x <= y
// "C" is the low index of a three-index slice expression.
func goPanicSlice3C(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
}
// goPanicSlice3CU is the unsigned-x variant of goPanicSlice3C.
func goPanicSlice3CU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
}
   147	
// Implemented in assembly, as they take arguments in registers.
// Declared here to mark them as ABIInternal.
// Each panicX below tail-calls the corresponding goPanicX above
// (see the comment preceding the goPanic functions).
func panicIndex(x int, y int)
func panicIndexU(x uint, y int)
func panicSliceAlen(x int, y int)
func panicSliceAlenU(x uint, y int)
func panicSliceAcap(x int, y int)
func panicSliceAcapU(x uint, y int)
func panicSliceB(x int, y int)
func panicSliceBU(x uint, y int)
func panicSlice3Alen(x int, y int)
func panicSlice3AlenU(x uint, y int)
func panicSlice3Acap(x int, y int)
func panicSlice3AcapU(x uint, y int)
func panicSlice3B(x int, y int)
func panicSlice3BU(x uint, y int)
func panicSlice3C(x int, y int)
func panicSlice3CU(x uint, y int)
   166	
// shiftError is allocated once so panicshift itself need not allocate.
var shiftError = error(errorString("negative shift amount"))

// panicshift is called for a shift by a negative amount.
func panicshift() {
	panicCheck1(getcallerpc(), "negative shift amount")
	panic(shiftError)
}
   173	
// The following panics may also be raised by runtime.sigpanic (i.e.
// from within the runtime), so they use panicCheck2 rather than
// panicCheck1. Each error value is preallocated so the panic entry
// point itself does not allocate.
var divideError = error(errorString("integer divide by zero"))

func panicdivide() {
	panicCheck2("integer divide by zero")
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panicCheck2("integer overflow")
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panicCheck2("floating point error")
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(memoryError)
}
   201	
// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	if getg().m.curg != getg() {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// the arguments of fn are in a perilous state. The stack map
	// for deferproc does not describe them. So we can't let garbage
	// collection or stack copying trigger until we've copied them out
	// to somewhere safe. The memmove below does that.
	// Until the copy completes, we can only call nosplit routines.
	sp := getcallersp()
	// fn's arguments sit immediately after fn itself on the stack.
	argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
	callerpc := getcallerpc()

	d := newdefer(siz)
	if d._panic != nil {
		throw("deferproc: d.panic != nil after newdefer")
	}
	d.fn = fn
	d.pc = callerpc
	d.sp = sp
	// Copy the deferred call's arguments into the defer record.
	switch siz {
	case 0:
		// Do nothing.
	case sys.PtrSize:
		// Fast path: a single pointer-sized argument.
		*(*uintptr)(deferArgs(d)) = *(*uintptr)(unsafe.Pointer(argp))
	default:
		memmove(deferArgs(d), unsafe.Pointer(argp), uintptr(siz))
	}

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
   246	
// deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record must have its siz and fn fields initialized.
// All other fields can contain junk.
// The defer record must be immediately followed in memory by
// the arguments of the defer.
// Nosplit because the arguments on the stack won't be scanned
// until the defer record is spliced into the gp._defer list.
//go:nosplit
func deferprocStack(d *_defer) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}
	// siz and fn are already set.
	// The other fields are junk on entry to deferprocStack and
	// are initialized here.
	d.started = false
	d.heap = false // stack-allocated record: freedefer must not pool it
	d.sp = getcallersp()
	d.pc = getcallerpc()
	// The lines below implement:
	//   d.panic = nil
	//   d.link = gp._defer
	//   gp._defer = d
	// But without write barriers. The first two are writes to
	// the stack so they don't need a write barrier, and furthermore
	// are to uninitialized memory, so they must not use a write barrier.
	// The third write does not require a write barrier because we
	// explicitly mark all the defer structures, so we don't need to
	// keep track of pointers to them with a write barrier.
	*(*uintptr)(unsafe.Pointer(&d._panic)) = 0
	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
	*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))

	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
   286	
// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.

const (
	deferHeaderSize = unsafe.Sizeof(_defer{})               // size of the _defer record itself
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15          // header rounded up to a multiple of 16
	minDeferArgs    = minDeferAlloc - deferHeaderSize       // arg bytes that fit in the minimum allocation
)
   296	
   297	// defer size class for arg size sz
   298	//go:nosplit
   299	func deferclass(siz uintptr) uintptr {
   300		if siz <= minDeferArgs {
   301			return 0
   302		}
   303		return (siz - minDeferArgs + 15) / 16
   304	}
   305	
   306	// total size of memory block for defer with arg size sz
   307	func totaldefersize(siz uintptr) uintptr {
   308		if siz <= minDeferArgs {
   309			return minDeferAlloc
   310		}
   311		return deferHeaderSize + siz
   312	}
   313	
// Ensure that defer arg sizes that map to the same defer size class
// also map to the same malloc size class.
func testdefersizes() {
	// m[c] records the malloc size class first observed for defer class c.
	var m [len(p{}.deferpool)]int32

	for i := range m {
		m[i] = -1 // -1 == class not yet seen
	}
	// Walk arg sizes upward until we run past the last defer class.
	for i := uintptr(0); ; i++ {
		defersc := deferclass(i)
		if defersc >= uintptr(len(m)) {
			break
		}
		siz := roundupsize(totaldefersize(i))
		if m[defersc] < 0 {
			m[defersc] = int32(siz)
			continue
		}
		if m[defersc] != int32(siz) {
			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
			throw("bad defer size class")
		}
	}
}
   338	
// The arguments associated with a deferred call are stored
// immediately after the _defer header in memory.
// deferArgs returns a pointer to d's argument area, or nil if d has
// no arguments.
//go:nosplit
func deferArgs(d *_defer) unsafe.Pointer {
	if d.siz == 0 {
		// Avoid pointer past the defer allocation.
		return nil
	}
	return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
}
   349	
var deferType *_type // type of _defer struct

func init() {
	// Recover the *_type describing _defer by boxing a typed nil
	// pointer into an interface and reading the elem field of the
	// interface's *ptrtype descriptor.
	var x interface{}
	x = (*_defer)(nil)
	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
}
   357	
   358	// Allocate a Defer, usually using per-P pool.
   359	// Each defer must be released with freedefer.
   360	//
   361	// This must not grow the stack because there may be a frame without
   362	// stack map information when this is called.
   363	//
   364	//go:nosplit
   365	func newdefer(siz int32) *_defer {
   366		var d *_defer
   367		sc := deferclass(uintptr(siz))
   368		gp := getg()
   369		if sc < uintptr(len(p{}.deferpool)) {
   370			pp := gp.m.p.ptr()
   371			if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
   372				// Take the slow path on the system stack so
   373				// we don't grow newdefer's stack.
   374				systemstack(func() {
   375					lock(&sched.deferlock)
   376					for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
   377						d := sched.deferpool[sc]
   378						sched.deferpool[sc] = d.link
   379						d.link = nil
   380						pp.deferpool[sc] = append(pp.deferpool[sc], d)
   381					}
   382					unlock(&sched.deferlock)
   383				})
   384			}
   385			if n := len(pp.deferpool[sc]); n > 0 {
   386				d = pp.deferpool[sc][n-1]
   387				pp.deferpool[sc][n-1] = nil
   388				pp.deferpool[sc] = pp.deferpool[sc][:n-1]
   389			}
   390		}
   391		if d == nil {
   392			// Allocate new defer+args.
   393			systemstack(func() {
   394				total := roundupsize(totaldefersize(uintptr(siz)))
   395				d = (*_defer)(mallocgc(total, deferType, true))
   396			})
   397			if debugCachedWork {
   398				// Duplicate the tail below so if there's a
   399				// crash in checkPut we can tell if d was just
   400				// allocated or came from the pool.
   401				d.siz = siz
   402				d.link = gp._defer
   403				gp._defer = d
   404				return d
   405			}
   406		}
   407		d.siz = siz
   408		d.heap = true
   409		d.link = gp._defer
   410		gp._defer = d
   411		return d
   412	}
   413	
// Free the given defer.
// The defer cannot be used after this call.
//
// This must not grow the stack because there may be a frame without a
// stack map when this is called.
//
//go:nosplit
func freedefer(d *_defer) {
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	if !d.heap {
		// Stack-allocated defer record: nothing to free or pool.
		return
	}
	sc := deferclass(uintptr(d.siz))
	if sc >= uintptr(len(p{}.deferpool)) {
		// Too large for the pools; leave it to the GC.
		return
	}
	pp := getg().m.p.ptr()
	if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
		// Transfer half of local cache to the central cache.
		//
		// Take this slow path on the system stack so
		// we don't grow freedefer's stack.
		systemstack(func() {
			// Build a linked list (first..last) of the popped defers.
			var first, last *_defer
			for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
				n := len(pp.deferpool[sc])
				d := pp.deferpool[sc][n-1]
				pp.deferpool[sc][n-1] = nil
				pp.deferpool[sc] = pp.deferpool[sc][:n-1]
				if first == nil {
					first = d
				} else {
					last.link = d
				}
				last = d
			}
			// NOTE(review): the loop above runs at least once only if
			// cap(pp.deferpool[sc]) > 0; otherwise last would be nil
			// here — presumably the pool capacity is always nonzero.
			lock(&sched.deferlock)
			last.link = sched.deferpool[sc]
			sched.deferpool[sc] = first
			unlock(&sched.deferlock)
		})
	}

	// These lines used to be simply `*d = _defer{}` but that
	// started causing a nosplit stack overflow via typedmemmove.
	d.siz = 0
	d.started = false
	d.sp = 0
	d.pc = 0
	// d._panic and d.fn must be nil already.
	// If not, we would have called freedeferpanic or freedeferfn above,
	// both of which throw.
	d.link = nil

	pp.deferpool[sc] = append(pp.deferpool[sc], d)
}
   475	
// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}

// freedeferfn reports a defer freed while its fn was still set.
func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}
   487	
// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
// Cannot split the stack because we reuse the caller's frame to
// call the deferred function.

// The single argument isn't actually used - it just has its address
// taken so it can be matched against pending defers.
//go:nosplit
func deferreturn(arg0 uintptr) {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	sp := getcallersp()
	if d.sp != sp {
		// The topmost defer belongs to a different frame; nothing
		// to run for this caller.
		return
	}

	// Moving arguments around.
	//
	// Everything called after this point must be recursively
	// nosplit because the garbage collector won't know the form
	// of the arguments until the jmpdefer can flip the PC over to
	// fn.
	switch d.siz {
	case 0:
		// Do nothing.
	case sys.PtrSize:
		// Fast path: a single pointer-sized argument.
		*(*uintptr)(unsafe.Pointer(&arg0)) = *(*uintptr)(deferArgs(d))
	default:
		memmove(unsafe.Pointer(&arg0), deferArgs(d), uintptr(d.siz))
	}
	fn := d.fn
	d.fn = nil
	gp._defer = d.link
	freedefer(d)
	jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
}
   533	
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			// This defer was already started by an earlier panic;
			// abort that panic and discard the defer.
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}
		d.started = true
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit1()
}
   575	
   576	// Call all Error and String methods before freezing the world.
   577	// Used when crashing with panicking.
   578	func preprintpanics(p *_panic) {
   579		defer func() {
   580			if recover() != nil {
   581				throw("panic while printing panic value")
   582			}
   583		}()
   584		for p != nil {
   585			switch v := p.arg.(type) {
   586			case error:
   587				p.arg = v.Error()
   588			case stringer:
   589				p.arg = v.String()
   590			}
   591			p = p.link
   592		}
   593	}
   594	
// Print all currently active panics. Used when crashing.
// Should only be called after preprintpanics.
func printpanics(p *_panic) {
	// Recurse first so the oldest panic prints first; nested panics
	// are indented with a tab.
	if p.link != nil {
		printpanics(p.link)
		print("\t")
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}
   609	
// The implementation of the predeclared function panic.
func gopanic(e interface{}) {
	// A panic in any of the following runtime contexts cannot be
	// handled by user code; escalate each to an unrecoverable throw.
	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	// p lives on this frame's stack; link it at the head of the G's
	// panic list.
	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	atomic.Xadd(&runningPanicDefers, 1)

	// Run the G's deferred calls, newest first, until one recovers
	// or the list is exhausted.
	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. The earlier panic or Goexit will not continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before reflectcall starts executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

		// p.argp lets gorecover verify its caller is this deferred call.
		p.argp = unsafe.Pointer(getargp(0))
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		p.argp = nil

		// reflectcall did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link

		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
		//GC()

		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		freedefer(d)
		if p.recovered {
			atomic.Xadd(&runningPanicDefers, -1)

			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(gp._panic)

	fatalpanic(gp._panic) // should not return
	*(*int)(nil) = 0      // not reached
}
   725	
// getargp returns the location where the caller
// writes outgoing function call arguments.
// Must stay noinline so the frame layout is real, and nosplit so the
// address is stable for the comparison in gorecover.
//go:nosplit
//go:noinline
func getargp(x int) uintptr {
	// x is an argument mainly so that we can return its address.
	return uintptr(noescape(unsafe.Pointer(&x)))
}
   734	
   735	// The implementation of the predeclared function recover.
   736	// Cannot split the stack because it needs to reliably
   737	// find the stack segment of its caller.
   738	//
   739	// TODO(rsc): Once we commit to CopyStackAlways,
   740	// this doesn't need to be nosplit.
   741	//go:nosplit
   742	func gorecover(argp uintptr) interface{} {
   743		// Must be in a function running as part of a deferred call during the panic.
   744		// Must be called from the topmost function of the call
   745		// (the function used in the defer statement).
   746		// p.argp is the argument pointer of that topmost deferred function call.
   747		// Compare against argp reported by caller.
   748		// If they match, the caller is the one who can recover.
   749		gp := getg()
   750		p := gp._panic
   751		if p != nil && !p.recovered && argp == uintptr(p.argp) {
   752			p.recovered = true
   753			return p.arg
   754		}
   755		return nil
   756	}
   757	
// sync_throw is linked into package sync as sync.throw, giving it
// access to the runtime's fatal throw.
//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}
   762	
// throw crashes the program with the message s. It is unrecoverable:
// unlike panic, it cannot be caught by user code.
//go:nosplit
func throw(s string) {
	// Everything throw does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ", s, "\n")
	})
	gp := getg()
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	fatalthrow()
	*(*int)(nil) = 0 // not reached
}
   777	
// runningPanicDefers is non-zero while running deferred functions for panic.
// runningPanicDefers is incremented and decremented atomically.
// This is used to try hard to get a panic stack trace out when exiting.
// (Incremented in gopanic; decremented on recovery or in fatalpanic.)
var runningPanicDefers uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
// panicking is incremented and decremented atomically.
// (Incremented in startpanic_m when an M first starts dying.)
var panicking uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex
   790	
// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
// Runs on the g0 stack (called via mcall from gopanic).
func recovery(gp *g) {
	// Info about defer passed in G struct.
	sp := gp.sigcode0
	pc := gp.sigcode1

	// d's arguments need to be in the stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1.  The calling function will
	// jump to the standard return epilogue.
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	gp.sched.ret = 1
	gogo(&gp.sched)
}
   814	
// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
//go:nosplit
func fatalthrow() {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		startpanic_m()

		if dopanic_m(gp, pc, sp) {
			// crash uses a decent amount of nosplit stack and we're already
			// low on stack in throw, so crash on the system stack (unlike
			// fatalpanic).
			crash()
		}

		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}
   841	
// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	var docrash bool
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		if startpanic_m() && msgs != nil {
			// There were panic messages and startpanic_m
			// says it's okay to try to print them.

			// startpanic_m set panicking, which will
			// block main from exiting, so now OK to
			// decrement runningPanicDefers.
			atomic.Xadd(&runningPanicDefers, -1)

			printpanics(msgs)
		}

		docrash = dopanic_m(gp, pc, sp)
	})

	if docrash {
		// By crashing outside the above systemstack call, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}
   883	
// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}
	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	_g_.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if _g_.m.locks < 0 {
		_g_.m.locks = 1
	}

	// m.dying records how far a previous panic attempt on this M
	// got, so repeated failures degrade gracefully instead of
	// recursing forever.
	switch _g_.m.dying {
	case 0:
		// First panic on this M.
		// Setting dying >0 has the side-effect of disabling this G's writebuf.
		_g_.m.dying = 1
		atomic.Xadd(&panicking, 1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		// Stop the rest of the world so the tracebacks are taken
		// against a quiescent state.
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		// exit does not return; the fallthrough only gives this
		// case a terminating statement so every path yields a value.
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}
   943	
// didothers records whether tracebackothers has already run during this
// crash, so that when several Ms panic concurrently the full goroutine
// dump is printed at most once (see dopanic_m).
var didothers bool

// deadlock is locked twice in a row by a panicking M that is not the
// last one standing (see dopanic_m), parking it forever while another
// M finishes printing and exits the process.
var deadlock mutex
   946	
// dopanic_m prints the crash report for gp — a signal header when the
// panic came from a signal, then stack traces as permitted by the
// GOTRACEBACK setting — and reports whether the caller should crash
// (dump core) rather than exit. pc and sp are where gp's traceback
// should start. Callers invoke it on the system stack (via
// systemstack) with paniclk held by startpanic_m.
func dopanic_m(gp *g, pc, sp uintptr) bool {
	// If the panic was triggered by a signal, print its details first.
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			// No symbolic name available; print the raw number.
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	// level: how much to print; all: include all goroutines;
	// docrash: whether a core dump was requested (GOTRACEBACK=crash).
	level, all, docrash := gotraceback()
	_g_ := getg()
	if level > 0 {
		if gp != gp.m.curg {
			// Dying on a non-user goroutine: show everything.
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || _g_.m.throwing > 0 {
			// The g0 (runtime) stack is only printed for throws
			// or at GOTRACEBACK>=2.
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		// Dump the other goroutines at most once, even if several
		// panicking Ms reach this point.
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if atomic.Xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock) // intentional self-deadlock: blocks forever
	}

	printDebugLog()

	return docrash
}
   992	
// canpanic returns false if a signal should throw instead of
// panicking.
//
//go:nosplit
func canpanic(gp *g) bool {
	// Note that g is m->gsignal, different from gp.
	// Note also that g->m can change at preemption, so m can go stale
	// if this function ever makes a function call.
	_g_ := getg()
	_m_ := _g_.m

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp == nil || gp != _m_.curg {
		// No user goroutine, or the signal landed while this M was
		// not running its user goroutine.
		return false
	}
	if _m_.locks != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.preemptoff != "" || _m_.dying != 0 {
		// The M is in a delicate runtime state (holding locks,
		// allocating, already throwing or dying, or preemption is
		// disabled); panicking here is unsafe.
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		// gp must be running (ignoring the scan bit) and must not
		// be in a system call.
		return false
	}
	if GOOS == "windows" && _m_.libcallsp != 0 {
		// NOTE(review): a nonzero libcallsp appears to mean an
		// in-progress call out to a Windows DLL — treated like
		// being in a system call; confirm against libcall users.
		return false
	}
	return true
}
  1022	
  1023	// shouldPushSigpanic reports whether pc should be used as sigpanic's
  1024	// return PC (pushing a frame for the call). Otherwise, it should be
  1025	// left alone so that LR is used as sigpanic's return PC, effectively
  1026	// replacing the top-most frame with sigpanic. This is used by
  1027	// preparePanic.
  1028	func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
  1029		if pc == 0 {
  1030			// Probably a call to a nil func. The old LR is more
  1031			// useful in the stack trace. Not pushing the frame
  1032			// will make the trace look like a call to sigpanic
  1033			// instead. (Otherwise the trace will end at sigpanic
  1034			// and we won't get to see who faulted.)
  1035			return false
  1036		}
  1037		// If we don't recognize the PC as code, but we do recognize
  1038		// the link register as code, then this assumes the panic was
  1039		// caused by a call to non-code. In this case, we want to
  1040		// ignore this call to make unwinding show the context.
  1041		//
  1042		// If we running C code, we're not going to recognize pc as a
  1043		// Go function, so just assume it's good. Otherwise, traceback
  1044		// may try to read a stale LR that looks like a Go code
  1045		// pointer and wander into the woods.
  1046		if gp.m.incgo || findfunc(pc).valid() {
  1047			// This wasn't a bad call, so use PC as sigpanic's
  1048			// return PC.
  1049			return true
  1050		}
  1051		if findfunc(lr).valid() {
  1052			// This was a bad call, but the LR is good, so use the
  1053			// LR as sigpanic's return PC.
  1054			return false
  1055		}
  1056		// Neither the PC or LR is good. Hopefully pushing a frame
  1057		// will work.
  1058		return true
  1059	}
  1060	
  1061	// isAbortPC reports whether pc is the program counter at which
  1062	// runtime.abort raises a signal.
  1063	//
  1064	// It is nosplit because it's part of the isgoexception
  1065	// implementation.
  1066	//
  1067	//go:nosplit
  1068	func isAbortPC(pc uintptr) bool {
  1069		return pc == funcPC(abort) || ((GOARCH == "arm" || GOARCH == "arm64") && pc == funcPC(abort)+sys.PCQuantum)
  1070	}
  1071	

View as plain text