...

Source file src/runtime/runtime1.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = _g_.m.throwing > 0 || t&tracebackAll != 0
	if _g_.m.traceback != 0 {
		level = int32(_g_.m.traceback)
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}

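The packed encoding is easy to check in isolation. The following standalone sketch (an ordinary program with the constants copied from above, not runtime code) packs a traceback level together with the crash and all flags into a single uint32 and decodes it the same way gotraceback does:

	package main

	import "fmt"

	// Mirror of the runtime's encoding: the two low bits hold the
	// crash/all flags, the remaining bits hold the traceback level.
	const (
		tracebackCrash = 1 << iota // 1
		tracebackAll               // 2
		tracebackShift = iota      // 2
	)

	func main() {
		// GOTRACEBACK=crash: level 2, all goroutines, crash afterwards.
		t := uint32(2<<tracebackShift | tracebackAll | tracebackCrash)

		level := t >> tracebackShift
		all := t&tracebackAll != 0
		crash := t&tracebackCrash != 0
		fmt.Println(level, all, crash) // prints: 2 true true
	}
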
var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}

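argv_index is plain pointer arithmetic over a C-style array of byte pointers. A minimal standalone sketch of the same computation, substituting unsafe.Sizeof for the runtime's add helper and sys.PtrSize (index is a hypothetical stand-in, not the runtime function):

	package main

	import (
		"fmt"
		"unsafe"
	)

	// index returns p[i], computed with the same pointer arithmetic
	// argv_index uses: base + i * pointer-size.
	func index(p **byte, i int32) *byte {
		return *(**byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + uintptr(i)*unsafe.Sizeof(p)))
	}

	func main() {
		b0, b1 := byte('a'), byte('b')
		arr := [2]*byte{&b0, &b1}
		fmt.Printf("%c\n", *index(&arr[0], 1)) // prints: b
	}
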
// args records the process's C argc/argv pair and gives the
// OS-specific sysargs hook a chance to inspect it.
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

// goargs converts the C argv into the Go argslice. Windows is
// skipped because it builds its argument list elsewhere.
func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

// goenvs_unix populates envs from the environment block that follows
// argv (and its terminating nil entry) on Unix systems.
func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}

// environ returns the runtime's copy of the environment.
func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

// testAtomic64 exercises the 64-bit atomic operations (CAS, load,
// store, add, exchange) and throws if any of them misbehave.
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}

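runtime/internal/atomic is not importable from user code, but sync/atomic exposes equivalent operations under different names: Cas64 is CompareAndSwapUint64, Xadd64 is AddUint64, Xchg64 is SwapUint64. A standalone sketch replaying part of the sequence above with the public API:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	func main() {
		var z64 uint64 = 42

		// Like Cas64: store the new value only if the old one matches.
		if atomic.CompareAndSwapUint64(&z64, 0, 1) {
			panic("cas64 should have failed")
		}
		if !atomic.CompareAndSwapUint64(&z64, 42, 1) {
			panic("cas64 should have succeeded")
		}

		atomic.StoreUint64(&z64, (1<<40)+1)               // Store64
		fmt.Println(atomic.AddUint64(&z64, (1<<40)+1))    // Xadd64: returns the new value, (2<<40)+2
		fmt.Println(atomic.SwapUint64(&z64, (3<<40)+3))   // Xchg64: returns the old value, (2<<40)+2
		fmt.Println(atomic.LoadUint64(&z64) == (3<<40)+3) // Load64: true
	}
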
// check verifies assumptions the rest of the runtime depends on:
// basic type sizes and offsets, timediv, atomic operations, NaN
// comparison semantics, and stack constants.
func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k     unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if !(i != i) {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if !(i != i1) {
		throw("float32nan3")
	}

	testAtomic64()

	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}

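The NaN checks above exploit the IEEE 754 rule that a value with an all-ones exponent and a non-zero mantissa is a NaN, and that a NaN compares unequal to everything, itself included. Outside the runtime the same bit-pattern trick is available through the math package; a small sketch:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		// ^uint64(0) is all ones: sign bit set, exponent all ones,
		// non-zero mantissa, i.e. a NaN.
		j := math.Float64frombits(^uint64(0))
		fmt.Println(j == j, j != j, math.IsNaN(j)) // prints: false true true

		i := math.Float32frombits(^uint32(0))
		fmt.Println(i == i, i != i) // prints: false true
	}
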
type dbgVar struct {
	name  string
	value *int32
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	allocfreetrace     int32
	cgocheck           int32
	clobberfree        int32
	efence             int32
	gccheckmark        int32
	gcpacertrace       int32
	gcshrinkstackoff   int32
	gcstoptheworld     int32
	gctrace            int32
	invalidptr         int32
	madvdontneed       int32 // for Linux; issue 28466
	sbrk               int32
	scavenge           int32
	scheddetail        int32
	schedtrace         int32
	tracebackancestors int32
}

var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"clobberfree", &debug.clobberfree},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"madvdontneed", &debug.madvdontneed},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"tracebackancestors", &debug.tracebackancestors},
}

func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1

	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := index(p, ",")
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = index(field, "=")
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						*v.value = n
					}
				}
			}
		}
	}

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}

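The loop above splits GODEBUG by hand because the runtime cannot import the strings package. For comparison, a standalone sketch of the same comma-separated key=value parse written against the standard library (parseGODEBUG is a hypothetical helper, not a runtime function):

	package main

	import (
		"fmt"
		"strings"
	)

	// parseGODEBUG mimics the runtime's loop: comma-separated
	// key=value pairs, with fields lacking '=' silently skipped.
	func parseGODEBUG(s string) map[string]string {
		m := make(map[string]string)
		for _, field := range strings.Split(s, ",") {
			i := strings.Index(field, "=")
			if i < 0 {
				continue
			}
			m[field[:i]] = field[i+1:]
		}
		return m
	}

	func main() {
		fmt.Println(parseGODEBUG("gctrace=1,schedtrace=1000,bogus"))
		// prints: map[gctrace:1 schedtrace:1000]
	}
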
//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// when C owns the process, simply exiting the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}

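The //go:linkname directive above makes this function the implementation of runtime/debug.SetTraceback, so a program can raise its traceback level at run time using the same values GOTRACEBACK accepts. A minimal usage sketch:

	package main

	import "runtime/debug"

	func main() {
		// Same effect as starting the process with GOTRACEBACK=all:
		// a fatal panic now dumps every goroutine's stack.
		debug.SetTraceback("all")

		panic("demonstrate traceback level")
	}

The runtime/debug documentation notes that SetTraceback cannot reduce the amount of detail below what the GOTRACEBACK environment variable requested; that is what the t |= traceback_env merge above implements.
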
// Poor man's 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into a _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
// This keeps us within no-split stack limits on 32-bit processors.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			// Before this for loop, res was 0, thus all these
			// power of 2 increments are now just bitsets.
			res |= 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}

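check() above verifies a concrete case: timediv(12345*1000000000+54321, 1000000000, &e) must return 12345 with remainder 54321. This standalone sketch transcribes the shift-and-subtract loop so the result can be compared against native 64-bit division:

	package main

	import "fmt"

	// timediv transcribed from the runtime: binary long division that
	// avoids the 386 software-division call.
	func timediv(v int64, div int32, rem *int32) int32 {
		res := int32(0)
		for bit := 30; bit >= 0; bit-- {
			if v >= int64(div)<<uint(bit) {
				v = v - (int64(div) << uint(bit))
				res |= 1 << uint(bit)
			}
		}
		if v >= int64(div) {
			if rem != nil {
				*rem = 0
			}
			return 0x7fffffff // quotient overflows int32: saturate
		}
		if rem != nil {
			*rem = int32(v)
		}
		return res
	}

	func main() {
		var rem int32
		v := int64(12345)*1000000000 + 54321
		fmt.Println(timediv(v, 1000000000, &rem), rem) // prints: 12345 54321
		fmt.Println(v/1000000000, v%1000000000)        // prints: 12345 54321
	}
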
// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

// acquirem disables preemption of the current goroutine by
// incrementing its m's lock count, and returns the m.
//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}

// releasem undoes acquirem. When the last lock is released and a
// preemption is pending, it restores the stack preemption request.
//go:nosplit
func releasem(mp *m) {
	_g_ := getg()
	mp.locks--
	if mp.locks == 0 && _g_.preempt {
		// restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}

//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}

//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{modules[0].typelinks}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, md.typelinks)
	}
	return sections, ret
}

// reflect_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}

// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}

// reflect_resolveTextOff resolves a function pointer offset from a base type.
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return (*_type)(rtype).textOff(textOff(off))
}

// reflectlite_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}

// reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}

// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}

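A toy version of the reflectOffs bookkeeping makes the behavior easy to see: IDs count down from -1 (negative, the comment notes, to aid debugging), both directions of the mapping are stored, and re-adding a pointer returns its existing ID. This sketch drops the locking, which the runtime needs but a single-goroutine demo does not:

	package main

	import "fmt"

	type offs struct {
		m    map[int32]*int
		minv map[*int]int32
		next int32
	}

	func (o *offs) add(p *int) int32 {
		if o.m == nil {
			o.m = make(map[int32]*int)
			o.minv = make(map[*int]int32)
			o.next = -1
		}
		id, found := o.minv[p]
		if !found {
			id = o.next
			o.next--
			o.m[id] = p
			o.minv[p] = id
		}
		return id
	}

	func main() {
		var o offs
		a, b := new(int), new(int)
		fmt.Println(o.add(a), o.add(b), o.add(a)) // prints: -1 -2 -1
	}
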
