// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/cpu"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// defined constants
const (
	// G status
	//
	// Beyond indicating the general state of a G, the G status
	// acts like a lock on the goroutine's stack (and hence its
	// ability to execute user code).
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.
	//
	// TODO(austin): The _Gscan bit could be much lighter-weight.
	// For example, we could choose not to run _Gscanrunnable
	// goroutines found in the run queue, rather than CAS-looping
	// until they become _Grunnable. And transitions like
	// _Gscanwaiting -> _Gscanrunnable are actually okay because
	// they don't affect stack ownership.

	// _Gidle means this goroutine was just allocated and has not
	// yet been initialized.
	_Gidle = iota // 0

	// _Grunnable means this goroutine is on a run queue. It is
	// not currently executing user code. The stack is not owned.
	_Grunnable // 1

	// _Grunning means this goroutine may execute user code. The
	// stack is owned by this goroutine. It is not on a run queue.
	// It is assigned an M and a P.
	_Grunning // 2

	// _Gsyscall means this goroutine is executing a system call.
	// It is not executing user code. The stack is owned by this
	// goroutine. It is not on a run queue. It is assigned an M.
	_Gsyscall // 3

	// _Gwaiting means this goroutine is blocked in the runtime.
	// It is not executing user code. It is not on a run queue,
	// but should be recorded somewhere (e.g., a channel wait
	// queue) so it can be ready()d when necessary. The stack is
	// not owned *except* that a channel operation may read or
	// write parts of the stack under the appropriate channel
	// lock. Otherwise, it is not safe to access the stack after a
	// goroutine enters _Gwaiting (e.g., it may get moved).
	_Gwaiting // 4

	// _Gmoribund_unused is currently unused, but hardcoded in gdb
	// scripts.
	_Gmoribund_unused // 5

	// _Gdead means this goroutine is currently unused. It may be
	// just exited, on a free list, or just being initialized. It
	// is not executing user code. It may or may not have a stack
	// allocated. The G and its stack (if any) are owned by the M
	// that is exiting the G or that obtained the G from the free
	// list.
	_Gdead // 6

	// _Genqueue_unused is currently unused.
	_Genqueue_unused // 7

	// _Gcopystack means this goroutine's stack is being moved. It
	// is not executing user code and is not on a run queue. The
	// stack is owned by the goroutine that put it in _Gcopystack.
	_Gcopystack // 8

	// _Gscan combined with one of the above states other than
	// _Grunning indicates that GC is scanning the stack. The
	// goroutine is not executing user code and the stack is owned
	// by the goroutine that set the _Gscan bit.
	//
	// _Gscanrunning is different: it is used to briefly block
	// state transitions while GC signals the G to scan its own
	// stack. This is otherwise like _Grunning.
	//
	// atomicstatus&~Gscan gives the state the goroutine will
	// return to when the scan completes.
	_Gscan         = 0x1000
	_Gscanrunnable = _Gscan + _Grunnable // 0x1001
	_Gscanrunning  = _Gscan + _Grunning  // 0x1002
	_Gscansyscall  = _Gscan + _Gsyscall  // 0x1003
	_Gscanwaiting  = _Gscan + _Gwaiting  // 0x1004
)
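
// gStatusBase is a hypothetical helper, not part of the original file: a
// minimal sketch of the masking described above, where clearing the _Gscan
// bit yields the state the goroutine will return to when the scan completes.
func gStatusBase(status uint32) uint32 {
	// For example, gStatusBase(_Gscanrunnable) == _Grunnable.
	return status &^ _Gscan
}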

const (
	// P status

	// _Pidle means a P is not being used to run user code or the
	// scheduler. Typically, it's on the idle P list and available
	// to the scheduler, but it may just be transitioning between
	// other states.
	//
	// The P is owned by the idle list or by whatever is
	// transitioning its state. Its run queue is empty.
	_Pidle = iota

	// _Prunning means a P is owned by an M and is being used to
	// run user code or the scheduler. Only the M that owns this P
	// is allowed to change the P's status from _Prunning. The M
	// may transition the P to _Pidle (if it has no more work to
	// do), _Psyscall (when entering a syscall), or _Pgcstop (to
	// halt for the GC). The M may also hand ownership of the P
	// off directly to another M (e.g., to schedule a locked G).
	_Prunning

	// _Psyscall means a P is not running user code. It has
	// affinity to an M in a syscall but is not owned by it and
	// may be stolen by another M. This is similar to _Pidle but
	// uses lightweight transitions and maintains M affinity.
	//
	// Leaving _Psyscall must be done with a CAS, either to steal
	// or retake the P. Note that there's an ABA hazard: even if
	// an M successfully CASes its original P back to _Prunning
	// after a syscall, it must understand the P may have been
	// used by another M in the interim.
	_Psyscall

	// _Pgcstop means a P is halted for STW and owned by the M
	// that stopped the world. The M that stopped the world
	// continues to use its P, even in _Pgcstop. Transitioning
	// from _Prunning to _Pgcstop causes an M to release its P and
	// park.
	//
	// The P retains its run queue and startTheWorld will restart
	// the scheduler on Ps with non-empty run queues.
	_Pgcstop

	// _Pdead means a P is no longer used (GOMAXPROCS shrank). We
	// reuse Ps if GOMAXPROCS increases. A dead P is mostly
	// stripped of its resources, though a few things remain
	// (e.g., trace buffers).
	_Pdead
)

// Mutual exclusion locks. In the uncontended case,
// as fast as spin locks (just a few user-level instructions),
// but on the contention path they sleep in the kernel.
// A zeroed Mutex is unlocked (no need to initialize each lock).
type mutex struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}

// sleep and wakeup on one-time events.
// before any calls to notesleep or notewakeup,
// must call noteclear to initialize the Note.
// then, exactly one thread can call notesleep
// and exactly one thread can call notewakeup (once).
// once notewakeup has been called, the notesleep
// will return. future notesleep will return immediately.
// subsequent noteclear must be called only after
// previous notesleep has returned, e.g. it's disallowed
// to call noteclear straight after notewakeup.
//
// notetsleep is like notesleep but wakes up after
// a given number of nanoseconds even if the event
// has not yet happened. if a goroutine uses notetsleep to
// wake up early, it must wait to call noteclear until it
// can be sure that no other goroutine is calling
// notewakeup.
//
// notesleep/notetsleep are generally called on g0,
// notetsleepg is similar to notetsleep but is called on user g.
type note struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}
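
// exampleOneShotEvent is a hypothetical sketch, not part of the original
// file, of the one-time-event protocol described above: clear once, then
// exactly one waker and one sleeper. (Real runtime code calls notesleep on
// g0; this illustrates only the ordering of the calls.)
func exampleOneShotEvent() {
	var ready note
	noteclear(&ready) // initialize before any sleep/wakeup
	go func() {
		notewakeup(&ready) // exactly one thread wakes, once
	}()
	notesleep(&ready) // returns once notewakeup has been called
}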

type funcval struct {
	fn uintptr
	// variable-size, fn-specific data here
}

type iface struct {
	tab  *itab
	data unsafe.Pointer
}

type eface struct {
	_type *_type
	data  unsafe.Pointer
}

func efaceOf(ep *interface{}) *eface {
	return (*eface)(unsafe.Pointer(ep))
}
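
// exampleEfaceWords is a hypothetical sketch, not part of the original
// file: efaceOf reinterprets an *interface{} as its two-word runtime
// representation, exposing the type descriptor and data pointer.
func exampleEfaceWords(x interface{}) (typ *_type, data unsafe.Pointer) {
	e := efaceOf(&x)
	return e._type, e.data
}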

// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs and Ps are always reachable via true pointers in the
// allgs and allp lists or (during allocation before they reach those lists)
// from stack variables.
//
// Ms are always reachable via true pointers either from allm or
// freem. Unlike Gs and Ps we do free Ms, so it's important that
// nothing ever hold an muintptr across a safe point.

// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
type guintptr uintptr

//go:nosplit
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }

//go:nosplit
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }

//go:nosplit
func (gp *guintptr) cas(old, new guintptr) bool {
	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}

// setGNoWB performs *gp = new without a write barrier.
// For times when it's impractical to use a guintptr.
//go:nosplit
//go:nowritebarrier
func setGNoWB(gp **g, new *g) {
	(*guintptr)(unsafe.Pointer(gp)).set(new)
}

type puintptr uintptr

//go:nosplit
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }

//go:nosplit
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }

// muintptr is a *m that is not tracked by the garbage collector.
//
// Because we do free Ms, there are some additional constraints on
// muintptrs:
//
// 1. Never hold an muintptr locally across a safe point.
//
// 2. Any muintptr in the heap must be owned by the M itself so it can
//    ensure it is not in use when the last true *m is released.
type muintptr uintptr

//go:nosplit
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }

//go:nosplit
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }

// setMNoWB performs *mp = new without a write barrier.
// For times when it's impractical to use an muintptr.
//go:nosplit
//go:nowritebarrier
func setMNoWB(mp **m, new *m) {
	(*muintptr)(unsafe.Pointer(mp)).set(new)
}
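
// exampleGuintptrRoundTrip is a hypothetical sketch, not part of the
// original file: storing through set writes a bare uintptr, so the compiler
// emits no write barrier, and ptr recovers the *g. This is safe only
// because Gs are kept reachable through allg, as explained above.
func exampleGuintptrRoundTrip(gp *g) *g {
	var u guintptr
	u.set(gp)
	return u.ptr()
}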

type gobuf struct {
	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
	//
	// ctxt is unusual with respect to GC: it may be a
	// heap-allocated funcval, so GC needs to track it, but it
	// needs to be set and cleared from assembly, where it's
	// difficult to have write barriers. However, ctxt is really a
	// saved, live register, and we only ever exchange it between
	// the real register and the gobuf. Hence, we treat it as a
	// root during stack scanning, which means assembly that saves
	// and restores it doesn't need write barriers. It's still
	// typed as a pointer so that any other writes from Go get
	// write barriers.
	sp   uintptr
	pc   uintptr
	g    guintptr
	ctxt unsafe.Pointer
	ret  sys.Uintreg
	lr   uintptr
	bp   uintptr // for GOEXPERIMENT=framepointer
}

// sudog represents a g in a wait list, such as for sending/receiving
// on a channel.
//
// sudog is necessary because the g ↔ synchronization object relation
// is many-to-many. A g can be on many wait lists, so there may be
// many sudogs for one g; and many gs may be waiting on the same
// synchronization object, so there may be many sudogs for one object.
//
// sudogs are allocated from a special pool. Use acquireSudog and
// releaseSudog to allocate and free them.
type sudog struct {
	// The following fields are protected by the hchan.lock of the
	// channel this sudog is blocking on. shrinkstack depends on
	// this for sudogs involved in channel ops.

	g *g

	// isSelect indicates g is participating in a select, so
	// g.selectDone must be CAS'd to win the wake-up race.
	isSelect bool
	next     *sudog
	prev     *sudog
	elem     unsafe.Pointer // data element (may point to stack)

	// The following fields are never accessed concurrently.
	// For channels, waitlink is only accessed by g.
	// For semaphores, all fields (including the ones above)
	// are only accessed when holding a semaRoot lock.

	acquiretime int64
	releasetime int64
	ticket      uint32
	parent      *sudog // semaRoot binary tree
	waitlink    *sudog // g.waiting list or semaRoot
	waittail    *sudog // semaRoot
	c           *hchan // channel
}

type libcall struct {
	fn   uintptr
	n    uintptr // number of parameters
	args uintptr // parameters
	r1   uintptr // return values
	r2   uintptr
	err  uintptr // error number
}

// describes how to handle callback
type wincallbackcontext struct {
	gobody       unsafe.Pointer // go function to call
	argsize      uintptr        // callback arguments size (in bytes)
	restorestack uintptr        // adjust stack on return by (in bytes) (386 only)
	cleanstack   bool
}

// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
	lo uintptr
	hi uintptr
}
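
// exampleStackSize is a hypothetical sketch, not part of the original
// file: because the bounds are exactly [lo, hi) with no implicit data
// structures on either side, the usable size is simply the difference.
func exampleStackSize(s stack) uintptr {
	return s.hi - s.lo
}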

type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the C stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink

	_panic         *_panic // innermost panic - offset known to liblink
	_defer         *_defer // innermost defer
	m              *m      // current m; offset known to arm liblink
	sched          gobuf
	syscallsp      uintptr        // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc      uintptr        // if status==Gsyscall, syscallpc = sched.pc to use during gc
	stktopsp       uintptr        // expected sp at top of stack, to check in traceback
	param          unsafe.Pointer // passed parameter on wakeup
	atomicstatus   uint32
	stackLock      uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	goid           int64
	schedlink      guintptr
	waitsince      int64      // approx time when the g became blocked
	waitreason     waitReason // if status==Gwaiting
	preempt        bool       // preemption signal, duplicates stackguard0 = stackpreempt
	paniconfault   bool       // panic (instead of crash) on unexpected fault address
	preemptscan    bool       // preempted g does scan for gc
	gcscandone     bool       // g has scanned stack; protected by _Gscan bit in status
	gcscanvalid    bool       // false at start of gc cycle, true if G has not run since last scan; TODO: remove?
	throwsplit     bool       // must not split stack
	raceignore     int8       // ignore race detection events
	sysblocktraced bool       // StartTrace has emitted EvGoInSyscall about this goroutine
	sysexitticks   int64      // cputicks when syscall has returned (for tracing)
	traceseq       uint64     // trace event sequencer
	tracelastp     puintptr   // last P emitted an event for this goroutine
	lockedm        muintptr
	sig            uint32
	writebuf       []byte
	sigcode0       uintptr
	sigcode1       uintptr
	sigpc          uintptr
	gopc           uintptr         // pc of go statement that created this goroutine
	ancestors      *[]ancestorInfo // ancestor information of the goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
	startpc        uintptr         // pc of goroutine function
	racectx        uintptr
	waiting        *sudog         // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	cgoCtxt        []uintptr      // cgo traceback context
	labels         unsafe.Pointer // profiler labels
	timer          *timer         // cached timer for time.Sleep
	selectDone     uint32         // are we participating in a select and did someone win the race?

	// Per-G GC state

	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
	gcAssistBytes int64
}
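
// exampleHasAssistCredit is a hypothetical sketch, not part of the
// original file: a positive gcAssistBytes balance is credit, so a G may
// allocate up to that many bytes without doing GC assist work, per the
// field comment above.
func exampleHasAssistCredit(gp *g, size uintptr) bool {
	return gp.gcAssistBytes >= int64(size)
}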

type m struct {
	g0      *g     // goroutine with scheduling stack
	morebuf gobuf  // gobuf arg to morestack
	divmod  uint32 // div/mod denominator for arm - known to liblink

	// Fields not known to debuggers.
	procid        uint64       // for debuggers, but offset not hard-coded
	gsignal       *g           // signal-handling g
	goSigStack    gsignalStack // Go-allocated signal handling stack
	sigmask       sigset       // storage for saved signal mask
	tls           [6]uintptr   // thread-local storage (for x86 extern register)
	mstartfn      func()
	curg          *g       // current running goroutine
	caughtsig     guintptr // goroutine running during fatal signal
	p             puintptr // attached p for executing go code (nil if not executing go code)
	nextp         puintptr
	oldp          puintptr // the p that was attached before executing a syscall
	id            int64
	mallocing     int32
	throwing      int32
	preemptoff    string // if != "", keep curg running on this m
	locks         int32
	dying         int32
	profilehz     int32
	spinning      bool // m is out of work and is actively looking for work
	blocked       bool // m is blocked on a note
	newSigstack   bool // minit on C thread called sigaltstack
	printlock     int8
	incgo         bool   // m is executing a cgo call
	freeWait      uint32 // if == 0, safe to free g0 and delete m (atomic)
	fastrand      [2]uint32
	needextram    bool
	traceback     uint8
	ncgocall      uint64      // number of cgo calls in total
	ncgo          int32       // number of cgo calls currently in progress
	cgoCallersUse uint32      // if non-zero, cgoCallers in use temporarily
	cgoCallers    *cgoCallers // cgo traceback if crashing in cgo call
	park          note
	alllink       *m // on allm
	schedlink     muintptr
	mcache        *mcache
	lockedg       guintptr
	createstack   [32]uintptr // stack that created this thread.
	lockedExt     uint32      // tracking for external LockOSThread
	lockedInt     uint32      // tracking for internal lockOSThread
	nextwaitm     muintptr    // next m waiting for lock
	waitunlockf   func(*g, unsafe.Pointer) bool
	waitlock      unsafe.Pointer
	waittraceev   byte
	waittraceskip int
	startingtrace bool
	syscalltick   uint32
	thread        uintptr // thread handle
	freelink      *m      // on sched.freem

	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	libcall   libcall
	libcallpc uintptr // for cpu profiler
	libcallsp uintptr
	libcallg  guintptr
	syscall   libcall // stores syscall parameters on windows

	vdsoSP uintptr // SP for traceback while in VDSO call (0 if not in call)
	vdsoPC uintptr // PC for traceback while in VDSO call

	dlogPerM

	mOS
}
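
// exampleCanFreeM is a hypothetical sketch, not part of the original
// file: per the field comment above, freeWait is accessed atomically, and
// a zero value means it is safe to free the M's g0 stack and delete the M.
func exampleCanFreeM(mp *m) bool {
	return atomic.Load(&mp.freeWait) == 0
}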

type p struct {
	id          int32
	status      uint32 // one of pidle/prunning/...
	link        puintptr
	schedtick   uint32     // incremented on every scheduler call
	syscalltick uint32     // incremented on every system call
	sysmontick  sysmontick // last tick observed by sysmon
	m           muintptr   // back-link to associated m (nil if idle)
	mcache      *mcache
	raceprocctx uintptr

	deferpool    [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
	deferpoolbuf [5][32]*_defer

	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	goidcache    uint64
	goidcacheend uint64

	// Queue of runnable goroutines. Accessed without lock.
	runqhead uint32
	runqtail uint32
	runq     [256]guintptr
	// runnext, if non-nil, is a runnable G that was ready'd by
	// the current G and should be run next instead of what's in
	// runq if there's time remaining in the running G's time
	// slice. It will inherit the time left in the current time
	// slice. If a set of goroutines is locked in a
	// communicate-and-wait pattern, this schedules that set as a
	// unit and eliminates the (potentially large) scheduling
	// latency that otherwise arises from adding the ready'd
	// goroutines to the end of the run queue.
	runnext guintptr

	// Available G's (status == Gdead)
	gFree struct {
		gList
		n int32
	}

	sudogcache []*sudog
	sudogbuf   [128]*sudog

	tracebuf traceBufPtr

	// traceSweep indicates the sweep events should be traced.
	// This is used to defer the sweep start event until a span
	// has actually been swept.
	traceSweep bool
	// traceSwept and traceReclaimed track the number of bytes
	// swept and reclaimed by sweeping in the current sweep loop.
	traceSwept, traceReclaimed uintptr

	palloc persistentAlloc // per-P to avoid mutex

	_ uint32 // Alignment for atomic fields below

	// Per-P GC state
	gcAssistTime         int64    // Nanoseconds in assistAlloc
	gcFractionalMarkTime int64    // Nanoseconds in fractional mark worker (atomic)
	gcBgMarkWorker       guintptr // (atomic)
	gcMarkWorkerMode     gcMarkWorkerMode

	// gcMarkWorkerStartTime is the nanotime() at which this mark
	// worker started.
	gcMarkWorkerStartTime int64

	// gcw is this P's GC work buffer cache. The work buffer is
	// filled by write barriers, drained by mutator assists, and
	// disposed on certain GC state transitions.
	gcw gcWork

	// wbBuf is this P's GC write barrier buffer.
	//
	// TODO: Consider caching this in the running G.
	wbBuf wbBuf

	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point

	pad cpu.CacheLinePad
}
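
// examplePeekNextG is a hypothetical sketch, not part of the original
// file, of the scheduling preference described in the runnext comment
// above: runnext is consulted before the runq ring buffer. (The real
// runqget does this with atomic loads and CAS; the plain reads here are
// purely illustrative.)
func examplePeekNextG(pp *p) *g {
	if next := pp.runnext.ptr(); next != nil {
		return next // inherits the remainder of the current time slice
	}
	if pp.runqhead != pp.runqtail {
		return pp.runq[pp.runqhead%uint32(len(pp.runq))].ptr()
	}
	return nil // local queue empty; the scheduler would look elsewhere
}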

type schedt struct {
	// accessed atomically. keep at top to ensure alignment on 32-bit systems.
	goidgen  uint64
	lastpoll uint64

	lock mutex

	// When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
	// sure to call checkdead().

	midle        muintptr // idle m's waiting for work
	nmidle       int32    // number of idle m's waiting for work
	nmidlelocked int32    // number of locked m's waiting for work
	mnext        int64    // number of m's that have been created and next M ID
	maxmcount    int32    // maximum number of m's allowed (or die)
	nmsys        int32    // number of system m's not counted for deadlock
	nmfreed      int64    // cumulative number of freed m's

	ngsys uint32 // number of system goroutines; updated atomically

	pidle      puintptr // idle p's
	npidle     uint32
	nmspinning uint32 // See "Worker thread parking/unparking" comment in proc.go.

	// Global runnable queue.
	runq     gQueue
	runqsize int32

	// disable controls selective disabling of the scheduler.
	//
	// Use schedEnableUser to control this.
	//
	// disable is protected by sched.lock.
	disable struct {
		// user disables scheduling of user goroutines.
		user     bool
		runnable gQueue // pending runnable Gs
		n        int32  // length of runnable
	}

	// Global cache of dead G's.
	gFree struct {
		lock    mutex
		stack   gList // Gs with stacks
		noStack gList // Gs without stacks
		n       int32
	}

	// Central cache of sudog structs.
	sudoglock  mutex
	sudogcache *sudog

	// Central pool of available defer structs of different sizes.
	deferlock mutex
	deferpool [5]*_defer

	// freem is the list of m's waiting to be freed when their
	// m.exited is set. Linked through m.freelink.
	freem *m

	gcwaiting  uint32 // gc is waiting to run
	stopwait   int32
	stopnote   note
	sysmonwait uint32
	sysmonnote note

	// safepointFn should be called on each P at the next GC
	// safepoint if p.runSafePointFn is set.
	safePointFn   func(*p)
	safePointWait int32
	safePointNote note

	profilehz int32 // cpu profiling rate

	procresizetime int64 // nanotime() of last change to gomaxprocs
	totaltime      int64 // ∫gomaxprocs dt up to procresizetime
}

// Values for the flags field of a sigTabT.
const (
	_SigNotify   = 1 << iota // let signal.Notify have signal, even if from kernel
	_SigKill                 // if signal.Notify doesn't take it, exit quietly
	_SigThrow                // if signal.Notify doesn't take it, exit loudly
	_SigPanic                // if the signal is from the kernel, panic
	_SigDefault              // if the signal isn't explicitly requested, don't monitor it
	_SigGoExit               // cause all runtime procs to exit (only used on Plan 9).
	_SigSetStack             // add SA_ONSTACK to libc handler
	_SigUnblock              // always unblock; see blockableSig
	_SigIgn                  // _SIG_DFL action is to ignore the signal
)

// Layout of in-memory per-function information prepared by linker
// See https://golang.org/s/go12symtab.
// Keep in sync with linker (../cmd/link/internal/ld/pcln.go:/pclntab)
// and with package debug/gosym and with symtab.go in package runtime.
type _func struct {
	entry   uintptr // start pc
	nameoff int32   // function name

	args        int32  // in/out args size
	deferreturn uint32 // offset of a deferreturn block from entry, if any.

	pcsp      int32
	pcfile    int32
	pcln      int32
	npcdata   int32
	funcID    funcID  // set for certain special runtime functions
	_         [2]int8 // unused
	nfuncdata uint8   // must be last
}

// Pseudo-Func that is returned for PCs that occur in inlined code.
// A *Func can be either a *_func or a *funcinl, and they are distinguished
// by the first uintptr.
type funcinl struct {
	zero  uintptr // set to 0 to distinguish from _func
	entry uintptr // entry of the real (the "outermost") frame.
	name  string
	file  string
	line  int
}

// layout of Itab known to compilers
// allocated in non-garbage-collected memory
// Needs to be in sync with
// ../cmd/compile/internal/gc/reflect.go:/^func.dumptypestructs.
type itab struct {
	inter *interfacetype
	_type *_type
	hash  uint32 // copy of _type.hash. Used for type switches.
	_     [4]byte
	fun   [1]uintptr // variable sized. fun[0]==0 means _type does not implement inter.
}

// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
	next    uint64
	pushcnt uintptr
}

type forcegcstate struct {
	lock mutex
	g    *g
	idle uint32
}

// startupRandomData holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte

// extendRandom extends the random numbers in r[:n] to the whole slice r.
// Treats n<0 as n==0.
func extendRandom(r []byte, n int) {
	if n < 0 {
		n = 0
	}
	for n < len(r) {
		// Extend random bits using hash function & time seed
		w := n
		if w > 16 {
			w = 16
		}
		h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
		for i := 0; i < sys.PtrSize && n < len(r); i++ {
			r[n] = byte(h)
			n++
			h >>= 8
		}
	}
}

// A _defer holds an entry on the list of deferred calls.
// If you add a field here, add code to clear it in freedefer.
// This struct must match the code in cmd/compile/internal/gc/reflect.go:deferstruct
// and cmd/compile/internal/gc/ssa.go:(*state).call.
// Some defers will be allocated on the stack and some on the heap.
// All defers are logically part of the stack, so write barriers to
// initialize them are not required. All defers must be manually scanned,
// and for heap defers, marked.
type _defer struct {
	siz     int32 // includes both arguments and results
	started bool
	heap    bool
	sp      uintptr // sp at time of defer
	pc      uintptr
	fn      *funcval
	_panic  *_panic // panic that is running defer
	link    *_defer
}
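
// exampleCountDefers is a hypothetical sketch, not part of the original
// file: deferred calls form a singly linked list through _defer.link,
// with the innermost (most recently deferred) entry at g._defer.
func exampleCountDefers(gp *g) int {
	n := 0
	for d := gp._defer; d != nil; d = d.link {
		n++
	}
	return n
}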

// A _panic holds information about an active panic.
//
// This is marked go:notinheap because _panic values must only ever
// live on the stack.
//
// The argp and link fields are stack pointers, but don't need special
// handling during stack growth: because they are pointer-typed and
// _panic values only live on the stack, regular stack pointer
// adjustment takes care of them.
//
//go:notinheap
type _panic struct {
	argp      unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
	arg       interface{}    // argument to panic
	link      *_panic        // link to earlier panic
	recovered bool           // whether this panic is over
	aborted   bool           // the panic was aborted
}

// stack traces
type stkframe struct {
	fn       funcInfo   // function being run
	pc       uintptr    // program counter within fn
	continpc uintptr    // program counter where execution can continue, or 0 if not
	lr       uintptr    // program counter at caller aka link register
	sp       uintptr    // stack pointer at pc
	fp       uintptr    // stack pointer at caller aka frame pointer
	varp     uintptr    // top of local variables
	argp     uintptr    // pointer to function arguments
	arglen   uintptr    // number of bytes at argp
	argmap   *bitvector // force use of this argmap
}

// ancestorInfo records details of where a goroutine was started.
type ancestorInfo struct {
	pcs  []uintptr // pcs from the stack of this goroutine
	goid int64     // goroutine id of this goroutine; original goroutine possibly dead
	gopc uintptr   // pc of go statement that created this goroutine
}

const (
	_TraceRuntimeFrames = 1 << iota // include frames for internal runtime functions.
	_TraceTrap                      // the initial PC, SP are from a trap, not a return PC from a call
	_TraceJumpStack                 // if traceback is on a systemstack, resume trace at g that called into it
)

// The maximum number of frames we print for a traceback
const _TracebackMaxFrames = 100

// A waitReason explains why a goroutine has been stopped.
// See gopark. Do not re-use waitReasons, add new ones.
type waitReason uint8

const (
	waitReasonZero                  waitReason = iota // ""
	waitReasonGCAssistMarking                         // "GC assist marking"
	waitReasonIOWait                                  // "IO wait"
	waitReasonChanReceiveNilChan                      // "chan receive (nil chan)"
	waitReasonChanSendNilChan                         // "chan send (nil chan)"
	waitReasonDumpingHeap                             // "dumping heap"
	waitReasonGarbageCollection                       // "garbage collection"
	waitReasonGarbageCollectionScan                   // "garbage collection scan"
	waitReasonPanicWait                               // "panicwait"
	waitReasonSelect                                  // "select"
	waitReasonSelectNoCases                           // "select (no cases)"
	waitReasonGCAssistWait                            // "GC assist wait"
	waitReasonGCSweepWait                             // "GC sweep wait"
	waitReasonGCScavengeWait                          // "GC scavenge wait"
	waitReasonChanReceive                             // "chan receive"
	waitReasonChanSend                                // "chan send"
	waitReasonFinalizerWait                           // "finalizer wait"
	waitReasonForceGGIdle                             // "force gc (idle)"
	waitReasonSemacquire                              // "semacquire"
	waitReasonSleep                                   // "sleep"
	waitReasonSyncCondWait                            // "sync.Cond.Wait"
	waitReasonTimerGoroutineIdle                      // "timer goroutine (idle)"
	waitReasonTraceReaderBlocked                      // "trace reader (blocked)"
	waitReasonWaitForGCCycle                          // "wait for GC cycle"
	waitReasonGCWorkerIdle                            // "GC worker (idle)"
)

var waitReasonStrings = [...]string{
	waitReasonZero:                  "",
	waitReasonGCAssistMarking:       "GC assist marking",
	waitReasonIOWait:                "IO wait",
	waitReasonChanReceiveNilChan:    "chan receive (nil chan)",
	waitReasonChanSendNilChan:       "chan send (nil chan)",
	waitReasonDumpingHeap:           "dumping heap",
	waitReasonGarbageCollection:     "garbage collection",
	waitReasonGarbageCollectionScan: "garbage collection scan",
	waitReasonPanicWait:             "panicwait",
	waitReasonSelect:                "select",
	waitReasonSelectNoCases:         "select (no cases)",
	waitReasonGCAssistWait:          "GC assist wait",
	waitReasonGCSweepWait:           "GC sweep wait",
	waitReasonGCScavengeWait:        "GC scavenge wait",
	waitReasonChanReceive:           "chan receive",
	waitReasonChanSend:              "chan send",
	waitReasonFinalizerWait:         "finalizer wait",
	waitReasonForceGGIdle:           "force gc (idle)",
	waitReasonSemacquire:            "semacquire",
	waitReasonSleep:                 "sleep",
	waitReasonSyncCondWait:          "sync.Cond.Wait",
	waitReasonTimerGoroutineIdle:    "timer goroutine (idle)",
	waitReasonTraceReaderBlocked:    "trace reader (blocked)",
	waitReasonWaitForGCCycle:        "wait for GC cycle",
	waitReasonGCWorkerIdle:          "GC worker (idle)",
}

func (w waitReason) String() string {
	if w < 0 || w >= waitReason(len(waitReasonStrings)) {
		return "unknown wait reason"
	}
	return waitReasonStrings[w]
}

var (
	allglen    uintptr
	allm       *m
	allp       []*p  // len(allp) == gomaxprocs; may change at safe points, otherwise immutable
	allpLock   mutex // Protects P-less reads of allp and all writes
	gomaxprocs int32
	ncpu       int32
	forcegc    forcegcstate
	sched      schedt
	newprocs   int32

	// Information about what cpu features are available.
	// Packages outside the runtime should not use these
	// as they are not an external api.
	// Set on startup in asm_{386,amd64,amd64p32}.s
	processorVersionInfo uint32
	isIntel              bool
	lfenceBeforeRdtsc    bool

	goarm                uint8 // set by cmd/link on arm systems
	framepointer_enabled bool  // set by cmd/link
)

// Set by the linker so the runtime can determine the buildmode.
var (
	islibrary bool // -buildmode=c-shared
	isarchive bool // -buildmode=c-archive
)
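
// exampleWaitReasonName is a hypothetical sketch, not part of the
// original file: a waitReason indexes waitReasonStrings, which is all the
// String method above does once the bounds check passes.
func exampleWaitReasonName() string {
	return waitReasonChanReceive.String() // "chan receive"
}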