Source file src/runtime/proc.go
1
2
3
4
5 package runtime
6
7 import (
8 "internal/cpu"
9 "runtime/internal/atomic"
10 "runtime/internal/sys"
11 "unsafe"
12 )
13
14 var buildVersion = sys.TheVersion
15
16
17 var modinfo string
18
19
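// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource required to execute Go code; an M must hold a P to run Go code.
//
// Design doc at https://golang.org/s/go11sched.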
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82 var (
83 m0 m
84 g0 g
85 raceprocctx0 uintptr
86 )
87
88
89 var runtime_inittask initTask
90
91
92 var main_inittask initTask
93
94
95
96
97
98 var main_init_done chan bool
99
100
101 func main_main()
102
103
104 var mainStarted bool
105
106
107 var runtimeInitTime int64
108
109
110 var initSigmask sigset
111
112
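// The main goroutine.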
113 func main() {
114 g := getg()
115
116
117
118 g.m.g0.racectx = 0
119
120
121
122
123 if sys.PtrSize == 8 {
124 maxstacksize = 1000000000
125 } else {
126 maxstacksize = 250000000
127 }
128
129
130 mainStarted = true
131
132 if GOARCH != "wasm" {
133 systemstack(func() {
134 newm(sysmon, nil)
135 })
136 }
137
138
139
140
141
142
143
144 lockOSThread()
145
146 if g.m != &m0 {
147 throw("runtime.main not on m0")
148 }
149
150 doInit(&runtime_inittask)
151 if nanotime() == 0 {
152 throw("nanotime returning zero")
153 }
154
155
156 needUnlock := true
157 defer func() {
158 if needUnlock {
159 unlockOSThread()
160 }
161 }()
162
163
164 runtimeInitTime = nanotime()
165
166 gcenable()
167
168 main_init_done = make(chan bool)
169 if iscgo {
170 if _cgo_thread_start == nil {
171 throw("_cgo_thread_start missing")
172 }
173 if GOOS != "windows" {
174 if _cgo_setenv == nil {
175 throw("_cgo_setenv missing")
176 }
177 if _cgo_unsetenv == nil {
178 throw("_cgo_unsetenv missing")
179 }
180 }
181 if _cgo_notify_runtime_init_done == nil {
182 throw("_cgo_notify_runtime_init_done missing")
183 }
184
185
186 startTemplateThread()
187 cgocall(_cgo_notify_runtime_init_done, nil)
188 }
189
190 doInit(&main_inittask)
191
192 close(main_init_done)
193
194 needUnlock = false
195 unlockOSThread()
196
197 if isarchive || islibrary {
198
199
200 return
201 }
202 fn := main_main
203 fn()
204 if raceenabled {
205 racefini()
206 }
207
208
209
210
211
212 if atomic.Load(&runningPanicDefers) != 0 {
213
214 for c := 0; c < 1000; c++ {
215 if atomic.Load(&runningPanicDefers) == 0 {
216 break
217 }
218 Gosched()
219 }
220 }
221 if atomic.Load(&panicking) != 0 {
222 gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
223 }
224
225 exit(0)
226 for {
227 var x *int32
228 *x = 0
229 }
230 }
231
232
233
234 func os_beforeExit() {
235 if raceenabled {
236 racefini()
237 }
238 }
239
240
241 func init() {
242 go forcegchelper()
243 }
244
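// forcegchelper is the forced-GC helper goroutine started by init above.
// It parks on forcegc.lock and, when woken by sysmon, starts a GC cycle
// with a time-based trigger.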
245 func forcegchelper() {
246 forcegc.g = getg()
247 for {
248 lock(&forcegc.lock)
249 if forcegc.idle != 0 {
250 throw("forcegc: phase error")
251 }
252 atomic.Store(&forcegc.idle, 1)
253 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceEvGoBlock, 1)
254
255 if debug.gctrace > 0 {
256 println("GC forced")
257 }
258
259 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
260 }
261 }
262
263
264
265
266
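// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.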
267 func Gosched() {
268 checkTimeouts()
269 mcall(gosched_m)
270 }
271
272
273
274
275 func goschedguarded() {
276 mcall(goschedguarded_m)
277 }
278
279
280
281
282
283
284
285
286
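// gopark puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed immediately. The goroutine
// can be made runnable again by calling goready(gp).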
287 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
288 if reason != waitReasonSleep {
289 checkTimeouts()
290 }
291 mp := acquirem()
292 gp := mp.curg
293 status := readgstatus(gp)
294 if status != _Grunning && status != _Gscanrunning {
295 throw("gopark: bad g status")
296 }
297 mp.waitlock = lock
298 mp.waitunlockf = unlockf
299 gp.waitreason = reason
300 mp.waittraceev = traceEv
301 mp.waittraceskip = traceskip
302 releasem(mp)
303
304 mcall(park_m)
305 }
306
307
308
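// goparkunlock puts the current goroutine into a waiting state and unlocks lock.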
309 func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
310 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
311 }
312
313 func goready(gp *g, traceskip int) {
314 systemstack(func() {
315 ready(gp, traceskip, true)
316 })
317 }
318
319
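// acquireSudog returns a sudog from the per-P cache, refilling the cache from
// the central sched.sudogcache (or allocating) when it is empty.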
320 func acquireSudog() *sudog {
321
322
323
324
325
326
327
328
329 mp := acquirem()
330 pp := mp.p.ptr()
331 if len(pp.sudogcache) == 0 {
332 lock(&sched.sudoglock)
333
334 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
335 s := sched.sudogcache
336 sched.sudogcache = s.next
337 s.next = nil
338 pp.sudogcache = append(pp.sudogcache, s)
339 }
340 unlock(&sched.sudoglock)
341
342 if len(pp.sudogcache) == 0 {
343 pp.sudogcache = append(pp.sudogcache, new(sudog))
344 }
345 }
346 n := len(pp.sudogcache)
347 s := pp.sudogcache[n-1]
348 pp.sudogcache[n-1] = nil
349 pp.sudogcache = pp.sudogcache[:n-1]
350 if s.elem != nil {
351 throw("acquireSudog: found s.elem != nil in cache")
352 }
353 releasem(mp)
354 return s
355 }
356
357
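// releaseSudog returns s to the per-P cache, spilling half of the cache to the
// central list when the local cache is full.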
358 func releaseSudog(s *sudog) {
359 if s.elem != nil {
360 throw("runtime: sudog with non-nil elem")
361 }
362 if s.isSelect {
363 throw("runtime: sudog with non-false isSelect")
364 }
365 if s.next != nil {
366 throw("runtime: sudog with non-nil next")
367 }
368 if s.prev != nil {
369 throw("runtime: sudog with non-nil prev")
370 }
371 if s.waitlink != nil {
372 throw("runtime: sudog with non-nil waitlink")
373 }
374 if s.c != nil {
375 throw("runtime: sudog with non-nil c")
376 }
377 gp := getg()
378 if gp.param != nil {
379 throw("runtime: releaseSudog with non-nil gp.param")
380 }
381 mp := acquirem()
382 pp := mp.p.ptr()
383 if len(pp.sudogcache) == cap(pp.sudogcache) {
384
385 var first, last *sudog
386 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
387 n := len(pp.sudogcache)
388 p := pp.sudogcache[n-1]
389 pp.sudogcache[n-1] = nil
390 pp.sudogcache = pp.sudogcache[:n-1]
391 if first == nil {
392 first = p
393 } else {
394 last.next = p
395 }
396 last = p
397 }
398 lock(&sched.sudoglock)
399 last.next = sched.sudogcache
400 sched.sudogcache = first
401 unlock(&sched.sudoglock)
402 }
403 pp.sudogcache = append(pp.sudogcache, s)
404 releasem(mp)
405 }
406
407
408
409
410
411
412
413
414
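// funcPC returns the entry PC of the function f. It assumes that f is a func
// value; otherwise the behavior is undefined.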
415 func funcPC(f interface{}) uintptr {
416 return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize))
417 }
418
419
420 func badmcall(fn func(*g)) {
421 throw("runtime: mcall called on m->g0 stack")
422 }
423
424 func badmcall2(fn func(*g)) {
425 throw("runtime: mcall function returned")
426 }
427
428 func badreflectcall() {
429 panic(plainError("arg size to reflect.call more than 1GB"))
430 }
431
432 var badmorestackg0Msg = "fatal: morestack on g0\n"
433
434
435
436 func badmorestackg0() {
437 sp := stringStructOf(&badmorestackg0Msg)
438 write(2, sp.str, int32(sp.len))
439 }
440
441 var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"
442
443
444
445 func badmorestackgsignal() {
446 sp := stringStructOf(&badmorestackgsignalMsg)
447 write(2, sp.str, int32(sp.len))
448 }
449
450
451 func badctxt() {
452 throw("ctxt != 0")
453 }
454
455 func lockedOSThread() bool {
456 gp := getg()
457 return gp.lockedm != 0 && gp.m.lockedg != 0
458 }
459
460 var (
461 allgs []*g
462 allglock mutex
463 )
464
465 func allgadd(gp *g) {
466 if readgstatus(gp) == _Gidle {
467 throw("allgadd: bad status Gidle")
468 }
469
470 lock(&allglock)
471 allgs = append(allgs, gp)
472 allglen = uintptr(len(allgs))
473 unlock(&allglock)
474 }
475
476 const (
477
478
479 _GoidCacheBatch = 16
480 )
481
482
483
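// cpuinit extracts the GODEBUG environment variable on Unix-like operating
// systems and calls internal/cpu.Initialize.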
484 func cpuinit() {
485 const prefix = "GODEBUG="
486 var env string
487
488 switch GOOS {
489 case "aix", "darwin", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
490 cpu.DebugOptions = true
491
492
493
494
495 n := int32(0)
496 for argv_index(argv, argc+1+n) != nil {
497 n++
498 }
499
500 for i := int32(0); i < n; i++ {
501 p := argv_index(argv, argc+1+i)
502 s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)}))
503
504 if hasPrefix(s, prefix) {
505 env = gostring(p)[len(prefix):]
506 break
507 }
508 }
509 }
510
511 cpu.Initialize(env)
512
513
514
515 x86HasPOPCNT = cpu.X86.HasPOPCNT
516 x86HasSSE41 = cpu.X86.HasSSE41
517
518 arm64HasATOMICS = cpu.ARM64.HasATOMICS
519 }
520
521
522
523
524
525
526
527
528
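// schedinit is part of the bootstrap sequence: osinit, schedinit, make & queue
// a new G, then call runtime·mstart; the new G calls runtime·main.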
529 func schedinit() {
530
531
532 _g_ := getg()
533 if raceenabled {
534 _g_.racectx, raceprocctx0 = raceinit()
535 }
536
537 sched.maxmcount = 10000
538
539 tracebackinit()
540 moduledataverify()
541 stackinit()
542 mallocinit()
543 mcommoninit(_g_.m)
544 cpuinit()
545 alginit()
546 modulesinit()
547 typelinksinit()
548 itabsinit()
549
550 msigsave(_g_.m)
551 initSigmask = _g_.m.sigmask
552
553 goargs()
554 goenvs()
555 parsedebugvars()
556 gcinit()
557
558 sched.lastpoll = uint64(nanotime())
559 procs := ncpu
560 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
561 procs = n
562 }
563 if procresize(procs) != nil {
564 throw("unknown runnable goroutine during bootstrap")
565 }
566
567
568
569
570 if debug.cgocheck > 1 {
571 writeBarrier.cgo = true
572 writeBarrier.enabled = true
573 for _, p := range allp {
574 p.wbBuf.reset()
575 }
576 }
577
578 if buildVersion == "" {
579
580
581 buildVersion = "unknown"
582 }
583 if len(modinfo) == 1 {
584
585
586 modinfo = ""
587 }
588 }
589
590 func dumpgstatus(gp *g) {
591 _g_ := getg()
592 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
593 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
594 }
595
596 func checkmcount() {
597
598 if mcount() > sched.maxmcount {
599 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
600 throw("thread exhaustion")
601 }
602 }
603
604 func mcommoninit(mp *m) {
605 _g_ := getg()
606
607
608 if _g_ != _g_.m.g0 {
609 callers(1, mp.createstack[:])
610 }
611
612 lock(&sched.lock)
613 if sched.mnext+1 < sched.mnext {
614 throw("runtime: thread ID overflow")
615 }
616 mp.id = sched.mnext
617 sched.mnext++
618 checkmcount()
619
620 mp.fastrand[0] = 1597334677 * uint32(mp.id)
621 mp.fastrand[1] = uint32(cputicks())
622 if mp.fastrand[0]|mp.fastrand[1] == 0 {
623 mp.fastrand[1] = 1
624 }
625
626 mpreinit(mp)
627 if mp.gsignal != nil {
628 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
629 }
630
631
632
633 mp.alllink = allm
634
635
636
637 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
638 unlock(&sched.lock)
639
640
641 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
642 mp.cgoCallers = new(cgoCallers)
643 }
644 }
645
646
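// ready marks gp ready to run.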
647 func ready(gp *g, traceskip int, next bool) {
648 if trace.enabled {
649 traceGoUnpark(gp, traceskip)
650 }
651
652 status := readgstatus(gp)
653
654
655 _g_ := getg()
656 mp := acquirem()
657 if status&^_Gscan != _Gwaiting {
658 dumpgstatus(gp)
659 throw("bad g->status in ready")
660 }
661
662
663 casgstatus(gp, _Gwaiting, _Grunnable)
664 runqput(_g_.m.p.ptr(), gp, next)
665 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
666 wakep()
667 }
668 releasem(mp)
669 }
670
671
672
673 const freezeStopWait = 0x7fffffff
674
675
676
677 var freezing uint32
678
679
680
681
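// freezetheworld is similar to stopTheWorld but best-effort and may be called
// several times. There is no reverse operation; it is used only while crashing.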
682 func freezetheworld() {
683 atomic.Store(&freezing, 1)
684
685
686
687 for i := 0; i < 5; i++ {
688
689 sched.stopwait = freezeStopWait
690 atomic.Store(&sched.gcwaiting, 1)
691
692 if !preemptall() {
693 break
694 }
695 usleep(1000)
696 }
697
698 usleep(1000)
699 preemptall()
700 usleep(1000)
701 }
702
703
704
705
706 func readgstatus(gp *g) uint32 {
707 return atomic.Load(&gp.atomicstatus)
708 }
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
727 success := false
728
729
730 switch oldval {
731 default:
732 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
733 dumpgstatus(gp)
734 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
735 case _Gscanrunnable,
736 _Gscanwaiting,
737 _Gscanrunning,
738 _Gscansyscall:
739 if newval == oldval&^_Gscan {
740 success = atomic.Cas(&gp.atomicstatus, oldval, newval)
741 }
742 }
743 if !success {
744 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
745 dumpgstatus(gp)
746 throw("casfrom_Gscanstatus: gp->status is not in scan state")
747 }
748 }
749
750
751
752 func castogscanstatus(gp *g, oldval, newval uint32) bool {
753 switch oldval {
754 case _Grunnable,
755 _Grunning,
756 _Gwaiting,
757 _Gsyscall:
758 if newval == oldval|_Gscan {
759 return atomic.Cas(&gp.atomicstatus, oldval, newval)
760 }
761 }
762 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
763 throw("castogscanstatus")
764 panic("not reached")
765 }
766
767
768
769
770
771
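// casgstatus changes gp's status from oldval to newval. If asked to move to or
// from a Gscan status it throws; use castogscanstatus and casfrom_Gscanstatus
// instead. casgstatus loops while gp.atomicstatus is held in a Gscan state.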
772 func casgstatus(gp *g, oldval, newval uint32) {
773 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
774 systemstack(func() {
775 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
776 throw("casgstatus: bad incoming values")
777 })
778 }
779
780 if oldval == _Grunning && gp.gcscanvalid {
781
782
783
784
785 systemstack(func() {
786 print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
787 throw("casgstatus")
788 })
789 }
790
791
792 const yieldDelay = 5 * 1000
793 var nextYield int64
794
795
796
797 for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
798 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
799 throw("casgstatus: waiting for Gwaiting but is Grunnable")
800 }
801
802
803
804
805
806
807
808
809 if i == 0 {
810 nextYield = nanotime() + yieldDelay
811 }
812 if nanotime() < nextYield {
813 for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
814 procyield(1)
815 }
816 } else {
817 osyield()
818 nextYield = nanotime() + yieldDelay/2
819 }
820 }
821 if newval == _Grunning {
822 gp.gcscanvalid = false
823 }
824 }
825
826
827
828
829
830
831
832 func casgcopystack(gp *g) uint32 {
833 for {
834 oldstatus := readgstatus(gp) &^ _Gscan
835 if oldstatus != _Gwaiting && oldstatus != _Grunnable {
836 throw("copystack: bad status, not Gwaiting or Grunnable")
837 }
838 if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
839 return oldstatus
840 }
841 }
842 }
843
844
845
846
847 func scang(gp *g, gcw *gcWork) {
848
849
850
851
852
853 gp.gcscandone = false
854
855
856 const yieldDelay = 10 * 1000
857 var nextYield int64
858
859
860
861
862
863
864 loop:
865 for i := 0; !gp.gcscandone; i++ {
866 switch s := readgstatus(gp); s {
867 default:
868 dumpgstatus(gp)
869 throw("stopg: invalid status")
870
871 case _Gdead:
872
873 gp.gcscandone = true
874 break loop
875
876 case _Gcopystack:
877
878
879 case _Grunnable, _Gsyscall, _Gwaiting:
880
881
882
883
884 if castogscanstatus(gp, s, s|_Gscan) {
885 if !gp.gcscandone {
886 scanstack(gp, gcw)
887 gp.gcscandone = true
888 }
889 restartg(gp)
890 break loop
891 }
892
893 case _Gscanwaiting:
894
895
896 case _Grunning:
897
898
899
900
901
902 if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt {
903 break
904 }
905
906
907 if castogscanstatus(gp, _Grunning, _Gscanrunning) {
908 if !gp.gcscandone {
909 gp.preemptscan = true
910 gp.preempt = true
911 gp.stackguard0 = stackPreempt
912 }
913 casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
914 }
915 }
916
917 if i == 0 {
918 nextYield = nanotime() + yieldDelay
919 }
920 if nanotime() < nextYield {
921 procyield(10)
922 } else {
923 osyield()
924 nextYield = nanotime() + yieldDelay/2
925 }
926 }
927
928 gp.preemptscan = false
929 }
930
931
932 func restartg(gp *g) {
933 s := readgstatus(gp)
934 switch s {
935 default:
936 dumpgstatus(gp)
937 throw("restartg: unexpected status")
938
939 case _Gdead:
940
941
942 case _Gscanrunnable,
943 _Gscanwaiting,
944 _Gscansyscall:
945 casfrom_Gscanstatus(gp, s, s&^_Gscan)
946 }
947 }
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
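// stopTheWorld stops all P's from executing goroutines, interrupting all
// goroutines at GC safe points, and records reason as the reason for the stop.
// startTheWorld undoes this.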
963 func stopTheWorld(reason string) {
964 semacquire(&worldsema)
965 getg().m.preemptoff = reason
966 systemstack(stopTheWorldWithSema)
967 }
968
969
970 func startTheWorld() {
971 systemstack(func() { startTheWorldWithSema(false) })
972
973
974 semrelease(&worldsema)
975 getg().m.preemptoff = ""
976 }
977
978
979
980 var worldsema uint32 = 1
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
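// stopTheWorldWithSema is the core implementation of stopTheWorld. The caller
// is responsible for acquiring worldsema and disabling preemption first, and
// must call it on the system stack.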
1004 func stopTheWorldWithSema() {
1005 _g_ := getg()
1006
1007
1008
1009 if _g_.m.locks > 0 {
1010 throw("stopTheWorld: holding locks")
1011 }
1012
1013 lock(&sched.lock)
1014 sched.stopwait = gomaxprocs
1015 atomic.Store(&sched.gcwaiting, 1)
1016 preemptall()
1017
1018 _g_.m.p.ptr().status = _Pgcstop
1019 sched.stopwait--
1020
1021 for _, p := range allp {
1022 s := p.status
1023 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
1024 if trace.enabled {
1025 traceGoSysBlock(p)
1026 traceProcStop(p)
1027 }
1028 p.syscalltick++
1029 sched.stopwait--
1030 }
1031 }
1032
1033 for {
1034 p := pidleget()
1035 if p == nil {
1036 break
1037 }
1038 p.status = _Pgcstop
1039 sched.stopwait--
1040 }
1041 wait := sched.stopwait > 0
1042 unlock(&sched.lock)
1043
1044
1045 if wait {
1046 for {
1047
1048 if notetsleep(&sched.stopnote, 100*1000) {
1049 noteclear(&sched.stopnote)
1050 break
1051 }
1052 preemptall()
1053 }
1054 }
1055
1056
1057 bad := ""
1058 if sched.stopwait != 0 {
1059 bad = "stopTheWorld: not stopped (stopwait != 0)"
1060 } else {
1061 for _, p := range allp {
1062 if p.status != _Pgcstop {
1063 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1064 }
1065 }
1066 }
1067 if atomic.Load(&freezing) != 0 {
1068
1069
1070
1071
1072 lock(&deadlock)
1073 lock(&deadlock)
1074 }
1075 if bad != "" {
1076 throw(bad)
1077 }
1078 }
1079
1080 func startTheWorldWithSema(emitTraceEvent bool) int64 {
1081 mp := acquirem()
1082 if netpollinited() {
1083 list := netpoll(false)
1084 injectglist(&list)
1085 }
1086 lock(&sched.lock)
1087
1088 procs := gomaxprocs
1089 if newprocs != 0 {
1090 procs = newprocs
1091 newprocs = 0
1092 }
1093 p1 := procresize(procs)
1094 sched.gcwaiting = 0
1095 if sched.sysmonwait != 0 {
1096 sched.sysmonwait = 0
1097 notewakeup(&sched.sysmonnote)
1098 }
1099 unlock(&sched.lock)
1100
1101 for p1 != nil {
1102 p := p1
1103 p1 = p1.link.ptr()
1104 if p.m != 0 {
1105 mp := p.m.ptr()
1106 p.m = 0
1107 if mp.nextp != 0 {
1108 throw("startTheWorld: inconsistent mp->nextp")
1109 }
1110 mp.nextp.set(p)
1111 notewakeup(&mp.park)
1112 } else {
1113
1114 newm(nil, p)
1115 }
1116 }
1117
1118
1119 startTime := nanotime()
1120 if emitTraceEvent {
1121 traceGCSTWDone()
1122 }
1123
1124
1125
1126
1127 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
1128 wakep()
1129 }
1130
1131 releasem(mp)
1132
1133 return startTime
1134 }
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
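// mstart is the entry point for new Ms. It sets up stack bounds for g0 when
// running on an OS-allocated stack, then calls mstart1 and finally mexit.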
1146 func mstart() {
1147 _g_ := getg()
1148
1149 osStack := _g_.stack.lo == 0
1150 if osStack {
1151
1152
1153
1154 size := _g_.stack.hi
1155 if size == 0 {
1156 size = 8192 * sys.StackGuardMultiplier
1157 }
1158 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1159 _g_.stack.lo = _g_.stack.hi - size + 1024
1160 }
1161
1162
1163 _g_.stackguard0 = _g_.stack.lo + _StackGuard
1164
1165
1166 _g_.stackguard1 = _g_.stackguard0
1167 mstart1()
1168
1169
1170 if GOOS == "windows" || GOOS == "solaris" || GOOS == "illumos" || GOOS == "plan9" || GOOS == "darwin" || GOOS == "aix" {
1171
1172
1173
1174 osStack = true
1175 }
1176 mexit(osStack)
1177 }
1178
1179 func mstart1() {
1180 _g_ := getg()
1181
1182 if _g_ != _g_.m.g0 {
1183 throw("bad runtime·mstart")
1184 }
1185
1186
1187
1188
1189
1190 save(getcallerpc(), getcallersp())
1191 asminit()
1192 minit()
1193
1194
1195
1196 if _g_.m == &m0 {
1197 mstartm0()
1198 }
1199
1200 if fn := _g_.m.mstartfn; fn != nil {
1201 fn()
1202 }
1203
1204 if _g_.m != &m0 {
1205 acquirep(_g_.m.nextp.ptr())
1206 _g_.m.nextp = 0
1207 }
1208 schedule()
1209 }
1210
1211
1212
1213
1214
1215
1216
1217 func mstartm0() {
1218
1219
1220
1221 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1222 cgoHasExtraM = true
1223 newextram()
1224 }
1225 initsig(false)
1226 }
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
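// mexit tears down and exits the current thread. If osStack is true, the
// thread's stack is owned by the OS and is not freed here.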
1238 func mexit(osStack bool) {
1239 g := getg()
1240 m := g.m
1241
1242 if m == &m0 {
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254 handoffp(releasep())
1255 lock(&sched.lock)
1256 sched.nmfreed++
1257 checkdead()
1258 unlock(&sched.lock)
1259 notesleep(&m.park)
1260 throw("locked m0 woke up")
1261 }
1262
1263 sigblock()
1264 unminit()
1265
1266
1267 if m.gsignal != nil {
1268 stackfree(m.gsignal.stack)
1269 }
1270
1271
1272 lock(&sched.lock)
1273 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1274 if *pprev == m {
1275 *pprev = m.alllink
1276 goto found
1277 }
1278 }
1279 throw("m not found in allm")
1280 found:
1281 if !osStack {
1282
1283
1284
1285
1286 atomic.Store(&m.freeWait, 1)
1287
1288
1289
1290
1291 m.freelink = sched.freem
1292 sched.freem = m
1293 }
1294 unlock(&sched.lock)
1295
1296
1297 handoffp(releasep())
1298
1299
1300
1301
1302
1303 lock(&sched.lock)
1304 sched.nmfreed++
1305 checkdead()
1306 unlock(&sched.lock)
1307
1308 if osStack {
1309
1310
1311 return
1312 }
1313
1314
1315
1316
1317
1318 exitThread(&m.freeWait)
1319 }
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
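// forEachP calls fn(p) for every P when that P reaches a GC safe point. It
// blocks until fn has run on every P, including idle ones and those in
// system calls.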
1332 func forEachP(fn func(*p)) {
1333 mp := acquirem()
1334 _p_ := getg().m.p.ptr()
1335
1336 lock(&sched.lock)
1337 if sched.safePointWait != 0 {
1338 throw("forEachP: sched.safePointWait != 0")
1339 }
1340 sched.safePointWait = gomaxprocs - 1
1341 sched.safePointFn = fn
1342
1343
1344 for _, p := range allp {
1345 if p != _p_ {
1346 atomic.Store(&p.runSafePointFn, 1)
1347 }
1348 }
1349 preemptall()
1350
1351
1352
1353
1354
1355
1356
1357 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
1358 if atomic.Cas(&p.runSafePointFn, 1, 0) {
1359 fn(p)
1360 sched.safePointWait--
1361 }
1362 }
1363
1364 wait := sched.safePointWait > 0
1365 unlock(&sched.lock)
1366
1367
1368 fn(_p_)
1369
1370
1371
1372 for _, p := range allp {
1373 s := p.status
1374 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
1375 if trace.enabled {
1376 traceGoSysBlock(p)
1377 traceProcStop(p)
1378 }
1379 p.syscalltick++
1380 handoffp(p)
1381 }
1382 }
1383
1384
1385 if wait {
1386 for {
1387
1388
1389
1390
1391 if notetsleep(&sched.safePointNote, 100*1000) {
1392 noteclear(&sched.safePointNote)
1393 break
1394 }
1395 preemptall()
1396 }
1397 }
1398 if sched.safePointWait != 0 {
1399 throw("forEachP: not done")
1400 }
1401 for _, p := range allp {
1402 if p.runSafePointFn != 0 {
1403 throw("forEachP: P did not run fn")
1404 }
1405 }
1406
1407 lock(&sched.lock)
1408 sched.safePointFn = nil
1409 unlock(&sched.lock)
1410 releasem(mp)
1411 }
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424 func runSafePointFn() {
1425 p := getg().m.p.ptr()
1426
1427
1428
1429 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
1430 return
1431 }
1432 sched.safePointFn(p)
1433 lock(&sched.lock)
1434 sched.safePointWait--
1435 if sched.safePointWait == 0 {
1436 notewakeup(&sched.safePointNote)
1437 }
1438 unlock(&sched.lock)
1439 }
1440
1441
1442
1443
1444 var cgoThreadStart unsafe.Pointer
1445
1446 type cgothreadstart struct {
1447 g guintptr
1448 tls *uint64
1449 fn unsafe.Pointer
1450 }
1451
1452
1453
1454
1455
1456
1457
1458
1459
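// allocm allocates a new m unassociated with any thread. It can use _p_ for
// allocation context if needed; fn is recorded as the new m's mstartfn.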
1460 func allocm(_p_ *p, fn func()) *m {
1461 _g_ := getg()
1462 acquirem()
1463 if _g_.m.p == 0 {
1464 acquirep(_p_)
1465 }
1466
1467
1468
1469 if sched.freem != nil {
1470 lock(&sched.lock)
1471 var newList *m
1472 for freem := sched.freem; freem != nil; {
1473 if freem.freeWait != 0 {
1474 next := freem.freelink
1475 freem.freelink = newList
1476 newList = freem
1477 freem = next
1478 continue
1479 }
1480 stackfree(freem.g0.stack)
1481 freem = freem.freelink
1482 }
1483 sched.freem = newList
1484 unlock(&sched.lock)
1485 }
1486
1487 mp := new(m)
1488 mp.mstartfn = fn
1489 mcommoninit(mp)
1490
1491
1492
1493 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" || GOOS == "plan9" || GOOS == "darwin" {
1494 mp.g0 = malg(-1)
1495 } else {
1496 mp.g0 = malg(8192 * sys.StackGuardMultiplier)
1497 }
1498 mp.g0.m = mp
1499
1500 if _p_ == _g_.m.p.ptr() {
1501 releasep()
1502 }
1503 releasem(_g_.m)
1504
1505 return mp
1506 }
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
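// needm is called when a cgo callback arrives on a thread that was not created
// by Go. It grabs an extra m from the list maintained by oneNewExtraM and
// initializes it enough to run Go code.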
1542 func needm(x byte) {
1543 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1544
1545
1546
1547
1548
1549
1550 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
1551 exit(1)
1552 }
1553
1554
1555
1556
1557
1558 mp := lockextra(false)
1559
1560
1561
1562
1563
1564
1565
1566
1567 mp.needextram = mp.schedlink == 0
1568 extraMCount--
1569 unlockextra(mp.schedlink.ptr())
1570
1571
1572
1573
1574
1575
1576
1577 msigsave(mp)
1578 sigblock()
1579
1580
1581
1582
1583
1584
1585 setg(mp.g0)
1586 _g_ := getg()
1587 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
1588 _g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
1589 _g_.stackguard0 = _g_.stack.lo + _StackGuard
1590
1591
1592 asminit()
1593 minit()
1594
1595
1596 casgstatus(mp.curg, _Gdead, _Gsyscall)
1597 atomic.Xadd(&sched.ngsys, -1)
1598 }
1599
1600 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
1601
1602
1603
1604
1605 func newextram() {
1606 c := atomic.Xchg(&extraMWaiters, 0)
1607 if c > 0 {
1608 for i := uint32(0); i < c; i++ {
1609 oneNewExtraM()
1610 }
1611 } else {
1612
1613 mp := lockextra(true)
1614 unlockextra(mp)
1615 if mp == nil {
1616 oneNewExtraM()
1617 }
1618 }
1619 }
1620
1621
1622 func oneNewExtraM() {
1623
1624
1625
1626
1627
1628 mp := allocm(nil, nil)
1629 gp := malg(4096)
1630 gp.sched.pc = funcPC(goexit) + sys.PCQuantum
1631 gp.sched.sp = gp.stack.hi
1632 gp.sched.sp -= 4 * sys.RegSize
1633 gp.sched.lr = 0
1634 gp.sched.g = guintptr(unsafe.Pointer(gp))
1635 gp.syscallpc = gp.sched.pc
1636 gp.syscallsp = gp.sched.sp
1637 gp.stktopsp = gp.sched.sp
1638 gp.gcscanvalid = true
1639 gp.gcscandone = true
1640
1641
1642
1643
1644 casgstatus(gp, _Gidle, _Gdead)
1645 gp.m = mp
1646 mp.curg = gp
1647 mp.lockedInt++
1648 mp.lockedg.set(gp)
1649 gp.lockedm.set(mp)
1650 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
1651 if raceenabled {
1652 gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum)
1653 }
1654
1655 allgadd(gp)
1656
1657
1658
1659
1660
1661 atomic.Xadd(&sched.ngsys, +1)
1662
1663
1664 mnext := lockextra(true)
1665 mp.schedlink.set(mnext)
1666 extraMCount++
1667 unlockextra(mp)
1668 }
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
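// dropm puts the current extra m back on the extra list after a cgo callback
// has finished, undoing the effect of needm.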
1693 func dropm() {
1694
1695
1696
1697 mp := getg().m
1698
1699
1700 casgstatus(mp.curg, _Gsyscall, _Gdead)
1701 atomic.Xadd(&sched.ngsys, +1)
1702
1703
1704
1705
1706
1707 sigmask := mp.sigmask
1708 sigblock()
1709 unminit()
1710
1711 mnext := lockextra(true)
1712 extraMCount++
1713 mp.schedlink.set(mnext)
1714
1715 setg(nil)
1716
1717
1718 unlockextra(mp)
1719
1720 msigrestore(sigmask)
1721 }
1722
1723
1724 func getm() uintptr {
1725 return uintptr(unsafe.Pointer(getg().m))
1726 }
1727
1728 var extram uintptr
1729 var extraMCount uint32
1730 var extraMWaiters uint32
1731
1732
1733
1734
1735
1736
1737
1738 func lockextra(nilokay bool) *m {
1739 const locked = 1
1740
1741 incr := false
1742 for {
1743 old := atomic.Loaduintptr(&extram)
1744 if old == locked {
1745 yield := osyield
1746 yield()
1747 continue
1748 }
1749 if old == 0 && !nilokay {
1750 if !incr {
1751
1752
1753
1754 atomic.Xadd(&extraMWaiters, 1)
1755 incr = true
1756 }
1757 usleep(1)
1758 continue
1759 }
1760 if atomic.Casuintptr(&extram, old, locked) {
1761 return (*m)(unsafe.Pointer(old))
1762 }
1763 yield := osyield
1764 yield()
1765 continue
1766 }
1767 }
1768
1769
1770 func unlockextra(mp *m) {
1771 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
1772 }
1773
1774
1775
1776 var execLock rwmutex
1777
1778
1779
1780
1781 var newmHandoff struct {
1782 lock mutex
1783
1784
1785
1786 newm muintptr
1787
1788
1789
1790 waiting bool
1791 wake note
1792
1793
1794
1795
1796 haveTemplateThread uint32
1797 }
1798
1799
1800
1801
1802
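// newm creates a new m that starts with a call to fn, or the scheduler if fn
// is nil. On a locked or cgo thread it hands creation off to the template
// thread instead of calling newosproc directly.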
1803 func newm(fn func(), _p_ *p) {
1804 mp := allocm(_p_, fn)
1805 mp.nextp.set(_p_)
1806 mp.sigmask = initSigmask
1807 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819 lock(&newmHandoff.lock)
1820 if newmHandoff.haveTemplateThread == 0 {
1821 throw("on a locked thread with no template thread")
1822 }
1823 mp.schedlink = newmHandoff.newm
1824 newmHandoff.newm.set(mp)
1825 if newmHandoff.waiting {
1826 newmHandoff.waiting = false
1827 notewakeup(&newmHandoff.wake)
1828 }
1829 unlock(&newmHandoff.lock)
1830 return
1831 }
1832 newm1(mp)
1833 }
1834
1835 func newm1(mp *m) {
1836 if iscgo {
1837 var ts cgothreadstart
1838 if _cgo_thread_start == nil {
1839 throw("_cgo_thread_start missing")
1840 }
1841 ts.g.set(mp.g0)
1842 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
1843 ts.fn = unsafe.Pointer(funcPC(mstart))
1844 if msanenabled {
1845 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
1846 }
1847 execLock.rlock()
1848 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
1849 execLock.runlock()
1850 return
1851 }
1852 execLock.rlock()
1853 newosproc(mp)
1854 execLock.runlock()
1855 }
1856
1857
1858
1859
1860
1861 func startTemplateThread() {
1862 if GOARCH == "wasm" {
1863 return
1864 }
1865 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
1866 return
1867 }
1868 newm(templateThread, nil)
1869 }
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883 func templateThread() {
1884 lock(&sched.lock)
1885 sched.nmsys++
1886 checkdead()
1887 unlock(&sched.lock)
1888
1889 for {
1890 lock(&newmHandoff.lock)
1891 for newmHandoff.newm != 0 {
1892 newm := newmHandoff.newm.ptr()
1893 newmHandoff.newm = 0
1894 unlock(&newmHandoff.lock)
1895 for newm != nil {
1896 next := newm.schedlink.ptr()
1897 newm.schedlink = 0
1898 newm1(newm)
1899 newm = next
1900 }
1901 lock(&newmHandoff.lock)
1902 }
1903 newmHandoff.waiting = true
1904 noteclear(&newmHandoff.wake)
1905 unlock(&newmHandoff.lock)
1906 notesleep(&newmHandoff.wake)
1907 }
1908 }
1909
1910
1911
1912 func stopm() {
1913 _g_ := getg()
1914
1915 if _g_.m.locks != 0 {
1916 throw("stopm holding locks")
1917 }
1918 if _g_.m.p != 0 {
1919 throw("stopm holding p")
1920 }
1921 if _g_.m.spinning {
1922 throw("stopm spinning")
1923 }
1924
1925 lock(&sched.lock)
1926 mput(_g_.m)
1927 unlock(&sched.lock)
1928 notesleep(&_g_.m.park)
1929 noteclear(&_g_.m.park)
1930 acquirep(_g_.m.nextp.ptr())
1931 _g_.m.nextp = 0
1932 }
1933
1934 func mspinning() {
1935
1936 getg().m.spinning = true
1937 }
1938
1939
1940
1941
1942
1943
1944
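// startm schedules some M to run _p_ (creating an M if necessary). If _p_ is
// nil it tries to get an idle P; if there is none it does nothing. If spinning
// is set, the caller has incremented nmspinning and startm either decrements
// it or sets m.spinning in the started M.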
1945 func startm(_p_ *p, spinning bool) {
1946 lock(&sched.lock)
1947 if _p_ == nil {
1948 _p_ = pidleget()
1949 if _p_ == nil {
1950 unlock(&sched.lock)
1951 if spinning {
1952
1953
1954 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
1955 throw("startm: negative nmspinning")
1956 }
1957 }
1958 return
1959 }
1960 }
1961 mp := mget()
1962 unlock(&sched.lock)
1963 if mp == nil {
1964 var fn func()
1965 if spinning {
1966
1967 fn = mspinning
1968 }
1969 newm(fn, _p_)
1970 return
1971 }
1972 if mp.spinning {
1973 throw("startm: m is spinning")
1974 }
1975 if mp.nextp != 0 {
1976 throw("startm: m has p")
1977 }
1978 if spinning && !runqempty(_p_) {
1979 throw("startm: p has runnable gs")
1980 }
1981
1982 mp.spinning = spinning
1983 mp.nextp.set(_p_)
1984 notewakeup(&mp.park)
1985 }
1986
1987
1988
1989
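// handoffp hands off _p_ from a syscall or locked M. It always runs without a
// P, so write barriers are not allowed.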
1990 func handoffp(_p_ *p) {
1991
1992
1993
1994
1995 if !runqempty(_p_) || sched.runqsize != 0 {
1996 startm(_p_, false)
1997 return
1998 }
1999
2000 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
2001 startm(_p_, false)
2002 return
2003 }
2004
2005
2006 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) {
2007 startm(_p_, true)
2008 return
2009 }
2010 lock(&sched.lock)
2011 if sched.gcwaiting != 0 {
2012 _p_.status = _Pgcstop
2013 sched.stopwait--
2014 if sched.stopwait == 0 {
2015 notewakeup(&sched.stopnote)
2016 }
2017 unlock(&sched.lock)
2018 return
2019 }
2020 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
2021 sched.safePointFn(_p_)
2022 sched.safePointWait--
2023 if sched.safePointWait == 0 {
2024 notewakeup(&sched.safePointNote)
2025 }
2026 }
2027 if sched.runqsize != 0 {
2028 unlock(&sched.lock)
2029 startm(_p_, false)
2030 return
2031 }
2032
2033
2034 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
2035 unlock(&sched.lock)
2036 startm(_p_, false)
2037 return
2038 }
2039 pidleput(_p_)
2040 unlock(&sched.lock)
2041 }
2042
2043
2044
2045 func wakep() {
2046
2047 if !atomic.Cas(&sched.nmspinning, 0, 1) {
2048 return
2049 }
2050 startm(nil, true)
2051 }
2052
2053
2054
2055 func stoplockedm() {
2056 _g_ := getg()
2057
2058 if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m {
2059 throw("stoplockedm: inconsistent locking")
2060 }
2061 if _g_.m.p != 0 {
2062
2063 _p_ := releasep()
2064 handoffp(_p_)
2065 }
2066 incidlelocked(1)
2067
2068 notesleep(&_g_.m.park)
2069 noteclear(&_g_.m.park)
2070 status := readgstatus(_g_.m.lockedg.ptr())
2071 if status&^_Gscan != _Grunnable {
2072 print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
2073 dumpgstatus(_g_)
2074 throw("stoplockedm: not runnable")
2075 }
2076 acquirep(_g_.m.nextp.ptr())
2077 _g_.m.nextp = 0
2078 }
2079
2080
2081
2082
2083 func startlockedm(gp *g) {
2084 _g_ := getg()
2085
2086 mp := gp.lockedm.ptr()
2087 if mp == _g_.m {
2088 throw("startlockedm: locked to me")
2089 }
2090 if mp.nextp != 0 {
2091 throw("startlockedm: m has p")
2092 }
2093
2094 incidlelocked(-1)
2095 _p_ := releasep()
2096 mp.nextp.set(_p_)
2097 notewakeup(&mp.park)
2098 stopm()
2099 }
2100
2101
2102
2103 func gcstopm() {
2104 _g_ := getg()
2105
2106 if sched.gcwaiting == 0 {
2107 throw("gcstopm: not waiting for gc")
2108 }
2109 if _g_.m.spinning {
2110 _g_.m.spinning = false
2111
2112
2113 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2114 throw("gcstopm: negative nmspinning")
2115 }
2116 }
2117 _p_ := releasep()
2118 lock(&sched.lock)
2119 _p_.status = _Pgcstop
2120 sched.stopwait--
2121 if sched.stopwait == 0 {
2122 notewakeup(&sched.stopnote)
2123 }
2124 unlock(&sched.lock)
2125 stopm()
2126 }
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
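// execute schedules gp to run on the current M. If inheritTime is true, gp
// inherits the remaining time in the current time slice; otherwise it starts a
// new time slice. execute never returns.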
2137 func execute(gp *g, inheritTime bool) {
2138 _g_ := getg()
2139
2140 casgstatus(gp, _Grunnable, _Grunning)
2141 gp.waitsince = 0
2142 gp.preempt = false
2143 gp.stackguard0 = gp.stack.lo + _StackGuard
2144 if !inheritTime {
2145 _g_.m.p.ptr().schedtick++
2146 }
2147 _g_.m.curg = gp
2148 gp.m = _g_.m
2149
2150
2151 hz := sched.profilehz
2152 if _g_.m.profilehz != hz {
2153 setThreadCPUProfiler(hz)
2154 }
2155
2156 if trace.enabled {
2157
2158
2159 if gp.syscallsp != 0 && gp.sysblocktraced {
2160 traceGoSysExit(gp.sysexitticks)
2161 }
2162 traceGoStart()
2163 }
2164
2165 gogo(&gp.sched)
2166 }
2167
2168
2169
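// findrunnable finds a runnable goroutine to execute. It tries the local and
// global run queues, the network poller, and stealing from other P's, and
// blocks in stopm if nothing is found.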
2170 func findrunnable() (gp *g, inheritTime bool) {
2171 _g_ := getg()
2172
2173
2174
2175
2176
2177 top:
2178 _p_ := _g_.m.p.ptr()
2179 if sched.gcwaiting != 0 {
2180 gcstopm()
2181 goto top
2182 }
2183 if _p_.runSafePointFn != 0 {
2184 runSafePointFn()
2185 }
2186 if fingwait && fingwake {
2187 if gp := wakefing(); gp != nil {
2188 ready(gp, 0, true)
2189 }
2190 }
2191 if *cgo_yield != nil {
2192 asmcgocall(*cgo_yield, nil)
2193 }
2194
2195
2196 if gp, inheritTime := runqget(_p_); gp != nil {
2197 return gp, inheritTime
2198 }
2199
2200
2201 if sched.runqsize != 0 {
2202 lock(&sched.lock)
2203 gp := globrunqget(_p_, 0)
2204 unlock(&sched.lock)
2205 if gp != nil {
2206 return gp, false
2207 }
2208 }
2209
2210
2211
2212
2213
2214
2215
2216
2217 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
2218 if list := netpoll(false); !list.empty() {
2219 gp := list.pop()
2220 injectglist(&list)
2221 casgstatus(gp, _Gwaiting, _Grunnable)
2222 if trace.enabled {
2223 traceGoUnpark(gp, 0)
2224 }
2225 return gp, false
2226 }
2227 }
2228
2229
2230 procs := uint32(gomaxprocs)
2231 if atomic.Load(&sched.npidle) == procs-1 {
2232
2233
2234
2235 goto stop
2236 }
2237
2238
2239
2240 if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) {
2241 goto stop
2242 }
2243 if !_g_.m.spinning {
2244 _g_.m.spinning = true
2245 atomic.Xadd(&sched.nmspinning, 1)
2246 }
2247 for i := 0; i < 4; i++ {
2248 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
2249 if sched.gcwaiting != 0 {
2250 goto top
2251 }
2252 stealRunNextG := i > 2
2253 if gp := runqsteal(_p_, allp[enum.position()], stealRunNextG); gp != nil {
2254 return gp, false
2255 }
2256 }
2257 }
2258
2259 stop:
2260
2261
2262
2263
2264 if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) {
2265 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2266 gp := _p_.gcBgMarkWorker.ptr()
2267 casgstatus(gp, _Gwaiting, _Grunnable)
2268 if trace.enabled {
2269 traceGoUnpark(gp, 0)
2270 }
2271 return gp, false
2272 }
2273
2274
2275
2276
2277 if beforeIdle() {
2278
2279 goto top
2280 }
2281
2282
2283
2284
2285
2286 allpSnapshot := allp
2287
2288
2289 lock(&sched.lock)
2290 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
2291 unlock(&sched.lock)
2292 goto top
2293 }
2294 if sched.runqsize != 0 {
2295 gp := globrunqget(_p_, 0)
2296 unlock(&sched.lock)
2297 return gp, false
2298 }
2299 if releasep() != _p_ {
2300 throw("findrunnable: wrong p")
2301 }
2302 pidleput(_p_)
2303 unlock(&sched.lock)
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318 wasSpinning := _g_.m.spinning
2319 if _g_.m.spinning {
2320 _g_.m.spinning = false
2321 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2322 throw("findrunnable: negative nmspinning")
2323 }
2324 }
2325
2326
2327 for _, _p_ := range allpSnapshot {
2328 if !runqempty(_p_) {
2329 lock(&sched.lock)
2330 _p_ = pidleget()
2331 unlock(&sched.lock)
2332 if _p_ != nil {
2333 acquirep(_p_)
2334 if wasSpinning {
2335 _g_.m.spinning = true
2336 atomic.Xadd(&sched.nmspinning, 1)
2337 }
2338 goto top
2339 }
2340 break
2341 }
2342 }
2343
2344
2345 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(nil) {
2346 lock(&sched.lock)
2347 _p_ = pidleget()
2348 if _p_ != nil && _p_.gcBgMarkWorker == 0 {
2349 pidleput(_p_)
2350 _p_ = nil
2351 }
2352 unlock(&sched.lock)
2353 if _p_ != nil {
2354 acquirep(_p_)
2355 if wasSpinning {
2356 _g_.m.spinning = true
2357 atomic.Xadd(&sched.nmspinning, 1)
2358 }
2359
2360 goto stop
2361 }
2362 }
2363
2364
2365 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
2366 if _g_.m.p != 0 {
2367 throw("findrunnable: netpoll with p")
2368 }
2369 if _g_.m.spinning {
2370 throw("findrunnable: netpoll with spinning")
2371 }
2372 list := netpoll(true)
2373 atomic.Store64(&sched.lastpoll, uint64(nanotime()))
2374 if !list.empty() {
2375 lock(&sched.lock)
2376 _p_ = pidleget()
2377 unlock(&sched.lock)
2378 if _p_ != nil {
2379 acquirep(_p_)
2380 gp := list.pop()
2381 injectglist(&list)
2382 casgstatus(gp, _Gwaiting, _Grunnable)
2383 if trace.enabled {
2384 traceGoUnpark(gp, 0)
2385 }
2386 return gp, false
2387 }
2388 injectglist(&list)
2389 }
2390 }
2391 stopm()
2392 goto top
2393 }
2394
2395
2396
2397
2398
2399 func pollWork() bool {
2400 if sched.runqsize != 0 {
2401 return true
2402 }
2403 p := getg().m.p.ptr()
2404 if !runqempty(p) {
2405 return true
2406 }
2407 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
2408 if list := netpoll(false); !list.empty() {
2409 injectglist(&list)
2410 return true
2411 }
2412 }
2413 return false
2414 }
2415
2416 func resetspinning() {
2417 _g_ := getg()
2418 if !_g_.m.spinning {
2419 throw("resetspinning: not a spinning m")
2420 }
2421 _g_.m.spinning = false
2422 nmspinning := atomic.Xadd(&sched.nmspinning, -1)
2423 if int32(nmspinning) < 0 {
2424 throw("findrunnable: negative nmspinning")
2425 }
2426
2427
2428
2429 if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 {
2430 wakep()
2431 }
2432 }
2433
2434
2435
2436 func injectglist(glist *gList) {
2437 if glist.empty() {
2438 return
2439 }
2440 if trace.enabled {
2441 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
2442 traceGoUnpark(gp, 0)
2443 }
2444 }
2445 lock(&sched.lock)
2446 var n int
2447 for n = 0; !glist.empty(); n++ {
2448 gp := glist.pop()
2449 casgstatus(gp, _Gwaiting, _Grunnable)
2450 globrunqput(gp)
2451 }
2452 unlock(&sched.lock)
2453 for ; n != 0 && sched.npidle != 0; n-- {
2454 startm(nil, false)
2455 }
2456 *glist = gList{}
2457 }
2458
2459
2460
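// schedule performs one round of scheduling: find a runnable goroutine and
// execute it. It never returns.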
2461 func schedule() {
2462 _g_ := getg()
2463
2464 if _g_.m.locks != 0 {
2465 throw("schedule: holding locks")
2466 }
2467
2468 if _g_.m.lockedg != 0 {
2469 stoplockedm()
2470 execute(_g_.m.lockedg.ptr(), false)
2471 }
2472
2473
2474
2475 if _g_.m.incgo {
2476 throw("schedule: in cgo")
2477 }
2478
2479 top:
2480 if sched.gcwaiting != 0 {
2481 gcstopm()
2482 goto top
2483 }
2484 if _g_.m.p.ptr().runSafePointFn != 0 {
2485 runSafePointFn()
2486 }
2487
2488 var gp *g
2489 var inheritTime bool
2490
2491
2492
2493
2494 tryWakeP := false
2495 if trace.enabled || trace.shutdown {
2496 gp = traceReader()
2497 if gp != nil {
2498 casgstatus(gp, _Gwaiting, _Grunnable)
2499 traceGoUnpark(gp, 0)
2500 tryWakeP = true
2501 }
2502 }
2503 if gp == nil && gcBlackenEnabled != 0 {
2504 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
2505 tryWakeP = tryWakeP || gp != nil
2506 }
2507 if gp == nil {
2508
2509
2510
2511 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
2512 lock(&sched.lock)
2513 gp = globrunqget(_g_.m.p.ptr(), 1)
2514 unlock(&sched.lock)
2515 }
2516 }
2517 if gp == nil {
2518 gp, inheritTime = runqget(_g_.m.p.ptr())
2519 if gp != nil && _g_.m.spinning {
2520 throw("schedule: spinning with local work")
2521 }
2522 }
2523 if gp == nil {
2524 gp, inheritTime = findrunnable()
2525 }
2526
2527
2528
2529
2530 if _g_.m.spinning {
2531 resetspinning()
2532 }
2533
2534 if sched.disable.user && !schedEnabled(gp) {
2535
2536
2537
2538 lock(&sched.lock)
2539 if schedEnabled(gp) {
2540
2541
2542 unlock(&sched.lock)
2543 } else {
2544 sched.disable.runnable.pushBack(gp)
2545 sched.disable.n++
2546 unlock(&sched.lock)
2547 goto top
2548 }
2549 }
2550
2551
2552
2553 if tryWakeP {
2554 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
2555 wakep()
2556 }
2557 }
2558 if gp.lockedm != 0 {
2559
2560
2561 startlockedm(gp)
2562 goto top
2563 }
2564
2565 execute(gp, inheritTime)
2566 }
2567
2568
2569
2570
2571
2572
2573
2574
2575 func dropg() {
2576 _g_ := getg()
2577
2578 setMNoWB(&_g_.m.curg.m, nil)
2579 setGNoWB(&_g_.m.curg, nil)
2580 }
2581
2582 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
2583 unlock((*mutex)(lock))
2584 return true
2585 }
2586
2587
2588 func park_m(gp *g) {
2589 _g_ := getg()
2590
2591 if trace.enabled {
2592 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
2593 }
2594
2595 casgstatus(gp, _Grunning, _Gwaiting)
2596 dropg()
2597
2598 if fn := _g_.m.waitunlockf; fn != nil {
2599 ok := fn(gp, _g_.m.waitlock)
2600 _g_.m.waitunlockf = nil
2601 _g_.m.waitlock = nil
2602 if !ok {
2603 if trace.enabled {
2604 traceGoUnpark(gp, 2)
2605 }
2606 casgstatus(gp, _Gwaiting, _Grunnable)
2607 execute(gp, true)
2608 }
2609 }
2610 schedule()
2611 }
2612
2613 func goschedImpl(gp *g) {
2614 status := readgstatus(gp)
2615 if status&^_Gscan != _Grunning {
2616 dumpgstatus(gp)
2617 throw("bad g status")
2618 }
2619 casgstatus(gp, _Grunning, _Grunnable)
2620 dropg()
2621 lock(&sched.lock)
2622 globrunqput(gp)
2623 unlock(&sched.lock)
2624
2625 schedule()
2626 }
2627
2628
2629 func gosched_m(gp *g) {
2630 if trace.enabled {
2631 traceGoSched()
2632 }
2633 goschedImpl(gp)
2634 }
2635
2636
2637 func goschedguarded_m(gp *g) {
2638
2639 if gp.m.locks != 0 || gp.m.mallocing != 0 || gp.m.preemptoff != "" || gp.m.p.ptr().status != _Prunning {
2640 gogo(&gp.sched)
2641 }
2642
2643 if trace.enabled {
2644 traceGoSched()
2645 }
2646 goschedImpl(gp)
2647 }
2648
2649 func gopreempt_m(gp *g) {
2650 if trace.enabled {
2651 traceGoPreempt()
2652 }
2653 goschedImpl(gp)
2654 }
2655
2656
2657 func goexit1() {
2658 if raceenabled {
2659 racegoend()
2660 }
2661 if trace.enabled {
2662 traceGoEnd()
2663 }
2664 mcall(goexit0)
2665 }
2666
2667
2668 func goexit0(gp *g) {
2669 _g_ := getg()
2670
2671 casgstatus(gp, _Grunning, _Gdead)
2672 if isSystemGoroutine(gp, false) {
2673 atomic.Xadd(&sched.ngsys, -1)
2674 }
2675 gp.m = nil
2676 locked := gp.lockedm != 0
2677 gp.lockedm = 0
2678 _g_.m.lockedg = 0
2679 gp.paniconfault = false
2680 gp._defer = nil
2681 gp._panic = nil
2682 gp.writebuf = nil
2683 gp.waitreason = 0
2684 gp.param = nil
2685 gp.labels = nil
2686 gp.timer = nil
2687
2688 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
2689
2690
2691
2692 scanCredit := int64(gcController.assistWorkPerByte * float64(gp.gcAssistBytes))
2693 atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
2694 gp.gcAssistBytes = 0
2695 }
2696
2697
2698
2699 gp.gcscanvalid = true
2700 dropg()
2701
2702 if GOARCH == "wasm" {
2703 gfput(_g_.m.p.ptr(), gp)
2704 schedule()
2705 }
2706
2707 if _g_.m.lockedInt != 0 {
2708 print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
2709 throw("internal lockOSThread error")
2710 }
2711 gfput(_g_.m.p.ptr(), gp)
2712 if locked {
2713
2714
2715
2716
2717
2718
2719 if GOOS != "plan9" {
2720 gogo(&_g_.m.g0.sched)
2721 } else {
2722
2723
2724 _g_.m.lockedExt = 0
2725 }
2726 }
2727 schedule()
2728 }
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738 func save(pc, sp uintptr) {
2739 _g_ := getg()
2740
2741 _g_.sched.pc = pc
2742 _g_.sched.sp = sp
2743 _g_.sched.lr = 0
2744 _g_.sched.ret = 0
2745 _g_.sched.g = guintptr(unsafe.Pointer(_g_))
2746
2747
2748
2749 if _g_.sched.ctxt != nil {
2750 badctxt()
2751 }
2752 }
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
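// reentersyscall records that the goroutine is about to enter a system call:
// it saves pc/sp, moves the G to _Gsyscall, and marks the P _Psyscall so that
// sysmon can retake it if the call blocks.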
2791 func reentersyscall(pc, sp uintptr) {
2792 _g_ := getg()
2793
2794
2795
2796 _g_.m.locks++
2797
2798
2799
2800
2801
2802 _g_.stackguard0 = stackPreempt
2803 _g_.throwsplit = true
2804
2805
2806 save(pc, sp)
2807 _g_.syscallsp = sp
2808 _g_.syscallpc = pc
2809 casgstatus(_g_, _Grunning, _Gsyscall)
2810 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
2811 systemstack(func() {
2812 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
2813 throw("entersyscall")
2814 })
2815 }
2816
2817 if trace.enabled {
2818 systemstack(traceGoSysCall)
2819
2820
2821
2822 save(pc, sp)
2823 }
2824
2825 if atomic.Load(&sched.sysmonwait) != 0 {
2826 systemstack(entersyscall_sysmon)
2827 save(pc, sp)
2828 }
2829
2830 if _g_.m.p.ptr().runSafePointFn != 0 {
2831
2832 systemstack(runSafePointFn)
2833 save(pc, sp)
2834 }
2835
2836 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
2837 _g_.sysblocktraced = true
2838 _g_.m.mcache = nil
2839 pp := _g_.m.p.ptr()
2840 pp.m = 0
2841 _g_.m.oldp.set(pp)
2842 _g_.m.p = 0
2843 atomic.Store(&pp.status, _Psyscall)
2844 if sched.gcwaiting != 0 {
2845 systemstack(entersyscall_gcwait)
2846 save(pc, sp)
2847 }
2848
2849 _g_.m.locks--
2850 }
2851
2852
2853
2854
2855
2856
2857
2858 func entersyscall() {
2859 reentersyscall(getcallerpc(), getcallersp())
2860 }
2861
2862 func entersyscall_sysmon() {
2863 lock(&sched.lock)
2864 if atomic.Load(&sched.sysmonwait) != 0 {
2865 atomic.Store(&sched.sysmonwait, 0)
2866 notewakeup(&sched.sysmonnote)
2867 }
2868 unlock(&sched.lock)
2869 }
2870
2871 func entersyscall_gcwait() {
2872 _g_ := getg()
2873 _p_ := _g_.m.oldp.ptr()
2874
2875 lock(&sched.lock)
2876 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
2877 if trace.enabled {
2878 traceGoSysBlock(_p_)
2879 traceProcStop(_p_)
2880 }
2881 _p_.syscalltick++
2882 if sched.stopwait--; sched.stopwait == 0 {
2883 notewakeup(&sched.stopnote)
2884 }
2885 }
2886 unlock(&sched.lock)
2887 }
2888
2889
2890
2891 func entersyscallblock() {
2892 _g_ := getg()
2893
2894 _g_.m.locks++
2895 _g_.throwsplit = true
2896 _g_.stackguard0 = stackPreempt
2897 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
2898 _g_.sysblocktraced = true
2899 _g_.m.p.ptr().syscalltick++
2900
2901
2902 pc := getcallerpc()
2903 sp := getcallersp()
2904 save(pc, sp)
2905 _g_.syscallsp = _g_.sched.sp
2906 _g_.syscallpc = _g_.sched.pc
2907 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
2908 sp1 := sp
2909 sp2 := _g_.sched.sp
2910 sp3 := _g_.syscallsp
2911 systemstack(func() {
2912 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
2913 throw("entersyscallblock")
2914 })
2915 }
2916 casgstatus(_g_, _Grunning, _Gsyscall)
2917 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
2918 systemstack(func() {
2919 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
2920 throw("entersyscallblock")
2921 })
2922 }
2923
2924 systemstack(entersyscallblock_handoff)
2925
2926
2927 save(getcallerpc(), getcallersp())
2928
2929 _g_.m.locks--
2930 }
2931
2932 func entersyscallblock_handoff() {
2933 if trace.enabled {
2934 traceGoSysCall()
2935 traceGoSysBlock(getg().m.p.ptr())
2936 }
2937 handoffp(releasep())
2938 }
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
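// exitsyscall is called when the goroutine exits its system call. It tries the
// fast path (reacquire the old P or grab an idle P); otherwise it parks via
// exitsyscall0 on the scheduler stack.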
2952 func exitsyscall() {
2953 _g_ := getg()
2954
2955 _g_.m.locks++
2956 if getcallersp() > _g_.syscallsp {
2957 throw("exitsyscall: syscall frame is no longer valid")
2958 }
2959
2960 _g_.waitsince = 0
2961 oldp := _g_.m.oldp.ptr()
2962 _g_.m.oldp = 0
2963 if exitsyscallfast(oldp) {
2964 if _g_.m.mcache == nil {
2965 throw("lost mcache")
2966 }
2967 if trace.enabled {
2968 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
2969 systemstack(traceGoStart)
2970 }
2971 }
2972
2973 _g_.m.p.ptr().syscalltick++
2974
2975 casgstatus(_g_, _Gsyscall, _Grunning)
2976
2977
2978
2979 _g_.syscallsp = 0
2980 _g_.m.locks--
2981 if _g_.preempt {
2982
2983 _g_.stackguard0 = stackPreempt
2984 } else {
2985
2986 _g_.stackguard0 = _g_.stack.lo + _StackGuard
2987 }
2988 _g_.throwsplit = false
2989
2990 if sched.disable.user && !schedEnabled(_g_) {
2991
2992 Gosched()
2993 }
2994
2995 return
2996 }
2997
2998 _g_.sysexitticks = 0
2999 if trace.enabled {
3000
3001
3002 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
3003 osyield()
3004 }
3005
3006
3007
3008
3009 _g_.sysexitticks = cputicks()
3010 }
3011
3012 _g_.m.locks--
3013
3014
3015 mcall(exitsyscall0)
3016
3017 if _g_.m.mcache == nil {
3018 throw("lost mcache")
3019 }
3020
3021
3022
3023
3024
3025
3026
3027 _g_.syscallsp = 0
3028 _g_.m.p.ptr().syscalltick++
3029 _g_.throwsplit = false
3030 }
3031
3032
3033 func exitsyscallfast(oldp *p) bool {
3034 _g_ := getg()
3035
3036
3037 if sched.stopwait == freezeStopWait {
3038 return false
3039 }
3040
3041
3042 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
3043
3044 wirep(oldp)
3045 exitsyscallfast_reacquired()
3046 return true
3047 }
3048
3049
3050 if sched.pidle != 0 {
3051 var ok bool
3052 systemstack(func() {
3053 ok = exitsyscallfast_pidle()
3054 if ok && trace.enabled {
3055 if oldp != nil {
3056
3057
3058 for oldp.syscalltick == _g_.m.syscalltick {
3059 osyield()
3060 }
3061 }
3062 traceGoSysExit(0)
3063 }
3064 })
3065 if ok {
3066 return true
3067 }
3068 }
3069 return false
3070 }
3071
3072
3073
3074
3075
3076
3077 func exitsyscallfast_reacquired() {
3078 _g_ := getg()
3079 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
3080 if trace.enabled {
3081
3082
3083
3084 systemstack(func() {
3085
3086 traceGoSysBlock(_g_.m.p.ptr())
3087
3088 traceGoSysExit(0)
3089 })
3090 }
3091 _g_.m.p.ptr().syscalltick++
3092 }
3093 }
3094
3095 func exitsyscallfast_pidle() bool {
3096 lock(&sched.lock)
3097 _p_ := pidleget()
3098 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
3099 atomic.Store(&sched.sysmonwait, 0)
3100 notewakeup(&sched.sysmonnote)
3101 }
3102 unlock(&sched.lock)
3103 if _p_ != nil {
3104 acquirep(_p_)
3105 return true
3106 }
3107 return false
3108 }
3109
3110
3111
3112
3113
3114 func exitsyscall0(gp *g) {
3115 _g_ := getg()
3116
3117 casgstatus(gp, _Gsyscall, _Grunnable)
3118 dropg()
3119 lock(&sched.lock)
3120 var _p_ *p
3121 if schedEnabled(_g_) {
3122 _p_ = pidleget()
3123 }
3124 if _p_ == nil {
3125 globrunqput(gp)
3126 } else if atomic.Load(&sched.sysmonwait) != 0 {
3127 atomic.Store(&sched.sysmonwait, 0)
3128 notewakeup(&sched.sysmonnote)
3129 }
3130 unlock(&sched.lock)
3131 if _p_ != nil {
3132 acquirep(_p_)
3133 execute(gp, false)
3134 }
3135 if _g_.m.lockedg != 0 {
3136
3137 stoplockedm()
3138 execute(gp, false)
3139 }
3140 stopm()
3141 schedule()
3142 }
3143
3144 func beforefork() {
3145 gp := getg().m.curg
3146
3147
3148
3149
3150 gp.m.locks++
3151 msigsave(gp.m)
3152 sigblock()
3153
3154
3155
3156
3157
3158 gp.stackguard0 = stackFork
3159 }
3160
3161
3162
3163
3164 func syscall_runtime_BeforeFork() {
3165 systemstack(beforefork)
3166 }
3167
3168 func afterfork() {
3169 gp := getg().m.curg
3170
3171
3172 gp.stackguard0 = gp.stack.lo + _StackGuard
3173
3174 msigrestore(gp.m.sigmask)
3175
3176 gp.m.locks--
3177 }
3178
3179
3180
3181
3182 func syscall_runtime_AfterFork() {
3183 systemstack(afterfork)
3184 }
3185
3186
3187
3188 var inForkedChild bool
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201 func syscall_runtime_AfterForkInChild() {
3202
3203
3204
3205
3206 inForkedChild = true
3207
3208 clearSignalHandlers()
3209
3210
3211
3212 msigrestore(getg().m.sigmask)
3213
3214 inForkedChild = false
3215 }
3216
3217
3218
3219 func syscall_runtime_BeforeExec() {
3220
3221 execLock.lock()
3222 }
3223
3224
3225
3226 func syscall_runtime_AfterExec() {
3227 execLock.unlock()
3228 }
3229
3230
3231 func malg(stacksize int32) *g {
3232 newg := new(g)
3233 if stacksize >= 0 {
3234 stacksize = round2(_StackSystem + stacksize)
3235 systemstack(func() {
3236 newg.stack = stackalloc(uint32(stacksize))
3237 })
3238 newg.stackguard0 = newg.stack.lo + _StackGuard
3239 newg.stackguard1 = ^uintptr(0)
3240 }
3241 return newg
3242 }
3243
3244
3245
3246
3247
3248
3249
3250
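// newproc creates a new g running fn with siz bytes of arguments. The compiler
// turns a go statement into a call to this.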
3251 func newproc(siz int32, fn *funcval) {
3252 argp := add(unsafe.Pointer(&fn), sys.PtrSize)
3253 gp := getg()
3254 pc := getcallerpc()
3255 systemstack(func() {
3256 newproc1(fn, (*uint8)(argp), siz, gp, pc)
3257 })
3258 }
3259
3260
3261
3262
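// newproc1 creates a new g running fn with narg bytes of arguments starting at
// argp. callerpc is the address of the go statement that created this; the new
// g is put on the run queue of the current P.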
3263 func newproc1(fn *funcval, argp *uint8, narg int32, callergp *g, callerpc uintptr) {
3264 _g_ := getg()
3265
3266 if fn == nil {
3267 _g_.m.throwing = -1
3268 throw("go of nil func value")
3269 }
3270 acquirem()
3271 siz := narg
3272 siz = (siz + 7) &^ 7
3273
3274
3275
3276
3277
3278 if siz >= _StackMin-4*sys.RegSize-sys.RegSize {
3279 throw("newproc: function arguments too large for new goroutine")
3280 }
3281
3282 _p_ := _g_.m.p.ptr()
3283 newg := gfget(_p_)
3284 if newg == nil {
3285 newg = malg(_StackMin)
3286 casgstatus(newg, _Gidle, _Gdead)
3287 allgadd(newg)
3288 }
3289 if newg.stack.hi == 0 {
3290 throw("newproc1: newg missing stack")
3291 }
3292
3293 if readgstatus(newg) != _Gdead {
3294 throw("newproc1: new g is not Gdead")
3295 }
3296
3297 totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize
3298 totalSize += -totalSize & (sys.SpAlign - 1)
3299 sp := newg.stack.hi - totalSize
3300 spArg := sp
3301 if usesLR {
3302
3303 *(*uintptr)(unsafe.Pointer(sp)) = 0
3304 prepGoExitFrame(sp)
3305 spArg += sys.MinFrameSize
3306 }
3307 if narg > 0 {
3308 memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg))
3309
3310
3311
3312
3313
3314
3315 if writeBarrier.needed && !_g_.m.curg.gcscandone {
3316 f := findfunc(fn.fn)
3317 stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
3318 if stkmap.nbit > 0 {
3319
3320 bv := stackmapdata(stkmap, 0)
3321 bulkBarrierBitmap(spArg, spArg, uintptr(bv.n)*sys.PtrSize, 0, bv.bytedata)
3322 }
3323 }
3324 }
3325
3326 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
3327 newg.sched.sp = sp
3328 newg.stktopsp = sp
3329 newg.sched.pc = funcPC(goexit) + sys.PCQuantum
3330 newg.sched.g = guintptr(unsafe.Pointer(newg))
3331 gostartcallfn(&newg.sched, fn)
3332 newg.gopc = callerpc
3333 newg.ancestors = saveAncestors(callergp)
3334 newg.startpc = fn.fn
3335 if _g_.m.curg != nil {
3336 newg.labels = _g_.m.curg.labels
3337 }
3338 if isSystemGoroutine(newg, false) {
3339 atomic.Xadd(&sched.ngsys, +1)
3340 }
3341 newg.gcscanvalid = false
3342 casgstatus(newg, _Gdead, _Grunnable)
3343
3344 if _p_.goidcache == _p_.goidcacheend {
3345
3346
3347
3348 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
3349 _p_.goidcache -= _GoidCacheBatch - 1
3350 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
3351 }
3352 newg.goid = int64(_p_.goidcache)
3353 _p_.goidcache++
3354 if raceenabled {
3355 newg.racectx = racegostart(callerpc)
3356 }
3357 if trace.enabled {
3358 traceGoCreate(newg, newg.startpc)
3359 }
3360 runqput(_p_, newg, true)
3361
3362 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted {
3363 wakep()
3364 }
3365 releasem(_g_.m)
3366 }
3367
3368
3369
3370
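// saveAncestors builds the ancestor traceback records for a goroutine created
// by callergp: the caller's own creation site plus up to
// debug.tracebackancestors of its ancestors. It returns nil when ancestor
// tracking is disabled.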
3371 func saveAncestors(callergp *g) *[]ancestorInfo {
3372
3373 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
3374 return nil
3375 }
3376 var callerAncestors []ancestorInfo
3377 if callergp.ancestors != nil {
3378 callerAncestors = *callergp.ancestors
3379 }
3380 n := int32(len(callerAncestors)) + 1
3381 if n > debug.tracebackancestors {
3382 n = debug.tracebackancestors
3383 }
3384 ancestors := make([]ancestorInfo, n)
3385 copy(ancestors[1:], callerAncestors)
3386
3387 var pcs [_TracebackMaxFrames]uintptr
3388 npcs := gcallers(callergp, 0, pcs[:])
3389 ipcs := make([]uintptr, npcs)
3390 copy(ipcs, pcs[:])
3391 ancestors[0] = ancestorInfo{
3392 pcs: ipcs,
3393 goid: callergp.goid,
3394 gopc: callergp.gopc,
3395 }
3396
3397 ancestorsp := new([]ancestorInfo)
3398 *ancestorsp = ancestors
3399 return ancestorsp
3400 }
3401
3402
3403
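// gfput puts a dead goroutine on _p_'s local free list of gs.
// Non-standard stack sizes are freed immediately. If the local list grows
// past 64 entries, it is trimmed back to 32 by moving the excess onto the
// global sched.gFree lists.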
3404 func gfput(_p_ *p, gp *g) {
3405 if readgstatus(gp) != _Gdead {
3406 throw("gfput: bad status (not Gdead)")
3407 }
3408
3409 stksize := gp.stack.hi - gp.stack.lo
3410
3411 if stksize != _FixedStack {
3412
3413 stackfree(gp.stack)
3414 gp.stack.lo = 0
3415 gp.stack.hi = 0
3416 gp.stackguard0 = 0
3417 }
3418
3419 _p_.gFree.push(gp)
3420 _p_.gFree.n++
3421 if _p_.gFree.n >= 64 {
3422 lock(&sched.gFree.lock)
3423 for _p_.gFree.n >= 32 {
3424 _p_.gFree.n--
3425 gp = _p_.gFree.pop()
3426 if gp.stack.lo == 0 {
3427 sched.gFree.noStack.push(gp)
3428 } else {
3429 sched.gFree.stack.push(gp)
3430 }
3431 sched.gFree.n++
3432 }
3433 unlock(&sched.gFree.lock)
3434 }
3435 }
3436
3437
3438
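// gfget gets a g from _p_'s free list, refilling the local list from the
// global sched.gFree lists when it is empty. It returns nil if no free g is
// available. A cached g without a stack gets a fresh fixed-size stack.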
3439 func gfget(_p_ *p) *g {
3440 retry:
3441 if _p_.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
3442 lock(&sched.gFree.lock)
3443
3444 for _p_.gFree.n < 32 {
3445
3446 gp := sched.gFree.stack.pop()
3447 if gp == nil {
3448 gp = sched.gFree.noStack.pop()
3449 if gp == nil {
3450 break
3451 }
3452 }
3453 sched.gFree.n--
3454 _p_.gFree.push(gp)
3455 _p_.gFree.n++
3456 }
3457 unlock(&sched.gFree.lock)
3458 goto retry
3459 }
3460 gp := _p_.gFree.pop()
3461 if gp == nil {
3462 return nil
3463 }
3464 _p_.gFree.n--
3465 if gp.stack.lo == 0 {
3466
3467 systemstack(func() {
3468 gp.stack = stackalloc(_FixedStack)
3469 })
3470 gp.stackguard0 = gp.stack.lo + _StackGuard
3471 } else {
3472 if raceenabled {
3473 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
3474 }
3475 if msanenabled {
3476 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
3477 }
3478 }
3479 return gp
3480 }
3481
3482
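// gfpurge moves all of _p_'s cached free gs onto the global sched.gFree lists.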
3483 func gfpurge(_p_ *p) {
3484 lock(&sched.gFree.lock)
3485 for !_p_.gFree.empty() {
3486 gp := _p_.gFree.pop()
3487 _p_.gFree.n--
3488 if gp.stack.lo == 0 {
3489 sched.gFree.noStack.push(gp)
3490 } else {
3491 sched.gFree.stack.push(gp)
3492 }
3493 sched.gFree.n++
3494 }
3495 unlock(&sched.gFree.lock)
3496 }
3497
3498
3499 func Breakpoint() {
3500 breakpoint()
3501 }
3502
3503
3504
3505
3506
3507 func dolockOSThread() {
3508 if GOARCH == "wasm" {
3509 return
3510 }
3511 _g_ := getg()
3512 _g_.m.lockedg.set(_g_)
3513 _g_.lockedm.set(_g_.m)
3514 }
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
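// LockOSThread wires the calling goroutine to its current operating system
// thread: the goroutine always executes on that thread, and no other
// goroutine does, until the matching number of UnlockOSThread calls is made.
// If no template thread is running yet (and GOOS is not plan9), one is
// started first. Nested calls are counted in lockedExt; overflow panics.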
3532 func LockOSThread() {
3533 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
3534
3535
3536
3537 startTemplateThread()
3538 }
3539 _g_ := getg()
3540 _g_.m.lockedExt++
3541 if _g_.m.lockedExt == 0 {
3542 _g_.m.lockedExt--
3543 panic("LockOSThread nesting overflow")
3544 }
3545 dolockOSThread()
3546 }
3547
3548
3549 func lockOSThread() {
3550 getg().m.lockedInt++
3551 dolockOSThread()
3552 }
3553
3554
3555
3556
3557
3558 func dounlockOSThread() {
3559 if GOARCH == "wasm" {
3560 return
3561 }
3562 _g_ := getg()
3563 if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
3564 return
3565 }
3566 _g_.m.lockedg = 0
3567 _g_.lockedm = 0
3568 }
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
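// UnlockOSThread undoes an earlier call to LockOSThread.
// With no outstanding external lock it is a no-op; otherwise it decrements
// the external lock count and, once both the external and internal counts
// reach zero, unwires the goroutine from its thread.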
3584 func UnlockOSThread() {
3585 _g_ := getg()
3586 if _g_.m.lockedExt == 0 {
3587 return
3588 }
3589 _g_.m.lockedExt--
3590 dounlockOSThread()
3591 }
3592
3593
3594 func unlockOSThread() {
3595 _g_ := getg()
3596 if _g_.m.lockedInt == 0 {
3597 systemstack(badunlockosthread)
3598 }
3599 _g_.m.lockedInt--
3600 dounlockOSThread()
3601 }
3602
3603 func badunlockosthread() {
3604 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
3605 }
3606
3607 func gcount() int32 {
3608 n := int32(allglen) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
3609 for _, _p_ := range allp {
3610 n -= _p_.gFree.n
3611 }
3612
3613
3614
3615 if n < 1 {
3616 n = 1
3617 }
3618 return n
3619 }
3620
3621 func mcount() int32 {
3622 return int32(sched.mnext - sched.nmfreed)
3623 }
3624
3625 var prof struct {
3626 signalLock uint32
3627 hz int32
3628 }
3629
3630 func _System() { _System() }
3631 func _ExternalCode() { _ExternalCode() }
3632 func _LostExternalCode() { _LostExternalCode() }
3633 func _GC() { _GC() }
3634 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
3635 func _VDSO() { _VDSO() }
3636
3637
3638
3639
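// sigprof records a CPU profile sample for gp on a SIGPROF tick. It tries a
// normal traceback first, falls back to the saved cgo, libcall, or VDSO state
// when the goroutine cannot be walked, and as a last resort attributes the
// sample to the _System, _GC, _ExternalCode, or _VDSO placeholder functions.
// Profiling must not allocate; m.mallocing is incremented to trap that.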
3640 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
3641 if prof.hz == 0 {
3642 return
3643 }
3644
3645
3646
3647
3648
3649
3650
3651 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
3652 if f := findfunc(pc); f.valid() {
3653 if hasPrefix(funcname(f), "runtime/internal/atomic") {
3654 cpuprof.lostAtomic++
3655 return
3656 }
3657 }
3658 }
3659
3660
3661
3662
3663
3664
3665
3666 getg().m.mallocing++
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733 traceback := true
3734 if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) || (mp != nil && mp.vdsoSP != 0) {
3735 traceback = false
3736 }
3737 var stk [maxCPUProfStack]uintptr
3738 n := 0
3739 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
3740 cgoOff := 0
3741
3742
3743
3744
3745
3746 if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
3747 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
3748 cgoOff++
3749 }
3750 copy(stk[:], mp.cgoCallers[:cgoOff])
3751 mp.cgoCallers[0] = 0
3752 }
3753
3754
3755 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0)
3756 if n > 0 {
3757 n += cgoOff
3758 }
3759 } else if traceback {
3760 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
3761 }
3762
3763 if n <= 0 {
3764
3765
3766 n = 0
3767 if (GOOS == "windows" || GOOS == "solaris" || GOOS == "illumos" || GOOS == "darwin" || GOOS == "aix") && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
3768
3769
3770 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
3771 }
3772 if n == 0 && mp != nil && mp.vdsoSP != 0 {
3773 n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
3774 }
3775 if n == 0 {
3776
3777 n = 2
3778 if inVDSOPage(pc) {
3779 pc = funcPC(_VDSO) + sys.PCQuantum
3780 } else if pc > firstmoduledata.etext {
3781
3782 pc = funcPC(_ExternalCode) + sys.PCQuantum
3783 }
3784 stk[0] = pc
3785 if mp.preemptoff != "" {
3786 stk[1] = funcPC(_GC) + sys.PCQuantum
3787 } else {
3788 stk[1] = funcPC(_System) + sys.PCQuantum
3789 }
3790 }
3791 }
3792
3793 if prof.hz != 0 {
3794 cpuprof.add(gp, stk[:n])
3795 }
3796 getg().m.mallocing--
3797 }
3798
3799
3800
3801
3802 var sigprofCallers cgoCallers
3803 var sigprofCallersUse uint32
3804
3805
3806
3807
3808
3809
3810
3811 func sigprofNonGo() {
3812 if prof.hz != 0 {
3813 n := 0
3814 for n < len(sigprofCallers) && sigprofCallers[n] != 0 {
3815 n++
3816 }
3817 cpuprof.addNonGo(sigprofCallers[:n])
3818 }
3819
3820 atomic.Store(&sigprofCallersUse, 0)
3821 }
3822
3823
3824
3825
3826
3827
3828 func sigprofNonGoPC(pc uintptr) {
3829 if prof.hz != 0 {
3830 stk := []uintptr{
3831 pc,
3832 funcPC(_ExternalCode) + sys.PCQuantum,
3833 }
3834 cpuprof.addNonGo(stk)
3835 }
3836 }
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848 func setsSP(pc uintptr) bool {
3849 f := findfunc(pc)
3850 if !f.valid() {
3851
3852
3853 return true
3854 }
3855 switch f.funcID {
3856 case funcID_gogo, funcID_systemstack, funcID_mcall, funcID_morestack:
3857 return true
3858 }
3859 return false
3860 }
3861
3862
3863
3864 func setcpuprofilerate(hz int32) {
3865
3866 if hz < 0 {
3867 hz = 0
3868 }
3869
3870
3871
3872 _g_ := getg()
3873 _g_.m.locks++
3874
3875
3876
3877
3878 setThreadCPUProfiler(0)
3879
3880 for !atomic.Cas(&prof.signalLock, 0, 1) {
3881 osyield()
3882 }
3883 if prof.hz != hz {
3884 setProcessCPUProfiler(hz)
3885 prof.hz = hz
3886 }
3887 atomic.Store(&prof.signalLock, 0)
3888
3889 lock(&sched.lock)
3890 sched.profilehz = hz
3891 unlock(&sched.lock)
3892
3893 if hz != 0 {
3894 setThreadCPUProfiler(hz)
3895 }
3896
3897 _g_.m.locks--
3898 }
3899
3900
3901
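// init initializes pp, which may be a freshly allocated p or one previously
// destroyed, and transitions it to status _Pgcstop.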
3902 func (pp *p) init(id int32) {
3903 pp.id = id
3904 pp.status = _Pgcstop
3905 pp.sudogcache = pp.sudogbuf[:0]
3906 for i := range pp.deferpool {
3907 pp.deferpool[i] = pp.deferpoolbuf[i][:0]
3908 }
3909 pp.wbBuf.reset()
3910 if pp.mcache == nil {
3911 if id == 0 {
3912 if getg().m.mcache == nil {
3913 throw("missing mcache?")
3914 }
3915 pp.mcache = getg().m.mcache
3916 } else {
3917 pp.mcache = allocmcache()
3918 }
3919 }
3920 if raceenabled && pp.raceprocctx == 0 {
3921 if id == 0 {
3922 pp.raceprocctx = raceprocctx0
3923 raceprocctx0 = 0
3924 } else {
3925 pp.raceprocctx = raceproccreate()
3926 }
3927 }
3928 }
3929
3930
3931
3932
3933
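// destroy releases the resources associated with pp and transitions it to
// _Pdead: queued goroutines move to the global run queue, the GC background
// mark worker is made runnable again, caches and defer pools are flushed,
// and the free g list is purged. The caller must hold sched.lock and the
// world must be stopped.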
3934 func (pp *p) destroy() {
3935
3936 for pp.runqhead != pp.runqtail {
3937
3938 pp.runqtail--
3939 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
3940
3941 globrunqputhead(gp)
3942 }
3943 if pp.runnext != 0 {
3944 globrunqputhead(pp.runnext.ptr())
3945 pp.runnext = 0
3946 }
3947
3948
3949 if gp := pp.gcBgMarkWorker.ptr(); gp != nil {
3950 casgstatus(gp, _Gwaiting, _Grunnable)
3951 if trace.enabled {
3952 traceGoUnpark(gp, 0)
3953 }
3954 globrunqput(gp)
3955
3956
3957 pp.gcBgMarkWorker.set(nil)
3958 }
3959
3960 if gcphase != _GCoff {
3961 wbBufFlush1(pp)
3962 pp.gcw.dispose()
3963 }
3964 for i := range pp.sudogbuf {
3965 pp.sudogbuf[i] = nil
3966 }
3967 pp.sudogcache = pp.sudogbuf[:0]
3968 for i := range pp.deferpool {
3969 for j := range pp.deferpoolbuf[i] {
3970 pp.deferpoolbuf[i][j] = nil
3971 }
3972 pp.deferpool[i] = pp.deferpoolbuf[i][:0]
3973 }
3974 freemcache(pp.mcache)
3975 pp.mcache = nil
3976 gfpurge(pp)
3977 traceProcFree(pp)
3978 if raceenabled {
3979 raceprocdestroy(pp.raceprocctx)
3980 pp.raceprocctx = 0
3981 }
3982 pp.gcAssistTime = 0
3983 pp.status = _Pdead
3984 }
3985
3986
3987
3988
3989
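// procresize changes the number of Ps to nprocs, growing or shrinking allp
// as needed. New Ps are initialized, removed Ps are destroyed, and the
// current M is re-wired to a valid P. It returns a linked list of Ps with
// local work, which the caller must schedule. Called with sched.lock held
// and the world stopped.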
3990 func procresize(nprocs int32) *p {
3991 old := gomaxprocs
3992 if old < 0 || nprocs <= 0 {
3993 throw("procresize: invalid arg")
3994 }
3995 if trace.enabled {
3996 traceGomaxprocs(nprocs)
3997 }
3998
3999
4000 now := nanotime()
4001 if sched.procresizetime != 0 {
4002 sched.totaltime += int64(old) * (now - sched.procresizetime)
4003 }
4004 sched.procresizetime = now
4005
4006
4007 if nprocs > int32(len(allp)) {
4008
4009
4010 lock(&allpLock)
4011 if nprocs <= int32(cap(allp)) {
4012 allp = allp[:nprocs]
4013 } else {
4014 nallp := make([]*p, nprocs)
4015
4016
4017 copy(nallp, allp[:cap(allp)])
4018 allp = nallp
4019 }
4020 unlock(&allpLock)
4021 }
4022
4023
4024 for i := old; i < nprocs; i++ {
4025 pp := allp[i]
4026 if pp == nil {
4027 pp = new(p)
4028 }
4029 pp.init(i)
4030 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
4031 }
4032
4033 _g_ := getg()
4034 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
4035
4036 _g_.m.p.ptr().status = _Prunning
4037 _g_.m.p.ptr().mcache.prepareForSweep()
4038 } else {
4039
4040
4041
4042
4043
4044 if _g_.m.p != 0 {
4045 if trace.enabled {
4046
4047
4048
4049 traceGoSched()
4050 traceProcStop(_g_.m.p.ptr())
4051 }
4052 _g_.m.p.ptr().m = 0
4053 }
4054 _g_.m.p = 0
4055 _g_.m.mcache = nil
4056 p := allp[0]
4057 p.m = 0
4058 p.status = _Pidle
4059 acquirep(p)
4060 if trace.enabled {
4061 traceGoStart()
4062 }
4063 }
4064
4065
4066 for i := nprocs; i < old; i++ {
4067 p := allp[i]
4068 p.destroy()
4069
4070 }
4071
4072
4073 if int32(len(allp)) != nprocs {
4074 lock(&allpLock)
4075 allp = allp[:nprocs]
4076 unlock(&allpLock)
4077 }
4078
4079 var runnablePs *p
4080 for i := nprocs - 1; i >= 0; i-- {
4081 p := allp[i]
4082 if _g_.m.p.ptr() == p {
4083 continue
4084 }
4085 p.status = _Pidle
4086 if runqempty(p) {
4087 pidleput(p)
4088 } else {
4089 p.m.set(mget())
4090 p.link.set(runnablePs)
4091 runnablePs = p
4092 }
4093 }
4094 stealOrder.reset(uint32(nprocs))
4095 var int32p *int32 = &gomaxprocs
4096 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
4097 return runnablePs
4098 }
4099
4100
4101
4102
4103
4104
4105
4106 func acquirep(_p_ *p) {
4107
4108 wirep(_p_)
4109
4110
4111
4112
4113
4114 _p_.mcache.prepareForSweep()
4115
4116 if trace.enabled {
4117 traceProcStart()
4118 }
4119 }
4120
4121
4122
4123
4124
4125
4126
4127 func wirep(_p_ *p) {
4128 _g_ := getg()
4129
4130 if _g_.m.p != 0 || _g_.m.mcache != nil {
4131 throw("wirep: already in go")
4132 }
4133 if _p_.m != 0 || _p_.status != _Pidle {
4134 id := int64(0)
4135 if _p_.m != 0 {
4136 id = _p_.m.ptr().id
4137 }
4138 print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
4139 throw("wirep: invalid p state")
4140 }
4141 _g_.m.mcache = _p_.mcache
4142 _g_.m.p.set(_p_)
4143 _p_.m.set(_g_.m)
4144 _p_.status = _Prunning
4145 }
4146
4147
4148 func releasep() *p {
4149 _g_ := getg()
4150
4151 if _g_.m.p == 0 || _g_.m.mcache == nil {
4152 throw("releasep: invalid arg")
4153 }
4154 _p_ := _g_.m.p.ptr()
4155 if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
4156 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
4157 throw("releasep: invalid p state")
4158 }
4159 if trace.enabled {
4160 traceProcStop(_g_.m.p.ptr())
4161 }
4162 _g_.m.p = 0
4163 _g_.m.mcache = nil
4164 _p_.m = 0
4165 _p_.status = _Pidle
4166 return _p_
4167 }
4168
4169 func incidlelocked(v int32) {
4170 lock(&sched.lock)
4171 sched.nmidlelocked += v
4172 if v > 0 {
4173 checkdead()
4174 }
4175 unlock(&sched.lock)
4176 }
4177
4178
4179
4180
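// checkdead checks for a deadlock: all Ms idle or locked with no runnable,
// running, or syscalling goroutines left. It throws if so, after first
// trying to restart a timer goroutine via timejump. The check is skipped for
// c-archive/c-shared builds and while panicking. The caller must hold sched.lock.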
4181 func checkdead() {
4182
4183
4184
4185 if islibrary || isarchive {
4186 return
4187 }
4188
4189
4190
4191
4192
4193 if panicking > 0 {
4194 return
4195 }
4196
4197
4198
4199
4200
4201 var run0 int32
4202 if !iscgo && cgoHasExtraM {
4203 mp := lockextra(true)
4204 haveExtraM := extraMCount > 0
4205 unlockextra(mp)
4206 if haveExtraM {
4207 run0 = 1
4208 }
4209 }
4210
4211 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
4212 if run > run0 {
4213 return
4214 }
4215 if run < 0 {
4216 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
4217 throw("checkdead: inconsistent counts")
4218 }
4219
4220 grunning := 0
4221 lock(&allglock)
4222 for i := 0; i < len(allgs); i++ {
4223 gp := allgs[i]
4224 if isSystemGoroutine(gp, false) {
4225 continue
4226 }
4227 s := readgstatus(gp)
4228 switch s &^ _Gscan {
4229 case _Gwaiting:
4230 grunning++
4231 case _Grunnable,
4232 _Grunning,
4233 _Gsyscall:
4234 unlock(&allglock)
4235 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
4236 throw("checkdead: runnable g")
4237 }
4238 }
4239 unlock(&allglock)
4240 if grunning == 0 {
4241 throw("no goroutines (main called runtime.Goexit) - deadlock!")
4242 }
4243
4244
4245 gp := timejump()
4246 if gp != nil {
4247 casgstatus(gp, _Gwaiting, _Grunnable)
4248 globrunqput(gp)
4249 _p_ := pidleget()
4250 if _p_ == nil {
4251 throw("checkdead: no p for timer")
4252 }
4253 mp := mget()
4254 if mp == nil {
4255
4256
4257 throw("checkdead: no m for timer")
4258 }
4259 mp.nextp.set(_p_)
4260 notewakeup(&mp.park)
4261 return
4262 }
4263
4264 getg().m.throwing = -1
4265 throw("all goroutines are asleep - deadlock!")
4266 }
4267
4268
4269
4270
4271
4272
4273 var forcegcperiod int64 = 2 * 60 * 1e9
4274
4275
4276
4277
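// sysmon runs on its own thread, without a P, for the life of the program.
// Each iteration it sleeps between 20us and 10ms (backing off when idle),
// polls the network if it has not been polled for over 10ms, calls retake to
// preempt long-running goroutines and reclaim Ps stuck in syscalls, kicks off
// a forced GC when one is due, and emits schedtrace output if requested.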
4278 func sysmon() {
4279 lock(&sched.lock)
4280 sched.nmsys++
4281 checkdead()
4282 unlock(&sched.lock)
4283
4284 lasttrace := int64(0)
4285 idle := 0
4286 delay := uint32(0)
4287 for {
4288 if idle == 0 {
4289 delay = 20
4290 } else if idle > 50 {
4291 delay *= 2
4292 }
4293 if delay > 10*1000 {
4294 delay = 10 * 1000
4295 }
4296 usleep(delay)
4297 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
4298 lock(&sched.lock)
4299 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
4300 atomic.Store(&sched.sysmonwait, 1)
4301 unlock(&sched.lock)
4302
4303
4304 maxsleep := forcegcperiod / 2
4305 shouldRelax := true
4306 if osRelaxMinNS > 0 {
4307 next := timeSleepUntil()
4308 now := nanotime()
4309 if next-now < osRelaxMinNS {
4310 shouldRelax = false
4311 }
4312 }
4313 if shouldRelax {
4314 osRelax(true)
4315 }
4316 notetsleep(&sched.sysmonnote, maxsleep)
4317 if shouldRelax {
4318 osRelax(false)
4319 }
4320 lock(&sched.lock)
4321 atomic.Store(&sched.sysmonwait, 0)
4322 noteclear(&sched.sysmonnote)
4323 idle = 0
4324 delay = 20
4325 }
4326 unlock(&sched.lock)
4327 }
4328
4329 if *cgo_yield != nil {
4330 asmcgocall(*cgo_yield, nil)
4331 }
4332
4333 lastpoll := int64(atomic.Load64(&sched.lastpoll))
4334 now := nanotime()
4335 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
4336 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
4337 list := netpoll(false)
4338 if !list.empty() {
4339
4340
4341
4342
4343
4344
4345
4346 incidlelocked(-1)
4347 injectglist(&list)
4348 incidlelocked(1)
4349 }
4350 }
4351
4352
4353 if retake(now) != 0 {
4354 idle = 0
4355 } else {
4356 idle++
4357 }
4358
4359 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
4360 lock(&forcegc.lock)
4361 forcegc.idle = 0
4362 var list gList
4363 list.push(forcegc.g)
4364 injectglist(&list)
4365 unlock(&forcegc.lock)
4366 }
4367 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
4368 lasttrace = now
4369 schedtrace(debug.scheddetail > 0)
4370 }
4371 }
4372 }
4373
4374 type sysmontick struct {
4375 schedtick uint32
4376 schedwhen int64
4377 syscalltick uint32
4378 syscallwhen int64
4379 }
4380
4381
4382
4383 const forcePreemptNS = 10 * 1000 * 1000
4384
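// retake is called by sysmon. For each P it preempts the running goroutine
// if it has run for more than forcePreemptNS, and it retakes (hands off) a P
// blocked in a syscall if the P has local work, if there is no other spinning
// or idle work-seeker, or if the syscall has lasted more than 10ms.
// It returns the number of Ps retaken from syscalls.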
4385 func retake(now int64) uint32 {
4386 n := 0
4387
4388
4389 lock(&allpLock)
4390
4391
4392
4393 for i := 0; i < len(allp); i++ {
4394 _p_ := allp[i]
4395 if _p_ == nil {
4396
4397
4398 continue
4399 }
4400 pd := &_p_.sysmontick
4401 s := _p_.status
4402 sysretake := false
4403 if s == _Prunning || s == _Psyscall {
4404
4405 t := int64(_p_.schedtick)
4406 if int64(pd.schedtick) != t {
4407 pd.schedtick = uint32(t)
4408 pd.schedwhen = now
4409 } else if pd.schedwhen+forcePreemptNS <= now {
4410 preemptone(_p_)
4411
4412
4413 sysretake = true
4414 }
4415 }
4416 if s == _Psyscall {
4417
4418 t := int64(_p_.syscalltick)
4419 if !sysretake && int64(pd.syscalltick) != t {
4420 pd.syscalltick = uint32(t)
4421 pd.syscallwhen = now
4422 continue
4423 }
4424
4425
4426
4427 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
4428 continue
4429 }
4430
4431 unlock(&allpLock)
4432
4433
4434
4435
4436 incidlelocked(-1)
4437 if atomic.Cas(&_p_.status, s, _Pidle) {
4438 if trace.enabled {
4439 traceGoSysBlock(_p_)
4440 traceProcStop(_p_)
4441 }
4442 n++
4443 _p_.syscalltick++
4444 handoffp(_p_)
4445 }
4446 incidlelocked(1)
4447 lock(&allpLock)
4448 }
4449 }
4450 unlock(&allpLock)
4451 return uint32(n)
4452 }
4453
4454
4455
4456
4457
4458
4459 func preemptall() bool {
4460 res := false
4461 for _, _p_ := range allp {
4462 if _p_.status != _Prunning {
4463 continue
4464 }
4465 if preemptone(_p_) {
4466 res = true
4467 }
4468 }
4469 return res
4470 }
4471
4472
4473
4474
4475
4476
4477
4478
4479
4480
4481
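// preemptone requests preemption of the goroutine currently running on _p_.
// It only sets the preempt flag and poisons stackguard0 with stackPreempt, so
// preemption is cooperative and takes effect at the goroutine's next stack
// check. It can fail spuriously and reports whether a request was issued.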
4482 func preemptone(_p_ *p) bool {
4483 mp := _p_.m.ptr()
4484 if mp == nil || mp == getg().m {
4485 return false
4486 }
4487 gp := mp.curg
4488 if gp == nil || gp == mp.g0 {
4489 return false
4490 }
4491
4492 gp.preempt = true
4493
4494
4495
4496
4497
4498 gp.stackguard0 = stackPreempt
4499 return true
4500 }
4501
4502 var starttime int64
4503
4504 func schedtrace(detailed bool) {
4505 now := nanotime()
4506 if starttime == 0 {
4507 starttime = now
4508 }
4509
4510 lock(&sched.lock)
4511 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
4512 if detailed {
4513 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
4514 }
4515
4516
4517
4518 for i, _p_ := range allp {
4519 mp := _p_.m.ptr()
4520 h := atomic.Load(&_p_.runqhead)
4521 t := atomic.Load(&_p_.runqtail)
4522 if detailed {
4523 id := int64(-1)
4524 if mp != nil {
4525 id = mp.id
4526 }
4527 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, "\n")
4528 } else {
4529
4530
4531 print(" ")
4532 if i == 0 {
4533 print("[")
4534 }
4535 print(t - h)
4536 if i == len(allp)-1 {
4537 print("]\n")
4538 }
4539 }
4540 }
4541
4542 if !detailed {
4543 unlock(&sched.lock)
4544 return
4545 }
4546
4547 for mp := allm; mp != nil; mp = mp.alllink {
4548 _p_ := mp.p.ptr()
4549 gp := mp.curg
4550 lockedg := mp.lockedg.ptr()
4551 id1 := int32(-1)
4552 if _p_ != nil {
4553 id1 = _p_.id
4554 }
4555 id2 := int64(-1)
4556 if gp != nil {
4557 id2 = gp.goid
4558 }
4559 id3 := int64(-1)
4560 if lockedg != nil {
4561 id3 = lockedg.goid
4562 }
4563 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
4564 }
4565
4566 lock(&allglock)
4567 for gi := 0; gi < len(allgs); gi++ {
4568 gp := allgs[gi]
4569 mp := gp.m
4570 lockedm := gp.lockedm.ptr()
4571 id1 := int64(-1)
4572 if mp != nil {
4573 id1 = mp.id
4574 }
4575 id2 := int64(-1)
4576 if lockedm != nil {
4577 id2 = lockedm.id
4578 }
4579 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=", id1, " lockedm=", id2, "\n")
4580 }
4581 unlock(&allglock)
4582 unlock(&sched.lock)
4583 }
4584
4585
4586
4587
4588
4589
4590 func schedEnableUser(enable bool) {
4591 lock(&sched.lock)
4592 if sched.disable.user == !enable {
4593 unlock(&sched.lock)
4594 return
4595 }
4596 sched.disable.user = !enable
4597 if enable {
4598 n := sched.disable.n
4599 sched.disable.n = 0
4600 globrunqputbatch(&sched.disable.runnable, n)
4601 unlock(&sched.lock)
4602 for ; n != 0 && sched.npidle != 0; n-- {
4603 startm(nil, false)
4604 }
4605 } else {
4606 unlock(&sched.lock)
4607 }
4608 }
4609
4610
4611
4612 func schedEnabled(gp *g) bool {
4613 if sched.disable.user {
4614 return isSystemGoroutine(gp, true)
4615 }
4616 return true
4617 }
4618
4619
4620
4621
4622
4623 func mput(mp *m) {
4624 mp.schedlink = sched.midle
4625 sched.midle.set(mp)
4626 sched.nmidle++
4627 checkdead()
4628 }
4629
4630
4631
4632
4633
4634 func mget() *m {
4635 mp := sched.midle.ptr()
4636 if mp != nil {
4637 sched.midle = mp.schedlink
4638 sched.nmidle--
4639 }
4640 return mp
4641 }
4642
4643
4644
4645
4646
4647 func globrunqput(gp *g) {
4648 sched.runq.pushBack(gp)
4649 sched.runqsize++
4650 }
4651
4652
4653
4654
4655
4656 func globrunqputhead(gp *g) {
4657 sched.runq.push(gp)
4658 sched.runqsize++
4659 }
4660
4661
4662
4663
4664 func globrunqputbatch(batch *gQueue, n int32) {
4665 sched.runq.pushBackAll(*batch)
4666 sched.runqsize += n
4667 *batch = gQueue{}
4668 }
4669
4670
4671
4672 func globrunqget(_p_ *p, max int32) *g {
4673 if sched.runqsize == 0 {
4674 return nil
4675 }
4676
4677 n := sched.runqsize/gomaxprocs + 1
4678 if n > sched.runqsize {
4679 n = sched.runqsize
4680 }
4681 if max > 0 && n > max {
4682 n = max
4683 }
4684 if n > int32(len(_p_.runq))/2 {
4685 n = int32(len(_p_.runq)) / 2
4686 }
4687
4688 sched.runqsize -= n
4689
4690 gp := sched.runq.pop()
4691 n--
4692 for ; n > 0; n-- {
4693 gp1 := sched.runq.pop()
4694 runqput(_p_, gp1, false)
4695 }
4696 return gp
4697 }
4698
4699
4700
4701
4702
4703 func pidleput(_p_ *p) {
4704 if !runqempty(_p_) {
4705 throw("pidleput: P has non-empty run queue")
4706 }
4707 _p_.link = sched.pidle
4708 sched.pidle.set(_p_)
4709 atomic.Xadd(&sched.npidle, 1)
4710 }
4711
4712
4713
4714
4715
4716 func pidleget() *p {
4717 _p_ := sched.pidle.ptr()
4718 if _p_ != nil {
4719 sched.pidle = _p_.link
4720 atomic.Xadd(&sched.npidle, -1)
4721 }
4722 return _p_
4723 }
4724
4725
4726
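// runqempty reports whether _p_ has no Gs on its local run queue.
// It rereads runqhead, runqtail, and runnext until it gets a consistent
// snapshot, so it never spuriously reports an occupied queue as empty.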
4727 func runqempty(_p_ *p) bool {
4728
4729
4730
4731
4732 for {
4733 head := atomic.Load(&_p_.runqhead)
4734 tail := atomic.Load(&_p_.runqtail)
4735 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
4736 if tail == atomic.Load(&_p_.runqtail) {
4737 return head == tail && runnext == 0
4738 }
4739 }
4740 }
4741
4742
4743
4744
4745
4746
4747
4748
4749
4750
4751 const randomizeScheduler = raceenabled
4752
4753
4754
4755
4756
4757
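// runqput tries to put gp on the local runnable queue.
// If next is true, gp goes into the _p_.runnext slot, kicking any previous
// runnext goroutine out to the tail of the queue. If the local queue is full,
// half of it plus gp are pushed onto the global queue via runqputslow.
// Executed only by the owner P.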
4758 func runqput(_p_ *p, gp *g, next bool) {
4759 if randomizeScheduler && next && fastrand()%2 == 0 {
4760 next = false
4761 }
4762
4763 if next {
4764 retryNext:
4765 oldnext := _p_.runnext
4766 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
4767 goto retryNext
4768 }
4769 if oldnext == 0 {
4770 return
4771 }
4772
4773 gp = oldnext.ptr()
4774 }
4775
4776 retry:
4777 h := atomic.LoadAcq(&_p_.runqhead)
4778 t := _p_.runqtail
4779 if t-h < uint32(len(_p_.runq)) {
4780 _p_.runq[t%uint32(len(_p_.runq))].set(gp)
4781 atomic.StoreRel(&_p_.runqtail, t+1)
4782 return
4783 }
4784 if runqputslow(_p_, gp, h, t) {
4785 return
4786 }
4787
4788 goto retry
4789 }
4790
4791
4792
4793 func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
4794 var batch [len(_p_.runq)/2 + 1]*g
4795
4796
4797 n := t - h
4798 n = n / 2
4799 if n != uint32(len(_p_.runq)/2) {
4800 throw("runqputslow: queue is not full")
4801 }
4802 for i := uint32(0); i < n; i++ {
4803 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
4804 }
4805 if !atomic.CasRel(&_p_.runqhead, h, h+n) {
4806 return false
4807 }
4808 batch[n] = gp
4809
4810 if randomizeScheduler {
4811 for i := uint32(1); i <= n; i++ {
4812 j := fastrandn(i + 1)
4813 batch[i], batch[j] = batch[j], batch[i]
4814 }
4815 }
4816
4817
4818 for i := uint32(0); i < n; i++ {
4819 batch[i].schedlink.set(batch[i+1])
4820 }
4821 var q gQueue
4822 q.head.set(batch[0])
4823 q.tail.set(batch[n])
4824
4825
4826 lock(&sched.lock)
4827 globrunqputbatch(&q, int32(n+1))
4828 unlock(&sched.lock)
4829 return true
4830 }
4831
4832
4833
4834
4835
4836 func runqget(_p_ *p) (gp *g, inheritTime bool) {
4837
4838 for {
4839 next := _p_.runnext
4840 if next == 0 {
4841 break
4842 }
4843 if _p_.runnext.cas(next, 0) {
4844 return next.ptr(), true
4845 }
4846 }
4847
4848 for {
4849 h := atomic.LoadAcq(&_p_.runqhead)
4850 t := _p_.runqtail
4851 if t == h {
4852 return nil, false
4853 }
4854 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
4855 if atomic.CasRel(&_p_.runqhead, h, h+1) {
4856 return gp, false
4857 }
4858 }
4859 }
4860
4861
4862
4863
4864
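// runqgrab grabs a batch of goroutines from _p_'s runnable queue into batch,
// starting at batchHead. It takes at most half of the queue; if the queue is
// empty and stealRunNextG is set, it may steal _p_.runnext after a short
// pause that gives the owner P a chance to run it first.
// It returns the number of goroutines grabbed.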
4865 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
4866 for {
4867 h := atomic.LoadAcq(&_p_.runqhead)
4868 t := atomic.LoadAcq(&_p_.runqtail)
4869 n := t - h
4870 n = n - n/2
4871 if n == 0 {
4872 if stealRunNextG {
4873
4874 if next := _p_.runnext; next != 0 {
4875 if _p_.status == _Prunning {
4876
4877
4878
4879
4880
4881
4882
4883
4884
4885
4886 if GOOS != "windows" {
4887 usleep(3)
4888 } else {
4889
4890
4891
4892 osyield()
4893 }
4894 }
4895 if !_p_.runnext.cas(next, 0) {
4896 continue
4897 }
4898 batch[batchHead%uint32(len(batch))] = next
4899 return 1
4900 }
4901 }
4902 return 0
4903 }
4904 if n > uint32(len(_p_.runq)/2) {
4905 continue
4906 }
4907 for i := uint32(0); i < n; i++ {
4908 g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
4909 batch[(batchHead+i)%uint32(len(batch))] = g
4910 }
4911 if atomic.CasRel(&_p_.runqhead, h, h+n) {
4912 return n
4913 }
4914 }
4915 }
4916
4917
4918
4919
4920 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
4921 t := _p_.runqtail
4922 n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
4923 if n == 0 {
4924 return nil
4925 }
4926 n--
4927 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
4928 if n == 0 {
4929 return gp
4930 }
4931 h := atomic.LoadAcq(&_p_.runqhead)
4932 if t-h+n >= uint32(len(_p_.runq)) {
4933 throw("runqsteal: runq overflow")
4934 }
4935 atomic.StoreRel(&_p_.runqtail, t+n)
4936 return gp
4937 }
4938
4939
4940
4941 type gQueue struct {
4942 head guintptr
4943 tail guintptr
4944 }
4945
4946
4947 func (q *gQueue) empty() bool {
4948 return q.head == 0
4949 }
4950
4951
4952 func (q *gQueue) push(gp *g) {
4953 gp.schedlink = q.head
4954 q.head.set(gp)
4955 if q.tail == 0 {
4956 q.tail.set(gp)
4957 }
4958 }
4959
4960
4961 func (q *gQueue) pushBack(gp *g) {
4962 gp.schedlink = 0
4963 if q.tail != 0 {
4964 q.tail.ptr().schedlink.set(gp)
4965 } else {
4966 q.head.set(gp)
4967 }
4968 q.tail.set(gp)
4969 }
4970
4971
4972
4973 func (q *gQueue) pushBackAll(q2 gQueue) {
4974 if q2.tail == 0 {
4975 return
4976 }
4977 q2.tail.ptr().schedlink = 0
4978 if q.tail != 0 {
4979 q.tail.ptr().schedlink = q2.head
4980 } else {
4981 q.head = q2.head
4982 }
4983 q.tail = q2.tail
4984 }
4985
4986
4987
4988 func (q *gQueue) pop() *g {
4989 gp := q.head.ptr()
4990 if gp != nil {
4991 q.head = gp.schedlink
4992 if q.head == 0 {
4993 q.tail = 0
4994 }
4995 }
4996 return gp
4997 }
4998
4999
5000 func (q *gQueue) popList() gList {
5001 stack := gList{q.head}
5002 *q = gQueue{}
5003 return stack
5004 }
5005
5006
5007
5008 type gList struct {
5009 head guintptr
5010 }
5011
5012
5013 func (l *gList) empty() bool {
5014 return l.head == 0
5015 }
5016
5017
5018 func (l *gList) push(gp *g) {
5019 gp.schedlink = l.head
5020 l.head.set(gp)
5021 }
5022
5023
5024 func (l *gList) pushAll(q gQueue) {
5025 if !q.empty() {
5026 q.tail.ptr().schedlink = l.head
5027 l.head = q.head
5028 }
5029 }
5030
5031
5032 func (l *gList) pop() *g {
5033 gp := l.head.ptr()
5034 if gp != nil {
5035 l.head = gp.schedlink
5036 }
5037 return gp
5038 }
5039
5040
5041 func setMaxThreads(in int) (out int) {
5042 lock(&sched.lock)
5043 out = int(sched.maxmcount)
5044 if in > 0x7fffffff {
5045 sched.maxmcount = 0x7fffffff
5046 } else {
5047 sched.maxmcount = int32(in)
5048 }
5049 checkmcount()
5050 unlock(&sched.lock)
5051 return
5052 }
5053
5054 func haveexperiment(name string) bool {
5055 if name == "framepointer" {
5056 return framepointer_enabled
5057 }
5058 x := sys.Goexperiment
5059 for x != "" {
5060 xname := ""
5061 i := index(x, ",")
5062 if i < 0 {
5063 xname, x = x, ""
5064 } else {
5065 xname, x = x[:i], x[i+1:]
5066 }
5067 if xname == name {
5068 return true
5069 }
5070 if len(xname) > 2 && xname[:2] == "no" && xname[2:] == name {
5071 return false
5072 }
5073 }
5074 return false
5075 }
5076
5077
5078 func procPin() int {
5079 _g_ := getg()
5080 mp := _g_.m
5081
5082 mp.locks++
5083 return int(mp.p.ptr().id)
5084 }
5085
5086
5087 func procUnpin() {
5088 _g_ := getg()
5089 _g_.m.locks--
5090 }
5091
5092
5093
5094 func sync_runtime_procPin() int {
5095 return procPin()
5096 }
5097
5098
5099
5100 func sync_runtime_procUnpin() {
5101 procUnpin()
5102 }
5103
5104
5105
5106 func sync_atomic_runtime_procPin() int {
5107 return procPin()
5108 }
5109
5110
5111
5112 func sync_atomic_runtime_procUnpin() {
5113 procUnpin()
5114 }
5115
5116
5117
5118
5119 func sync_runtime_canSpin(i int) bool {
5120
5121
5122
5123
5124
5125 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
5126 return false
5127 }
5128 if p := getg().m.p.ptr(); !runqempty(p) {
5129 return false
5130 }
5131 return true
5132 }
5133
5134
5135
5136 func sync_runtime_doSpin() {
5137 procyield(active_spin_cnt)
5138 }
5139
5140 var stealOrder randomOrder
5141
5142
5143
5144
5145
5146 type randomOrder struct {
5147 count uint32
5148 coprimes []uint32
5149 }
5150
5151 type randomEnum struct {
5152 i uint32
5153 count uint32
5154 pos uint32
5155 inc uint32
5156 }
5157
5158 func (ord *randomOrder) reset(count uint32) {
5159 ord.count = count
5160 ord.coprimes = ord.coprimes[:0]
5161 for i := uint32(1); i <= count; i++ {
5162 if gcd(i, count) == 1 {
5163 ord.coprimes = append(ord.coprimes, i)
5164 }
5165 }
5166 }
5167
5168 func (ord *randomOrder) start(i uint32) randomEnum {
5169 return randomEnum{
5170 count: ord.count,
5171 pos: i % ord.count,
5172 inc: ord.coprimes[i%uint32(len(ord.coprimes))],
5173 }
5174 }
5175
5176 func (enum *randomEnum) done() bool {
5177 return enum.i == enum.count
5178 }
5179
5180 func (enum *randomEnum) next() {
5181 enum.i++
5182 enum.pos = (enum.pos + enum.inc) % enum.count
5183 }
5184
5185 func (enum *randomEnum) position() uint32 {
5186 return enum.pos
5187 }
5188
5189 func gcd(a, b uint32) uint32 {
5190 for b != 0 {
5191 a, b = b, a%b
5192 }
5193 return a
5194 }
5195
5196
5197 type initTask struct {
5198
5199 state uintptr
5200 ndeps uintptr
5201 nfns uintptr
5202
5203
5204 }
5205
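// doInit runs the package initialization task t: it first runs the init
// tasks of all dependencies, then the package's own init functions, using
// t.state to detect completed (2) and in-progress (1) tasks.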
5206 func doInit(t *initTask) {
5207 switch t.state {
5208 case 2:
5209 return
5210 case 1:
5211 throw("recursive call during initialization - linker skew")
5212 default:
5213 t.state = 1
5214 for i := uintptr(0); i < t.ndeps; i++ {
5215 p := add(unsafe.Pointer(t), (3+i)*sys.PtrSize)
5216 t2 := *(**initTask)(p)
5217 doInit(t2)
5218 }
5219 for i := uintptr(0); i < t.nfns; i++ {
5220 p := add(unsafe.Pointer(t), (3+t.ndeps+i)*sys.PtrSize)
5221 f := *(*func())(unsafe.Pointer(&p))
5222 f()
5223 }
5224 t.state = 2
5225 }
5226 }
5227