Source file src/runtime/stack.go
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.

The per-goroutine g.stackguard0 normally points _StackGuard bytes
above the bottom (low address) of the stack. Each function with a
stack check compares its stack pointer against g.stackguard0 to
detect overflow. To save an instruction in the prologue, functions
with tiny frames are allowed to run up to _StackSmall bytes below
the guard without checking. Functions with frames bigger than
_StackBig must do a more careful check so that SP minus the frame
size cannot wrap below zero. When a check fails, the function calls
morestack to grow the stack. Chains of NOSPLIT functions, which do
no checking at all, must fit in _StackLimit bytes.
*/

const (
	// _StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and Darwin/ARM because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024 + sys.GoosDarwin*sys.GoarchArm64*1024

	// The minimum size of stack used by Go code.
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 880*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)
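
// Worked example of the _FixedStack rounding above (illustrative note,
// not part of the original source): on linux/amd64 _StackSystem is 0,
// so _FixedStack0 = 2048; subtracting 1 gives 0x7ff, the shift-or
// cascade leaves 0x7ff unchanged (all bits below the highest are
// already set), and adding 1 yields _FixedStack = 2048. On
// windows/amd64 _StackSystem is 512*8 = 4096, so _FixedStack0 = 6144 =
// 0x1800; the cascade smears the top bit down to 0x1fff and
// _FixedStack = 0x2000 = 8192, the next power of two.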

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stacks with bad value when copying to check for use after free
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)
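
// How the sentinel values work (illustrative note, not part of the
// original source): stackguard0 is what function prologues compare SP
// against. Storing stackPreempt (0xfffffade on 32-bit,
// 0xfffffffffffffade on 64-bit) there makes the comparison fail for
// every function with a stack check, so the next call enters
// morestack -> newstack, which notices the sentinel and preempts
// instead of growing the stack. Roughly, for amd64 the prologue is:
//
//	MOVQ	16(CX), SI       // g.stackguard0 (CX holds g)
//	CMPQ	SP, SI
//	JLS	call_morestack   // always taken when stackguard0 == stackPreempt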

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
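
// Illustration (not part of the original source): stacklog2 indexes
// stackLarge.free by log2 of a span's page count. With the usual 8 KB
// runtime page size, a 32 KB stack is npage = 4 and lands in
// stackLarge.free[2]. stacklog2 floors, so a non-power-of-two count
// shares the list of the next power of two below it.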

// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		osStackAlloc(s)
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}
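
// Sizing note (illustrative, not part of the original source): order n
// holds stacks of _FixedStack<<n bytes, so on linux/amd64 the pools
// cover 2 KB, 4 KB, 8 KB, and 16 KB stacks (_NumStackOrders is 4
// there; it is smaller on Windows and Plan 9, where _FixedStack itself
// is bigger). Anything larger goes through the stackLarge path in
// stackalloc below.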

// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := spanOfUnchecked(uintptr(x))
	if s.state != mSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.manualFreeList = 0
		osStackFree(s)
		mheap_.freeManual(s, &memstats.stacks_inuse)
	}
}

// stackcacherefill/stackcacherelease implement a global pool
// of stack segments. The pool is required to prevent
// unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}
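
// Cache sizing note (illustrative, not part of the original source):
// _StackCacheSize is 32 KB per order (see malloc.go), so a refill
// fills a P's cache to 16 KB and a release drains it back down to
// 16 KB. Moving half the capacity at a time gives hysteresis: a
// goroutine repeatedly allocating and freeing one stack does not
// bounce the cache between empty and full, and so does not take
// stackpoolmu on every operation.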

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(round(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack off the free pool for now.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, &memstats.stacks_inuse)
			if s == nil {
				throw("out of memory")
			}
			osStackAlloc(s)
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
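
// Worked example (illustrative, not part of the original source): a
// request for n = 8192 on linux/amd64 takes the small-stack path
// (8192 < 2048<<4 and 8192 < _StackCacheSize). The loop shifts
// n2 = 8192 down twice before reaching _FixedStack = 2048, so
// order = 2 and the stack comes from stackpool[2] or the current P's
// stackcache[2].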

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := spanOfUnchecked(uintptr(v))
		if s.state != mSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			osStackFree(s)
			mheap_.freeManual(s, &memstats.stacks_inuse)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}
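
// Note on delta (illustrative, not part of the original source):
// stacks grow downward and are copied aligned at their high end, so a
// pointer p into the old stack moves to p + (new.hi - old.hi). For
// example, if old = [0xc000100000, 0xc000102000) is copied to a 16 KB
// stack ending at 0xc000204000, then delta = 0x102000 and an old slot
// at old.hi-0x40 lands at new.hi-0x40.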

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk of ptrbits.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	// If this frame might contain channel receive slots (at or
	// below adjinfo.sghi), another goroutine may be writing to
	// those slots concurrently, so pointer updates must use CAS
	// and retry on conflict.
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i += 8 {
		if stackDebug >= 4 {
			for j := uintptr(0); j < 8; j++ {
				print(" ", add(scanp, (i+j)*sys.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*sys.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
			}
		}
		b := *(addb(bv.bytedata, i/8))
		for b != 0 {
			j := uintptr(sys.Ctz8(b))
			b &= b - 1
			pp := (*uintptr)(add(scanp, (i+j)*sys.PtrSize))
		retry:
			p := *pp
			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid pointer found on stack")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}
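
// The inner loop above uses a standard bit trick (illustrative note,
// not part of the original source): b &= b - 1 clears the lowest set
// bit and sys.Ctz8 returns its index, so each bitmap byte is walked in
// one iteration per set bit rather than eight. For b = 0b00010100 the
// loop visits j = 2, then j = 4, then exits.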

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	if frame.continpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.funcID == funcID_systemstack_switch {
		// A special routine at the bottom of stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't copy
		// the frame containing it.
		return true
	}

	locals, args, objs := getStackMap(frame, &adjinfo.cache, true)

	// Adjust local variables if stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * sys.PtrSize
		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if args.n > 0 {
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
	}

	// Adjust pointers in all stack objects (whether they are live or not).
	// See comments in mgcmark.go:scanframeworker.
	if frame.varp != 0 {
		for _, obj := range objs {
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			p := base + uintptr(off)
			if p < frame.sp {
				// Object hasn't been allocated in the frame yet.
				continue
			}
			t := obj.typ
			gcdata := t.gcdata
			var s *mspan
			if t.kind&kindGCProg != 0 {
				// Expand the GC program into a bitmap
				// we can scan below.
				s = materializeGCProg(t.ptrdata, gcdata)
				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
			}
			for i := uintptr(0); i < t.ptrdata; i += sys.PtrSize {
				if *addb(gcdata, i/(8*sys.PtrSize))>>(i/sys.PtrSize&7)&1 != 0 {
					adjustpointer(adjinfo, unsafe.Pointer(p+i))
				}
			}
			if s != nil {
				dematerializeGCProg(s)
			}
		}
	}

	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer records and the fields in them that can
	// point into the stack.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
	}

	// Adjust defer argument blocks the same way we adjust active
	// stack frames: tracebackdefers walks the deferred frames and
	// applies adjustframe to each.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}
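
// Why sghi matters (illustrative note, not part of the original
// source): a goroutine blocked in a channel operation publishes
// sg.elem, a pointer into its own stack, and a sender may write
// through that pointer at any time. findsghi computes the high-water
// mark of such slots so that copystack knows which region must be
// copied under channel locks (syncadjustsudogs) rather than by the
// plain memmove.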

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// Sudogs on the same channel are adjacent in the waiting
	// list, so the lastc check takes each channel lock exactly
	// once.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			lock(&sg.c.lock)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the channel locks to prevent concurrent
	// send/receive to/from the stack.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
//
// If sync is true, this is a self-triggered stack growth and, in
// particular, no other G may be writing to gp's stack (e.g., via a
// channel operation). If sync is false, copystack protects against
// concurrent channel operations.
func copystack(gp *g, newsize uintptr, sync bool) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if sync {
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs can point in to the stack. During concurrent
		// shrinking, these areas may be written to. Find the
		// highest such pointer so we can handle everything
		// there and below carefully. (This shouldn't be far
		// from the bottom of the stack, so there's little cost
		// in handling everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location.
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack, because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one.
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
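
// For example (illustrative, not part of the original source):
// round2(1) = 1, round2(3) = 4, and round2(8) = 8. Callers such as
// malg use this to turn a requested stack size into the power-of-two
// size that stackalloc requires.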

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
// compiler doesn't check this.
//
//go:nowritebarrierrec
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemptoff, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// If the goroutine can't be preempted here, it keeps running and the
	// preempt request remains pending (gp.preempt is still set), so it
	// will be honored at the next preemption point.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 || sys.ArchFamily == sys.WASM {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		// Synchronize with scang.
		casgstatus(gp, _Grunning, _Gwaiting)
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, it already owns
				// the scan bit; spin until it is
				// released.
			}
			if !gp.gcscandone {
				// gcw is safe because we're on the
				// system stack.
				gcw := &gp.m.p.ptr().gcw
				scanstack(gp, gcw)
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			// This clears gcscanvalid.
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2
	if newsize > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing
	// the copy since the gp is in a Gcopystack status.
	copystack(gp, newsize, true)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}
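
// Growth arithmetic (illustrative note, not part of the original
// source): because newstack always doubles, a goroutine that ends up
// with a 1 MB stack grew through 2 KB -> 4 KB -> ... -> 1 MB, and the
// total bytes ever copied are bounded by the sum of the smaller sizes,
// which is less than the final size. That geometric bound is the
// "constant amortized cost" the doubling buys.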

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
// gp must be stopped, but the world need not be.
func shrinkstack(gp *g) {
	gstatus := readgstatus(gp)
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if gstatus&_Gscan == 0 {
		throw("bad status in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	f := findfunc(gp.startpc)
	if f.valid() && f.funcID == funcID_gcBgMarkWorker {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize, false)
}
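
// Worked example (illustrative, not part of the original source): for
// an 8 KB stack, avail = 8192, so the shrink proceeds only if used
// (live bytes down to SP, plus _StackLimit of nosplit headroom) is
// under 2048; the stack is then halved to 4 KB. A stack already at
// _FixedStack is never shrunk.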

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				osStackFree(s)
				mheap_.freeManual(s, &memstats.stacks_inuse)
			}
			s = next
		}
	}

	unlock(&stackpoolmu)

	// Free large stacks.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			osStackFree(s)
			mheap_.freeManual(s, &memstats.stacks_inuse)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

// getStackMap returns the locals and arguments live pointer maps, and
// stack object list for frame.
func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args bitvector, objs []stackObjectRecord) {
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead. Return empty bitvectors.
		return
	}

	f := frame.fn
	pcdata := int32(-1)
	if targetpc != f.entry {
		// Back up to the CALL. If we're at the function entry
		// point, we want to use the entry map (-1), even if
		// the first instruction of the function changes the
		// stack map.
		targetpc--
		pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
	}
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}

	// Local variables.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var stkmap *stackmap
		stackid := pcdata
		if f.funcID != funcID_debugCallV1 {
			stkmap = (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		} else {
			// debugCallV1's stack map is the register map
			// at its call site.
			callerPC := frame.lr
			caller := findfunc(callerPC)
			if !caller.valid() {
				println("runtime: debugCallV1 called by unknown caller", hex(callerPC))
				throw("bad debugCallV1")
			}
			stackid = int32(-1)
			if callerPC != caller.entry {
				callerPC--
				stackid = pcdatavalue(caller, _PCDATA_RegMapIndex, callerPC, cache)
			}
			if stackid == -1 {
				stackid = 0
			}
			stkmap = (*stackmap)(funcdata(caller, _FUNCDATA_RegPointerMaps))
		}
		if stkmap == nil || stkmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}

		if stkmap.nbit > 0 {
			if stackid < 0 || stackid >= stkmap.n {
				// don't know where we are
				print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			locals = stackmapdata(stkmap, stackid)
			if stackDebug >= 3 && debug {
				print(" locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n")
			}
		} else if stackDebug >= 3 && debug {
			print(" no locals to adjust\n")
		}
	}

	// Arguments.
	if frame.arglen > 0 {
		if frame.argmap != nil {
			// argmap is set when the function is reflect.makeFuncStub or reflect.methodValueCall.
			// In this case, arglen specifies how much of the args section is actually live.
			// (It could be either all the args + results, or just the args.)
			args = *frame.argmap
			n := int32(frame.arglen / sys.PtrSize)
			if n < args.n {
				args.n = n // Don't use more of the arguments than arglen.
			}
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n")
				throw("bad symbol table")
			}
			if stackmap.nbit > 0 {
				args = stackmapdata(stackmap, pcdata)
			}
		}
	}

	// stack objects.
	p := funcdata(f, _FUNCDATA_StackObjects)
	if p != nil {
		n := *(*uintptr)(p)
		p = add(p, sys.PtrSize)
		*(*slice)(unsafe.Pointer(&objs)) = slice{array: noescape(p), len: int(n), cap: int(n)}
		// Note: the noescape above is needed to keep
		// getStackMap from "leaking param content:
		// frame".  That leak propagates up to getgcmask, then
		// GCMask, then verifyGCInfo, which converts the stack
		// gcinfo tests into heap gcinfo tests :(
	}

	return
}

// A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
// This record must match the generator code in cmd/compile/internal/gc/ssa.go:emitStackObjects.
type stackObjectRecord struct {
	// offset in frame
	// if negative, offset from varp.
	// if non-negative, offset from argp.
	off int
	typ *_type
}

// morestackc is called by assembly when a function that must run on
// the system stack is entered on an ordinary goroutine stack.
//
//go:nosplit
func morestackc() {
	throw("attempt to execute system stack code on user stack")
}