// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc and writes them to a buffer
// in a compact form. A precise nanosecond-precision timestamp and a stack
// trace is captured for most events.
// See https://golang.org/s/go15trace for more info.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// Event types in the trace, args are given in square brackets.
const (
	traceEvNone              = 0  // unused
	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart         = 5  // start of P [timestamp, thread id]
	traceEvProcStop          = 6  // stop of P [timestamp]
	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
	traceEvGCDone            = 8  // GC done [timestamp]
	traceEvGCSTWStart        = 9  // GC STW start [timestamp, kind]
	traceEvGCSTWDone         = 10 // GC STW done [timestamp]
	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
	traceEvGoEnd             = 15 // goroutine ends [timestamp]
	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
	traceEvHeapAlloc         = 33 // memstats.heap_live change [timestamp, heap_alloc]
	traceEvNextGC            = 34 // memstats.next_gc change [timestamp, next_gc]
	traceEvTimerGoroutine    = 35 // denotes timer goroutine [timer goroutine id]
	traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvString            = 37 // string dictionary entry [ID, length, string]
	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
	traceEvUserTaskCreate    = 45 // user task creation [timestamp, internal task id, internal parent task id, stack, name string id]
	traceEvUserTaskEnd       = 46 // end of a user task [timestamp, internal task id, stack]
	traceEvUserRegion        = 47 // user region [timestamp, internal task id, mode(0:start, 1:end), stack, name string id]
	traceEvUserLog           = 48 // user log message [timestamp, internal task id, key string id, stack, value string]
	traceEvCount             = 49
	// Byte is used but only 6 bits are available for event type.
	// The remaining 2 bits are used to specify the number of arguments.
	// That means, the max event type value is 63.
)

const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in less number of bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	// The suggested increment frequency for PowerPC's time base register is
	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
	// and ppc64le.
	// Exactly one of the sys.Goarch* values below is 1 on x86, so the
	// expression evaluates to 16+48 = 64 there and to 16 everywhere else.
	traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64|sys.GoarchAmd64p32)
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine is scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting for end user.
	traceFutileWakeup byte = 128
)

// trace is global tracing context.
var trace struct {
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock locks
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	seqGC         uint64      // GC start/done sequencer
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	reader        guintptr        // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	// Dictionary for traceEvString.
	//
	// TODO: central lock to access the map is not ideal.
	//   option: pre-assign ids to all user annotation region names and tags
	//   option: per-P cache
	//   option: sync.Map like datastructure
	stringsLock mutex
	strings     map[string]uint64 // maps string to unique ids
	stringSeq   uint64            // id allocation counter

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}

// traceBufHeader is per-P tracing buffer.
type traceBufHeader struct {
	link      traceBufPtr             // in trace.empty/full
	lastTicks uint64                  // when we wrote the last event
	pos       int                     // next write offset in arr
	stk       [traceStackSize]uintptr // scratch buffer for traceback
}

// traceBuf is per-P tracing buffer.
//
//go:notinheap
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.pos
}

// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector and doesn't have write barriers. traceBufs are not
// allocated from the GC'd heap, so this is safe, and are often
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}

// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world, so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	stopTheWorld("start tracing")

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		startTheWorld()
		return errorString("tracing is already enabled")
	}

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
	// That would lead to an inconsistent trace:
	// - either GoSysExit appears before EvGoInSyscall,
	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
	// To instruct traceEvent that it must not ignore events below, we set startingtrace.
	// trace.enabled is set afterwards once we have emitted all preliminary events.
	_g_ := getg()
	_g_.m.startingtrace = true

	// Obtain current stack ID to use in all traceEvGoCreate events below.
	mp := acquirem()
	stkBuf := make([]uintptr, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)
	releasem(mp)

	for _, gp := range allgs {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.traceseq = 0
			gp.tracelastp = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]uintptr{gp.startpc + sys.PCQuantum})
			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.traceseq++
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			gp.traceseq++
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	}
	traceProcStart()
	traceGoStart()
	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
	// If we do it the other way around, it is possible that exitsyscall will
	// query sysexitticks after ticksStart but before traceEvGoInSyscall timestamp.
	// It will lead to a false conclusion that cputicks is broken.
	trace.ticksStart = cputicks()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// string to id mapping
	//  0 : reserved for an empty string
	//  remaining: other strings registered by traceString
	trace.stringSeq = 0
	trace.strings = make(map[string]uint64)

	trace.seqGC = 0
	_g_.m.startingtrace = false
	trace.enabled = true

	// Register runtime goroutine labels.
	_, pid, bufp := traceAcquireBuffer()
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
	}
	traceReleaseBuffer(pid)

	unlock(&trace.bufLock)

	startTheWorld()
	return nil
}
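
// A typical client does not call StartTrace directly; the runtime/trace
// package wraps it. As a sketch (error handling elided):
//
//	f, _ := os.Create("trace.out")
//	trace.Start(f) // starts the tracer and a goroutine that drains ReadTrace into f
//	defer trace.Stop()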

// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	stopTheWorld("stop tracing")

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		startTheWorld()
		return
	}

	traceGoSched()

	// Loop over all allocated Ps because dead Ps may still have
	// trace buffers.
	for _, p := range allp[:cap(allp)] {
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms, wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)

	startTheWorld()

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
	lock(&trace.lock)
	for _, p := range allp[:cap(allp)] {
		if p.tracebuf != 0 {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != 0 {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != 0 || trace.fullTail != 0 {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != 0 || trace.reader != 0 {
		throw("trace: reading after shutdown")
	}
	for trace.empty != 0 {
		buf := trace.empty
		trace.empty = buf.ptr().link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
	}
	trace.strings = nil
	trace.shutdown = false
	unlock(&trace.lock)
}

// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != 0 {
		// More than one goroutine reads trace. This is bad.
		// But we rather do not crash the program because of tracing,
		// because tracing can be enabled at runtime on prod servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.11 trace\x00\x00\x00")
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		trace.reader.set(getg())
		goparkunlock(&trace.lock, waitReasonTraceReaderBlocked, traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos]
	}
	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		for i := range timers {
			tb := &timers[i]
			if tb.gp != nil {
				data = append(data, traceEvTimerGoroutine|0<<traceArgCountShift)
				data = traceAppend(data, uint64(tb.gp.goid))
			}
		}
		// This will emit a bunch of full buffers, we will pick them up
		// on the next iteration.
		trace.stackTab.dump()
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which race
			// detector does not see. This is required to avoid false
			// race reports on writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also trace.enabled is already reset and so cannot start new
	// tracing, and all the buffers are flushed.
	// Exitsyscall won't block either.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}
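
// A sketch of the reader side (this is roughly what runtime/trace.Start's
// helper goroutine does): call ReadTrace in a loop and consume each chunk
// before the next call, stopping on nil.
//
//	for {
//		data := runtime.ReadTrace()
//		if data == nil {
//			break
//		}
//		w.Write(data) // w is the destination io.Writer
//	}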

// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader.ptr()
	trace.reader.set(nil)
	unlock(&trace.lock)
	return gp
}

// traceProcFree frees trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}

// traceFullQueue queues buf into queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}

// traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
// ev is event type.
// If skip > 0, write current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.

	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceAcquireBuffer did mp.locks++,
	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceAcquireBuffer.
	//
	// Note trace_userTaskCreate runs the same check.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	if skip > 0 {
		if getg() == mp.curg {
			skip++ // +1 because stack is captured in traceEventLocked.
		}
	}
	traceEventLocked(0, mp, pid, bufp, ev, skip, args...)
	traceReleaseBuffer(pid)
}

func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, skip int, args ...uint64) {
	buf := bufp.ptr()
	// TODO: test on non-zero extraBytes param.
	maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two add params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}

	ticks := uint64(cputicks()) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for number of arguments.
	// If number is >= 3, then the event type is followed by event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
}
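
// The encoding produced above, spelled out: the first byte packs the event
// type in the low 6 bits and the inline argument count in the top 2 bits.
// For example, with traceEvBatch = 1 and traceArgCountShift = 6,
//
//	traceEvBatch | 1<<traceArgCountShift == 0x41
//
// i.e. "batch event, one argument follows (plus the timestamp)". When the
// count field saturates at 3, an explicit length byte follows instead,
// since 2 bits cannot express larger argument counts.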

func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
	_g_ := getg()
	gp := mp.curg
	var nstk int
	if gp == _g_ {
		nstk = callers(skip+1, buf)
	} else if gp != nil {
		gp = mp.curg
		nstk = gcallers(gp, skip, buf)
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && gp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab.put(buf[:nstk])
	return uint64(id)
}

// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}

// traceFlush puts buf onto stack of full buffers and returns an empty buffer.
func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0

	// initialize the buffer for a new batch
	ticks := uint64(cputicks()) / traceTickDiv
	bufp.lastTicks = ticks
	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
	bufp.varint(uint64(pid))
	bufp.varint(ticks)

	if dolock {
		unlock(&trace.lock)
	}
	return buf
}
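
// Consequently every buffer handed to the reader starts with a batch
// header: the 0x41 byte described above, the P id (traceGlobProc == -1
// for the global buffer), and the absolute tick count that the
// per-event tick diffs in the rest of the buffer are relative to.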

// traceString adds a string to the trace.strings and returns the id.
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
	if s == "" {
		return 0, bufp
	}

	lock(&trace.stringsLock)
	if raceenabled {
		// raceacquire is necessary because the map access
		// below is race annotated.
		raceacquire(unsafe.Pointer(&trace.stringsLock))
	}

	if id, ok := trace.strings[s]; ok {
		if raceenabled {
			racerelease(unsafe.Pointer(&trace.stringsLock))
		}
		unlock(&trace.stringsLock)

		return id, bufp
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	if raceenabled {
		racerelease(unsafe.Pointer(&trace.stringsLock))
	}
	unlock(&trace.stringsLock)

	// memory allocation in above may trigger tracing and
	// cause *bufp changes. Following code now works with *bufp,
	// so there must be no memory allocation or calling events that causes
	// tracing after this point.

	buf := bufp.ptr()
	size := 1 + 2*traceBytesPerNumber + len(s)
	if buf == nil || len(buf.arr)-buf.pos < size {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}
	buf.byte(traceEvString)
	buf.varint(id)

	// double-check the string and the length can fit.
	// Otherwise, truncate the string.
	slen := len(s)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}

	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], s[:slen])

	bufp.set(buf)
	return id, bufp
}
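
// On the wire a dictionary entry is thus:
//
//	traceEvString, id varint, length varint, length bytes of string data
//
// A string that does not fit in the remaining buffer space is truncated
// rather than split across buffers.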

// traceAppend appends v to buf in little-endian-base-128 encoding.
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}

// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}
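
// As a worked example of the LEB128-style encoding used by traceAppend
// and varint: v = 300 (binary 100101100) is emitted low 7 bits first,
// with the high bit marking continuation:
//
//	0xac 0x02 // 0x2c|0x80, then 300>>7 == 2
//
// so values below 128, such as most tick diffs, cost a single byte.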

// traceStackTable maps stack traces (arrays of PC's) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]uintptr // real type [n]uintptr
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns slice of PCs.
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
}

// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		stkpc[i] = pc
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}

// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
}

// allFrames returns all of the Frames corresponding to pcs.
func allFrames(pcs []uintptr) []Frame {
	frames := make([]Frame, 0, len(pcs))
	ci := CallersFrames(pcs)
	for {
		f, more := ci.Next()
		frames = append(frames, f)
		if !more {
			return frames
		}
	}
}

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
	bufp := traceFlush(0, 0)
	for _, stk := range tab.tab {
		stk := stk.ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			frames := allFrames(stk.stack())
			tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
			for _, f := range frames {
				var frame traceFrame
				frame, bufp = traceFrameForPC(bufp, 0, f)
				tmpbuf = traceAppend(tmpbuf, uint64(f.PC))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
			}
			// Now copy to the buffer.
			size := 1 + traceBytesPerNumber + len(tmpbuf)
			if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
				bufp = traceFlush(bufp, 0)
			}
			buf := bufp.ptr()
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			buf.varint(uint64(len(tmpbuf)))
			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
		}
	}

	lock(&trace.lock)
	traceFullQueue(bufp)
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
}
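
// Each record emitted by dump therefore has the form:
//
//	traceEvStack|3<<traceArgCountShift, total length, stack id,
//	number of frames, then {PC, func string id, file string id, line}
//	per frame
//
// matching the layout documented for traceEvStack above.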

type traceFrame struct {
	funcID uint64
	fileID uint64
	line   uint64
}

// traceFrameForPC records the frame information.
// It may allocate memory.
func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
	bufp := &buf
	var frame traceFrame

	fn := f.Function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:] // keep the suffix; it is the more distinctive part
	}
	frame.funcID, bufp = traceString(bufp, pid, fn)
	frame.line = uint64(f.Line)
	file := f.File
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, bufp = traceString(bufp, pid, file)
	return frame, (*bufp)
}

// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
//
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
//
//go:notinheap
type traceAllocBlock struct {
	next traceAllocBlockPtr
	data [64<<10 - sys.PtrSize]byte
}

// traceAllocBlockPtr is a non-GC-visible pointer to a traceAllocBlock.
type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }

// alloc allocates n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = round(n, sys.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}

// The following functions write specific events to trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls,
	// to handle this we temporary employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func traceGCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCSTWStart(kind int) {
	traceEvent(traceEvGCSTWStart, -1, uint64(kind))
}

func traceGCSTWDone() {
	traceEvent(traceEvGCSTWDone, -1)
}

// traceGCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called at least once.
//
// traceGCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
func traceGCSweepStart() {
	// Delay the actual GCSweepStart event until the first span
	// sweep. If we don't sweep anything, don't emit any events.
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		throw("double traceGCSweepStart")
	}
	_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
}

// traceGCSweepSpan traces the sweep of a single span.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
func traceGCSweepSpan(bytesSwept uintptr) {
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		if _p_.traceSwept == 0 {
			traceEvent(traceEvGCSweepStart, 1)
		}
		_p_.traceSwept += bytesSwept
	}
}

func traceGCSweepDone() {
	_p_ := getg().m.p.ptr()
	if !_p_.traceSweep {
		throw("missing traceGCSweepStart")
	}
	if _p_.traceSwept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
	}
	_p_.traceSweep = false
}

func traceGCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

func traceGCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}

func traceGoCreate(newg *g, pc uintptr) {
	newg.traceseq = 0
	newg.tracelastp = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]uintptr{pc + sys.PCQuantum})
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}

func traceGoStart() {
	_g_ := getg().m.curg
	_p_ := _g_.m.p
	_g_.traceseq++
	if _g_ == _p_.ptr().gcBgMarkWorker.ptr() {
		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
	} else if _g_.tracelastp == _p_ {
		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
	} else {
		_g_.tracelastp = _p_
		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
	}
}

func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func traceGoSched() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoPreempt, 1)
}

func traceGoPark(traceEv byte, skip int) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}

func traceGoUnpark(gp *g, skip int) {
	_p_ := getg().m.p
	gp.traceseq++
	if gp.tracelastp == _p_ {
		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
	} else {
		gp.tracelastp = _p_
		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
	}
}

func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}

func traceGoSysExit(ts int64) {
	if ts != 0 && ts < trace.ticksStart {
		// There is a race between the code that initializes sysexitticks
		// (in exitsyscall, which runs without a P, and therefore is not
		// stopped with the rest of the world) and the code that initializes
		// a new trace. The recorded sysexitticks must therefore be treated
		// as "best effort". If they are valid for this trace, then great,
		// use them for greater accuracy. But if they're not valid for this
		// trace, assume that the trace was started after the actual syscall
		// exit (but before we actually managed to start the goroutine,
		// aka right now), and assign a fresh time stamp to keep the log consistent.
		ts = 0
	}
	_g_ := getg().m.curg
	_g_.traceseq++
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
}

func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked,
	// to handle this we temporary employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}

func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
}

func traceNextGC() {
	if memstats.next_gc == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvNextGC, -1, 0)
	} else {
		traceEvent(traceEvNextGC, -1, memstats.next_gc)
	}
}

// To access runtime functions from runtime/trace.
// See runtime/trace/annotation.go

//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	if !trace.enabled {
		return
	}

	// Same as in traceEvent.
	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	typeStringID, bufp := traceString(bufp, pid, taskType)
	traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 3, id, parentID, typeStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
func trace_userTaskEnd(id uint64) {
	traceEvent(traceEvUserTaskEnd, 2, id)
}

//go:linkname trace_userRegion runtime/trace.userRegion
func trace_userRegion(id, mode uint64, name string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	nameStringID, bufp := traceString(bufp, pid, name)
	traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 3, id, mode, nameStringID)
	traceReleaseBuffer(pid)
}

//go:linkname trace_userLog runtime/trace.userLog
func trace_userLog(id uint64, category, message string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	categoryID, bufp := traceString(bufp, pid, category)

	extraSpace := traceBytesPerNumber + len(message) // extraSpace for the value string
	traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 3, id, categoryID)
	// traceEventLocked reserved extra space for the value string and its
	// length via extraSpace, so the buffer now has room for the following.
	buf := bufp.ptr()

	// double-check the message and its length can fit.
	// Otherwise, truncate the message.
	slen := len(message)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}
	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], message[:slen])

	traceReleaseBuffer(pid)
}
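
// These hooks back the public annotation API in runtime/trace. A sketch of
// typical use from user code:
//
//	ctx, task := trace.NewTask(ctx, "makeCappuccino")
//	defer task.End()                              // emits traceEvUserTaskEnd
//	trace.Log(ctx, "orderID", "1234")             // emits traceEvUserLog
//	trace.WithRegion(ctx, "steamMilk", steamMilk) // emits traceEvUserRegion start/end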
1222