...

Source file src/runtime/trace.go

     1	// Copyright 2014 The Go Authors. All rights reserved.
     2	// Use of this source code is governed by a BSD-style
     3	// license that can be found in the LICENSE file.
     4	
     5	// Go execution tracer.
     6	// The tracer captures a wide range of execution events like goroutine
     7	// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
     8	// changes of heap size, processor start/stop, etc., and writes them to a buffer
     9	// in a compact form. A nanosecond-precision timestamp and a stack trace are
    10	// captured for most events.
    11	// See https://golang.org/s/go15trace for more info.
    12	
    13	package runtime
    14	
    15	import (
    16		"runtime/internal/sys"
    17		"unsafe"
    18	)
    19	
    20	// Event types in the trace; arguments are given in square brackets.
    21	const (
    22		traceEvNone              = 0  // unused
    23		traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
    24		traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
    25		traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
    26		traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
    27		traceEvProcStart         = 5  // start of P [timestamp, thread id]
    28		traceEvProcStop          = 6  // stop of P [timestamp]
    29		traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
    30		traceEvGCDone            = 8  // GC done [timestamp]
    31		traceEvGCSTWStart        = 9  // GC STW start [timestamp, kind]
    32		traceEvGCSTWDone         = 10 // GC STW done [timestamp]
    33		traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
    34		traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
    35		traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
    36		traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
    37		traceEvGoEnd             = 15 // goroutine ends [timestamp]
    38		traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
    39		traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
    40		traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
    41		traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
    42		traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
    43		traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
    44		traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
    45		traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
    46		traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
    47		traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
    48		traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
    49		traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
    50		traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
    51		traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
    52		traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
    53		traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
    54		traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
    55		traceEvHeapAlloc         = 33 // memstats.heap_live change [timestamp, heap_alloc]
    56		traceEvNextGC            = 34 // memstats.next_gc change [timestamp, next_gc]
    57		traceEvTimerGoroutine    = 35 // denotes timer goroutine [timer goroutine id]
    58		traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
    59		traceEvString            = 37 // string dictionary entry [ID, length, string]
    60		traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
    61		traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
    62		traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
    63		traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
    64		traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
    65		traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
    66		traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
    67		traceEvUserTaskCreate    = 45 // trace.NewTask [timestamp, internal task id, internal parent task id, stack, name string]
    68		traceEvUserTaskEnd       = 46 // end of a task [timestamp, internal task id, stack]
    69		traceEvUserRegion        = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), stack, name string]
    70		traceEvUserLog           = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string]
    71		traceEvCount             = 49
    72		// A byte is used for the event type, but only 6 bits of it are available.
    73		// The remaining 2 bits are used to specify the number of arguments.
    74		// That means the maximum event type value is 63.
    75	)
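
A minimal decoding sketch (not part of the runtime) showing how a trace consumer could unpack the first byte of an event, given the 6-bit type / 2-bit argument-count split described above; an inline count of 3 means "3 or more", in which case an explicit length byte follows (see traceEventLocked below).

	// decodeEventByte splits an event's first byte, mirroring the
	// ev|narg<<traceArgCountShift packing used by the writer.
	func decodeEventByte(b byte) (ev, narg byte) {
		ev = b & (1<<6 - 1) // low 6 bits: event type
		narg = b >> 6       // high 2 bits: inline arg count; 3 means "3+, length byte follows"
		return ev, narg
	}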
    76	
    77	const (
    78		// Timestamps in trace are cputicks/traceTickDiv.
    79		// This makes absolute values of timestamp diffs smaller,
    80		// and so they are encoded in fewer bytes.
    81		// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
    82		// The suggested increment frequency for PowerPC's time base register is
    83		// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
    84		// and ppc64le.
    85		// Tracing won't work reliably for architectures where cputicks is emulated
    86		// by nanotime, so the value doesn't matter for those architectures.
    87		traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64|sys.GoarchAmd64p32)
    88		// Maximum number of PCs in a single stack trace.
    89		// Since events contain only stack id rather than whole stack trace,
    90		// we can allow quite large values here.
    91		traceStackSize = 128
    92		// Identifier of a fake P that is used when we trace without a real P.
    93		traceGlobProc = -1
    94		// Maximum number of bytes to encode uint64 in base-128.
    95		traceBytesPerNumber = 10
    96		// Shift of the number of arguments in the first event byte.
    97		traceArgCountShift = 6
    98		// Flag passed to traceGoPark to denote that the previous wakeup of this
    99		// goroutine was futile. For example, a goroutine was unblocked on a mutex,
   100		// but another goroutine got ahead and acquired the mutex before the first
   101		// goroutine was scheduled, so the first goroutine had to block again.
   102		// Such wakeups happen on buffered channels and sync.Mutex,
   103		// but are generally not interesting for the end user.
   104		traceFutileWakeup byte = 128
   105	)
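
Working through the traceTickDiv expression above: each sys.GoarchXXX constant is 1 when compiling for that architecture and 0 otherwise, so on 386, amd64, and amd64p32 the parenthesized sum is 1 and traceTickDiv = 16 + 48*1 = 64, while on every other architecture (ppc64, ppc64le, and those where cputicks is emulated) it is 16 + 48*0 = 16.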
   106	
   107	// trace is global tracing context.
   108	var trace struct {
   109		lock          mutex       // protects the following members
   110		lockOwner     *g          // to avoid deadlocks during recursive lock acquisition
   111		enabled       bool        // when set, the runtime traces events
   112		shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
   113		headerWritten bool        // whether ReadTrace has emitted trace header
   114		footerWritten bool        // whether ReadTrace has emitted trace footer
   115		shutdownSema  uint32      // used to wait for ReadTrace completion
   116		seqStart      uint64      // sequence number when tracing was started
   117		ticksStart    int64       // cputicks when tracing was started
   118		ticksEnd      int64       // cputicks when tracing was stopped
   119		timeStart     int64       // nanotime when tracing was started
   120		timeEnd       int64       // nanotime when tracing was stopped
   121		seqGC         uint64      // GC start/done sequencer
   122		reading       traceBufPtr // buffer currently handed off to user
   123		empty         traceBufPtr // stack of empty buffers
   124		fullHead      traceBufPtr // queue of full buffers
   125		fullTail      traceBufPtr
   126		reader        guintptr        // goroutine that called ReadTrace, or nil
   127		stackTab      traceStackTable // maps stack traces to unique ids
   128	
   129		// Dictionary for traceEvString.
   130		//
   131		// TODO: central lock to access the map is not ideal.
   132		//   option: pre-assign ids to all user annotation region names and tags
   133		//   option: per-P cache
   134		//   option: sync.Map like data structure
   135		stringsLock mutex
   136		strings     map[string]uint64
   137		stringSeq   uint64
   138	
   139		// markWorkerLabels maps gcMarkWorkerMode to string ID.
   140		markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64
   141	
   142		bufLock mutex       // protects buf
   143		buf     traceBufPtr // global trace buffer, used when running without a p
   144	}
   145	
   146	// traceBufHeader is per-P tracing buffer.
   147	type traceBufHeader struct {
   148		link      traceBufPtr             // in trace.empty/full
   149		lastTicks uint64                  // when we wrote the last event
   150		pos       int                     // next write offset in arr
   151		stk       [traceStackSize]uintptr // scratch buffer for traceback
   152	}
   153	
   154	// traceBuf is per-P tracing buffer.
   155	//
   156	//go:notinheap
   157	type traceBuf struct {
   158		traceBufHeader
   159		arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // event data; sized so a traceBuf occupies 64 kB in total
   160	}
   161	
   162	// traceBufPtr is a *traceBuf that is not traced by the garbage
   163	// collector and doesn't have write barriers. traceBufs are not
   164	// allocated from the GC'd heap, so this is safe; they are also often
   165	// manipulated in contexts where write barriers are not allowed, so
   166	// this is necessary.
   167	//
   168	// TODO: Since traceBuf is now go:notinheap, this isn't necessary.
   169	type traceBufPtr uintptr
   170	
   171	func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
   172	func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
   173	func traceBufPtrOf(b *traceBuf) traceBufPtr {
   174		return traceBufPtr(unsafe.Pointer(b))
   175	}
   176	
   177	// StartTrace enables tracing for the current process.
   178	// While tracing, the data will be buffered and available via ReadTrace.
   179	// StartTrace returns an error if tracing is already enabled.
   180	// Most clients should use the runtime/trace package or the testing package's
   181	// -test.trace flag instead of calling StartTrace directly.
   182	func StartTrace() error {
   183		// Stop the world, so that we can take a consistent snapshot
   184		// of all goroutines at the beginning of the trace.
   185		stopTheWorld("start tracing")
   186	
   187		// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
   188		// Exitsyscall could check trace.enabled long before and then suddenly wake up
   189		// and decide to write to trace at a random point in time.
   190		// However, such a syscall will use the global trace.buf buffer, because we've
   191		// acquired all p's by doing stop-the-world. So this protects us from such races.
   192		lock(&trace.bufLock)
   193	
   194		if trace.enabled || trace.shutdown {
   195			unlock(&trace.bufLock)
   196			startTheWorld()
   197			return errorString("tracing is already enabled")
   198		}
   199	
   200		// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
   201		// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
   202		// That would lead to an inconsistent trace:
   203		// - either GoSysExit appears before EvGoInSyscall,
   204		// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
   205		// To instruct traceEvent that it must not ignore events below, we set startingtrace.
   206		// trace.enabled is set afterwards once we have emitted all preliminary events.
   207		_g_ := getg()
   208		_g_.m.startingtrace = true
   209	
   210		// Obtain current stack ID to use in all traceEvGoCreate events below.
   211		mp := acquirem()
   212		stkBuf := make([]uintptr, traceStackSize)
   213		stackID := traceStackID(mp, stkBuf, 2)
   214		releasem(mp)
   215	
   216		for _, gp := range allgs {
   217			status := readgstatus(gp)
   218			if status != _Gdead {
   219				gp.traceseq = 0
   220				gp.tracelastp = getg().m.p
   221				// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
   222				id := trace.stackTab.put([]uintptr{gp.startpc + sys.PCQuantum})
   223				traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
   224			}
   225			if status == _Gwaiting {
   226				// traceEvGoWaiting is implied to have seq=1.
   227				gp.traceseq++
   228				traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
   229			}
   230			if status == _Gsyscall {
   231				gp.traceseq++
   232				traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
   233			} else {
   234				gp.sysblocktraced = false
   235			}
   236		}
   237		traceProcStart()
   238		traceGoStart()
   239		// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
   240		// If we do it the other way around, it is possible that exitsyscall will
   241		// query sysexitticks after ticksStart but before traceEvGoInSyscall timestamp.
   242		// It will lead to a false conclusion that cputicks is broken.
   243		trace.ticksStart = cputicks()
   244		trace.timeStart = nanotime()
   245		trace.headerWritten = false
   246		trace.footerWritten = false
   247	
   248		// string to id mapping
   249		//  0 : reserved for an empty string
   250		//  remaining: other strings registered by traceString
   251		trace.stringSeq = 0
   252		trace.strings = make(map[string]uint64)
   253	
   254		trace.seqGC = 0
   255		_g_.m.startingtrace = false
   256		trace.enabled = true
   257	
   258		// Register runtime goroutine labels.
   259		_, pid, bufp := traceAcquireBuffer()
   260		for i, label := range gcMarkWorkerModeStrings[:] {
   261			trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
   262		}
   263		traceReleaseBuffer(pid)
   264	
   265		unlock(&trace.bufLock)
   266	
   267		startTheWorld()
   268		return nil
   269	}
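
As the doc comment says, most clients drive this through the runtime/trace package rather than calling StartTrace directly. A minimal usage sketch (the output file name and error handling are illustrative, not prescribed):

	package main

	import (
		"log"
		"os"
		"runtime/trace"
	)

	func main() {
		f, err := os.Create("trace.out")
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		// trace.Start calls runtime.StartTrace and starts a goroutine
		// that drains runtime.ReadTrace into f.
		if err := trace.Start(f); err != nil {
			log.Fatal(err)
		}
		defer trace.Stop() // calls runtime.StopTrace

		// ... traced workload ...
	}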
   270	
   271	// StopTrace stops tracing, if it was previously enabled.
   272	// StopTrace only returns after all the reads for the trace have completed.
   273	func StopTrace() {
   274		// Stop the world so that we can collect the trace buffers from all p's below,
   275		// and also to avoid races with traceEvent.
   276		stopTheWorld("stop tracing")
   277	
   278		// See the comment in StartTrace.
   279		lock(&trace.bufLock)
   280	
   281		if !trace.enabled {
   282			unlock(&trace.bufLock)
   283			startTheWorld()
   284			return
   285		}
   286	
   287		traceGoSched()
   288	
   289		// Loop over all allocated Ps because dead Ps may still have
   290		// trace buffers.
   291		for _, p := range allp[:cap(allp)] {
   292			buf := p.tracebuf
   293			if buf != 0 {
   294				traceFullQueue(buf)
   295				p.tracebuf = 0
   296			}
   297		}
   298		if trace.buf != 0 {
   299			buf := trace.buf
   300			trace.buf = 0
   301			if buf.ptr().pos != 0 {
   302				traceFullQueue(buf)
   303			}
   304		}
   305	
   306		for {
   307			trace.ticksEnd = cputicks()
   308			trace.timeEnd = nanotime()
   309			// Windows time can tick only every 15ms; wait for at least one tick.
   310			if trace.timeEnd != trace.timeStart {
   311				break
   312			}
   313			osyield()
   314		}
   315	
   316		trace.enabled = false
   317		trace.shutdown = true
   318		unlock(&trace.bufLock)
   319	
   320		startTheWorld()
   321	
   322		// The world is started but we've set trace.shutdown, so new tracing can't start.
   323		// Wait for the trace reader to flush pending buffers and stop.
   324		semacquire(&trace.shutdownSema)
   325		if raceenabled {
   326			raceacquire(unsafe.Pointer(&trace.shutdownSema))
   327		}
   328	
   329		// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
   330		lock(&trace.lock)
   331		for _, p := range allp[:cap(allp)] {
   332			if p.tracebuf != 0 {
   333				throw("trace: non-empty trace buffer in proc")
   334			}
   335		}
   336		if trace.buf != 0 {
   337			throw("trace: non-empty global trace buffer")
   338		}
   339		if trace.fullHead != 0 || trace.fullTail != 0 {
   340			throw("trace: non-empty full trace buffer")
   341		}
   342		if trace.reading != 0 || trace.reader != 0 {
   343			throw("trace: reading after shutdown")
   344		}
   345		for trace.empty != 0 {
   346			buf := trace.empty
   347			trace.empty = buf.ptr().link
   348			sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
   349		}
   350		trace.strings = nil
   351		trace.shutdown = false
   352		unlock(&trace.lock)
   353	}
   354	
   355	// ReadTrace returns the next chunk of binary tracing data, blocking until data
   356	// is available. If tracing is turned off and all the data accumulated while it
   357	// was on has been returned, ReadTrace returns nil. The caller must copy the
   358	// returned data before calling ReadTrace again.
   359	// ReadTrace must be called from one goroutine at a time.
   360	func ReadTrace() []byte {
   361		// This function may need to lock trace.lock recursively
   362		// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
   363		// To allow this we use trace.lockOwner.
   364		// Also this function must not allocate while holding trace.lock:
   365		// allocation can call heap allocate, which will try to emit a trace
   366		// event while holding heap lock.
   367		lock(&trace.lock)
   368		trace.lockOwner = getg()
   369	
   370		if trace.reader != 0 {
   371			// More than one goroutine reads trace. This is bad.
   372			// But we would rather not crash the program because of tracing,
   373			// since tracing can be enabled at runtime on production servers.
   374			trace.lockOwner = nil
   375			unlock(&trace.lock)
   376			println("runtime: ReadTrace called from multiple goroutines simultaneously")
   377			return nil
   378		}
   379		// Recycle the old buffer.
   380		if buf := trace.reading; buf != 0 {
   381			buf.ptr().link = trace.empty
   382			trace.empty = buf
   383			trace.reading = 0
   384		}
   385		// Write trace header.
   386		if !trace.headerWritten {
   387			trace.headerWritten = true
   388			trace.lockOwner = nil
   389			unlock(&trace.lock)
   390			return []byte("go 1.11 trace\x00\x00\x00")
   391		}
   392		// Wait for new data.
   393		if trace.fullHead == 0 && !trace.shutdown {
   394			trace.reader.set(getg())
   395			goparkunlock(&trace.lock, waitReasonTraceReaderBlocked, traceEvGoBlock, 2)
   396			lock(&trace.lock)
   397		}
   398		// Write a buffer.
   399		if trace.fullHead != 0 {
   400			buf := traceFullDequeue()
   401			trace.reading = buf
   402			trace.lockOwner = nil
   403			unlock(&trace.lock)
   404			return buf.ptr().arr[:buf.ptr().pos]
   405		}
   406		// Write footer with timer frequency.
   407		if !trace.footerWritten {
   408			trace.footerWritten = true
   409			// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
   410			freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
   411			trace.lockOwner = nil
   412			unlock(&trace.lock)
   413			var data []byte
   414			data = append(data, traceEvFrequency|0<<traceArgCountShift)
   415			data = traceAppend(data, uint64(freq))
   416			for i := range timers {
   417				tb := &timers[i]
   418				if tb.gp != nil {
   419					data = append(data, traceEvTimerGoroutine|0<<traceArgCountShift)
   420					data = traceAppend(data, uint64(tb.gp.goid))
   421				}
   422			}
   423			// This will emit a bunch of full buffers; we will pick them up
   424			// on the next iteration.
   425			trace.stackTab.dump()
   426			return data
   427		}
   428		// Done.
   429		if trace.shutdown {
   430			trace.lockOwner = nil
   431			unlock(&trace.lock)
   432			if raceenabled {
   433				// Model synchronization on trace.shutdownSema, which race
   434				// detector does not see. This is required to avoid false
   435				// race reports on writer passed to trace.Start.
   436				racerelease(unsafe.Pointer(&trace.shutdownSema))
   437			}
   438			// trace.enabled is already reset, so can call traceable functions.
   439			semrelease(&trace.shutdownSema)
   440			return nil
   441		}
   442		// Also bad, but see the comment above.
   443		trace.lockOwner = nil
   444		unlock(&trace.lock)
   445		println("runtime: spurious wakeup of trace reader")
   446		return nil
   447	}
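
The contract above implies a simple consumer loop: call ReadTrace repeatedly, consume or copy each chunk before the next call, and stop when it returns nil. A sketch of such a reader (the io.Writer destination is an assumption for illustration):

	// drainTrace copies trace data to w until ReadTrace reports that
	// tracing has stopped. It must be the only ReadTrace caller.
	func drainTrace(w io.Writer) error {
		for {
			data := runtime.ReadTrace()
			if data == nil {
				return nil // tracing stopped and all buffered data was returned
			}
			if _, err := w.Write(data); err != nil {
				return err
			}
		}
	}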
   448	
   449	// traceReader returns the trace reader that should be woken up, if any.
   450	func traceReader() *g {
   451		if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
   452			return nil
   453		}
   454		lock(&trace.lock)
   455		if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
   456			unlock(&trace.lock)
   457			return nil
   458		}
   459		gp := trace.reader.ptr()
   460		trace.reader.set(nil)
   461		unlock(&trace.lock)
   462		return gp
   463	}
   464	
   465	// traceProcFree frees trace buffer associated with pp.
   466	func traceProcFree(pp *p) {
   467		buf := pp.tracebuf
   468		pp.tracebuf = 0
   469		if buf == 0 {
   470			return
   471		}
   472		lock(&trace.lock)
   473		traceFullQueue(buf)
   474		unlock(&trace.lock)
   475	}
   476	
   477	// traceFullQueue queues buf into queue of full buffers.
   478	func traceFullQueue(buf traceBufPtr) {
   479		buf.ptr().link = 0
   480		if trace.fullHead == 0 {
   481			trace.fullHead = buf
   482		} else {
   483			trace.fullTail.ptr().link = buf
   484		}
   485		trace.fullTail = buf
   486	}
   487	
   488	// traceFullDequeue dequeues from queue of full buffers.
   489	func traceFullDequeue() traceBufPtr {
   490		buf := trace.fullHead
   491		if buf == 0 {
   492			return 0
   493		}
   494		trace.fullHead = buf.ptr().link
   495		if trace.fullHead == 0 {
   496			trace.fullTail = 0
   497		}
   498		buf.ptr().link = 0
   499		return buf
   500	}
   501	
   502	// traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
   503	// ev is event type.
   504	// If skip > 0, write current stack id as the last argument (skipping skip top frames).
   505	// If skip = 0, this event type should contain a stack, but we don't want
   506	// to collect and remember it for this particular call.
   507	func traceEvent(ev byte, skip int, args ...uint64) {
   508		mp, pid, bufp := traceAcquireBuffer()
   509		// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
   510		// This protects from races between traceEvent and StartTrace/StopTrace.
   511	
   512		// The caller checked that trace.enabled == true, but trace.enabled might have been
   513		// turned off between the check and now. Check again. traceLockBuffer did mp.locks++,
   514		// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
   515		// so if we see trace.enabled == true now, we know it's true for the rest of the function.
   516		// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
   517		// during tracing in exitsyscall is resolved by locking trace.bufLock in traceLockBuffer.
   518		//
   519		// Note trace_userTaskCreate runs the same check.
   520		if !trace.enabled && !mp.startingtrace {
   521			traceReleaseBuffer(pid)
   522			return
   523		}
   524	
   525		if skip > 0 {
   526			if getg() == mp.curg {
   527				skip++ // +1 because stack is captured in traceEventLocked.
   528			}
   529		}
   530		traceEventLocked(0, mp, pid, bufp, ev, skip, args...)
   531		traceReleaseBuffer(pid)
   532	}
   533	
   534	func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, skip int, args ...uint64) {
   535		buf := bufp.ptr()
   536		// TODO: test on non-zero extraBytes param.
   537		maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two additional params
   538		if buf == nil || len(buf.arr)-buf.pos < maxSize {
   539			buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
   540			bufp.set(buf)
   541		}
   542	
   543		ticks := uint64(cputicks()) / traceTickDiv
   544		tickDiff := ticks - buf.lastTicks
   545		buf.lastTicks = ticks
   546		narg := byte(len(args))
   547		if skip >= 0 {
   548			narg++
   549		}
   550		// We have only 2 bits for the number of arguments.
   551		// If the number is >= 3, the event type is followed by the event length in bytes.
   552		if narg > 3 {
   553			narg = 3
   554		}
   555		startPos := buf.pos
   556		buf.byte(ev | narg<<traceArgCountShift)
   557		var lenp *byte
   558		if narg == 3 {
   559			// Reserve the byte for length assuming that length < 128.
   560			buf.varint(0)
   561			lenp = &buf.arr[buf.pos-1]
   562		}
   563		buf.varint(tickDiff)
   564		for _, a := range args {
   565			buf.varint(a)
   566		}
   567		if skip == 0 {
   568			buf.varint(0)
   569		} else if skip > 0 {
   570			buf.varint(traceStackID(mp, buf.stk[:], skip))
   571		}
   572		evSize := buf.pos - startPos
   573		if evSize > maxSize {
   574			throw("invalid length of trace event")
   575		}
   576		if lenp != nil {
   577			// Fill in actual length.
   578			*lenp = byte(evSize - 2)
   579		}
   580	}
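
Reading traceEventLocked as a whole, the bytes emitted for one event are laid out roughly like this (bracketed pieces are optional, and every numeric field is a base-128 varint):

	// [ev | narg<<6]   [length]        tickDiff    arg0 ... argN    [stack id]
	//  1 byte           1 byte,         varint      varints          varint,
	//                   only if                                      only if
	//                   narg == 3                                    skip >= 0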
   581	
   582	func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
   583		_g_ := getg()
   584		gp := mp.curg
   585		var nstk int
   586		if gp == _g_ {
   587			nstk = callers(skip+1, buf)
   588		} else if gp != nil {
   589			gp = mp.curg
   590			nstk = gcallers(gp, skip, buf)
   591		}
   592		if nstk > 0 {
   593			nstk-- // skip runtime.goexit
   594		}
   595		if nstk > 0 && gp.goid == 1 {
   596			nstk-- // skip runtime.main
   597		}
   598		id := trace.stackTab.put(buf[:nstk])
   599		return uint64(id)
   600	}
   601	
   602	// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
   603	func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
   604		mp = acquirem()
   605		if p := mp.p.ptr(); p != nil {
   606			return mp, p.id, &p.tracebuf
   607		}
   608		lock(&trace.bufLock)
   609		return mp, traceGlobProc, &trace.buf
   610	}
   611	
   612	// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
   613	func traceReleaseBuffer(pid int32) {
   614		if pid == traceGlobProc {
   615			unlock(&trace.bufLock)
   616		}
   617		releasem(getg().m)
   618	}
   619	
   620	// traceFlush puts buf onto stack of full buffers and returns an empty buffer.
   621	func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
   622		owner := trace.lockOwner
   623		dolock := owner == nil || owner != getg().m.curg
   624		if dolock {
   625			lock(&trace.lock)
   626		}
   627		if buf != 0 {
   628			traceFullQueue(buf)
   629		}
   630		if trace.empty != 0 {
   631			buf = trace.empty
   632			trace.empty = buf.ptr().link
   633		} else {
   634			buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
   635			if buf == 0 {
   636				throw("trace: out of memory")
   637			}
   638		}
   639		bufp := buf.ptr()
   640		bufp.link.set(nil)
   641		bufp.pos = 0
   642	
   643		// initialize the buffer for a new batch
   644		ticks := uint64(cputicks()) / traceTickDiv
   645		bufp.lastTicks = ticks
   646		bufp.byte(traceEvBatch | 1<<traceArgCountShift)
   647		bufp.varint(uint64(pid))
   648		bufp.varint(ticks)
   649	
   650		if dolock {
   651			unlock(&trace.lock)
   652		}
   653		return buf
   654	}
   655	
   656	// traceString adds a string to the trace.strings and returns the id.
   657	func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
   658		if s == "" {
   659			return 0, bufp
   660		}
   661	
   662		lock(&trace.stringsLock)
   663		if raceenabled {
   664			// raceacquire is necessary because the map access
   665			// below is race annotated.
   666			raceacquire(unsafe.Pointer(&trace.stringsLock))
   667		}
   668	
   669		if id, ok := trace.strings[s]; ok {
   670			if raceenabled {
   671				racerelease(unsafe.Pointer(&trace.stringsLock))
   672			}
   673			unlock(&trace.stringsLock)
   674	
   675			return id, bufp
   676		}
   677	
   678		trace.stringSeq++
   679		id := trace.stringSeq
   680		trace.strings[s] = id
   681	
   682		if raceenabled {
   683			racerelease(unsafe.Pointer(&trace.stringsLock))
   684		}
   685		unlock(&trace.stringsLock)
   686	
   687		// The memory allocation above may trigger tracing and
   688		// change *bufp. The following code works with *bufp,
   689		// so there must be no memory allocation or other activity
   690		// that causes tracing after this point.
   691	
   692		buf := bufp.ptr()
   693		size := 1 + 2*traceBytesPerNumber + len(s)
   694		if buf == nil || len(buf.arr)-buf.pos < size {
   695			buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
   696			bufp.set(buf)
   697		}
   698		buf.byte(traceEvString)
   699		buf.varint(id)
   700	
   701		// double-check the string and the length can fit.
   702		// Otherwise, truncate the string.
   703		slen := len(s)
   704		if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
   705			slen = room
   706		}
   707	
   708		buf.varint(uint64(slen))
   709		buf.pos += copy(buf.arr[buf.pos:], s[:slen])
   710	
   711		bufp.set(buf)
   712		return id, bufp
   713	}
   714	
   715	// traceAppend appends v to buf in little-endian-base-128 encoding.
   716	func traceAppend(buf []byte, v uint64) []byte {
   717		for ; v >= 0x80; v >>= 7 {
   718			buf = append(buf, 0x80|byte(v))
   719		}
   720		buf = append(buf, byte(v))
   721		return buf
   722	}
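
traceAppend and (*traceBuf).varint below use the same little-endian base-128 (LEB128-style) encoding: 7 payload bits per byte, with the high bit set on every byte except the last. A decoding sketch and a worked example (neither belongs to the runtime):

	// traceDecode reads one varint from buf, returning the value and
	// the number of bytes consumed.
	func traceDecode(buf []byte) (v uint64, n int) {
		for shift := uint(0); ; shift += 7 {
			b := buf[n]
			n++
			v |= uint64(b&0x7f) << shift
			if b < 0x80 {
				return v, n
			}
		}
	}

For example, traceAppend(nil, 300) yields {0xAC, 0x02}: the low 7 bits of 300 are 0x2C, emitted with the continuation bit as 0xAC, and the remaining bits (300>>7 == 2) are emitted as 0x02.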
   723	
   724	// varint appends v to buf in little-endian-base-128 encoding.
   725	func (buf *traceBuf) varint(v uint64) {
   726		pos := buf.pos
   727		for ; v >= 0x80; v >>= 7 {
   728			buf.arr[pos] = 0x80 | byte(v)
   729			pos++
   730		}
   731		buf.arr[pos] = byte(v)
   732		pos++
   733		buf.pos = pos
   734	}
   735	
   736	// byte appends v to buf.
   737	func (buf *traceBuf) byte(v byte) {
   738		buf.arr[buf.pos] = v
   739		buf.pos++
   740	}
   741	
   742	// traceStackTable maps stack traces (arrays of PCs) to unique uint32 ids.
   743	// It is lock-free for reading.
   744	type traceStackTable struct {
   745		lock mutex
   746		seq  uint32
   747		mem  traceAlloc
   748		tab  [1 << 13]traceStackPtr
   749	}
   750	
   751	// traceStack is a single stack in traceStackTable.
   752	type traceStack struct {
   753		link traceStackPtr
   754		hash uintptr
   755		id   uint32
   756		n    int
   757		stk  [0]uintptr // real type [n]uintptr
   758	}
   759	
   760	type traceStackPtr uintptr
   761	
   762	func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }
   763	
   764	// stack returns slice of PCs.
   765	func (ts *traceStack) stack() []uintptr {
   766		return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
   767	}
   768	
   769	// put returns a unique id for the stack trace pcs and caches it in the table,
   770	// if it sees the trace for the first time.
   771	func (tab *traceStackTable) put(pcs []uintptr) uint32 {
   772		if len(pcs) == 0 {
   773			return 0
   774		}
   775		hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
   776		// First, search the hashtable w/o the mutex.
   777		if id := tab.find(pcs, hash); id != 0 {
   778			return id
   779		}
   780		// Now, double check under the mutex.
   781		lock(&tab.lock)
   782		if id := tab.find(pcs, hash); id != 0 {
   783			unlock(&tab.lock)
   784			return id
   785		}
   786		// Create new record.
   787		tab.seq++
   788		stk := tab.newStack(len(pcs))
   789		stk.hash = hash
   790		stk.id = tab.seq
   791		stk.n = len(pcs)
   792		stkpc := stk.stack()
   793		for i, pc := range pcs {
   794			stkpc[i] = pc
   795		}
   796		part := int(hash % uintptr(len(tab.tab)))
   797		stk.link = tab.tab[part]
   798		atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
   799		unlock(&tab.lock)
   800		return stk.id
   801	}
   802	
   803	// find checks if the stack trace pcs is already present in the table.
   804	func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
   805		part := int(hash % uintptr(len(tab.tab)))
   806	Search:
   807		for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
   808			if stk.hash == hash && stk.n == len(pcs) {
   809				for i, stkpc := range stk.stack() {
   810					if stkpc != pcs[i] {
   811						continue Search
   812					}
   813				}
   814				return stk.id
   815			}
   816		}
   817		return 0
   818	}
   819	
   820	// newStack allocates a new stack of size n.
   821	func (tab *traceStackTable) newStack(n int) *traceStack {
   822		return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
   823	}
   824	
   825	// allFrames returns all of the Frames corresponding to pcs.
   826	func allFrames(pcs []uintptr) []Frame {
   827		frames := make([]Frame, 0, len(pcs))
   828		ci := CallersFrames(pcs)
   829		for {
   830			f, more := ci.Next()
   831			frames = append(frames, f)
   832			if !more {
   833				return frames
   834			}
   835		}
   836	}
   837	
   838	// dump writes all previously cached stacks to trace buffers,
   839	// releases all memory and resets state.
   840	func (tab *traceStackTable) dump() {
   841		var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
   842		bufp := traceFlush(0, 0)
   843		for _, stk := range tab.tab {
   844			stk := stk.ptr()
   845			for ; stk != nil; stk = stk.link.ptr() {
   846				tmpbuf := tmp[:0]
   847				tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
   848				frames := allFrames(stk.stack())
   849				tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
   850				for _, f := range frames {
   851					var frame traceFrame
   852					frame, bufp = traceFrameForPC(bufp, 0, f)
   853					tmpbuf = traceAppend(tmpbuf, uint64(f.PC))
   854					tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
   855					tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
   856					tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
   857				}
   858				// Now copy to the buffer.
   859				size := 1 + traceBytesPerNumber + len(tmpbuf)
   860				if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
   861					bufp = traceFlush(bufp, 0)
   862				}
   863				buf := bufp.ptr()
   864				buf.byte(traceEvStack | 3<<traceArgCountShift)
   865				buf.varint(uint64(len(tmpbuf)))
   866				buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
   867			}
   868		}
   869	
   870		lock(&trace.lock)
   871		traceFullQueue(bufp)
   872		unlock(&trace.lock)
   873	
   874		tab.mem.drop()
   875		*tab = traceStackTable{}
   876	}
   877	
   878	type traceFrame struct {
   879		funcID uint64
   880		fileID uint64
   881		line   uint64
   882	}
   883	
   884	// traceFrameForPC records the frame information.
   885	// It may allocate memory.
   886	func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
   887		bufp := &buf
   888		var frame traceFrame
   889	
   890		fn := f.Function
   891		const maxLen = 1 << 10
   892		if len(fn) > maxLen {
   893			fn = fn[len(fn)-maxLen:]
   894		}
   895		frame.funcID, bufp = traceString(bufp, pid, fn)
   896		frame.line = uint64(f.Line)
   897		file := f.File
   898		if len(file) > maxLen {
   899			file = file[len(file)-maxLen:]
   900		}
   901		frame.fileID, bufp = traceString(bufp, pid, file)
   902		return frame, (*bufp)
   903	}
   904	
   905	// traceAlloc is a non-thread-safe region allocator.
   906	// It holds a linked list of traceAllocBlock.
   907	type traceAlloc struct {
   908		head traceAllocBlockPtr
   909		off  uintptr
   910	}
   911	
   912	// traceAllocBlock is a block in traceAlloc.
   913	//
   914	// traceAllocBlock is allocated from non-GC'd memory, so it must not
   915	// contain heap pointers. Writes to pointers to traceAllocBlocks do
   916	// not need write barriers.
   917	//
   918	//go:notinheap
   919	type traceAllocBlock struct {
   920		next traceAllocBlockPtr
   921		data [64<<10 - sys.PtrSize]byte
   922	}
   923	
   924	// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
   925	type traceAllocBlockPtr uintptr
   926	
   927	func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
   928	func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }
   929	
   930	// alloc allocates n-byte block.
   931	func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
   932		n = round(n, sys.PtrSize)
   933		if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
   934			if n > uintptr(len(a.head.ptr().data)) {
   935				throw("trace: alloc too large")
   936			}
   937			block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
   938			if block == nil {
   939				throw("trace: out of memory")
   940			}
   941			block.next.set(a.head.ptr())
   942			a.head.set(block)
   943			a.off = 0
   944		}
   945		p := &a.head.ptr().data[a.off]
   946		a.off += n
   947		return unsafe.Pointer(p)
   948	}
   949	
   950	// drop frees all previously allocated memory and resets the allocator.
   951	func (a *traceAlloc) drop() {
   952		for a.head != 0 {
   953			block := a.head.ptr()
   954			a.head.set(block.next.ptr())
   955			sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
   956		}
   957	}
   958	
   959	// The following functions write specific events to trace.
   960	
   961	func traceGomaxprocs(procs int32) {
   962		traceEvent(traceEvGomaxprocs, 1, uint64(procs))
   963	}
   964	
   965	func traceProcStart() {
   966		traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
   967	}
   968	
   969	func traceProcStop(pp *p) {
   970		// Sysmon and stopTheWorld can stop Ps blocked in syscalls;
   971		// to handle this we temporarily employ the P.
   972		mp := acquirem()
   973		oldp := mp.p
   974		mp.p.set(pp)
   975		traceEvent(traceEvProcStop, -1)
   976		mp.p = oldp
   977		releasem(mp)
   978	}
   979	
   980	func traceGCStart() {
   981		traceEvent(traceEvGCStart, 3, trace.seqGC)
   982		trace.seqGC++
   983	}
   984	
   985	func traceGCDone() {
   986		traceEvent(traceEvGCDone, -1)
   987	}
   988	
   989	func traceGCSTWStart(kind int) {
   990		traceEvent(traceEvGCSTWStart, -1, uint64(kind))
   991	}
   992	
   993	func traceGCSTWDone() {
   994		traceEvent(traceEvGCSTWDone, -1)
   995	}
   996	
   997	// traceGCSweepStart prepares to trace a sweep loop. This does not
   998	// emit any events until traceGCSweepSpan is called.
   999	//
  1000	// traceGCSweepStart must be paired with traceGCSweepDone and there
  1001	// must be no preemption points between these two calls.
  1002	func traceGCSweepStart() {
  1003		// Delay the actual GCSweepStart event until the first span
  1004		// sweep. If we don't sweep anything, don't emit any events.
  1005		_p_ := getg().m.p.ptr()
  1006		if _p_.traceSweep {
  1007			throw("double traceGCSweepStart")
  1008		}
  1009		_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
  1010	}
  1011	
  1012	// traceGCSweepSpan traces the sweep of a single page.
  1013	//
  1014	// This may be called outside a traceGCSweepStart/traceGCSweepDone
  1015	// pair; however, it will not emit any trace events in this case.
  1016	func traceGCSweepSpan(bytesSwept uintptr) {
  1017		_p_ := getg().m.p.ptr()
  1018		if _p_.traceSweep {
  1019			if _p_.traceSwept == 0 {
  1020				traceEvent(traceEvGCSweepStart, 1)
  1021			}
  1022			_p_.traceSwept += bytesSwept
  1023		}
  1024	}
  1025	
  1026	func traceGCSweepDone() {
  1027		_p_ := getg().m.p.ptr()
  1028		if !_p_.traceSweep {
  1029			throw("missing traceGCSweepStart")
  1030		}
  1031		if _p_.traceSwept != 0 {
  1032			traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
  1033		}
  1034		_p_.traceSweep = false
  1035	}
  1036	
  1037	func traceGCMarkAssistStart() {
  1038		traceEvent(traceEvGCMarkAssistStart, 1)
  1039	}
  1040	
  1041	func traceGCMarkAssistDone() {
  1042		traceEvent(traceEvGCMarkAssistDone, -1)
  1043	}
  1044	
  1045	func traceGoCreate(newg *g, pc uintptr) {
  1046		newg.traceseq = 0
  1047		newg.tracelastp = getg().m.p
  1048		// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
  1049		id := trace.stackTab.put([]uintptr{pc + sys.PCQuantum})
  1050		traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
  1051	}
  1052	
  1053	func traceGoStart() {
  1054		_g_ := getg().m.curg
  1055		_p_ := _g_.m.p
  1056		_g_.traceseq++
  1057		if _g_ == _p_.ptr().gcBgMarkWorker.ptr() {
  1058			traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
  1059		} else if _g_.tracelastp == _p_ {
  1060			traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
  1061		} else {
  1062			_g_.tracelastp = _p_
  1063			traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
  1064		}
  1065	}
  1066	
  1067	func traceGoEnd() {
  1068		traceEvent(traceEvGoEnd, -1)
  1069	}
  1070	
  1071	func traceGoSched() {
  1072		_g_ := getg()
  1073		_g_.tracelastp = _g_.m.p
  1074		traceEvent(traceEvGoSched, 1)
  1075	}
  1076	
  1077	func traceGoPreempt() {
  1078		_g_ := getg()
  1079		_g_.tracelastp = _g_.m.p
  1080		traceEvent(traceEvGoPreempt, 1)
  1081	}
  1082	
  1083	func traceGoPark(traceEv byte, skip int) {
  1084		if traceEv&traceFutileWakeup != 0 {
  1085			traceEvent(traceEvFutileWakeup, -1)
  1086		}
  1087		traceEvent(traceEv & ^traceFutileWakeup, skip)
  1088	}
  1089	
  1090	func traceGoUnpark(gp *g, skip int) {
  1091		_p_ := getg().m.p
  1092		gp.traceseq++
  1093		if gp.tracelastp == _p_ {
  1094			traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
  1095		} else {
  1096			gp.tracelastp = _p_
  1097			traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
  1098		}
  1099	}
  1100	
  1101	func traceGoSysCall() {
  1102		traceEvent(traceEvGoSysCall, 1)
  1103	}
  1104	
  1105	func traceGoSysExit(ts int64) {
  1106		if ts != 0 && ts < trace.ticksStart {
  1107			// There is a race between the code that initializes sysexitticks
  1108			// (in exitsyscall, which runs without a P, and therefore is not
  1109			// stopped with the rest of the world) and the code that initializes
  1110			// a new trace. The recorded sysexitticks must therefore be treated
  1111			// as "best effort". If they are valid for this trace, then great,
  1112			// use them for greater accuracy. But if they're not valid for this
  1113			// trace, assume that the trace was started after the actual syscall
  1114			// exit (but before we actually managed to start the goroutine,
  1115			// aka right now), and assign a fresh time stamp to keep the log consistent.
  1116			ts = 0
  1117		}
  1118		_g_ := getg().m.curg
  1119		_g_.traceseq++
  1120		_g_.tracelastp = _g_.m.p
  1121		traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
  1122	}
  1123	
  1124	func traceGoSysBlock(pp *p) {
  1125		// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked;
  1126		// to handle this we temporarily employ the P.
  1127		mp := acquirem()
  1128		oldp := mp.p
  1129		mp.p.set(pp)
  1130		traceEvent(traceEvGoSysBlock, -1)
  1131		mp.p = oldp
  1132		releasem(mp)
  1133	}
  1134	
  1135	func traceHeapAlloc() {
  1136		traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
  1137	}
  1138	
  1139	func traceNextGC() {
  1140		if memstats.next_gc == ^uint64(0) {
  1141			// Heap-based triggering is disabled.
  1142			traceEvent(traceEvNextGC, -1, 0)
  1143		} else {
  1144			traceEvent(traceEvNextGC, -1, memstats.next_gc)
  1145		}
  1146	}
  1147	
  1148	// The functions below are linkname'd into package runtime/trace so that
  1149	// it can access these runtime internals. See runtime/trace/annotation.go.
  1150	
  1151	//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
  1152	func trace_userTaskCreate(id, parentID uint64, taskType string) {
  1153		if !trace.enabled {
  1154			return
  1155		}
  1156	
  1157		// Same as in traceEvent.
  1158		mp, pid, bufp := traceAcquireBuffer()
  1159		if !trace.enabled && !mp.startingtrace {
  1160			traceReleaseBuffer(pid)
  1161			return
  1162		}
  1163	
  1164		typeStringID, bufp := traceString(bufp, pid, taskType)
  1165		traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 3, id, parentID, typeStringID)
  1166		traceReleaseBuffer(pid)
  1167	}
  1168	
  1169	//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
  1170	func trace_userTaskEnd(id uint64) {
  1171		traceEvent(traceEvUserTaskEnd, 2, id)
  1172	}
  1173	
  1174	//go:linkname trace_userRegion runtime/trace.userRegion
  1175	func trace_userRegion(id, mode uint64, name string) {
  1176		if !trace.enabled {
  1177			return
  1178		}
  1179	
  1180		mp, pid, bufp := traceAcquireBuffer()
  1181		if !trace.enabled && !mp.startingtrace {
  1182			traceReleaseBuffer(pid)
  1183			return
  1184		}
  1185	
  1186		nameStringID, bufp := traceString(bufp, pid, name)
  1187		traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 3, id, mode, nameStringID)
  1188		traceReleaseBuffer(pid)
  1189	}
  1190	
  1191	//go:linkname trace_userLog runtime/trace.userLog
  1192	func trace_userLog(id uint64, category, message string) {
  1193		if !trace.enabled {
  1194			return
  1195		}
  1196	
  1197		mp, pid, bufp := traceAcquireBuffer()
  1198		if !trace.enabled && !mp.startingtrace {
  1199			traceReleaseBuffer(pid)
  1200			return
  1201		}
  1202	
  1203		categoryID, bufp := traceString(bufp, pid, category)
  1204	
  1205		extraSpace := traceBytesPerNumber + len(message) // extraSpace for the value string
  1206		traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 3, id, categoryID)
  1207		// traceEventLocked reserved extra space for val and len(val)
  1208		// in buf, so buf now has room for the following.
  1209		buf := bufp.ptr()
  1210	
  1211		// double-check the message and its length can fit.
  1212		// Otherwise, truncate the message.
  1213		slen := len(message)
  1214		if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
  1215			slen = room
  1216		}
  1217		buf.varint(uint64(slen))
  1218		buf.pos += copy(buf.arr[buf.pos:], message[:slen])
  1219	
  1220		traceReleaseBuffer(pid)
  1221	}
  1222	
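
The linkname'd hooks above are reached through the public annotation API in package runtime/trace. A usage sketch showing which hook each call ends up in (task, region, and log names are purely illustrative):

	package main

	import (
		"context"
		"runtime/trace"
	)

	func main() {
		ctx, task := trace.NewTask(context.Background(), "request") // -> trace_userTaskCreate
		defer task.End()                                            // -> trace_userTaskEnd

		trace.WithRegion(ctx, "parse", func() { // -> trace_userRegion (start and end)
			// ... work ...
		})

		trace.Log(ctx, "status", "ok") // -> trace_userLog
	}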
