Source file src/cmd/compile/internal/gc/escape.go

     1	// Copyright 2018 The Go Authors. All rights reserved.
     2	// Use of this source code is governed by a BSD-style
     3	// license that can be found in the LICENSE file.
     4	
     5	package gc
     6	
     7	import (
     8		"cmd/compile/internal/types"
     9		"fmt"
    10	)
    11	
    12	// Escape analysis.
    13	//
    14	// Here we analyze functions to determine which Go variables
    15	// (including implicit allocations such as calls to "new" or "make",
    16	// composite literals, etc.) can be allocated on the stack. The two
    17	// key invariants we have to ensure are: (1) pointers to stack objects
    18	// cannot be stored in the heap, and (2) pointers to a stack object
    19	// cannot outlive that object (e.g., because the declaring function
    20	// returned and destroyed the object's stack frame, or its space is
    21	// reused across loop iterations for logically distinct variables).
    22	//
    23	// We implement this with a static data-flow analysis of the AST.
    24	// First, we construct a directed weighted graph where vertices
    25	// (termed "locations") represent variables allocated by statements
    26	// and expressions, and edges represent assignments between variables
    27	// (with weights representing addressing/dereference counts).
    28	//
    29	// Next we walk the graph looking for assignment paths that might
    30	// violate the invariants stated above. If a variable v's address is
    31	// stored in the heap or elsewhere that may outlive it, then v is
    32	// marked as requiring heap allocation.
    33	//
    34	// To support interprocedural analysis, we also record data-flow from
    35	// each function's parameters to the heap and to its result
    36	// parameters. This information is summarized as "paremeter tags",
    37	// which are used at static call sites to improve escape analysis of
    38	// function arguments.
    39	
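
A hypothetical pair of functions (not from this file) illustrating the two invariants; sink is an assumed package-level variable:

	package p

	var sink *int

	func invariant1() {
		x := 0
		sink = &x // x's address is stored in the heap (a global), so x must be heap allocated
	}

	func invariant2() *int {
		y := 0
		return &y // y's address outlives invariant2's stack frame, so y must be heap allocated
	}
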
    40	// Constructing the location graph.
    41	//
    42	// Every allocating statement (e.g., variable declaration) or
    43	// expression (e.g., "new" or "make") is first mapped to a unique
    44	// "location."
    45	//
    46	// We also model every Go assignment as a directed edge between
    47	// locations. The number of dereference operations minus the number of
    48	// addressing operations is recorded as the edge's weight (termed
    49	// "derefs"). For example:
    50	//
    51	//     p = &q    // -1
    52	//     p = q     //  0
    53	//     p = *q    //  1
    54	//     p = **q   //  2
    55	//
    56	//     p = **&**&q  // 2
    57	//
    58	// Note that the & operator can only be applied to addressable
    59	// expressions, and the expression &x itself is not addressable, so
    60	// derefs cannot go below -1.
    61	//
    62	// Every Go language construct is lowered into this representation,
    63	// generally without sensitivity to flow, path, or context; and
    64	// without distinguishing elements within a compound variable. For
    65	// example:
    66	//
    67	//     var x struct { f, g *int }
    68	//     var u []*int
    69	//
    70	//     x.f = u[0]
    71	//
    72	// is modeled simply as
    73	//
    74	//     x = *u
    75	//
    76	// That is, we don't distinguish x.f from x.g, or u[0] from u[1],
    77	// u[2], etc. However, we do record the implicit dereference involved
    78	// in indexing a slice.
    79	
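
A hypothetical consequence of this insensitivity: publishing one field of a struct forces every allocation stored anywhere in that struct to the heap (sink is an assumed global):

	package p

	var sink *int

	func fields() {
		var x struct{ f, g *int }
		x.f = new(int)
		x.g = new(int)
		sink = x.f // x is modeled as a single location, so both new(int)
		           // allocations are reachable from the heap and both escape
	}
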
    80	type Escape struct {
    81		allLocs []*EscLocation
    82	
    83		curfn *Node
    84	
    85		// loopDepth counts the current loop nesting depth within
    86		// curfn. It increments within each "for" loop and at each
    87		// label with a corresponding backwards "goto" (i.e.,
    88		// unstructured loop).
    89		loopDepth int
    90	
    91		heapLoc  EscLocation
    92		blankLoc EscLocation
    93	}
    94	
    95	// An EscLocation represents an abstract location that stores a Go
    96	// variable.
    97	type EscLocation struct {
    98		n         *Node     // represented variable or expression, if any
    99		curfn     *Node     // enclosing function
   100		edges     []EscEdge // incoming edges
   101		loopDepth int       // loopDepth at declaration
   102	
   103		// derefs and walkgen are used during walk to track the
   104		// minimal dereferences from the walk root.
   105		derefs  int // >= -1
   106		walkgen uint32
   107	
   108		// escapes reports whether the represented variable's address
   109		// escapes; that is, whether the variable must be heap
   110		// allocated.
   111		escapes bool
   112	
   113		// transient reports whether the represented expression's
   114		// address does not outlive the statement; that is, whether
   115		// its storage can be immediately reused.
   116		transient bool
   117	
   118		// paramEsc records the represented parameter's escape tags.
   119		// See "Parameter tags" below for details.
   120		paramEsc uint16
   121	}
   122	
   123	// An EscEdge represents an assignment edge between two Go variables.
   124	type EscEdge struct {
   125		src    *EscLocation
   126		derefs int // >= -1
   127	}
   128	
   129	// escapeFuncs performs escape analysis on a minimal batch of
   130	// functions.
   131	func escapeFuncs(fns []*Node, recursive bool) {
   132		for _, fn := range fns {
   133			if fn.Op != ODCLFUNC {
   134				Fatalf("unexpected node: %v", fn)
   135			}
   136		}
   137	
   138		var e Escape
   139	
   140		// Construct data-flow graph from syntax trees.
   141		for _, fn := range fns {
   142			e.initFunc(fn)
   143		}
   144		for _, fn := range fns {
   145			e.walkFunc(fn)
   146		}
   147		e.curfn = nil
   148	
   149		e.walkAll()
   150		e.finish()
   151	
   152		// Record parameter tags for package export data.
   153		for _, fn := range fns {
   154			esctag(fn)
   155		}
   156	}
   157	
   158	func (e *Escape) initFunc(fn *Node) {
   159		if fn.Op != ODCLFUNC || fn.Esc != EscFuncUnknown {
   160			Fatalf("unexpected node: %v", fn)
   161		}
   162		fn.Esc = EscFuncPlanned
   163		if Debug['m'] > 3 {
   164			Dump("escAnalyze", fn)
   165		}
   166	
   167		e.curfn = fn
   168		e.loopDepth = 1
   169	
   170		// Allocate locations for local variables.
   171		for _, dcl := range fn.Func.Dcl {
   172			if dcl.Op == ONAME {
   173				loc := e.newLoc(dcl, false)
   174	
   175				if dcl.Class() == PPARAM && fn.Nbody.Len() == 0 && !fn.Noescape() {
   176					loc.paramEsc = EscHeap
   177				}
   178			}
   179		}
   180	}
   181	
   182	func (e *Escape) walkFunc(fn *Node) {
   183		fn.Esc = EscFuncStarted
   184	
   185		// Identify labels that mark the head of an unstructured loop.
   186		inspectList(fn.Nbody, func(n *Node) bool {
   187			switch n.Op {
   188			case OLABEL:
   189				n.Sym.Label = asTypesNode(&nonlooping)
   190	
   191			case OGOTO:
   192				// If we visited the label before the goto,
   193				// then this is a looping label.
   194				if n.Sym.Label == asTypesNode(&nonlooping) {
   195					n.Sym.Label = asTypesNode(&looping)
   196				}
   197			}
   198	
   199			return true
   200		})
   201	
   202		e.curfn = fn
   203		e.loopDepth = 1
   204		e.stmts(fn.Nbody)
   205	}
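
A hypothetical input containing an unstructured loop: the label below is visited before the backwards goto, so walkFunc marks it as looping and stmt raises e.loopDepth at it, just as it would for a "for" statement:

	package p

	func spin(n int) {
		var last *int
		i := 0
	again:
		last = new(int) // allocated at loopDepth 2 but stored into last, which was
		                // declared at depth 1, so the allocation is heap allocated
		i++
		if i < n {
			goto again
		}
		_ = last
	}
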
   206	
   207	// Below we implement the methods for walking the AST and recording
   208	// data flow edges. Note that because a sub-expression might have
   209	// side-effects, it's important to always visit the entire AST.
   210	//
   211	// For example, write either:
   212	//
   213	//     if x {
   214	//         e.discard(n.Left)
   215	//     } else {
   216	//         e.value(k, n.Left)
   217	//     }
   218	//
   219	// or
   220	//
   221	//     if x {
   222	//         k = e.discardHole()
   223	//     }
   224	//     e.value(k, n.Left)
   225	//
   226	// Do NOT write:
   227	//
   228	//    // BAD: possibly loses side-effects within n.Left
   229	//    if !x {
   230	//        e.value(k, n.Left)
   231	//    }
   232	
   233	// stmt evaluates a single Go statement.
   234	func (e *Escape) stmt(n *Node) {
   235		if n == nil {
   236			return
   237		}
   238	
   239		lno := setlineno(n)
   240		defer func() {
   241			lineno = lno
   242		}()
   243	
   244		if Debug['m'] > 2 {
   245			fmt.Printf("%v:[%d] %v stmt: %v\n", linestr(lineno), e.loopDepth, funcSym(e.curfn), n)
   246		}
   247	
   248		e.stmts(n.Ninit)
   249	
   250		switch n.Op {
   251		default:
   252			Fatalf("unexpected stmt: %v", n)
   253	
   254		case ODCLCONST, ODCLTYPE, OEMPTY, OFALL, OINLMARK:
   255			// nop
   256	
   257		case OBREAK, OCONTINUE, OGOTO:
   258			// TODO(mdempsky): Handle dead code?
   259	
   260		case OBLOCK:
   261			e.stmts(n.List)
   262	
   263		case ODCL:
   264			// Record loop depth at declaration.
   265			if !n.Left.isBlank() {
   266				e.dcl(n.Left)
   267			}
   268	
   269		case OLABEL:
   270			switch asNode(n.Sym.Label) {
   271			case &nonlooping:
   272				if Debug['m'] > 2 {
   273					fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n)
   274				}
   275			case &looping:
   276				if Debug['m'] > 2 {
   277					fmt.Printf("%v: %v looping label\n", linestr(lineno), n)
   278				}
   279				e.loopDepth++
   280			default:
   281				Fatalf("label missing tag")
   282			}
   283			n.Sym.Label = nil
   284	
   285		case OIF:
   286			e.discard(n.Left)
   287			e.stmts(n.Nbody)
   288			e.stmts(n.Rlist)
   289	
   290		case OFOR, OFORUNTIL:
   291			e.loopDepth++
   292			e.discard(n.Left)
   293			e.stmt(n.Right)
   294			e.stmts(n.Nbody)
   295			e.loopDepth--
   296	
   297		case ORANGE:
   298			// for List = range Right { Nbody }
   299	
   300			// Right is evaluated outside the loop.
   301			tv := e.newLoc(n, false)
   302			e.expr(tv.asHole(), n.Right)
   303	
   304			e.loopDepth++
   305			ks := e.addrs(n.List)
   306			if len(ks) >= 2 {
   307				if n.Right.Type.IsArray() {
   308					e.flow(ks[1].note(n, "range"), tv)
   309				} else {
   310					e.flow(ks[1].deref(n, "range-deref"), tv)
   311				}
   312			}
   313	
   314			e.stmts(n.Nbody)
   315			e.loopDepth--
   316	
   317		case OSWITCH:
   318			var tv *EscLocation
   319			if n.Left != nil {
   320				if n.Left.Op == OTYPESW {
   321					k := e.discardHole()
   322					if n.Left.Left != nil {
   323						tv = e.newLoc(n.Left, false)
   324						k = tv.asHole()
   325					}
   326					e.expr(k, n.Left.Right)
   327				} else {
   328					e.discard(n.Left)
   329				}
   330			}
   331	
   332			for _, cas := range n.List.Slice() { // cases
   333				if tv != nil {
   334					// type switch variables have no ODCL.
   335					cv := cas.Rlist.First()
   336					k := e.dcl(cv)
   337					if types.Haspointers(cv.Type) {
   338						e.flow(k.dotType(cv.Type, n, "switch case"), tv)
   339					}
   340				}
   341	
   342				e.discards(cas.List)
   343				e.stmts(cas.Nbody)
   344			}
   345	
   346		case OSELECT:
   347			for _, cas := range n.List.Slice() {
   348				e.stmt(cas.Left)
   349				e.stmts(cas.Nbody)
   350			}
   351		case OSELRECV:
   352			e.assign(n.Left, n.Right, "selrecv", n)
   353		case OSELRECV2:
   354			e.assign(n.Left, n.Right, "selrecv", n)
   355			e.assign(n.List.First(), nil, "selrecv", n)
   356		case ORECV:
   357			// TODO(mdempsky): Consider e.discard(n.Left).
   358			e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
   359		case OSEND:
   360			e.discard(n.Left)
   361			e.assignHeap(n.Right, "send", n)
   362	
   363		case OAS, OASOP:
   364			e.assign(n.Left, n.Right, "assign", n)
   365	
   366		case OAS2:
   367			for i, nl := range n.List.Slice() {
   368				e.assign(nl, n.Rlist.Index(i), "assign-pair", n)
   369			}
   370	
   371		case OAS2DOTTYPE: // v, ok = x.(type)
   372			e.assign(n.List.First(), n.Rlist.First(), "assign-pair-dot-type", n)
   373			e.assign(n.List.Second(), nil, "assign-pair-dot-type", n)
   374		case OAS2MAPR: // v, ok = m[k]
   375			e.assign(n.List.First(), n.Rlist.First(), "assign-pair-mapr", n)
   376			e.assign(n.List.Second(), nil, "assign-pair-mapr", n)
   377		case OAS2RECV: // v, ok = <-ch
   378			e.assign(n.List.First(), n.Rlist.First(), "assign-pair-receive", n)
   379			e.assign(n.List.Second(), nil, "assign-pair-receive", n)
   380	
   381		case OAS2FUNC:
   382			e.stmts(n.Rlist.First().Ninit)
   383			e.call(e.addrs(n.List), n.Rlist.First(), nil)
   384		case ORETURN:
   385			results := e.curfn.Type.Results().FieldSlice()
   386			for i, v := range n.List.Slice() {
   387				e.assign(asNode(results[i].Nname), v, "return", n)
   388			}
   389		case OCALLFUNC, OCALLMETH, OCALLINTER, OCLOSE, OCOPY, ODELETE, OPANIC, OPRINT, OPRINTN, ORECOVER:
   390			e.call(nil, n, nil)
   391		case OGO, ODEFER:
   392			e.stmts(n.Left.Ninit)
   393			e.call(nil, n.Left, n)
   394	
   395		case ORETJMP:
   396			// TODO(mdempsky): What do? esc.go just ignores it.
   397		}
   398	}
   399	
   400	func (e *Escape) stmts(l Nodes) {
   401		// TODO(mdempsky): Preserve and restore e.loopDepth? See also #22438.
   402		for _, n := range l.Slice() {
   403			e.stmt(n)
   404		}
   405	}
   406	
   407	// expr models evaluating an expression n and flowing the result into
   408	// hole k.
   409	func (e *Escape) expr(k EscHole, n *Node) {
   410		if n == nil {
   411			return
   412		}
   413		e.stmts(n.Ninit)
   414		e.exprSkipInit(k, n)
   415	}
   416	
   417	func (e *Escape) exprSkipInit(k EscHole, n *Node) {
   418		if n == nil {
   419			return
   420		}
   421	
   422		lno := setlineno(n)
   423		defer func() {
   424			lineno = lno
   425		}()
   426	
   427		if k.derefs >= 0 && !types.Haspointers(n.Type) {
   428			k = e.discardHole()
   429		}
   430	
   431		switch n.Op {
   432		default:
   433			Fatalf("unexpected expr: %v", n)
   434	
   435		case OLITERAL, OGETG, OCLOSUREVAR, OTYPE:
   436			// nop
   437	
   438		case ONAME:
   439			if n.Class() == PFUNC || n.Class() == PEXTERN {
   440				return
   441			}
   442			e.flow(k, e.oldLoc(n))
   443	
   444		case OPLUS, ONEG, OBITNOT, ONOT:
   445			e.discard(n.Left)
   446		case OADD, OSUB, OOR, OXOR, OMUL, ODIV, OMOD, OLSH, ORSH, OAND, OANDNOT, OEQ, ONE, OLT, OLE, OGT, OGE, OANDAND, OOROR:
   447			e.discard(n.Left)
   448			e.discard(n.Right)
   449	
   450		case OADDR:
   451			e.expr(k.addr(n, "address-of"), n.Left) // "address-of"
   452		case ODEREF:
   453			e.expr(k.deref(n, "indirection"), n.Left) // "indirection"
   454		case ODOT, ODOTMETH, ODOTINTER:
   455			e.expr(k.note(n, "dot"), n.Left)
   456		case ODOTPTR:
   457			e.expr(k.deref(n, "dot of pointer"), n.Left) // "dot of pointer"
   458		case ODOTTYPE, ODOTTYPE2:
   459			e.expr(k.dotType(n.Type, n, "dot"), n.Left)
   460		case OINDEX:
   461			if n.Left.Type.IsArray() {
   462				e.expr(k.note(n, "fixed-array-index-of"), n.Left)
   463			} else {
   464				// TODO(mdempsky): Fix why reason text.
   465				e.expr(k.deref(n, "dot of pointer"), n.Left)
   466			}
   467			e.discard(n.Right)
   468		case OINDEXMAP:
   469			e.discard(n.Left)
   470			e.discard(n.Right)
   471		case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR:
   472			e.expr(k.note(n, "slice"), n.Left)
   473			low, high, max := n.SliceBounds()
   474			e.discard(low)
   475			e.discard(high)
   476			e.discard(max)
   477	
   478		case OCONV, OCONVNOP:
   479			if n.Type.Etype == TUNSAFEPTR && n.Left.Type.Etype == TUINTPTR {
   480				e.unsafeValue(k, n.Left)
   481			} else {
   482				e.expr(k, n.Left)
   483			}
   484		case OCONVIFACE:
   485			if !n.Left.Type.IsInterface() && !isdirectiface(n.Left.Type) {
   486				k = e.spill(k, n)
   487			} else {
   488				// esc.go prints "escapes to heap" / "does not
   489				// escape" messages for OCONVIFACE even when
   490				// they don't allocate.  Match that behavior
   491				// because it's easy.
   492				// TODO(mdempsky): Remove and cleanup test expectations.
   493				_ = e.spill(k, n)
   494			}
   495			e.expr(k.note(n, "interface-converted"), n.Left)
   496	
   497		case ORECV:
   498			e.discard(n.Left)
   499	
   500		case OCALLMETH, OCALLFUNC, OCALLINTER, OLEN, OCAP, OCOMPLEX, OREAL, OIMAG, OAPPEND, OCOPY:
   501			e.call([]EscHole{k}, n, nil)
   502	
   503		case ONEW:
   504			e.spill(k, n)
   505	
   506		case OMAKESLICE:
   507			e.spill(k, n)
   508			e.discard(n.Left)
   509			e.discard(n.Right)
   510		case OMAKECHAN:
   511			e.discard(n.Left)
   512		case OMAKEMAP:
   513			e.spill(k, n)
   514			e.discard(n.Left)
   515	
   516		case ORECOVER:
   517			// nop
   518	
   519		case OCALLPART:
   520			e.spill(k, n)
   521	
   522			// esc.go says "Contents make it to memory, lose
   523			// track."  I think we can just flow n.Left to our
   524			// spilled location though.
   525			// TODO(mdempsky): Try that.
   526			e.assignHeap(n.Left, "call part", n)
   527	
   528		case OPTRLIT:
   529			e.expr(e.spill(k, n), n.Left)
   530	
   531		case OARRAYLIT:
   532			for _, elt := range n.List.Slice() {
   533				if elt.Op == OKEY {
   534					elt = elt.Right
   535				}
   536				e.expr(k.note(n, "array literal element"), elt)
   537			}
   538	
   539		case OSLICELIT:
   540			k = e.spill(k, n)
   541	
   542			for _, elt := range n.List.Slice() {
   543				if elt.Op == OKEY {
   544					elt = elt.Right
   545				}
   546				e.expr(k.note(n, "slice-literal-element"), elt)
   547			}
   548	
   549		case OSTRUCTLIT:
   550			for _, elt := range n.List.Slice() {
   551				e.expr(k.note(n, "struct literal element"), elt.Left)
   552			}
   553	
   554		case OMAPLIT:
   555			e.spill(k, n)
   556	
   557			// Map keys and values are always stored in the heap.
   558			for _, elt := range n.List.Slice() {
   559				e.assignHeap(elt.Left, "map literal key", n)
   560				e.assignHeap(elt.Right, "map literal value", n)
   561			}
   562	
   563		case OCLOSURE:
   564			k = e.spill(k, n)
   565	
   566			// Link addresses of captured variables to closure.
   567			for _, v := range n.Func.Closure.Func.Cvars.Slice() {
   568				if v.Op == OXXX { // unnamed out argument; see dcl.go:/^funcargs
   569					continue
   570				}
   571	
   572				k := k
   573				if !v.Name.Byval() {
   574					k = k.addr(v, "reference")
   575				}
   576	
   577				e.expr(k.note(n, "captured by a closure"), v.Name.Defn)
   578			}
   579	
   580		case ORUNES2STR, OBYTES2STR, OSTR2RUNES, OSTR2BYTES, ORUNESTR:
   581			e.spill(k, n)
   582			e.discard(n.Left)
   583	
   584		case OADDSTR:
   585			e.spill(k, n)
   586	
   587			// Arguments of OADDSTR never escape;
   588			// runtime.concatstrings makes sure of that.
   589			e.discards(n.List)
   590		}
   591	}
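
A hypothetical illustration of the OCONVIFACE case above: only values that are not already pointer-shaped need a spill location for the conversion:

	package p

	func boxInt(i int) interface{} { return i } // not a direct interface value: the conversion
	                                            // spills to a new location, which escapes via
	                                            // the result

	func boxPtr(p *int) interface{} { return p } // pointer-shaped: stored directly in the
	                                             // interface word, no spill needed
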
   592	
   593	// unsafeValue evaluates a uintptr-typed arithmetic expression looking
   594	// for conversions from an unsafe.Pointer.
   595	func (e *Escape) unsafeValue(k EscHole, n *Node) {
   596		if n.Type.Etype != TUINTPTR {
   597			Fatalf("unexpected type %v for %v", n.Type, n)
   598		}
   599	
   600		e.stmts(n.Ninit)
   601	
   602		switch n.Op {
   603		case OCONV, OCONVNOP:
   604			if n.Left.Type.Etype == TUNSAFEPTR {
   605				e.expr(k, n.Left)
   606			} else {
   607				e.discard(n.Left)
   608			}
   609		case ODOTPTR:
   610			if isReflectHeaderDataField(n) {
   611				e.expr(k.deref(n, "reflect.Header.Data"), n.Left)
   612			} else {
   613				e.discard(n.Left)
   614			}
   615		case OPLUS, ONEG, OBITNOT:
   616			e.unsafeValue(k, n.Left)
   617		case OADD, OSUB, OOR, OXOR, OMUL, ODIV, OMOD, OAND, OANDNOT:
   618			e.unsafeValue(k, n.Left)
   619			e.unsafeValue(k, n.Right)
   620		case OLSH, ORSH:
   621			e.unsafeValue(k, n.Left)
   622			// RHS need not be uintptr-typed (#32959) and can't meaningfully
   623			// flow pointers anyway.
   624			e.discard(n.Right)
   625		default:
   626			e.exprSkipInit(e.discardHole(), n)
   627		}
   628	}
   629	
   630	// discard evaluates an expression n for side-effects, but discards
   631	// its value.
   632	func (e *Escape) discard(n *Node) {
   633		e.expr(e.discardHole(), n)
   634	}
   635	
   636	func (e *Escape) discards(l Nodes) {
   637		for _, n := range l.Slice() {
   638			e.discard(n)
   639		}
   640	}
   641	
   642	// addr evaluates an addressable expression n and returns an EscHole
   643	// that represents storing into the represented location.
   644	func (e *Escape) addr(n *Node) EscHole {
   645		if n == nil || n.isBlank() {
   646			// Can happen at least in OSELRECV.
   647			// TODO(mdempsky): Anywhere else?
   648			return e.discardHole()
   649		}
   650	
   651		k := e.heapHole()
   652	
   653		switch n.Op {
   654		default:
   655			Fatalf("unexpected addr: %v", n)
   656		case ONAME:
   657			if n.Class() == PEXTERN {
   658				break
   659			}
   660			k = e.oldLoc(n).asHole()
   661		case ODOT:
   662			k = e.addr(n.Left)
   663		case OINDEX:
   664			e.discard(n.Right)
   665			if n.Left.Type.IsArray() {
   666				k = e.addr(n.Left)
   667			} else {
   668				e.discard(n.Left)
   669			}
   670		case ODEREF, ODOTPTR:
   671			e.discard(n)
   672		case OINDEXMAP:
   673			e.discard(n.Left)
   674			e.assignHeap(n.Right, "key of map put", n)
   675		}
   676	
   677		if !types.Haspointers(n.Type) {
   678			k = e.discardHole()
   679		}
   680	
   681		return k
   682	}
   683	
   684	func (e *Escape) addrs(l Nodes) []EscHole {
   685		var ks []EscHole
   686		for _, n := range l.Slice() {
   687			ks = append(ks, e.addr(n))
   688		}
   689		return ks
   690	}
   691	
   692	// assign evaluates the assignment dst = src.
   693	func (e *Escape) assign(dst, src *Node, why string, where *Node) {
   694		// Filter out some no-op assignments for escape analysis.
   695		ignore := dst != nil && src != nil && isSelfAssign(dst, src)
   696		if ignore && Debug['m'] != 0 {
   697			Warnl(where.Pos, "%v ignoring self-assignment in %S", funcSym(e.curfn), where)
   698		}
   699	
   700		k := e.addr(dst)
   701		if dst != nil && dst.Op == ODOTPTR && isReflectHeaderDataField(dst) {
   702			e.unsafeValue(e.heapHole(), src)
   703		} else {
   704			if ignore {
   705				k = e.discardHole()
   706			}
   707			e.expr(k, src)
   708		}
   709	}
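
A hypothetical input for the self-assignment filter, assuming isSelfAssign recognizes this bytes.Buffer-style slice truncation (with -m the compiler reports that it is "ignoring self-assignment"):

	package p

	type buffer struct {
		buf []byte
		off int
	}

	func (b *buffer) compact() {
		b.buf = b.buf[b.off:] // dst and src refer to the same field, so no new value
		                      // flows into b.buf and the assignment is discarded
		b.off = 0
	}
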
   710	
   711	func (e *Escape) assignHeap(src *Node, why string, where *Node) {
   712		e.expr(e.heapHole().note(where, why), src)
   713	}
   714	
   715	// call evaluates a call expression, including builtin calls. ks
   716	// should contain the holes representing where the function callee's
   717	// results flow; where is the OGO/ODEFER context of the call, if any.
   718	func (e *Escape) call(ks []EscHole, call, where *Node) {
   719		// First, pick out the function callee, its type, its receiver
   720		// (if any), and its normal argument list.
   721		var fn, recv *Node
   722		var fntype *types.Type
   723		args := call.List.Slice()
   724		switch call.Op {
   725		case OCALLFUNC:
   726			fn = call.Left
   727			if fn.Op == OCLOSURE {
   728				fn = fn.Func.Closure.Func.Nname
   729			}
   730			fntype = fn.Type
   731		case OCALLMETH:
   732			fn = asNode(call.Left.Type.FuncType().Nname)
   733			fntype = fn.Type
   734			recv = call.Left.Left
   735		case OCALLINTER:
   736			fntype = call.Left.Type
   737			recv = call.Left.Left
   738		case OAPPEND, ODELETE, OPRINT, OPRINTN, ORECOVER:
   739			// ok
   740		case OLEN, OCAP, OREAL, OIMAG, OCLOSE, OPANIC:
   741			args = []*Node{call.Left}
   742		case OCOMPLEX, OCOPY:
   743			args = []*Node{call.Left, call.Right}
   744		default:
   745			Fatalf("unexpected call op: %v", call.Op)
   746		}
   747	
   748		static := fn != nil && fn.Op == ONAME && fn.Class() == PFUNC
   749	
   750		// Set up evaluation holes for each receiver/argument.
   751		var recvK EscHole
   752		var paramKs []EscHole
   753	
   754		if static && fn.Name.Defn != nil && fn.Name.Defn.Esc < EscFuncTagged {
   755			// Static call to function in same mutually recursive
   756			// group; incorporate into data flow graph.
   757	
   758			if fn.Name.Defn.Esc == EscFuncUnknown {
   759				Fatalf("graph inconsistency")
   760			}
   761	
   762			if ks != nil {
   763				for i, result := range fntype.Results().FieldSlice() {
   764					e.expr(ks[i], asNode(result.Nname))
   765				}
   766			}
   767	
   768			if r := fntype.Recv(); r != nil {
   769				recvK = e.addr(asNode(r.Nname))
   770			}
   771			for _, param := range fntype.Params().FieldSlice() {
   772				paramKs = append(paramKs, e.addr(asNode(param.Nname)))
   773			}
   774		} else if call.Op == OCALLFUNC || call.Op == OCALLMETH || call.Op == OCALLINTER {
   775			// Dynamic call, or call to previously tagged
   776			// function. Set up flows to heap and/or ks according
   777			// to parameter tags.
   778			if r := fntype.Recv(); r != nil {
   779				recvK = e.tagHole(ks, r, static)
   780			}
   781			for _, param := range fntype.Params().FieldSlice() {
   782				paramKs = append(paramKs, e.tagHole(ks, param, static))
   783			}
   784		} else {
   785			// Handle escape analysis for builtins.
   786			// By default, we just discard everything.
   787			for range args {
   788				paramKs = append(paramKs, e.discardHole())
   789			}
   790	
   791			switch call.Op {
   792			case OAPPEND:
   793				// Appendee slice may flow directly to the
   794				// result, if it has enough capacity.
   795				// Alternatively, a new heap slice might be
   796				// allocated, and all slice elements might
   797				// flow to heap.
   798				paramKs[0] = e.teeHole(paramKs[0], ks[0])
   799				if types.Haspointers(args[0].Type.Elem()) {
   800					paramKs[0] = e.teeHole(paramKs[0], e.heapHole().deref(call, "appendee slice"))
   801				}
   802	
   803				if call.IsDDD() {
   804					if args[1].Type.IsSlice() && types.Haspointers(args[1].Type.Elem()) {
   805						paramKs[1] = e.teeHole(paramKs[1], e.heapHole().deref(call, "appended slice..."))
   806					}
   807				} else {
   808					for i := 1; i < len(args); i++ {
   809						paramKs[i] = e.heapHole()
   810					}
   811				}
   812	
   813			case OCOPY:
   814				if call.Right.Type.IsSlice() && types.Haspointers(call.Right.Type.Elem()) {
   815					paramKs[1] = e.teeHole(paramKs[1], e.heapHole().deref(call, "copied slice"))
   816				}
   817	
   818			case OPANIC:
   819				paramKs[0] = e.heapHole()
   820			}
   821		}
   822	
   823		if call.Op == OCALLFUNC {
   824			// Evaluate callee function expression.
   825			e.expr(e.augmentParamHole(e.discardHole(), where), call.Left)
   826		}
   827	
   828		if recv != nil {
   829			// TODO(mdempsky): Handle go:uintptrescapes here too?
   830			e.expr(e.augmentParamHole(recvK, where), recv)
   831		}
   832	
   833		// Apply augmentParamHole before ODDDARG so that it affects
   834		// the implicit slice allocation for variadic calls, if any.
   835		for i, paramK := range paramKs {
   836			paramKs[i] = e.augmentParamHole(paramK, where)
   837		}
   838	
   839		// TODO(mdempsky): Remove after early ddd-ification.
   840		if fntype != nil && fntype.IsVariadic() && !call.IsDDD() {
   841			vi := fntype.NumParams() - 1
   842	
   843			elt := fntype.Params().Field(vi).Type.Elem()
   844			nva := call.List.Len()
   845			nva -= vi
   846	
   847			// Introduce ODDDARG node to represent ... allocation.
   848			ddd := nodl(call.Pos, ODDDARG, nil, nil)
   849			ddd.Type = types.NewPtr(types.NewArray(elt, int64(nva)))
   850			call.Right = ddd
   851	
   852			dddK := e.spill(paramKs[vi], ddd)
   853			paramKs = paramKs[:vi]
   854			for i := 0; i < nva; i++ {
   855				paramKs = append(paramKs, dddK)
   856			}
   857		}
   858	
   859		for i, arg := range args {
   860			// For arguments to go:uintptrescapes, peel
   861			// away an unsafe.Pointer->uintptr conversion,
   862			// if present.
   863			if static && arg.Op == OCONVNOP && arg.Type.Etype == TUINTPTR && arg.Left.Type.Etype == TUNSAFEPTR {
   864				x := i
   865				if fntype.IsVariadic() && x >= fntype.NumParams() {
   866					x = fntype.NumParams() - 1
   867				}
   868				if fntype.Params().Field(x).Note == uintptrEscapesTag {
   869					arg = arg.Left
   870				}
   871			}
   872	
   873			// no augmentParamHole here; handled in loop before ODDDARG
   874			e.expr(paramKs[i], arg)
   875		}
   876	}
   877	
   878	// augmentParamHole augments parameter holes as necessary for use in
   879	// go/defer statements.
   880	func (e *Escape) augmentParamHole(k EscHole, where *Node) EscHole {
   881		if where == nil {
   882			return k
   883		}
   884	
   885		// Top-level defer arguments don't escape to the heap, but they
   886		// do need to last until the end of the function. Tee with a
   887		// non-transient location to prevent the arguments from being
   888		// transiently allocated.
   889		if where.Op == ODEFER && e.loopDepth == 1 {
   890			where.Esc = EscNever // force stack allocation of defer record (see ssa.go)
   891			// TODO(mdempsky): Eliminate redundant EscLocation allocs.
   892			return e.teeHole(k, e.newLoc(nil, false).asHole())
   893		}
   894	
   895		return e.heapHole()
   896	}
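
A hypothetical input showing the two defer cases augmentParamHole distinguishes; keep is an assumed helper whose parameter does not leak:

	package p

	func keep(p *int) { _ = p }

	func deferred(n int) {
		x := 0
		defer keep(&x) // top-level defer (loopDepth == 1): &x only has to survive until
		               // deferred returns, so x can stay on the stack

		y := 0
		for i := 0; i < n; i++ {
			defer keep(&y) // defer inside a loop: the argument flows to the heap hole,
			               // so y is heap allocated
		}
	}
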
   897	
   898	// tagHole returns a hole for evaluating an argument passed to param.
   899	// ks should contain the holes representing where the function
   900	// callee's results flow; static indicates whether this is a static
   901	// call.
   902	func (e *Escape) tagHole(ks []EscHole, param *types.Field, static bool) EscHole {
   903		// If this is a dynamic call, we can't rely on param.Note.
   904		if !static {
   905			return e.heapHole()
   906		}
   907	
   908		esc := parsetag(param.Note)
   909		switch esc {
   910		case EscHeap, EscUnknown:
   911			return e.heapHole()
   912		}
   913	
   914		var tagKs []EscHole
   915		if esc&EscContentEscapes != 0 {
   916			tagKs = append(tagKs, e.heapHole().shift(1))
   917		}
   918	
   919		if ks != nil {
   920			for i := 0; i < numEscReturns; i++ {
   921				if x := getEscReturn(esc, i); x >= 0 {
   922					tagKs = append(tagKs, ks[i].shift(x))
   923				}
   924			}
   925		}
   926	
   927		return e.teeHole(tagKs...)
   928	}
   929	
   930	// An EscHole represents a context for evaluating a Go
   931	// expression. E.g., when evaluating p in "x = **p", we'd have a hole
   932	// with dst==x and derefs==2.
   933	type EscHole struct {
   934		dst    *EscLocation
   935		derefs int // >= -1
   936	}
   937	
   938	func (k EscHole) note(where *Node, why string) EscHole {
   939		// TODO(mdempsky): Keep a record of where/why for diagnostics.
   940		return k
   941	}
   942	
   943	func (k EscHole) shift(delta int) EscHole {
   944		k.derefs += delta
   945		if k.derefs < -1 {
   946			Fatalf("derefs underflow: %v", k.derefs)
   947		}
   948		return k
   949	}
   950	
   951	func (k EscHole) deref(where *Node, why string) EscHole { return k.shift(1).note(where, why) }
   952	func (k EscHole) addr(where *Node, why string) EscHole  { return k.shift(-1).note(where, why) }
   953	
   954	func (k EscHole) dotType(t *types.Type, where *Node, why string) EscHole {
   955		if !t.IsInterface() && !isdirectiface(t) {
   956			k = k.shift(1)
   957		}
   958		return k.note(where, why)
   959	}
   960	
   961	// teeHole returns a new hole that flows into each hole of ks,
   962	// similar to the Unix tee(1) command.
   963	func (e *Escape) teeHole(ks ...EscHole) EscHole {
   964		if len(ks) == 0 {
   965			return e.discardHole()
   966		}
   967		if len(ks) == 1 {
   968			return ks[0]
   969		}
   970		// TODO(mdempsky): Optimize if there's only one non-discard hole?
   971	
   972		// Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
   973		// new temporary location ltmp, wire it into place, and return
   974		// a hole for "ltmp = _".
   975		loc := e.newLoc(nil, true)
   976		for _, k := range ks {
   977			// N.B., "p = &q" and "p = &tmp; tmp = q" are not
   978			// semantically equivalent. To combine holes like "l1
   979			// = _" and "l2 = &_", we'd need to wire them as "l1 =
   980			// *ltmp" and "l2 = ltmp" and return "ltmp = &_"
   981			// instead.
   982			if k.derefs < 0 {
   983				Fatalf("teeHole: negative derefs")
   984			}
   985	
   986			e.flow(k, loc)
   987		}
   988		return loc.asHole()
   989	}
   990	
   991	func (e *Escape) dcl(n *Node) EscHole {
   992		loc := e.oldLoc(n)
   993		loc.loopDepth = e.loopDepth
   994		return loc.asHole()
   995	}
   996	
   997	func (e *Escape) spill(k EscHole, n *Node) EscHole {
   998		// TODO(mdempsky): Optimize. E.g., if k is the heap or blank,
   999		// then we already know whether n leaks, and we can return a
  1000		// more optimized hole.
  1001		loc := e.newLoc(n, true)
  1002		e.flow(k.addr(n, "spill"), loc)
  1003		return loc.asHole()
  1004	}
  1005	
  1006	// canonicalNode returns the canonical *Node that n logically
  1007	// represents.
  1008	func canonicalNode(n *Node) *Node {
  1009		if n != nil && n.IsClosureVar() {
  1010			n = n.Name.Defn
  1011			if n.IsClosureVar() {
  1012				Fatalf("still closure var")
  1013			}
  1014		}
  1015	
  1016		return n
  1017	}
  1018	
  1019	func (e *Escape) newLoc(n *Node, transient bool) *EscLocation {
  1020		if e.curfn == nil {
  1021			Fatalf("e.curfn isn't set")
  1022		}
  1023	
  1024		n = canonicalNode(n)
  1025		loc := &EscLocation{
  1026			n:         n,
  1027			curfn:     e.curfn,
  1028			loopDepth: e.loopDepth,
  1029			transient: transient,
  1030		}
  1031		e.allLocs = append(e.allLocs, loc)
  1032		if n != nil {
  1033			if n.Op == ONAME && n.Name.Curfn != e.curfn {
  1034				Fatalf("curfn mismatch: %v != %v", n.Name.Curfn, e.curfn)
  1035			}
  1036	
  1037			if n.HasOpt() {
  1038				Fatalf("%v already has a location", n)
  1039			}
  1040			n.SetOpt(loc)
  1041	
  1042			// TODO(mdempsky): Perhaps set n.Esc and then just return &HeapLoc?
  1043			if mustHeapAlloc(n) && !loc.isName(PPARAM) && !loc.isName(PPARAMOUT) {
  1044				e.flow(e.heapHole().addr(nil, ""), loc)
  1045			}
  1046		}
  1047		return loc
  1048	}
  1049	
  1050	func (e *Escape) oldLoc(n *Node) *EscLocation {
  1051		n = canonicalNode(n)
  1052		return n.Opt().(*EscLocation)
  1053	}
  1054	
  1055	func (l *EscLocation) asHole() EscHole {
  1056		return EscHole{dst: l}
  1057	}
  1058	
  1059	func (e *Escape) flow(k EscHole, src *EscLocation) {
  1060		dst := k.dst
  1061		if dst == &e.blankLoc {
  1062			return
  1063		}
  1064		if dst == src && k.derefs >= 0 {
  1065			return
  1066		}
  1067		// TODO(mdempsky): More optimizations?
  1068	
  1069		// TODO(mdempsky): Deduplicate edges?
  1070		dst.edges = append(dst.edges, EscEdge{src: src, derefs: k.derefs})
  1071	}
  1072	
  1073	func (e *Escape) heapHole() EscHole    { return e.heapLoc.asHole() }
  1074	func (e *Escape) discardHole() EscHole { return e.blankLoc.asHole() }
  1075	
  1076	// walkAll computes the minimal dereferences between all pairs of
  1077	// locations.
  1078	func (e *Escape) walkAll() {
  1079		var walkgen uint32
  1080	
  1081		for _, loc := range e.allLocs {
  1082			walkgen++
  1083			e.walkOne(loc, walkgen)
  1084		}
  1085	
  1086		// Walk the heap last so that we catch any edges to the heap
  1087		// added during walkOne.
  1088		walkgen++
  1089		e.walkOne(&e.heapLoc, walkgen)
  1090	}
  1091	
  1092	// walkOne computes the minimal number of dereferences from root to
  1093	// all other locations.
  1094	func (e *Escape) walkOne(root *EscLocation, walkgen uint32) {
  1095		// The data flow graph has negative edges (from addressing
  1096		// operations), so we use the Bellman-Ford algorithm. However,
  1097		// we don't have to worry about infinite negative cycles since
  1098		// we bound intermediate dereference counts to 0.
  1099		root.walkgen = walkgen
  1100		root.derefs = 0
  1101	
  1102		todo := []*EscLocation{root}
  1103		for len(todo) > 0 {
  1104			l := todo[len(todo)-1]
  1105			todo = todo[:len(todo)-1]
  1106	
  1107			base := l.derefs
  1108	
  1109			// If l.derefs < 0, then l's address flows to root.
  1110			addressOf := base < 0
  1111			if addressOf {
  1112				// For a flow path like "root = &l; l = x",
  1113				// l's address flows to root, but x's does
  1114				// not. We recognize this by lower bounding
  1115				// base at 0.
  1116				base = 0
  1117	
  1118				// If l's address flows to a non-transient
  1119				// location, then l can't be transiently
  1120				// allocated.
  1121				if !root.transient {
  1122					l.transient = false
  1123					// TODO(mdempsky): Should we re-walk from l now?
  1124				}
  1125			}
  1126	
  1127			if e.outlives(root, l) {
  1128				// If l's address flows somewhere that
  1129				// outlives it, then l needs to be heap
  1130				// allocated.
  1131				if addressOf && !l.escapes {
  1132					l.escapes = true
  1133	
  1134					// If l is heap allocated, then any
  1135					// values stored into it flow to the
  1136					// heap too.
  1137					// TODO(mdempsky): Better way to handle this?
  1138					if root != &e.heapLoc {
  1139						e.flow(e.heapHole(), l)
  1140					}
  1141				}
  1142	
  1143				// l's value flows to root. If l is a function
  1144				// parameter and root is the heap or a
  1145				// corresponding result parameter, then record
  1146				// that value flow for tagging the function
  1147				// later.
  1148				if l.isName(PPARAM) {
  1149					l.leakTo(root, base)
  1150				}
  1151			}
  1152	
  1153			for _, edge := range l.edges {
  1154				derefs := base + edge.derefs
  1155				if edge.src.walkgen != walkgen || edge.src.derefs > derefs {
  1156					edge.src.walkgen = walkgen
  1157					edge.src.derefs = derefs
  1158					todo = append(todo, edge.src)
  1159				}
  1160			}
  1161		}
  1162	}
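
A minimal standalone sketch of the walk above, using toy types in place of EscLocation and EscEdge: Bellman-Ford-style propagation in which a negative dereference count is clamped to zero before following outgoing edges:

	package main

	import "fmt"

	type loc struct {
		edges  []edge // incoming assignment edges, as in EscLocation
		derefs int
		seen   bool
	}

	type edge struct {
		src    *loc
		derefs int // >= -1
	}

	func walkOne(root *loc) {
		root.seen, root.derefs = true, 0
		todo := []*loc{root}
		for len(todo) > 0 {
			l := todo[len(todo)-1]
			todo = todo[:len(todo)-1]

			base := l.derefs
			if base < 0 {
				base = 0 // l's address reaches root; don't propagate the negative count
			}
			for _, e := range l.edges {
				if d := base + e.derefs; !e.src.seen || d < e.src.derefs {
					e.src.seen, e.src.derefs = true, d
					todo = append(todo, e.src)
				}
			}
		}
	}

	func main() {
		// Model "root = &l; l = x": l's address reaches root (derefs -1),
		// but x only flows by value (derefs 0).
		x := &loc{}
		l := &loc{edges: []edge{{src: x, derefs: 0}}}
		root := &loc{edges: []edge{{src: l, derefs: -1}}}
		walkOne(root)
		fmt.Println(l.derefs, x.derefs) // -1 0
	}
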
  1163	
  1164	// outlives reports whether values stored in l may survive beyond
  1165	// other's lifetime if stack allocated.
  1166	func (e *Escape) outlives(l, other *EscLocation) bool {
  1167		// The heap outlives everything.
  1168		if l == &e.heapLoc {
  1169			return true
  1170		}
  1171	
  1172		// We don't know what callers do with returned values, so
  1173		// pessimistically we need to assume they flow to the heap and
  1174		// outlive everything too.
  1175		if l.isName(PPARAMOUT) {
  1176			// Exception: Directly called closures can return
  1177			// locations allocated outside of them without forcing
  1178			// them to the heap. For example:
  1179			//
  1180			//    var u int  // okay to stack allocate
  1181			//    *(func() *int { return &u }()) = 42
  1182			if containsClosure(other.curfn, l.curfn) && l.curfn.Func.Closure.Func.Top&ctxCallee != 0 {
  1183				return false
  1184			}
  1185	
  1186			return true
  1187		}
  1188	
  1189		// If l and other are within the same function, then l
  1190		// outlives other if it was declared outside other's loop
  1191		// scope. For example:
  1192		//
  1193		//    var l *int
  1194		//    for {
  1195		//        l = new(int)
  1196		//    }
  1197		if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
  1198			return true
  1199		}
  1200	
  1201		// If other is declared within a child closure of where l is
  1202		// declared, then l outlives it. For example:
  1203		//
  1204		//    var l *int
  1205		//    func() {
  1206		//        l = new(int)
  1207		//    }
  1208		if containsClosure(l.curfn, other.curfn) {
  1209			return true
  1210		}
  1211	
  1212		return false
  1213	}
  1214	
  1215	// containsClosure reports whether c is a closure contained within f.
  1216	func containsClosure(f, c *Node) bool {
  1217		if f.Op != ODCLFUNC || c.Op != ODCLFUNC {
  1218			Fatalf("bad containsClosure: %v, %v", f, c)
  1219		}
  1220	
  1221		// Common case.
  1222		if f == c {
  1223			return false
  1224		}
  1225	
  1226		// Closures within function Foo are named like "Foo.funcN..."
  1227		// TODO(mdempsky): Better way to recognize this.
  1228		fn := f.Func.Nname.Sym.Name
  1229		cn := c.Func.Nname.Sym.Name
  1230		return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
  1231	}
  1232	
  1233	// leakTo records that parameter l leaks to sink.
  1234	func (l *EscLocation) leakTo(sink *EscLocation, derefs int) {
  1235		// Short circuit if l already leaks to heap.
  1236		if l.paramEsc == EscHeap {
  1237			return
  1238		}
  1239	
  1240		// If sink is a result parameter and we can fit return bits
  1241		// into the escape analysis tag, then record a return leak.
  1242		if sink.isName(PPARAMOUT) && sink.curfn == l.curfn {
  1243			// TODO(mdempsky): Eliminate dependency on Vargen here.
  1244			ri := int(sink.n.Name.Vargen) - 1
  1245			if ri < numEscReturns {
  1246				// Leak to result parameter.
  1247				if old := getEscReturn(l.paramEsc, ri); old < 0 || derefs < old {
  1248					l.paramEsc = setEscReturn(l.paramEsc, ri, derefs)
  1249				}
  1250				return
  1251			}
  1252		}
  1253	
  1254		// Otherwise, record as heap leak.
  1255		if derefs > 0 {
  1256			l.paramEsc |= EscContentEscapes
  1257		} else {
  1258			l.paramEsc = EscHeap
  1259		}
  1260	}
  1261	
  1262	func (e *Escape) finish() {
  1263		for _, loc := range e.allLocs {
  1264			n := loc.n
  1265			if n == nil {
  1266				continue
  1267			}
  1268			n.SetOpt(nil)
  1269	
  1270			// Update n.Esc based on escape analysis results.
  1271			//
  1272			// TODO(mdempsky): Simplify once compatibility with
  1273			// esc.go is no longer necessary.
  1274			//
  1275			// TODO(mdempsky): Describe path when Debug['m'] >= 2.
  1276	
  1277			if loc.escapes {
  1278				if Debug['m'] != 0 && n.Op != ONAME {
  1279					Warnl(n.Pos, "%S escapes to heap", n)
  1280				}
  1281				n.Esc = EscHeap
  1282				addrescapes(n)
  1283			} else if loc.isName(PPARAM) {
  1284				n.Esc = finalizeEsc(loc.paramEsc)
  1285	
  1286				if Debug['m'] != 0 && types.Haspointers(n.Type) {
  1287					if n.Esc == EscNone {
  1288						Warnl(n.Pos, "%S %S does not escape", funcSym(loc.curfn), n)
  1289					} else if n.Esc == EscHeap {
  1290						Warnl(n.Pos, "leaking param: %S", n)
  1291					} else {
  1292						if n.Esc&EscContentEscapes != 0 {
  1293							Warnl(n.Pos, "leaking param content: %S", n)
  1294						}
  1295						for i := 0; i < numEscReturns; i++ {
  1296							if x := getEscReturn(n.Esc, i); x >= 0 {
  1297								res := n.Name.Curfn.Type.Results().Field(i).Sym
  1298								Warnl(n.Pos, "leaking param: %S to result %v level=%d", n, res, x)
  1299							}
  1300						}
  1301					}
  1302				}
  1303			} else {
  1304				n.Esc = EscNone
  1305				if loc.transient {
  1306					switch n.Op {
  1307					case OCALLPART, OCLOSURE, ODDDARG, OARRAYLIT, OSLICELIT, OPTRLIT, OSTRUCTLIT:
  1308						n.SetNoescape(true)
  1309					}
  1310				}
  1311	
  1312				if Debug['m'] != 0 && n.Op != ONAME && n.Op != OTYPESW && n.Op != ORANGE && n.Op != ODEFER {
  1313					Warnl(n.Pos, "%S %S does not escape", funcSym(loc.curfn), n)
  1314				}
  1315			}
  1316		}
  1317	}
  1318	
  1319	func (l *EscLocation) isName(c Class) bool {
  1320		return l.n != nil && l.n.Op == ONAME && l.n.Class() == c
  1321	}
  1322	
  1323	func finalizeEsc(esc uint16) uint16 {
  1324		esc = optimizeReturns(esc)
  1325	
  1326		if esc>>EscReturnBits != 0 {
  1327			esc |= EscReturn
  1328		} else if esc&EscMask == 0 {
  1329			esc |= EscNone
  1330		}
  1331	
  1332		return esc
  1333	}
  1334	
  1335	func optimizeReturns(esc uint16) uint16 {
  1336		if esc&EscContentEscapes != 0 {
  1337			// EscContentEscapes represents a path of length 1
  1338			// from the heap. No point in keeping paths of equal
  1339			// or longer length to result parameters.
  1340			for i := 0; i < numEscReturns; i++ {
  1341				if x := getEscReturn(esc, i); x >= 1 {
  1342					esc = setEscReturn(esc, i, -1)
  1343				}
  1344			}
  1345		}
  1346		return esc
  1347	}
  1348	
  1349	// Parameter tags.
  1350	//
  1351	// The escape bits saved for each analyzed parameter record the
  1352	// minimal derefs (if any) from that parameter to the heap, or to any
  1353	// of its function's (first numEscReturns) result parameters.
  1354	//
  1355	// Paths to the heap are encoded via EscHeap (length 0) or
  1356	// EscContentEscapes (length 1); if neither of these is set, then
  1357	// there's no path to the heap.
  1358	//
  1359	// Paths to the result parameters are encoded in the upper
  1360	// bits.
  1361	//
  1362	// There are other values stored in the escape bits by esc.go for
  1363	// vestigial reasons, and other special tag values used (e.g.,
  1364	// uintptrEscapesTag and unsafeUintptrTag). These could be simplified
  1365	// once compatibility with esc.go is no longer a concern.
  1366	
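Hypothetical functions and the diagnostics their tags produce under the -m flag (messages abbreviated from the format strings in finish; "~r0" is the compiler's generated name for an unnamed result):

	package p

	var sink *int

	func toHeap(p *int)        { sink = p }  // "leaking param: p"
	func toResult(p *int) *int { return p }  // "leaking param: p to result ~r0 level=0"
	func content(p **int)      { sink = *p } // "leaking param content: p"
	func noLeak(p *int) int    { return *p } // not leaking: "does not escape"
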
  1367	const numEscReturns = (16 - EscReturnBits) / bitsPerOutputInTag
  1368	
  1369	func getEscReturn(esc uint16, i int) int {
  1370		return int((esc>>escReturnShift(i))&bitsMaskForTag) - 1
  1371	}
  1372	
  1373	func setEscReturn(esc uint16, i, v int) uint16 {
  1374		if v < -1 {
  1375			Fatalf("invalid esc return value: %v", v)
  1376		}
  1377		if v > maxEncodedLevel {
  1378			v = maxEncodedLevel
  1379		}
  1380	
  1381		shift := escReturnShift(i)
  1382		esc &^= bitsMaskForTag << shift
  1383		esc |= uint16(v+1) << shift
  1384		return esc
  1385	}
  1386	
  1387	func escReturnShift(i int) uint {
  1388		if uint(i) >= numEscReturns {
  1389			Fatalf("esc return index out of bounds: %v", i)
  1390		}
  1391		return uint(EscReturnBits + i*bitsPerOutputInTag)
  1392	}
  1393	
