...

Source file src/cmd/compile/internal/gc/ssa.go

     1	// Copyright 2015 The Go Authors. All rights reserved.
     2	// Use of this source code is governed by a BSD-style
     3	// license that can be found in the LICENSE file.
     4	
     5	package gc
     6	
     7	import (
     8		"bufio"
     9		"bytes"
    10		"encoding/binary"
    11		"fmt"
    12		"html"
    13		"os"
    14		"sort"
    15	
    16		"cmd/compile/internal/ssa"
    17		"cmd/compile/internal/types"
    18		"cmd/internal/obj"
    19		"cmd/internal/objabi"
    20		"cmd/internal/src"
    21		"cmd/internal/sys"
    22	)
    23	
    24	var ssaConfig *ssa.Config
    25	var ssaCaches []ssa.Cache
    26	
    27	var ssaDump string     // early copy of $GOSSAFUNC; the func name to dump output for
    28	var ssaDumpStdout bool // whether to dump to stdout
    29	var ssaDumpCFG string  // generate CFGs for these phases
    30	const ssaDumpFile = "ssa.html"
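      	// Illustrative usage (a sketch of the usual workflow, not part of this
      	// file): building with GOSSAFUNC set dumps the SSA passes for the named
      	// function, e.g.
      	//
      	//	GOSSAFUNC=Foo go build somepkg
      	//
      	// writes ssa.html (ssaDumpFile) to the working directory. A trailing
      	// "+" is assumed here to select stdout (ssaDumpStdout); see the
      	// environment handling in main.go.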
    31	
    32	// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
    33	var ssaDumpInlined []*Node
    34	
    35	func initssaconfig() {
    36		types_ := ssa.NewTypes()
    37	
    38		if thearch.SoftFloat {
    39			softfloatInit()
    40		}
    41	
    42		// Generate a few pointer types that are uncommon in the frontend but common in the backend.
    43		// Caching is disabled in the backend, so generating these here avoids allocations.
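      		// (After NewPtrCacheEnabled is set to false below, types.NewPtr stops
      		// caching newly created pointer types, so pre-building the common ones
      		// here means the backend can still fetch them without allocating.)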
    44		_ = types.NewPtr(types.Types[TINTER])                             // *interface{}
    45		_ = types.NewPtr(types.NewPtr(types.Types[TSTRING]))              // **string
    46		_ = types.NewPtr(types.NewPtr(types.Idealstring))                 // **string
    47		_ = types.NewPtr(types.NewSlice(types.Types[TINTER]))             // *[]interface{}
    48		_ = types.NewPtr(types.NewPtr(types.Bytetype))                    // **byte
    49		_ = types.NewPtr(types.NewSlice(types.Bytetype))                  // *[]byte
    50		_ = types.NewPtr(types.NewSlice(types.Types[TSTRING]))            // *[]string
    51		_ = types.NewPtr(types.NewSlice(types.Idealstring))               // *[]string
    52		_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
    53		_ = types.NewPtr(types.Types[TINT16])                             // *int16
    54		_ = types.NewPtr(types.Types[TINT64])                             // *int64
    55		_ = types.NewPtr(types.Errortype)                                 // *error
    56		types.NewPtrCacheEnabled = false
    57		ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, Ctxt, Debug['N'] == 0)
    58		if thearch.LinkArch.Name == "386" {
    59			ssaConfig.Set387(thearch.Use387)
    60		}
    61		ssaConfig.SoftFloat = thearch.SoftFloat
    62		ssaConfig.Race = flag_race
    63		ssaCaches = make([]ssa.Cache, nBackendWorkers)
    64	
    65		// Set up some runtime functions we'll need to call.
    66		assertE2I = sysfunc("assertE2I")
    67		assertE2I2 = sysfunc("assertE2I2")
    68		assertI2I = sysfunc("assertI2I")
    69		assertI2I2 = sysfunc("assertI2I2")
    70		deferproc = sysfunc("deferproc")
    71		deferprocStack = sysfunc("deferprocStack")
    72		Deferreturn = sysfunc("deferreturn")
    73		Duffcopy = sysvar("duffcopy")             // asm func with special ABI
    74		Duffzero = sysvar("duffzero")             // asm func with special ABI
    75		gcWriteBarrier = sysvar("gcWriteBarrier") // asm func with special ABI
    76		goschedguarded = sysfunc("goschedguarded")
    77		growslice = sysfunc("growslice")
    78		msanread = sysfunc("msanread")
    79		msanwrite = sysfunc("msanwrite")
    80		newobject = sysfunc("newobject")
    81		newproc = sysfunc("newproc")
    82		panicdivide = sysfunc("panicdivide")
    83		panicdottypeE = sysfunc("panicdottypeE")
    84		panicdottypeI = sysfunc("panicdottypeI")
    85		panicnildottype = sysfunc("panicnildottype")
    86		panicoverflow = sysfunc("panicoverflow")
    87		panicshift = sysfunc("panicshift")
    88		raceread = sysfunc("raceread")
    89		racereadrange = sysfunc("racereadrange")
    90		racewrite = sysfunc("racewrite")
    91		racewriterange = sysfunc("racewriterange")
    92		x86HasPOPCNT = sysvar("x86HasPOPCNT")       // bool
    93		x86HasSSE41 = sysvar("x86HasSSE41")         // bool
    94		arm64HasATOMICS = sysvar("arm64HasATOMICS") // bool
    95		typedmemclr = sysfunc("typedmemclr")
    96		typedmemmove = sysfunc("typedmemmove")
    97		Udiv = sysvar("udiv")                 // asm func with special ABI
    98		writeBarrier = sysvar("writeBarrier") // struct { bool; ... }
    99		zerobaseSym = sysvar("zerobase")
   100	
   101		if thearch.LinkArch.Family == sys.Wasm {
   102			BoundsCheckFunc[ssa.BoundsIndex] = sysvar("goPanicIndex")
   103			BoundsCheckFunc[ssa.BoundsIndexU] = sysvar("goPanicIndexU")
   104			BoundsCheckFunc[ssa.BoundsSliceAlen] = sysvar("goPanicSliceAlen")
   105			BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysvar("goPanicSliceAlenU")
   106			BoundsCheckFunc[ssa.BoundsSliceAcap] = sysvar("goPanicSliceAcap")
   107			BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysvar("goPanicSliceAcapU")
   108			BoundsCheckFunc[ssa.BoundsSliceB] = sysvar("goPanicSliceB")
   109			BoundsCheckFunc[ssa.BoundsSliceBU] = sysvar("goPanicSliceBU")
   110			BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysvar("goPanicSlice3Alen")
   111			BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysvar("goPanicSlice3AlenU")
   112			BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysvar("goPanicSlice3Acap")
   113			BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysvar("goPanicSlice3AcapU")
   114			BoundsCheckFunc[ssa.BoundsSlice3B] = sysvar("goPanicSlice3B")
   115			BoundsCheckFunc[ssa.BoundsSlice3BU] = sysvar("goPanicSlice3BU")
   116			BoundsCheckFunc[ssa.BoundsSlice3C] = sysvar("goPanicSlice3C")
   117			BoundsCheckFunc[ssa.BoundsSlice3CU] = sysvar("goPanicSlice3CU")
   118		} else {
   119			BoundsCheckFunc[ssa.BoundsIndex] = sysvar("panicIndex")
   120			BoundsCheckFunc[ssa.BoundsIndexU] = sysvar("panicIndexU")
   121			BoundsCheckFunc[ssa.BoundsSliceAlen] = sysvar("panicSliceAlen")
   122			BoundsCheckFunc[ssa.BoundsSliceAlenU] = sysvar("panicSliceAlenU")
   123			BoundsCheckFunc[ssa.BoundsSliceAcap] = sysvar("panicSliceAcap")
   124			BoundsCheckFunc[ssa.BoundsSliceAcapU] = sysvar("panicSliceAcapU")
   125			BoundsCheckFunc[ssa.BoundsSliceB] = sysvar("panicSliceB")
   126			BoundsCheckFunc[ssa.BoundsSliceBU] = sysvar("panicSliceBU")
   127			BoundsCheckFunc[ssa.BoundsSlice3Alen] = sysvar("panicSlice3Alen")
   128			BoundsCheckFunc[ssa.BoundsSlice3AlenU] = sysvar("panicSlice3AlenU")
   129			BoundsCheckFunc[ssa.BoundsSlice3Acap] = sysvar("panicSlice3Acap")
   130			BoundsCheckFunc[ssa.BoundsSlice3AcapU] = sysvar("panicSlice3AcapU")
   131			BoundsCheckFunc[ssa.BoundsSlice3B] = sysvar("panicSlice3B")
   132			BoundsCheckFunc[ssa.BoundsSlice3BU] = sysvar("panicSlice3BU")
   133			BoundsCheckFunc[ssa.BoundsSlice3C] = sysvar("panicSlice3C")
   134			BoundsCheckFunc[ssa.BoundsSlice3CU] = sysvar("panicSlice3CU")
   135		}
   136		if thearch.LinkArch.PtrSize == 4 {
   137			ExtendCheckFunc[ssa.BoundsIndex] = sysvar("panicExtendIndex")
   138			ExtendCheckFunc[ssa.BoundsIndexU] = sysvar("panicExtendIndexU")
   139			ExtendCheckFunc[ssa.BoundsSliceAlen] = sysvar("panicExtendSliceAlen")
   140			ExtendCheckFunc[ssa.BoundsSliceAlenU] = sysvar("panicExtendSliceAlenU")
   141			ExtendCheckFunc[ssa.BoundsSliceAcap] = sysvar("panicExtendSliceAcap")
   142			ExtendCheckFunc[ssa.BoundsSliceAcapU] = sysvar("panicExtendSliceAcapU")
   143			ExtendCheckFunc[ssa.BoundsSliceB] = sysvar("panicExtendSliceB")
   144			ExtendCheckFunc[ssa.BoundsSliceBU] = sysvar("panicExtendSliceBU")
   145			ExtendCheckFunc[ssa.BoundsSlice3Alen] = sysvar("panicExtendSlice3Alen")
   146			ExtendCheckFunc[ssa.BoundsSlice3AlenU] = sysvar("panicExtendSlice3AlenU")
   147			ExtendCheckFunc[ssa.BoundsSlice3Acap] = sysvar("panicExtendSlice3Acap")
   148			ExtendCheckFunc[ssa.BoundsSlice3AcapU] = sysvar("panicExtendSlice3AcapU")
   149			ExtendCheckFunc[ssa.BoundsSlice3B] = sysvar("panicExtendSlice3B")
   150			ExtendCheckFunc[ssa.BoundsSlice3BU] = sysvar("panicExtendSlice3BU")
   151			ExtendCheckFunc[ssa.BoundsSlice3C] = sysvar("panicExtendSlice3C")
   152			ExtendCheckFunc[ssa.BoundsSlice3CU] = sysvar("panicExtendSlice3CU")
   153		}
   154	
   155		// GO386=387 runtime definitions
   156		ControlWord64trunc = sysvar("controlWord64trunc") // uint16
   157		ControlWord32 = sysvar("controlWord32")           // uint16
   158	
   159		// Wasm (all asm funcs with special ABIs)
   160		WasmMove = sysvar("wasmMove")
   161		WasmZero = sysvar("wasmZero")
   162		WasmDiv = sysvar("wasmDiv")
   163		WasmTruncS = sysvar("wasmTruncS")
   164		WasmTruncU = sysvar("wasmTruncU")
   165		SigPanic = sysfunc("sigpanic")
   166	}
   167	
   168	// buildssa builds an SSA function for fn.
   169	// worker indicates which of the backend workers is doing the processing.
   170	func buildssa(fn *Node, worker int) *ssa.Func {
   171		name := fn.funcname()
   172		printssa := name == ssaDump
   173		var astBuf *bytes.Buffer
   174		if printssa {
   175			astBuf = &bytes.Buffer{}
   176			fdumplist(astBuf, "buildssa-enter", fn.Func.Enter)
   177			fdumplist(astBuf, "buildssa-body", fn.Nbody)
   178			fdumplist(astBuf, "buildssa-exit", fn.Func.Exit)
   179			if ssaDumpStdout {
   180				fmt.Println("generating SSA for", name)
   181				fmt.Print(astBuf.String())
   182			}
   183		}
   184	
   185		var s state
   186		s.pushLine(fn.Pos)
   187		defer s.popLine()
   188	
   189		s.hasdefer = fn.Func.HasDefer()
   190		if fn.Func.Pragma&CgoUnsafeArgs != 0 {
   191			s.cgoUnsafeArgs = true
   192		}
   193	
   194		fe := ssafn{
   195			curfn: fn,
   196			log:   printssa && ssaDumpStdout,
   197		}
   198		s.curfn = fn
   199	
   200		s.f = ssa.NewFunc(&fe)
   201		s.config = ssaConfig
   202		s.f.Type = fn.Type
   203		s.f.Config = ssaConfig
   204		s.f.Cache = &ssaCaches[worker]
   205		s.f.Cache.Reset()
   206		s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH", name)
   207		s.f.Name = name
   208		s.f.PrintOrHtmlSSA = printssa
   209		if fn.Func.Pragma&Nosplit != 0 {
   210			s.f.NoSplit = true
   211		}
   212		s.panics = map[funcLine]*ssa.Block{}
   213		s.softFloat = s.config.SoftFloat
   214	
   215		if printssa {
   216			s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDumpFile, s.f.Frontend(), name, ssaDumpCFG)
   217			// TODO: generate and print a mapping from nodes to values and blocks
   218			dumpSourcesColumn(s.f.HTMLWriter, fn)
   219			s.f.HTMLWriter.WriteAST("AST", astBuf)
   220		}
   221	
   222		// Allocate starting block
   223		s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
   224	
   225		// Allocate starting values
   226		s.labels = map[string]*ssaLabel{}
   227		s.labeledNodes = map[*Node]*ssaLabel{}
   228		s.fwdVars = map[*Node]*ssa.Value{}
   229		s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
   230		s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
   231		s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])
   232	
   233		s.startBlock(s.f.Entry)
   234		s.vars[&memVar] = s.startmem
   235	
   236		// Generate addresses of local declarations
   237		s.decladdrs = map[*Node]*ssa.Value{}
   238		for _, n := range fn.Func.Dcl {
   239			switch n.Class() {
   240			case PPARAM, PPARAMOUT:
   241				s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type), n, s.sp, s.startmem)
   242				if n.Class() == PPARAMOUT && s.canSSA(n) {
   243					// Save ssa-able PPARAMOUT variables so we can
   244					// store them back to the stack at the end of
   245					// the function.
   246					s.returns = append(s.returns, n)
   247				}
   248			case PAUTO:
   249			// processed at each use, to prevent an Addr from coming
   250			// before the decl.
   251			case PAUTOHEAP:
   252				// moved to heap - already handled by frontend
   253			case PFUNC:
   254				// local function - already handled by frontend
   255			default:
   256				s.Fatalf("local variable with class %v unimplemented", n.Class())
   257			}
   258		}
   259	
   260		// Populate SSAable arguments.
   261		for _, n := range fn.Func.Dcl {
   262			if n.Class() == PPARAM && s.canSSA(n) {
   263				v := s.newValue0A(ssa.OpArg, n.Type, n)
   264				s.vars[n] = v
   265				s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
   266			}
   267		}
   268	
   269		// Convert the AST-based IR to the SSA-based IR
   270		s.stmtList(fn.Func.Enter)
   271		s.stmtList(fn.Nbody)
   272	
   273		// fallthrough to exit
   274		if s.curBlock != nil {
   275			s.pushLine(fn.Func.Endlineno)
   276			s.exit()
   277			s.popLine()
   278		}
   279	
   280		for _, b := range s.f.Blocks {
   281			if b.Pos != src.NoXPos {
   282				s.updateUnsetPredPos(b)
   283			}
   284		}
   285	
   286		s.insertPhis()
   287	
   288		// Main call to ssa package to compile function
   289		ssa.Compile(s.f)
   290		return s.f
   291	}
   292	
   293	func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *Node) {
   294		// Read sources of target function fn.
   295		fname := Ctxt.PosTable.Pos(fn.Pos).Filename()
   296		targetFn, err := readFuncLines(fname, fn.Pos.Line(), fn.Func.Endlineno.Line())
   297		if err != nil {
   298			writer.Logger.Logf("cannot read sources for function %v: %v", fn, err)
   299		}
   300	
   301		// Read sources of inlined functions.
   302		var inlFns []*ssa.FuncLines
   303		for _, fi := range ssaDumpInlined {
   304			var elno src.XPos
   305			if fi.Name.Defn == nil {
   306				// Endlineno is filled from exported data.
   307				elno = fi.Func.Endlineno
   308			} else {
   309				elno = fi.Name.Defn.Func.Endlineno
   310			}
   311			fname := Ctxt.PosTable.Pos(fi.Pos).Filename()
   312			fnLines, err := readFuncLines(fname, fi.Pos.Line(), elno.Line())
   313			if err != nil {
   314				writer.Logger.Logf("cannot read sources for function %v: %v", fi, err)
   315				continue
   316			}
   317			inlFns = append(inlFns, fnLines)
   318		}
   319	
   320		sort.Sort(ssa.ByTopo(inlFns))
   321		if targetFn != nil {
   322			inlFns = append([]*ssa.FuncLines{targetFn}, inlFns...)
   323		}
   324	
   325		writer.WriteSources("sources", inlFns)
   326	}
   327	
   328	func readFuncLines(file string, start, end uint) (*ssa.FuncLines, error) {
   329		f, err := os.Open(os.ExpandEnv(file))
   330		if err != nil {
   331			return nil, err
   332		}
   333		defer f.Close()
   334		var lines []string
   335		ln := uint(1)
   336		scanner := bufio.NewScanner(f)
   337		for scanner.Scan() && ln <= end {
   338			if ln >= start {
   339				lines = append(lines, scanner.Text())
   340			}
   341			ln++
   342		}
   343		return &ssa.FuncLines{Filename: file, StartLineno: start, Lines: lines}, nil
   344	}
   345	
   346	// updateUnsetPredPos propagates the earliest-value position information for b
   347	// towards all of b's predecessors that need a position, and recurses on that
   348	// predecessor if its position is updated. b must have a non-empty position.
   349	func (s *state) updateUnsetPredPos(b *ssa.Block) {
   350		if b.Pos == src.NoXPos {
   351			s.Fatalf("Block %s should have a position", b)
   352		}
   353		bestPos := src.NoXPos
   354		for _, e := range b.Preds {
   355			p := e.Block()
   356			if !p.LackingPos() {
   357				continue
   358			}
   359			if bestPos == src.NoXPos {
   360				bestPos = b.Pos
   361				for _, v := range b.Values {
   362					if v.LackingPos() {
   363						continue
   364					}
   365					if v.Pos != src.NoXPos {
   366						// Assume values are still in roughly textual order;
   367						// TODO: could also seek minimum position?
   368						bestPos = v.Pos
   369						break
   370					}
   371				}
   372			}
   373			p.Pos = bestPos
   374			s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay.
   375		}
   376	}
   377	
   378	type state struct {
   379		// configuration (arch) information
   380		config *ssa.Config
   381	
   382		// function we're building
   383		f *ssa.Func
   384	
   385		// Node for function
   386		curfn *Node
   387	
   388		// labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f
   389		labels       map[string]*ssaLabel
   390		labeledNodes map[*Node]*ssaLabel
   391	
   392		// unlabeled break and continue statement tracking
   393		breakTo    *ssa.Block // current target for plain break statement
   394		continueTo *ssa.Block // current target for plain continue statement
   395	
   396		// current location where we're interpreting the AST
   397		curBlock *ssa.Block
   398	
   399		// variable assignments in the current block (map from variable symbol to ssa value)
   400		// *Node is the unique identifier (an ONAME Node) for the variable.
   401		// TODO: keep a single varnum map, then make all of these maps slices instead?
   402		vars map[*Node]*ssa.Value
   403	
   404		// fwdVars are variables that are used before they are defined in the current block.
   405		// This map exists just to coalesce multiple references into a single FwdRef op.
   406		// *Node is the unique identifier (an ONAME Node) for the variable.
   407		fwdVars map[*Node]*ssa.Value
   408	
   409		// all defined variables at the end of each block. Indexed by block ID.
   410		defvars []map[*Node]*ssa.Value
   411	
   412		// addresses of PPARAM and PPARAMOUT variables.
   413		decladdrs map[*Node]*ssa.Value
   414	
   415		// starting values. Memory, stack pointer, and globals pointer
   416		startmem *ssa.Value
   417		sp       *ssa.Value
   418		sb       *ssa.Value
   419	
   420		// line number stack. The current line number is top of stack
   421		line []src.XPos
   422		// the last line number processed; it may have been popped
   423		lastPos src.XPos
   424	
   425		// list of panic calls by function name and line number.
   426		// Used to deduplicate panic calls.
   427		panics map[funcLine]*ssa.Block
   428	
   429		// list of PPARAMOUT (return) variables.
   430		returns []*Node
   431	
   432		cgoUnsafeArgs bool
   433		hasdefer      bool // whether the function contains a defer statement
   434		softFloat     bool
   435	}
   436	
   437	type funcLine struct {
   438		f    *obj.LSym
   439		base *src.PosBase
   440		line uint
   441	}
   442	
   443	type ssaLabel struct {
   444		target         *ssa.Block // block identified by this label
   445		breakTarget    *ssa.Block // block to break to in control flow node identified by this label
   446		continueTarget *ssa.Block // block to continue to in control flow node identified by this label
   447	}
   448	
   449	// label returns the label associated with sym, creating it if necessary.
   450	func (s *state) label(sym *types.Sym) *ssaLabel {
   451		lab := s.labels[sym.Name]
   452		if lab == nil {
   453			lab = new(ssaLabel)
   454			s.labels[sym.Name] = lab
   455		}
   456		return lab
   457	}
   458	
   459	func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
   460	func (s *state) Log() bool                            { return s.f.Log() }
   461	func (s *state) Fatalf(msg string, args ...interface{}) {
   462		s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
   463	}
   464	func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
   465	func (s *state) Debug_checknil() bool                                { return s.f.Frontend().Debug_checknil() }
   466	
   467	var (
   468		// dummy node for the memory variable
   469		memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}}
   470	
   471		// dummy nodes for temporary variables
   472		ptrVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}}
   473		lenVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}}
   474		newlenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}}
   475		capVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}}
   476		typVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}}
   477		okVar     = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}}
   478	)
   479	
   480	// startBlock sets the current block we're generating code in to b.
   481	func (s *state) startBlock(b *ssa.Block) {
   482		if s.curBlock != nil {
   483			s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
   484		}
   485		s.curBlock = b
   486		s.vars = map[*Node]*ssa.Value{}
   487		for n := range s.fwdVars {
   488			delete(s.fwdVars, n)
   489		}
   490	}
   491	
   492	// endBlock marks the end of generating code for the current block.
   493	// Returns the (former) current block. Returns nil if there is no current
   494	// block, i.e. if no code flows to the current execution point.
   495	func (s *state) endBlock() *ssa.Block {
   496		b := s.curBlock
   497		if b == nil {
   498			return nil
   499		}
   500		for len(s.defvars) <= int(b.ID) {
   501			s.defvars = append(s.defvars, nil)
   502		}
   503		s.defvars[b.ID] = s.vars
   504		s.curBlock = nil
   505		s.vars = nil
   506		if b.LackingPos() {
   507			// Empty plain blocks get the line of their successor (handled after all blocks created),
   508			// except for increment blocks in For statements (handled in ssa conversion of OFOR),
   509			// and for blocks ending in GOTO/BREAK/CONTINUE.
   510			b.Pos = src.NoXPos
   511		} else {
   512			b.Pos = s.lastPos
   513		}
   514		return b
   515	}
   516	
   517	// pushLine pushes a line number on the line number stack.
   518	func (s *state) pushLine(line src.XPos) {
   519		if !line.IsKnown() {
   520		// The frontend may emit a node with a missing position (line 0);
   521		// use the parent's line number in that case.
   522			line = s.peekPos()
   523			if Debug['K'] != 0 {
   524				Warn("buildssa: unknown position (line 0)")
   525			}
   526		} else {
   527			s.lastPos = line
   528		}
   529	
   530		s.line = append(s.line, line)
   531	}
   532	
   533	// popLine pops the top of the line number stack.
   534	func (s *state) popLine() {
   535		s.line = s.line[:len(s.line)-1]
   536	}
   537	
   538	// peekPos returns the top of the line number stack without popping it.
   539	func (s *state) peekPos() src.XPos {
   540		return s.line[len(s.line)-1]
   541	}
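      	// A typical pattern for the position stack (as in buildssa and stmt):
      	//
      	//	s.pushLine(n.Pos)
      	//	defer s.popLine()
      	//
      	// so that values created while converting n pick up n's position
      	// via peekPos.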
   542	
   543	// newValue0 adds a new value with no arguments to the current block.
   544	func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
   545		return s.curBlock.NewValue0(s.peekPos(), op, t)
   546	}
   547	
   548	// newValue0A adds a new value with no arguments and an aux value to the current block.
   549	func (s *state) newValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
   550		return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
   551	}
   552	
   553	// newValue0I adds a new value with no arguments and an auxint value to the current block.
   554	func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
   555		return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
   556	}
   557	
   558	// newValue1 adds a new value with one argument to the current block.
   559	func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
   560		return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
   561	}
   562	
   563	// newValue1A adds a new value with one argument and an aux value to the current block.
   564	func (s *state) newValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
   565		return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
   566	}
   567	
   568	// newValue1Apos adds a new value with one argument and an aux value to the current block.
   569	// isStmt determines whether the created value may be a statement or not
   570	// (i.e., false means never, true means maybe).
   571	func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value, isStmt bool) *ssa.Value {
   572		if isStmt {
   573			return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
   574		}
   575		return s.curBlock.NewValue1A(s.peekPos().WithNotStmt(), op, t, aux, arg)
   576	}
   577	
   578	// newValue1I adds a new value with one argument and an auxint value to the current block.
   579	func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
   580		return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
   581	}
   582	
   583	// newValue2 adds a new value with two arguments to the current block.
   584	func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
   585		return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
   586	}
   587	
   588	// newValue2Apos adds a new value with two arguments and an aux value to the current block.
   589	// isStmt determines whether the created value may be a statement or not
   590	// (i.e., false means never, true means maybe).
   591	func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value {
   592		if isStmt {
   593			return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
   594		}
   595		return s.curBlock.NewValue2A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1)
   596	}
   597	
   598	// newValue2I adds a new value with two arguments and an auxint value to the current block.
   599	func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
   600		return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
   601	}
   602	
   603	// newValue3 adds a new value with three arguments to the current block.
   604	func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
   605		return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
   606	}
   607	
   608	// newValue3I adds a new value with three arguments and an auxint value to the current block.
   609	func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
   610		return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
   611	}
   612	
   613	// newValue3A adds a new value with three arguments and an aux value to the current block.
   614	func (s *state) newValue3A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
   615		return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
   616	}
   617	
   618	// newValue3Apos adds a new value with three arguments and an aux value to the current block.
   619	// isStmt determines whether the created value may be a statement or not
   620	// (i.e., false means never, true means maybe).
   621	func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value {
   622		if isStmt {
   623			return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
   624		}
   625		return s.curBlock.NewValue3A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1, arg2)
   626	}
   627	
   628	// newValue4 adds a new value with four arguments to the current block.
   629	func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
   630		return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
   631	}
   632	
   633	// newValue4I adds a new value with four arguments and an auxint value to the current block.
   634	func (s *state) newValue4I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
   635		return s.curBlock.NewValue4I(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3)
   636	}
   637	
   638	// entryNewValue0 adds a new value with no arguments to the entry block.
   639	func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
   640		return s.f.Entry.NewValue0(src.NoXPos, op, t)
   641	}
   642	
   643	// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
   644	func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
   645		return s.f.Entry.NewValue0A(src.NoXPos, op, t, aux)
   646	}
   647	
   648	// entryNewValue1 adds a new value with one argument to the entry block.
   649	func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
   650		return s.f.Entry.NewValue1(src.NoXPos, op, t, arg)
   651	}
   652	
   653	// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
   654	func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
   655		return s.f.Entry.NewValue1I(src.NoXPos, op, t, auxint, arg)
   656	}
   657	
   658	// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
   659	func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
   660		return s.f.Entry.NewValue1A(src.NoXPos, op, t, aux, arg)
   661	}
   662	
   663	// entryNewValue2 adds a new value with two arguments to the entry block.
   664	func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
   665		return s.f.Entry.NewValue2(src.NoXPos, op, t, arg0, arg1)
   666	}
   667	
   668	// entryNewValue2A adds a new value with two arguments and an aux value to the entry block.
   669	func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value) *ssa.Value {
   670		return s.f.Entry.NewValue2A(src.NoXPos, op, t, aux, arg0, arg1)
   671	}
   672	
   673	// const* routines add a new const value to the entry block.
   674	func (s *state) constSlice(t *types.Type) *ssa.Value {
   675		return s.f.ConstSlice(t)
   676	}
   677	func (s *state) constInterface(t *types.Type) *ssa.Value {
   678		return s.f.ConstInterface(t)
   679	}
   680	func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(t) }
   681	func (s *state) constEmptyString(t *types.Type) *ssa.Value {
   682		return s.f.ConstEmptyString(t)
   683	}
   684	func (s *state) constBool(c bool) *ssa.Value {
   685		return s.f.ConstBool(types.Types[TBOOL], c)
   686	}
   687	func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
   688		return s.f.ConstInt8(t, c)
   689	}
   690	func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
   691		return s.f.ConstInt16(t, c)
   692	}
   693	func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
   694		return s.f.ConstInt32(t, c)
   695	}
   696	func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
   697		return s.f.ConstInt64(t, c)
   698	}
   699	func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
   700		return s.f.ConstFloat32(t, c)
   701	}
   702	func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
   703		return s.f.ConstFloat64(t, c)
   704	}
   705	func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
   706		if s.config.PtrSize == 8 {
   707			return s.constInt64(t, c)
   708		}
   709		if int64(int32(c)) != c {
   710			s.Fatalf("integer constant too big %d", c)
   711		}
   712		return s.constInt32(t, int32(c))
   713	}
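      	// E.g. on a 32-bit target, constInt(t, 1) lowers to a 32-bit constant;
      	// a value that does not fit in an int32 is a compiler bug (Fatalf).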
   714	func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
   715		return s.f.ConstOffPtrSP(t, c, s.sp)
   716	}
   717	
   718	// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
   719	// soft-float runtime function instead (when emitting soft-float code).
   720	func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
   721		if s.softFloat {
   722			if c, ok := s.sfcall(op, arg); ok {
   723				return c
   724			}
   725		}
   726		return s.newValue1(op, t, arg)
   727	}
   728	func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
   729		if s.softFloat {
   730			if c, ok := s.sfcall(op, arg0, arg1); ok {
   731				return c
   732			}
   733		}
   734		return s.newValue2(op, t, arg0, arg1)
   735	}
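      	// E.g. with SoftFloat enabled, a float op such as OpAdd32F is turned
      	// into a call to the corresponding runtime helper by sfcall (assuming
      	// sfcall recognizes the op); unhandled ops fall through to newValue*.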
   736	
   737	func (s *state) instrument(t *types.Type, addr *ssa.Value, wr bool) {
   738		if !s.curfn.Func.InstrumentBody() {
   739			return
   740		}
   741	
   742		w := t.Size()
   743		if w == 0 {
   744			return // can't race on zero-sized things
   745		}
   746	
   747		if ssa.IsSanitizerSafeAddr(addr) {
   748			return
   749		}
   750	
   751		var fn *obj.LSym
   752		needWidth := false
   753	
   754		if flag_msan {
   755			fn = msanread
   756			if wr {
   757				fn = msanwrite
   758			}
   759			needWidth = true
   760		} else if flag_race && t.NumComponents(types.CountBlankFields) > 1 {
   761			// for composite objects we have to write every address
   762			// because a write might happen to any subobject.
   763			// composites with only one element don't have subobjects, though.
   764			fn = racereadrange
   765			if wr {
   766				fn = racewriterange
   767			}
   768			needWidth = true
   769		} else if flag_race {
   770			// for non-composite objects we can write just the start
   771			// address, as any write must write the first byte.
   772			fn = raceread
   773			if wr {
   774				fn = racewrite
   775			}
   776		} else {
   777			panic("unreachable")
   778		}
   779	
   780		args := []*ssa.Value{addr}
   781		if needWidth {
   782			args = append(args, s.constInt(types.Types[TUINTPTR], w))
   783		}
   784		s.rtcall(fn, true, nil, args...)
   785	}
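      	// Illustrative effect of instrument (a sketch, assuming -race and the
      	// cases above):
      	//
      	//	y = *p       // scalar read:     raceread(p)
      	//	*p = y       // scalar write:    racewrite(p)
      	//	*ps = T{...} // composite write: racewriterange(ps, size of T)
      	//
      	// Under -msan the calls are msanread/msanwrite, always with the width.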
   786	
   787	func (s *state) load(t *types.Type, src *ssa.Value) *ssa.Value {
   788		s.instrument(t, src, false)
   789		return s.rawLoad(t, src)
   790	}
   791	
   792	func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value {
   793		return s.newValue2(ssa.OpLoad, t, src, s.mem())
   794	}
   795	
   796	func (s *state) store(t *types.Type, dst, val *ssa.Value) {
   797		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
   798	}
   799	
   800	func (s *state) zero(t *types.Type, dst *ssa.Value) {
   801		s.instrument(t, dst, true)
   802		store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem())
   803		store.Aux = t
   804		s.vars[&memVar] = store
   805	}
   806	
   807	func (s *state) move(t *types.Type, dst, src *ssa.Value) {
   808		s.instrument(t, src, false)
   809		s.instrument(t, dst, true)
   810		store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
   811		store.Aux = t
   812		s.vars[&memVar] = store
   813	}
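      	// store, zero, and move thread the new memory state through
      	// s.vars[&memVar]; a later s.mem() observes that store, which keeps
      	// subsequent loads and stores correctly ordered.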
   814	
   815	// stmtList converts the statement list n to SSA and adds it to s.
   816	func (s *state) stmtList(l Nodes) {
   817		for _, n := range l.Slice() {
   818			s.stmt(n)
   819		}
   820	}
   821	
   822	// stmt converts the statement n to SSA and adds it to s.
   823	func (s *state) stmt(n *Node) {
   824		if !(n.Op == OVARKILL || n.Op == OVARLIVE || n.Op == OVARDEF) {
   825			// OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
   826			s.pushLine(n.Pos)
   827			defer s.popLine()
   828		}
   829	
   830		// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
   831		// then this code is dead. Stop here.
   832		if s.curBlock == nil && n.Op != OLABEL {
   833			return
   834		}
   835	
   836		s.stmtList(n.Ninit)
   837		switch n.Op {
   838	
   839		case OBLOCK:
   840			s.stmtList(n.List)
   841	
   842		// No-ops
   843		case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:
   844	
   845		// Expression statements
   846		case OCALLFUNC:
   847			if isIntrinsicCall(n) {
   848				s.intrinsicCall(n)
   849				return
   850			}
   851			fallthrough
   852	
   853		case OCALLMETH, OCALLINTER:
   854			s.call(n, callNormal)
   855			if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC {
   856				if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
   857					n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
   858					m := s.mem()
   859					b := s.endBlock()
   860					b.Kind = ssa.BlockExit
   861					b.SetControl(m)
   862					// TODO: never rewrite OPANIC to OCALLFUNC in the
   863					// first place. Need to wait until all backends
   864					// go through SSA.
   865				}
   866			}
   867		case ODEFER:
   868			d := callDefer
   869			if n.Esc == EscNever {
   870				d = callDeferStack
   871			}
   872			s.call(n.Left, d)
   873		case OGO:
   874			s.call(n.Left, callGo)
   875	
   876		case OAS2DOTTYPE:
   877			res, resok := s.dottype(n.Rlist.First(), true)
   878			deref := false
   879			if !canSSAType(n.Rlist.First().Type) {
   880				if res.Op != ssa.OpLoad {
   881					s.Fatalf("dottype of non-load")
   882				}
   883				mem := s.mem()
   884				if mem.Op == ssa.OpVarKill {
   885					mem = mem.Args[0]
   886				}
   887				if res.Args[1] != mem {
   888					s.Fatalf("memory no longer live from 2-result dottype load")
   889				}
   890				deref = true
   891				res = res.Args[0]
   892			}
   893			s.assign(n.List.First(), res, deref, 0)
   894			s.assign(n.List.Second(), resok, false, 0)
   895			return
   896	
   897		case OAS2FUNC:
   898			// We come here only when it is an intrinsic call returning two values.
   899			if !isIntrinsicCall(n.Rlist.First()) {
   900				s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Rlist.First())
   901			}
   902			v := s.intrinsicCall(n.Rlist.First())
   903			v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
   904			v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
   905			s.assign(n.List.First(), v1, false, 0)
   906			s.assign(n.List.Second(), v2, false, 0)
   907			return
   908	
   909		case ODCL:
   910			if n.Left.Class() == PAUTOHEAP {
   911				Fatalf("DCL %v", n)
   912			}
   913	
   914		case OLABEL:
   915			sym := n.Sym
   916			lab := s.label(sym)
   917	
   918			// Associate label with its control flow node, if any
   919			if ctl := n.labeledControl(); ctl != nil {
   920				s.labeledNodes[ctl] = lab
   921			}
   922	
   923			// The label might already have a target block via a goto.
   924			if lab.target == nil {
   925				lab.target = s.f.NewBlock(ssa.BlockPlain)
   926			}
   927	
   928			// Go to that label.
   929			// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
   930			if s.curBlock != nil {
   931				b := s.endBlock()
   932				b.AddEdgeTo(lab.target)
   933			}
   934			s.startBlock(lab.target)
   935	
   936		case OGOTO:
   937			sym := n.Sym
   938	
   939			lab := s.label(sym)
   940			if lab.target == nil {
   941				lab.target = s.f.NewBlock(ssa.BlockPlain)
   942			}
   943	
   944			b := s.endBlock()
   945			b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
   946			b.AddEdgeTo(lab.target)
   947	
   948		case OAS:
   949			if n.Left == n.Right && n.Left.Op == ONAME {
   950				// An x=x assignment. No point in doing anything
   951				// here. In addition, skipping this assignment
   952				// prevents generating:
   953				//   VARDEF x
   954				//   COPY x -> x
   955				// which is bad because x is incorrectly considered
   956				// dead before the vardef. See issue #14904.
   957				return
   958			}
   959	
   960			// Evaluate RHS.
   961			rhs := n.Right
   962			if rhs != nil {
   963				switch rhs.Op {
   964				case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
   965					// All literals with nonzero fields have already been
   966					// rewritten during walk. Any that remain are just T{}
   967					// or equivalents. Use the zero value.
   968					if !isZero(rhs) {
   969						Fatalf("literal with nonzero value in SSA: %v", rhs)
   970					}
   971					rhs = nil
   972				case OAPPEND:
   973					// Check whether we're writing the result of an append back to the same slice.
   974					// If so, we handle it specially to avoid write barriers on the fast
   975					// (non-growth) path.
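      				// E.g. "s = append(s, x)" (an illustrative case): when the
      				// destination is the same slice expression as append's first
      				// argument, the base pointer need not be rewritten on the
      				// non-growth path, which is what avoids the write barrier.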
   976					if !samesafeexpr(n.Left, rhs.List.First()) || Debug['N'] != 0 {
   977						break
   978					}
   979					// If the slice can be SSA'd, it'll be on the stack,
   980					// so there will be no write barriers,
   981					// so there's no need to attempt to prevent them.
   982					if s.canSSA(n.Left) {
   983						if Debug_append > 0 { // replicating old diagnostic message
   984							Warnl(n.Pos, "append: len-only update (in local slice)")
   985						}
   986						break
   987					}
   988					if Debug_append > 0 {
   989						Warnl(n.Pos, "append: len-only update")
   990					}
   991					s.append(rhs, true)
   992					return
   993				}
   994			}
   995	
   996			if n.Left.isBlank() {
   997				// _ = rhs
   998				// Just evaluate rhs for side-effects.
   999				if rhs != nil {
  1000					s.expr(rhs)
  1001				}
  1002				return
  1003			}
  1004	
  1005			var t *types.Type
  1006			if n.Right != nil {
  1007				t = n.Right.Type
  1008			} else {
  1009				t = n.Left.Type
  1010			}
  1011	
  1012			var r *ssa.Value
  1013			deref := !canSSAType(t)
  1014			if deref {
  1015				if rhs == nil {
  1016					r = nil // Signal assign to use OpZero.
  1017				} else {
  1018					r = s.addr(rhs, false)
  1019				}
  1020			} else {
  1021				if rhs == nil {
  1022					r = s.zeroVal(t)
  1023				} else {
  1024					r = s.expr(rhs)
  1025				}
  1026			}
  1027	
  1028			var skip skipMask
  1029			if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
  1030				// We're assigning a slicing operation back to its source.
  1031				// Don't write back fields we aren't changing. See issue #14855.
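      			// E.g. for "s = s[:n]" the pointer and cap fields are unchanged,
      			// so only the len field is stored back (skipPtr|skipCap below).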
  1032				i, j, k := rhs.SliceBounds()
  1033				if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
  1034					// [0:...] is the same as [:...]
  1035					i = nil
  1036				}
  1037				// TODO: detect defaults for len/cap also.
  1038				// Currently doesn't really work because (*p)[:len(*p)] appears here as:
  1039				//    tmp = len(*p)
  1040				//    (*p)[:tmp]
  1041				//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
  1042				//      j = nil
  1043				//}
  1044				//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
  1045				//      k = nil
  1046				//}
  1047				if i == nil {
  1048					skip |= skipPtr
  1049					if j == nil {
  1050						skip |= skipLen
  1051					}
  1052					if k == nil {
  1053						skip |= skipCap
  1054					}
  1055				}
  1056			}
  1057	
  1058			s.assign(n.Left, r, deref, skip)
  1059	
  1060		case OIF:
  1061			bEnd := s.f.NewBlock(ssa.BlockPlain)
  1062			var likely int8
  1063			if n.Likely() {
  1064				likely = 1
  1065			}
  1066			var bThen *ssa.Block
  1067			if n.Nbody.Len() != 0 {
  1068				bThen = s.f.NewBlock(ssa.BlockPlain)
  1069			} else {
  1070				bThen = bEnd
  1071			}
  1072			var bElse *ssa.Block
  1073			if n.Rlist.Len() != 0 {
  1074				bElse = s.f.NewBlock(ssa.BlockPlain)
  1075			} else {
  1076				bElse = bEnd
  1077			}
  1078			s.condBranch(n.Left, bThen, bElse, likely)
  1079	
  1080			if n.Nbody.Len() != 0 {
  1081				s.startBlock(bThen)
  1082				s.stmtList(n.Nbody)
  1083				if b := s.endBlock(); b != nil {
  1084					b.AddEdgeTo(bEnd)
  1085				}
  1086			}
  1087			if n.Rlist.Len() != 0 {
  1088				s.startBlock(bElse)
  1089				s.stmtList(n.Rlist)
  1090				if b := s.endBlock(); b != nil {
  1091					b.AddEdgeTo(bEnd)
  1092				}
  1093			}
  1094			s.startBlock(bEnd)
  1095	
  1096		case ORETURN:
  1097			s.stmtList(n.List)
  1098			b := s.exit()
  1099			b.Pos = s.lastPos.WithIsStmt()
  1100	
  1101		case ORETJMP:
  1102			s.stmtList(n.List)
  1103			b := s.exit()
  1104			b.Kind = ssa.BlockRetJmp // override BlockRet
  1105			b.Aux = n.Sym.Linksym()
  1106	
  1107		case OCONTINUE, OBREAK:
  1108			var to *ssa.Block
  1109			if n.Sym == nil {
  1110				// plain break/continue
  1111				switch n.Op {
  1112				case OCONTINUE:
  1113					to = s.continueTo
  1114				case OBREAK:
  1115					to = s.breakTo
  1116				}
  1117			} else {
  1118				// labeled break/continue; look up the target
  1119				sym := n.Sym
  1120				lab := s.label(sym)
  1121				switch n.Op {
  1122				case OCONTINUE:
  1123					to = lab.continueTarget
  1124				case OBREAK:
  1125					to = lab.breakTarget
  1126				}
  1127			}
  1128	
  1129			b := s.endBlock()
  1130			b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
  1131			b.AddEdgeTo(to)
  1132	
  1133		case OFOR, OFORUNTIL:
  1134			// OFOR: for Ninit; Left; Right { Nbody }
  1135			// cond (Left); body (Nbody); incr (Right)
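      		// => goto cond; cond: if Left { goto body } else { goto end };
      		//    body: { Nbody }; goto incr; incr: Right; goto cond; end: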
  1136			//
  1137			// OFORUNTIL: for Ninit; Left; Right; List { Nbody }
  1138			// => body: { Nbody }; incr: Right; if Left { lateincr: List; goto body }; end:
  1139			bCond := s.f.NewBlock(ssa.BlockPlain)
  1140			bBody := s.f.NewBlock(ssa.BlockPlain)
  1141			bIncr := s.f.NewBlock(ssa.BlockPlain)
  1142			bEnd := s.f.NewBlock(ssa.BlockPlain)
  1143	
  1144			// ensure empty for loops have correct position; issue #30167
  1145			bBody.Pos = n.Pos
  1146	
  1147			// first, jump to condition test (OFOR) or body (OFORUNTIL)
  1148			b := s.endBlock()
  1149			if n.Op == OFOR {
  1150				b.AddEdgeTo(bCond)
  1151				// generate code to test condition
  1152				s.startBlock(bCond)
  1153				if n.Left != nil {
  1154					s.condBranch(n.Left, bBody, bEnd, 1)
  1155				} else {
  1156					b := s.endBlock()
  1157					b.Kind = ssa.BlockPlain
  1158					b.AddEdgeTo(bBody)
  1159				}
  1160	
  1161			} else {
  1162				b.AddEdgeTo(bBody)
  1163			}
  1164	
  1165			// set up for continue/break in body
  1166			prevContinue := s.continueTo
  1167			prevBreak := s.breakTo
  1168			s.continueTo = bIncr
  1169			s.breakTo = bEnd
  1170			lab := s.labeledNodes[n]
  1171			if lab != nil {
  1172				// labeled for loop
  1173				lab.continueTarget = bIncr
  1174				lab.breakTarget = bEnd
  1175			}
  1176	
  1177			// generate body
  1178			s.startBlock(bBody)
  1179			s.stmtList(n.Nbody)
  1180	
  1181			// tear down continue/break
  1182			s.continueTo = prevContinue
  1183			s.breakTo = prevBreak
  1184			if lab != nil {
  1185				lab.continueTarget = nil
  1186				lab.breakTarget = nil
  1187			}
  1188	
  1189			// done with body, goto incr
  1190			if b := s.endBlock(); b != nil {
  1191				b.AddEdgeTo(bIncr)
  1192			}
  1193	
  1194			// generate incr (and, for OFORUNTIL, condition)
  1195			s.startBlock(bIncr)
  1196			if n.Right != nil {
  1197				s.stmt(n.Right)
  1198			}
  1199			if n.Op == OFOR {
  1200				if b := s.endBlock(); b != nil {
  1201					b.AddEdgeTo(bCond)
  1202					// It can happen that bIncr ends in a block containing only VARKILL,
  1203					// and that muddles the debugging experience.
  1204					if n.Op != OFORUNTIL && b.Pos == src.NoXPos {
  1205						b.Pos = bCond.Pos
  1206					}
  1207				}
  1208			} else {
  1209				// bCond is unused in OFORUNTIL, so repurpose it.
  1210				bLateIncr := bCond
  1211				// test condition
  1212				s.condBranch(n.Left, bLateIncr, bEnd, 1)
  1213				// generate late increment
  1214				s.startBlock(bLateIncr)
  1215				s.stmtList(n.List)
  1216				s.endBlock().AddEdgeTo(bBody)
  1217			}
  1218	
  1219			s.startBlock(bEnd)
  1220	
  1221		case OSWITCH, OSELECT:
  1222			// These have been mostly rewritten by the front end into their Nbody fields.
  1223			// Our main task is to correctly hook up any break statements.
  1224			bEnd := s.f.NewBlock(ssa.BlockPlain)
  1225	
  1226			prevBreak := s.breakTo
  1227			s.breakTo = bEnd
  1228			lab := s.labeledNodes[n]
  1229			if lab != nil {
  1230				// labeled
  1231				lab.breakTarget = bEnd
  1232			}
  1233	
  1234			// generate body code
  1235			s.stmtList(n.Nbody)
  1236	
  1237			s.breakTo = prevBreak
  1238			if lab != nil {
  1239				lab.breakTarget = nil
  1240			}
  1241	
  1242			// walk adds explicit OBREAK nodes to the end of all reachable code paths.
  1243			// If we still have a current block here, then mark it unreachable.
  1244			if s.curBlock != nil {
  1245				m := s.mem()
  1246				b := s.endBlock()
  1247				b.Kind = ssa.BlockExit
  1248				b.SetControl(m)
  1249			}
  1250			s.startBlock(bEnd)
  1251	
  1252		case OVARDEF:
  1253			if !s.canSSA(n.Left) {
  1254				s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.Left, s.mem(), false)
  1255			}
  1256		case OVARKILL:
  1257			// Insert a varkill op to record that a variable is no longer live.
  1258			// We only care about liveness info at call sites, so putting the
  1259			// varkill in the store chain is enough to keep it correctly ordered
  1260			// with respect to call ops.
  1261			if !s.canSSA(n.Left) {
  1262				s.vars[&memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.Left, s.mem(), false)
  1263			}
  1264	
  1265		case OVARLIVE:
  1266			// Insert a varlive op to record that a variable is still live.
  1267			if !n.Left.Addrtaken() {
  1268				s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
  1269			}
  1270			switch n.Left.Class() {
  1271			case PAUTO, PPARAM, PPARAMOUT:
  1272			default:
  1273				s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left)
  1274			}
  1275			s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem())
  1276	
  1277		case OCHECKNIL:
  1278			p := s.expr(n.Left)
  1279			s.nilCheck(p)
  1280	
  1281		case OINLMARK:
  1282			s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Xoffset, s.mem())
  1283	
  1284		default:
  1285			s.Fatalf("unhandled stmt %v", n.Op)
  1286		}
  1287	}
  1288	
  1289	// exit processes any code that needs to be generated just before returning.
  1290	// It returns a BlockRet block that ends the control flow. Its control value
  1291	// will be set to the final memory state.
  1292	func (s *state) exit() *ssa.Block {
  1293		if s.hasdefer {
  1294			s.rtcall(Deferreturn, true, nil)
  1295		}
  1296	
  1297		// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
  1298		// variables back to the stack.
  1299		s.stmtList(s.curfn.Func.Exit)
  1300	
  1301		// Store SSAable PPARAMOUT variables back to stack locations.
  1302		for _, n := range s.returns {
  1303			addr := s.decladdrs[n]
  1304			val := s.variable(n, n.Type)
  1305			s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
  1306			s.store(n.Type, addr, val)
  1307			// TODO: if val is ever spilled, we'd like to use the
  1308			// PPARAMOUT slot for spilling it. That won't happen
  1309			// currently.
  1310		}
  1311	
  1312		// Do actual return.
  1313		m := s.mem()
  1314		b := s.endBlock()
  1315		b.Kind = ssa.BlockRet
  1316		b.SetControl(m)
  1317		return b
  1318	}
  1319	
  1320	type opAndType struct {
  1321		op    Op
  1322		etype types.EType
  1323	}
  1324	
  1325	var opToSSA = map[opAndType]ssa.Op{
  1326		opAndType{OADD, TINT8}:    ssa.OpAdd8,
  1327		opAndType{OADD, TUINT8}:   ssa.OpAdd8,
  1328		opAndType{OADD, TINT16}:   ssa.OpAdd16,
  1329		opAndType{OADD, TUINT16}:  ssa.OpAdd16,
  1330		opAndType{OADD, TINT32}:   ssa.OpAdd32,
  1331		opAndType{OADD, TUINT32}:  ssa.OpAdd32,
  1332		opAndType{OADD, TINT64}:   ssa.OpAdd64,
  1333		opAndType{OADD, TUINT64}:  ssa.OpAdd64,
  1334		opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
  1335		opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,
  1336	
  1337		opAndType{OSUB, TINT8}:    ssa.OpSub8,
  1338		opAndType{OSUB, TUINT8}:   ssa.OpSub8,
  1339		opAndType{OSUB, TINT16}:   ssa.OpSub16,
  1340		opAndType{OSUB, TUINT16}:  ssa.OpSub16,
  1341		opAndType{OSUB, TINT32}:   ssa.OpSub32,
  1342		opAndType{OSUB, TUINT32}:  ssa.OpSub32,
  1343		opAndType{OSUB, TINT64}:   ssa.OpSub64,
  1344		opAndType{OSUB, TUINT64}:  ssa.OpSub64,
  1345		opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
  1346		opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,
  1347	
  1348		opAndType{ONOT, TBOOL}: ssa.OpNot,
  1349	
  1350		opAndType{ONEG, TINT8}:    ssa.OpNeg8,
  1351		opAndType{ONEG, TUINT8}:   ssa.OpNeg8,
  1352		opAndType{ONEG, TINT16}:   ssa.OpNeg16,
  1353		opAndType{ONEG, TUINT16}:  ssa.OpNeg16,
  1354		opAndType{ONEG, TINT32}:   ssa.OpNeg32,
  1355		opAndType{ONEG, TUINT32}:  ssa.OpNeg32,
  1356		opAndType{ONEG, TINT64}:   ssa.OpNeg64,
  1357		opAndType{ONEG, TUINT64}:  ssa.OpNeg64,
  1358		opAndType{ONEG, TFLOAT32}: ssa.OpNeg32F,
  1359		opAndType{ONEG, TFLOAT64}: ssa.OpNeg64F,
  1360	
  1361		opAndType{OBITNOT, TINT8}:   ssa.OpCom8,
  1362		opAndType{OBITNOT, TUINT8}:  ssa.OpCom8,
  1363		opAndType{OBITNOT, TINT16}:  ssa.OpCom16,
  1364		opAndType{OBITNOT, TUINT16}: ssa.OpCom16,
  1365		opAndType{OBITNOT, TINT32}:  ssa.OpCom32,
  1366		opAndType{OBITNOT, TUINT32}: ssa.OpCom32,
  1367		opAndType{OBITNOT, TINT64}:  ssa.OpCom64,
  1368		opAndType{OBITNOT, TUINT64}: ssa.OpCom64,
  1369	
  1370		opAndType{OIMAG, TCOMPLEX64}:  ssa.OpComplexImag,
  1371		opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
  1372		opAndType{OREAL, TCOMPLEX64}:  ssa.OpComplexReal,
  1373		opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,
  1374	
  1375		opAndType{OMUL, TINT8}:    ssa.OpMul8,
  1376		opAndType{OMUL, TUINT8}:   ssa.OpMul8,
  1377		opAndType{OMUL, TINT16}:   ssa.OpMul16,
  1378		opAndType{OMUL, TUINT16}:  ssa.OpMul16,
  1379		opAndType{OMUL, TINT32}:   ssa.OpMul32,
  1380		opAndType{OMUL, TUINT32}:  ssa.OpMul32,
  1381		opAndType{OMUL, TINT64}:   ssa.OpMul64,
  1382		opAndType{OMUL, TUINT64}:  ssa.OpMul64,
  1383		opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
  1384		opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,
  1385	
  1386		opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
  1387		opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,
  1388	
  1389		opAndType{ODIV, TINT8}:   ssa.OpDiv8,
  1390		opAndType{ODIV, TUINT8}:  ssa.OpDiv8u,
  1391		opAndType{ODIV, TINT16}:  ssa.OpDiv16,
  1392		opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
  1393		opAndType{ODIV, TINT32}:  ssa.OpDiv32,
  1394		opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
  1395		opAndType{ODIV, TINT64}:  ssa.OpDiv64,
  1396		opAndType{ODIV, TUINT64}: ssa.OpDiv64u,
  1397	
  1398		opAndType{OMOD, TINT8}:   ssa.OpMod8,
  1399		opAndType{OMOD, TUINT8}:  ssa.OpMod8u,
  1400		opAndType{OMOD, TINT16}:  ssa.OpMod16,
  1401		opAndType{OMOD, TUINT16}: ssa.OpMod16u,
  1402		opAndType{OMOD, TINT32}:  ssa.OpMod32,
  1403		opAndType{OMOD, TUINT32}: ssa.OpMod32u,
  1404		opAndType{OMOD, TINT64}:  ssa.OpMod64,
  1405		opAndType{OMOD, TUINT64}: ssa.OpMod64u,
  1406	
  1407		opAndType{OAND, TINT8}:   ssa.OpAnd8,
  1408		opAndType{OAND, TUINT8}:  ssa.OpAnd8,
  1409		opAndType{OAND, TINT16}:  ssa.OpAnd16,
  1410		opAndType{OAND, TUINT16}: ssa.OpAnd16,
  1411		opAndType{OAND, TINT32}:  ssa.OpAnd32,
  1412		opAndType{OAND, TUINT32}: ssa.OpAnd32,
  1413		opAndType{OAND, TINT64}:  ssa.OpAnd64,
  1414		opAndType{OAND, TUINT64}: ssa.OpAnd64,
  1415	
  1416		opAndType{OOR, TINT8}:   ssa.OpOr8,
  1417		opAndType{OOR, TUINT8}:  ssa.OpOr8,
  1418		opAndType{OOR, TINT16}:  ssa.OpOr16,
  1419		opAndType{OOR, TUINT16}: ssa.OpOr16,
  1420		opAndType{OOR, TINT32}:  ssa.OpOr32,
  1421		opAndType{OOR, TUINT32}: ssa.OpOr32,
  1422		opAndType{OOR, TINT64}:  ssa.OpOr64,
  1423		opAndType{OOR, TUINT64}: ssa.OpOr64,
  1424	
  1425		opAndType{OXOR, TINT8}:   ssa.OpXor8,
  1426		opAndType{OXOR, TUINT8}:  ssa.OpXor8,
  1427		opAndType{OXOR, TINT16}:  ssa.OpXor16,
  1428		opAndType{OXOR, TUINT16}: ssa.OpXor16,
  1429		opAndType{OXOR, TINT32}:  ssa.OpXor32,
  1430		opAndType{OXOR, TUINT32}: ssa.OpXor32,
  1431		opAndType{OXOR, TINT64}:  ssa.OpXor64,
  1432		opAndType{OXOR, TUINT64}: ssa.OpXor64,
  1433	
  1434		opAndType{OEQ, TBOOL}:      ssa.OpEqB,
  1435		opAndType{OEQ, TINT8}:      ssa.OpEq8,
  1436		opAndType{OEQ, TUINT8}:     ssa.OpEq8,
  1437		opAndType{OEQ, TINT16}:     ssa.OpEq16,
  1438		opAndType{OEQ, TUINT16}:    ssa.OpEq16,
  1439		opAndType{OEQ, TINT32}:     ssa.OpEq32,
  1440		opAndType{OEQ, TUINT32}:    ssa.OpEq32,
  1441		opAndType{OEQ, TINT64}:     ssa.OpEq64,
  1442		opAndType{OEQ, TUINT64}:    ssa.OpEq64,
  1443		opAndType{OEQ, TINTER}:     ssa.OpEqInter,
  1444		opAndType{OEQ, TSLICE}:     ssa.OpEqSlice,
  1445		opAndType{OEQ, TFUNC}:      ssa.OpEqPtr,
  1446		opAndType{OEQ, TMAP}:       ssa.OpEqPtr,
  1447		opAndType{OEQ, TCHAN}:      ssa.OpEqPtr,
  1448		opAndType{OEQ, TPTR}:       ssa.OpEqPtr,
  1449		opAndType{OEQ, TUINTPTR}:   ssa.OpEqPtr,
  1450		opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
  1451		opAndType{OEQ, TFLOAT64}:   ssa.OpEq64F,
  1452		opAndType{OEQ, TFLOAT32}:   ssa.OpEq32F,
  1453	
  1454		opAndType{ONE, TBOOL}:      ssa.OpNeqB,
  1455		opAndType{ONE, TINT8}:      ssa.OpNeq8,
  1456		opAndType{ONE, TUINT8}:     ssa.OpNeq8,
  1457		opAndType{ONE, TINT16}:     ssa.OpNeq16,
  1458		opAndType{ONE, TUINT16}:    ssa.OpNeq16,
  1459		opAndType{ONE, TINT32}:     ssa.OpNeq32,
  1460		opAndType{ONE, TUINT32}:    ssa.OpNeq32,
  1461		opAndType{ONE, TINT64}:     ssa.OpNeq64,
  1462		opAndType{ONE, TUINT64}:    ssa.OpNeq64,
  1463		opAndType{ONE, TINTER}:     ssa.OpNeqInter,
  1464		opAndType{ONE, TSLICE}:     ssa.OpNeqSlice,
  1465		opAndType{ONE, TFUNC}:      ssa.OpNeqPtr,
  1466		opAndType{ONE, TMAP}:       ssa.OpNeqPtr,
  1467		opAndType{ONE, TCHAN}:      ssa.OpNeqPtr,
  1468		opAndType{ONE, TPTR}:       ssa.OpNeqPtr,
  1469		opAndType{ONE, TUINTPTR}:   ssa.OpNeqPtr,
  1470		opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
  1471		opAndType{ONE, TFLOAT64}:   ssa.OpNeq64F,
  1472		opAndType{ONE, TFLOAT32}:   ssa.OpNeq32F,
  1473	
  1474		opAndType{OLT, TINT8}:    ssa.OpLess8,
  1475		opAndType{OLT, TUINT8}:   ssa.OpLess8U,
  1476		opAndType{OLT, TINT16}:   ssa.OpLess16,
  1477		opAndType{OLT, TUINT16}:  ssa.OpLess16U,
  1478		opAndType{OLT, TINT32}:   ssa.OpLess32,
  1479		opAndType{OLT, TUINT32}:  ssa.OpLess32U,
  1480		opAndType{OLT, TINT64}:   ssa.OpLess64,
  1481		opAndType{OLT, TUINT64}:  ssa.OpLess64U,
  1482		opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
  1483		opAndType{OLT, TFLOAT32}: ssa.OpLess32F,
  1484	
  1485		opAndType{OGT, TINT8}:    ssa.OpGreater8,
  1486		opAndType{OGT, TUINT8}:   ssa.OpGreater8U,
  1487		opAndType{OGT, TINT16}:   ssa.OpGreater16,
  1488		opAndType{OGT, TUINT16}:  ssa.OpGreater16U,
  1489		opAndType{OGT, TINT32}:   ssa.OpGreater32,
  1490		opAndType{OGT, TUINT32}:  ssa.OpGreater32U,
  1491		opAndType{OGT, TINT64}:   ssa.OpGreater64,
  1492		opAndType{OGT, TUINT64}:  ssa.OpGreater64U,
  1493		opAndType{OGT, TFLOAT64}: ssa.OpGreater64F,
  1494		opAndType{OGT, TFLOAT32}: ssa.OpGreater32F,
  1495	
  1496		opAndType{OLE, TINT8}:    ssa.OpLeq8,
  1497		opAndType{OLE, TUINT8}:   ssa.OpLeq8U,
  1498		opAndType{OLE, TINT16}:   ssa.OpLeq16,
  1499		opAndType{OLE, TUINT16}:  ssa.OpLeq16U,
  1500		opAndType{OLE, TINT32}:   ssa.OpLeq32,
  1501		opAndType{OLE, TUINT32}:  ssa.OpLeq32U,
  1502		opAndType{OLE, TINT64}:   ssa.OpLeq64,
  1503		opAndType{OLE, TUINT64}:  ssa.OpLeq64U,
  1504		opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
  1505		opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,
  1506	
  1507		opAndType{OGE, TINT8}:    ssa.OpGeq8,
  1508		opAndType{OGE, TUINT8}:   ssa.OpGeq8U,
  1509		opAndType{OGE, TINT16}:   ssa.OpGeq16,
  1510		opAndType{OGE, TUINT16}:  ssa.OpGeq16U,
  1511		opAndType{OGE, TINT32}:   ssa.OpGeq32,
  1512		opAndType{OGE, TUINT32}:  ssa.OpGeq32U,
  1513		opAndType{OGE, TINT64}:   ssa.OpGeq64,
  1514		opAndType{OGE, TUINT64}:  ssa.OpGeq64U,
  1515		opAndType{OGE, TFLOAT64}: ssa.OpGeq64F,
  1516		opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,
  1517	}
  1518	
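// concreteEtype returns the kind of t, resolving the platform-dependent
// kinds TINT, TUINT, and TUINTPTR to their concrete 32-bit or 64-bit
// equivalents based on the target's pointer size.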
  1519	func (s *state) concreteEtype(t *types.Type) types.EType {
  1520		e := t.Etype
  1521		switch e {
  1522		default:
  1523			return e
  1524		case TINT:
  1525			if s.config.PtrSize == 8 {
  1526				return TINT64
  1527			}
  1528			return TINT32
  1529		case TUINT:
  1530			if s.config.PtrSize == 8 {
  1531				return TUINT64
  1532			}
  1533			return TUINT32
  1534		case TUINTPTR:
  1535			if s.config.PtrSize == 8 {
  1536				return TUINT64
  1537			}
  1538			return TUINT32
  1539		}
  1540	}
  1541	
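// ssaOp returns the generic SSA opcode implementing the Go operator op on
// operands of type t. For example, on a 64-bit target, OMUL on TINT
// resolves (via concreteEtype) to ssa.OpMul64.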
  1542	func (s *state) ssaOp(op Op, t *types.Type) ssa.Op {
  1543		etype := s.concreteEtype(t)
  1544		x, ok := opToSSA[opAndType{op, etype}]
  1545		if !ok {
  1546			s.Fatalf("unhandled binary op %v %s", op, etype)
  1547		}
  1548		return x
  1549	}
  1550	
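// floatForComplex returns the float type of the real and imaginary parts
// of the complex type t: float32 for an 8-byte complex64, float64 for a
// 16-byte complex128.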
   1551	func floatForComplex(t *types.Type) *types.Type {
   1552		if t.Size() == 8 {
   1553			return types.Types[TFLOAT32]
   1554		}
   1555		return types.Types[TFLOAT64]
   1556	}
  1558	
  1559	type opAndTwoTypes struct {
  1560		op     Op
  1561		etype1 types.EType
  1562		etype2 types.EType
  1563	}
  1564	
  1565	type twoTypes struct {
  1566		etype1 types.EType
  1567		etype2 types.EType
  1568	}
  1569	
  1570	type twoOpsAndType struct {
  1571		op1              ssa.Op
  1572		op2              ssa.Op
  1573		intermediateType types.EType
  1574	}
  1575	
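// fpConvOpToSSA maps a (from, to) type pair to the pair of SSA conversion
// ops that implement it, applied in order through the given intermediate
// type. For example, int8 -> float32 is OpSignExt8to32 into TINT32, then
// OpCvt32to32F. Entries holding ssa.OpInvalid mark the 64-bit unsigned
// cases that are expanded into branchy code instead (see OCONV in expr).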
   1576	var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
  1578		twoTypes{TINT8, TFLOAT32}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
  1579		twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
  1580		twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
  1581		twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},
  1582	
  1583		twoTypes{TINT8, TFLOAT64}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
  1584		twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
  1585		twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
  1586		twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},
  1587	
  1588		twoTypes{TFLOAT32, TINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
  1589		twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
  1590		twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
  1591		twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},
  1592	
  1593		twoTypes{TFLOAT64, TINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
  1594		twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
  1595		twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
  1596		twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
  1597		// unsigned
  1598		twoTypes{TUINT8, TFLOAT32}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
  1599		twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
  1600		twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
  1601		twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto32F, branchy code expansion instead
  1602	
  1603		twoTypes{TUINT8, TFLOAT64}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
  1604		twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
  1605		twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
  1606		twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto64F, branchy code expansion instead
  1607	
  1608		twoTypes{TFLOAT32, TUINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
  1609		twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
  1610		twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
  1611		twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt32Fto64U, branchy code expansion instead
  1612	
  1613		twoTypes{TFLOAT64, TUINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
  1614		twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
  1615		twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
  1616		twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt64Fto64U, branchy code expansion instead
  1617	
  1618		// float
  1619		twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
  1620		twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, TFLOAT64},
  1621		twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, TFLOAT32},
  1622		twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
  1623	}
  1624	
   1625	// fpConvOpToSSA32 is used only on 32-bit architectures and holds only the entries that
   1626	// differ there: uint32<->float does not widen through int64 but uses 32-bit unsigned ops.
  1627	var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
  1628		twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
  1629		twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
  1630		twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
  1631		twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
  1632	}
  1633	
   1634	// uint64<->float conversions, used only on machines that have instructions for them
  1635	var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
  1636		twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
  1637		twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
  1638		twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
  1639		twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
  1640	}
  1641	
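// shiftOpToSSA maps a shift operator, keyed by the type of the shifted
// value and the type of the shift amount, to the SSA op implementing it.
// For example, an int32 shifted by a uint8 amount uses ssa.OpRsh32x8 for
// ORSH, while a uint32 uses the unsigned ssa.OpRsh32Ux8.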
  1642	var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
  1643		opAndTwoTypes{OLSH, TINT8, TUINT8}:   ssa.OpLsh8x8,
  1644		opAndTwoTypes{OLSH, TUINT8, TUINT8}:  ssa.OpLsh8x8,
  1645		opAndTwoTypes{OLSH, TINT8, TUINT16}:  ssa.OpLsh8x16,
  1646		opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
  1647		opAndTwoTypes{OLSH, TINT8, TUINT32}:  ssa.OpLsh8x32,
  1648		opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
  1649		opAndTwoTypes{OLSH, TINT8, TUINT64}:  ssa.OpLsh8x64,
  1650		opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,
  1651	
  1652		opAndTwoTypes{OLSH, TINT16, TUINT8}:   ssa.OpLsh16x8,
  1653		opAndTwoTypes{OLSH, TUINT16, TUINT8}:  ssa.OpLsh16x8,
  1654		opAndTwoTypes{OLSH, TINT16, TUINT16}:  ssa.OpLsh16x16,
  1655		opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
  1656		opAndTwoTypes{OLSH, TINT16, TUINT32}:  ssa.OpLsh16x32,
  1657		opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
  1658		opAndTwoTypes{OLSH, TINT16, TUINT64}:  ssa.OpLsh16x64,
  1659		opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,
  1660	
  1661		opAndTwoTypes{OLSH, TINT32, TUINT8}:   ssa.OpLsh32x8,
  1662		opAndTwoTypes{OLSH, TUINT32, TUINT8}:  ssa.OpLsh32x8,
  1663		opAndTwoTypes{OLSH, TINT32, TUINT16}:  ssa.OpLsh32x16,
  1664		opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
  1665		opAndTwoTypes{OLSH, TINT32, TUINT32}:  ssa.OpLsh32x32,
  1666		opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
  1667		opAndTwoTypes{OLSH, TINT32, TUINT64}:  ssa.OpLsh32x64,
  1668		opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,
  1669	
  1670		opAndTwoTypes{OLSH, TINT64, TUINT8}:   ssa.OpLsh64x8,
  1671		opAndTwoTypes{OLSH, TUINT64, TUINT8}:  ssa.OpLsh64x8,
  1672		opAndTwoTypes{OLSH, TINT64, TUINT16}:  ssa.OpLsh64x16,
  1673		opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
  1674		opAndTwoTypes{OLSH, TINT64, TUINT32}:  ssa.OpLsh64x32,
  1675		opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
  1676		opAndTwoTypes{OLSH, TINT64, TUINT64}:  ssa.OpLsh64x64,
  1677		opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,
  1678	
  1679		opAndTwoTypes{ORSH, TINT8, TUINT8}:   ssa.OpRsh8x8,
  1680		opAndTwoTypes{ORSH, TUINT8, TUINT8}:  ssa.OpRsh8Ux8,
  1681		opAndTwoTypes{ORSH, TINT8, TUINT16}:  ssa.OpRsh8x16,
  1682		opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
  1683		opAndTwoTypes{ORSH, TINT8, TUINT32}:  ssa.OpRsh8x32,
  1684		opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
  1685		opAndTwoTypes{ORSH, TINT8, TUINT64}:  ssa.OpRsh8x64,
  1686		opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,
  1687	
  1688		opAndTwoTypes{ORSH, TINT16, TUINT8}:   ssa.OpRsh16x8,
  1689		opAndTwoTypes{ORSH, TUINT16, TUINT8}:  ssa.OpRsh16Ux8,
  1690		opAndTwoTypes{ORSH, TINT16, TUINT16}:  ssa.OpRsh16x16,
  1691		opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
  1692		opAndTwoTypes{ORSH, TINT16, TUINT32}:  ssa.OpRsh16x32,
  1693		opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
  1694		opAndTwoTypes{ORSH, TINT16, TUINT64}:  ssa.OpRsh16x64,
  1695		opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,
  1696	
  1697		opAndTwoTypes{ORSH, TINT32, TUINT8}:   ssa.OpRsh32x8,
  1698		opAndTwoTypes{ORSH, TUINT32, TUINT8}:  ssa.OpRsh32Ux8,
  1699		opAndTwoTypes{ORSH, TINT32, TUINT16}:  ssa.OpRsh32x16,
  1700		opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
  1701		opAndTwoTypes{ORSH, TINT32, TUINT32}:  ssa.OpRsh32x32,
  1702		opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
  1703		opAndTwoTypes{ORSH, TINT32, TUINT64}:  ssa.OpRsh32x64,
  1704		opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,
  1705	
  1706		opAndTwoTypes{ORSH, TINT64, TUINT8}:   ssa.OpRsh64x8,
  1707		opAndTwoTypes{ORSH, TUINT64, TUINT8}:  ssa.OpRsh64Ux8,
  1708		opAndTwoTypes{ORSH, TINT64, TUINT16}:  ssa.OpRsh64x16,
  1709		opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
  1710		opAndTwoTypes{ORSH, TINT64, TUINT32}:  ssa.OpRsh64x32,
  1711		opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
  1712		opAndTwoTypes{ORSH, TINT64, TUINT64}:  ssa.OpRsh64x64,
  1713		opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
  1714	}
  1715	
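// ssaShiftOp returns the SSA opcode for shift operator op applied to a
// value of type t with a shift amount of type u.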
  1716	func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op {
  1717		etype1 := s.concreteEtype(t)
  1718		etype2 := s.concreteEtype(u)
  1719		x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
  1720		if !ok {
  1721			s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
  1722		}
  1723		return x
  1724	}
  1725	
  1726	// expr converts the expression n to ssa, adds it to s and returns the ssa result.
  1727	func (s *state) expr(n *Node) *ssa.Value {
  1728		if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
  1729			// ONAMEs and named OLITERALs have the line number
  1730			// of the decl, not the use. See issue 14742.
  1731			s.pushLine(n.Pos)
  1732			defer s.popLine()
  1733		}
  1734	
  1735		s.stmtList(n.Ninit)
  1736		switch n.Op {
  1737		case OBYTES2STRTMP:
  1738			slice := s.expr(n.Left)
  1739			ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
  1740			len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
  1741			return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
  1742		case OSTR2BYTESTMP:
  1743			str := s.expr(n.Left)
  1744			ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
  1745			len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str)
  1746			return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
  1747		case OCFUNC:
  1748			aux := n.Left.Sym.Linksym()
  1749			return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
  1750		case ONAME:
  1751			if n.Class() == PFUNC {
  1752				// "value" of a function is the address of the function's closure
  1753				sym := funcsym(n.Sym).Linksym()
  1754				return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb)
  1755			}
  1756			if s.canSSA(n) {
  1757				return s.variable(n, n.Type)
  1758			}
  1759			addr := s.addr(n, false)
  1760			return s.load(n.Type, addr)
  1761		case OCLOSUREVAR:
  1762			addr := s.addr(n, false)
  1763			return s.load(n.Type, addr)
  1764		case OLITERAL:
  1765			switch u := n.Val().U.(type) {
  1766			case *Mpint:
  1767				i := u.Int64()
  1768				switch n.Type.Size() {
  1769				case 1:
  1770					return s.constInt8(n.Type, int8(i))
  1771				case 2:
  1772					return s.constInt16(n.Type, int16(i))
  1773				case 4:
  1774					return s.constInt32(n.Type, int32(i))
  1775				case 8:
  1776					return s.constInt64(n.Type, i)
  1777				default:
  1778					s.Fatalf("bad integer size %d", n.Type.Size())
  1779					return nil
  1780				}
  1781			case string:
  1782				if u == "" {
  1783					return s.constEmptyString(n.Type)
  1784				}
  1785				return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
  1786			case bool:
  1787				return s.constBool(u)
  1788			case *NilVal:
  1789				t := n.Type
  1790				switch {
  1791				case t.IsSlice():
  1792					return s.constSlice(t)
  1793				case t.IsInterface():
  1794					return s.constInterface(t)
  1795				default:
  1796					return s.constNil(t)
  1797				}
  1798			case *Mpflt:
  1799				switch n.Type.Size() {
  1800				case 4:
  1801					return s.constFloat32(n.Type, u.Float32())
  1802				case 8:
  1803					return s.constFloat64(n.Type, u.Float64())
  1804				default:
  1805					s.Fatalf("bad float size %d", n.Type.Size())
  1806					return nil
  1807				}
  1808			case *Mpcplx:
  1809				r := &u.Real
  1810				i := &u.Imag
  1811				switch n.Type.Size() {
  1812				case 8:
  1813					pt := types.Types[TFLOAT32]
  1814					return s.newValue2(ssa.OpComplexMake, n.Type,
  1815						s.constFloat32(pt, r.Float32()),
  1816						s.constFloat32(pt, i.Float32()))
  1817				case 16:
  1818					pt := types.Types[TFLOAT64]
  1819					return s.newValue2(ssa.OpComplexMake, n.Type,
  1820						s.constFloat64(pt, r.Float64()),
  1821						s.constFloat64(pt, i.Float64()))
  1822				default:
  1823					s.Fatalf("bad float size %d", n.Type.Size())
  1824					return nil
  1825				}
  1826	
  1827			default:
  1828				s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
  1829				return nil
  1830			}
  1831		case OCONVNOP:
  1832			to := n.Type
  1833			from := n.Left.Type
  1834	
   1835		// Assume everything will work out, so set up our return value.
   1836		// Anything interesting that happens from here is a fatal error.
  1837			x := s.expr(n.Left)
  1838	
   1839		// Special case to avoid confusing GC and liveness:
   1840		// we don't want pointers accidentally classified
   1841		// as not-pointers or vice versa because of copy
   1842		// elision.
  1843			if to.IsPtrShaped() != from.IsPtrShaped() {
  1844				return s.newValue2(ssa.OpConvert, to, x, s.mem())
  1845			}
  1846	
  1847			v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
  1848	
  1849			// CONVNOP closure
  1850			if to.Etype == TFUNC && from.IsPtrShaped() {
  1851				return v
  1852			}
  1853	
  1854			// named <--> unnamed type or typed <--> untyped const
  1855			if from.Etype == to.Etype {
  1856				return v
  1857			}
  1858	
  1859			// unsafe.Pointer <--> *T
  1860			if to.Etype == TUNSAFEPTR && from.IsPtrShaped() || from.Etype == TUNSAFEPTR && to.IsPtrShaped() {
  1861				return v
  1862			}
  1863	
  1864			// map <--> *hmap
  1865			if to.Etype == TMAP && from.IsPtr() &&
  1866				to.MapType().Hmap == from.Elem() {
  1867				return v
  1868			}
  1869	
  1870			dowidth(from)
  1871			dowidth(to)
  1872			if from.Width != to.Width {
  1873				s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
  1874				return nil
  1875			}
  1876			if etypesign(from.Etype) != etypesign(to.Etype) {
  1877				s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
  1878				return nil
  1879			}
  1880	
  1881			if instrumenting {
  1882				// These appear to be fine, but they fail the
  1883				// integer constraint below, so okay them here.
  1884				// Sample non-integer conversion: map[string]string -> *uint8
  1885				return v
  1886			}
  1887	
  1888			if etypesign(from.Etype) == 0 {
  1889				s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
  1890				return nil
  1891			}
  1892	
  1893			// integer, same width, same sign
  1894			return v
  1895	
  1896		case OCONV:
  1897			x := s.expr(n.Left)
  1898			ft := n.Left.Type // from type
  1899			tt := n.Type      // to type
  1900			if ft.IsBoolean() && tt.IsKind(TUINT8) {
  1901				// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
  1902				return s.newValue1(ssa.OpCopy, n.Type, x)
  1903			}
  1904			if ft.IsInteger() && tt.IsInteger() {
  1905				var op ssa.Op
  1906				if tt.Size() == ft.Size() {
  1907					op = ssa.OpCopy
  1908				} else if tt.Size() < ft.Size() {
  1909					// truncation
  1910					switch 10*ft.Size() + tt.Size() {
  1911					case 21:
  1912						op = ssa.OpTrunc16to8
  1913					case 41:
  1914						op = ssa.OpTrunc32to8
  1915					case 42:
  1916						op = ssa.OpTrunc32to16
  1917					case 81:
  1918						op = ssa.OpTrunc64to8
  1919					case 82:
  1920						op = ssa.OpTrunc64to16
  1921					case 84:
  1922						op = ssa.OpTrunc64to32
  1923					default:
  1924						s.Fatalf("weird integer truncation %v -> %v", ft, tt)
  1925					}
  1926				} else if ft.IsSigned() {
  1927					// sign extension
  1928					switch 10*ft.Size() + tt.Size() {
  1929					case 12:
  1930						op = ssa.OpSignExt8to16
  1931					case 14:
  1932						op = ssa.OpSignExt8to32
  1933					case 18:
  1934						op = ssa.OpSignExt8to64
  1935					case 24:
  1936						op = ssa.OpSignExt16to32
  1937					case 28:
  1938						op = ssa.OpSignExt16to64
  1939					case 48:
  1940						op = ssa.OpSignExt32to64
  1941					default:
  1942						s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
  1943					}
  1944				} else {
  1945					// zero extension
  1946					switch 10*ft.Size() + tt.Size() {
  1947					case 12:
  1948						op = ssa.OpZeroExt8to16
  1949					case 14:
  1950						op = ssa.OpZeroExt8to32
  1951					case 18:
  1952						op = ssa.OpZeroExt8to64
  1953					case 24:
  1954						op = ssa.OpZeroExt16to32
  1955					case 28:
  1956						op = ssa.OpZeroExt16to64
  1957					case 48:
  1958						op = ssa.OpZeroExt32to64
  1959					default:
   1960					s.Fatalf("weird integer zero extension %v -> %v", ft, tt)
  1961					}
  1962				}
  1963				return s.newValue1(op, n.Type, x)
  1964			}
  1965	
  1966			if ft.IsFloat() || tt.IsFloat() {
  1967				conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
  1968				if s.config.RegSize == 4 && thearch.LinkArch.Family != sys.MIPS && !s.softFloat {
  1969					if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
  1970						conv = conv1
  1971					}
  1972				}
  1973				if thearch.LinkArch.Family == sys.ARM64 || thearch.LinkArch.Family == sys.Wasm || s.softFloat {
  1974					if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
  1975						conv = conv1
  1976					}
  1977				}
  1978	
  1979				if thearch.LinkArch.Family == sys.MIPS && !s.softFloat {
  1980					if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
   1981					// ft is a uint32; tt is float32 or float64
  1982						if tt.Size() == 4 {
  1983							return s.uint32Tofloat32(n, x, ft, tt)
  1984						}
  1985						if tt.Size() == 8 {
  1986							return s.uint32Tofloat64(n, x, ft, tt)
  1987						}
  1988					} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
  1989						// ft is float32 or float64, and tt is unsigned integer
  1990						if ft.Size() == 4 {
  1991							return s.float32ToUint32(n, x, ft, tt)
  1992						}
  1993						if ft.Size() == 8 {
  1994							return s.float64ToUint32(n, x, ft, tt)
  1995						}
  1996					}
  1997				}
  1998	
  1999				if !ok {
  2000					s.Fatalf("weird float conversion %v -> %v", ft, tt)
  2001				}
  2002				op1, op2, it := conv.op1, conv.op2, conv.intermediateType
  2003	
  2004				if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
  2005					// normal case, not tripping over unsigned 64
  2006					if op1 == ssa.OpCopy {
  2007						if op2 == ssa.OpCopy {
  2008							return x
  2009						}
  2010						return s.newValueOrSfCall1(op2, n.Type, x)
  2011					}
  2012					if op2 == ssa.OpCopy {
  2013						return s.newValueOrSfCall1(op1, n.Type, x)
  2014					}
  2015					return s.newValueOrSfCall1(op2, n.Type, s.newValueOrSfCall1(op1, types.Types[it], x))
  2016				}
  2017				// Tricky 64-bit unsigned cases.
  2018				if ft.IsInteger() {
   2019				// ft is a uint64 here; tt is float32 or float64
  2020					if tt.Size() == 4 {
  2021						return s.uint64Tofloat32(n, x, ft, tt)
  2022					}
  2023					if tt.Size() == 8 {
  2024						return s.uint64Tofloat64(n, x, ft, tt)
  2025					}
  2026					s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
  2027				}
  2028				// ft is float32 or float64, and tt is unsigned integer
  2029				if ft.Size() == 4 {
  2030					return s.float32ToUint64(n, x, ft, tt)
  2031				}
  2032				if ft.Size() == 8 {
  2033					return s.float64ToUint64(n, x, ft, tt)
  2034				}
  2035				s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
  2036				return nil
  2037			}
  2038	
  2039			if ft.IsComplex() && tt.IsComplex() {
  2040				var op ssa.Op
  2041				if ft.Size() == tt.Size() {
  2042					switch ft.Size() {
  2043					case 8:
  2044						op = ssa.OpRound32F
  2045					case 16:
  2046						op = ssa.OpRound64F
  2047					default:
  2048						s.Fatalf("weird complex conversion %v -> %v", ft, tt)
  2049					}
  2050				} else if ft.Size() == 8 && tt.Size() == 16 {
  2051					op = ssa.OpCvt32Fto64F
  2052				} else if ft.Size() == 16 && tt.Size() == 8 {
  2053					op = ssa.OpCvt64Fto32F
  2054				} else {
  2055					s.Fatalf("weird complex conversion %v -> %v", ft, tt)
  2056				}
  2057				ftp := floatForComplex(ft)
  2058				ttp := floatForComplex(tt)
  2059				return s.newValue2(ssa.OpComplexMake, tt,
  2060					s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
  2061					s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
  2062			}
  2063	
  2064			s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
  2065			return nil
  2066	
  2067		case ODOTTYPE:
  2068			res, _ := s.dottype(n, false)
  2069			return res
  2070	
  2071		// binary ops
  2072		case OLT, OEQ, ONE, OLE, OGE, OGT:
  2073			a := s.expr(n.Left)
  2074			b := s.expr(n.Right)
  2075			if n.Left.Type.IsComplex() {
  2076				pt := floatForComplex(n.Left.Type)
  2077				op := s.ssaOp(OEQ, pt)
  2078				r := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
  2079				i := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
  2080				c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i)
  2081				switch n.Op {
  2082				case OEQ:
  2083					return c
  2084				case ONE:
  2085					return s.newValue1(ssa.OpNot, types.Types[TBOOL], c)
  2086				default:
  2087					s.Fatalf("ordered complex compare %v", n.Op)
  2088				}
  2089			}
  2090			if n.Left.Type.IsFloat() {
  2091				return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b)
  2092			}
  2093			return s.newValue2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b)
  2094		case OMUL:
  2095			a := s.expr(n.Left)
  2096			b := s.expr(n.Right)
  2097			if n.Type.IsComplex() {
  2098				mulop := ssa.OpMul64F
  2099				addop := ssa.OpAdd64F
  2100				subop := ssa.OpSub64F
  2101				pt := floatForComplex(n.Type) // Could be Float32 or Float64
  2102				wt := types.Types[TFLOAT64]   // Compute in Float64 to minimize cancellation error
  2103	
  2104				areal := s.newValue1(ssa.OpComplexReal, pt, a)
  2105				breal := s.newValue1(ssa.OpComplexReal, pt, b)
  2106				aimag := s.newValue1(ssa.OpComplexImag, pt, a)
  2107				bimag := s.newValue1(ssa.OpComplexImag, pt, b)
  2108	
  2109				if pt != wt { // Widen for calculation
  2110					areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
  2111					breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
  2112					aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
  2113					bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
  2114				}
  2115	
  2116				xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
  2117				ximag := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal))
  2118	
  2119				if pt != wt { // Narrow to store back
  2120					xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
  2121					ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
  2122				}
  2123	
  2124				return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
  2125			}
  2126	
  2127			if n.Type.IsFloat() {
  2128				return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
  2129			}
  2130	
  2131			return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
  2132	
  2133		case ODIV:
  2134			a := s.expr(n.Left)
  2135			b := s.expr(n.Right)
  2136			if n.Type.IsComplex() {
  2137				// TODO this is not executed because the front-end substitutes a runtime call.
  2138				// That probably ought to change; with modest optimization the widen/narrow
  2139				// conversions could all be elided in larger expression trees.
  2140				mulop := ssa.OpMul64F
  2141				addop := ssa.OpAdd64F
  2142				subop := ssa.OpSub64F
  2143				divop := ssa.OpDiv64F
  2144				pt := floatForComplex(n.Type) // Could be Float32 or Float64
  2145				wt := types.Types[TFLOAT64]   // Compute in Float64 to minimize cancellation error
  2146	
  2147				areal := s.newValue1(ssa.OpComplexReal, pt, a)
  2148				breal := s.newValue1(ssa.OpComplexReal, pt, b)
  2149				aimag := s.newValue1(ssa.OpComplexImag, pt, a)
  2150				bimag := s.newValue1(ssa.OpComplexImag, pt, b)
  2151	
  2152				if pt != wt { // Widen for calculation
  2153					areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
  2154					breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
  2155					aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
  2156					bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
  2157				}
  2158	
  2159				denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag))
  2160				xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
  2161				ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag))
  2162	
  2163				// TODO not sure if this is best done in wide precision or narrow
  2164				// Double-rounding might be an issue.
  2165				// Note that the pre-SSA implementation does the entire calculation
  2166				// in wide format, so wide is compatible.
  2167				xreal = s.newValueOrSfCall2(divop, wt, xreal, denom)
  2168				ximag = s.newValueOrSfCall2(divop, wt, ximag, denom)
  2169	
  2170				if pt != wt { // Narrow to store back
  2171					xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
  2172					ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
  2173				}
  2174				return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
  2175			}
  2176			if n.Type.IsFloat() {
  2177				return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
  2178			}
  2179			return s.intDivide(n, a, b)
  2180		case OMOD:
  2181			a := s.expr(n.Left)
  2182			b := s.expr(n.Right)
  2183			return s.intDivide(n, a, b)
  2184		case OADD, OSUB:
  2185			a := s.expr(n.Left)
  2186			b := s.expr(n.Right)
  2187			if n.Type.IsComplex() {
  2188				pt := floatForComplex(n.Type)
  2189				op := s.ssaOp(n.Op, pt)
  2190				return s.newValue2(ssa.OpComplexMake, n.Type,
  2191					s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
  2192					s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
  2193			}
  2194			if n.Type.IsFloat() {
  2195				return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
  2196			}
  2197			return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
  2198		case OAND, OOR, OXOR:
  2199			a := s.expr(n.Left)
  2200			b := s.expr(n.Right)
  2201			return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
  2202		case OLSH, ORSH:
  2203			a := s.expr(n.Left)
  2204			b := s.expr(n.Right)
  2205			bt := b.Type
  2206			if bt.IsSigned() {
  2207				cmp := s.newValue2(s.ssaOp(OGE, bt), types.Types[TBOOL], b, s.zeroVal(bt))
  2208				s.check(cmp, panicshift)
  2209				bt = bt.ToUnsigned()
  2210			}
  2211			return s.newValue2(s.ssaShiftOp(n.Op, n.Type, bt), a.Type, a, b)
  2212		case OANDAND, OOROR:
  2213			// To implement OANDAND (and OOROR), we introduce a
  2214			// new temporary variable to hold the result. The
  2215			// variable is associated with the OANDAND node in the
  2216			// s.vars table (normally variables are only
  2217			// associated with ONAME nodes). We convert
  2218			//     A && B
  2219			// to
  2220			//     var = A
  2221			//     if var {
  2222			//         var = B
  2223			//     }
  2224			// Using var in the subsequent block introduces the
  2225			// necessary phi variable.
  2226			el := s.expr(n.Left)
  2227			s.vars[n] = el
  2228	
  2229			b := s.endBlock()
  2230			b.Kind = ssa.BlockIf
  2231			b.SetControl(el)
  2232			// In theory, we should set b.Likely here based on context.
  2233			// However, gc only gives us likeliness hints
  2234			// in a single place, for plain OIF statements,
   2235		// and passing around context is finicky, so don't bother for now.
  2236	
  2237			bRight := s.f.NewBlock(ssa.BlockPlain)
  2238			bResult := s.f.NewBlock(ssa.BlockPlain)
  2239			if n.Op == OANDAND {
  2240				b.AddEdgeTo(bRight)
  2241				b.AddEdgeTo(bResult)
  2242			} else if n.Op == OOROR {
  2243				b.AddEdgeTo(bResult)
  2244				b.AddEdgeTo(bRight)
  2245			}
  2246	
  2247			s.startBlock(bRight)
  2248			er := s.expr(n.Right)
  2249			s.vars[n] = er
  2250	
  2251			b = s.endBlock()
  2252			b.AddEdgeTo(bResult)
  2253	
  2254			s.startBlock(bResult)
  2255			return s.variable(n, types.Types[TBOOL])
  2256		case OCOMPLEX:
  2257			r := s.expr(n.Left)
  2258			i := s.expr(n.Right)
  2259			return s.newValue2(ssa.OpComplexMake, n.Type, r, i)
  2260	
  2261		// unary ops
  2262		case ONEG:
  2263			a := s.expr(n.Left)
  2264			if n.Type.IsComplex() {
  2265				tp := floatForComplex(n.Type)
  2266				negop := s.ssaOp(n.Op, tp)
  2267				return s.newValue2(ssa.OpComplexMake, n.Type,
  2268					s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
  2269					s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
  2270			}
  2271			return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
  2272		case ONOT, OBITNOT:
  2273			a := s.expr(n.Left)
  2274			return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
  2275		case OIMAG, OREAL:
  2276			a := s.expr(n.Left)
  2277			return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
  2278		case OPLUS:
  2279			return s.expr(n.Left)
  2280	
  2281		case OADDR:
  2282			return s.addr(n.Left, n.Bounded())
  2283	
  2284		case ORESULT:
  2285			addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
  2286			return s.load(n.Type, addr)
  2287	
  2288		case ODEREF:
  2289			p := s.exprPtr(n.Left, false, n.Pos)
  2290			return s.load(n.Type, p)
  2291	
  2292		case ODOT:
  2293			if n.Left.Op == OSTRUCTLIT {
  2294				// All literals with nonzero fields have already been
  2295				// rewritten during walk. Any that remain are just T{}
  2296				// or equivalents. Use the zero value.
  2297				if !isZero(n.Left) {
  2298					Fatalf("literal with nonzero value in SSA: %v", n.Left)
  2299				}
  2300				return s.zeroVal(n.Type)
  2301			}
  2302			// If n is addressable and can't be represented in
  2303			// SSA, then load just the selected field. This
  2304			// prevents false memory dependencies in race/msan
  2305			// instrumentation.
  2306			if islvalue(n) && !s.canSSA(n) {
  2307				p := s.addr(n, false)
  2308				return s.load(n.Type, p)
  2309			}
  2310			v := s.expr(n.Left)
  2311			return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
  2312	
  2313		case ODOTPTR:
  2314			p := s.exprPtr(n.Left, false, n.Pos)
  2315			p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p)
  2316			return s.load(n.Type, p)
  2317	
  2318		case OINDEX:
  2319			switch {
  2320			case n.Left.Type.IsString():
  2321				if n.Bounded() && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) {
  2322					// Replace "abc"[1] with 'b'.
  2323					// Delayed until now because "abc"[1] is not an ideal constant.
  2324					// See test/fixedbugs/issue11370.go.
  2325					return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.Val().U.(string)[n.Right.Int64()])))
  2326				}
  2327				a := s.expr(n.Left)
  2328				i := s.expr(n.Right)
  2329				len := s.newValue1(ssa.OpStringLen, types.Types[TINT], a)
  2330				i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
  2331				ptrtyp := s.f.Config.Types.BytePtr
  2332				ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
  2333				if Isconst(n.Right, CTINT) {
  2334					ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr)
  2335				} else {
  2336					ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
  2337				}
  2338				return s.load(types.Types[TUINT8], ptr)
  2339			case n.Left.Type.IsSlice():
  2340				p := s.addr(n, false)
  2341				return s.load(n.Left.Type.Elem(), p)
  2342			case n.Left.Type.IsArray():
  2343				if canSSAType(n.Left.Type) {
  2344					// SSA can handle arrays of length at most 1.
  2345					bound := n.Left.Type.NumElem()
  2346					a := s.expr(n.Left)
  2347					i := s.expr(n.Right)
  2348					if bound == 0 {
  2349						// Bounds check will never succeed.  Might as well
  2350						// use constants for the bounds check.
  2351						z := s.constInt(types.Types[TINT], 0)
  2352						s.boundsCheck(z, z, ssa.BoundsIndex, false)
  2353						// The return value won't be live, return junk.
  2354						return s.newValue0(ssa.OpUnknown, n.Type)
  2355					}
  2356					len := s.constInt(types.Types[TINT], bound)
  2357					i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
  2358					return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a)
  2359				}
  2360				p := s.addr(n, false)
  2361				return s.load(n.Left.Type.Elem(), p)
  2362			default:
  2363				s.Fatalf("bad type for index %v", n.Left.Type)
  2364				return nil
  2365			}
  2366	
  2367		case OLEN, OCAP:
  2368			switch {
  2369			case n.Left.Type.IsSlice():
  2370				op := ssa.OpSliceLen
  2371				if n.Op == OCAP {
  2372					op = ssa.OpSliceCap
  2373				}
  2374				return s.newValue1(op, types.Types[TINT], s.expr(n.Left))
  2375			case n.Left.Type.IsString(): // string; not reachable for OCAP
  2376				return s.newValue1(ssa.OpStringLen, types.Types[TINT], s.expr(n.Left))
  2377			case n.Left.Type.IsMap(), n.Left.Type.IsChan():
  2378				return s.referenceTypeBuiltin(n, s.expr(n.Left))
  2379			default: // array
  2380				return s.constInt(types.Types[TINT], n.Left.Type.NumElem())
  2381			}
  2382	
  2383		case OSPTR:
  2384			a := s.expr(n.Left)
  2385			if n.Left.Type.IsSlice() {
  2386				return s.newValue1(ssa.OpSlicePtr, n.Type, a)
  2387			} else {
  2388				return s.newValue1(ssa.OpStringPtr, n.Type, a)
  2389			}
  2390	
  2391		case OITAB:
  2392			a := s.expr(n.Left)
  2393			return s.newValue1(ssa.OpITab, n.Type, a)
  2394	
  2395		case OIDATA:
  2396			a := s.expr(n.Left)
  2397			return s.newValue1(ssa.OpIData, n.Type, a)
  2398	
  2399		case OEFACE:
  2400			tab := s.expr(n.Left)
  2401			data := s.expr(n.Right)
  2402			return s.newValue2(ssa.OpIMake, n.Type, tab, data)
  2403	
  2404		case OSLICEHEADER:
  2405			p := s.expr(n.Left)
  2406			l := s.expr(n.List.First())
  2407			c := s.expr(n.List.Second())
  2408			return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
  2409	
  2410		case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR:
  2411			v := s.expr(n.Left)
  2412			var i, j, k *ssa.Value
  2413			low, high, max := n.SliceBounds()
  2414			if low != nil {
  2415				i = s.expr(low)
  2416			}
  2417			if high != nil {
  2418				j = s.expr(high)
  2419			}
  2420			if max != nil {
  2421				k = s.expr(max)
  2422			}
  2423			p, l, c := s.slice(v, i, j, k, n.Bounded())
  2424			return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
  2425	
  2426		case OSLICESTR:
  2427			v := s.expr(n.Left)
  2428			var i, j *ssa.Value
  2429			low, high, _ := n.SliceBounds()
  2430			if low != nil {
  2431				i = s.expr(low)
  2432			}
  2433			if high != nil {
  2434				j = s.expr(high)
  2435			}
  2436			p, l, _ := s.slice(v, i, j, nil, n.Bounded())
  2437			return s.newValue2(ssa.OpStringMake, n.Type, p, l)
  2438	
  2439		case OCALLFUNC:
  2440			if isIntrinsicCall(n) {
  2441				return s.intrinsicCall(n)
  2442			}
  2443			fallthrough
  2444	
  2445		case OCALLINTER, OCALLMETH:
  2446			a := s.call(n, callNormal)
  2447			return s.load(n.Type, a)
  2448	
  2449		case OGETG:
  2450			return s.newValue1(ssa.OpGetG, n.Type, s.mem())
  2451	
  2452		case OAPPEND:
  2453			return s.append(n, false)
  2454	
  2455		case OSTRUCTLIT, OARRAYLIT:
  2456			// All literals with nonzero fields have already been
  2457			// rewritten during walk. Any that remain are just T{}
  2458			// or equivalents. Use the zero value.
  2459			if !isZero(n) {
  2460				Fatalf("literal with nonzero value in SSA: %v", n)
  2461			}
  2462			return s.zeroVal(n.Type)
  2463	
  2464		case ONEWOBJ:
  2465			if n.Type.Elem().Size() == 0 {
  2466				return s.newValue1A(ssa.OpAddr, n.Type, zerobaseSym, s.sb)
  2467			}
  2468			typ := s.expr(n.Left)
  2469			vv := s.rtcall(newobject, true, []*types.Type{n.Type}, typ)
  2470			return vv[0]
  2471	
  2472		default:
  2473			s.Fatalf("unhandled expr %v", n.Op)
  2474			return nil
  2475		}
  2476	}
  2477	
  2478	// append converts an OAPPEND node to SSA.
  2479	// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
  2480	// adds it to s, and returns the Value.
  2481	// If inplace is true, it writes the result of the OAPPEND expression n
  2482	// back to the slice being appended to, and returns nil.
  2483	// inplace MUST be set to false if the slice can be SSA'd.
  2484	func (s *state) append(n *Node, inplace bool) *ssa.Value {
  2485		// If inplace is false, process as expression "append(s, e1, e2, e3)":
  2486		//
  2487		// ptr, len, cap := s
  2488		// newlen := len + 3
  2489		// if newlen > cap {
  2490		//     ptr, len, cap = growslice(s, newlen)
  2491		//     newlen = len + 3 // recalculate to avoid a spill
  2492		// }
  2493		// // with write barriers, if needed:
  2494		// *(ptr+len) = e1
  2495		// *(ptr+len+1) = e2
  2496		// *(ptr+len+2) = e3
  2497		// return makeslice(ptr, newlen, cap)
  2498		//
  2499		//
  2500		// If inplace is true, process as statement "s = append(s, e1, e2, e3)":
  2501		//
  2502		// a := &s
  2503		// ptr, len, cap := s
  2504		// newlen := len + 3
  2505		// if uint(newlen) > uint(cap) {
  2506		//    newptr, len, newcap = growslice(ptr, len, cap, newlen)
  2507		//    vardef(a)       // if necessary, advise liveness we are writing a new a
  2508		//    *a.cap = newcap // write before ptr to avoid a spill
  2509		//    *a.ptr = newptr // with write barrier
  2510		// }
  2511		// newlen = len + 3 // recalculate to avoid a spill
  2512		// *a.len = newlen
  2513		// // with write barriers, if needed:
  2514		// *(ptr+len) = e1
  2515		// *(ptr+len+1) = e2
  2516		// *(ptr+len+2) = e3
  2517	
  2518		et := n.Type.Elem()
  2519		pt := types.NewPtr(et)
  2520	
  2521		// Evaluate slice
  2522		sn := n.List.First() // the slice node is the first in the list
  2523	
  2524		var slice, addr *ssa.Value
  2525		if inplace {
  2526			addr = s.addr(sn, false)
  2527			slice = s.load(n.Type, addr)
  2528		} else {
  2529			slice = s.expr(sn)
  2530		}
  2531	
  2532		// Allocate new blocks
  2533		grow := s.f.NewBlock(ssa.BlockPlain)
  2534		assign := s.f.NewBlock(ssa.BlockPlain)
  2535	
  2536		// Decide if we need to grow
  2537		nargs := int64(n.List.Len() - 1)
  2538		p := s.newValue1(ssa.OpSlicePtr, pt, slice)
  2539		l := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
  2540		c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice)
  2541		nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
  2542	
  2543		cmp := s.newValue2(s.ssaOp(OGT, types.Types[TUINT]), types.Types[TBOOL], nl, c)
  2544		s.vars[&ptrVar] = p
  2545	
  2546		if !inplace {
  2547			s.vars[&newlenVar] = nl
  2548			s.vars[&capVar] = c
  2549		} else {
  2550			s.vars[&lenVar] = l
  2551		}
  2552	
  2553		b := s.endBlock()
  2554		b.Kind = ssa.BlockIf
  2555		b.Likely = ssa.BranchUnlikely
  2556		b.SetControl(cmp)
  2557		b.AddEdgeTo(grow)
  2558		b.AddEdgeTo(assign)
  2559	
  2560		// Call growslice
  2561		s.startBlock(grow)
  2562		taddr := s.expr(n.Left)
  2563		r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl)
  2564	
  2565		if inplace {
  2566			if sn.Op == ONAME && sn.Class() != PEXTERN {
  2567				// Tell liveness we're about to build a new slice
  2568				s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
  2569			}
  2570			capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_cap), addr)
  2571			s.store(types.Types[TINT], capaddr, r[2])
  2572			s.store(pt, addr, r[0])
  2573			// load the value we just stored to avoid having to spill it
  2574			s.vars[&ptrVar] = s.load(pt, addr)
  2575			s.vars[&lenVar] = r[1] // avoid a spill in the fast path
  2576		} else {
  2577			s.vars[&ptrVar] = r[0]
  2578			s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs))
  2579			s.vars[&capVar] = r[2]
  2580		}
  2581	
  2582		b = s.endBlock()
  2583		b.AddEdgeTo(assign)
  2584	
  2585		// assign new elements to slots
  2586		s.startBlock(assign)
  2587	
  2588		if inplace {
  2589			l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len
  2590			nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
  2591			lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_nel), addr)
  2592			s.store(types.Types[TINT], lenaddr, nl)
  2593		}
  2594	
  2595		// Evaluate args
  2596		type argRec struct {
  2597			// if store is true, we're appending the value v.  If false, we're appending the
  2598			// value at *v.
  2599			v     *ssa.Value
  2600			store bool
  2601		}
  2602		args := make([]argRec, 0, nargs)
  2603		for _, n := range n.List.Slice()[1:] {
  2604			if canSSAType(n.Type) {
  2605				args = append(args, argRec{v: s.expr(n), store: true})
  2606			} else {
  2607				v := s.addr(n, false)
  2608				args = append(args, argRec{v: v})
  2609			}
  2610		}
  2611	
  2612		p = s.variable(&ptrVar, pt) // generates phi for ptr
  2613		if !inplace {
  2614			nl = s.variable(&newlenVar, types.Types[TINT]) // generates phi for nl
  2615			c = s.variable(&capVar, types.Types[TINT])     // generates phi for cap
  2616		}
  2617		p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
  2618		for i, arg := range args {
  2619			addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[TINT], int64(i)))
  2620			if arg.store {
  2621				s.storeType(et, addr, arg.v, 0, true)
  2622			} else {
  2623				s.move(et, addr, arg.v)
  2624			}
  2625		}
  2626	
  2627		delete(s.vars, &ptrVar)
  2628		if inplace {
  2629			delete(s.vars, &lenVar)
  2630			return nil
  2631		}
  2632		delete(s.vars, &newlenVar)
  2633		delete(s.vars, &capVar)
  2634		// make result
  2635		return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
  2636	}
  2637	
  2638	// condBranch evaluates the boolean expression cond and branches to yes
  2639	// if cond is true and no if cond is false.
  2640	// This function is intended to handle && and || better than just calling
  2641	// s.expr(cond) and branching on the result.
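// For example, condBranch(a && b, yes, no, likely) branches on a to either
// no or a fresh block mid, then from mid branches on b to yes or no, so
// each operand gets its own conditional branch and no boolean result is
// materialized.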
  2642	func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
  2643		switch cond.Op {
  2644		case OANDAND:
  2645			mid := s.f.NewBlock(ssa.BlockPlain)
  2646			s.stmtList(cond.Ninit)
  2647			s.condBranch(cond.Left, mid, no, max8(likely, 0))
  2648			s.startBlock(mid)
  2649			s.condBranch(cond.Right, yes, no, likely)
  2650			return
  2651			// Note: if likely==1, then both recursive calls pass 1.
  2652			// If likely==-1, then we don't have enough information to decide
  2653			// whether the first branch is likely or not. So we pass 0 for
  2654			// the likeliness of the first branch.
  2655			// TODO: have the frontend give us branch prediction hints for
  2656			// OANDAND and OOROR nodes (if it ever has such info).
  2657		case OOROR:
  2658			mid := s.f.NewBlock(ssa.BlockPlain)
  2659			s.stmtList(cond.Ninit)
  2660			s.condBranch(cond.Left, yes, mid, min8(likely, 0))
  2661			s.startBlock(mid)
  2662			s.condBranch(cond.Right, yes, no, likely)
  2663			return
  2664			// Note: if likely==-1, then both recursive calls pass -1.
  2665			// If likely==1, then we don't have enough info to decide
  2666			// the likelihood of the first branch.
  2667		case ONOT:
  2668			s.stmtList(cond.Ninit)
  2669			s.condBranch(cond.Left, no, yes, -likely)
  2670			return
  2671		}
  2672		c := s.expr(cond)
  2673		b := s.endBlock()
  2674		b.Kind = ssa.BlockIf
  2675		b.SetControl(c)
  2676		b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
  2677		b.AddEdgeTo(yes)
  2678		b.AddEdgeTo(no)
  2679	}
  2680	
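// skipMask records which top-level parts of a composite value (pointer,
// length, capacity) an assignment may skip storing; see assign and
// storeType.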
  2681	type skipMask uint8
  2682	
  2683	const (
  2684		skipPtr skipMask = 1 << iota
  2685		skipLen
  2686		skipCap
  2687	)
  2688	
  2689	// assign does left = right.
  2690	// Right has already been evaluated to ssa, left has not.
  2691	// If deref is true, then we do left = *right instead (and right has already been nil-checked).
  2692	// If deref is true and right == nil, just do left = 0.
  2693	// skip indicates assignments (at the top level) that can be avoided.
  2694	func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) {
  2695		if left.Op == ONAME && left.isBlank() {
  2696			return
  2697		}
  2698		t := left.Type
  2699		dowidth(t)
  2700		if s.canSSA(left) {
  2701			if deref {
  2702				s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
  2703			}
  2704			if left.Op == ODOT {
  2705				// We're assigning to a field of an ssa-able value.
  2706				// We need to build a new structure with the new value for the
  2707				// field we're assigning and the old values for the other fields.
  2708				// For instance:
  2709				//   type T struct {a, b, c int}
   2710				//   var x T
  2711				//   x.b = 5
  2712				// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
  2713	
  2714				// Grab information about the structure type.
  2715				t := left.Left.Type
  2716				nf := t.NumFields()
  2717				idx := fieldIdx(left)
  2718	
  2719				// Grab old value of structure.
  2720				old := s.expr(left.Left)
  2721	
  2722				// Make new structure.
  2723				new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
  2724	
  2725				// Add fields as args.
  2726				for i := 0; i < nf; i++ {
  2727					if i == idx {
  2728						new.AddArg(right)
  2729					} else {
  2730						new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
  2731					}
  2732				}
  2733	
  2734				// Recursively assign the new value we've made to the base of the dot op.
  2735				s.assign(left.Left, new, false, 0)
  2736				// TODO: do we need to update named values here?
  2737				return
  2738			}
  2739			if left.Op == OINDEX && left.Left.Type.IsArray() {
  2740				s.pushLine(left.Pos)
  2741				defer s.popLine()
  2742				// We're assigning to an element of an ssa-able array.
  2743				// a[i] = v
  2744				t := left.Left.Type
  2745				n := t.NumElem()
  2746	
  2747				i := s.expr(left.Right) // index
  2748				if n == 0 {
  2749					// The bounds check must fail.  Might as well
  2750					// ignore the actual index and just use zeros.
  2751					z := s.constInt(types.Types[TINT], 0)
  2752					s.boundsCheck(z, z, ssa.BoundsIndex, false)
  2753					return
  2754				}
  2755				if n != 1 {
  2756					s.Fatalf("assigning to non-1-length array")
  2757				}
  2758				// Rewrite to a = [1]{v}
  2759				len := s.constInt(types.Types[TINT], 1)
  2760				i = s.boundsCheck(i, len, ssa.BoundsIndex, false)
  2761				v := s.newValue1(ssa.OpArrayMake1, t, right)
  2762				s.assign(left.Left, v, false, 0)
  2763				return
  2764			}
  2765			// Update variable assignment.
  2766			s.vars[left] = right
  2767			s.addNamedValue(left, right)
  2768			return
  2769		}
  2770		// Left is not ssa-able. Compute its address.
  2771		if left.Op == ONAME && left.Class() != PEXTERN && skip == 0 {
  2772			s.vars[&memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, left, s.mem(), !left.IsAutoTmp())
  2773		}
  2774		addr := s.addr(left, false)
  2775		if isReflectHeaderDataField(left) {
  2776			// Package unsafe's documentation says storing pointers into
  2777			// reflect.SliceHeader and reflect.StringHeader's Data fields
  2778			// is valid, even though they have type uintptr (#19168).
  2779			// Mark it pointer type to signal the writebarrier pass to
  2780			// insert a write barrier.
  2781			t = types.Types[TUNSAFEPTR]
  2782		}
  2783		if deref {
  2784			// Treat as a mem->mem move.
  2785			if right == nil {
  2786				s.zero(t, addr)
  2787			} else {
  2788				s.move(t, addr, right)
  2789			}
  2790			return
  2791		}
  2792		// Treat as a store.
  2793		s.storeType(t, addr, right, skip, !left.IsAutoTmp())
  2794	}
  2795	
  2796	// zeroVal returns the zero value for type t.
  2797	func (s *state) zeroVal(t *types.Type) *ssa.Value {
  2798		switch {
  2799		case t.IsInteger():
  2800			switch t.Size() {
  2801			case 1:
  2802				return s.constInt8(t, 0)
  2803			case 2:
  2804				return s.constInt16(t, 0)
  2805			case 4:
  2806				return s.constInt32(t, 0)
  2807			case 8:
  2808				return s.constInt64(t, 0)
  2809			default:
  2810				s.Fatalf("bad sized integer type %v", t)
  2811			}
  2812		case t.IsFloat():
  2813			switch t.Size() {
  2814			case 4:
  2815				return s.constFloat32(t, 0)
  2816			case 8:
  2817				return s.constFloat64(t, 0)
  2818			default:
  2819				s.Fatalf("bad sized float type %v", t)
  2820			}
  2821		case t.IsComplex():
  2822			switch t.Size() {
  2823			case 8:
  2824				z := s.constFloat32(types.Types[TFLOAT32], 0)
  2825				return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
  2826			case 16:
  2827				z := s.constFloat64(types.Types[TFLOAT64], 0)
  2828				return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
  2829			default:
  2830				s.Fatalf("bad sized complex type %v", t)
  2831			}
  2832	
  2833		case t.IsString():
  2834			return s.constEmptyString(t)
  2835		case t.IsPtrShaped():
  2836			return s.constNil(t)
  2837		case t.IsBoolean():
  2838			return s.constBool(false)
  2839		case t.IsInterface():
  2840			return s.constInterface(t)
  2841		case t.IsSlice():
  2842			return s.constSlice(t)
  2843		case t.IsStruct():
  2844			n := t.NumFields()
  2845			v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
  2846			for i := 0; i < n; i++ {
  2847				v.AddArg(s.zeroVal(t.FieldType(i)))
  2848			}
  2849			return v
  2850		case t.IsArray():
  2851			switch t.NumElem() {
  2852			case 0:
  2853				return s.entryNewValue0(ssa.OpArrayMake0, t)
  2854			case 1:
  2855				return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
  2856			}
  2857		}
  2858		s.Fatalf("zero for type %v not implemented", t)
  2859		return nil
  2860	}
  2861	
  2862	type callKind int8
  2863	
  2864	const (
  2865		callNormal callKind = iota
  2866		callDefer
  2867		callDeferStack
  2868		callGo
  2869	)
  2870	
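// sfRtCallDef describes a softfloat runtime helper: the function to call
// and the type of its result.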
  2871	type sfRtCallDef struct {
  2872		rtfn  *obj.LSym
  2873		rtype types.EType
  2874	}
  2875	
  2876	var softFloatOps map[ssa.Op]sfRtCallDef
  2877	
  2878	func softfloatInit() {
   2879		// Some of these operations get transformed by sfcall: Sub is lowered to fadd with a negated second operand, Less and Leq swap their operands and use fgt/fge, and Neq negates the result of feq.
  2880		softFloatOps = map[ssa.Op]sfRtCallDef{
  2881			ssa.OpAdd32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32},
  2882			ssa.OpAdd64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64},
  2883			ssa.OpSub32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32},
  2884			ssa.OpSub64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64},
  2885			ssa.OpMul32F: sfRtCallDef{sysfunc("fmul32"), TFLOAT32},
  2886			ssa.OpMul64F: sfRtCallDef{sysfunc("fmul64"), TFLOAT64},
  2887			ssa.OpDiv32F: sfRtCallDef{sysfunc("fdiv32"), TFLOAT32},
  2888			ssa.OpDiv64F: sfRtCallDef{sysfunc("fdiv64"), TFLOAT64},
  2889	
  2890			ssa.OpEq64F:      sfRtCallDef{sysfunc("feq64"), TBOOL},
  2891			ssa.OpEq32F:      sfRtCallDef{sysfunc("feq32"), TBOOL},
  2892			ssa.OpNeq64F:     sfRtCallDef{sysfunc("feq64"), TBOOL},
  2893			ssa.OpNeq32F:     sfRtCallDef{sysfunc("feq32"), TBOOL},
  2894			ssa.OpLess64F:    sfRtCallDef{sysfunc("fgt64"), TBOOL},
  2895			ssa.OpLess32F:    sfRtCallDef{sysfunc("fgt32"), TBOOL},
  2896			ssa.OpGreater64F: sfRtCallDef{sysfunc("fgt64"), TBOOL},
  2897			ssa.OpGreater32F: sfRtCallDef{sysfunc("fgt32"), TBOOL},
  2898			ssa.OpLeq64F:     sfRtCallDef{sysfunc("fge64"), TBOOL},
  2899			ssa.OpLeq32F:     sfRtCallDef{sysfunc("fge32"), TBOOL},
  2900			ssa.OpGeq64F:     sfRtCallDef{sysfunc("fge64"), TBOOL},
  2901			ssa.OpGeq32F:     sfRtCallDef{sysfunc("fge32"), TBOOL},
  2902	
  2903			ssa.OpCvt32to32F:  sfRtCallDef{sysfunc("fint32to32"), TFLOAT32},
  2904			ssa.OpCvt32Fto32:  sfRtCallDef{sysfunc("f32toint32"), TINT32},
  2905			ssa.OpCvt64to32F:  sfRtCallDef{sysfunc("fint64to32"), TFLOAT32},
  2906			ssa.OpCvt32Fto64:  sfRtCallDef{sysfunc("f32toint64"), TINT64},
  2907			ssa.OpCvt64Uto32F: sfRtCallDef{sysfunc("fuint64to32"), TFLOAT32},
  2908			ssa.OpCvt32Fto64U: sfRtCallDef{sysfunc("f32touint64"), TUINT64},
  2909			ssa.OpCvt32to64F:  sfRtCallDef{sysfunc("fint32to64"), TFLOAT64},
  2910			ssa.OpCvt64Fto32:  sfRtCallDef{sysfunc("f64toint32"), TINT32},
  2911			ssa.OpCvt64to64F:  sfRtCallDef{sysfunc("fint64to64"), TFLOAT64},
  2912			ssa.OpCvt64Fto64:  sfRtCallDef{sysfunc("f64toint64"), TINT64},
  2913			ssa.OpCvt64Uto64F: sfRtCallDef{sysfunc("fuint64to64"), TFLOAT64},
  2914			ssa.OpCvt64Fto64U: sfRtCallDef{sysfunc("f64touint64"), TUINT64},
  2915			ssa.OpCvt32Fto64F: sfRtCallDef{sysfunc("f32to64"), TFLOAT64},
  2916			ssa.OpCvt64Fto32F: sfRtCallDef{sysfunc("f64to32"), TFLOAT32},
  2917		}
  2918	}
  2919	
  2920	// TODO: do not emit sfcall if operation can be optimized to constant in later
  2921	// opt phase
  2922	func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
  2923		if callDef, ok := softFloatOps[op]; ok {
  2924			switch op {
  2925			case ssa.OpLess32F,
  2926				ssa.OpLess64F,
  2927				ssa.OpLeq32F,
  2928				ssa.OpLeq64F:
  2929				args[0], args[1] = args[1], args[0]
  2930			case ssa.OpSub32F,
  2931				ssa.OpSub64F:
  2932				args[1] = s.newValue1(s.ssaOp(ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
  2933			}
  2934	
  2935			result := s.rtcall(callDef.rtfn, true, []*types.Type{types.Types[callDef.rtype]}, args...)[0]
  2936			if op == ssa.OpNeq32F || op == ssa.OpNeq64F {
  2937				result = s.newValue1(ssa.OpNot, result.Type, result)
  2938			}
  2939			return result, true
  2940		}
  2941		return nil, false
  2942	}
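// Illustrative summary of the rewrites performed here (no behavior beyond
// the table in softfloatInit and the fix-ups in sfcall above):
//
//	x < y   →  fgt32(y, x)     // Less/Leq lower via swapped fgt/fge
//	x - y   →  fadd32(x, -y)   // Sub lowers as an add of the negation
//	x != y  →  !feq32(x, y)    // Neq negates the feq result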
  2943	
  2944	var intrinsics map[intrinsicKey]intrinsicBuilder
  2945	
  2946	// An intrinsicBuilder converts a call node n into an ssa value that
  2947	// implements that call as an intrinsic. args is a list of arguments to the func.
  2948	type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value
  2949	
  2950	type intrinsicKey struct {
  2951		arch *sys.Arch
  2952		pkg  string
  2953		fn   string
  2954	}
  2955	
  2956	func init() {
  2957		intrinsics = map[intrinsicKey]intrinsicBuilder{}
  2958	
  2959		var all []*sys.Arch
  2960		var p4 []*sys.Arch
  2961		var p8 []*sys.Arch
  2962		var lwatomics []*sys.Arch
  2963		for _, a := range sys.Archs {
  2964			all = append(all, a)
  2965			if a.PtrSize == 4 {
  2966				p4 = append(p4, a)
  2967			} else {
  2968				p8 = append(p8, a)
  2969			}
  2970			if a.Family != sys.PPC64 {
  2971				lwatomics = append(lwatomics, a)
  2972			}
  2973		}
  2974	
  2975		// add adds the intrinsic b for pkg.fn for the given list of architectures.
  2976		add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) {
  2977			for _, a := range archs {
  2978				intrinsics[intrinsicKey{a, pkg, fn}] = b
  2979			}
  2980		}
  2981		// addF does the same as add but operates on architecture families.
  2982		addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) {
  2983			m := 0
  2984			for _, f := range archFamilies {
  2985				if f >= 32 {
  2986					panic("too many architecture families")
  2987				}
  2988				m |= 1 << uint(f)
  2989			}
  2990			for _, a := range all {
  2991				if m>>uint(a.Family)&1 != 0 {
  2992					intrinsics[intrinsicKey{a, pkg, fn}] = b
  2993				}
  2994			}
  2995		}
  2996		// alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists.
  2997		alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) {
  2998			for _, a := range archs {
  2999				if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok {
  3000					intrinsics[intrinsicKey{a, pkg, fn}] = b
  3001				}
  3002			}
  3003		}
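	// A minimal sketch of how these helpers compose (hypothetical pkg/fn names):
	//
	//	addF("p", "f", b, sys.AMD64, sys.ARM64) // registers b for every AMD64 and ARM64 arch
	//	alias("p", "g", "p", "f", all...)       // p.g reuses b, but only where p.f was registered
	//
	// Architectures with no entry for p.f silently get no entry for p.g either.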
  3004	
  3005		/******** runtime ********/
  3006		if !instrumenting {
  3007			add("runtime", "slicebytetostringtmp",
  3008				func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3009					// Compiler frontend optimizations emit OBYTES2STRTMP nodes
  3010					// for the backend instead of slicebytetostringtmp calls
  3011					// when not instrumenting.
  3012					slice := args[0]
  3013					ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
  3014					len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
  3015					return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
  3016				},
  3017				all...)
  3018		}
  3019		addF("runtime/internal/math", "MulUintptr",
  3020			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3021				if s.config.PtrSize == 4 {
  3022					return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1])
  3023				}
  3024				return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[TUINT], types.Types[TUINT]), args[0], args[1])
  3025			},
  3026			sys.AMD64, sys.I386)
  3027		add("runtime", "KeepAlive",
  3028			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3029				data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
  3030				s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
  3031				return nil
  3032			},
  3033			all...)
  3034		add("runtime", "getclosureptr",
  3035			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3036				return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
  3037			},
  3038			all...)
  3039	
  3040		add("runtime", "getcallerpc",
  3041			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3042				return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
  3043			},
  3044			all...)
  3045	
  3046		add("runtime", "getcallersp",
  3047			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3048				return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr)
  3049			},
  3050			all...)
  3051	
  3052		/******** runtime/internal/sys ********/
  3053		addF("runtime/internal/sys", "Ctz32",
  3054			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3055				return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
  3056			},
  3057			sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
  3058		addF("runtime/internal/sys", "Ctz64",
  3059			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3060				return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
  3061			},
  3062			sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
  3063		addF("runtime/internal/sys", "Bswap32",
  3064			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3065				return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0])
  3066			},
  3067			sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
  3068		addF("runtime/internal/sys", "Bswap64",
  3069			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3070				return s.newValue1(ssa.OpBswap64, types.Types[TUINT64], args[0])
  3071			},
  3072			sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
  3073	
  3074		/******** runtime/internal/atomic ********/
  3075		addF("runtime/internal/atomic", "Load",
  3076			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3077				v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
  3078				s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
  3079				return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
  3080			},
  3081			sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
  3082		addF("runtime/internal/atomic", "Load8",
  3083			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3084				v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[TUINT8], types.TypeMem), args[0], s.mem())
  3085				s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
  3086				return s.newValue1(ssa.OpSelect0, types.Types[TUINT8], v)
  3087			},
  3088			sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
  3089		addF("runtime/internal/atomic", "Load64",
  3090			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3091				v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
  3092				s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
  3093				return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
  3094			},
  3095			sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
  3096		addF("runtime/internal/atomic", "LoadAcq",
  3097			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3098				v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
  3099				s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
  3100				return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
  3101			},
  3102			sys.PPC64, sys.S390X)
  3103		addF("runtime/internal/atomic", "Loadp",
  3104			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3105				v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
  3106				s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
  3107				return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
  3108			},
  3109			sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
  3110	
  3111		addF("runtime/internal/atomic", "Store",
  3112			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3113				s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
  3114				return nil
  3115			},
  3116			sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
  3117		addF("runtime/internal/atomic", "Store64",
  3118			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3119				s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
  3120				return nil
  3121			},
  3122			sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
  3123		addF("runtime/internal/atomic", "StorepNoWB",
  3124			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3125				s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
  3126				return nil
  3127			},
  3128			sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64)
  3129		addF("runtime/internal/atomic", "StoreRel",
  3130			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3131				s.vars[&memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
  3132				return nil
  3133			},
  3134			sys.PPC64, sys.S390X)
  3135	
  3136		addF("runtime/internal/atomic", "Xchg",
  3137			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3138				v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
  3139				s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
  3140				return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
  3141			},
  3142			sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
  3143		addF("runtime/internal/atomic", "Xchg64",
  3144			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3145				v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
  3146				s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
  3147				return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
  3148			},
  3149			sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
  3150	
  3151		addF("runtime/internal/atomic", "Xadd",
  3152			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3153				v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
  3154				s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
  3155				return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
  3156			},
  3157			sys.AMD64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
  3158		addF("runtime/internal/atomic", "Xadd64",
  3159			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3160				v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
  3161				s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
  3162				return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
  3163			},
  3164			sys.AMD64, sys.S390X, sys.MIPS64, sys.PPC64)
  3165	
  3166		makeXaddARM64 := func(op0 ssa.Op, op1 ssa.Op, ty types.EType) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3167			return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3168			// Whether the target supports the atomic instructions is detected dynamically at run time (via arm64HasATOMICS).
  3169				addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), arm64HasATOMICS, s.sb)
  3170				v := s.load(types.Types[TBOOL], addr)
  3171				b := s.endBlock()
  3172				b.Kind = ssa.BlockIf
  3173				b.SetControl(v)
  3174				bTrue := s.f.NewBlock(ssa.BlockPlain)
  3175				bFalse := s.f.NewBlock(ssa.BlockPlain)
  3176				bEnd := s.f.NewBlock(ssa.BlockPlain)
  3177				b.AddEdgeTo(bTrue)
  3178				b.AddEdgeTo(bFalse)
  3179			b.Likely = ssa.BranchLikely // most machines have the atomic instructions nowadays
  3180	
  3181			// We have atomic instructions - use them directly.
  3182				s.startBlock(bTrue)
  3183				v0 := s.newValue3(op1, types.NewTuple(types.Types[ty], types.TypeMem), args[0], args[1], s.mem())
  3184				s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v0)
  3185				s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[ty], v0)
  3186				s.endBlock().AddEdgeTo(bEnd)
  3187	
  3188				// Use original instruction sequence.
  3189				s.startBlock(bFalse)
  3190				v1 := s.newValue3(op0, types.NewTuple(types.Types[ty], types.TypeMem), args[0], args[1], s.mem())
  3191				s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v1)
  3192				s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[ty], v1)
  3193				s.endBlock().AddEdgeTo(bEnd)
  3194	
  3195				// Merge results.
  3196				s.startBlock(bEnd)
  3197				return s.variable(n, types.Types[ty])
  3198			}
  3199		}
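	// The builder above emits a feature-guarded diamond; roughly:
	//
	//	if arm64HasATOMICS {         // bTrue: single-instruction variant (op1)
	//		v = op1(ptr, delta)
	//	} else {                     // bFalse: original LL/SC-style sequence (op0)
	//		v = op0(ptr, delta)
	//	}
	//	// bEnd: the two results merge into the variable read back for n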
  3200	
  3201		addF("runtime/internal/atomic", "Xadd",
  3202			makeXaddARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, TUINT32),
  3203			sys.ARM64)
  3204		addF("runtime/internal/atomic", "Xadd64",
  3205			makeXaddARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, TUINT64),
  3206			sys.ARM64)
  3207	
  3208		addF("runtime/internal/atomic", "Cas",
  3209			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3210				v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
  3211				s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
  3212				return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
  3213			},
  3214			sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
  3215		addF("runtime/internal/atomic", "Cas64",
  3216			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3217				v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
  3218				s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
  3219				return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
  3220			},
  3221			sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
  3222		addF("runtime/internal/atomic", "CasRel",
  3223			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3224				v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
  3225				s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
  3226				return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
  3227			},
  3228			sys.PPC64)
  3229	
  3230		addF("runtime/internal/atomic", "And8",
  3231			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3232				s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
  3233				return nil
  3234			},
  3235			sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64)
  3236		addF("runtime/internal/atomic", "Or8",
  3237			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3238				s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
  3239				return nil
  3240			},
  3241			sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64)
  3242	
  3243		alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
  3244		alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
  3245		alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...)
  3246		alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...)
  3247		alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
  3248		alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
  3249		alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
  3250		alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
  3251		alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
  3252		alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
  3253		alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
  3254		alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
  3255		alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
  3256		alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...)
  3257		alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...)
  3258		alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
  3259		alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
  3260		alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
  3261		alias("runtime/internal/atomic", "CasRel", "runtime/internal/atomic", "Cas", lwatomics...)
  3262	
  3263		alias("runtime/internal/sys", "Ctz8", "math/bits", "TrailingZeros8", all...)
  3264	
  3265		/******** math ********/
  3266		addF("math", "Sqrt",
  3267			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3268				return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0])
  3269			},
  3270			sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.S390X, sys.Wasm)
  3271		addF("math", "Trunc",
  3272			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3273				return s.newValue1(ssa.OpTrunc, types.Types[TFLOAT64], args[0])
  3274			},
  3275			sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
  3276		addF("math", "Ceil",
  3277			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3278				return s.newValue1(ssa.OpCeil, types.Types[TFLOAT64], args[0])
  3279			},
  3280			sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
  3281		addF("math", "Floor",
  3282			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3283				return s.newValue1(ssa.OpFloor, types.Types[TFLOAT64], args[0])
  3284			},
  3285			sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
  3286		addF("math", "Round",
  3287			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3288				return s.newValue1(ssa.OpRound, types.Types[TFLOAT64], args[0])
  3289			},
  3290			sys.ARM64, sys.PPC64, sys.S390X)
  3291		addF("math", "RoundToEven",
  3292			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3293				return s.newValue1(ssa.OpRoundToEven, types.Types[TFLOAT64], args[0])
  3294			},
  3295			sys.ARM64, sys.S390X, sys.Wasm)
  3296		addF("math", "Abs",
  3297			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3298				return s.newValue1(ssa.OpAbs, types.Types[TFLOAT64], args[0])
  3299			},
  3300			sys.ARM64, sys.PPC64, sys.Wasm)
  3301		addF("math", "Copysign",
  3302			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3303				return s.newValue2(ssa.OpCopysign, types.Types[TFLOAT64], args[0], args[1])
  3304			},
  3305			sys.PPC64, sys.Wasm)
  3306	
  3307		makeRoundAMD64 := func(op ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3308			return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3309				addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), x86HasSSE41, s.sb)
  3310				v := s.load(types.Types[TBOOL], addr)
  3311				b := s.endBlock()
  3312				b.Kind = ssa.BlockIf
  3313				b.SetControl(v)
  3314				bTrue := s.f.NewBlock(ssa.BlockPlain)
  3315				bFalse := s.f.NewBlock(ssa.BlockPlain)
  3316				bEnd := s.f.NewBlock(ssa.BlockPlain)
  3317				b.AddEdgeTo(bTrue)
  3318				b.AddEdgeTo(bFalse)
  3319				b.Likely = ssa.BranchLikely // most machines have sse4.1 nowadays
  3320	
  3321				// We have the intrinsic - use it directly.
  3322				s.startBlock(bTrue)
  3323				s.vars[n] = s.newValue1(op, types.Types[TFLOAT64], args[0])
  3324				s.endBlock().AddEdgeTo(bEnd)
  3325	
  3326				// Call the pure Go version.
  3327				s.startBlock(bFalse)
  3328				a := s.call(n, callNormal)
  3329				s.vars[n] = s.load(types.Types[TFLOAT64], a)
  3330				s.endBlock().AddEdgeTo(bEnd)
  3331	
  3332				// Merge results.
  3333				s.startBlock(bEnd)
  3334				return s.variable(n, types.Types[TFLOAT64])
  3335			}
  3336		}
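	// Same feature-guarded diamond as makeXaddARM64 above, except the fallback
	// branch has no alternate op: it simply calls the pure Go math function
	// via s.call(n, callNormal) and loads the result.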
  3337		addF("math", "RoundToEven",
  3338			makeRoundAMD64(ssa.OpRoundToEven),
  3339			sys.AMD64)
  3340		addF("math", "Floor",
  3341			makeRoundAMD64(ssa.OpFloor),
  3342			sys.AMD64)
  3343		addF("math", "Ceil",
  3344			makeRoundAMD64(ssa.OpCeil),
  3345			sys.AMD64)
  3346		addF("math", "Trunc",
  3347			makeRoundAMD64(ssa.OpTrunc),
  3348			sys.AMD64)
  3349	
  3350		/******** math/bits ********/
  3351		addF("math/bits", "TrailingZeros64",
  3352			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3353				return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
  3354			},
  3355			sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
  3356		addF("math/bits", "TrailingZeros32",
  3357			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3358				return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
  3359			},
  3360			sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
  3361		addF("math/bits", "TrailingZeros16",
  3362			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3363				x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
  3364				c := s.constInt32(types.Types[TUINT32], 1<<16)
  3365				y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
  3366				return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
  3367			},
  3368			sys.MIPS)
  3369		addF("math/bits", "TrailingZeros16",
  3370			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3371				return s.newValue1(ssa.OpCtz16, types.Types[TINT], args[0])
  3372			},
  3373			sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm)
  3374		addF("math/bits", "TrailingZeros16",
  3375			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3376				x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
  3377				c := s.constInt64(types.Types[TUINT64], 1<<16)
  3378				y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
  3379				return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
  3380			},
  3381			sys.S390X, sys.PPC64)
  3382		addF("math/bits", "TrailingZeros8",
  3383			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3384				x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
  3385				c := s.constInt32(types.Types[TUINT32], 1<<8)
  3386				y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
  3387				return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
  3388			},
  3389			sys.MIPS)
  3390		addF("math/bits", "TrailingZeros8",
  3391			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3392				return s.newValue1(ssa.OpCtz8, types.Types[TINT], args[0])
  3393			},
  3394			sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm)
  3395		addF("math/bits", "TrailingZeros8",
  3396			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3397				x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
  3398				c := s.constInt64(types.Types[TUINT64], 1<<8)
  3399				y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
  3400				return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
  3401			},
  3402			sys.S390X)
  3403		alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
  3404		alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...)
  3405		// ReverseBytes inlines correctly, no need to intrinsify it.
  3406		// ReverseBytes16 lowers to a rotate, no need for anything special here.
  3407		addF("math/bits", "Len64",
  3408			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3409				return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
  3410			},
  3411			sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
  3412		addF("math/bits", "Len32",
  3413			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3414				return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
  3415			},
  3416			sys.AMD64, sys.ARM64)
  3417		addF("math/bits", "Len32",
  3418			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3419				if s.config.PtrSize == 4 {
  3420					return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
  3421				}
  3422				x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0])
  3423				return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
  3424			},
  3425			sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
  3426		addF("math/bits", "Len16",
  3427			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3428				if s.config.PtrSize == 4 {
  3429					x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
  3430					return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
  3431				}
  3432				x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
  3433				return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
  3434			},
  3435			sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
  3436		addF("math/bits", "Len16",
  3437			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3438				return s.newValue1(ssa.OpBitLen16, types.Types[TINT], args[0])
  3439			},
  3440			sys.AMD64)
  3441		addF("math/bits", "Len8",
  3442			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3443				if s.config.PtrSize == 4 {
  3444					x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
  3445					return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
  3446				}
  3447				x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
  3448				return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
  3449			},
  3450			sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
  3451		addF("math/bits", "Len8",
  3452			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3453				return s.newValue1(ssa.OpBitLen8, types.Types[TINT], args[0])
  3454			},
  3455			sys.AMD64)
  3456		addF("math/bits", "Len",
  3457			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3458				if s.config.PtrSize == 4 {
  3459					return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
  3460				}
  3461				return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
  3462			},
  3463			sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
  3464		// LeadingZeros is handled because it trivially calls Len.
  3465		addF("math/bits", "Reverse64",
  3466			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3467				return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
  3468			},
  3469			sys.ARM64)
  3470		addF("math/bits", "Reverse32",
  3471			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3472				return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
  3473			},
  3474			sys.ARM64)
  3475		addF("math/bits", "Reverse16",
  3476			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3477				return s.newValue1(ssa.OpBitRev16, types.Types[TINT], args[0])
  3478			},
  3479			sys.ARM64)
  3480		addF("math/bits", "Reverse8",
  3481			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3482				return s.newValue1(ssa.OpBitRev8, types.Types[TINT], args[0])
  3483			},
  3484			sys.ARM64)
  3485		addF("math/bits", "Reverse",
  3486			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3487				if s.config.PtrSize == 4 {
  3488					return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
  3489				}
  3490				return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
  3491			},
  3492			sys.ARM64)
  3493		addF("math/bits", "RotateLeft8",
  3494			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3495				return s.newValue2(ssa.OpRotateLeft8, types.Types[TUINT8], args[0], args[1])
  3496			},
  3497			sys.AMD64)
  3498		addF("math/bits", "RotateLeft16",
  3499			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3500				return s.newValue2(ssa.OpRotateLeft16, types.Types[TUINT16], args[0], args[1])
  3501			},
  3502			sys.AMD64)
  3503		addF("math/bits", "RotateLeft32",
  3504			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3505				return s.newValue2(ssa.OpRotateLeft32, types.Types[TUINT32], args[0], args[1])
  3506			},
  3507			sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
  3508		addF("math/bits", "RotateLeft64",
  3509			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3510				return s.newValue2(ssa.OpRotateLeft64, types.Types[TUINT64], args[0], args[1])
  3511			},
  3512			sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
  3513		alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
  3514	
  3515		makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3516			return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3517				addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), x86HasPOPCNT, s.sb)
  3518				v := s.load(types.Types[TBOOL], addr)
  3519				b := s.endBlock()
  3520				b.Kind = ssa.BlockIf
  3521				b.SetControl(v)
  3522				bTrue := s.f.NewBlock(ssa.BlockPlain)
  3523				bFalse := s.f.NewBlock(ssa.BlockPlain)
  3524				bEnd := s.f.NewBlock(ssa.BlockPlain)
  3525				b.AddEdgeTo(bTrue)
  3526				b.AddEdgeTo(bFalse)
  3527				b.Likely = ssa.BranchLikely // most machines have popcnt nowadays
  3528	
  3529				// We have the intrinsic - use it directly.
  3530				s.startBlock(bTrue)
  3531				op := op64
  3532				if s.config.PtrSize == 4 {
  3533					op = op32
  3534				}
  3535				s.vars[n] = s.newValue1(op, types.Types[TINT], args[0])
  3536				s.endBlock().AddEdgeTo(bEnd)
  3537	
  3538				// Call the pure Go version.
  3539				s.startBlock(bFalse)
  3540				a := s.call(n, callNormal)
  3541				s.vars[n] = s.load(types.Types[TINT], a)
  3542				s.endBlock().AddEdgeTo(bEnd)
  3543	
  3544				// Merge results.
  3545				s.startBlock(bEnd)
  3546				return s.variable(n, types.Types[TINT])
  3547			}
  3548		}
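	// Same shape again: a popcnt-guarded diamond with a call to the pure Go
	// fallback, selecting the 32- or 64-bit popcount op by pointer size.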
  3549		addF("math/bits", "OnesCount64",
  3550			makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64),
  3551			sys.AMD64)
  3552		addF("math/bits", "OnesCount64",
  3553			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3554				return s.newValue1(ssa.OpPopCount64, types.Types[TINT], args[0])
  3555			},
  3556			sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
  3557		addF("math/bits", "OnesCount32",
  3558			makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32),
  3559			sys.AMD64)
  3560		addF("math/bits", "OnesCount32",
  3561			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3562				return s.newValue1(ssa.OpPopCount32, types.Types[TINT], args[0])
  3563			},
  3564			sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
  3565		addF("math/bits", "OnesCount16",
  3566			makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16),
  3567			sys.AMD64)
  3568		addF("math/bits", "OnesCount16",
  3569			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3570				return s.newValue1(ssa.OpPopCount16, types.Types[TINT], args[0])
  3571			},
  3572			sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
  3573		addF("math/bits", "OnesCount8",
  3574			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3575				return s.newValue1(ssa.OpPopCount8, types.Types[TINT], args[0])
  3576			},
  3577			sys.S390X, sys.PPC64, sys.Wasm)
  3578		addF("math/bits", "OnesCount",
  3579			makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
  3580			sys.AMD64)
  3581		addF("math/bits", "Mul64",
  3582			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3583				return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
  3584			},
  3585			sys.AMD64, sys.ARM64, sys.PPC64)
  3586		alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64)
  3587		addF("math/bits", "Add64",
  3588			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3589				return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
  3590			},
  3591			sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X)
  3592		alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchS390X)
  3593		addF("math/bits", "Sub64",
  3594			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3595				return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
  3596			},
  3597			sys.AMD64, sys.ARM64, sys.S390X)
  3598		alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64, sys.ArchARM64, sys.ArchS390X)
  3599		addF("math/bits", "Div64",
  3600			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3601				// check for divide-by-zero/overflow and panic with appropriate message
  3602				cmpZero := s.newValue2(s.ssaOp(ONE, types.Types[TUINT64]), types.Types[TBOOL], args[2], s.zeroVal(types.Types[TUINT64]))
  3603				s.check(cmpZero, panicdivide)
  3604				cmpOverflow := s.newValue2(s.ssaOp(OLT, types.Types[TUINT64]), types.Types[TBOOL], args[0], args[2])
  3605				s.check(cmpOverflow, panicoverflow)
  3606				return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
  3607			},
  3608			sys.AMD64)
  3609		alias("math/bits", "Div", "math/bits", "Div64", sys.ArchAMD64)
  3610	
  3611		/******** sync/atomic ********/
  3612	
  3613		// Note: these are disabled by flag_race in findIntrinsic below.
  3614		alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...)
  3615		alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...)
  3616		alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...)
  3617		alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...)
  3618		alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...)
  3619		alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...)
  3620		alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...)
  3621	
  3622		alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...)
  3623		alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...)
  3624		// Note: not StorePointer, that needs a write barrier.  Same below for {CompareAnd}Swap.
  3625		alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...)
  3626		alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...)
  3627		alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...)
  3628		alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...)
  3629	
  3630		alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...)
  3631		alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...)
  3632		alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...)
  3633		alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...)
  3634		alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...)
  3635		alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...)
  3636	
  3637		alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...)
  3638		alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...)
  3639		alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...)
  3640		alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...)
  3641		alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...)
  3642		alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...)
  3643	
  3644		alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...)
  3645		alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...)
  3646		alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...)
  3647		alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...)
  3648		alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...)
  3649		alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...)
  3650	
  3651		/******** math/big ********/
  3652		add("math/big", "mulWW",
  3653			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3654				return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
  3655			},
  3656			sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64)
  3657		add("math/big", "divWW",
  3658			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
  3659				return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
  3660			},
  3661			sys.ArchAMD64)
  3662	}
  3663	
  3664	// findIntrinsic returns a function which builds the SSA equivalent of the
  3665	// function identified by the symbol sym.  If sym is not an intrinsic call, returns nil.
  3666	func findIntrinsic(sym *types.Sym) intrinsicBuilder {
  3667		if sym == nil || sym.Pkg == nil {
  3668			return nil
  3669		}
  3670		pkg := sym.Pkg.Path
  3671		if sym.Pkg == localpkg {
  3672			pkg = myimportpath
  3673		}
  3674		if flag_race && pkg == "sync/atomic" {
  3675			// The race detector needs to be able to intercept these calls.
  3676			// We can't intrinsify them.
  3677			return nil
  3678		}
  3679		// Skip intrinsifying math functions (which may contain hard-float
  3680		// instructions) when soft-float
  3681		if thearch.SoftFloat && pkg == "math" {
  3682			return nil
  3683		}
  3684	
  3685		fn := sym.Name
  3686		if ssa.IntrinsicsDisable {
  3687			if pkg == "runtime" && (fn == "getcallerpc" || fn == "getcallersp" || fn == "getclosureptr") {
  3688				// These runtime functions don't have definitions, must be intrinsics.
  3689			} else {
  3690				return nil
  3691			}
  3692		}
  3693		return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}]
  3694	}
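// For example (illustrative): on AMD64 without -race, the sym for
// sync/atomic.LoadInt32 resolves to the "Load" builder aliased in init above,
// while under -race this function returns nil so the call stays interceptable
// by the race detector.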
  3695	
  3696	func isIntrinsicCall(n *Node) bool {
  3697		if n == nil || n.Left == nil {
  3698			return false
  3699		}
  3700		return findIntrinsic(n.Left.Sym) != nil
  3701	}
  3702	
  3703	// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
  3704	func (s *state) intrinsicCall(n *Node) *ssa.Value {
  3705		v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n))
  3706		if ssa.IntrinsicsDebug > 0 {
  3707			x := v
  3708			if x == nil {
  3709				x = s.mem()
  3710			}
  3711			if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
  3712				x = x.Args[0]
  3713			}
  3714			Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString())
  3715		}
  3716		return v
  3717	}
  3718	
  3719	// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
  3720	func (s *state) intrinsicArgs(n *Node) []*ssa.Value {
  3721		// Construct map of temps; see comments in s.call about the structure of n.
  3722		temps := map[*Node]*ssa.Value{}
  3723		for _, a := range n.List.Slice() {
  3724			if a.Op != OAS {
  3725				s.Fatalf("non-assignment as a temp function argument %v", a.Op)
  3726			}
  3727			l, r := a.Left, a.Right
  3728			if l.Op != ONAME {
  3729				s.Fatalf("non-ONAME temp function argument %v", a.Op)
  3730			}
  3731			// Evaluate and store to "temporary".
  3732			// Walk ensures these temporaries are dead outside of n.
  3733			temps[l] = s.expr(r)
  3734		}
  3735		args := make([]*ssa.Value, n.Rlist.Len())
  3736		for i, n := range n.Rlist.Slice() {
  3737			// Store a value to an argument slot.
  3738			if x, ok := temps[n]; ok {
  3739				// This is a previously computed temporary.
  3740				args[i] = x
  3741				continue
  3742			}
  3743			// This is an explicit value; evaluate it.
  3744			args[i] = s.expr(n)
  3745		}
  3746		return args
  3747	}
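// Sketch of the node shape consumed above (see the comments in s.call):
// for a call f(g(), x), walk produces roughly
//
//	n.List:  tmp1 = g()   // OAS temps, evaluated first into the temps map
//	n.Rlist: tmp1, x      // argument slots; temps are looked up, x is evaluated here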
  3748	
  3749	// call emits code to call the function n using the specified call type.
  3750	// It returns the address of the return value (or nil if none).
  3751	func (s *state) call(n *Node, k callKind) *ssa.Value {
  3752		var sym *types.Sym     // target symbol (if static)
  3753		var closure *ssa.Value // ptr to closure to run (if dynamic)
  3754		var codeptr *ssa.Value // ptr to target code (if dynamic)
  3755		var rcvr *ssa.Value    // receiver to set
  3756		fn := n.Left
  3757		switch n.Op {
  3758		case OCALLFUNC:
  3759			if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC {
  3760				sym = fn.Sym
  3761				break
  3762			}
  3763			closure = s.expr(fn)
  3764			if thearch.LinkArch.Family == sys.Wasm || objabi.GOOS == "aix" && k != callGo {
  3765			// On AIX, the closure must be verified, since fn can be nil, except for a go statement; that case is left to the runtime so it can report the "go of nil func value" error.
  3766				// TODO(neelance): On other architectures this should be eliminated by the optimization steps
  3767				s.nilCheck(closure)
  3768			}
  3769		case OCALLMETH:
  3770			if fn.Op != ODOTMETH {
  3771				Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
  3772			}
  3773			if k == callNormal {
  3774				sym = fn.Sym
  3775				break
  3776			}
  3777			// Make a name n2 for the function.
  3778			// fn.Sym might be sync.(*Mutex).Unlock.
  3779			// Make a PFUNC node out of that, then evaluate it.
  3780			// We get back an SSA value representing &sync.(*Mutex).Unlock·f.
  3781			// We can then pass that to defer or go.
  3782			n2 := newnamel(fn.Pos, fn.Sym)
  3783			n2.Name.Curfn = s.curfn
  3784			n2.SetClass(PFUNC)
  3785			// n2.Sym already existed, so it's already marked as a function.
  3786			n2.Pos = fn.Pos
  3787			n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it.
  3788			closure = s.expr(n2)
  3789			// Note: receiver is already present in n.Rlist, so we don't
  3790			// want to set it here.
  3791		case OCALLINTER:
  3792			if fn.Op != ODOTINTER {
  3793				Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
  3794			}
  3795			i := s.expr(fn.Left)
  3796			itab := s.newValue1(ssa.OpITab, types.Types[TUINTPTR], i)
  3797			s.nilCheck(itab)
  3798			itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab
  3799			itab = s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
  3800			if k == callNormal {
  3801				codeptr = s.load(types.Types[TUINTPTR], itab)
  3802			} else {
  3803				closure = itab
  3804			}
  3805			rcvr = s.newValue1(ssa.OpIData, types.Types[TUINTPTR], i)
  3806		}
  3807		dowidth(fn.Type)
  3808		stksize := fn.Type.ArgWidth() // includes receiver, args, and results
  3809	
  3810		// Run all assignments of temps.
  3811		// The temps are introduced to avoid overwriting argument
  3812		// slots when arguments themselves require function calls.
  3813		s.stmtList(n.List)
  3814	
  3815		var call *ssa.Value
  3816		if k == callDeferStack {
  3817			// Make a defer struct d on the stack.
  3818			t := deferstruct(stksize)
  3819			d := tempAt(n.Pos, s.curfn, t)
  3820	
  3821			s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
  3822			addr := s.addr(d, false)
  3823	
  3824			// Must match reflect.go:deferstruct and src/runtime/runtime2.go:_defer.
  3825			// 0: siz
  3826			s.store(types.Types[TUINT32],
  3827				s.newValue1I(ssa.OpOffPtr, types.Types[TUINT32].PtrTo(), t.FieldOff(0), addr),
  3828				s.constInt32(types.Types[TUINT32], int32(stksize)))
  3829			// 1: started, set in deferprocStack
  3830			// 2: heap, set in deferprocStack
  3831			// 3: sp, set in deferprocStack
  3832			// 4: pc, set in deferprocStack
  3833			// 5: fn
  3834			s.store(closure.Type,
  3835				s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(5), addr),
  3836				closure)
  3837			// 6: panic, set in deferprocStack
  3838			// 7: link, set in deferprocStack
  3839	
  3840			// Then, store all the arguments of the defer call.
  3841			ft := fn.Type
  3842			off := t.FieldOff(8)
  3843			args := n.Rlist.Slice()
  3844	
  3845			// Set receiver (for interface calls). Always a pointer.
  3846			if rcvr != nil {
  3847				p := s.newValue1I(ssa.OpOffPtr, ft.Recv().Type.PtrTo(), off, addr)
  3848				s.store(types.Types[TUINTPTR], p, rcvr)
  3849			}
  3850			// Set receiver (for method calls).
  3851			if n.Op == OCALLMETH {
  3852				f := ft.Recv()
  3853				s.storeArgWithBase(args[0], f.Type, addr, off+f.Offset)
  3854				args = args[1:]
  3855			}
  3856			// Set other args.
  3857			for _, f := range ft.Params().Fields().Slice() {
  3858				s.storeArgWithBase(args[0], f.Type, addr, off+f.Offset)
  3859				args = args[1:]
  3860			}
  3861	
  3862			// Call runtime.deferprocStack with pointer to _defer record.
  3863			arg0 := s.constOffPtrSP(types.Types[TUINTPTR], Ctxt.FixedFrameSize())
  3864			s.store(types.Types[TUINTPTR], arg0, addr)
  3865			call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, deferprocStack, s.mem())
  3866			if stksize < int64(Widthptr) {
  3867				// We need room for both the call to deferprocStack and the call to
  3868				// the deferred function.
  3869				stksize = int64(Widthptr)
  3870			}
  3871			call.AuxInt = stksize
  3872		} else {
  3873			// Store arguments to stack, including defer/go arguments and receiver for method calls.
  3874			// These are written in SP-offset order.
  3875			argStart := Ctxt.FixedFrameSize()
  3876			// Defer/go args.
  3877			if k != callNormal {
  3878				// Write argsize and closure (args to newproc/deferproc).
  3879				argsize := s.constInt32(types.Types[TUINT32], int32(stksize))
  3880				addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
  3881				s.store(types.Types[TUINT32], addr, argsize)
  3882				addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
  3883				s.store(types.Types[TUINTPTR], addr, closure)
  3884				stksize += 2 * int64(Widthptr)
  3885				argStart += 2 * int64(Widthptr)
  3886			}
  3887	
  3888			// Set receiver (for interface calls).
  3889			if rcvr != nil {
  3890				addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
  3891				s.store(types.Types[TUINTPTR], addr, rcvr)
  3892			}
  3893	
  3894			// Write args.
  3895			t := n.Left.Type
  3896			args := n.Rlist.Slice()
  3897			if n.Op == OCALLMETH {
  3898				f := t.Recv()
  3899				s.storeArg(args[0], f.Type, argStart+f.Offset)
  3900				args = args[1:]
  3901			}
  3902			for i, n := range args {
  3903				f := t.Params().Field(i)
  3904				s.storeArg(n, f.Type, argStart+f.Offset)
  3905			}
  3906	
  3907			// call target
  3908			switch {
  3909			case k == callDefer:
  3910				call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, deferproc, s.mem())
  3911			case k == callGo:
  3912				call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, newproc, s.mem())
  3913			case closure != nil:
  3914				// rawLoad because loading the code pointer from a
  3915				// closure is always safe, but IsSanitizerSafeAddr
  3916				// can't always figure that out currently, and it's
  3917				// critical that we not clobber any arguments already
  3918				// stored onto the stack.
  3919				codeptr = s.rawLoad(types.Types[TUINTPTR], closure)
  3920				call = s.newValue3(ssa.OpClosureCall, types.TypeMem, codeptr, closure, s.mem())
  3921			case codeptr != nil:
  3922				call = s.newValue2(ssa.OpInterCall, types.TypeMem, codeptr, s.mem())
  3923			case sym != nil:
  3924				call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, sym.Linksym(), s.mem())
  3925			default:
  3926				Fatalf("bad call type %v %v", n.Op, n)
  3927			}
  3928			call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
  3929		}
  3930		s.vars[&memVar] = call
  3931	
  3932		// Finish block for defers
  3933		if k == callDefer || k == callDeferStack {
  3934			b := s.endBlock()
  3935			b.Kind = ssa.BlockDefer
  3936			b.SetControl(call)
  3937			bNext := s.f.NewBlock(ssa.BlockPlain)
  3938			b.AddEdgeTo(bNext)
  3939			// Add recover edge to exit code.
  3940			r := s.f.NewBlock(ssa.BlockPlain)
  3941			s.startBlock(r)
  3942			s.exit()
  3943			b.AddEdgeTo(r)
  3944			b.Likely = ssa.BranchLikely
  3945			s.startBlock(bNext)
  3946		}
  3947	
  3948		res := n.Left.Type.Results()
  3949		if res.NumFields() == 0 || k != callNormal {
  3950			// call has no return value. Continue with the next statement.
  3951			return nil
  3952		}
  3953		fp := res.Field(0)
  3954		return s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize())
  3955	}
  3956	
  3957	// etypesign returns the signed-ness of e, for integer/pointer etypes.
  3958	// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
  3959	func etypesign(e types.EType) int8 {
  3960		switch e {
  3961		case TINT8, TINT16, TINT32, TINT64, TINT:
  3962			return -1
  3963		case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR:
  3964			return +1
  3965		}
  3966		return 0
  3967	}
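// For example: etypesign(TINT32) == -1, etypesign(TUINTPTR) == +1, and
// etypesign(TFLOAT64) == 0.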
  3968	
  3969	// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
  3970	// The value that the returned Value represents is guaranteed to be non-nil.
  3971	// If bounded is true then this address does not require a nil check for its operand
  3972	// even if that would otherwise be implied.
  3973	func (s *state) addr(n *Node, bounded bool) *ssa.Value {
  3974		if n.Op != ONAME {
  3975			s.pushLine(n.Pos)
  3976			defer s.popLine()
  3977		}
  3978	
  3979		t := types.NewPtr(n.Type)
  3980		switch n.Op {
  3981		case ONAME:
  3982			switch n.Class() {
  3983			case PEXTERN:
  3984				// global variable
  3985				v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym.Linksym(), s.sb)
  3986				// TODO: Make OpAddr use AuxInt as well as Aux.
  3987				if n.Xoffset != 0 {
  3988					v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
  3989				}
  3990				return v
  3991			case PPARAM:
  3992				// parameter slot
  3993				v := s.decladdrs[n]
  3994				if v != nil {
  3995					return v
  3996				}
  3997				if n == nodfp {
  3998					// Special arg that points to the frame pointer (Used by ORECOVER).
  3999					return s.entryNewValue2A(ssa.OpLocalAddr, t, n, s.sp, s.startmem)
  4000				}
  4001				s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
  4002				return nil
  4003			case PAUTO:
  4004				return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !n.IsAutoTmp())
  4005	
  4006			case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
  4007				// ensure that we reuse symbols for out parameters so
  4008				// that cse works on their addresses
  4009				return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true)
  4010			default:
  4011				s.Fatalf("variable address class %v not implemented", n.Class())
  4012				return nil
  4013			}
  4014		case ORESULT:
  4015			// load return from callee
  4016			return s.constOffPtrSP(t, n.Xoffset)
  4017		case OINDEX:
  4018			if n.Left.Type.IsSlice() {
  4019				a := s.expr(n.Left)
  4020				i := s.expr(n.Right)
  4021				len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], a)
  4022				i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
  4023				p := s.newValue1(ssa.OpSlicePtr, t, a)
  4024				return s.newValue2(ssa.OpPtrIndex, t, p, i)
  4025			} else { // array
  4026				a := s.addr(n.Left, bounded)
  4027				i := s.expr(n.Right)
  4028				len := s.constInt(types.Types[TINT], n.Left.Type.NumElem())
  4029				i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
  4030				return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left.Type.Elem()), a, i)
  4031			}
  4032		case ODEREF:
  4033			return s.exprPtr(n.Left, bounded, n.Pos)
  4034		case ODOT:
  4035			p := s.addr(n.Left, bounded)
  4036			return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
  4037		case ODOTPTR:
  4038			p := s.exprPtr(n.Left, bounded, n.Pos)
  4039			return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
  4040		case OCLOSUREVAR:
  4041			return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
  4042				s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr))
  4043		case OCONVNOP:
  4044			addr := s.addr(n.Left, bounded)
  4045			return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
  4046		case OCALLFUNC, OCALLINTER, OCALLMETH:
  4047			return s.call(n, callNormal)
  4048		case ODOTTYPE:
  4049			v, _ := s.dottype(n, false)
  4050			if v.Op != ssa.OpLoad {
  4051				s.Fatalf("dottype of non-load")
  4052			}
  4053			if v.Args[1] != s.mem() {
  4054				s.Fatalf("memory no longer live from dottype load")
  4055			}
  4056			return v.Args[0]
  4057		default:
  4058			s.Fatalf("unhandled addr %v", n.Op)
  4059			return nil
  4060		}
  4061	}
  4062	
  4063	// canSSA reports whether n is SSA-able.
  4064	// n must be an ONAME (or an ODOT sequence with an ONAME base).
  4065	func (s *state) canSSA(n *Node) bool {
  4066		if Debug['N'] != 0 {
  4067			return false
  4068		}
  4069		for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) {
  4070			n = n.Left
  4071		}
  4072		if n.Op != ONAME {
  4073			return false
  4074		}
  4075		if n.Addrtaken() {
  4076			return false
  4077		}
  4078		if n.isParamHeapCopy() {
  4079			return false
  4080		}
  4081		if n.Class() == PAUTOHEAP {
  4082			Fatalf("canSSA of PAUTOHEAP %v", n)
  4083		}
  4084		switch n.Class() {
  4085		case PEXTERN:
  4086			return false
  4087		case PPARAMOUT:
  4088			if s.hasdefer {
  4089				// TODO: handle this case? Named return values must be
  4090				// in memory so that the deferred function can see them.
  4091				// Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
  4092				// Or maybe not, see issue 18860.  Even unnamed return values
  4093				// must be written back so if a defer recovers, the caller can see them.
  4094				return false
  4095			}
  4096			if s.cgoUnsafeArgs {
  4097				// Cgo effectively takes the address of all result args,
  4098				// but the compiler can't see that.
  4099				return false
  4100			}
  4101		}
  4102		if n.Class() == PPARAM && n.Sym != nil && n.Sym.Name == ".this" {
  4103			// wrappers generated by genwrapper need to update
  4104			// the .this pointer in place.
  4105		// TODO: treat as a PPARAMOUT?
  4106			return false
  4107		}
  4108		return canSSAType(n.Type)
  4109		// TODO: try to make more variables SSAable?
  4110	}
  4111	
  4112	// canSSAType reports whether variables of type t are SSA-able.
  4113	func canSSAType(t *types.Type) bool {
  4114		dowidth(t)
  4115		if t.Width > int64(4*Widthptr) {
  4116			// 4*Widthptr is an arbitrary constant. We want it
  4117			// to be at least 3*Widthptr so slices can be registerized.
  4118			// Too big and we'll introduce too much register pressure.
  4119			return false
  4120		}
  4121		switch t.Etype {
  4122		case TARRAY:
  4123			// We can't do larger arrays because dynamic indexing is
  4124			// not supported on SSA variables.
  4125			// TODO: allow if all indexes are constant.
  4126			if t.NumElem() <= 1 {
  4127				return canSSAType(t.Elem())
  4128			}
  4129			return false
  4130		case TSTRUCT:
  4131			if t.NumFields() > ssa.MaxStruct {
  4132				return false
  4133			}
  4134			for _, t1 := range t.Fields().Slice() {
  4135				if !canSSAType(t1.Type) {
  4136					return false
  4137				}
  4138			}
  4139			return true
  4140		default:
  4141			return true
  4142		}
  4143	}
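
// A sketch of how these rules play out, assuming a 64-bit target
// (illustrative declarations, not from this package):
//
//	var a [1]int  // SSA-able: one element of an SSA-able type
//	var b [2]int  // not SSA-able: dynamic indexing of SSA variables is unsupported
//	var s []byte  // SSA-able: three words <= 4*Widthptr
//	var t struct{ p *int; n int }     // SSA-able: small, few fields
//	var u struct{ a, b, c, d, e int } // not SSA-able: 5 fields > ssa.MaxStruct, 40 bytes > 4*Widthptr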
  4144	
  4145	// exprPtr evaluates n to a pointer and nil-checks it.
  4146	func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value {
  4147		p := s.expr(n)
  4148		if bounded || n.NonNil() {
  4149			if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
  4150				s.f.Warnl(lineno, "removed nil check")
  4151			}
  4152			return p
  4153		}
  4154		s.nilCheck(p)
  4155		return p
  4156	}
  4157	
  4158	// nilCheck generates nil pointer checking code.
  4159	// Used only for automatically inserted nil checks,
  4160	// not for user code like 'x != nil'.
  4161	func (s *state) nilCheck(ptr *ssa.Value) {
  4162		if disable_checknil != 0 || s.curfn.Func.NilCheckDisabled() {
  4163			return
  4164		}
  4165		s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
  4166	}
  4167	
  4168	// boundsCheck generates bounds checking code. Checks if 0 <= idx <[=] len, branches to exit if not.
  4169	// Starts a new block on return.
  4170	// On input, len must be converted to full int width and be nonnegative.
  4171	// Returns idx converted to full int width.
  4172	// If bounded is true then caller guarantees the index is not out of bounds
  4173	// (but boundsCheck will still extend the index to full int width).
  4174	func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
  4175		idx = s.extendIndex(idx, len, kind, bounded)
  4176	
  4177		if bounded || Debug['B'] != 0 {
  4178			// If bounded or bounds checking is flag-disabled, then no check necessary,
  4179			// just return the extended index.
  4180			return idx
  4181		}
  4182	
  4183		bNext := s.f.NewBlock(ssa.BlockPlain)
  4184		bPanic := s.f.NewBlock(ssa.BlockExit)
  4185	
  4186		if !idx.Type.IsSigned() {
  4187			switch kind {
  4188			case ssa.BoundsIndex:
  4189				kind = ssa.BoundsIndexU
  4190			case ssa.BoundsSliceAlen:
  4191				kind = ssa.BoundsSliceAlenU
  4192			case ssa.BoundsSliceAcap:
  4193				kind = ssa.BoundsSliceAcapU
  4194			case ssa.BoundsSliceB:
  4195				kind = ssa.BoundsSliceBU
  4196			case ssa.BoundsSlice3Alen:
  4197				kind = ssa.BoundsSlice3AlenU
  4198			case ssa.BoundsSlice3Acap:
  4199				kind = ssa.BoundsSlice3AcapU
  4200			case ssa.BoundsSlice3B:
  4201				kind = ssa.BoundsSlice3BU
  4202			case ssa.BoundsSlice3C:
  4203				kind = ssa.BoundsSlice3CU
  4204			}
  4205		}
  4206	
  4207		var cmp *ssa.Value
  4208		if kind == ssa.BoundsIndex || kind == ssa.BoundsIndexU {
  4209			cmp = s.newValue2(ssa.OpIsInBounds, types.Types[TBOOL], idx, len)
  4210		} else {
  4211			cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[TBOOL], idx, len)
  4212		}
  4213		b := s.endBlock()
  4214		b.Kind = ssa.BlockIf
  4215		b.SetControl(cmp)
  4216		b.Likely = ssa.BranchLikely
  4217		b.AddEdgeTo(bNext)
  4218		b.AddEdgeTo(bPanic)
  4219	
  4220		s.startBlock(bPanic)
  4221		if thearch.LinkArch.Family == sys.Wasm {
  4222			// TODO(khr): figure out how to do "register" based calling convention for bounds checks.
  4223			// Should be similar to gcWriteBarrier, but I can't make it work.
  4224			s.rtcall(BoundsCheckFunc[kind], false, nil, idx, len)
  4225		} else {
  4226			mem := s.newValue3I(ssa.OpPanicBounds, types.TypeMem, int64(kind), idx, len, s.mem())
  4227			s.endBlock().SetControl(mem)
  4228		}
  4229		s.startBlock(bNext)
  4230	
  4231		return idx
  4232	}
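
// Illustration (a sketch, not the literal SSA): for an index expression a[i]
// with a of slice type, boundsCheck emits the equivalent of
//
//	if !(uint(i) < uint(len(a))) { goto bPanic } // OpIsInBounds, BranchLikely taken
//	bNext:  ... use a[i] ...
//	bPanic: OpPanicBounds (or a runtime call on wasm)
//
// whereas slicing forms such as a[i:j] use OpIsSliceInBounds (<=) instead.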
  4233	
  4234	// If cmp (a bool) is false, panic using the given function.
  4235	func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
  4236		b := s.endBlock()
  4237		b.Kind = ssa.BlockIf
  4238		b.SetControl(cmp)
  4239		b.Likely = ssa.BranchLikely
  4240		bNext := s.f.NewBlock(ssa.BlockPlain)
  4241		line := s.peekPos()
  4242		pos := Ctxt.PosTable.Pos(line)
  4243		fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()}
  4244		bPanic := s.panics[fl]
  4245		if bPanic == nil {
  4246			bPanic = s.f.NewBlock(ssa.BlockPlain)
  4247			s.panics[fl] = bPanic
  4248			s.startBlock(bPanic)
  4249			// The panic call takes/returns memory to ensure that the right
  4250			// memory state is observed if the panic happens.
  4251			s.rtcall(fn, false, nil)
  4252		}
  4253		b.AddEdgeTo(bNext)
  4254		b.AddEdgeTo(bPanic)
  4255		s.startBlock(bNext)
  4256	}
  4257	
  4258	func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value {
  4259		needcheck := true
  4260		switch b.Op {
  4261		case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
  4262			if b.AuxInt != 0 {
  4263				needcheck = false
  4264			}
  4265		}
  4266		if needcheck {
  4267			// do a size-appropriate check for zero
  4268			cmp := s.newValue2(s.ssaOp(ONE, n.Type), types.Types[TBOOL], b, s.zeroVal(n.Type))
  4269			s.check(cmp, panicdivide)
  4270		}
  4271		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
  4272	}
  4273	
  4274	// rtcall issues a call to the given runtime function fn with the listed args.
  4275	// Returns a slice of results of the given result types.
  4276	// The call is added to the end of the current block.
  4277	// If returns is false, the block is marked as an exit block.
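// For example (a sketch with hypothetical arguments and results):
//
//	res := s.rtcall(growslice, true, []*types.Type{...}, a, b)
//
// stores a at SP+FixedFrameSize and b at the next suitably aligned offset,
// issues an OpStaticCall to growslice, then loads each result from the
// aligned offsets following the arguments.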
  4278	func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
  4279		// Write args to the stack
  4280		off := Ctxt.FixedFrameSize()
  4281		for _, arg := range args {
  4282			t := arg.Type
  4283			off = Rnd(off, t.Alignment())
  4284			ptr := s.constOffPtrSP(t.PtrTo(), off)
  4285			size := t.Size()
  4286			s.store(t, ptr, arg)
  4287			off += size
  4288		}
  4289		off = Rnd(off, int64(Widthreg))
  4290	
  4291		// Issue call
  4292		call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, fn, s.mem())
  4293		s.vars[&memVar] = call
  4294	
  4295		if !returns {
  4296			// Finish block
  4297			b := s.endBlock()
  4298			b.Kind = ssa.BlockExit
  4299			b.SetControl(call)
  4300			call.AuxInt = off - Ctxt.FixedFrameSize()
  4301			if len(results) > 0 {
  4302				Fatalf("panic call can't have results")
  4303			}
  4304			return nil
  4305		}
  4306	
  4307		// Load results
  4308		res := make([]*ssa.Value, len(results))
  4309		for i, t := range results {
  4310			off = Rnd(off, t.Alignment())
  4311			ptr := s.constOffPtrSP(types.NewPtr(t), off)
  4312			res[i] = s.load(t, ptr)
  4313			off += t.Size()
  4314		}
  4315		off = Rnd(off, int64(Widthptr))
  4316	
  4317		// Remember how much callee stack space we needed.
  4318		call.AuxInt = off
  4319	
  4320		return res
  4321	}
  4322	
  4323	// do *left = right for type t.
  4324	func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) {
  4325		s.instrument(t, left, true)
  4326	
  4327		if skip == 0 && (!types.Haspointers(t) || ssa.IsStackAddr(left)) {
  4328			// Known to not have write barrier. Store the whole type.
  4329			s.vars[&memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
  4330			return
  4331		}
  4332	
  4333		// store scalar fields first, so write barrier stores for
  4334		// pointer fields can be grouped together, and scalar values
  4335		// don't need to be live across the write barrier call.
  4336		// TODO: if the writebarrier pass knows how to reorder stores,
  4337		// we can do a single store here as long as skip==0.
  4338		s.storeTypeScalars(t, left, right, skip)
  4339		if skip&skipPtr == 0 && types.Haspointers(t) {
  4340			s.storeTypePtrs(t, left, right)
  4341		}
  4342	}
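
// Illustration (a sketch of the decomposition, not literal SSA): storing a
// string to a location that may need write barriers becomes roughly
//
//	*(left+PtrSize) = len(right) // scalar part, via storeTypeScalars
//	*left = ptr(right)           // pointer part, via storeTypePtrs
//
// so pointer stores are grouped together and scalar values need not be
// live across the write barrier call.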
  4343	
  4344	// do *left = right for all scalar (non-pointer) parts of t.
  4345	func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
  4346		switch {
  4347		case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
  4348			s.store(t, left, right)
  4349		case t.IsPtrShaped():
  4350			// no scalar fields.
  4351		case t.IsString():
  4352			if skip&skipLen != 0 {
  4353				return
  4354			}
  4355			len := s.newValue1(ssa.OpStringLen, types.Types[TINT], right)
  4356			lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
  4357			s.store(types.Types[TINT], lenAddr, len)
  4358		case t.IsSlice():
  4359			if skip&skipLen == 0 {
  4360				len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], right)
  4361				lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
  4362				s.store(types.Types[TINT], lenAddr, len)
  4363			}
  4364			if skip&skipCap == 0 {
  4365				cap := s.newValue1(ssa.OpSliceCap, types.Types[TINT], right)
  4366				capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
  4367				s.store(types.Types[TINT], capAddr, cap)
  4368			}
  4369		case t.IsInterface():
  4370			// itab field doesn't need a write barrier (even though it is a pointer).
  4371			itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
  4372			s.store(types.Types[TUINTPTR], left, itab)
  4373		case t.IsStruct():
  4374			n := t.NumFields()
  4375			for i := 0; i < n; i++ {
  4376				ft := t.FieldType(i)
  4377				addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
  4378				val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
  4379				s.storeTypeScalars(ft, addr, val, 0)
  4380			}
  4381		case t.IsArray() && t.NumElem() == 0:
  4382			// nothing
  4383		case t.IsArray() && t.NumElem() == 1:
  4384			s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
  4385		default:
  4386			s.Fatalf("bad write barrier type %v", t)
  4387		}
  4388	}
  4389	
  4390	// do *left = right for all pointer parts of t.
  4391	func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
  4392		switch {
  4393		case t.IsPtrShaped():
  4394			s.store(t, left, right)
  4395		case t.IsString():
  4396			ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
  4397			s.store(s.f.Config.Types.BytePtr, left, ptr)
  4398		case t.IsSlice():
  4399			elType := types.NewPtr(t.Elem())
  4400			ptr := s.newValue1(ssa.OpSlicePtr, elType, right)
  4401			s.store(elType, left, ptr)
  4402		case t.IsInterface():
  4403			// itab field is treated as a scalar.
  4404			idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
  4405			idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
  4406			s.store(s.f.Config.Types.BytePtr, idataAddr, idata)
  4407		case t.IsStruct():
  4408			n := t.NumFields()
  4409			for i := 0; i < n; i++ {
  4410				ft := t.FieldType(i)
  4411				if !types.Haspointers(ft) {
  4412					continue
  4413				}
  4414				addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
  4415				val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
  4416				s.storeTypePtrs(ft, addr, val)
  4417			}
  4418		case t.IsArray() && t.NumElem() == 0:
  4419			// nothing
  4420		case t.IsArray() && t.NumElem() == 1:
  4421			s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
  4422		default:
  4423			s.Fatalf("bad write barrier type %v", t)
  4424		}
  4425	}
  4426	
  4427	func (s *state) storeArg(n *Node, t *types.Type, off int64) {
  4428		s.storeArgWithBase(n, t, s.sp, off)
  4429	}
  4430	
  4431	func (s *state) storeArgWithBase(n *Node, t *types.Type, base *ssa.Value, off int64) {
  4432		pt := types.NewPtr(t)
  4433		var addr *ssa.Value
  4434		if base == s.sp {
  4435			// Use special routine that avoids allocation on duplicate offsets.
  4436			addr = s.constOffPtrSP(pt, off)
  4437		} else {
  4438			addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
  4439		}
  4440	
  4441		if !canSSAType(t) {
  4442			a := s.addr(n, false)
  4443			s.move(t, addr, a)
  4444			return
  4445		}
  4446	
  4447		a := s.expr(n)
  4448		s.storeType(t, addr, a, 0, false)
  4449	}
  4450	
  4451	// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
  4452	// i,j,k may be nil, in which case they are set to their default value.
  4453	// v may be a slice, string or pointer to an array.
  4454	func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) {
  4455		t := v.Type
  4456		var ptr, len, cap *ssa.Value
  4457		switch {
  4458		case t.IsSlice():
  4459			ptr = s.newValue1(ssa.OpSlicePtr, types.NewPtr(t.Elem()), v)
  4460			len = s.newValue1(ssa.OpSliceLen, types.Types[TINT], v)
  4461			cap = s.newValue1(ssa.OpSliceCap, types.Types[TINT], v)
  4462		case t.IsString():
  4463			ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[TUINT8]), v)
  4464			len = s.newValue1(ssa.OpStringLen, types.Types[TINT], v)
  4465			cap = len
  4466		case t.IsPtr():
  4467			if !t.Elem().IsArray() {
  4468				s.Fatalf("bad ptr to array in slice %v\n", t)
  4469			}
  4470			s.nilCheck(v)
  4471			ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v)
  4472			len = s.constInt(types.Types[TINT], t.Elem().NumElem())
  4473			cap = len
  4474		default:
  4475			s.Fatalf("bad type in slice %v\n", t)
  4476		}
  4477	
  4478		// Set default values
  4479		if i == nil {
  4480			i = s.constInt(types.Types[TINT], 0)
  4481		}
  4482		if j == nil {
  4483			j = len
  4484		}
  4485		three := true
  4486		if k == nil {
  4487			three = false
  4488			k = cap
  4489		}
  4490	
  4491		// Panic if slice indices are not in bounds.
  4492		// Make sure we check these in reverse order so that we're always
  4493		// comparing against a value known to be nonnegative. See issue 28797.
  4494		if three {
  4495			if k != cap {
  4496				kind := ssa.BoundsSlice3Alen
  4497				if t.IsSlice() {
  4498					kind = ssa.BoundsSlice3Acap
  4499				}
  4500				k = s.boundsCheck(k, cap, kind, bounded)
  4501			}
  4502			if j != k {
  4503				j = s.boundsCheck(j, k, ssa.BoundsSlice3B, bounded)
  4504			}
  4505			i = s.boundsCheck(i, j, ssa.BoundsSlice3C, bounded)
  4506		} else {
  4507			if j != k {
  4508				kind := ssa.BoundsSliceAlen
  4509				if t.IsSlice() {
  4510					kind = ssa.BoundsSliceAcap
  4511				}
  4512				j = s.boundsCheck(j, k, kind, bounded)
  4513			}
  4514			i = s.boundsCheck(i, j, ssa.BoundsSliceB, bounded)
  4515		}
  4516	
  4517		// Word-sized integer operations.
  4518		subOp := s.ssaOp(OSUB, types.Types[TINT])
  4519		mulOp := s.ssaOp(OMUL, types.Types[TINT])
  4520		andOp := s.ssaOp(OAND, types.Types[TINT])
  4521	
  4522		// Calculate the length (rlen) and capacity (rcap) of the new slice.
  4523		// For strings the capacity of the result is unimportant. However,
  4524		// we use rcap to test if we've generated a zero-length slice.
   4525		// For strings, use the length for that test.
  4526		rlen := s.newValue2(subOp, types.Types[TINT], j, i)
  4527		rcap := rlen
  4528		if j != k && !t.IsString() {
  4529			rcap = s.newValue2(subOp, types.Types[TINT], k, i)
  4530		}
  4531	
  4532		if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
  4533			// No pointer arithmetic necessary.
  4534			return ptr, rlen, rcap
  4535		}
  4536	
  4537		// Calculate the base pointer (rptr) for the new slice.
  4538		//
  4539		// Generate the following code assuming that indexes are in bounds.
  4540		// The masking is to make sure that we don't generate a slice
  4541		// that points to the next object in memory. We cannot just set
  4542		// the pointer to nil because then we would create a nil slice or
  4543		// string.
  4544		//
  4545		//     rcap = k - i
  4546		//     rlen = j - i
  4547		//     rptr = ptr + (mask(rcap) & (i * stride))
  4548		//
  4549		// Where mask(x) is 0 if x==0 and -1 if x>0 and stride is the width
  4550		// of the element type.
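	// For example (a sketch, with 8-byte elements): for s[2:2] with
	// len(s) == cap(s) == 2, rcap == 0, so mask(rcap) == 0 and the masked
	// delta is 0; rptr stays at ptr rather than pointing one past the end
	// of the backing array. For s[1:2], rcap > 0, mask(rcap) == -1, and
	// rptr = ptr + 1*8.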
  4551		stride := s.constInt(types.Types[TINT], ptr.Type.Elem().Width)
  4552	
  4553		// The delta is the number of bytes to offset ptr by.
  4554		delta := s.newValue2(mulOp, types.Types[TINT], i, stride)
  4555	
  4556		// If we're slicing to the point where the capacity is zero,
  4557		// zero out the delta.
  4558		mask := s.newValue1(ssa.OpSlicemask, types.Types[TINT], rcap)
  4559		delta = s.newValue2(andOp, types.Types[TINT], delta, mask)
  4560	
  4561		// Compute rptr = ptr + delta.
  4562		rptr := s.newValue2(ssa.OpAddPtr, ptr.Type, ptr, delta)
  4563	
  4564		return rptr, rlen, rcap
  4565	}
  4566	
  4567	type u642fcvtTab struct {
  4568		geq, cvt2F, and, rsh, or, add ssa.Op
  4569		one                           func(*state, *types.Type, int64) *ssa.Value
  4570	}
  4571	
  4572	var u64_f64 = u642fcvtTab{
  4573		geq:   ssa.OpGeq64,
  4574		cvt2F: ssa.OpCvt64to64F,
  4575		and:   ssa.OpAnd64,
  4576		rsh:   ssa.OpRsh64Ux64,
  4577		or:    ssa.OpOr64,
  4578		add:   ssa.OpAdd64F,
  4579		one:   (*state).constInt64,
  4580	}
  4581	
  4582	var u64_f32 = u642fcvtTab{
  4583		geq:   ssa.OpGeq64,
  4584		cvt2F: ssa.OpCvt64to32F,
  4585		and:   ssa.OpAnd64,
  4586		rsh:   ssa.OpRsh64Ux64,
  4587		or:    ssa.OpOr64,
  4588		add:   ssa.OpAdd32F,
  4589		one:   (*state).constInt64,
  4590	}
  4591	
  4592	func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
  4593		return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
  4594	}
  4595	
  4596	func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
  4597		return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
  4598	}
  4599	
  4600	func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
  4601		// if x >= 0 {
  4602		//    result = (floatY) x
  4603		// } else {
  4604		// 	  y = uintX(x) ; y = x & 1
  4605		// 	  z = uintX(x) ; z = z >> 1
  4607		// 	  z = z | y
  4608		// 	  result = floatY(z)
  4609		// 	  result = result + result
  4610		// }
  4611		//
  4612		// Code borrowed from old code generator.
  4613		// What's going on: large 64-bit "unsigned" looks like
  4614		// negative number to hardware's integer-to-float
  4615		// conversion. However, because the mantissa is only
  4616		// 63 bits, we don't need the LSB, so instead we do an
  4617		// unsigned right shift (divide by two), convert, and
  4618		// double. However, before we do that, we need to be
  4619		// sure that we do not lose a "1" if that made the
  4620		// difference in the resulting rounding. Therefore, we
  4621		// preserve it, and OR (not ADD) it back in. The case
  4622		// that matters is when the eleven discarded bits are
  4623		// equal to 10000000001; that rounds up, and the 1 cannot
  4624		// be lost else it would round down if the LSB of the
  4625		// candidate mantissa is 0.
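	// Worked example (a sketch): for x = 1<<63 + 1025, the eleven discarded
	// bits are 10000000001. Then y = 1 and z = x>>1 | y = 1<<62 + 513;
	// converting z and doubling rounds up to 1<<63 + 2048, matching exact
	// conversion of x, whereas without the OR z = 1<<62 + 512 is a
	// round-to-even tie and would round down.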
  4626		cmp := s.newValue2(cvttab.geq, types.Types[TBOOL], x, s.zeroVal(ft))
  4627		b := s.endBlock()
  4628		b.Kind = ssa.BlockIf
  4629		b.SetControl(cmp)
  4630		b.Likely = ssa.BranchLikely
  4631	
  4632		bThen := s.f.NewBlock(ssa.BlockPlain)
  4633		bElse := s.f.NewBlock(ssa.BlockPlain)
  4634		bAfter := s.f.NewBlock(ssa.BlockPlain)
  4635	
  4636		b.AddEdgeTo(bThen)
  4637		s.startBlock(bThen)
  4638		a0 := s.newValue1(cvttab.cvt2F, tt, x)
  4639		s.vars[n] = a0
  4640		s.endBlock()
  4641		bThen.AddEdgeTo(bAfter)
  4642	
  4643		b.AddEdgeTo(bElse)
  4644		s.startBlock(bElse)
  4645		one := cvttab.one(s, ft, 1)
  4646		y := s.newValue2(cvttab.and, ft, x, one)
  4647		z := s.newValue2(cvttab.rsh, ft, x, one)
  4648		z = s.newValue2(cvttab.or, ft, z, y)
  4649		a := s.newValue1(cvttab.cvt2F, tt, z)
  4650		a1 := s.newValue2(cvttab.add, tt, a, a)
  4651		s.vars[n] = a1
  4652		s.endBlock()
  4653		bElse.AddEdgeTo(bAfter)
  4654	
  4655		s.startBlock(bAfter)
  4656		return s.variable(n, n.Type)
  4657	}
  4658	
  4659	type u322fcvtTab struct {
  4660		cvtI2F, cvtF2F ssa.Op
  4661	}
  4662	
  4663	var u32_f64 = u322fcvtTab{
  4664		cvtI2F: ssa.OpCvt32to64F,
  4665		cvtF2F: ssa.OpCopy,
  4666	}
  4667	
  4668	var u32_f32 = u322fcvtTab{
  4669		cvtI2F: ssa.OpCvt32to32F,
  4670		cvtF2F: ssa.OpCvt64Fto32F,
  4671	}
  4672	
  4673	func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
  4674		return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
  4675	}
  4676	
  4677	func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
  4678		return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
  4679	}
  4680	
  4681	func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
  4682		// if x >= 0 {
  4683		// 	result = floatY(x)
  4684		// } else {
  4685		// 	result = floatY(float64(x) + (1<<32))
  4686		// }
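	// For example (a sketch): x = 0xFFFFFFFF reads as -1 when treated as a
	// signed 32-bit value, so the else branch computes
	// float64(-1) + 1<<32 = 4294967295, the correct unsigned interpretation.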
  4687		cmp := s.newValue2(ssa.OpGeq32, types.Types[TBOOL], x, s.zeroVal(ft))
  4688		b := s.endBlock()
  4689		b.Kind = ssa.BlockIf
  4690		b.SetControl(cmp)
  4691		b.Likely = ssa.BranchLikely
  4692	
  4693		bThen := s.f.NewBlock(ssa.BlockPlain)
  4694		bElse := s.f.NewBlock(ssa.BlockPlain)
  4695		bAfter := s.f.NewBlock(ssa.BlockPlain)
  4696	
  4697		b.AddEdgeTo(bThen)
  4698		s.startBlock(bThen)
  4699		a0 := s.newValue1(cvttab.cvtI2F, tt, x)
  4700		s.vars[n] = a0
  4701		s.endBlock()
  4702		bThen.AddEdgeTo(bAfter)
  4703	
  4704		b.AddEdgeTo(bElse)
  4705		s.startBlock(bElse)
  4706		a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[TFLOAT64], x)
  4707		twoToThe32 := s.constFloat64(types.Types[TFLOAT64], float64(1<<32))
  4708		a2 := s.newValue2(ssa.OpAdd64F, types.Types[TFLOAT64], a1, twoToThe32)
  4709		a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
  4710	
  4711		s.vars[n] = a3
  4712		s.endBlock()
  4713		bElse.AddEdgeTo(bAfter)
  4714	
  4715		s.startBlock(bAfter)
  4716		return s.variable(n, n.Type)
  4717	}
  4718	
  4719	// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
  4720	func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
  4721		if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
  4722			s.Fatalf("node must be a map or a channel")
  4723		}
  4724		// if n == nil {
  4725		//   return 0
  4726		// } else {
  4727		//   // len
  4728		//   return *((*int)n)
  4729		//   // cap
  4730		//   return *(((*int)n)+1)
  4731		// }
  4732		lenType := n.Type
  4733		nilValue := s.constNil(types.Types[TUINTPTR])
  4734		cmp := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], x, nilValue)
  4735		b := s.endBlock()
  4736		b.Kind = ssa.BlockIf
  4737		b.SetControl(cmp)
  4738		b.Likely = ssa.BranchUnlikely
  4739	
  4740		bThen := s.f.NewBlock(ssa.BlockPlain)
  4741		bElse := s.f.NewBlock(ssa.BlockPlain)
  4742		bAfter := s.f.NewBlock(ssa.BlockPlain)
  4743	
  4744		// length/capacity of a nil map/chan is zero
  4745		b.AddEdgeTo(bThen)
  4746		s.startBlock(bThen)
  4747		s.vars[n] = s.zeroVal(lenType)
  4748		s.endBlock()
  4749		bThen.AddEdgeTo(bAfter)
  4750	
  4751		b.AddEdgeTo(bElse)
  4752		s.startBlock(bElse)
  4753		switch n.Op {
  4754		case OLEN:
  4755			// length is stored in the first word for map/chan
  4756			s.vars[n] = s.load(lenType, x)
  4757		case OCAP:
  4758			// capacity is stored in the second word for chan
  4759			sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
  4760			s.vars[n] = s.load(lenType, sw)
  4761		default:
  4762			s.Fatalf("op must be OLEN or OCAP")
  4763		}
  4764		s.endBlock()
  4765		bElse.AddEdgeTo(bAfter)
  4766	
  4767		s.startBlock(bAfter)
  4768		return s.variable(n, lenType)
  4769	}
  4770	
  4771	type f2uCvtTab struct {
  4772		ltf, cvt2U, subf, or ssa.Op
  4773		floatValue           func(*state, *types.Type, float64) *ssa.Value
  4774		intValue             func(*state, *types.Type, int64) *ssa.Value
  4775		cutoff               uint64
  4776	}
  4777	
  4778	var f32_u64 = f2uCvtTab{
  4779		ltf:        ssa.OpLess32F,
  4780		cvt2U:      ssa.OpCvt32Fto64,
  4781		subf:       ssa.OpSub32F,
  4782		or:         ssa.OpOr64,
  4783		floatValue: (*state).constFloat32,
  4784		intValue:   (*state).constInt64,
  4785		cutoff:     1 << 63,
  4786	}
  4787	
  4788	var f64_u64 = f2uCvtTab{
  4789		ltf:        ssa.OpLess64F,
  4790		cvt2U:      ssa.OpCvt64Fto64,
  4791		subf:       ssa.OpSub64F,
  4792		or:         ssa.OpOr64,
  4793		floatValue: (*state).constFloat64,
  4794		intValue:   (*state).constInt64,
  4795		cutoff:     1 << 63,
  4796	}
  4797	
  4798	var f32_u32 = f2uCvtTab{
  4799		ltf:        ssa.OpLess32F,
  4800		cvt2U:      ssa.OpCvt32Fto32,
  4801		subf:       ssa.OpSub32F,
  4802		or:         ssa.OpOr32,
  4803		floatValue: (*state).constFloat32,
  4804		intValue:   func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
  4805		cutoff:     1 << 31,
  4806	}
  4807	
  4808	var f64_u32 = f2uCvtTab{
  4809		ltf:        ssa.OpLess64F,
  4810		cvt2U:      ssa.OpCvt64Fto32,
  4811		subf:       ssa.OpSub64F,
  4812		or:         ssa.OpOr32,
  4813		floatValue: (*state).constFloat64,
  4814		intValue:   func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
  4815		cutoff:     1 << 31,
  4816	}
  4817	
  4818	func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
  4819		return s.floatToUint(&f32_u64, n, x, ft, tt)
  4820	}
  4821	func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
  4822		return s.floatToUint(&f64_u64, n, x, ft, tt)
  4823	}
  4824	
  4825	func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
  4826		return s.floatToUint(&f32_u32, n, x, ft, tt)
  4827	}
  4828	
  4829	func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
  4830		return s.floatToUint(&f64_u32, n, x, ft, tt)
  4831	}
  4832	
  4833	func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
  4834		// cutoff:=1<<(intY_Size-1)
  4835		// if x < floatX(cutoff) {
  4836		// 	result = uintY(x)
  4837		// } else {
  4838		// 	y = x - floatX(cutoff)
  4839		// 	z = uintY(y)
  4840		// 	result = z | -(cutoff)
  4841		// }
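	// Worked example (a sketch, float64 to uint64): for x = 1<<63 + 2048,
	// x is not < cutoff (1<<63), so y = uint64(x - 1<<63) = 2048, and
	// ORing in -cutoff (bit pattern 1<<63) yields result = 1<<63 + 2048.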
  4842		cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
  4843		cmp := s.newValue2(cvttab.ltf, types.Types[TBOOL], x, cutoff)
  4844		b := s.endBlock()
  4845		b.Kind = ssa.BlockIf
  4846		b.SetControl(cmp)
  4847		b.Likely = ssa.BranchLikely
  4848	
  4849		bThen := s.f.NewBlock(ssa.BlockPlain)
  4850		bElse := s.f.NewBlock(ssa.BlockPlain)
  4851		bAfter := s.f.NewBlock(ssa.BlockPlain)
  4852	
  4853		b.AddEdgeTo(bThen)
  4854		s.startBlock(bThen)
  4855		a0 := s.newValue1(cvttab.cvt2U, tt, x)
  4856		s.vars[n] = a0
  4857		s.endBlock()
  4858		bThen.AddEdgeTo(bAfter)
  4859	
  4860		b.AddEdgeTo(bElse)
  4861		s.startBlock(bElse)
  4862		y := s.newValue2(cvttab.subf, ft, x, cutoff)
  4863		y = s.newValue1(cvttab.cvt2U, tt, y)
  4864		z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
  4865		a1 := s.newValue2(cvttab.or, tt, y, z)
  4866		s.vars[n] = a1
  4867		s.endBlock()
  4868		bElse.AddEdgeTo(bAfter)
  4869	
  4870		s.startBlock(bAfter)
  4871		return s.variable(n, n.Type)
  4872	}
  4873	
  4874	// dottype generates SSA for a type assertion node.
  4875	// commaok indicates whether to panic or return a bool.
  4876	// If commaok is false, resok will be nil.
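// For orientation: v := x.(T) compiles with commaok=false and panics on
// failure, while v, ok := x.(T) compiles with commaok=true and reports
// failure through resok instead of panicking.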
  4877	func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
  4878		iface := s.expr(n.Left)   // input interface
  4879		target := s.expr(n.Right) // target type
  4880		byteptr := s.f.Config.Types.BytePtr
  4881	
  4882		if n.Type.IsInterface() {
  4883			if n.Type.IsEmptyInterface() {
  4884				// Converting to an empty interface.
  4885				// Input could be an empty or nonempty interface.
  4886				if Debug_typeassert > 0 {
  4887					Warnl(n.Pos, "type assertion inlined")
  4888				}
  4889	
  4890				// Get itab/type field from input.
  4891				itab := s.newValue1(ssa.OpITab, byteptr, iface)
  4892				// Conversion succeeds iff that field is not nil.
  4893				cond := s.newValue2(ssa.OpNeqPtr, types.Types[TBOOL], itab, s.constNil(byteptr))
  4894	
  4895				if n.Left.Type.IsEmptyInterface() && commaok {
  4896					// Converting empty interface to empty interface with ,ok is just a nil check.
  4897					return iface, cond
  4898				}
  4899	
  4900				// Branch on nilness.
  4901				b := s.endBlock()
  4902				b.Kind = ssa.BlockIf
  4903				b.SetControl(cond)
  4904				b.Likely = ssa.BranchLikely
  4905				bOk := s.f.NewBlock(ssa.BlockPlain)
  4906				bFail := s.f.NewBlock(ssa.BlockPlain)
  4907				b.AddEdgeTo(bOk)
  4908				b.AddEdgeTo(bFail)
  4909	
  4910				if !commaok {
  4911					// On failure, panic by calling panicnildottype.
  4912					s.startBlock(bFail)
  4913					s.rtcall(panicnildottype, false, nil, target)
  4914	
  4915					// On success, return (perhaps modified) input interface.
  4916					s.startBlock(bOk)
  4917					if n.Left.Type.IsEmptyInterface() {
  4918						res = iface // Use input interface unchanged.
  4919						return
  4920					}
  4921					// Load type out of itab, build interface with existing idata.
  4922					off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
  4923					typ := s.load(byteptr, off)
  4924					idata := s.newValue1(ssa.OpIData, n.Type, iface)
  4925					res = s.newValue2(ssa.OpIMake, n.Type, typ, idata)
  4926					return
  4927				}
  4928	
  4929				s.startBlock(bOk)
  4930				// nonempty -> empty
  4931				// Need to load type from itab
  4932				off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
  4933				s.vars[&typVar] = s.load(byteptr, off)
  4934				s.endBlock()
  4935	
  4936				// itab is nil, might as well use that as the nil result.
  4937				s.startBlock(bFail)
  4938				s.vars[&typVar] = itab
  4939				s.endBlock()
  4940	
  4941				// Merge point.
  4942				bEnd := s.f.NewBlock(ssa.BlockPlain)
  4943				bOk.AddEdgeTo(bEnd)
  4944				bFail.AddEdgeTo(bEnd)
  4945				s.startBlock(bEnd)
  4946				idata := s.newValue1(ssa.OpIData, n.Type, iface)
  4947				res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata)
  4948				resok = cond
  4949				delete(s.vars, &typVar)
  4950				return
  4951			}
  4952			// converting to a nonempty interface needs a runtime call.
  4953			if Debug_typeassert > 0 {
  4954				Warnl(n.Pos, "type assertion not inlined")
  4955			}
  4956			if n.Left.Type.IsEmptyInterface() {
  4957				if commaok {
  4958					call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
  4959					return call[0], call[1]
  4960				}
  4961				return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil
  4962			}
  4963			if commaok {
  4964				call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
  4965				return call[0], call[1]
  4966			}
  4967			return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil
  4968		}
  4969	
  4970		if Debug_typeassert > 0 {
  4971			Warnl(n.Pos, "type assertion inlined")
  4972		}
  4973	
  4974		// Converting to a concrete type.
  4975		direct := isdirectiface(n.Type)
  4976		itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
  4980		var targetITab *ssa.Value
  4981		if n.Left.Type.IsEmptyInterface() {
  4982			// Looking for pointer to target type.
  4983			targetITab = target
  4984		} else {
  4985			// Looking for pointer to itab for target type and source interface.
  4986			targetITab = s.expr(n.List.First())
  4987		}
  4988	
  4989		var tmp *Node       // temporary for use with large types
  4990		var addr *ssa.Value // address of tmp
  4991		if commaok && !canSSAType(n.Type) {
  4992			// unSSAable type, use temporary.
  4993			// TODO: get rid of some of these temporaries.
  4994			tmp = tempAt(n.Pos, s.curfn, n.Type)
  4995			s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
  4996			addr = s.addr(tmp, false)
  4997		}
  4998	
  4999		cond := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], itab, targetITab)
  5000		b := s.endBlock()
  5001		b.Kind = ssa.BlockIf
  5002		b.SetControl(cond)
  5003		b.Likely = ssa.BranchLikely
  5004	
  5005		bOk := s.f.NewBlock(ssa.BlockPlain)
  5006		bFail := s.f.NewBlock(ssa.BlockPlain)
  5007		b.AddEdgeTo(bOk)
  5008		b.AddEdgeTo(bFail)
  5009	
  5010		if !commaok {
  5011			// on failure, panic by calling panicdottype
  5012			s.startBlock(bFail)
  5013			taddr := s.expr(n.Right.Right)
  5014			if n.Left.Type.IsEmptyInterface() {
  5015				s.rtcall(panicdottypeE, false, nil, itab, target, taddr)
  5016			} else {
  5017				s.rtcall(panicdottypeI, false, nil, itab, target, taddr)
  5018			}
  5019	
  5020			// on success, return data from interface
  5021			s.startBlock(bOk)
  5022			if direct {
  5023				return s.newValue1(ssa.OpIData, n.Type, iface), nil
  5024			}
  5025			p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
  5026			return s.load(n.Type, p), nil
  5027		}
  5028	
  5029		// commaok is the more complicated case because we have
  5030		// a control flow merge point.
  5031		bEnd := s.f.NewBlock(ssa.BlockPlain)
  5032		// Note that we need a new valVar each time (unlike okVar where we can
  5033		// reuse the variable) because it might have a different type every time.
  5034		valVar := &Node{Op: ONAME, Sym: &types.Sym{Name: "val"}}
  5035	
  5036		// type assertion succeeded
  5037		s.startBlock(bOk)
  5038		if tmp == nil {
  5039			if direct {
  5040				s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface)
  5041			} else {
  5042				p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
  5043				s.vars[valVar] = s.load(n.Type, p)
  5044			}
  5045		} else {
  5046			p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
  5047			s.move(n.Type, addr, p)
  5048		}
  5049		s.vars[&okVar] = s.constBool(true)
  5050		s.endBlock()
  5051		bOk.AddEdgeTo(bEnd)
  5052	
  5053		// type assertion failed
  5054		s.startBlock(bFail)
  5055		if tmp == nil {
  5056			s.vars[valVar] = s.zeroVal(n.Type)
  5057		} else {
  5058			s.zero(n.Type, addr)
  5059		}
  5060		s.vars[&okVar] = s.constBool(false)
  5061		s.endBlock()
  5062		bFail.AddEdgeTo(bEnd)
  5063	
  5064		// merge point
  5065		s.startBlock(bEnd)
  5066		if tmp == nil {
  5067			res = s.variable(valVar, n.Type)
  5068			delete(s.vars, valVar)
  5069		} else {
  5070			res = s.load(n.Type, addr)
  5071			s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem())
  5072		}
  5073		resok = s.variable(&okVar, types.Types[TBOOL])
  5074		delete(s.vars, &okVar)
  5075		return res, resok
  5076	}
  5077	
  5078	// variable returns the value of a variable at the current location.
  5079	func (s *state) variable(name *Node, t *types.Type) *ssa.Value {
  5080		v := s.vars[name]
  5081		if v != nil {
  5082			return v
  5083		}
  5084		v = s.fwdVars[name]
  5085		if v != nil {
  5086			return v
  5087		}
  5088	
  5089		if s.curBlock == s.f.Entry {
  5090			// No variable should be live at entry.
  5091			s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v)
  5092		}
  5093		// Make a FwdRef, which records a value that's live on block input.
  5094		// We'll find the matching definition as part of insertPhis.
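	// For example, a variable assigned in both arms of an if statement and
	// read after the merge gets a FwdRef in the merge block, which
	// insertPhis later replaces with a phi of the two definitions.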
  5095		v = s.newValue0A(ssa.OpFwdRef, t, name)
  5096		s.fwdVars[name] = v
  5097		s.addNamedValue(name, v)
  5098		return v
  5099	}
  5100	
  5101	func (s *state) mem() *ssa.Value {
  5102		return s.variable(&memVar, types.TypeMem)
  5103	}
  5104	
  5105	func (s *state) addNamedValue(n *Node, v *ssa.Value) {
  5106		if n.Class() == Pxxx {
  5107			// Don't track our dummy nodes (&memVar etc.).
  5108			return
  5109		}
  5110		if n.IsAutoTmp() {
  5111			// Don't track temporary variables.
  5112			return
  5113		}
  5114		if n.Class() == PPARAMOUT {
  5115			// Don't track named output values.  This prevents return values
  5116			// from being assigned too early. See #14591 and #14762. TODO: allow this.
  5117			return
  5118		}
  5119		if n.Class() == PAUTO && n.Xoffset != 0 {
  5120			s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset)
  5121		}
  5122		loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
  5123		values, ok := s.f.NamedValues[loc]
  5124		if !ok {
  5125			s.f.Names = append(s.f.Names, loc)
  5126		}
  5127		s.f.NamedValues[loc] = append(values, v)
  5128	}
  5129	
  5130	// Branch is an unresolved branch.
  5131	type Branch struct {
  5132		P *obj.Prog  // branch instruction
  5133		B *ssa.Block // target
  5134	}
  5135	
  5136	// SSAGenState contains state needed during Prog generation.
  5137	type SSAGenState struct {
  5138		pp *Progs
  5139	
  5140		// Branches remembers all the branch instructions we've seen
  5141		// and where they would like to go.
  5142		Branches []Branch
  5143	
  5144		// bstart remembers where each block starts (indexed by block ID)
  5145		bstart []*obj.Prog
  5146	
  5147		// 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?)
  5148		SSEto387 map[int16]int16
  5149		// Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8.
  5150		ScratchFpMem *Node
  5151	
  5152		maxarg int64 // largest frame size for arguments to calls made by the function
  5153	
  5154		// Map from GC safe points to liveness index, generated by
  5155		// liveness analysis.
  5156		livenessMap LivenessMap
  5157	
  5158		// lineRunStart records the beginning of the current run of instructions
  5159		// within a single block sharing the same line number
  5160		// Used to move statement marks to the beginning of such runs.
  5161		lineRunStart *obj.Prog
  5162	
  5163		// wasm: The number of values on the WebAssembly stack. This is only used as a safeguard.
  5164		OnWasmStackSkipped int
  5165	}
  5166	
  5167	// Prog appends a new Prog.
  5168	func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
  5169		p := s.pp.Prog(as)
  5170		if ssa.LosesStmtMark(as) {
  5171			return p
  5172		}
  5173		// Float a statement start to the beginning of any same-line run.
  5174		// lineRunStart is reset at block boundaries, which appears to work well.
  5175		if s.lineRunStart == nil || s.lineRunStart.Pos.Line() != p.Pos.Line() {
  5176			s.lineRunStart = p
  5177		} else if p.Pos.IsStmt() == src.PosIsStmt {
  5178			s.lineRunStart.Pos = s.lineRunStart.Pos.WithIsStmt()
  5179			p.Pos = p.Pos.WithNotStmt()
  5180		}
  5181		return p
  5182	}
  5183	
  5184	// Pc returns the current Prog.
  5185	func (s *SSAGenState) Pc() *obj.Prog {
  5186		return s.pp.next
  5187	}
  5188	
  5189	// SetPos sets the current source position.
  5190	func (s *SSAGenState) SetPos(pos src.XPos) {
  5191		s.pp.pos = pos
  5192	}
  5193	
  5194	// Br emits a single branch instruction and returns the instruction.
  5195	// Not all architectures need the returned instruction, but otherwise
  5196	// the boilerplate is common to all.
  5197	func (s *SSAGenState) Br(op obj.As, target *ssa.Block) *obj.Prog {
  5198		p := s.Prog(op)
  5199		p.To.Type = obj.TYPE_BRANCH
  5200		s.Branches = append(s.Branches, Branch{P: p, B: target})
  5201		return p
  5202	}
  5203	
  5204	// DebugFriendlySetPosFrom adjusts Pos.IsStmt subject to heuristics
  5205	// that reduce "jumpy" line number churn when debugging.
  5206	// Spill/fill/copy instructions from the register allocator,
  5207	// phi functions, and instructions with a no-pos position
  5208	// are examples of instructions that can cause churn.
  5209	func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) {
  5210		switch v.Op {
  5211		case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
  5212			// These are not statements
  5213			s.SetPos(v.Pos.WithNotStmt())
  5214		default:
  5215			p := v.Pos
  5216			if p != src.NoXPos {
  5217				// If the position is defined, update the position.
  5218				// Also convert default IsStmt to NotStmt; only
  5219				// explicit statement boundaries should appear
  5220				// in the generated code.
  5221				if p.IsStmt() != src.PosIsStmt {
  5222					p = p.WithNotStmt()
  5223				}
  5224				s.SetPos(p)
  5225			}
  5226		}
  5227	}
  5228	
  5229	// byXoffset implements sort.Interface for []*Node using Xoffset as the ordering.
  5230	type byXoffset []*Node
  5231	
  5232	func (s byXoffset) Len() int           { return len(s) }
  5233	func (s byXoffset) Less(i, j int) bool { return s[i].Xoffset < s[j].Xoffset }
  5234	func (s byXoffset) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
  5235	
  5236	func emitStackObjects(e *ssafn, pp *Progs) {
  5237		var vars []*Node
  5238		for _, n := range e.curfn.Func.Dcl {
  5239			if livenessShouldTrack(n) && n.Addrtaken() {
  5240				vars = append(vars, n)
  5241			}
  5242		}
  5243		if len(vars) == 0 {
  5244			return
  5245		}
  5246	
  5247		// Sort variables from lowest to highest address.
  5248		sort.Sort(byXoffset(vars))
  5249	
  5250		// Populate the stack object data.
  5251		// Format must match runtime/stack.go:stackObjectRecord.
  5252		x := e.curfn.Func.lsym.Func.StackObjects
  5253		off := 0
  5254		off = duintptr(x, off, uint64(len(vars)))
  5255		for _, v := range vars {
  5256			// Note: arguments and return values have non-negative Xoffset,
  5257			// in which case the offset is relative to argp.
  5258			// Locals have a negative Xoffset, in which case the offset is relative to varp.
  5259			off = duintptr(x, off, uint64(v.Xoffset))
  5260			if !typesym(v.Type).Siggen() {
  5261				Fatalf("stack object's type symbol not generated for type %s", v.Type)
  5262			}
  5263			off = dsymptr(x, off, dtypesym(v.Type), 0)
  5264		}
  5265	
  5266		// Emit a funcdata pointing at the stack object data.
  5267		p := pp.Prog(obj.AFUNCDATA)
  5268		Addrconst(&p.From, objabi.FUNCDATA_StackObjects)
  5269		p.To.Type = obj.TYPE_MEM
  5270		p.To.Name = obj.NAME_EXTERN
  5271		p.To.Sym = x
  5272	
  5273		if debuglive != 0 {
  5274			for _, v := range vars {
  5275				Warnl(v.Pos, "stack object %v %s", v, v.Type.String())
  5276			}
  5277		}
  5278	}
  5279	
  5280	// genssa appends entries to pp for each instruction in f.
  5281	func genssa(f *ssa.Func, pp *Progs) {
  5282		var s SSAGenState
  5283	
  5284		e := f.Frontend().(*ssafn)
  5285	
  5286		s.livenessMap = liveness(e, f, pp)
  5287		emitStackObjects(e, pp)
  5288	
  5289		// Remember where each block starts.
  5290		s.bstart = make([]*obj.Prog, f.NumBlocks())
  5291		s.pp = pp
  5292		var progToValue map[*obj.Prog]*ssa.Value
  5293		var progToBlock map[*obj.Prog]*ssa.Block
  5294		var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point.
  5295		if f.PrintOrHtmlSSA {
  5296			progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
  5297			progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
  5298			f.Logf("genssa %s\n", f.Name)
  5299			progToBlock[s.pp.next] = f.Blocks[0]
  5300		}
  5301	
  5302		if thearch.Use387 {
  5303			s.SSEto387 = map[int16]int16{}
  5304		}
  5305	
  5306		s.ScratchFpMem = e.scratchFpMem
  5307	
  5308		if Ctxt.Flag_locationlists {
  5309			if cap(f.Cache.ValueToProgAfter) < f.NumValues() {
  5310				f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues())
  5311			}
  5312			valueToProgAfter = f.Cache.ValueToProgAfter[:f.NumValues()]
  5313			for i := range valueToProgAfter {
  5314				valueToProgAfter[i] = nil
  5315			}
  5316		}
  5317	
  5318		// If the very first instruction is not tagged as a statement,
   5319		// debuggers may attribute it to the previous function in the program.
  5320		firstPos := src.NoXPos
  5321		for _, v := range f.Entry.Values {
  5322			if v.Pos.IsStmt() == src.PosIsStmt {
  5323				firstPos = v.Pos
  5324				v.Pos = firstPos.WithDefaultStmt()
  5325				break
  5326			}
  5327		}
  5328	
  5329		// inlMarks has an entry for each Prog that implements an inline mark.
  5330		// It maps from that Prog to the global inlining id of the inlined body
  5331		// which should unwind to this Prog's location.
  5332		var inlMarks map[*obj.Prog]int32
  5333		var inlMarkList []*obj.Prog
  5334	
  5335		// inlMarksByPos maps from a (column 1) source position to the set of
  5336		// Progs that are in the set above and have that source position.
  5337		var inlMarksByPos map[src.XPos][]*obj.Prog
  5338	
  5339		// Emit basic blocks
  5340		for i, b := range f.Blocks {
  5341			s.bstart[b.ID] = s.pp.next
  5342			s.pp.nextLive = LivenessInvalid
  5343			s.lineRunStart = nil
  5344	
  5345			// Emit values in block
  5346			thearch.SSAMarkMoves(&s, b)
  5347			for _, v := range b.Values {
  5348				x := s.pp.next
  5349				s.DebugFriendlySetPosFrom(v)
  5350				// Attach this safe point to the next
  5351				// instruction.
  5352				s.pp.nextLive = s.livenessMap.Get(v)
  5353				switch v.Op {
  5354				case ssa.OpInitMem:
  5355					// memory arg needs no code
  5356				case ssa.OpArg:
  5357					// input args need no code
  5358				case ssa.OpSP, ssa.OpSB:
  5359					// nothing to do
  5360				case ssa.OpSelect0, ssa.OpSelect1:
  5361					// nothing to do
  5362				case ssa.OpGetG:
  5363					// nothing to do when there's a g register,
  5364					// and checkLower complains if there's not
  5365				case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive, ssa.OpVarKill:
  5366					// nothing to do; already used by liveness
  5367				case ssa.OpPhi:
  5368					CheckLoweredPhi(v)
  5369				case ssa.OpConvert:
  5370					// nothing to do; no-op conversion for liveness
  5371					if v.Args[0].Reg() != v.Reg() {
  5372						v.Fatalf("OpConvert should be a no-op: %s; %s", v.Args[0].LongString(), v.LongString())
  5373					}
  5374				case ssa.OpInlMark:
  5375					p := thearch.Ginsnop(s.pp)
  5376					if inlMarks == nil {
  5377						inlMarks = map[*obj.Prog]int32{}
  5378						inlMarksByPos = map[src.XPos][]*obj.Prog{}
  5379					}
  5380					inlMarks[p] = v.AuxInt32()
  5381					inlMarkList = append(inlMarkList, p)
  5382					pos := v.Pos.AtColumn1()
  5383					inlMarksByPos[pos] = append(inlMarksByPos[pos], p)
  5384	
  5385				default:
  5386					// let the backend handle it
  5387					// Special case for first line in function; move it to the start.
  5388					if firstPos != src.NoXPos {
  5389						s.SetPos(firstPos)
  5390						firstPos = src.NoXPos
  5391					}
  5392					thearch.SSAGenValue(&s, v)
  5393				}
  5394	
  5395				if Ctxt.Flag_locationlists {
  5396					valueToProgAfter[v.ID] = s.pp.next
  5397				}
  5398	
  5399				if f.PrintOrHtmlSSA {
  5400					for ; x != s.pp.next; x = x.Link {
  5401						progToValue[x] = v
  5402					}
  5403				}
  5404			}
  5405			// If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused.
  5406			if s.bstart[b.ID] == s.pp.next && len(b.Succs) == 1 && b.Succs[0].Block() == b {
  5407				p := thearch.Ginsnop(s.pp)
  5408				p.Pos = p.Pos.WithIsStmt()
  5409				b.Pos = b.Pos.WithBogusLine() // Debuggers are not good about infinite loops, force a change in line number
  5410			}
  5411			// Emit control flow instructions for block
  5412			var next *ssa.Block
  5413			if i < len(f.Blocks)-1 && Debug['N'] == 0 {
  5414				// If -N, leave next==nil so every block with successors
  5415				// ends in a JMP (except call blocks - plive doesn't like
  5416				// select{send,recv} followed by a JMP call).  Helps keep
  5417				// line numbers for otherwise empty blocks.
  5418				next = f.Blocks[i+1]
  5419			}
  5420			x := s.pp.next
  5421			s.SetPos(b.Pos)
  5422			thearch.SSAGenBlock(&s, b, next)
  5423			if f.PrintOrHtmlSSA {
  5424				for ; x != s.pp.next; x = x.Link {
  5425					progToBlock[x] = b
  5426				}
  5427			}
  5428		}
  5429		if f.Blocks[len(f.Blocks)-1].Kind == ssa.BlockExit {
  5430			// We need the return address of a panic call to
  5431			// still be inside the function in question. So if
  5432			// it ends in a call which doesn't return, add a
  5433			// nop (which will never execute) after the call.
  5434			thearch.Ginsnop(pp)
  5435		}
  5436	
  5437		if inlMarks != nil {
  5438			// We have some inline marks. Try to find other instructions we're
  5439			// going to emit anyway, and use those instructions instead of the
  5440			// inline marks.
  5441			for p := pp.Text; p != nil; p = p.Link {
  5442				if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || thearch.LinkArch.Family == sys.Wasm {
  5443					// Don't use 0-sized instructions as inline marks, because we need
  5444					// to identify inline mark instructions by pc offset.
  5445					// (Some of these instructions are sometimes zero-sized, sometimes not.
  5446					// We must not use anything that even might be zero-sized.)
  5447					// TODO: are there others?
  5448					continue
  5449				}
  5450				if _, ok := inlMarks[p]; ok {
  5451					// Don't use inline marks themselves. We don't know
  5452					// whether they will be zero-sized or not yet.
  5453					continue
  5454				}
  5455				pos := p.Pos.AtColumn1()
  5456				s := inlMarksByPos[pos]
  5457				if len(s) == 0 {
  5458					continue
  5459				}
  5460				for _, m := range s {
  5461					// We found an instruction with the same source position as
  5462					// some of the inline marks.
  5463					// Use this instruction instead.
  5464					p.Pos = p.Pos.WithIsStmt() // promote position to a statement
  5465					pp.curfn.Func.lsym.Func.AddInlMark(p, inlMarks[m])
  5466					// Make the inline mark a real nop, so it doesn't generate any code.
  5467					m.As = obj.ANOP
  5468					m.Pos = src.NoXPos
  5469					m.From = obj.Addr{}
  5470					m.To = obj.Addr{}
  5471				}
  5472				delete(inlMarksByPos, pos)
  5473			}
  5474			// Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
  5475			for _, p := range inlMarkList {
  5476				if p.As != obj.ANOP {
  5477					pp.curfn.Func.lsym.Func.AddInlMark(p, inlMarks[p])
  5478				}
  5479			}
  5480		}
  5481	
  5482		if Ctxt.Flag_locationlists {
  5483			e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(Ctxt, f, Debug_locationlist > 1, stackOffset)
  5484			bstart := s.bstart
  5485			// Note that at this moment, Prog.Pc is a sequence number; it's
  5486			// not a real PC until after assembly, so this mapping has to
  5487			// be done later.
  5488			e.curfn.Func.DebugInfo.GetPC = func(b, v ssa.ID) int64 {
  5489				switch v {
  5490				case ssa.BlockStart.ID:
  5491					if b == f.Entry.ID {
  5492						return 0 // Start at the very beginning, at the assembler-generated prologue.
  5493						// this should only happen for function args (ssa.OpArg)
  5494					}
  5495					return bstart[b].Pc
  5496				case ssa.BlockEnd.ID:
  5497					return e.curfn.Func.lsym.Size
  5498				default:
  5499					return valueToProgAfter[v].Pc
  5500				}
  5501			}
  5502		}
  5503	
  5504		// Resolve branches, and relax DefaultStmt into NotStmt
  5505		for _, br := range s.Branches {
  5506			br.P.To.Val = s.bstart[br.B.ID]
  5507			if br.P.Pos.IsStmt() != src.PosIsStmt {
  5508				br.P.Pos = br.P.Pos.WithNotStmt()
  5509			} else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt {
  5510				br.P.Pos = br.P.Pos.WithNotStmt()
  5511			}
  5512	
  5513		}
  5514	
  5515		if e.log { // spew to stdout
  5516			filename := ""
  5517			for p := pp.Text; p != nil; p = p.Link {
  5518				if p.Pos.IsKnown() && p.InnermostFilename() != filename {
  5519					filename = p.InnermostFilename()
  5520					f.Logf("# %s\n", filename)
  5521				}
  5522	
  5523				var s string
  5524				if v, ok := progToValue[p]; ok {
  5525					s = v.String()
  5526				} else if b, ok := progToBlock[p]; ok {
  5527					s = b.String()
  5528				} else {
  5529					s = "   " // most value and branch strings are 2-3 characters long
  5530				}
  5531				f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString())
  5532			}
  5533		}
  5534		if f.HTMLWriter != nil { // spew to ssa.html
  5535			var buf bytes.Buffer
  5536			buf.WriteString("<code>")
  5537			buf.WriteString("<dl class=\"ssa-gen\">")
  5538			filename := ""
  5539			for p := pp.Text; p != nil; p = p.Link {
  5540				// Don't spam every line with the file name, which is often huge.
  5541				// Only print changes, and "unknown" is not a change.
  5542				if p.Pos.IsKnown() && p.InnermostFilename() != filename {
  5543					filename = p.InnermostFilename()
  5544					buf.WriteString("<dt class=\"ssa-prog-src\"></dt><dd class=\"ssa-prog\">")
  5545					buf.WriteString(html.EscapeString("# " + filename))
  5546					buf.WriteString("</dd>")
  5547				}
  5548	
  5549				buf.WriteString("<dt class=\"ssa-prog-src\">")
  5550				if v, ok := progToValue[p]; ok {
  5551					buf.WriteString(v.HTML())
  5552				} else if b, ok := progToBlock[p]; ok {
  5553					buf.WriteString("<b>" + b.HTML() + "</b>")
  5554				}
  5555				buf.WriteString("</dt>")
  5556				buf.WriteString("<dd class=\"ssa-prog\">")
  5557				buf.WriteString(fmt.Sprintf("%.5d <span class=\"l%v line-number\">(%s)</span> %s", p.Pc, p.InnermostLineNumber(), p.InnermostLineNumberHTML(), html.EscapeString(p.InstructionString())))
  5558				buf.WriteString("</dd>")
  5559			}
  5560			buf.WriteString("</dl>")
  5561			buf.WriteString("</code>")
  5562			f.HTMLWriter.WriteColumn("genssa", "genssa", "ssa-prog", buf.String())
  5563		}
  5564	
  5565		defframe(&s, e)
  5566	
  5567		f.HTMLWriter.Close()
  5568		f.HTMLWriter = nil
  5569	}
  5570	
  5571	func defframe(s *SSAGenState, e *ssafn) {
  5572		pp := s.pp
  5573	
  5574		frame := Rnd(s.maxarg+e.stksize, int64(Widthreg))
  5575		if thearch.PadFrame != nil {
  5576			frame = thearch.PadFrame(frame)
  5577		}
  5578	
  5579		// Fill in argument and frame size.
  5580		pp.Text.To.Type = obj.TYPE_TEXTSIZE
  5581		pp.Text.To.Val = int32(Rnd(e.curfn.Type.ArgWidth(), int64(Widthreg)))
  5582		pp.Text.To.Offset = frame
  5583	
  5584		// Insert code to zero ambiguously live variables so that the
  5585		// garbage collector only sees initialized values when it
  5586		// looks for pointers.
  5587		p := pp.Text
  5588		var lo, hi int64
  5589	
  5590		// Opaque state for backend to use. Current backends use it to
  5591		// keep track of which helper registers have been zeroed.
  5592		var state uint32
  5593	
  5594		// Iterate through declarations. They are sorted in decreasing Xoffset order.
  5595		for _, n := range e.curfn.Func.Dcl {
  5596			if !n.Name.Needzero() {
  5597				continue
  5598			}
  5599			if n.Class() != PAUTO {
  5600				Fatalf("needzero class %d", n.Class())
  5601			}
  5602			if n.Type.Size()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.Size() == 0 {
  5603				Fatalf("var %L has size %d offset %d", n, n.Type.Size(), n.Xoffset)
  5604			}
  5605	
  5606			if lo != hi && n.Xoffset+n.Type.Size() >= lo-int64(2*Widthreg) {
  5607				// Merge with range we already have.
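			// (Zeroing straight through a gap of up to 2*Widthreg
			// bytes is assumed cheaper than starting a new range.)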
  5608				lo = n.Xoffset
  5609				continue
  5610			}
  5611	
  5612			// Zero old range
  5613			p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
  5614	
  5615			// Set new range.
  5616			lo = n.Xoffset
  5617			hi = lo + n.Type.Size()
  5618		}
  5619	
  5620		// Zero final range.
  5621		thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
  5622	}
  5623	
  5624	type FloatingEQNEJump struct {
  5625		Jump  obj.As
  5626		Index int
  5627	}
  5628	
  5629	func (s *SSAGenState) oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump) {
  5630		p := s.Prog(jumps.Jump)
  5631		p.To.Type = obj.TYPE_BRANCH
  5632		p.Pos = b.Pos
  5633		to := jumps.Index
  5634		s.Branches = append(s.Branches, Branch{p, b.Succs[to].Block()})
  5635	}
  5636	
  5637	func (s *SSAGenState) FPJump(b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
  5638		switch next {
  5639		case b.Succs[0].Block():
  5640			s.oneFPJump(b, &jumps[0][0])
  5641			s.oneFPJump(b, &jumps[0][1])
  5642		case b.Succs[1].Block():
  5643			s.oneFPJump(b, &jumps[1][0])
  5644			s.oneFPJump(b, &jumps[1][1])
  5645		default:
  5646			s.oneFPJump(b, &jumps[1][0])
  5647			s.oneFPJump(b, &jumps[1][1])
  5648			q := s.Prog(obj.AJMP)
  5649			q.Pos = b.Pos
  5650			q.To.Type = obj.TYPE_BRANCH
  5651			s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()})
  5652		}
  5653	}
  5654	
  5655	func AuxOffset(v *ssa.Value) (offset int64) {
  5656		if v.Aux == nil {
  5657			return 0
  5658		}
  5659		n, ok := v.Aux.(*Node)
  5660		if !ok {
  5661			v.Fatalf("bad aux type in %s\n", v.LongString())
  5662		}
  5663		if n.Class() == PAUTO {
  5664			return n.Xoffset
  5665		}
  5666		return 0
  5667	}
  5668	
  5669	// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
  5670	func AddAux(a *obj.Addr, v *ssa.Value) {
  5671		AddAux2(a, v, v.AuxInt)
  5672	}
  5673	func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
  5674		if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
  5675			v.Fatalf("bad AddAux addr %v", a)
  5676		}
  5677		// add integer offset
  5678		a.Offset += offset
  5679	
  5680		// If no additional symbol offset, we're done.
  5681		if v.Aux == nil {
  5682			return
  5683		}
  5684		// Add symbol's offset from its base register.
  5685		switch n := v.Aux.(type) {
  5686		case *obj.LSym:
  5687			a.Name = obj.NAME_EXTERN
  5688			a.Sym = n
  5689		case *Node:
  5690			if n.Class() == PPARAM || n.Class() == PPARAMOUT {
  5691				a.Name = obj.NAME_PARAM
  5692				a.Sym = n.Orig.Sym.Linksym()
  5693				a.Offset += n.Xoffset
  5694				break
  5695			}
  5696			a.Name = obj.NAME_AUTO
  5697			a.Sym = n.Sym.Linksym()
  5698			a.Offset += n.Xoffset
  5699		default:
  5700			v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
  5701		}
  5702	}
  5703	
  5704	// extendIndex extends idx to a full int width.
  5705	// It panics with the given kind if idx does not fit in an int (only on 32-bit archs).
  5706	func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
  5707		size := idx.Type.Size()
  5708		if size == s.config.PtrSize {
  5709			return idx
  5710		}
  5711		if size > s.config.PtrSize {
  5712			// Truncate 64-bit indexes on 32-bit-pointer archs: test the
  5713			// high word and branch to an out-of-bounds failure if it is not 0.
  5714			var lo *ssa.Value
  5715			if idx.Type.IsSigned() {
  5716				lo = s.newValue1(ssa.OpInt64Lo, types.Types[TINT], idx)
  5717			} else {
  5718				lo = s.newValue1(ssa.OpInt64Lo, types.Types[TUINT], idx)
  5719			}
  5720			if bounded || Debug['B'] != 0 {
  5721				return lo
  5722			}
  5723			bNext := s.f.NewBlock(ssa.BlockPlain)
  5724			bPanic := s.f.NewBlock(ssa.BlockExit)
  5725			hi := s.newValue1(ssa.OpInt64Hi, types.Types[TUINT32], idx)
  5726			cmp := s.newValue2(ssa.OpEq32, types.Types[TBOOL], hi, s.constInt32(types.Types[TUINT32], 0))
  5727			if !idx.Type.IsSigned() {
  5728				switch kind {
  5729				case ssa.BoundsIndex:
  5730					kind = ssa.BoundsIndexU
  5731				case ssa.BoundsSliceAlen:
  5732					kind = ssa.BoundsSliceAlenU
  5733				case ssa.BoundsSliceAcap:
  5734					kind = ssa.BoundsSliceAcapU
  5735				case ssa.BoundsSliceB:
  5736					kind = ssa.BoundsSliceBU
  5737				case ssa.BoundsSlice3Alen:
  5738					kind = ssa.BoundsSlice3AlenU
  5739				case ssa.BoundsSlice3Acap:
  5740					kind = ssa.BoundsSlice3AcapU
  5741				case ssa.BoundsSlice3B:
  5742					kind = ssa.BoundsSlice3BU
  5743				case ssa.BoundsSlice3C:
  5744					kind = ssa.BoundsSlice3CU
  5745				}
  5746			}
  5747			b := s.endBlock()
  5748			b.Kind = ssa.BlockIf
  5749			b.SetControl(cmp)
  5750			b.Likely = ssa.BranchLikely
  5751			b.AddEdgeTo(bNext)
  5752			b.AddEdgeTo(bPanic)
  5753	
  5754			s.startBlock(bPanic)
  5755			mem := s.newValue4I(ssa.OpPanicExtend, types.TypeMem, int64(kind), hi, lo, len, s.mem())
  5756			s.endBlock().SetControl(mem)
  5757			s.startBlock(bNext)
  5758	
  5759			return lo
  5760		}
  5761	
  5762		// Extend value to the required size
  5763		var op ssa.Op
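      		// The switch keys below combine the index size and pointer size,
      		// in bytes, as 10*size + PtrSize; for example, a 2-byte index on
      		// an 8-byte-pointer arch is case 28.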
  5764		if idx.Type.IsSigned() {
  5765			switch 10*size + s.config.PtrSize {
  5766			case 14:
  5767				op = ssa.OpSignExt8to32
  5768			case 18:
  5769				op = ssa.OpSignExt8to64
  5770			case 24:
  5771				op = ssa.OpSignExt16to32
  5772			case 28:
  5773				op = ssa.OpSignExt16to64
  5774			case 48:
  5775				op = ssa.OpSignExt32to64
  5776			default:
  5777				s.Fatalf("bad signed index extension %s", idx.Type)
  5778			}
  5779		} else {
  5780			switch 10*size + s.config.PtrSize {
  5781			case 14:
  5782				op = ssa.OpZeroExt8to32
  5783			case 18:
  5784				op = ssa.OpZeroExt8to64
  5785			case 24:
  5786				op = ssa.OpZeroExt16to32
  5787			case 28:
  5788				op = ssa.OpZeroExt16to64
  5789			case 48:
  5790				op = ssa.OpZeroExt32to64
  5791			default:
  5792				s.Fatalf("bad unsigned index extension %s", idx.Type)
  5793			}
  5794		}
  5795		return s.newValue1(op, types.Types[TINT], idx)
  5796	}
  5797	
  5798	// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
  5799	// Called during ssaGenValue.
  5800	func CheckLoweredPhi(v *ssa.Value) {
  5801		if v.Op != ssa.OpPhi {
  5802			v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
  5803		}
  5804		if v.Type.IsMemory() {
  5805			return
  5806		}
  5807		f := v.Block.Func
  5808		loc := f.RegAlloc[v.ID]
  5809		for _, a := range v.Args {
  5810			if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
  5811				v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func)
  5812			}
  5813		}
  5814	}
  5815	
  5816	// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block.
  5817	// The output of LoweredGetClosurePtr is generally hardwired to the correct register.
  5818	// That register contains the closure pointer on closure entry.
  5819	func CheckLoweredGetClosurePtr(v *ssa.Value) {
  5820		entry := v.Block.Func.Entry
  5821		if entry != v.Block || entry.Values[0] != v {
  5822			Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
  5823		}
  5824	}
  5825	
  5826	// AutoVar returns a *Node and int64 representing the auto variable and offset within it
  5827	// where v should be spilled.
  5828	func AutoVar(v *ssa.Value) (*Node, int64) {
  5829		loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
  5830		if v.Type.Size() > loc.Type.Size() {
  5831			v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
  5832		}
  5833		return loc.N.(*Node), loc.Off
  5834	}
  5835	
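      	// AddrAuto sets a to refer to v's spill location: an SP-relative
      	// reference to the auto or parameter (plus offset) returned by AutoVar.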
  5836	func AddrAuto(a *obj.Addr, v *ssa.Value) {
  5837		n, off := AutoVar(v)
  5838		a.Type = obj.TYPE_MEM
  5839		a.Sym = n.Sym.Linksym()
  5840		a.Reg = int16(thearch.REGSP)
  5841		a.Offset = n.Xoffset + off
  5842		if n.Class() == PPARAM || n.Class() == PPARAMOUT {
  5843			a.Name = obj.NAME_PARAM
  5844		} else {
  5845			a.Name = obj.NAME_AUTO
  5846		}
  5847	}
  5848	
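      	// AddrScratch sets a to refer to the function's scratch slot, a stack
      	// temporary used on some architectures for moves between floating-point
      	// registers and memory.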
  5849	func (s *SSAGenState) AddrScratch(a *obj.Addr) {
  5850		if s.ScratchFpMem == nil {
  5851			panic("no scratch memory available; forgot to declare usesScratch for Op?")
  5852		}
  5853		a.Type = obj.TYPE_MEM
  5854		a.Name = obj.NAME_AUTO
  5855		a.Sym = s.ScratchFpMem.Sym.Linksym()
  5856		a.Reg = int16(thearch.REGSP)
  5857		a.Offset = s.ScratchFpMem.Xoffset
  5858	}
  5859	
  5860	// Call returns a new CALL instruction for the SSA value v.
  5861	// It uses PrepareCall to prepare the call.
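      	// Architecture backends typically invoke it from their ssaGenValue
      	// when lowering call ops; roughly (a sketch, not verbatim backend code):
      	//
      	//	case ssa.OpAMD64CALLstatic:
      	//		s.Call(v)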
  5862	func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
  5863		s.PrepareCall(v)
  5864	
  5865		p := s.Prog(obj.ACALL)
  5866		p.Pos = v.Pos
  5867		if sym, ok := v.Aux.(*obj.LSym); ok {
  5868			p.To.Type = obj.TYPE_MEM
  5869			p.To.Name = obj.NAME_EXTERN
  5870			p.To.Sym = sym
  5871		} else {
  5872			// TODO(mdempsky): Can these differences be eliminated?
  5873			switch thearch.LinkArch.Family {
  5874			case sys.AMD64, sys.I386, sys.PPC64, sys.S390X, sys.Wasm:
  5875				p.To.Type = obj.TYPE_REG
  5876			case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
  5877				p.To.Type = obj.TYPE_MEM
  5878			default:
  5879				Fatalf("unknown indirect call family")
  5880			}
  5881			p.To.Reg = v.Args[0].Reg()
  5882		}
  5883		return p
  5884	}
  5885	
  5886	// PrepareCall prepares to emit a CALL instruction for v and does call-related bookkeeping.
  5887	// It must be called immediately before emitting the actual CALL instruction,
  5888	// since it emits PCDATA for the stack map at the call (calls are safe points).
  5889	func (s *SSAGenState) PrepareCall(v *ssa.Value) {
  5890		idx := s.livenessMap.Get(v)
  5891		if !idx.Valid() {
  5892			// typedmemclr and typedmemmove are write barriers and
  5893			// deeply non-preemptible. They are unsafe points and
  5894			// hence should not have liveness maps.
  5895			if sym, _ := v.Aux.(*obj.LSym); !(sym == typedmemclr || sym == typedmemmove) {
  5896				Fatalf("missing stack map index for %v", v.LongString())
  5897			}
  5898		}
  5899	
  5900		if sym, _ := v.Aux.(*obj.LSym); sym == Deferreturn {
  5901			// Deferred calls will appear to be returning to
  5902			// the CALL deferreturn(SB) that we are about to emit.
  5903			// However, the stack trace code will show the line
  5904			// of the instruction byte before the return PC.
  5905			// To avoid that being an unrelated instruction,
  5906			// insert an actual hardware NOP that will have the right line number.
  5907			// This is different from obj.ANOP, which is a virtual no-op
  5908			// that doesn't make it into the instruction stream.
  5909			thearch.Ginsnopdefer(s.pp)
  5910		}
  5911	
  5912		if sym, ok := v.Aux.(*obj.LSym); ok {
  5913			// Record call graph information for nowritebarrierrec
  5914			// analysis.
  5915			if nowritebarrierrecCheck != nil {
  5916				nowritebarrierrecCheck.recordCall(s.pp.curfn, sym, v.Pos)
  5917			}
  5918		}
  5919	
  5920		if s.maxarg < v.AuxInt {
  5921			s.maxarg = v.AuxInt
  5922		}
  5923	}
  5924	
  5925	// UseArgs records that an instruction needs n bytes of space
  5926	// for its callee arguments.
  5927	func (s *SSAGenState) UseArgs(n int64) {
  5928		if s.maxarg < n {
  5929			s.maxarg = n
  5930		}
  5931	}
  5932	
  5933	// fieldIdx finds the index of the field referred to by the ODOT node n.
  5934	func fieldIdx(n *Node) int {
  5935		t := n.Left.Type
  5936		f := n.Sym
  5937		if !t.IsStruct() {
  5938			panic("ODOT's LHS is not a struct")
  5939		}
  5940	
  5941		var i int
  5942		for _, t1 := range t.Fields().Slice() {
  5943			if t1.Sym != f {
  5944				i++
  5945				continue
  5946			}
  5947			if t1.Offset != n.Xoffset {
  5948				panic("field offset doesn't match")
  5949			}
  5950			return i
  5951		}
  5952		panic(fmt.Sprintf("can't find field in expr %v\n", n))
  5953	
  5954		// TODO: keep the result of this function somewhere in the ODOT Node
  5955		// so we don't have to recompute it each time we need it.
  5956	}
  5957	
  5958	// ssafn holds frontend information about a function that the backend is processing.
  5959	// It also exposes a number of compiler services to the ssa backend.
  5960	type ssafn struct {
  5961		curfn        *Node
  5962		strings      map[string]interface{} // map from constant string to data symbols
  5963		scratchFpMem *Node                  // temp for floating point register / memory moves on some architectures
  5964		stksize      int64                  // stack size for current frame
  5965		stkptrsize   int64                  // prefix of stack containing pointers
  5966		log          bool                   // print ssa debug to the stdout
  5967	}
  5968	
  5969	// StringData returns a symbol (a *types.Sym wrapped in an interface) which
  5970	// is the data component of a global string constant containing s.
  5971	func (e *ssafn) StringData(s string) interface{} {
  5972		if aux, ok := e.strings[s]; ok {
  5973			return aux
  5974		}
  5975		if e.strings == nil {
  5976			e.strings = make(map[string]interface{})
  5977		}
  5978		data := stringsym(e.curfn.Pos, s)
  5979		e.strings[s] = data
  5980		return data
  5981	}
  5982	
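      	// Auto allocates a new PAUTO temporary of type t at pos and appends it
      	// to the current function's Dcl list.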
  5983	func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode {
  5984		n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
  5985		return n
  5986	}
  5987	
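      	// SplitString returns the pointer and length halves of the string slot
      	// name. For non-address-taken autos it registers two fresh variables;
      	// otherwise it returns offsets into the original slot. The Split*
      	// methods below follow the same pattern for their component types.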
  5988	func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
  5989		n := name.N.(*Node)
  5990		ptrType := types.NewPtr(types.Types[TUINT8])
  5991		lenType := types.Types[TINT]
  5992		if n.Class() == PAUTO && !n.Addrtaken() {
  5993			// Split this string up into two separate variables.
  5994			p := e.splitSlot(&name, ".ptr", 0, ptrType)
  5995			l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
  5996			return p, l
  5997		}
  5998		// Return the two parts of the larger variable.
  5999		return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
  6000	}
  6001	
  6002	func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
  6003		n := name.N.(*Node)
  6004		u := types.Types[TUINTPTR]
  6005		t := types.NewPtr(types.Types[TUINT8])
  6006		if n.Class() == PAUTO && !n.Addrtaken() {
  6007			// Split this interface up into two separate variables.
  6008			f := ".itab"
  6009			if n.Type.IsEmptyInterface() {
  6010				f = ".type"
  6011			}
  6012			c := e.splitSlot(&name, f, 0, u) // see comment in plive.go:onebitwalktype1.
  6013			d := e.splitSlot(&name, ".data", u.Size(), t)
  6014			return c, d
  6015		}
  6016		// Return the two parts of the larger variable.
  6017		return ssa.LocalSlot{N: n, Type: u, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
  6018	}
  6019	
  6020	func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
  6021		n := name.N.(*Node)
  6022		ptrType := types.NewPtr(name.Type.Elem())
  6023		lenType := types.Types[TINT]
  6024		if n.Class() == PAUTO && !n.Addrtaken() {
  6025			// Split this slice up into three separate variables.
  6026			p := e.splitSlot(&name, ".ptr", 0, ptrType)
  6027			l := e.splitSlot(&name, ".len", ptrType.Size(), lenType)
  6028			c := e.splitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType)
  6029			return p, l, c
  6030		}
  6031		// Return the three parts of the larger variable.
  6032		return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
  6033			ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
  6034			ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
  6035	}
  6036	
  6037	func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
  6038		n := name.N.(*Node)
  6039		s := name.Type.Size() / 2
  6040		var t *types.Type
  6041		if s == 8 {
  6042			t = types.Types[TFLOAT64]
  6043		} else {
  6044			t = types.Types[TFLOAT32]
  6045		}
  6046		if n.Class() == PAUTO && !n.Addrtaken() {
  6047			// Split this complex up into two separate variables.
  6048			r := e.splitSlot(&name, ".real", 0, t)
  6049			i := e.splitSlot(&name, ".imag", t.Size(), t)
  6050			return r, i
  6051		}
  6052		// Return the two parts of the larger variable.
  6053		return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
  6054	}
  6055	
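      	// SplitInt64 returns the hi and lo 32-bit halves of a 64-bit slot on
      	// 32-bit architectures; which half sits at offset 0 depends on the
      	// target's byte order.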
  6056	func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
  6057		n := name.N.(*Node)
  6058		var t *types.Type
  6059		if name.Type.IsSigned() {
  6060			t = types.Types[TINT32]
  6061		} else {
  6062			t = types.Types[TUINT32]
  6063		}
  6064		if n.Class() == PAUTO && !n.Addrtaken() {
  6065			// Split this int64 up into two separate variables.
  6066			if thearch.LinkArch.ByteOrder == binary.BigEndian {
  6067				return e.splitSlot(&name, ".hi", 0, t), e.splitSlot(&name, ".lo", t.Size(), types.Types[TUINT32])
  6068			}
  6069			return e.splitSlot(&name, ".hi", t.Size(), t), e.splitSlot(&name, ".lo", 0, types.Types[TUINT32])
  6070		}
  6071		// Return the two parts of the larger variable.
  6072		if thearch.LinkArch.ByteOrder == binary.BigEndian {
  6073			return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off + 4}
  6074		}
  6075		return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off}
  6076	}
  6077	
  6078	func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
  6079		n := name.N.(*Node)
  6080		st := name.Type
  6081		ft := st.FieldType(i)
  6082		var offset int64
  6083		for f := 0; f < i; f++ {
  6084			offset += st.FieldType(f).Size()
  6085		}
  6086		if n.Class() == PAUTO && !n.Addrtaken() {
  6087		// Note: the _ field may appear several times, but identically
  6088		// named yet distinct Autos are fine, albeit possibly confusing
  6089		// for a debugger.
  6090			return e.splitSlot(&name, "."+st.FieldName(i), offset, ft)
  6091		}
  6092		return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
  6093	}
  6094	
  6095	func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
  6096		n := name.N.(*Node)
  6097		at := name.Type
  6098		if at.NumElem() != 1 {
  6099			Fatalf("bad array size")
  6100		}
  6101		et := at.Elem()
  6102		if n.Class() == PAUTO && !n.Addrtaken() {
  6103			return e.splitSlot(&name, "[0]", 0, et)
  6104		}
  6105		return ssa.LocalSlot{N: n, Type: et, Off: name.Off}
  6106	}
  6107	
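      	// DerefItab returns the function symbol stored at the given offset in
      	// the itab it, which lets interface calls be devirtualized.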
  6108	func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
  6109		return itabsym(it, offset)
  6110	}
  6111	
  6112	// splitSlot returns a slot representing the data of parent starting at offset.
  6113	func (e *ssafn) splitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
  6114		s := &types.Sym{Name: parent.N.(*Node).Sym.Name + suffix, Pkg: localpkg}
  6115	
  6116		n := &Node{
  6117			Name: new(Name),
  6118			Op:   ONAME,
  6119			Pos:  parent.N.(*Node).Pos,
  6120		}
  6121		n.Orig = n
  6122	
  6123		s.Def = asTypesNode(n)
  6124		asNode(s.Def).Name.SetUsed(true)
  6125		n.Sym = s
  6126		n.Type = t
  6127		n.SetClass(PAUTO)
  6128		n.SetAddable(true)
  6129		n.Esc = EscNever
  6130		n.Name.Curfn = e.curfn
  6131		e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n)
  6132		dowidth(t)
  6133		return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
  6134	}
  6135	
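      	// CanSSA reports whether values of type t can be represented as SSA
      	// values rather than living in memory.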
  6136	func (e *ssafn) CanSSA(t *types.Type) bool {
  6137		return canSSAType(t)
  6138	}
  6139	
  6140	func (e *ssafn) Line(pos src.XPos) string {
  6141		return linestr(pos)
  6142	}
  6143	
  6144	// Logf logs a message from the compiler when SSA debug logging is enabled.
  6145	func (e *ssafn) Logf(msg string, args ...interface{}) {
  6146		if e.log {
  6147			fmt.Printf(msg, args...)
  6148		}
  6149	}
  6150	
  6151	func (e *ssafn) Log() bool {
  6152		return e.log
  6153	}
  6154	
  6155	// Fatalf reports a compiler error at pos and exits.
  6156	func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
  6157		lineno = pos
  6158		nargs := append([]interface{}{e.curfn.funcname()}, args...)
  6159		Fatalf("'%s': "+msg, nargs...)
  6160	}
  6161	
  6162	// Warnl reports a "warning", which is usually flag-triggered
  6163	// logging output for the benefit of tests.
  6164	func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
  6165		Warnl(pos, fmt_, args...)
  6166	}
  6167	
  6168	func (e *ssafn) Debug_checknil() bool {
  6169		return Debug_checknil != 0
  6170	}
  6171	
  6172	func (e *ssafn) UseWriteBarrier() bool {
  6173		return use_writebarrier
  6174	}
  6175	
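      	// Syslook returns the symbol of the runtime function with the given
      	// name; only the handful of functions the ssa backend needs are
      	// recognized.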
  6176	func (e *ssafn) Syslook(name string) *obj.LSym {
  6177		switch name {
  6178		case "goschedguarded":
  6179			return goschedguarded
  6180		case "writeBarrier":
  6181			return writeBarrier
  6182		case "gcWriteBarrier":
  6183			return gcWriteBarrier
  6184		case "typedmemmove":
  6185			return typedmemmove
  6186		case "typedmemclr":
  6187			return typedmemclr
  6188		}
  6189		Fatalf("unknown Syslook func %v", name)
  6190		return nil
  6191	}
  6192	
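      	// SetWBPos records pos as the position of a write barrier in the
      	// current function.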
  6193	func (e *ssafn) SetWBPos(pos src.XPos) {
  6194		e.curfn.Func.setWBPos(pos)
  6195	}
  6196	
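      	// Typ returns n's type; together with StorageClass below it lets *Node
      	// implement the ssa.GCNode interface.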
  6197	func (n *Node) Typ() *types.Type {
  6198		return n.Type
  6199	}
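      	// StorageClass translates n's class (PPARAM, PPARAMOUT, or PAUTO) into
      	// the corresponding ssa.StorageClass.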
  6200	func (n *Node) StorageClass() ssa.StorageClass {
  6201		switch n.Class() {
  6202		case PPARAM:
  6203			return ssa.ClassParam
  6204		case PPARAMOUT:
  6205			return ssa.ClassParamOut
  6206		case PAUTO:
  6207			return ssa.ClassAuto
  6208		default:
  6209			Fatalf("untranslatable storage class for %v: %s", n, n.Class())
  6210			return 0
  6211		}
  6212	}
  6213	