// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add(Ptr|64|32|16|8) x y) -> (ADD x y)
(Add64F x y) -> (FADD x y)
(Add32F x y) -> (FADDS x y)

(Sub(Ptr|64|32|16|8) x y) -> (SUB x y)
(Sub32F x y) -> (FSUBS x y)
(Sub64F x y) -> (FSUB x y)

(Mod16 x y) -> (Mod32 (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) -> (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y) -> (Mod32 (SignExt8to32 x) (SignExt8to32 y))
(Mod8u x y) -> (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
(Mod64 x y) -> (SUB x (MULLD y (DIVD x y)))
(Mod64u x y) -> (SUB x (MULLD y (DIVDU x y)))
(Mod32 x y) -> (SUB x (MULLW y (DIVW x y)))
(Mod32u x y) -> (SUB x (MULLW y (DIVWU x y)))
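
// The Mod lowerings above rely on the identity x % y == x - y*(x/y);
// e.g. 7 % 3 == 7 - 3*(7/3) == 7 - 6 == 1. The instructions used here
// provide divide but no remainder, so remainder is computed as
// divide, multiply, subtract.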

// (x + y) / 2 with x>=y -> (x - y) / 2 + y
(Avg64u <t> x y) -> (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
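
// Worked example of the Avg64u rule (not a rule itself): with x=7, y=3,
// (7-3)>>1 + 3 == 2 + 3 == 5 == (7+3)/2, and unlike (x+y)/2 the
// subtraction x-y cannot overflow when x >= y.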

(Add64carry x y c) -> (LoweredAdd64Carry x y c)
(Mul64 x y) -> (MULLD x y)
(Mul(32|16|8) x y) -> (MULLW x y)
(Mul64uhilo x y) -> (LoweredMuluhilo x y)

(Div64 x y) -> (DIVD x y)
(Div64u x y) -> (DIVDU x y)
(Div32 x y) -> (DIVW x y)
(Div32u x y) -> (DIVWU x y)
(Div16 x y) -> (DIVW (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) -> (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8 x y) -> (DIVW (SignExt8to32 x) (SignExt8to32 y))
(Div8u x y) -> (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))

(Hmul(64|64u|32|32u) x y) -> (MULH(D|DU|W|WU) x y)

(Mul32F x y) -> (FMULS x y)
(Mul64F x y) -> (FMUL x y)

(Div32F x y) -> (FDIVS x y)
(Div64F x y) -> (FDIV x y)

// Lowering float <-> int
(Cvt32to32F x) -> (FCFIDS (MTVSRD (SignExt32to64 x)))
(Cvt32to64F x) -> (FCFID (MTVSRD (SignExt32to64 x)))
(Cvt64to32F x) -> (FCFIDS (MTVSRD x))
(Cvt64to64F x) -> (FCFID (MTVSRD x))

(Cvt32Fto32 x) -> (MFVSRD (FCTIWZ x))
(Cvt32Fto64 x) -> (MFVSRD (FCTIDZ x))
(Cvt64Fto32 x) -> (MFVSRD (FCTIWZ x))
(Cvt64Fto64 x) -> (MFVSRD (FCTIDZ x))

(Cvt32Fto64F x) -> x // Note x will have the wrong type for patterns dependent on Float32/Float64
(Cvt64Fto32F x) -> (FRSP x)

(Round(32|64)F x) -> (LoweredRound(32|64)F x)

(Sqrt x) -> (FSQRT x)
(Floor x) -> (FFLOOR x)
(Ceil x) -> (FCEIL x)
(Trunc x) -> (FTRUNC x)
(Round x) -> (FROUND x)
(Copysign x y) -> (FCPSGN y x)
(Abs x) -> (FABS x)

// Lowering constants
(Const(64|32|16|8) [val]) -> (MOVDconst [val])
(Const(32|64)F [val]) -> (FMOV(S|D)const [val])
(ConstNil) -> (MOVDconst [0])
(ConstBool [b]) -> (MOVDconst [b])

// Constant folding
(FABS (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Abs(auxTo64F(x)))])
(FSQRT (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
(FFLOOR (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Floor(auxTo64F(x)))])
(FCEIL (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Ceil(auxTo64F(x)))])
(FTRUNC (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Trunc(auxTo64F(x)))])

// Rotates
(RotateLeft8 <t> x (MOVDconst [c])) -> (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
(RotateLeft16 <t> x (MOVDconst [c])) -> (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
(RotateLeft32 x (MOVDconst [c])) -> (ROTLWconst [c&31] x)
(RotateLeft64 x (MOVDconst [c])) -> (ROTLconst [c&63] x)

// Rotate generation with const shift
(ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
( OR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)
(XOR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x)

(ADD (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
( OR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)
(XOR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x)

// Rotate generation with non-const shift
// These match patterns from math/bits.RotateLeft(32|64), but there could be others.
(ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)
( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)
(XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) -> (ROTL x y)

(ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
(XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) -> (ROTLW x y)
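
// Illustrative Go source that produces the 64-bit pattern above (an
// assumed example, not part of this file):
//
//     import "math/bits"
//
//     func rot(x uint64, k int) uint64 {
//         return bits.RotateLeft64(x, k) // x<<(k&63) | x>>((64-k)&63)
//     }
//
// The generic lowering appears as
// (OR (SLD x (ANDconst [63] k)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] k))))
// and collapses to a single (ROTL x k).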

// Lowering rotates
(RotateLeft32 x y) -> (ROTLW x y)
(RotateLeft64 x y) -> (ROTL x y)

// Constant rotate generation
(ROTLW x (MOVDconst [c])) -> (ROTLWconst x [c&31])
(ROTL x (MOVDconst [c])) -> (ROTLconst x [c&63])

(Lsh64x64 x (Const64 [c])) && uint64(c) < 64 -> (SLDconst x [c])
(Rsh64x64 x (Const64 [c])) && uint64(c) < 64 -> (SRADconst x [c])
(Rsh64Ux64 x (Const64 [c])) && uint64(c) < 64 -> (SRDconst x [c])
(Lsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SLWconst x [c])
(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SRWconst x [c])
(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SLWconst x [c])
(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SLWconst x [c])
(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])

(Lsh64x32 x (Const64 [c])) && uint32(c) < 64 -> (SLDconst x [c])
(Rsh64x32 x (Const64 [c])) && uint32(c) < 64 -> (SRADconst x [c])
(Rsh64Ux32 x (Const64 [c])) && uint32(c) < 64 -> (SRDconst x [c])
(Lsh32x32 x (Const64 [c])) && uint32(c) < 32 -> (SLWconst x [c])
(Rsh32x32 x (Const64 [c])) && uint32(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux32 x (Const64 [c])) && uint32(c) < 32 -> (SRWconst x [c])
(Lsh16x32 x (Const64 [c])) && uint32(c) < 16 -> (SLWconst x [c])
(Rsh16x32 x (Const64 [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux32 x (Const64 [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x32 x (Const64 [c])) && uint32(c) < 8 -> (SLWconst x [c])
(Rsh8x32 x (Const64 [c])) && uint32(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux32 x (Const64 [c])) && uint32(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])

// large constant shifts
(Lsh64x64 _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
(Rsh64Ux64 _ (Const64 [c])) && uint64(c) >= 64 -> (MOVDconst [0])
(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 -> (MOVDconst [0])
(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 -> (MOVDconst [0])
(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 -> (MOVDconst [0])
(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 -> (MOVDconst [0])

// For large constant signed right shifts, the result is all copies of the sign bit, so shift by 63.
(Rsh64x64 x (Const64 [c])) && uint64(c) >= 64 -> (SRADconst x [63])
(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAWconst x [63])
(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAWconst (SignExt16to32 x) [63])
(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAWconst (SignExt8to32 x) [63])
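
// e.g. for a signed 8-bit v, Go defines v >> 100 to be 0 when v >= 0 and
// -1 when v < 0; shifting the sign-extended value right by 63 yields
// exactly that.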

// constant shifts
(Lsh64x64 x (MOVDconst [c])) && uint64(c) < 64 -> (SLDconst x [c])
(Rsh64x64 x (MOVDconst [c])) && uint64(c) < 64 -> (SRADconst x [c])
(Rsh64Ux64 x (MOVDconst [c])) && uint64(c) < 64 -> (SRDconst x [c])
(Lsh32x64 x (MOVDconst [c])) && uint64(c) < 32 -> (SLWconst x [c])
(Rsh32x64 x (MOVDconst [c])) && uint64(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux64 x (MOVDconst [c])) && uint64(c) < 32 -> (SRWconst x [c])
(Lsh16x64 x (MOVDconst [c])) && uint64(c) < 16 -> (SLWconst x [c])
(Rsh16x64 x (MOVDconst [c])) && uint64(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux64 x (MOVDconst [c])) && uint64(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x64 x (MOVDconst [c])) && uint64(c) < 8 -> (SLWconst x [c])
(Rsh8x64 x (MOVDconst [c])) && uint64(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux64 x (MOVDconst [c])) && uint64(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])

(Lsh64x32 x (MOVDconst [c])) && uint32(c) < 64 -> (SLDconst x [c])
(Rsh64x32 x (MOVDconst [c])) && uint32(c) < 64 -> (SRADconst x [c])
(Rsh64Ux32 x (MOVDconst [c])) && uint32(c) < 64 -> (SRDconst x [c])
(Lsh32x32 x (MOVDconst [c])) && uint32(c) < 32 -> (SLWconst x [c])
(Rsh32x32 x (MOVDconst [c])) && uint32(c) < 32 -> (SRAWconst x [c])
(Rsh32Ux32 x (MOVDconst [c])) && uint32(c) < 32 -> (SRWconst x [c])
(Lsh16x32 x (MOVDconst [c])) && uint32(c) < 16 -> (SLWconst x [c])
(Rsh16x32 x (MOVDconst [c])) && uint32(c) < 16 -> (SRAWconst (SignExt16to32 x) [c])
(Rsh16Ux32 x (MOVDconst [c])) && uint32(c) < 16 -> (SRWconst (ZeroExt16to32 x) [c])
(Lsh8x32 x (MOVDconst [c])) && uint32(c) < 8 -> (SLWconst x [c])
(Rsh8x32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])

// Lower bounded shifts first. No need to check shift value.
(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLD x y)
(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLW x y)
(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLW x y)
(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) -> (SLW x y)
(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRD x y)
(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRW x y)
(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRW (MOVHZreg x) y)
(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SRW (MOVBZreg x) y)
(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAD x y)
(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAW x y)
(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAW (MOVHreg x) y)
(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) -> (SRAW (MOVBreg x) y)
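
// shiftIsBounded(v) reports that the shift amount is already known to be
// less than the operand width, so the Go-spec clamping below is not
// needed. One way (among others) a shift can be known bounded, sketched
// as assumed Go source for illustration:
//
//     func f(x uint64, n uint) uint64 {
//         if n < 64 {
//             return x << n // lowered directly to SLD
//         }
//         return 0
//     }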

// non-constant rotates
// These are subexpressions found in statements that can become rotates
// In these cases the shift count is known to be < 64, so the more complicated expressions
// with Mask & Carry are not needed.
(Lsh64x64 x (AND y (MOVDconst [63]))) -> (SLD x (ANDconst <typ.Int64> [63] y))
(Lsh64x64 x (ANDconst <typ.Int64> [63] y)) -> (SLD x (ANDconst <typ.Int64> [63] y))
(Rsh64Ux64 x (AND y (MOVDconst [63]))) -> (SRD x (ANDconst <typ.Int64> [63] y))
(Rsh64Ux64 x (ANDconst <typ.UInt> [63] y)) -> (SRD x (ANDconst <typ.UInt> [63] y))
(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) -> (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) -> (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (AND y (MOVDconst [63]))) -> (SRAD x (ANDconst <typ.Int64> [63] y))
(Rsh64x64 x (ANDconst <typ.UInt> [63] y)) -> (SRAD x (ANDconst <typ.UInt> [63] y))
(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) -> (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) -> (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))

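// General case: Go defines a shift of width or more to produce 0 (or the
// sign) rather than the raw hardware result, so the count is conditioned
// first. Reading the replacement below inside out (a reasoning sketch,
// not a rule): (ADDconstForCarry [-64] y) sets the carry iff y >= 64
// unsigned; (MaskIfNotCarry ...) is then -1 for y < 64 and 0 otherwise;
// (ORN y mask) computes y | ^mask, i.e. y itself when y < 64 and all ones
// when y >= 64, an out-of-range count that makes the hardware shift
// produce 0 (or all sign bits).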
(Rsh64x64 x y) -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
(Rsh64Ux64 x y) -> (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
(Lsh64x64 x y) -> (SLD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))

(Lsh32x64 x (AND y (MOVDconst [31]))) -> (SLW x (ANDconst <typ.Int32> [31] y))
(Lsh32x64 x (ANDconst <typ.Int32> [31] y)) -> (SLW x (ANDconst <typ.Int32> [31] y))

(Rsh32Ux64 x (AND y (MOVDconst [31]))) -> (SRW x (ANDconst <typ.Int32> [31] y))
(Rsh32Ux64 x (ANDconst <typ.UInt> [31] y)) -> (SRW x (ANDconst <typ.UInt> [31] y))
(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) -> (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) -> (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))

(Rsh32x64 x (AND y (MOVDconst [31]))) -> (SRAW x (ANDconst <typ.Int32> [31] y))
(Rsh32x64 x (ANDconst <typ.UInt> [31] y)) -> (SRAW x (ANDconst <typ.UInt> [31] y))
(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) -> (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) -> (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))

(Rsh32x64 x y) -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
(Rsh32Ux64 x y) -> (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
(Lsh32x64 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))

(Rsh16x64 x y) -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
(Rsh16Ux64 x y) -> (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
(Lsh16x64 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))

(Rsh8x64 x y) -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
(Rsh8Ux64 x y) -> (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
(Lsh8x64 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))

(Rsh64x32 x y) -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
(Rsh64Ux32 x y) -> (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
(Lsh64x32 x y) -> (SLD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))

(Rsh32x32 x y) -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
(Rsh32Ux32 x y) -> (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
(Lsh32x32 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))

(Rsh16x32 x y) -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
(Rsh16Ux32 x y) -> (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
(Lsh16x32 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))

(Rsh8x32 x y) -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
(Rsh8Ux32 x y) -> (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
(Lsh8x32 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))


(Rsh64x16 x y) -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
(Rsh64Ux16 x y) -> (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
(Lsh64x16 x y) -> (SLD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))

(Rsh32x16 x y) -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
(Rsh32Ux16 x y) -> (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
(Lsh32x16 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))

(Rsh16x16 x y) -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
(Rsh16Ux16 x y) -> (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
(Lsh16x16 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))

(Rsh8x16 x y) -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
(Rsh8Ux16 x y) -> (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
(Lsh8x16 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))


(Rsh64x8 x y) -> (SRAD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
(Rsh64Ux8 x y) -> (SRD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
(Lsh64x8 x y) -> (SLD x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))

(Rsh32x8 x y) -> (SRAW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
(Rsh32Ux8 x y) -> (SRW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
(Lsh32x8 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))

(Rsh16x8 x y) -> (SRAW (SignExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
(Rsh16Ux8 x y) -> (SRW (ZeroExt16to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
(Lsh16x8 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))

(Rsh8x8 x y) -> (SRAW (SignExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
(Rsh8Ux8 x y) -> (SRW (ZeroExt8to32 x) (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
(Lsh8x8 x y) -> (SLW x (ORN y <typ.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))

// Cleaning up shift ops when input is masked
(MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && c + d < 0 -> (MOVDconst [-1])
(ORN x (MOVDconst [-1])) -> x

// Potentially useful optimizing rewrites.
// (ADDconstForCarry [k] c), k < 0 && (c < 0 || k+c >= 0) -> CarrySet
// (ADDconstForCarry [k] c), k < 0 && (c >= 0 && k+c < 0) -> CarryClear
// (MaskIfNotCarry CarrySet) -> 0
// (MaskIfNotCarry CarryClear) -> -1

(Addr {sym} base) -> (MOVDaddr {sym} base)
(LocalAddr {sym} base _) -> (MOVDaddr {sym} base)
(OffPtr [off] ptr) -> (ADD (MOVDconst <typ.Int64> [off]) ptr)

// TODO: optimize these cases?
(Ctz32NonZero x) -> (Ctz32 x)
(Ctz64NonZero x) -> (Ctz64 x)

(Ctz64 x) && objabi.GOPPC64<=8 -> (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
(Ctz64 x) -> (CNTTZD x)
(Ctz32 x) && objabi.GOPPC64<=8 -> (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
(Ctz32 x) -> (CNTTZW (MOVWZreg x))
(Ctz16 x) -> (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x)))
(Ctz8 x) -> (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x)))
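
// How the POPCNT-based Ctz above works: (x-1) &^ x (the ANDN) turns the
// trailing zeros of x into ones and clears every other bit, so its
// population count is the trailing-zero count. Worked example: x = 0b1000
// gives x-1 = 0b0111, (x-1) &^ x = 0b0111, popcount = 3 = Ctz(x).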

(BitLen64 x) -> (SUB (MOVDconst [64]) (CNTLZD <typ.Int> x))
(BitLen32 x) -> (SUB (MOVDconst [32]) (CNTLZW <typ.Int> x))
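// e.g. BitLen64(1): CNTLZD counts 63 leading zeros and 64-63 == 1.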

(PopCount64 x) -> (POPCNTD x)
(PopCount32 x) -> (POPCNTW (MOVWZreg x))
(PopCount16 x) -> (POPCNTW (MOVHZreg x))
(PopCount8 x) -> (POPCNTB (MOVBZreg x))

(And(64|32|16|8) x y) -> (AND x y)
(Or(64|32|16|8) x y) -> (OR x y)
(Xor(64|32|16|8) x y) -> (XOR x y)

(Neg(64|32|16|8) x) -> (NEG x)
(Neg64F x) -> (FNEG x)
(Neg32F x) -> (FNEG x)

(Com(64|32|16|8) x) -> (NOR x x)

// Lowering boolean ops
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
(Not x) -> (XORconst [1] x)

// Use ANDN for AND x NOT y
(AND x (NOR y y)) -> (ANDN x y)

// Lowering comparisons
(EqB x y) -> (ANDconst [1] (EQV x y))
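// EQV is bitwise xnor, so for boolean operands (0 or 1) the low bit of
// (EQV x y) is 1 exactly when x == y; ANDconst [1] extracts that bit.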
// Choosing the extension based on the operands' signedness sets up for sign/zero-extension elision later.
(Eq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Eq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Eq8 x y) -> (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Eq16 x y) -> (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Eq32 x y) -> (Equal (CMPW x y))
(Eq64 x y) -> (Equal (CMP x y))
(Eq32F x y) -> (Equal (FCMPU x y))
(Eq64F x y) -> (Equal (FCMPU x y))
(EqPtr x y) -> (Equal (CMP x y))

(NeqB x y) -> (XOR x y)
// Like Eq8 and Eq16, prefer sign extension, which is likely to enable later elision.
(Neq8 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Neq16 x y) && isSigned(x.Type) && isSigned(y.Type) -> (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Neq8 x y) -> (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Neq16 x y) -> (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Neq32 x y) -> (NotEqual (CMPW x y))
(Neq64 x y) -> (NotEqual (CMP x y))
(Neq32F x y) -> (NotEqual (FCMPU x y))
(Neq64F x y) -> (NotEqual (FCMPU x y))
(NeqPtr x y) -> (NotEqual (CMP x y))

(Less8 x y) -> (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Less16 x y) -> (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Less32 x y) -> (LessThan (CMPW x y))
(Less64 x y) -> (LessThan (CMP x y))
(Less32F x y) -> (FLessThan (FCMPU x y))
(Less64F x y) -> (FLessThan (FCMPU x y))

(Less8U x y) -> (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Less16U x y) -> (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Less32U x y) -> (LessThan (CMPWU x y))
(Less64U x y) -> (LessThan (CMPU x y))

(Leq8 x y) -> (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Leq16 x y) -> (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Leq32 x y) -> (LessEqual (CMPW x y))
(Leq64 x y) -> (LessEqual (CMP x y))
(Leq32F x y) -> (FLessEqual (FCMPU x y))
(Leq64F x y) -> (FLessEqual (FCMPU x y))

(Leq8U x y) -> (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Leq16U x y) -> (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) -> (LessEqual (CMPWU x y))
(Leq64U x y) -> (LessEqual (CMPU x y))

(Greater8 x y) -> (GreaterThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Greater16 x y) -> (GreaterThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Greater32 x y) -> (GreaterThan (CMPW x y))
(Greater64 x y) -> (GreaterThan (CMP x y))
(Greater(32|64)F x y) -> (FGreaterThan (FCMPU x y))

(Greater8U x y) -> (GreaterThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Greater16U x y) -> (GreaterThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Greater32U x y) -> (GreaterThan (CMPWU x y))
(Greater64U x y) -> (GreaterThan (CMPU x y))

(Geq8 x y) -> (GreaterEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Geq16 x y) -> (GreaterEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Geq32 x y) -> (GreaterEqual (CMPW x y))
(Geq64 x y) -> (GreaterEqual (CMP x y))
(Geq(32|64)F x y) -> (FGreaterEqual (FCMPU x y))

(Geq8U x y) -> (GreaterEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Geq16U x y) -> (GreaterEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Geq32U x y) -> (GreaterEqual (CMPWU x y))
(Geq64U x y) -> (GreaterEqual (CMPU x y))

// Absorb pseudo-ops into blocks.
(If (Equal cc) yes no) -> (EQ cc yes no)
(If (NotEqual cc) yes no) -> (NE cc yes no)
(If (LessThan cc) yes no) -> (LT cc yes no)
(If (LessEqual cc) yes no) -> (LE cc yes no)
(If (GreaterThan cc) yes no) -> (GT cc yes no)
(If (GreaterEqual cc) yes no) -> (GE cc yes no)
(If (FLessThan cc) yes no) -> (FLT cc yes no)
(If (FLessEqual cc) yes no) -> (FLE cc yes no)
(If (FGreaterThan cc) yes no) -> (FGT cc yes no)
(If (FGreaterEqual cc) yes no) -> (FGE cc yes no)

(If cond yes no) -> (NE (CMPWconst [0] cond) yes no)

// Absorb boolean tests into block
(NE (CMPWconst [0] (Equal cc)) yes no) -> (EQ cc yes no)
(NE (CMPWconst [0] (NotEqual cc)) yes no) -> (NE cc yes no)
(NE (CMPWconst [0] (LessThan cc)) yes no) -> (LT cc yes no)
(NE (CMPWconst [0] (LessEqual cc)) yes no) -> (LE cc yes no)
(NE (CMPWconst [0] (GreaterThan cc)) yes no) -> (GT cc yes no)
(NE (CMPWconst [0] (GreaterEqual cc)) yes no) -> (GE cc yes no)
(NE (CMPWconst [0] (FLessThan cc)) yes no) -> (FLT cc yes no)
(NE (CMPWconst [0] (FLessEqual cc)) yes no) -> (FLE cc yes no)
(NE (CMPWconst [0] (FGreaterThan cc)) yes no) -> (FGT cc yes no)
(NE (CMPWconst [0] (FGreaterEqual cc)) yes no) -> (FGE cc yes no)

// Elide compares of bit tests // TODO need to make both CC and result of ANDCC available.
(EQ (CMPconst [0] (ANDconst [c] x)) yes no) -> (EQ (ANDCCconst [c] x) yes no)
(NE (CMPconst [0] (ANDconst [c] x)) yes no) -> (NE (ANDCCconst [c] x) yes no)
(EQ (CMPWconst [0] (ANDconst [c] x)) yes no) -> (EQ (ANDCCconst [c] x) yes no)
(NE (CMPWconst [0] (ANDconst [c] x)) yes no) -> (NE (ANDCCconst [c] x) yes no)

// absorb flag constants into branches
(EQ (FlagEQ) yes no) -> (First nil yes no)
(EQ (FlagLT) yes no) -> (First nil no yes)
(EQ (FlagGT) yes no) -> (First nil no yes)

(NE (FlagEQ) yes no) -> (First nil no yes)
(NE (FlagLT) yes no) -> (First nil yes no)
(NE (FlagGT) yes no) -> (First nil yes no)

(LT (FlagEQ) yes no) -> (First nil no yes)
(LT (FlagLT) yes no) -> (First nil yes no)
(LT (FlagGT) yes no) -> (First nil no yes)

(LE (FlagEQ) yes no) -> (First nil yes no)
(LE (FlagLT) yes no) -> (First nil yes no)
(LE (FlagGT) yes no) -> (First nil no yes)

(GT (FlagEQ) yes no) -> (First nil no yes)
(GT (FlagLT) yes no) -> (First nil no yes)
(GT (FlagGT) yes no) -> (First nil yes no)

(GE (FlagEQ) yes no) -> (First nil yes no)
(GE (FlagLT) yes no) -> (First nil no yes)
(GE (FlagGT) yes no) -> (First nil yes no)

// absorb InvertFlags into branches
(LT (InvertFlags cmp) yes no) -> (GT cmp yes no)
(GT (InvertFlags cmp) yes no) -> (LT cmp yes no)
(LE (InvertFlags cmp) yes no) -> (GE cmp yes no)
(GE (InvertFlags cmp) yes no) -> (LE cmp yes no)
(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no)
(NE (InvertFlags cmp) yes no) -> (NE cmp yes no)

// constant comparisons
(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) -> (FlagLT)
(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) -> (FlagGT)

(CMPconst (MOVDconst [x]) [y]) && x==y -> (FlagEQ)
(CMPconst (MOVDconst [x]) [y]) && x<y -> (FlagLT)
(CMPconst (MOVDconst [x]) [y]) && x>y -> (FlagGT)

(CMPWUconst (MOVDconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) -> (FlagLT)
(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) -> (FlagGT)

(CMPUconst (MOVDconst [x]) [y]) && x==y -> (FlagEQ)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) -> (FlagLT)
(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) -> (FlagGT)

// other known comparisons
//(CMPconst (MOVBUreg _) [c]) && 0xff < c -> (FlagLT)
//(CMPconst (MOVHUreg _) [c]) && 0xffff < c -> (FlagLT)
//(CMPconst (ANDconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT)
//(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) -> (FlagLT)

// absorb flag constants into boolean values
(Equal (FlagEQ)) -> (MOVDconst [1])
(Equal (FlagLT)) -> (MOVDconst [0])
(Equal (FlagGT)) -> (MOVDconst [0])

(NotEqual (FlagEQ)) -> (MOVDconst [0])
(NotEqual (FlagLT)) -> (MOVDconst [1])
(NotEqual (FlagGT)) -> (MOVDconst [1])

(LessThan (FlagEQ)) -> (MOVDconst [0])
(LessThan (FlagLT)) -> (MOVDconst [1])
(LessThan (FlagGT)) -> (MOVDconst [0])

(LessEqual (FlagEQ)) -> (MOVDconst [1])
(LessEqual (FlagLT)) -> (MOVDconst [1])
(LessEqual (FlagGT)) -> (MOVDconst [0])

(GreaterThan (FlagEQ)) -> (MOVDconst [0])
(GreaterThan (FlagLT)) -> (MOVDconst [0])
(GreaterThan (FlagGT)) -> (MOVDconst [1])

(GreaterEqual (FlagEQ)) -> (MOVDconst [1])
(GreaterEqual (FlagLT)) -> (MOVDconst [0])
(GreaterEqual (FlagGT)) -> (MOVDconst [1])

// absorb InvertFlags into boolean values
(Equal (InvertFlags x)) -> (Equal x)
(NotEqual (InvertFlags x)) -> (NotEqual x)
(LessThan (InvertFlags x)) -> (GreaterThan x)
(GreaterThan (InvertFlags x)) -> (LessThan x)
(LessEqual (InvertFlags x)) -> (GreaterEqual x)
(GreaterEqual (InvertFlags x)) -> (LessEqual x)

// Elide compares of bit tests // TODO need to make both CC and result of ANDCC available.
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (ANDconst [c] x)) yes no) -> ((EQ|NE|LT|LE|GT|GE) (ANDCCconst [c] x) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (ANDconst [c] x)) yes no) -> ((EQ|NE|LT|LE|GT|GE) (ANDCCconst [c] x) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 -> ((EQ|NE|LT|LE|GT|GE) (ANDCC x y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 -> ((EQ|NE|LT|LE|GT|GE) (ORCC x y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 -> ((EQ|NE|LT|LE|GT|GE) (XORCC x y) yes no)

// Lowering loads
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && isSigned(t) -> (MOVWload ptr mem)
(Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) -> (MOVWZload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && isSigned(t) -> (MOVHload ptr mem)
(Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) -> (MOVHZload ptr mem)
(Load <t> ptr mem) && t.IsBoolean() -> (MOVBZload ptr mem)
(Load <t> ptr mem) && is8BitInt(t) && isSigned(t) -> (MOVBreg (MOVBZload ptr mem)) // PPC has no signed-byte load.
(Load <t> ptr mem) && is8BitInt(t) && !isSigned(t) -> (MOVBZload ptr mem)

(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)

(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is32BitFloat(val.Type) -> (FMOVDstore ptr val mem) // glitch from (Cvt32Fto64F x) -> x -- type is wrong
(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type)) -> (MOVDstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitInt(val.Type) -> (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)

// Using Zero instead of LoweredZero allows the
// target address to be folded where possible.
(Zero [0] _ mem) -> mem
(Zero [1] destptr mem) -> (MOVBstorezero destptr mem)
(Zero [2] destptr mem) ->
    (MOVHstorezero destptr mem)
(Zero [3] destptr mem) ->
    (MOVBstorezero [2] destptr
        (MOVHstorezero destptr mem))
(Zero [4] destptr mem) ->
    (MOVWstorezero destptr mem)
(Zero [5] destptr mem) ->
    (MOVBstorezero [4] destptr
        (MOVWstorezero destptr mem))
(Zero [6] destptr mem) ->
    (MOVHstorezero [4] destptr
        (MOVWstorezero destptr mem))
(Zero [7] destptr mem) ->
    (MOVBstorezero [6] destptr
        (MOVHstorezero [4] destptr
            (MOVWstorezero destptr mem)))

// MOVD for store uses the DS instruction form, which requires offsets that are a multiple of 4
(Zero [8] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
    (MOVDstorezero destptr mem)
(Zero [8] destptr mem) ->
    (MOVWstorezero [4] destptr
        (MOVWstorezero [0] destptr mem))
// Handle these cases only if aligned properly, otherwise use general case below
(Zero [12] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
    (MOVWstorezero [8] destptr
        (MOVDstorezero [0] destptr mem))
(Zero [16] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
    (MOVDstorezero [8] destptr
        (MOVDstorezero [0] destptr mem))
(Zero [24] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
    (MOVDstorezero [16] destptr
        (MOVDstorezero [8] destptr
            (MOVDstorezero [0] destptr mem)))
(Zero [32] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
    (MOVDstorezero [24] destptr
        (MOVDstorezero [16] destptr
            (MOVDstorezero [8] destptr
                (MOVDstorezero [0] destptr mem))))

// Handle cases not handled above
(Zero [s] ptr mem) -> (LoweredZero [s] ptr mem)

// moves
// Only the MOVD and MOVW instructions require 4 byte
// alignment in the offset field. The other MOVx instructions
// allow any alignment.
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBZload src mem) mem)
(Move [2] dst src mem) ->
    (MOVHstore dst (MOVHZload src mem) mem)
(Move [4] dst src mem) ->
    (MOVWstore dst (MOVWZload src mem) mem)
// MOVD for load and store requires offsets that are a multiple of 4
(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
    (MOVDstore dst (MOVDload src mem) mem)
(Move [8] dst src mem) ->
    (MOVWstore [4] dst (MOVWZload [4] src mem)
        (MOVWstore dst (MOVWZload src mem) mem))
(Move [3] dst src mem) ->
    (MOVBstore [2] dst (MOVBZload [2] src mem)
        (MOVHstore dst (MOVHload src mem) mem))
(Move [5] dst src mem) ->
    (MOVBstore [4] dst (MOVBZload [4] src mem)
        (MOVWstore dst (MOVWZload src mem) mem))
(Move [6] dst src mem) ->
    (MOVHstore [4] dst (MOVHZload [4] src mem)
        (MOVWstore dst (MOVWZload src mem) mem))
(Move [7] dst src mem) ->
    (MOVBstore [6] dst (MOVBZload [6] src mem)
        (MOVHstore [4] dst (MOVHZload [4] src mem)
            (MOVWstore dst (MOVWZload src mem) mem)))

// Large move uses a loop. Since the address is computed and the
// offset is zero, any alignment can be used.
(Move [s] dst src mem) && s > 8 ->
    (LoweredMove [s] dst src mem)

// Calls
// Lowering calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)

// Miscellaneous
(GetClosurePtr) -> (LoweredGetClosurePtr)
(GetCallerSP) -> (LoweredGetCallerSP)
(GetCallerPC) -> (LoweredGetCallerPC)
(IsNonNil ptr) -> (NotEqual (CMPconst [0] ptr))
(IsInBounds idx len) -> (LessThan (CMPU idx len))
(IsSliceInBounds idx len) -> (LessEqual (CMPU idx len))
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)

// Write barrier.
(WB {fn} destptr srcptr mem) -> (LoweredWB {fn} destptr srcptr mem)

(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 -> (LoweredPanicBoundsA [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 -> (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 -> (LoweredPanicBoundsC [kind] x y mem)

// Optimizations
// Note that PPC "logical" immediates come in 0:15 and 16:31 unsigned immediate forms,
// so ORconst, XORconst easily expand into a pair.
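// Illustrative expansion (assumed assembler syntax, for context only):
// ORconst [0x12345678] could be emitted as the pair
//     oris rT, rS, 0x1234   // OR immediate into the upper 16 bits
//     ori  rT, rT, 0x5678   // OR immediate into the lower 16 bits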

// Include very-large constants in the const-const case.
(AND (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c&d])
(OR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c|d])
(XOR (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c^d])

// Discover consts
(AND x (MOVDconst [c])) && isU16Bit(c) -> (ANDconst [c] x)
(XOR x (MOVDconst [c])) && isU32Bit(c) -> (XORconst [c] x)
(OR x (MOVDconst [c])) && isU32Bit(c) -> (ORconst [c] x)

// Simplify consts
(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
(ORconst [c] (ORconst [d] x)) -> (ORconst [c|d] x)
(XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
(ANDconst [-1] x) -> x
(ANDconst [0] _) -> (MOVDconst [0])
(XORconst [0] x) -> x
(ORconst [-1] _) -> (MOVDconst [-1])
(ORconst [0] x) -> x

// zero-extend of small and -> small and
(MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF -> y
(MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF -> y
(MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF -> y
(MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF -> y

// sign extend of small-positive and -> small-positive-and
(MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F -> y
(MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF -> y
(MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF -> y // 0xFFFF is the largest immediate constant; regarded as 32-bit it is > 0
(MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF -> y

// small and of zero-extend -> either zero-extend or small and
(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF -> y
(ANDconst [0xFF] y:(MOVBreg _)) -> y
(ANDconst [c] y:(MOVHZreg _)) && c&0xFFFF == 0xFFFF -> y
(ANDconst [0xFFFF] y:(MOVHreg _)) -> y

(AND (MOVDconst [c]) y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF -> y
(AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) -> (MOVWZreg x)
// normal case
(ANDconst [c] (MOV(B|BZ)reg x)) -> (ANDconst [c&0xFF] x)
(ANDconst [c] (MOV(H|HZ)reg x)) -> (ANDconst [c&0xFFFF] x)
(ANDconst [c] (MOV(W|WZ)reg x)) -> (ANDconst [c&0xFFFFFFFF] x)

// Eliminate unnecessary sign/zero extend following right shift
(MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) -> (SRWconst [c] (MOVBZreg x))
(MOV(H|W)Zreg (SRWconst [c] (MOVHZreg x))) -> (SRWconst [c] (MOVHZreg x))
(MOVWZreg (SRWconst [c] (MOVWZreg x))) -> (SRWconst [c] (MOVWZreg x))
(MOV(B|H|W)reg (SRAWconst [c] (MOVBreg x))) -> (SRAWconst [c] (MOVBreg x))
(MOV(H|W)reg (SRAWconst [c] (MOVHreg x))) -> (SRAWconst [c] (MOVHreg x))
(MOVWreg (SRAWconst [c] (MOVWreg x))) -> (SRAWconst [c] (MOVWreg x))

(MOVWZreg (SRWconst [c] x)) && sizeof(x.Type) <= 32 -> (SRWconst [c] x)
(MOVHZreg (SRWconst [c] x)) && sizeof(x.Type) <= 16 -> (SRWconst [c] x)
(MOVBZreg (SRWconst [c] x)) && sizeof(x.Type) == 8 -> (SRWconst [c] x)
(MOVWreg (SRAWconst [c] x)) && sizeof(x.Type) <= 32 -> (SRAWconst [c] x)
(MOVHreg (SRAWconst [c] x)) && sizeof(x.Type) <= 16 -> (SRAWconst [c] x)
(MOVBreg (SRAWconst [c] x)) && sizeof(x.Type) == 8 -> (SRAWconst [c] x)

// initial right shift will handle sign/zero extend
(MOVBZreg (SRDconst [c] x)) && c>=56 -> (SRDconst [c] x)
(MOVBreg (SRDconst [c] x)) && c>56 -> (SRDconst [c] x)
(MOVBreg (SRDconst [c] x)) && c==56 -> (SRADconst [c] x)
(MOVBZreg (SRWconst [c] x)) && c>=24 -> (SRWconst [c] x)
(MOVBreg (SRWconst [c] x)) && c>24 -> (SRWconst [c] x)
(MOVBreg (SRWconst [c] x)) && c==24 -> (SRAWconst [c] x)

(MOVHZreg (SRDconst [c] x)) && c>=48 -> (SRDconst [c] x)
(MOVHreg (SRDconst [c] x)) && c>48 -> (SRDconst [c] x)
(MOVHreg (SRDconst [c] x)) && c==48 -> (SRADconst [c] x)
(MOVHZreg (SRWconst [c] x)) && c>=16 -> (SRWconst [c] x)
(MOVHreg (SRWconst [c] x)) && c>16 -> (SRWconst [c] x)
(MOVHreg (SRWconst [c] x)) && c==16 -> (SRAWconst [c] x)

(MOVWZreg (SRDconst [c] x)) && c>=32 -> (SRDconst [c] x)
(MOVWreg (SRDconst [c] x)) && c>32 -> (SRDconst [c] x)
(MOVWreg (SRDconst [c] x)) && c==32 -> (SRADconst [c] x)

// Various redundant zero/sign extension combinations.
(MOVBZreg y:(MOVBZreg _)) -> y // repeat
(MOVBreg y:(MOVBreg _)) -> y // repeat
(MOVBreg (MOVBZreg x)) -> (MOVBreg x)
(MOVBZreg (MOVBreg x)) -> (MOVBZreg x)

// H - there are more combinations than these

(MOVHZreg y:(MOVHZreg _)) -> y // repeat
(MOVHZreg y:(MOVBZreg _)) -> y // wide of narrow
(MOVHZreg y:(MOVHBRload _ _)) -> y

(MOVHreg y:(MOVHreg _)) -> y // repeat
(MOVHreg y:(MOVBreg _)) -> y // wide of narrow

(MOVHreg y:(MOVHZreg x)) -> (MOVHreg x)
(MOVHZreg y:(MOVHreg x)) -> (MOVHZreg x)

// W - there are more combinations than these

(MOVWZreg y:(MOVWZreg _)) -> y // repeat
(MOVWZreg y:(MOVHZreg _)) -> y // wide of narrow
(MOVWZreg y:(MOVBZreg _)) -> y // wide of narrow
(MOVWZreg y:(MOVHBRload _ _)) -> y
(MOVWZreg y:(MOVWBRload _ _)) -> y

(MOVWreg y:(MOVWreg _)) -> y // repeat
(MOVWreg y:(MOVHreg _)) -> y // wide of narrow
(MOVWreg y:(MOVBreg _)) -> y // wide of narrow

(MOVWreg y:(MOVWZreg x)) -> (MOVWreg x)
(MOVWZreg y:(MOVWreg x)) -> (MOVWZreg x)

// Arithmetic constant ops

(ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x)
(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) -> (ADDconst [c+d] x)
(ADDconst [0] x) -> x
(SUB x (MOVDconst [c])) && is32Bit(-c) -> (ADDconst [-c] x)
// TODO deal with subtract-from-const

(ADDconst [c] (MOVDaddr [d] {sym} x)) -> (MOVDaddr [c+d] {sym} x)

// Use register moves instead of stores and loads to move int<->float values
// Common with math Float64bits, Float64frombits
(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) -> (MFVSRD x)
(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) -> (MTVSRD x)

(FMOVDstore [off] {sym} ptr (MTVSRD x) mem) -> (MOVDstore [off] {sym} ptr x mem)
(MOVDstore [off] {sym} ptr (MFVSRD x) mem) -> (FMOVDstore [off] {sym} ptr x mem)

(MTVSRD (MOVDconst [c])) -> (FMOVDconst [c])
(MFVSRD (FMOVDconst [c])) -> (MOVDconst [c])

(MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (FMOVDload [off] {sym} ptr mem)
(MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVDload [off] {sym} ptr mem)
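
// Illustrative Go source that benefits from the rules above (an assumed
// example, not part of this file):
//
//     u := math.Float64bits(f) // naively an FMOVDstore then MOVDload
//
// The store/load round trip through memory becomes a single MFVSRD
// (direct move from float to int register); Float64frombits similarly
// becomes a single MTVSRD.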

// Fold offsets for stores.
(MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} x val mem)
(MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} x val mem)
(MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVHstore [off1+off2] {sym} x val mem)
(MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVBstore [off1+off2] {sym} x val mem)

(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVSstore [off1+off2] {sym} ptr val mem)
(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVDstore [off1+off2] {sym} ptr val mem)

// Fold address into load/store.
// The assembler needs to generate several instructions and use a
// temp register to access a global, and each time it will reload
// the temp register. So don't fold the address of a global unless
// there is only one use.
(MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)

(FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)

(MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
    && (ptr.Op != OpSB || p.Uses == 1) ->
    (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

// Fold offsets for loads.
(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVSload [off1+off2] {sym} ptr mem)
(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(off1+off2) -> (FMOVDload [off1+off2] {sym} ptr mem)

(MOVDload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVDload [off1+off2] {sym} x mem)
(MOVWload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVWload [off1+off2] {sym} x mem)
(MOVWZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVWZload [off1+off2] {sym} x mem)
(MOVHload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHload [off1+off2] {sym} x mem)
(MOVHZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVHZload [off1+off2] {sym} x mem)
(MOVBZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) -> (MOVBZload [off1+off2] {sym} x mem)

// Determine load + addressing that can be done as a register indexed load
(MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 -> (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem)

// Determine indexed loads with constant values that can be done without index
(MOV(D|W|WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) -> (MOV(D|W|WZ|H|HZ|BZ)load [c] ptr mem)
(MOV(D|W|WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) -> (MOV(D|W|WZ|H|HZ|BZ)load [c] ptr mem)


// Store of zero -> storezero
(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVDstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)

// Fold offsets for storezero
(MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
    (MOVDstorezero [off1+off2] {sym} x mem)
(MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
    (MOVWstorezero [off1+off2] {sym} x mem)
(MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
    (MOVHstorezero [off1+off2] {sym} x mem)
(MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(off1+off2) ->
    (MOVBstorezero [off1+off2] {sym} x mem)

// Stores with addressing that can be done as indexed stores
(MOV(D|W|H|B)store [off] {sym} p:(ADD ptr idx) val mem) && off == 0 && sym == nil && p.Uses == 1 -> (MOV(D|W|H|B)storeidx ptr idx val mem)

// Stores with constant index values can be done without indexed instructions
(MOV(D|W|H|B)storeidx ptr (MOVDconst [c]) val mem) && is16Bit(c) -> (MOV(D|W|H|B)store [c] ptr val mem)
(MOV(D|W|H|B)storeidx (MOVDconst [c]) ptr val mem) && is16Bit(c) -> (MOV(D|W|H|B)store [c] ptr val mem)

// Fold symbols into storezero
(MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
    && (x.Op != OpSB || p.Uses == 1) ->
    (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
    && (x.Op != OpSB || p.Uses == 1) ->
    (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
    && (x.Op != OpSB || p.Uses == 1) ->
    (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
(MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
    && (x.Op != OpSB || p.Uses == 1) ->
    (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)

// atomic intrinsics
(AtomicLoad(8|32|64|Ptr) ptr mem) -> (LoweredAtomicLoad(8|32|64|Ptr) [1] ptr mem)
(AtomicLoadAcq32 ptr mem) -> (LoweredAtomicLoad32 [0] ptr mem)

(AtomicStore(32|64) ptr val mem) -> (LoweredAtomicStore(32|64) [1] ptr val mem)
(AtomicStoreRel32 ptr val mem) -> (LoweredAtomicStore32 [0] ptr val mem)
//(AtomicStorePtrNoWB ptr val mem) -> (STLR ptr val mem)

(AtomicExchange(32|64) ptr val mem) -> (LoweredAtomicExchange(32|64) ptr val mem)

(AtomicAdd(32|64) ptr val mem) -> (LoweredAtomicAdd(32|64) ptr val mem)

(AtomicCompareAndSwap(32|64) ptr old new_ mem) -> (LoweredAtomicCas(32|64) [1] ptr old new_ mem)
(AtomicCompareAndSwapRel32 ptr old new_ mem) -> (LoweredAtomicCas32 [0] ptr old new_ mem)

(AtomicAnd8 ptr val mem) -> (LoweredAtomicAnd8 ptr val mem)
(AtomicOr8 ptr val mem) -> (LoweredAtomicOr8 ptr val mem)

// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
(SignExt8to(16|32|64) x) -> (MOVBreg x)
(SignExt16to(32|64) x) -> (MOVHreg x)
(SignExt32to64 x) -> (MOVWreg x)

(ZeroExt8to(16|32|64) x) -> (MOVBZreg x)
(ZeroExt16to(32|64) x) -> (MOVHZreg x)
(ZeroExt32to64 x) -> (MOVWZreg x)

(Trunc(16|32|64)to8 <t> x) && isSigned(t) -> (MOVBreg x)
(Trunc(16|32|64)to8 x) -> (MOVBZreg x)
(Trunc(32|64)to16 <t> x) && isSigned(t) -> (MOVHreg x)
(Trunc(32|64)to16 x) -> (MOVHZreg x)
(Trunc64to32 <t> x) && isSigned(t) -> (MOVWreg x)
(Trunc64to32 x) -> (MOVWZreg x)

(Slicemask <t> x) -> (SRADconst (NEG <t> x) [63])

// Note that MOV??reg returns a 64-bit int, x is not necessarily that wide
// This may interact with other patterns in the future. (Compare with arm64)
(MOV(B|H|W)Zreg x:(MOVBZload _ _)) -> x
(MOV(B|H|W)Zreg x:(MOVBZloadidx _ _ _)) -> x
(MOV(H|W)Zreg x:(MOVHZload _ _)) -> x
(MOV(H|W)Zreg x:(MOVHZloadidx _ _ _)) -> x
(MOV(H|W)reg x:(MOVHload _ _)) -> x
(MOV(H|W)reg x:(MOVHloadidx _ _ _)) -> x
(MOVWZreg x:(MOVWZload _ _)) -> x
(MOVWZreg x:(MOVWZloadidx _ _ _)) -> x
(MOVWreg x:(MOVWload _ _)) -> x
(MOVWreg x:(MOVWloadidx _ _ _)) -> x

// don't extend if argument is already extended
(MOVBreg x:(Arg <t>)) && is8BitInt(t) && isSigned(t) -> x
(MOVBZreg x:(Arg <t>)) && is8BitInt(t) && !isSigned(t) -> x
(MOVHreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && isSigned(t) -> x
(MOVHZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && !isSigned(t) -> x
(MOVWreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t) -> x
(MOVWZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t) -> x

(MOVBZreg (MOVDconst [c])) -> (MOVDconst [int64(uint8(c))])
(MOVBreg (MOVDconst [c])) -> (MOVDconst [int64(int8(c))])
(MOVHZreg (MOVDconst [c])) -> (MOVDconst [int64(uint16(c))])
(MOVHreg (MOVDconst [c])) -> (MOVDconst [int64(int16(c))])
(MOVWreg (MOVDconst [c])) -> (MOVDconst [int64(int32(c))])
(MOVWZreg (MOVDconst [c])) -> (MOVDconst [int64(uint32(c))])


// Lose widening ops fed to stores
(MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOV(W|WZ)reg x) mem) -> (MOVWstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 -> (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
(MOVBstore [off] {sym} ptr (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 -> (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
(MOVBstoreidx [off] {sym} ptr idx (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) -> (MOVBstoreidx [off] {sym} ptr idx x mem)
(MOVHstoreidx [off] {sym} ptr idx (MOV(H|HZ|W|WZ)reg x) mem) -> (MOVHstoreidx [off] {sym} ptr idx x mem)
(MOVWstoreidx [off] {sym} ptr idx (MOV(W|WZ)reg x) mem) -> (MOVWstoreidx [off] {sym} ptr idx x mem)
(MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 -> (MOVBstoreidx [off] {sym} ptr idx (SRWconst <typ.UInt32> x [c]) mem)
(MOVBstoreidx [off] {sym} ptr idx (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 -> (MOVBstoreidx [off] {sym} ptr idx (SRWconst <typ.UInt32> x [c]) mem)
(MOVHBRstore {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) -> (MOVHBRstore {sym} ptr x mem)
(MOVWBRstore {sym} ptr (MOV(W|WZ)reg x) mem) -> (MOVWBRstore {sym} ptr x mem)

// Lose W-widening ops fed to compare-W
(CMPW x (MOVWreg y)) -> (CMPW x y)
(CMPW (MOVWreg x) y) -> (CMPW x y)
(CMPWU x (MOVWZreg y)) -> (CMPWU x y)
(CMPWU (MOVWZreg x) y) -> (CMPWU x y)

(CMP x (MOVDconst [c])) && is16Bit(c) -> (CMPconst x [c])
(CMP (MOVDconst [c]) y) && is16Bit(c) -> (InvertFlags (CMPconst y [c]))
(CMPW x (MOVDconst [c])) && is16Bit(c) -> (CMPWconst x [c])
(CMPW (MOVDconst [c]) y) && is16Bit(c) -> (InvertFlags (CMPWconst y [c]))

(CMPU x (MOVDconst [c])) && isU16Bit(c) -> (CMPUconst x [c])
(CMPU (MOVDconst [c]) y) && isU16Bit(c) -> (InvertFlags (CMPUconst y [c]))
(CMPWU x (MOVDconst [c])) && isU16Bit(c) -> (CMPWUconst x [c])
(CMPWU (MOVDconst [c]) y) && isU16Bit(c) -> (InvertFlags (CMPWUconst y [c]))

// A particular pattern seen in cgo code:
(AND (MOVDconst [c]) x:(MOVBZload _ _)) -> (ANDconst [c&0xFF] x)
(AND x:(MOVBZload _ _) (MOVDconst [c])) -> (ANDconst [c&0xFF] x)

// floating point negative abs
(FNEG (FABS x)) -> (FNABS x)
(FNEG (FNABS x)) -> (FABS x)

// floating-point fused multiply-add/sub
(FADD (FMUL x y) z) -> (FMADD x y z)
(FSUB (FMUL x y) z) -> (FMSUB x y z)
(FADDS (FMULS x y) z) -> (FMADDS x y z)
(FSUBS (FMULS x y) z) -> (FMSUBS x y z)

1037 // The following statements are found in encoding/binary functions UintXX (load) and PutUintXX (store)
1038 // and convert the statements in these functions from multiple single byte loads or stores to
1039 // the single largest possible load or store.
1040 // Some are marked big or little endian based on the order in which the bytes are loaded or stored,
1041 // not on the ordering of the machine. These are intended for little endian machines.
1042 // To implement for big endian machines, most rules would have to be duplicated but the
1043 // resulting rule would be reversed, i. e., MOVHZload on little endian would be MOVHBRload on big endian
1044 // and vice versa.
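// For instance, the first rule below matches code shaped like the body of
// binary.LittleEndian.Uint16 (a sketch of the standard library pattern):
//
//  func Uint16(b []byte) uint16 {
//      return uint16(b[0]) | uint16(b[1])<<8
//  }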
// b[0] | b[1]<<8 -> load 16-bit Little endian
(OR <t> x0:(MOVBZload [i0] {s} p mem)
  o1:(SL(W|D)const x1:(MOVBZload [i1] {s} p mem) [8]))
  && !config.BigEndian
  && i1 == i0+1
  && x0.Uses == 1 && x1.Uses == 1
  && o1.Uses == 1
  && mergePoint(b, x0, x1) != nil
  && clobber(x0) && clobber(x1) && clobber(o1)
  -> @mergePoint(b,x0,x1) (MOVHZload <t> {s} [i0] p mem)

// b[0]<<8 | b[1] -> load 16-bit Big endian on Little endian arch.
// Use byte-reverse indexed load for 2 bytes.
(OR <t> x0:(MOVBZload [i1] {s} p mem)
  o1:(SL(W|D)const x1:(MOVBZload [i0] {s} p mem) [8]))
  && !config.BigEndian
  && i1 == i0+1
  && x0.Uses == 1 && x1.Uses == 1
  && o1.Uses == 1
  && mergePoint(b, x0, x1) != nil
  && clobber(x0) && clobber(x1) && clobber(o1)
  -> @mergePoint(b,x0,x1) (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
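// This is the shape of binary.BigEndian.Uint16 on a little endian machine
// (sketch): uint16(b[1]) | uint16(b[0])<<8. The byte-reversed halfword load
// performs the swap as part of the load.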

// b[0]<<(n+8) | b[1]<<n -> load 16-bit Big endian (where n%8 == 0)
// Use byte-reverse indexed load for 2 bytes,
// then shift left to the correct position. Used to match subrules
// from longer rules.
(OR <t> s0:(SL(W|D)const x0:(MOVBZload [i1] {s} p mem) [n1])
  s1:(SL(W|D)const x1:(MOVBZload [i0] {s} p mem) [n2]))
  && !config.BigEndian
  && i1 == i0+1
  && n1%8 == 0
  && n2 == n1+8
  && x0.Uses == 1 && x1.Uses == 1
  && s0.Uses == 1 && s1.Uses == 1
  && mergePoint(b, x0, x1) != nil
  && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1)
  -> @mergePoint(b,x0,x1) (SLDconst <t> (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [n1])

// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit Little endian
// Builds on a previously merged 16-bit load (MOVHZload).
(OR <t> s1:(SL(W|D)const x2:(MOVBZload [i3] {s} p mem) [24])
  o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i2] {s} p mem) [16])
  x0:(MOVHZload [i0] {s} p mem)))
  && !config.BigEndian
  && i2 == i0+2
  && i3 == i0+3
  && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  && o0.Uses == 1
  && s0.Uses == 1 && s1.Uses == 1
  && mergePoint(b, x0, x1, x2) != nil
  && clobber(x0) && clobber(x1) && clobber(x2)
  && clobber(s0) && clobber(s1)
  && clobber(o0)
  -> @mergePoint(b,x0,x1,x2) (MOVWZload <t> {s} [i0] p mem)
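// Matches the expanded form of binary.LittleEndian.Uint32 (sketch):
//
//  func Uint32(b []byte) uint32 {
//      return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
//  }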

// b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] -> load 32-bit Big endian order on Little endian arch
// Use byte-reverse indexed load for 4 bytes with computed address.
// Could be used to match subrules of a longer rule.
(OR <t> s1:(SL(W|D)const x2:(MOVBZload [i0] {s} p mem) [24])
  o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i1] {s} p mem) [16])
  x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem)))
  && !config.BigEndian
  && i1 == i0+1
  && i2 == i0+2
  && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  && o0.Uses == 1
  && s0.Uses == 1 && s1.Uses == 1
  && mergePoint(b, x0, x1, x2) != nil
  && clobber(x0) && clobber(x1) && clobber(x2)
  && clobber(s0) && clobber(s1)
  && clobber(o0)
  -> @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
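// This and the next rule together cover binary.BigEndian.Uint32 (sketch);
// the two variants differ only in how the OR tree was associated by earlier
// rewrites:
//
//  func Uint32(b []byte) uint32 {
//      return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
//  }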

// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit Big endian order on Little endian arch
// Use byte-reverse indexed load for 4 bytes with computed address.
// Could be used to match subrules of a longer rule.
(OR <t> x0:(MOVBZload [i3] {s} p mem)
  o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i2] {s} p mem) [8])
  s1:(SL(W|D)const x2:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [16])))
  && !config.BigEndian
  && i2 == i0+2
  && i3 == i0+3
  && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  && o0.Uses == 1
  && s0.Uses == 1 && s1.Uses == 1
  && mergePoint(b, x0, x1, x2) != nil
  && clobber(x0) && clobber(x1) && clobber(x2)
  && clobber(s0) && clobber(s1)
  && clobber(o0)
  -> @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)

// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 -> load 32-bit Big endian order on Little endian arch
// Use byte-reverse indexed load for 4 bytes with computed address.
// Used to match longer rules.
(OR <t> s2:(SLDconst x2:(MOVBZload [i3] {s} p mem) [32])
  o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i2] {s} p mem) [40])
  s0:(SLDconst x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [48])))
  && !config.BigEndian
  && i2 == i0+2
  && i3 == i0+3
  && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  && o0.Uses == 1
  && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1
  && mergePoint(b, x0, x1, x2) != nil
  && clobber(x0) && clobber(x1) && clobber(x2)
  && clobber(s0) && clobber(s1) && clobber(s2)
  && clobber(o0)
  -> @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])

// b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 32-bit Big endian order on Little endian arch
// Use byte-reverse indexed load for 4 bytes with computed address.
// Used to match longer rules.
(OR <t> s2:(SLDconst x2:(MOVBZload [i0] {s} p mem) [56])
  o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48])
  s0:(SLDconst x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem) [32])))
  && !config.BigEndian
  && i1 == i0+1
  && i2 == i0+2
  && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  && o0.Uses == 1
  && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1
  && mergePoint(b, x0, x1, x2) != nil
  && clobber(x0) && clobber(x1) && clobber(x2)
  && clobber(s0) && clobber(s1) && clobber(s2)
  && clobber(o0)
  -> @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])

// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit Little endian
// Rules with commutative ops and many operands will result in extremely large functions in rewritePPC64,
// so matching shorter previously defined subrules is important.
// Offset must be a multiple of 4 for MOVD.
(OR <t> s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])
  o5:(OR <t> s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])
  o4:(OR <t> s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])
  o3:(OR <t> s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])
  x0:(MOVWZload {s} [i0] p mem)))))
  && !config.BigEndian
  && i0%4 == 0
  && i4 == i0+4
  && i5 == i0+5
  && i6 == i0+6
  && i7 == i0+7
  && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
  && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1
  && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1
  && mergePoint(b, x0, x4, x5, x6, x7) != nil
  && clobber(x0) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7)
  && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6)
  && clobber(o3) && clobber(o4) && clobber(o5)
  -> @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload <t> {s} [i0] p mem)
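// Matches the expanded form of binary.LittleEndian.Uint64 (sketch), after the
// low four bytes have already been merged into the MOVWZload by the 32-bit
// rule above:
//
//  func Uint64(b []byte) uint64 {
//      return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
//          uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
//  }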

// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit Big endian ordered bytes on Little endian arch
// Use byte-reverse indexed load of 8 bytes.
// Rules with commutative ops and many operands can result in extremely large functions in rewritePPC64,
// so matching shorter previously defined subrules is important.
(OR <t> s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56])
  o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48])
  o1:(OR <t> s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40])
  o2:(OR <t> s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32])
  x4:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i4] {s} p) mem)))))
  && !config.BigEndian
  && i1 == i0+1
  && i2 == i0+2
  && i3 == i0+3
  && i4 == i0+4
  && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
  && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
  && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
  && mergePoint(b, x0, x1, x2, x3, x4) != nil
  && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4)
  && clobber(o0) && clobber(o1) && clobber(o2)
  && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3)
  -> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
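// This and the following rule cover binary.BigEndian.Uint64 (sketch):
//
//  func Uint64(b []byte) uint64 {
//      return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
//          uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
//  }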

// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] -> load 64-bit Big endian ordered bytes on Little endian arch
// Use byte-reverse indexed load of 8 bytes.
// Rules with commutative ops and many operands can result in extremely large functions in rewritePPC64,
// so matching shorter previously defined subrules is important.
(OR <t> x7:(MOVBZload [i7] {s} p mem)
  o5:(OR <t> s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8])
  o4:(OR <t> s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16])
  o3:(OR <t> s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24])
  s0:(SL(W|D)const x3:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])))))
  && !config.BigEndian
  && i4 == i0+4
  && i5 == i0+5
  && i6 == i0+6
  && i7 == i0+7
  && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
  && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1
  && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1
  && mergePoint(b, x3, x4, x5, x6, x7) != nil
  && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7)
  && clobber(o3) && clobber(o4) && clobber(o5)
  && clobber(s0) && clobber(s4) && clobber(s5) && clobber(s6)
  -> @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)

// 2 byte store Little endian as in:
// b[0] = byte(v >> 16)
// b[1] = byte(v >> 24)
// Added for use in matching longer rules.
(MOVBstore [i1] {s} p (SR(W|D)const w [24])
  x0:(MOVBstore [i0] {s} p (SR(W|D)const w [16]) mem))
  && !config.BigEndian
  && x0.Uses == 1
  && i1 == i0+1
  && clobber(x0)
  -> (MOVHstore [i0] {s} p (SRWconst <typ.UInt16> w [16]) mem)

// 2 byte store Little endian as in:
// b[0] = byte(v)
// b[1] = byte(v >> 8)
(MOVBstore [i1] {s} p (SR(W|D)const w [8])
  x0:(MOVBstore [i0] {s} p w mem))
  && !config.BigEndian
  && x0.Uses == 1
  && i1 == i0+1
  && clobber(x0)
  -> (MOVHstore [i0] {s} p w mem)

// 4 byte store Little endian as in:
// b[0:1] = uint16(v)
// b[2:3] = uint16(v >> 16)
(MOVHstore [i1] {s} p (SR(W|D)const w [16])
  x0:(MOVHstore [i0] {s} p w mem))
  && !config.BigEndian
  && x0.Uses == 1
  && i1 == i0+2
  && clobber(x0)
  -> (MOVWstore [i0] {s} p w mem)
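// Together with the 2 byte rules above, this merges the stores emitted for
// binary.LittleEndian.PutUint32 (sketch) into a single MOVWstore:
//
//  func PutUint32(b []byte, v uint32) {
//      b[0] = byte(v)
//      b[1] = byte(v >> 8)
//      b[2] = byte(v >> 16)
//      b[3] = byte(v >> 24)
//  }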

// 4 byte store Big endian as in:
// b[0] = byte(v >> 24)
// b[1] = byte(v >> 16)
// b[2] = byte(v >> 8)
// b[3] = byte(v)
// Use byte-reverse indexed 4 byte store.
(MOVBstore [i3] {s} p w
  x0:(MOVBstore [i2] {s} p (SRWconst w [8])
  x1:(MOVBstore [i1] {s} p (SRWconst w [16])
  x2:(MOVBstore [i0] {s} p (SRWconst w [24]) mem))))
  && !config.BigEndian
  && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
  && i1 == i0+1 && i2 == i0+2 && i3 == i0+3
  && clobber(x0) && clobber(x1) && clobber(x2)
  -> (MOVWBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
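// In Go source this is what a binary.BigEndian.PutUint32(b, v) call expands
// to (sketch); the byte-reversed word store writes all four bytes in big
// endian order with one instruction.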

// The 2 byte store rule appears after the 4 byte store rule so that the
// 2 byte match is not tried first.
// If the 4 byte store were built on top of the 2 byte store, there would be
// variations on the MOVDaddr subrule that would require additional rules
// to be written.

// 2 byte store Big endian as in:
// b[0] = byte(v >> 8)
// b[1] = byte(v)
(MOVBstore [i1] {s} p w x0:(MOVBstore [i0] {s} p (SRWconst w [8]) mem))
  && !config.BigEndian
  && x0.Uses == 1
  && i1 == i0+1
  && clobber(x0)
  -> (MOVHBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)

// 8 byte store Little endian as in:
// b[0] = byte(v)
// b[1] = byte(v >> 8)
// b[2] = byte(v >> 16)
// b[3] = byte(v >> 24)
// b[4] = byte(v >> 32)
// b[5] = byte(v >> 40)
// b[6] = byte(v >> 48)
// b[7] = byte(v >> 56)
// Built on previously defined rules.
// Offset must be a multiple of 4 for MOVDstore.
(MOVBstore [i7] {s} p (SRDconst w [56])
  x0:(MOVBstore [i6] {s} p (SRDconst w [48])
  x1:(MOVBstore [i5] {s} p (SRDconst w [40])
  x2:(MOVBstore [i4] {s} p (SRDconst w [32])
  x3:(MOVWstore [i0] {s} p w mem)))))
  && !config.BigEndian
  && i0%4 == 0
  && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
  && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7
  && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3)
  -> (MOVDstore [i0] {s} p w mem)

// 8 byte store Big endian as in:
// b[0] = byte(v >> 56)
// b[1] = byte(v >> 48)
// b[2] = byte(v >> 40)
// b[3] = byte(v >> 32)
// b[4] = byte(v >> 24)
// b[5] = byte(v >> 16)
// b[6] = byte(v >> 8)
// b[7] = byte(v)
// Use byte-reverse indexed 8 byte store.
(MOVBstore [i7] {s} p w
  x0:(MOVBstore [i6] {s} p (SRDconst w [8])
  x1:(MOVBstore [i5] {s} p (SRDconst w [16])
  x2:(MOVBstore [i4] {s} p (SRDconst w [24])
  x3:(MOVBstore [i3] {s} p (SRDconst w [32])
  x4:(MOVBstore [i2] {s} p (SRDconst w [40])
  x5:(MOVBstore [i1] {s} p (SRDconst w [48])
  x6:(MOVBstore [i0] {s} p (SRDconst w [56]) mem))))))))
  && !config.BigEndian
  && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1
  && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7
  && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
  -> (MOVDBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
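// Together, the two 8 byte store rules collapse the eight per-byte stores of
// binary.LittleEndian.PutUint64 into a single MOVDstore and those of
// binary.BigEndian.PutUint64 into a single MOVDBRstore.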