// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

(Add(Ptr|32|16|8) x y) -> (ADD x y)
(Add(32|64)F x y) -> (ADD(F|D) x y)

(Select0 (Add32carry <t> x y)) -> (ADD <t.FieldType(0)> x y)
(Select1 (Add32carry <t> x y)) -> (SGTU <typ.Bool> x (ADD <t.FieldType(0)> x y))
(Add32withcarry <t> x y c) -> (ADD c (ADD <t> x y))
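// Select1 above computes the carry bit: the 32-bit sum wraps exactly when
// x > x+y unsigned; e.g. x=0xffffffff, y=1 gives sum 0 and SGTU(x, 0) = 1.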

(Sub(Ptr|32|16|8) x y) -> (SUB x y)
(Sub(32|64)F x y) -> (SUB(F|D) x y)

(Select0 (Sub32carry <t> x y)) -> (SUB <t.FieldType(0)> x y)
(Select1 (Sub32carry <t> x y)) -> (SGTU <typ.Bool> (SUB <t.FieldType(0)> x y) x)
(Sub32withcarry <t> x y c) -> (SUB (SUB <t> x y) c)
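// Select1 above computes the borrow bit: x-y wraps exactly when the result
// exceeds x unsigned; e.g. x=0, y=1 gives 0xffffffff and SGTU(0xffffffff, 0) = 1.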

(Mul(32|16|8) x y) -> (MUL x y)
(Mul(32|64)F x y) -> (MUL(F|D) x y)

(Hmul(32|32u) x y) -> (Select0 (MUL(T|TU) x y))
(Mul32uhilo x y) -> (MULTU x y)

(Div32 x y) -> (Select1 (DIV x y))
(Div32u x y) -> (Select1 (DIVU x y))
(Div16 x y) -> (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y)))
(Div16u x y) -> (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Div8 x y) -> (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y)))
(Div8u x y) -> (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Div(32|64)F x y) -> (DIV(F|D) x y)

(Mod32 x y) -> (Select0 (DIV x y))
(Mod32u x y) -> (Select0 (DIVU x y))
(Mod16 x y) -> (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y)))
(Mod16u x y) -> (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Mod8 x y) -> (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y)))
(Mod8u x y) -> (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))

// (x + y) / 2 with x>=y -> (x - y) / 2 + y
(Avg32u <t> x y) -> (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
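// The rewrite above is overflow-safe where x+y is not: e.g. x=0xffffffff,
// y=1 gives a true average of 0x80000000, and (x-y)>>1 + y =
// 0x7fffffff + 1 = 0x80000000.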

(And(32|16|8) x y) -> (AND x y)
(Or(32|16|8) x y) -> (OR x y)
(Xor(32|16|8) x y) -> (XOR x y)

// constant shifts
// generic opt rewrites all constant shifts to shift by Const64
(Lsh32x64 x (Const64 [c])) && uint32(c) < 32 -> (SLLconst x [c])
(Rsh32x64 x (Const64 [c])) && uint32(c) < 32 -> (SRAconst x [c])
(Rsh32Ux64 x (Const64 [c])) && uint32(c) < 32 -> (SRLconst x [c])
(Lsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SLLconst x [c])
(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SRAconst (SLLconst <typ.UInt32> x [16]) [c+16])
(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 -> (SRLconst (SLLconst <typ.UInt32> x [16]) [c+16])
(Lsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SLLconst x [c])
(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SRAconst (SLLconst <typ.UInt32> x [24]) [c+24])
(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 -> (SRLconst (SLLconst <typ.UInt32> x [24]) [c+24])
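// In the 16- and 8-bit right-shift rules above, the operand is first shifted
// left so its top bit lands in bit 31 (discarding the undefined high bits),
// then shifted right by c plus that same amount; e.g. Rsh16x64 by c becomes
// SRA(x<<16, c+16), which extends from the 16-bit sign bit.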

// large constant shifts
(Lsh32x64 _ (Const64 [c])) && uint32(c) >= 32 -> (MOVWconst [0])
(Rsh32Ux64 _ (Const64 [c])) && uint32(c) >= 32 -> (MOVWconst [0])
(Lsh16x64 _ (Const64 [c])) && uint32(c) >= 16 -> (MOVWconst [0])
(Rsh16Ux64 _ (Const64 [c])) && uint32(c) >= 16 -> (MOVWconst [0])
(Lsh8x64 _ (Const64 [c])) && uint32(c) >= 8 -> (MOVWconst [0])
(Rsh8Ux64 _ (Const64 [c])) && uint32(c) >= 8 -> (MOVWconst [0])
// large constant signed right shifts: the result is the sign bit copied into every position
(Rsh32x64 x (Const64 [c])) && uint32(c) >= 32 -> (SRAconst x [31])
(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 -> (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 -> (SRAconst (SLLconst <typ.UInt32> x [24]) [31])

// shifts
// The hardware instruction uses only the low 5 bits of the shift amount,
// so we compare against 32 to preserve Go semantics for larger shifts.
(Lsh32x32 <t> x y) -> (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
(Lsh32x16 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt16to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Lsh32x8 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt8to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Lsh16x32 <t> x y) -> (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
(Lsh16x16 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt16to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Lsh16x8 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt8to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Lsh8x32 <t> x y) -> (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
(Lsh8x16 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt16to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Lsh8x8 <t> x y) -> (CMOVZ (SLL <t> x (ZeroExt8to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Rsh32Ux32 <t> x y) -> (CMOVZ (SRL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
(Rsh32Ux16 <t> x y) -> (CMOVZ (SRL <t> x (ZeroExt16to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Rsh32Ux8 <t> x y) -> (CMOVZ (SRL <t> x (ZeroExt8to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Rsh16Ux32 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
(Rsh16Ux16 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Rsh16Ux8 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt8to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Rsh8Ux32 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
(Rsh8Ux16 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Rsh8Ux8 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Rsh32x32 x y) -> (SRA x (CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
(Rsh32x16 x y) -> (SRA x (CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
(Rsh32x8 x y) -> (SRA x (CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))

(Rsh16x32 x y) -> (SRA (SignExt16to32 x) (CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
(Rsh16x16 x y) -> (SRA (SignExt16to32 x) (CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
(Rsh16x8 x y) -> (SRA (SignExt16to32 x) (CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))

(Rsh8x32 x y) -> (SRA (SignExt16to32 x) (CMOVZ <typ.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
(Rsh8x16 x y) -> (SRA (SignExt16to32 x) (CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
(Rsh8x8 x y) -> (SRA (SignExt16to32 x) (CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
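// An illustrative expansion of Lsh32x32 (SSA-level pseudocode, not exact
// codegen; register assignment is up to the allocator):
//	t    = x << (y & 31)      // SLL: the hardware masks the shift amount to 5 bits
//	cond = 32 >u y            // SGTUconst: 1 if y < 32, else 0
//	res  = cond != 0 ? t : 0  // CMOVZ: shifts of 32 or more yield 0, per Go semantics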

// rotates
(RotateLeft8 <t> x (MOVWconst [c])) -> (Or8 (Lsh8x32 <t> x (MOVWconst [c&7])) (Rsh8Ux32 <t> x (MOVWconst [-c&7])))
(RotateLeft16 <t> x (MOVWconst [c])) -> (Or16 (Lsh16x32 <t> x (MOVWconst [c&15])) (Rsh16Ux32 <t> x (MOVWconst [-c&15])))
(RotateLeft32 <t> x (MOVWconst [c])) -> (Or32 (Lsh32x32 <t> x (MOVWconst [c&31])) (Rsh32Ux32 <t> x (MOVWconst [-c&31])))
(RotateLeft64 <t> x (MOVWconst [c])) -> (Or64 (Lsh64x32 <t> x (MOVWconst [c&63])) (Rsh64Ux32 <t> x (MOVWconst [-c&63])))

// unary ops
(Neg(32|16|8) x) -> (NEG x)
(Neg(32|64)F x) -> (NEG(F|D) x)

(Com(32|16|8) x) -> (NORconst [0] x)

(Sqrt x) -> (SQRTD x)

// TODO: optimize this case?
(Ctz32NonZero x) -> (Ctz32 x)

// count trailing zeros
// 32 - CLZ(x&-x - 1)
(Ctz32 <t> x) -> (SUB (MOVWconst [32]) (CLZ <t> (SUBconst <t> [1] (AND <t> x (NEG <t> x)))))
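// A worked check of the identity above: x & -x isolates the lowest set bit,
// say bit k; subtracting 1 leaves a mask of k ones, whose CLZ is 32-k, so
// 32 - CLZ = k. E.g. x = 0b1000: mask = 0b0111, CLZ = 29, result 3.
// For x = 0, the mask is 0xffffffff, CLZ = 0, and the result is 32.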

// bit length
(BitLen32 <t> x) -> (SUB (MOVWconst [32]) (CLZ <t> x))
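// e.g. BitLen32(8) = 32 - CLZ(8) = 32 - 28 = 4, and BitLen32(0) = 32 - 32 = 0.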

// boolean ops -- booleans are represented with 0=false, 1=true
(AndB x y) -> (AND x y)
(OrB x y) -> (OR x y)
(EqB x y) -> (XORconst [1] (XOR <typ.Bool> x y))
(NeqB x y) -> (XOR x y)
(Not x) -> (XORconst [1] x)

// constants
(Const(32|16|8) [val]) -> (MOVWconst [val])
(Const(32|64)F [val]) -> (MOV(F|D)const [val])
(ConstNil) -> (MOVWconst [0])
(ConstBool [b]) -> (MOVWconst [b])

// truncations
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 x) -> x
(Trunc32to8 x) -> x
(Trunc32to16 x) -> x

// Zero-/Sign-extensions
(ZeroExt8to16 x) -> (MOVBUreg x)
(ZeroExt8to32 x) -> (MOVBUreg x)
(ZeroExt16to32 x) -> (MOVHUreg x)

(SignExt8to16 x) -> (MOVBreg x)
(SignExt8to32 x) -> (MOVBreg x)
(SignExt16to32 x) -> (MOVHreg x)

(Signmask x) -> (SRAconst x [31])
(Zeromask x) -> (NEG (SGTU x (MOVWconst [0])))
(Slicemask <t> x) -> (SRAconst (NEG <t> x) [31])
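// Zeromask(x) is 0 when x == 0 and 0xffffffff otherwise: SGTU(x, 0) yields
// 0 or 1, and NEG turns the 1 into all ones. Slicemask works the same way
// through the sign bit: for the non-negative lengths it is applied to, -x is
// negative exactly when x > 0, so the arithmetic shift by 31 broadcasts that
// into a full mask.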

// float <-> int conversion
(Cvt32to(32|64)F x) -> (MOVW(F|D) x)
(Cvt(32|64)Fto32 x) -> (TRUNC(F|D)W x)
(Cvt32Fto64F x) -> (MOVFD x)
(Cvt64Fto32F x) -> (MOVDF x)

(Round(32|64)F x) -> x

// comparisons
(Eq8 x y) -> (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Eq16 x y) -> (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Eq32 x y) -> (SGTUconst [1] (XOR x y))
(EqPtr x y) -> (SGTUconst [1] (XOR x y))
(Eq(32|64)F x y) -> (FPFlagTrue (CMPEQ(F|D) x y))

(Neq8 x y) -> (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]))
(Neq16 x y) -> (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]))
(Neq32 x y) -> (SGTU (XOR x y) (MOVWconst [0]))
(NeqPtr x y) -> (SGTU (XOR x y) (MOVWconst [0]))
(Neq(32|64)F x y) -> (FPFlagFalse (CMPEQ(F|D) x y))

(Less8 x y) -> (SGT (SignExt8to32 y) (SignExt8to32 x))
(Less16 x y) -> (SGT (SignExt16to32 y) (SignExt16to32 x))
(Less32 x y) -> (SGT y x)
(Less(32|64)F x y) -> (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN

(Less8U x y) -> (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x))
(Less16U x y) -> (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x))
(Less32U x y) -> (SGTU y x)

(Leq8 x y) -> (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y)))
(Leq16 x y) -> (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y)))
(Leq32 x y) -> (XORconst [1] (SGT x y))
(Leq(32|64)F x y) -> (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN

(Leq8U x y) -> (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Leq16U x y) -> (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) -> (XORconst [1] (SGTU x y))

(Greater8 x y) -> (SGT (SignExt8to32 x) (SignExt8to32 y))
(Greater16 x y) -> (SGT (SignExt16to32 x) (SignExt16to32 y))
(Greater32 x y) -> (SGT x y)
(Greater(32|64)F x y) -> (FPFlagTrue (CMPGT(F|D) x y))

(Greater8U x y) -> (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y))
(Greater16U x y) -> (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y))
(Greater32U x y) -> (SGTU x y)

(Geq8 x y) -> (XORconst [1] (SGT (SignExt8to32 y) (SignExt8to32 x)))
(Geq16 x y) -> (XORconst [1] (SGT (SignExt16to32 y) (SignExt16to32 x)))
(Geq32 x y) -> (XORconst [1] (SGT y x))
(Geq(32|64)F x y) -> (FPFlagTrue (CMPGE(F|D) x y))

(Geq8U x y) -> (XORconst [1] (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x)))
(Geq16U x y) -> (XORconst [1] (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x)))
(Geq32U x y) -> (XORconst [1] (SGTU y x))

(OffPtr [off] ptr:(SP)) -> (MOVWaddr [off] ptr)
(OffPtr [off] ptr) -> (ADDconst [off] ptr)

(Addr {sym} base) -> (MOVWaddr {sym} base)
(LocalAddr {sym} base _) -> (MOVWaddr {sym} base)

// loads
(Load <t> ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) -> (MOVWload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) -> (MOVFload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (MOVDload ptr mem)

// stores
(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)

// zero instructions
(Zero [0] _ mem) -> mem
(Zero [1] ptr mem) -> (MOVBstore ptr (MOVWconst [0]) mem)
(Zero [2] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
	(MOVHstore ptr (MOVWconst [0]) mem)
(Zero [2] ptr mem) ->
	(MOVBstore [1] ptr (MOVWconst [0])
		(MOVBstore [0] ptr (MOVWconst [0]) mem))
(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVWstore ptr (MOVWconst [0]) mem)
(Zero [4] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
	(MOVHstore [2] ptr (MOVWconst [0])
		(MOVHstore [0] ptr (MOVWconst [0]) mem))
(Zero [4] ptr mem) ->
	(MOVBstore [3] ptr (MOVWconst [0])
		(MOVBstore [2] ptr (MOVWconst [0])
			(MOVBstore [1] ptr (MOVWconst [0])
				(MOVBstore [0] ptr (MOVWconst [0]) mem))))
(Zero [3] ptr mem) ->
	(MOVBstore [2] ptr (MOVWconst [0])
		(MOVBstore [1] ptr (MOVWconst [0])
			(MOVBstore [0] ptr (MOVWconst [0]) mem)))
(Zero [6] {t} ptr mem) && t.(*types.Type).Alignment()%2 == 0 ->
	(MOVHstore [4] ptr (MOVWconst [0])
		(MOVHstore [2] ptr (MOVWconst [0])
			(MOVHstore [0] ptr (MOVWconst [0]) mem)))
(Zero [8] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVWstore [4] ptr (MOVWconst [0])
		(MOVWstore [0] ptr (MOVWconst [0]) mem))
(Zero [12] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVWstore [8] ptr (MOVWconst [0])
		(MOVWstore [4] ptr (MOVWconst [0])
			(MOVWstore [0] ptr (MOVWconst [0]) mem)))
(Zero [16] {t} ptr mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVWstore [12] ptr (MOVWconst [0])
		(MOVWstore [8] ptr (MOVWconst [0])
			(MOVWstore [4] ptr (MOVWconst [0])
				(MOVWstore [0] ptr (MOVWconst [0]) mem))))

// large or unaligned zeroing uses a loop
(Zero [s] {t} ptr mem)
	&& (s > 16 || t.(*types.Type).Alignment()%4 != 0) ->
	(LoweredZero [t.(*types.Type).Alignment()]
		ptr
		(ADDconst <ptr.Type> ptr [s-moveSize(t.(*types.Type).Alignment(), config)])
		mem)
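// The ADDconst above is the address of the last unit LoweredZero clears
// (ptr + size - width of one store); the lowered loop advances a pointer
// and stops once it passes that address (see LoweredZero in MIPSOps.go).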

// moves
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBUload src mem) mem)
(Move [2] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
	(MOVHstore dst (MOVHUload src mem) mem)
(Move [2] dst src mem) ->
	(MOVBstore [1] dst (MOVBUload [1] src mem)
		(MOVBstore dst (MOVBUload src mem) mem))
(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVWstore dst (MOVWload src mem) mem)
(Move [4] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
	(MOVHstore [2] dst (MOVHUload [2] src mem)
		(MOVHstore dst (MOVHUload src mem) mem))
(Move [4] dst src mem) ->
	(MOVBstore [3] dst (MOVBUload [3] src mem)
		(MOVBstore [2] dst (MOVBUload [2] src mem)
			(MOVBstore [1] dst (MOVBUload [1] src mem)
				(MOVBstore dst (MOVBUload src mem) mem))))
(Move [3] dst src mem) ->
	(MOVBstore [2] dst (MOVBUload [2] src mem)
		(MOVBstore [1] dst (MOVBUload [1] src mem)
			(MOVBstore dst (MOVBUload src mem) mem)))
(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVWstore [4] dst (MOVWload [4] src mem)
		(MOVWstore dst (MOVWload src mem) mem))
(Move [8] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
	(MOVHstore [6] dst (MOVHload [6] src mem)
		(MOVHstore [4] dst (MOVHload [4] src mem)
			(MOVHstore [2] dst (MOVHload [2] src mem)
				(MOVHstore dst (MOVHload src mem) mem))))
(Move [6] {t} dst src mem) && t.(*types.Type).Alignment()%2 == 0 ->
	(MOVHstore [4] dst (MOVHload [4] src mem)
		(MOVHstore [2] dst (MOVHload [2] src mem)
			(MOVHstore dst (MOVHload src mem) mem)))
(Move [12] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVWstore [8] dst (MOVWload [8] src mem)
		(MOVWstore [4] dst (MOVWload [4] src mem)
			(MOVWstore dst (MOVWload src mem) mem)))
(Move [16] {t} dst src mem) && t.(*types.Type).Alignment()%4 == 0 ->
	(MOVWstore [12] dst (MOVWload [12] src mem)
		(MOVWstore [8] dst (MOVWload [8] src mem)
			(MOVWstore [4] dst (MOVWload [4] src mem)
				(MOVWstore dst (MOVWload src mem) mem))))

// large or unaligned move uses a loop
(Move [s] {t} dst src mem)
	&& (s > 16 || t.(*types.Type).Alignment()%4 != 0) ->
	(LoweredMove [t.(*types.Type).Alignment()]
		dst
		src
		(ADDconst <src.Type> src [s-moveSize(t.(*types.Type).Alignment(), config)])
		mem)

// calls
(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem)
(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem)
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)

// atomic intrinsics
(AtomicLoad32 ptr mem) -> (LoweredAtomicLoad ptr mem)
(AtomicLoadPtr ptr mem) -> (LoweredAtomicLoad ptr mem)

(AtomicStore32 ptr val mem) -> (LoweredAtomicStore ptr val mem)
(AtomicStorePtrNoWB ptr val mem) -> (LoweredAtomicStore ptr val mem)

(AtomicExchange32 ptr val mem) -> (LoweredAtomicExchange ptr val mem)
(AtomicAdd32 ptr val mem) -> (LoweredAtomicAdd ptr val mem)

(AtomicCompareAndSwap32 ptr old new_ mem) -> (LoweredAtomicCas ptr old new_ mem)

// AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3,uint32(val) << ((ptr & 3) * 8))
(AtomicOr8 ptr val mem) && !config.BigEndian ->
	(LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
		(SLL <typ.UInt32> (ZeroExt8to32 val)
			(SLLconst <typ.UInt32> [3]
				(ANDconst <typ.UInt32> [3] ptr))) mem)
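// e.g. with ptr&3 == 1 (little-endian), the target byte occupies bits 8-15
// of the aligned word, so the zero-extended val is shifted left by
// (ptr&3)<<3 = 8 before the word-wide atomic OR.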

// AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3,(uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8)))
(AtomicAnd8 ptr val mem) && !config.BigEndian ->
	(LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
		(OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val)
			(SLLconst <typ.UInt32> [3]
				(ANDconst <typ.UInt32> [3] ptr)))
			(NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
				(MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
					(ANDconst <typ.UInt32> [3] ptr))))) mem)

// AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3,uint32(val) << (((ptr^3) & 3) * 8))
(AtomicOr8 ptr val mem) && config.BigEndian ->
	(LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
		(SLL <typ.UInt32> (ZeroExt8to32 val)
			(SLLconst <typ.UInt32> [3]
				(ANDconst <typ.UInt32> [3]
					(XORconst <typ.UInt32> [3] ptr)))) mem)

// AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3,(uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8)))
(AtomicAnd8 ptr val mem) && config.BigEndian ->
	(LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
		(OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val)
			(SLLconst <typ.UInt32> [3]
				(ANDconst <typ.UInt32> [3]
					(XORconst <typ.UInt32> [3] ptr))))
			(NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
				(MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
					(ANDconst <typ.UInt32> [3]
						(XORconst <typ.UInt32> [3] ptr)))))) mem)

// checks
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
(IsNonNil ptr) -> (SGTU ptr (MOVWconst [0]))
(IsInBounds idx len) -> (SGTU len idx)
(IsSliceInBounds idx len) -> (XORconst [1] (SGTU idx len))

// pseudo-ops
(GetClosurePtr) -> (LoweredGetClosurePtr)
(GetCallerSP) -> (LoweredGetCallerSP)
(GetCallerPC) -> (LoweredGetCallerPC)

(If cond yes no) -> (NE cond yes no)

// Write barrier.
(WB {fn} destptr srcptr mem) -> (LoweredWB {fn} destptr srcptr mem)

(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 -> (LoweredPanicBoundsA [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 -> (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 -> (LoweredPanicBoundsC [kind] x y mem)

(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 -> (LoweredPanicExtendA [kind] hi lo y mem)
(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 -> (LoweredPanicExtendB [kind] hi lo y mem)
(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 -> (LoweredPanicExtendC [kind] hi lo y mem)

// Optimizations

// Absorb boolean tests into block
(NE (FPFlagTrue cmp) yes no) -> (FPT cmp yes no)
(NE (FPFlagFalse cmp) yes no) -> (FPF cmp yes no)
(EQ (FPFlagTrue cmp) yes no) -> (FPF cmp yes no)
(EQ (FPFlagFalse cmp) yes no) -> (FPT cmp yes no)
(NE (XORconst [1] cmp:(SGT _ _)) yes no) -> (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTU _ _)) yes no) -> (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTconst _)) yes no) -> (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTUconst _)) yes no) -> (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTzero _)) yes no) -> (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTUzero _)) yes no) -> (EQ cmp yes no)
(EQ (XORconst [1] cmp:(SGT _ _)) yes no) -> (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) -> (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTconst _)) yes no) -> (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) -> (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTzero _)) yes no) -> (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTUzero _)) yes no) -> (NE cmp yes no)
(NE (SGTUconst [1] x) yes no) -> (EQ x yes no)
(EQ (SGTUconst [1] x) yes no) -> (NE x yes no)
(NE (SGTUzero x) yes no) -> (NE x yes no)
(EQ (SGTUzero x) yes no) -> (EQ x yes no)
(NE (SGTconst [0] x) yes no) -> (LTZ x yes no)
(EQ (SGTconst [0] x) yes no) -> (GEZ x yes no)
(NE (SGTzero x) yes no) -> (GTZ x yes no)
(EQ (SGTzero x) yes no) -> (LEZ x yes no)
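// The last four rules read directly off the comparisons: SGTconst [0] x is
// "0 > x", i.e. x < 0, so branching on it non-zero is LTZ and on it zero is
// GEZ; SGTzero x is "x > 0", giving GTZ and LEZ.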

// fold offset into address
(ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) -> (MOVWaddr [off1+off2] {sym} ptr)

// fold address into load/store
(MOVBload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVBload [off1+off2] {sym} ptr mem)
(MOVBUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVBUload [off1+off2] {sym} ptr mem)
(MOVHload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVHload [off1+off2] {sym} ptr mem)
(MOVHUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVHUload [off1+off2] {sym} ptr mem)
(MOVWload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVWload [off1+off2] {sym} ptr mem)
(MOVFload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVFload [off1+off2] {sym} ptr mem)
(MOVDload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVDload [off1+off2] {sym} ptr mem)

(MOVBstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVBstore [off1+off2] {sym} ptr val mem)
(MOVHstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVHstore [off1+off2] {sym} ptr val mem)
(MOVWstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVWstore [off1+off2] {sym} ptr val mem)
(MOVFstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVFstore [off1+off2] {sym} ptr val mem)
(MOVDstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVDstore [off1+off2] {sym} ptr val mem)

(MOVBstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVBstorezero [off1+off2] {sym} ptr mem)
(MOVHstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVHstorezero [off1+off2] {sym} ptr mem)
(MOVWstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(off1+off2) || x.Uses == 1) -> (MOVWstorezero [off1+off2] {sym} ptr mem)

(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) ->
	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) ->
	(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBreg x)
(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBUreg x)
(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHreg x)
(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHUreg x)
(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
(MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x

// store zero
(MOVBstore [off] {sym} ptr (MOVWconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVWconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)

// don't extend after proper load
(MOVBreg x:(MOVBload _ _)) -> (MOVWreg x)
(MOVBUreg x:(MOVBUload _ _)) -> (MOVWreg x)
(MOVHreg x:(MOVBload _ _)) -> (MOVWreg x)
(MOVHreg x:(MOVBUload _ _)) -> (MOVWreg x)
(MOVHreg x:(MOVHload _ _)) -> (MOVWreg x)
(MOVHUreg x:(MOVBUload _ _)) -> (MOVWreg x)
(MOVHUreg x:(MOVHUload _ _)) -> (MOVWreg x)

// fold double extensions
(MOVBreg x:(MOVBreg _)) -> (MOVWreg x)
(MOVBUreg x:(MOVBUreg _)) -> (MOVWreg x)
(MOVHreg x:(MOVBreg _)) -> (MOVWreg x)
(MOVHreg x:(MOVBUreg _)) -> (MOVWreg x)
(MOVHreg x:(MOVHreg _)) -> (MOVWreg x)
(MOVHUreg x:(MOVBUreg _)) -> (MOVWreg x)
(MOVHUreg x:(MOVHUreg _)) -> (MOVWreg x)

// sign extended loads
// Note: The combined instruction must end up in the same block
// as the original load. If not, we end up making a value with
// memory type live in two different blocks, which can lead to
// multiple memory values alive simultaneously.
// Make sure we don't combine these ops if the load has another use.
// This prevents a single load from being split into multiple loads
// which then might return different values. See test/atomicload.go.
(MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload <t> [off] {sym} ptr mem)
(MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBUload <t> [off] {sym} ptr mem)
(MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHload <t> [off] {sym} ptr mem)
(MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHUload <t> [off] {sym} ptr mem)

// fold extensions and ANDs together
(MOVBUreg (ANDconst [c] x)) -> (ANDconst [c&0xff] x)
(MOVHUreg (ANDconst [c] x)) -> (ANDconst [c&0xffff] x)
(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 -> (ANDconst [c&0x7f] x)
(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 -> (ANDconst [c&0x7fff] x)

// don't extend before store
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem)

// If a register move has only 1 use, just use the same register without emitting an instruction.
// MOVWnop doesn't emit an instruction; it exists only to carry the type.
(MOVWreg x) && x.Uses == 1 -> (MOVWnop x)

// fold constant into arithmetic ops
(ADD x (MOVWconst [c])) -> (ADDconst [c] x)
(SUB x (MOVWconst [c])) -> (SUBconst [c] x)
(AND x (MOVWconst [c])) -> (ANDconst [c] x)
(OR x (MOVWconst [c])) -> (ORconst [c] x)
(XOR x (MOVWconst [c])) -> (XORconst [c] x)
(NOR x (MOVWconst [c])) -> (NORconst [c] x)

(SLL _ (MOVWconst [c])) && uint32(c)>=32 -> (MOVWconst [0])
(SRL _ (MOVWconst [c])) && uint32(c)>=32 -> (MOVWconst [0])
(SRA x (MOVWconst [c])) && uint32(c)>=32 -> (SRAconst x [31])
(SLL x (MOVWconst [c])) -> (SLLconst x [c])
(SRL x (MOVWconst [c])) -> (SRLconst x [c])
(SRA x (MOVWconst [c])) -> (SRAconst x [c])

(SGT (MOVWconst [c]) x) -> (SGTconst [c] x)
(SGTU (MOVWconst [c]) x) -> (SGTUconst [c] x)
(SGT x (MOVWconst [0])) -> (SGTzero x)
(SGTU x (MOVWconst [0])) -> (SGTUzero x)

// mul with constant
(Select1 (MULTU (MOVWconst [0]) _)) -> (MOVWconst [0])
(Select0 (MULTU (MOVWconst [0]) _)) -> (MOVWconst [0])
(Select1 (MULTU (MOVWconst [1]) x)) -> x
(Select0 (MULTU (MOVWconst [1]) _)) -> (MOVWconst [0])
(Select1 (MULTU (MOVWconst [-1]) x)) -> (NEG <x.Type> x)
(Select0 (MULTU (MOVWconst [-1]) x)) -> (CMOVZ (ADDconst <x.Type> [-1] x) (MOVWconst [0]) x)
(Select1 (MULTU (MOVWconst [c]) x)) && isPowerOfTwo(int64(uint32(c))) -> (SLLconst [log2(int64(uint32(c)))] x)
(Select0 (MULTU (MOVWconst [c]) x)) && isPowerOfTwo(int64(uint32(c))) -> (SRLconst [32-log2(int64(uint32(c)))] x)
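// For c a power of two, c*x = x << log2(c) as a 64-bit product, so the low
// word is x << log2(c) and the high word is x >> (32-log2(c)); e.g. c = 8:
// high32(8*x) = x >> 29.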

(MUL (MOVWconst [0]) _) -> (MOVWconst [0])
(MUL (MOVWconst [1]) x) -> x
(MUL (MOVWconst [-1]) x) -> (NEG x)
(MUL (MOVWconst [c]) x) && isPowerOfTwo(int64(uint32(c))) -> (SLLconst [log2(int64(uint32(c)))] x)

// generic simplifications
(ADD x (NEG y)) -> (SUB x y)
(SUB x x) -> (MOVWconst [0])
(SUB (MOVWconst [0]) x) -> (NEG x)
(AND x x) -> x
(OR x x) -> x
(XOR x x) -> (MOVWconst [0])

// miscellaneous patterns generated by dec64
(AND (SGTUconst [1] x) (SGTUconst [1] y)) -> (SGTUconst [1] (OR <x.Type> x y))
(OR (SGTUzero x) (SGTUzero y)) -> (SGTUzero (OR <x.Type> x y))

// remove redundant *const ops
(ADDconst [0] x) -> x
(SUBconst [0] x) -> x
(ANDconst [0] _) -> (MOVWconst [0])
(ANDconst [-1] x) -> x
(ORconst [0] x) -> x
(ORconst [-1] _) -> (MOVWconst [-1])
(XORconst [0] x) -> x
(XORconst [-1] x) -> (NORconst [0] x)

// generic constant folding
(ADDconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(c+d))])
(ADDconst [c] (ADDconst [d] x)) -> (ADDconst [int64(int32(c+d))] x)
(ADDconst [c] (SUBconst [d] x)) -> (ADDconst [int64(int32(c-d))] x)
(SUBconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(d-c))])
(SUBconst [c] (SUBconst [d] x)) -> (ADDconst [int64(int32(-c-d))] x)
(SUBconst [c] (ADDconst [d] x)) -> (ADDconst [int64(int32(-c+d))] x)
(SLLconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(uint32(d)<<uint32(c)))])
(SRLconst [c] (MOVWconst [d])) -> (MOVWconst [int64(uint32(d)>>uint32(c))])
(SRAconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(d)>>uint32(c))])
(MUL (MOVWconst [c]) (MOVWconst [d])) -> (MOVWconst [int64(int32(c)*int32(d))])
(Select1 (MULTU (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(uint32(c)*uint32(d)))])
(Select0 (MULTU (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [(c*d)>>32])
(Select1 (DIV (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(c)/int32(d))])
(Select1 (DIVU (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(uint32(c)/uint32(d)))])
(Select0 (DIV (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(c)%int32(d))])
(Select0 (DIVU (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(int32(uint32(c)%uint32(d)))])
(ANDconst [c] (MOVWconst [d])) -> (MOVWconst [c&d])
(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
(ORconst [c] (MOVWconst [d])) -> (MOVWconst [c|d])
(ORconst [c] (ORconst [d] x)) -> (ORconst [c|d] x)
(XORconst [c] (MOVWconst [d])) -> (MOVWconst [c^d])
(XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
(NORconst [c] (MOVWconst [d])) -> (MOVWconst [^(c|d)])
(NEG (MOVWconst [c])) -> (MOVWconst [int64(int32(-c))])
(MOVBreg (MOVWconst [c])) -> (MOVWconst [int64(int8(c))])
(MOVBUreg (MOVWconst [c])) -> (MOVWconst [int64(uint8(c))])
(MOVHreg (MOVWconst [c])) -> (MOVWconst [int64(int16(c))])
(MOVHUreg (MOVWconst [c])) -> (MOVWconst [int64(uint16(c))])
(MOVWreg (MOVWconst [c])) -> (MOVWconst [c])

// constant comparisons
(SGTconst [c] (MOVWconst [d])) && int32(c) > int32(d) -> (MOVWconst [1])
(SGTconst [c] (MOVWconst [d])) && int32(c) <= int32(d) -> (MOVWconst [0])
(SGTUconst [c] (MOVWconst [d])) && uint32(c) > uint32(d) -> (MOVWconst [1])
(SGTUconst [c] (MOVWconst [d])) && uint32(c) <= uint32(d) -> (MOVWconst [0])
(SGTzero (MOVWconst [d])) && int32(d) > 0 -> (MOVWconst [1])
(SGTzero (MOVWconst [d])) && int32(d) <= 0 -> (MOVWconst [0])
(SGTUzero (MOVWconst [d])) && uint32(d) != 0 -> (MOVWconst [1])
(SGTUzero (MOVWconst [d])) && uint32(d) == 0 -> (MOVWconst [0])

// other known comparisons
(SGTconst [c] (MOVBreg _)) && 0x7f < int32(c) -> (MOVWconst [1])
(SGTconst [c] (MOVBreg _)) && int32(c) <= -0x80 -> (MOVWconst [0])
(SGTconst [c] (MOVBUreg _)) && 0xff < int32(c) -> (MOVWconst [1])
(SGTconst [c] (MOVBUreg _)) && int32(c) < 0 -> (MOVWconst [0])
(SGTUconst [c] (MOVBUreg _)) && 0xff < uint32(c) -> (MOVWconst [1])
(SGTconst [c] (MOVHreg _)) && 0x7fff < int32(c) -> (MOVWconst [1])
(SGTconst [c] (MOVHreg _)) && int32(c) <= -0x8000 -> (MOVWconst [0])
(SGTconst [c] (MOVHUreg _)) && 0xffff < int32(c) -> (MOVWconst [1])
(SGTconst [c] (MOVHUreg _)) && int32(c) < 0 -> (MOVWconst [0])
(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint32(c) -> (MOVWconst [1])
(SGTconst [c] (ANDconst [m] _)) && 0 <= int32(m) && int32(m) < int32(c) -> (MOVWconst [1])
(SGTUconst [c] (ANDconst [m] _)) && uint32(m) < uint32(c) -> (MOVWconst [1])
(SGTconst [c] (SRLconst _ [d])) && 0 <= int32(c) && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) -> (MOVWconst [1])
(SGTUconst [c] (SRLconst _ [d])) && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) -> (MOVWconst [1])

// absorb constants into branches
(EQ (MOVWconst [0]) yes no) -> (First nil yes no)
(EQ (MOVWconst [c]) yes no) && c != 0 -> (First nil no yes)
(NE (MOVWconst [0]) yes no) -> (First nil no yes)
(NE (MOVWconst [c]) yes no) && c != 0 -> (First nil yes no)
(LTZ (MOVWconst [c]) yes no) && int32(c) < 0 -> (First nil yes no)
(LTZ (MOVWconst [c]) yes no) && int32(c) >= 0 -> (First nil no yes)
(LEZ (MOVWconst [c]) yes no) && int32(c) <= 0 -> (First nil yes no)
(LEZ (MOVWconst [c]) yes no) && int32(c) > 0 -> (First nil no yes)
(GTZ (MOVWconst [c]) yes no) && int32(c) > 0 -> (First nil yes no)
(GTZ (MOVWconst [c]) yes no) && int32(c) <= 0 -> (First nil no yes)
(GEZ (MOVWconst [c]) yes no) && int32(c) >= 0 -> (First nil yes no)
(GEZ (MOVWconst [c]) yes no) && int32(c) < 0 -> (First nil no yes)

// conditional move
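// CMOVZ a f cond yields a when cond is non-zero and f when cond is zero,
// so a constant condition selects one argument outright, and a constant-zero
// second argument reduces to the two-operand CMOVZzero form.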
(CMOVZ _ f (MOVWconst [0])) -> f
(CMOVZ a _ (MOVWconst [c])) && c!=0 -> a
(CMOVZzero _ (MOVWconst [0])) -> (MOVWconst [0])
(CMOVZzero a (MOVWconst [c])) && c!=0 -> a
(CMOVZ a (MOVWconst [0]) c) -> (CMOVZzero a c)

// atomic
(LoweredAtomicStore ptr (MOVWconst [0]) mem) -> (LoweredAtomicStorezero ptr mem)
(LoweredAtomicAdd ptr (MOVWconst [c]) mem) && is16Bit(c) -> (LoweredAtomicAddconst [c] ptr mem)