Source file src/cmd/compile/internal/ssa/rewriteAMD64.go
// Code generated from gen/AMD64.rules; DO NOT EDIT.
// generated with: cd gen; go run *.go

package ssa

import "fmt"
import "math"
import "cmd/internal/obj"
import "cmd/internal/objabi"
import "cmd/compile/internal/types"

var _ = fmt.Println   // in case not otherwise used
var _ = math.MinInt8  // in case not otherwise used
var _ = obj.ANOP      // in case not otherwise used
var _ = objabi.GOROOT // in case not otherwise used
var _ = types.TypeMem // in case not otherwise used

func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADCQ:
		return rewriteValueAMD64_OpAMD64ADCQ_0(v)
	case OpAMD64ADCQconst:
		return rewriteValueAMD64_OpAMD64ADCQconst_0(v)
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v) || rewriteValueAMD64_OpAMD64ADDL_20(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst_0(v) || rewriteValueAMD64_OpAMD64ADDLconst_10(v)
	case OpAMD64ADDLconstmodify:
		return rewriteValueAMD64_OpAMD64ADDLconstmodify_0(v)
	case OpAMD64ADDLload:
		return rewriteValueAMD64_OpAMD64ADDLload_0(v)
	case OpAMD64ADDLmodify:
		return rewriteValueAMD64_OpAMD64ADDLmodify_0(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v)
	case OpAMD64ADDQcarry:
		return rewriteValueAMD64_OpAMD64ADDQcarry_0(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst_0(v) || rewriteValueAMD64_OpAMD64ADDQconst_10(v)
	case OpAMD64ADDQconstmodify:
		return rewriteValueAMD64_OpAMD64ADDQconstmodify_0(v)
	case OpAMD64ADDQload:
		return rewriteValueAMD64_OpAMD64ADDQload_0(v)
	case OpAMD64ADDQmodify:
		return rewriteValueAMD64_OpAMD64ADDQmodify_0(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD_0(v)
	case OpAMD64ADDSDload:
		return rewriteValueAMD64_OpAMD64ADDSDload_0(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS_0(v)
	case OpAMD64ADDSSload:
		return rewriteValueAMD64_OpAMD64ADDSSload_0(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL_0(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst_0(v)
	case OpAMD64ANDLconstmodify:
		return rewriteValueAMD64_OpAMD64ANDLconstmodify_0(v)
	case OpAMD64ANDLload:
		return rewriteValueAMD64_OpAMD64ANDLload_0(v)
	case OpAMD64ANDLmodify:
		return rewriteValueAMD64_OpAMD64ANDLmodify_0(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ_0(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst_0(v)
	case OpAMD64ANDQconstmodify:
		return rewriteValueAMD64_OpAMD64ANDQconstmodify_0(v)
	case OpAMD64ANDQload:
		return rewriteValueAMD64_OpAMD64ANDQload_0(v)
	case OpAMD64ANDQmodify:
		return rewriteValueAMD64_OpAMD64ANDQmodify_0(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ_0(v)
	case OpAMD64BTCLconst:
		return rewriteValueAMD64_OpAMD64BTCLconst_0(v)
	case OpAMD64BTCLconstmodify:
		return rewriteValueAMD64_OpAMD64BTCLconstmodify_0(v)
	case OpAMD64BTCLmodify:
		return rewriteValueAMD64_OpAMD64BTCLmodify_0(v)
	case OpAMD64BTCQconst:
		return rewriteValueAMD64_OpAMD64BTCQconst_0(v)
	case OpAMD64BTCQconstmodify:
		return rewriteValueAMD64_OpAMD64BTCQconstmodify_0(v)
	case OpAMD64BTCQmodify:
		return rewriteValueAMD64_OpAMD64BTCQmodify_0(v)
	case OpAMD64BTLconst:
		return rewriteValueAMD64_OpAMD64BTLconst_0(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst_0(v)
	case OpAMD64BTRLconst:
		return rewriteValueAMD64_OpAMD64BTRLconst_0(v)
	case OpAMD64BTRLconstmodify:
		return rewriteValueAMD64_OpAMD64BTRLconstmodify_0(v)
	case OpAMD64BTRLmodify:
		return rewriteValueAMD64_OpAMD64BTRLmodify_0(v)
	case OpAMD64BTRQconst:
		return rewriteValueAMD64_OpAMD64BTRQconst_0(v)
	case OpAMD64BTRQconstmodify:
		return rewriteValueAMD64_OpAMD64BTRQconstmodify_0(v)
	case OpAMD64BTRQmodify:
		return rewriteValueAMD64_OpAMD64BTRQmodify_0(v)
	case OpAMD64BTSLconst:
		return rewriteValueAMD64_OpAMD64BTSLconst_0(v)
	case OpAMD64BTSLconstmodify:
		return rewriteValueAMD64_OpAMD64BTSLconstmodify_0(v)
	case OpAMD64BTSLmodify:
		return rewriteValueAMD64_OpAMD64BTSLmodify_0(v)
	case OpAMD64BTSQconst:
		return rewriteValueAMD64_OpAMD64BTSQconst_0(v)
	case OpAMD64BTSQconstmodify:
		return rewriteValueAMD64_OpAMD64BTSQconstmodify_0(v)
	case OpAMD64BTSQmodify:
		return rewriteValueAMD64_OpAMD64BTSQmodify_0(v)
	case OpAMD64CMOVLCC:
		return rewriteValueAMD64_OpAMD64CMOVLCC_0(v)
	case OpAMD64CMOVLCS:
		return rewriteValueAMD64_OpAMD64CMOVLCS_0(v)
	case OpAMD64CMOVLEQ:
		return rewriteValueAMD64_OpAMD64CMOVLEQ_0(v)
	case OpAMD64CMOVLGE:
		return rewriteValueAMD64_OpAMD64CMOVLGE_0(v)
	case OpAMD64CMOVLGT:
		return rewriteValueAMD64_OpAMD64CMOVLGT_0(v)
	case OpAMD64CMOVLHI:
		return rewriteValueAMD64_OpAMD64CMOVLHI_0(v)
	case OpAMD64CMOVLLE:
		return rewriteValueAMD64_OpAMD64CMOVLLE_0(v)
	case OpAMD64CMOVLLS:
		return rewriteValueAMD64_OpAMD64CMOVLLS_0(v)
	case OpAMD64CMOVLLT:
		return rewriteValueAMD64_OpAMD64CMOVLLT_0(v)
	case OpAMD64CMOVLNE:
		return rewriteValueAMD64_OpAMD64CMOVLNE_0(v)
	case OpAMD64CMOVQCC:
		return rewriteValueAMD64_OpAMD64CMOVQCC_0(v)
	case OpAMD64CMOVQCS:
		return rewriteValueAMD64_OpAMD64CMOVQCS_0(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ_0(v)
	case OpAMD64CMOVQGE:
		return rewriteValueAMD64_OpAMD64CMOVQGE_0(v)
	case OpAMD64CMOVQGT:
		return rewriteValueAMD64_OpAMD64CMOVQGT_0(v)
	case OpAMD64CMOVQHI:
		return rewriteValueAMD64_OpAMD64CMOVQHI_0(v)
	case OpAMD64CMOVQLE:
		return rewriteValueAMD64_OpAMD64CMOVQLE_0(v)
	case OpAMD64CMOVQLS:
		return rewriteValueAMD64_OpAMD64CMOVQLS_0(v)
	case OpAMD64CMOVQLT:
		return rewriteValueAMD64_OpAMD64CMOVQLT_0(v)
	case OpAMD64CMOVQNE:
		return rewriteValueAMD64_OpAMD64CMOVQNE_0(v)
	case OpAMD64CMOVWCC:
		return rewriteValueAMD64_OpAMD64CMOVWCC_0(v)
	case OpAMD64CMOVWCS:
		return rewriteValueAMD64_OpAMD64CMOVWCS_0(v)
	case OpAMD64CMOVWEQ:
		return rewriteValueAMD64_OpAMD64CMOVWEQ_0(v)
	case OpAMD64CMOVWGE:
		return rewriteValueAMD64_OpAMD64CMOVWGE_0(v)
	case OpAMD64CMOVWGT:
		return rewriteValueAMD64_OpAMD64CMOVWGT_0(v)
	case OpAMD64CMOVWHI:
		return rewriteValueAMD64_OpAMD64CMOVWHI_0(v)
	case OpAMD64CMOVWLE:
		return rewriteValueAMD64_OpAMD64CMOVWLE_0(v)
	case OpAMD64CMOVWLS:
		return rewriteValueAMD64_OpAMD64CMOVWLS_0(v)
	case OpAMD64CMOVWLT:
		return rewriteValueAMD64_OpAMD64CMOVWLT_0(v)
	case OpAMD64CMOVWNE:
		return rewriteValueAMD64_OpAMD64CMOVWNE_0(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB_0(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst_0(v)
	case OpAMD64CMPBconstload:
		return rewriteValueAMD64_OpAMD64CMPBconstload_0(v)
	case OpAMD64CMPBload:
		return rewriteValueAMD64_OpAMD64CMPBload_0(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL_0(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst_0(v) || rewriteValueAMD64_OpAMD64CMPLconst_10(v)
	case OpAMD64CMPLconstload:
		return rewriteValueAMD64_OpAMD64CMPLconstload_0(v)
	case OpAMD64CMPLload:
		return rewriteValueAMD64_OpAMD64CMPLload_0(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ_0(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst_0(v) || rewriteValueAMD64_OpAMD64CMPQconst_10(v)
	case OpAMD64CMPQconstload:
		return rewriteValueAMD64_OpAMD64CMPQconstload_0(v)
	case OpAMD64CMPQload:
		return rewriteValueAMD64_OpAMD64CMPQload_0(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW_0(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst_0(v)
	case OpAMD64CMPWconstload:
		return rewriteValueAMD64_OpAMD64CMPWconstload_0(v)
	case OpAMD64CMPWload:
		return rewriteValueAMD64_OpAMD64CMPWload_0(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v)
	case OpAMD64DIVSD:
		return rewriteValueAMD64_OpAMD64DIVSD_0(v)
	case OpAMD64DIVSDload:
		return rewriteValueAMD64_OpAMD64DIVSDload_0(v)
	case OpAMD64DIVSS:
		return rewriteValueAMD64_OpAMD64DIVSS_0(v)
	case OpAMD64DIVSSload:
		return rewriteValueAMD64_OpAMD64DIVSSload_0(v)
	case OpAMD64HMULL:
		return rewriteValueAMD64_OpAMD64HMULL_0(v)
	case OpAMD64HMULLU:
		return rewriteValueAMD64_OpAMD64HMULLU_0(v)
	case OpAMD64HMULQ:
		return rewriteValueAMD64_OpAMD64HMULQ_0(v)
	case OpAMD64HMULQU:
		return rewriteValueAMD64_OpAMD64HMULQU_0(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL_0(v)
	case OpAMD64LEAL1:
		return rewriteValueAMD64_OpAMD64LEAL1_0(v)
	case OpAMD64LEAL2:
		return rewriteValueAMD64_OpAMD64LEAL2_0(v)
	case OpAMD64LEAL4:
		return rewriteValueAMD64_OpAMD64LEAL4_0(v)
	case OpAMD64LEAL8:
		return rewriteValueAMD64_OpAMD64LEAL8_0(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ_0(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1_0(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2_0(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4_0(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8_0(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX_0(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload_0(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX_0(v)
	case OpAMD64MOVBatomicload:
		return rewriteValueAMD64_OpAMD64MOVBatomicload_0(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload_0(v)
	case OpAMD64MOVBloadidx1:
		return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) || rewriteValueAMD64_OpAMD64MOVBstore_20(v) || rewriteValueAMD64_OpAMD64MOVBstore_30(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v)
	case OpAMD64MOVBstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v)
	case OpAMD64MOVBstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v) || rewriteValueAMD64_OpAMD64MOVBstoreidx1_10(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX_0(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload_0(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX_0(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload_0(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i_0(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f_0(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload_0(v) || rewriteValueAMD64_OpAMD64MOVLload_10(v)
	case OpAMD64MOVLloadidx1:
		return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v)
	case OpAMD64MOVLloadidx4:
		return rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v)
	case OpAMD64MOVLloadidx8:
		return rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore_0(v) || rewriteValueAMD64_OpAMD64MOVLstore_10(v) || rewriteValueAMD64_OpAMD64MOVLstore_20(v) || rewriteValueAMD64_OpAMD64MOVLstore_30(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v)
	case OpAMD64MOVLstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v)
	case OpAMD64MOVLstoreconstidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v)
	case OpAMD64MOVLstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v)
	case OpAMD64MOVLstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v)
	case OpAMD64MOVLstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload_0(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore_0(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload_0(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i_0(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f_0(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload_0(v)
	case OpAMD64MOVQloadidx1:
		return rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v)
	case OpAMD64MOVQloadidx8:
		return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore_0(v) || rewriteValueAMD64_OpAMD64MOVQstore_10(v) || rewriteValueAMD64_OpAMD64MOVQstore_20(v) || rewriteValueAMD64_OpAMD64MOVQstore_30(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v)
	case OpAMD64MOVQstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v)
	case OpAMD64MOVQstoreconstidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v)
	case OpAMD64MOVQstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v)
	case OpAMD64MOVQstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload_0(v)
	case OpAMD64MOVSDloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v)
	case OpAMD64MOVSDloadidx8:
		return rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore_0(v)
	case OpAMD64MOVSDstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v)
	case OpAMD64MOVSDstoreidx8:
		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload_0(v)
	case OpAMD64MOVSSloadidx1:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v)
	case OpAMD64MOVSSloadidx4:
		return rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore_0(v)
	case OpAMD64MOVSSstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v)
	case OpAMD64MOVSSstoreidx4:
		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX_0(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload_0(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX_0(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload_0(v)
	case OpAMD64MOVWloadidx1:
		return rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v)
	case OpAMD64MOVWloadidx2:
		return rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore_0(v) || rewriteValueAMD64_OpAMD64MOVWstore_10(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v)
	case OpAMD64MOVWstoreconstidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v)
	case OpAMD64MOVWstoreconstidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v)
	case OpAMD64MOVWstoreidx1:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v)
	case OpAMD64MOVWstoreidx2:
		return rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL_0(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst_0(v) || rewriteValueAMD64_OpAMD64MULLconst_10(v) || rewriteValueAMD64_OpAMD64MULLconst_20(v) || rewriteValueAMD64_OpAMD64MULLconst_30(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ_0(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst_0(v) || rewriteValueAMD64_OpAMD64MULQconst_10(v) || rewriteValueAMD64_OpAMD64MULQconst_20(v) || rewriteValueAMD64_OpAMD64MULQconst_30(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD_0(v)
	case OpAMD64MULSDload:
		return rewriteValueAMD64_OpAMD64MULSDload_0(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS_0(v)
	case OpAMD64MULSSload:
		return rewriteValueAMD64_OpAMD64MULSSload_0(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL_0(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ_0(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL_0(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ_0(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst_0(v)
	case OpAMD64ORLconstmodify:
		return rewriteValueAMD64_OpAMD64ORLconstmodify_0(v)
	case OpAMD64ORLload:
		return rewriteValueAMD64_OpAMD64ORLload_0(v)
	case OpAMD64ORLmodify:
		return rewriteValueAMD64_OpAMD64ORLmodify_0(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || rewriteValueAMD64_OpAMD64ORQ_160(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst_0(v)
	case OpAMD64ORQconstmodify:
		return rewriteValueAMD64_OpAMD64ORQconstmodify_0(v)
	case OpAMD64ORQload:
		return rewriteValueAMD64_OpAMD64ORQload_0(v)
	case OpAMD64ORQmodify:
		return rewriteValueAMD64_OpAMD64ORQmodify_0(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB_0(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst_0(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL_0(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst_0(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ_0(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst_0(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW_0(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst_0(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB_0(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL_0(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ_0(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW_0(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB_0(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst_0(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL_0(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst_0(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ_0(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst_0(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW_0(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst_0(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v)
	case OpAMD64SBBQ:
		return rewriteValueAMD64_OpAMD64SBBQ_0(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v)
	case OpAMD64SBBQconst:
		return rewriteValueAMD64_OpAMD64SBBQconst_0(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA_0(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE_0(v)
	case OpAMD64SETAEstore:
		return rewriteValueAMD64_OpAMD64SETAEstore_0(v)
	case OpAMD64SETAstore:
		return rewriteValueAMD64_OpAMD64SETAstore_0(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB_0(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE_0(v)
	case OpAMD64SETBEstore:
		return rewriteValueAMD64_OpAMD64SETBEstore_0(v)
	case OpAMD64SETBstore:
		return rewriteValueAMD64_OpAMD64SETBstore_0(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v) || rewriteValueAMD64_OpAMD64SETEQ_20(v)
	case OpAMD64SETEQstore:
		return rewriteValueAMD64_OpAMD64SETEQstore_0(v) || rewriteValueAMD64_OpAMD64SETEQstore_10(v) || rewriteValueAMD64_OpAMD64SETEQstore_20(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG_0(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE_0(v)
	case OpAMD64SETGEstore:
		return rewriteValueAMD64_OpAMD64SETGEstore_0(v)
	case OpAMD64SETGstore:
		return rewriteValueAMD64_OpAMD64SETGstore_0(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL_0(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE_0(v)
	case OpAMD64SETLEstore:
		return rewriteValueAMD64_OpAMD64SETLEstore_0(v)
	case OpAMD64SETLstore:
		return rewriteValueAMD64_OpAMD64SETLstore_0(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v) || rewriteValueAMD64_OpAMD64SETNE_20(v)
	case OpAMD64SETNEstore:
		return rewriteValueAMD64_OpAMD64SETNEstore_0(v) || rewriteValueAMD64_OpAMD64SETNEstore_10(v) || rewriteValueAMD64_OpAMD64SETNEstore_20(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL_0(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst_0(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ_0(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst_0(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB_0(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst_0(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL_0(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst_0(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ_0(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst_0(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW_0(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst_0(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL_0(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst_0(v)
	case OpAMD64SUBLload:
		return rewriteValueAMD64_OpAMD64SUBLload_0(v)
	case OpAMD64SUBLmodify:
		return rewriteValueAMD64_OpAMD64SUBLmodify_0(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ_0(v)
	case OpAMD64SUBQborrow:
		return rewriteValueAMD64_OpAMD64SUBQborrow_0(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst_0(v)
	case OpAMD64SUBQload:
		return rewriteValueAMD64_OpAMD64SUBQload_0(v)
	case OpAMD64SUBQmodify:
		return rewriteValueAMD64_OpAMD64SUBQmodify_0(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD_0(v)
	case OpAMD64SUBSDload:
		return rewriteValueAMD64_OpAMD64SUBSDload_0(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS_0(v)
	case OpAMD64SUBSSload:
		return rewriteValueAMD64_OpAMD64SUBSSload_0(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB_0(v)
	case OpAMD64TESTBconst:
		return rewriteValueAMD64_OpAMD64TESTBconst_0(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL_0(v)
	case OpAMD64TESTLconst:
		return rewriteValueAMD64_OpAMD64TESTLconst_0(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ_0(v)
	case OpAMD64TESTQconst:
		return rewriteValueAMD64_OpAMD64TESTQconst_0(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW_0(v)
	case OpAMD64TESTWconst:
		return rewriteValueAMD64_OpAMD64TESTWconst_0(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock_0(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock_0(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL_0(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ_0(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v)
	case OpAMD64XORLconstmodify:
		return rewriteValueAMD64_OpAMD64XORLconstmodify_0(v)
	case OpAMD64XORLload:
		return rewriteValueAMD64_OpAMD64XORLload_0(v)
	case OpAMD64XORLmodify:
		return rewriteValueAMD64_OpAMD64XORLmodify_0(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ_0(v) || rewriteValueAMD64_OpAMD64XORQ_10(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst_0(v)
	case OpAMD64XORQconstmodify:
		return rewriteValueAMD64_OpAMD64XORQconstmodify_0(v)
	case OpAMD64XORQload:
		return rewriteValueAMD64_OpAMD64XORQload_0(v)
	case OpAMD64XORQmodify:
		return rewriteValueAMD64_OpAMD64XORQmodify_0(v)
	case OpAdd16:
		return rewriteValueAMD64_OpAdd16_0(v)
	case OpAdd32:
		return rewriteValueAMD64_OpAdd32_0(v)
	case OpAdd32F:
		return rewriteValueAMD64_OpAdd32F_0(v)
	case OpAdd64:
		return rewriteValueAMD64_OpAdd64_0(v)
	case OpAdd64F:
		return rewriteValueAMD64_OpAdd64F_0(v)
	case OpAdd8:
		return rewriteValueAMD64_OpAdd8_0(v)
	case OpAddPtr:
		return rewriteValueAMD64_OpAddPtr_0(v)
	case OpAddr:
		return rewriteValueAMD64_OpAddr_0(v)
	case OpAnd16:
		return rewriteValueAMD64_OpAnd16_0(v)
	case OpAnd32:
		return rewriteValueAMD64_OpAnd32_0(v)
	case OpAnd64:
		return rewriteValueAMD64_OpAnd64_0(v)
	case OpAnd8:
		return rewriteValueAMD64_OpAnd8_0(v)
	case OpAndB:
		return rewriteValueAMD64_OpAndB_0(v)
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32_0(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64_0(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8_0(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32_0(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64_0(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32_0(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64_0(v)
	case OpAtomicLoad8:
		return rewriteValueAMD64_OpAtomicLoad8_0(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr_0(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8_0(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32_0(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64_0(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v)
	case OpAvg64u:
		return rewriteValueAMD64_OpAvg64u_0(v)
	case OpBitLen16:
		return rewriteValueAMD64_OpBitLen16_0(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32_0(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64_0(v)
	case OpBitLen8:
		return rewriteValueAMD64_OpBitLen8_0(v)
	case OpBswap32:
		return rewriteValueAMD64_OpBswap32_0(v)
	case OpBswap64:
		return rewriteValueAMD64_OpBswap64_0(v)
	case OpCeil:
		return rewriteValueAMD64_OpCeil_0(v)
	case OpClosureCall:
		return rewriteValueAMD64_OpClosureCall_0(v)
	case OpCom16:
		return rewriteValueAMD64_OpCom16_0(v)
	case OpCom32:
		return rewriteValueAMD64_OpCom32_0(v)
	case OpCom64:
		return rewriteValueAMD64_OpCom64_0(v)
	case OpCom8:
		return rewriteValueAMD64_OpCom8_0(v)
	case OpCondSelect:
		return rewriteValueAMD64_OpCondSelect_0(v) || rewriteValueAMD64_OpCondSelect_10(v) || rewriteValueAMD64_OpCondSelect_20(v) || rewriteValueAMD64_OpCondSelect_30(v) || rewriteValueAMD64_OpCondSelect_40(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16_0(v)
	case OpConst32:
		return rewriteValueAMD64_OpConst32_0(v)
	case OpConst32F:
		return rewriteValueAMD64_OpConst32F_0(v)
	case OpConst64:
		return rewriteValueAMD64_OpConst64_0(v)
	case OpConst64F:
		return rewriteValueAMD64_OpConst64F_0(v)
	case OpConst8:
		return rewriteValueAMD64_OpConst8_0(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool_0(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil_0(v)
	case OpCtz16:
		return rewriteValueAMD64_OpCtz16_0(v)
	case OpCtz16NonZero:
		return rewriteValueAMD64_OpCtz16NonZero_0(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32_0(v)
	case OpCtz32NonZero:
		return rewriteValueAMD64_OpCtz32NonZero_0(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64_0(v)
	case OpCtz64NonZero:
		return rewriteValueAMD64_OpCtz64NonZero_0(v)
	case OpCtz8:
		return rewriteValueAMD64_OpCtz8_0(v)
	case OpCtz8NonZero:
		return rewriteValueAMD64_OpCtz8NonZero_0(v)
	case OpCvt32Fto32:
		return rewriteValueAMD64_OpCvt32Fto32_0(v)
	case OpCvt32Fto64:
		return rewriteValueAMD64_OpCvt32Fto64_0(v)
	case OpCvt32Fto64F:
		return rewriteValueAMD64_OpCvt32Fto64F_0(v)
	case OpCvt32to32F:
		return rewriteValueAMD64_OpCvt32to32F_0(v)
	case OpCvt32to64F:
		return rewriteValueAMD64_OpCvt32to64F_0(v)
	case OpCvt64Fto32:
		return rewriteValueAMD64_OpCvt64Fto32_0(v)
	case OpCvt64Fto32F:
		return rewriteValueAMD64_OpCvt64Fto32F_0(v)
	case OpCvt64Fto64:
		return rewriteValueAMD64_OpCvt64Fto64_0(v)
	case OpCvt64to32F:
		return rewriteValueAMD64_OpCvt64to32F_0(v)
	case OpCvt64to64F:
		return rewriteValueAMD64_OpCvt64to64F_0(v)
	case OpDiv128u:
		return rewriteValueAMD64_OpDiv128u_0(v)
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16_0(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u_0(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32_0(v)
	case OpDiv32F:
		return rewriteValueAMD64_OpDiv32F_0(v)
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u_0(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64_0(v)
	case OpDiv64F:
		return rewriteValueAMD64_OpDiv64F_0(v)
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u_0(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8_0(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u_0(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16_0(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32_0(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F_0(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64_0(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F_0(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8_0(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB_0(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr_0(v)
	case OpFloor:
		return rewriteValueAMD64_OpFloor_0(v)
	case OpGeq16:
		return rewriteValueAMD64_OpGeq16_0(v)
	case OpGeq16U:
		return rewriteValueAMD64_OpGeq16U_0(v)
	case OpGeq32:
		return rewriteValueAMD64_OpGeq32_0(v)
	case OpGeq32F:
		return rewriteValueAMD64_OpGeq32F_0(v)
	case OpGeq32U:
		return rewriteValueAMD64_OpGeq32U_0(v)
	case OpGeq64:
		return rewriteValueAMD64_OpGeq64_0(v)
	case OpGeq64F:
		return rewriteValueAMD64_OpGeq64F_0(v)
	case OpGeq64U:
		return rewriteValueAMD64_OpGeq64U_0(v)
	case OpGeq8:
		return rewriteValueAMD64_OpGeq8_0(v)
	case OpGeq8U:
		return rewriteValueAMD64_OpGeq8U_0(v)
	case OpGetCallerPC:
		return rewriteValueAMD64_OpGetCallerPC_0(v)
	case OpGetCallerSP:
		return rewriteValueAMD64_OpGetCallerSP_0(v)
	case OpGetClosurePtr:
		return rewriteValueAMD64_OpGetClosurePtr_0(v)
	case OpGetG:
		return rewriteValueAMD64_OpGetG_0(v)
	case OpGreater16:
		return rewriteValueAMD64_OpGreater16_0(v)
	case OpGreater16U:
		return rewriteValueAMD64_OpGreater16U_0(v)
	case OpGreater32:
		return rewriteValueAMD64_OpGreater32_0(v)
	case OpGreater32F:
		return rewriteValueAMD64_OpGreater32F_0(v)
	case OpGreater32U:
		return rewriteValueAMD64_OpGreater32U_0(v)
	case OpGreater64:
		return rewriteValueAMD64_OpGreater64_0(v)
	case OpGreater64F:
		return rewriteValueAMD64_OpGreater64F_0(v)
	case OpGreater64U:
		return rewriteValueAMD64_OpGreater64U_0(v)
	case OpGreater8:
		return rewriteValueAMD64_OpGreater8_0(v)
	case OpGreater8U:
		return rewriteValueAMD64_OpGreater8U_0(v)
	case OpHmul32:
		return rewriteValueAMD64_OpHmul32_0(v)
	case OpHmul32u:
		return rewriteValueAMD64_OpHmul32u_0(v)
	case OpHmul64:
		return rewriteValueAMD64_OpHmul64_0(v)
	case OpHmul64u:
		return rewriteValueAMD64_OpHmul64u_0(v)
	case OpInt64Hi:
		return rewriteValueAMD64_OpInt64Hi_0(v)
	case OpInt64Lo:
		return rewriteValueAMD64_OpInt64Lo_0(v)
	case OpInterCall:
		return rewriteValueAMD64_OpInterCall_0(v)
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds_0(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil_0(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds_0(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16_0(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U_0(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32_0(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F_0(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U_0(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64_0(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F_0(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U_0(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8_0(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U_0(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16_0(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U_0(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32_0(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F_0(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U_0(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64_0(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F_0(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U_0(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8_0(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U_0(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad_0(v)
	case OpLocalAddr:
		return rewriteValueAMD64_OpLocalAddr_0(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16_0(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32_0(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64_0(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8_0(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16_0(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32_0(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64_0(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8_0(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16_0(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32_0(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64_0(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8_0(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16_0(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32_0(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64_0(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8_0(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16_0(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u_0(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32_0(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u_0(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64_0(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u_0(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8_0(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u_0(v)
	case OpMove:
		return rewriteValueAMD64_OpMove_0(v) || rewriteValueAMD64_OpMove_10(v) || rewriteValueAMD64_OpMove_20(v)
	case OpMul16:
		return rewriteValueAMD64_OpMul16_0(v)
	case OpMul32:
		return rewriteValueAMD64_OpMul32_0(v)
	case OpMul32F:
		return rewriteValueAMD64_OpMul32F_0(v)
	case OpMul64:
		return rewriteValueAMD64_OpMul64_0(v)
	case OpMul64F:
		return rewriteValueAMD64_OpMul64F_0(v)
	case OpMul64uhilo:
		return rewriteValueAMD64_OpMul64uhilo_0(v)
	case OpMul8:
		return rewriteValueAMD64_OpMul8_0(v)
	case OpNeg16:
		return rewriteValueAMD64_OpNeg16_0(v)
	case OpNeg32:
		return rewriteValueAMD64_OpNeg32_0(v)
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F_0(v)
	case OpNeg64:
		return rewriteValueAMD64_OpNeg64_0(v)
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F_0(v)
	case OpNeg8:
		return rewriteValueAMD64_OpNeg8_0(v)
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16_0(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32_0(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F_0(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64_0(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F_0(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8_0(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB_0(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr_0(v)
	case OpNilCheck:
		return rewriteValueAMD64_OpNilCheck_0(v)
	case OpNot:
		return rewriteValueAMD64_OpNot_0(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr_0(v)
	case OpOr16:
		return rewriteValueAMD64_OpOr16_0(v)
	case OpOr32:
		return rewriteValueAMD64_OpOr32_0(v)
	case OpOr64:
		return rewriteValueAMD64_OpOr64_0(v)
	case OpOr8:
		return rewriteValueAMD64_OpOr8_0(v)
	case OpOrB:
		return rewriteValueAMD64_OpOrB_0(v)
	case OpPanicBounds:
		return rewriteValueAMD64_OpPanicBounds_0(v)
	case OpPanicExtend:
		return rewriteValueAMD64_OpPanicExtend_0(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16_0(v)
	case OpPopCount32:
		return rewriteValueAMD64_OpPopCount32_0(v)
	case OpPopCount64:
		return rewriteValueAMD64_OpPopCount64_0(v)
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8_0(v)
	case OpRotateLeft16:
		return rewriteValueAMD64_OpRotateLeft16_0(v)
	case OpRotateLeft32:
		return rewriteValueAMD64_OpRotateLeft32_0(v)
	case OpRotateLeft64:
		return rewriteValueAMD64_OpRotateLeft64_0(v)
	case OpRotateLeft8:
		return rewriteValueAMD64_OpRotateLeft8_0(v)
	case OpRound32F:
		return rewriteValueAMD64_OpRound32F_0(v)
	case OpRound64F:
		return rewriteValueAMD64_OpRound64F_0(v)
	case OpRoundToEven:
		return rewriteValueAMD64_OpRoundToEven_0(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16_0(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32_0(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64_0(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8_0(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16_0(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32_0(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64_0(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8_0(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16_0(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32_0(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64_0(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8_0(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16_0(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32_0(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64_0(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8_0(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16_0(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32_0(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64_0(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8_0(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16_0(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32_0(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64_0(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8_0(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16_0(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32_0(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64_0(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8_0(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16_0(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32_0(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64_0(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8_0(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0_0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1_0(v)
	case OpSignExt16to32:
		return rewriteValueAMD64_OpSignExt16to32_0(v)
	case OpSignExt16to64:
		return rewriteValueAMD64_OpSignExt16to64_0(v)
	case OpSignExt32to64:
		return rewriteValueAMD64_OpSignExt32to64_0(v)
	case OpSignExt8to16:
		return rewriteValueAMD64_OpSignExt8to16_0(v)
	case OpSignExt8to32:
		return rewriteValueAMD64_OpSignExt8to32_0(v)
	case OpSignExt8to64:
		return rewriteValueAMD64_OpSignExt8to64_0(v)
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask_0(v)
	case OpSqrt:
		return rewriteValueAMD64_OpSqrt_0(v)
	case OpStaticCall:
		return rewriteValueAMD64_OpStaticCall_0(v)
	case OpStore:
		return rewriteValueAMD64_OpStore_0(v)
	case OpSub16:
		return rewriteValueAMD64_OpSub16_0(v)
	case OpSub32:
		return rewriteValueAMD64_OpSub32_0(v)
	case OpSub32F:
		return rewriteValueAMD64_OpSub32F_0(v)
	case OpSub64:
		return rewriteValueAMD64_OpSub64_0(v)
	case OpSub64F:
		return rewriteValueAMD64_OpSub64F_0(v)
	case OpSub8:
		return rewriteValueAMD64_OpSub8_0(v)
	case OpSubPtr:
		return rewriteValueAMD64_OpSubPtr_0(v)
	case OpTrunc:
		return rewriteValueAMD64_OpTrunc_0(v)
	case OpTrunc16to8:
		return rewriteValueAMD64_OpTrunc16to8_0(v)
	case OpTrunc32to16:
		return rewriteValueAMD64_OpTrunc32to16_0(v)
	case OpTrunc32to8:
		return rewriteValueAMD64_OpTrunc32to8_0(v)
	case OpTrunc64to16:
		return rewriteValueAMD64_OpTrunc64to16_0(v)
	case OpTrunc64to32:
		return rewriteValueAMD64_OpTrunc64to32_0(v)
	case OpTrunc64to8:
		return rewriteValueAMD64_OpTrunc64to8_0(v)
	case OpWB:
		return rewriteValueAMD64_OpWB_0(v)
	case OpXor16:
		return rewriteValueAMD64_OpXor16_0(v)
	case OpXor32:
		return rewriteValueAMD64_OpXor32_0(v)
	case OpXor64:
		return rewriteValueAMD64_OpXor64_0(v)
	case OpXor8:
		return rewriteValueAMD64_OpXor8_0(v)
	case OpZero:
		return rewriteValueAMD64_OpZero_0(v) || rewriteValueAMD64_OpZero_10(v) || rewriteValueAMD64_OpZero_20(v)
	case OpZeroExt16to32:
		return rewriteValueAMD64_OpZeroExt16to32_0(v)
	case OpZeroExt16to64:
		return rewriteValueAMD64_OpZeroExt16to64_0(v)
	case OpZeroExt32to64:
		return rewriteValueAMD64_OpZeroExt32to64_0(v)
	case OpZeroExt8to16:
		return rewriteValueAMD64_OpZeroExt8to16_0(v)
	case OpZeroExt8to32:
		return rewriteValueAMD64_OpZeroExt8to32_0(v)
	case OpZeroExt8to64:
		return rewriteValueAMD64_OpZeroExt8to64_0(v)
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADCQ_0(v *Value) bool {
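	// match: (ADCQ x (MOVQconst [c]) carry)
	// cond: is32Bit(c)
	// result: (ADCQconst x [c] carry)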
	for {
		carry := v.Args[2]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1.AuxInt
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADCQconst)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(carry)
		return true
	}
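	// match: (ADCQ (MOVQconst [c]) x carry)
	// cond: is32Bit(c)
	// result: (ADCQconst x [c] carry)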
	for {
		carry := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0.AuxInt
		x := v.Args[1]
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64ADCQconst)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(carry)
		return true
	}
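	// match: (ADCQ x y (FlagEQ))
	// cond:
	// result: (ADDQcarry x y)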
	for {
		_ = v.Args[2]
		x := v.Args[0]
		y := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQcarry)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADCQconst_0(v *Value) bool {
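	// match: (ADCQconst x [c] (FlagEQ))
	// cond:
	// result: (ADDQconstcarry x [c])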
	for {
		c := v.AuxInt
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQconstcarry)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL_0(v *Value) bool {
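	// match: (ADDL x (MOVLconst [c]))
	// cond:
	// result: (ADDLconst [c] x)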
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := v_1.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
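	// match: (ADDL (MOVLconst [c]) x)
	// cond:
	// result: (ADDLconst [c] x)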
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := v_0.AuxInt
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
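	// match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
	// cond: d==32-c
	// result: (ROLLconst x [c])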
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRLconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
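	// match: (ADDL (SHRLconst x [d]) (SHLLconst x [c]))
	// cond: d==32-c
	// result: (ROLLconst x [c])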
	for {
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 32-c) {
			break
		}
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
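	// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])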
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRWconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
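	// match: (ADDL <t> (SHRWconst x [d]) (SHLLconst x [c]))
	// cond: d==16-c && c < 16 && t.Size() == 2
	// result: (ROLWconst x [c])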
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRWconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 16-c && c < 16 && t.Size() == 2) {
			break
		}
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
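	// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])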
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHRBconst {
			break
		}
		d := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
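	// match: (ADDL <t> (SHRBconst x [d]) (SHLLconst x [c]))
	// cond: d==8-c && c < 8 && t.Size() == 1
	// result: (ROLBconst x [c])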
	for {
		t := v.Type
		_ = v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHRBconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		c := v_1.AuxInt
		if x != v_1.Args[0] {
			break
		}
		if !(d == 8-c && c < 8 && t.Size() == 1) {
			break
		}
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = c
		v.AddArg(x)
		return true
	}
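	// match: (ADDL x (SHLLconst [3] y))
	// cond:
	// result: (LEAL8 x y)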
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 3 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
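	// match: (ADDL (SHLLconst [3] y) x)
	// cond:
	// result: (LEAL8 x y)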
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 3 {
			break
		}
		y := v_0.Args[0]
		v.reset(OpAMD64LEAL8)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL_10(v *Value) bool {
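	// match: (ADDL x (SHLLconst [2] y))
	// cond:
	// result: (LEAL4 x y)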
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 2 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
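	// match: (ADDL (SHLLconst [2] y) x)
	// cond:
	// result: (LEAL4 x y)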
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 2 {
			break
		}
		y := v_0.Args[0]
		v.reset(OpAMD64LEAL4)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
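	// match: (ADDL x (SHLLconst [1] y))
	// cond:
	// result: (LEAL2 x y)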
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64SHLLconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
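	// match: (ADDL (SHLLconst [1] y) x)
	// cond:
	// result: (LEAL2 x y)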
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		y := v_0.Args[0]
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
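	// match: (ADDL x (ADDL y y))
	// cond:
	// result: (LEAL2 x y)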
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDL {
			break
		}
		y := v_1.Args[1]
		if y != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
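	// match: (ADDL (ADDL y y) x)
	// cond:
	// result: (LEAL2 x y)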
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDL {
			break
		}
		y := v_0.Args[1]
		if y != v_0.Args[0] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
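	// match: (ADDL x (ADDL x y))
	// cond:
	// result: (LEAL2 y x)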
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDL {
			break
		}
		y := v_1.Args[1]
		if x != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
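	// match: (ADDL x (ADDL y x))
	// cond:
	// result: (LEAL2 y x)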
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDL {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		if x != v_1.Args[1] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
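	// match: (ADDL (ADDL x y) x)
	// cond:
	// result: (LEAL2 y x)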
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDL {
			break
		}
		y := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
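	// match: (ADDL (ADDL y x) x)
	// cond:
	// result: (LEAL2 y x)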
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDL {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		if x != v_0.Args[1] {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AddArg(y)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL_20(v *Value) bool {
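	// match: (ADDL (ADDLconst [c] x) y)
	// cond:
	// result: (LEAL1 [c] x y)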
	for {
		y := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
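	// match: (ADDL y (ADDLconst [c] x))
	// cond:
	// result: (LEAL1 [c] x y)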
	for {
		_ = v.Args[1]
		y := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
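	// match: (ADDL x (LEAL [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)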
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAL {
			break
		}
		c := v_1.AuxInt
		s := v_1.Aux
		y := v_1.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
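	// match: (ADDL (LEAL [c] {s} y) x)
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)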
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		c := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[0]
		if !(x.Op != OpSB && y.Op != OpSB) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
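	// match: (ADDL x (NEGL y))
	// cond:
	// result: (SUBL x y)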
	for {
		_ = v.Args[1]
		x := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
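	// match: (ADDL (NEGL y) x)
	// cond:
	// result: (SUBL x y)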
	for {
		x := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64NEGL {
			break
		}
		y := v_0.Args[0]
		v.reset(OpAMD64SUBL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
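	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)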
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
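	// match: (ADDL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)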
	for {
		x := v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool {
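	// match: (ADDLconst [c] (ADDL x y))
	// cond:
	// result: (LEAL1 [c] x y)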
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
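	// match: (ADDLconst [c] (SHLLconst [1] x))
	// cond:
	// result: (LEAL1 [c] x x)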
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64SHLLconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c
		v.AddArg(x)
		v.AddArg(x)
		return true
	}
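	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(c+d)
	// result: (LEAL [c+d] {s} x)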
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		return true
	}
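	// match: (ADDLconst [c] (LEAL1 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAL1 [c+d] {s} x y)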
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAL1 {
			break
		}
		d := v_0.AuxInt
		s := v_0.Aux
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(c + d)) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = c + d
		v.Aux = s
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
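	// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
	// cond: is32Bit(c+d)
	// result: (LEAL2 [c+d] {s} x y)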
1921 for {
1922 c := v.AuxInt
1923 v_0 := v.Args[0]
1924 if v_0.Op != OpAMD64LEAL2 {
1925 break
1926 }
1927 d := v_0.AuxInt
1928 s := v_0.Aux
1929 y := v_0.Args[1]
1930 x := v_0.Args[0]
1931 if !(is32Bit(c + d)) {
1932 break
1933 }
1934 v.reset(OpAMD64LEAL2)
1935 v.AuxInt = c + d
1936 v.Aux = s
1937 v.AddArg(x)
1938 v.AddArg(y)
1939 return true
1940 }
1941 // match: (ADDLconst [c] (LEAL4 [d] {s} x y))
1942 // cond: is32Bit(c+d)
1943 // result: (LEAL4 [c+d] {s} x y)
1944 for {
1945 c := v.AuxInt
1946 v_0 := v.Args[0]
1947 if v_0.Op != OpAMD64LEAL4 {
1948 break
1949 }
1950 d := v_0.AuxInt
1951 s := v_0.Aux
1952 y := v_0.Args[1]
1953 x := v_0.Args[0]
1954 if !(is32Bit(c + d)) {
1955 break
1956 }
1957 v.reset(OpAMD64LEAL4)
1958 v.AuxInt = c + d
1959 v.Aux = s
1960 v.AddArg(x)
1961 v.AddArg(y)
1962 return true
1963 }
1964 // match: (ADDLconst [c] (LEAL8 [d] {s} x y))
1965 // cond: is32Bit(c+d)
1966 // result: (LEAL8 [c+d] {s} x y)
1967 for {
1968 c := v.AuxInt
1969 v_0 := v.Args[0]
1970 if v_0.Op != OpAMD64LEAL8 {
1971 break
1972 }
1973 d := v_0.AuxInt
1974 s := v_0.Aux
1975 y := v_0.Args[1]
1976 x := v_0.Args[0]
1977 if !(is32Bit(c + d)) {
1978 break
1979 }
1980 v.reset(OpAMD64LEAL8)
1981 v.AuxInt = c + d
1982 v.Aux = s
1983 v.AddArg(x)
1984 v.AddArg(y)
1985 return true
1986 }
1987 // match: (ADDLconst [c] x)
1988 // cond: int32(c)==0
1989 // result: x
1990 for {
1991 c := v.AuxInt
1992 x := v.Args[0]
1993 if !(int32(c) == 0) {
1994 break
1995 }
1996 v.reset(OpCopy)
1997 v.Type = x.Type
1998 v.AddArg(x)
1999 return true
2000 }
2001 // match: (ADDLconst [c] (MOVLconst [d]))
2002 // cond:
2003 // result: (MOVLconst [int64(int32(c+d))])
2004 for {
2005 c := v.AuxInt
2006 v_0 := v.Args[0]
2007 if v_0.Op != OpAMD64MOVLconst {
2008 break
2009 }
2010 d := v_0.AuxInt
2011 v.reset(OpAMD64MOVLconst)
2012 v.AuxInt = int64(int32(c + d))
2013 return true
2014 }
2015 // match: (ADDLconst [c] (ADDLconst [d] x))
2016 // cond:
2017 // result: (ADDLconst [int64(int32(c+d))] x)
2018 for {
2019 c := v.AuxInt
2020 v_0 := v.Args[0]
2021 if v_0.Op != OpAMD64ADDLconst {
2022 break
2023 }
2024 d := v_0.AuxInt
2025 x := v_0.Args[0]
2026 v.reset(OpAMD64ADDLconst)
2027 v.AuxInt = int64(int32(c + d))
2028 v.AddArg(x)
2029 return true
2030 }
2031 return false
2032 }
2033 func rewriteValueAMD64_OpAMD64ADDLconst_10(v *Value) bool {
2034 // match: (ADDLconst [off] x:(SP))
2035 // cond:
2036 // result: (LEAL [off] x)
2037 for {
2038 off := v.AuxInt
2039 x := v.Args[0]
2040 if x.Op != OpSP {
2041 break
2042 }
2043 v.reset(OpAMD64LEAL)
2044 v.AuxInt = off
2045 v.AddArg(x)
2046 return true
2047 }
2048 return false
2049 }
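// Note on the constmodify ops below: their AuxInt is a ValAndOff, an
// immediate value and a memory offset packed into a single int64. Offset
// folding therefore goes through ValAndOff(valoff1).canAdd(off2) and
// .add(off2) rather than plain addition guarded by is32Bit.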
2050 func rewriteValueAMD64_OpAMD64ADDLconstmodify_0(v *Value) bool {
2051 // match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
2052 // cond: ValAndOff(valoff1).canAdd(off2)
2053 // result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
2054 for {
2055 valoff1 := v.AuxInt
2056 sym := v.Aux
2057 mem := v.Args[1]
2058 v_0 := v.Args[0]
2059 if v_0.Op != OpAMD64ADDQconst {
2060 break
2061 }
2062 off2 := v_0.AuxInt
2063 base := v_0.Args[0]
2064 if !(ValAndOff(valoff1).canAdd(off2)) {
2065 break
2066 }
2067 v.reset(OpAMD64ADDLconstmodify)
2068 v.AuxInt = ValAndOff(valoff1).add(off2)
2069 v.Aux = sym
2070 v.AddArg(base)
2071 v.AddArg(mem)
2072 return true
2073 }
2074 // match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
2075 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
2076 // result: (ADDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
2077 for {
2078 valoff1 := v.AuxInt
2079 sym1 := v.Aux
2080 mem := v.Args[1]
2081 v_0 := v.Args[0]
2082 if v_0.Op != OpAMD64LEAQ {
2083 break
2084 }
2085 off2 := v_0.AuxInt
2086 sym2 := v_0.Aux
2087 base := v_0.Args[0]
2088 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
2089 break
2090 }
2091 v.reset(OpAMD64ADDLconstmodify)
2092 v.AuxInt = ValAndOff(valoff1).add(off2)
2093 v.Aux = mergeSym(sym1, sym2)
2094 v.AddArg(base)
2095 v.AddArg(mem)
2096 return true
2097 }
2098 return false
2099 }
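// The load-op rules below fold address arithmetic (ADDQconst, LEAQ) into
// the combined add-from-memory instruction. The last rule looks through a
// MOVSSstore to the same address: instead of reloading float bits that
// were just stored, it reuses them directly via MOVLf2i, sidestepping a
// store-to-load forwarding stall.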
2100 func rewriteValueAMD64_OpAMD64ADDLload_0(v *Value) bool {
2101 b := v.Block
2102 typ := &b.Func.Config.Types
2103 // match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
2104 // cond: is32Bit(off1+off2)
2105 // result: (ADDLload [off1+off2] {sym} val base mem)
2106 for {
2107 off1 := v.AuxInt
2108 sym := v.Aux
2109 mem := v.Args[2]
2110 val := v.Args[0]
2111 v_1 := v.Args[1]
2112 if v_1.Op != OpAMD64ADDQconst {
2113 break
2114 }
2115 off2 := v_1.AuxInt
2116 base := v_1.Args[0]
2117 if !(is32Bit(off1 + off2)) {
2118 break
2119 }
2120 v.reset(OpAMD64ADDLload)
2121 v.AuxInt = off1 + off2
2122 v.Aux = sym
2123 v.AddArg(val)
2124 v.AddArg(base)
2125 v.AddArg(mem)
2126 return true
2127 }
2128 // match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
2129 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
2130 // result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2131 for {
2132 off1 := v.AuxInt
2133 sym1 := v.Aux
2134 mem := v.Args[2]
2135 val := v.Args[0]
2136 v_1 := v.Args[1]
2137 if v_1.Op != OpAMD64LEAQ {
2138 break
2139 }
2140 off2 := v_1.AuxInt
2141 sym2 := v_1.Aux
2142 base := v_1.Args[0]
2143 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
2144 break
2145 }
2146 v.reset(OpAMD64ADDLload)
2147 v.AuxInt = off1 + off2
2148 v.Aux = mergeSym(sym1, sym2)
2149 v.AddArg(val)
2150 v.AddArg(base)
2151 v.AddArg(mem)
2152 return true
2153 }
2154 // match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
2155 // cond:
2156 // result: (ADDL x (MOVLf2i y))
2157 for {
2158 off := v.AuxInt
2159 sym := v.Aux
2160 _ = v.Args[2]
2161 x := v.Args[0]
2162 ptr := v.Args[1]
2163 v_2 := v.Args[2]
2164 if v_2.Op != OpAMD64MOVSSstore {
2165 break
2166 }
2167 if v_2.AuxInt != off {
2168 break
2169 }
2170 if v_2.Aux != sym {
2171 break
2172 }
2173 _ = v_2.Args[2]
2174 if ptr != v_2.Args[0] {
2175 break
2176 }
2177 y := v_2.Args[1]
2178 v.reset(OpAMD64ADDL)
2179 v.AddArg(x)
2180 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
2181 v0.AddArg(y)
2182 v.AddArg(v0)
2183 return true
2184 }
2185 return false
2186 }
2187 func rewriteValueAMD64_OpAMD64ADDLmodify_0(v *Value) bool {
2188 // match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
2189 // cond: is32Bit(off1+off2)
2190 // result: (ADDLmodify [off1+off2] {sym} base val mem)
2191 for {
2192 off1 := v.AuxInt
2193 sym := v.Aux
2194 mem := v.Args[2]
2195 v_0 := v.Args[0]
2196 if v_0.Op != OpAMD64ADDQconst {
2197 break
2198 }
2199 off2 := v_0.AuxInt
2200 base := v_0.Args[0]
2201 val := v.Args[1]
2202 if !(is32Bit(off1 + off2)) {
2203 break
2204 }
2205 v.reset(OpAMD64ADDLmodify)
2206 v.AuxInt = off1 + off2
2207 v.Aux = sym
2208 v.AddArg(base)
2209 v.AddArg(val)
2210 v.AddArg(mem)
2211 return true
2212 }
2213 // match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
2214 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
2215 // result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
2216 for {
2217 off1 := v.AuxInt
2218 sym1 := v.Aux
2219 mem := v.Args[2]
2220 v_0 := v.Args[0]
2221 if v_0.Op != OpAMD64LEAQ {
2222 break
2223 }
2224 off2 := v_0.AuxInt
2225 sym2 := v_0.Aux
2226 base := v_0.Args[0]
2227 val := v.Args[1]
2228 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
2229 break
2230 }
2231 v.reset(OpAMD64ADDLmodify)
2232 v.AuxInt = off1 + off2
2233 v.Aux = mergeSym(sym1, sym2)
2234 v.AddArg(base)
2235 v.AddArg(val)
2236 v.AddArg(mem)
2237 return true
2238 }
2239 return false
2240 }
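// Beyond the constant and LEAQ folds, the ADDQ rules recognize the rotate
// idiom x<<c + x>>(64-c), matched as an add of SHLQconst and SHRQconst
// with d == 64-c and lowered to a single ROLQconst. An operand shifted
// left by 1, 2, or 3 likewise turns the add into scaled LEAQ2/LEAQ4/LEAQ8
// address arithmetic.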
2241 func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool {
2242 // match: (ADDQ x (MOVQconst [c]))
2243 // cond: is32Bit(c)
2244 // result: (ADDQconst [c] x)
2245 for {
2246 _ = v.Args[1]
2247 x := v.Args[0]
2248 v_1 := v.Args[1]
2249 if v_1.Op != OpAMD64MOVQconst {
2250 break
2251 }
2252 c := v_1.AuxInt
2253 if !(is32Bit(c)) {
2254 break
2255 }
2256 v.reset(OpAMD64ADDQconst)
2257 v.AuxInt = c
2258 v.AddArg(x)
2259 return true
2260 }
2261 // match: (ADDQ (MOVQconst [c]) x)
2262 // cond: is32Bit(c)
2263 // result: (ADDQconst [c] x)
2264 for {
2265 x := v.Args[1]
2266 v_0 := v.Args[0]
2267 if v_0.Op != OpAMD64MOVQconst {
2268 break
2269 }
2270 c := v_0.AuxInt
2271 if !(is32Bit(c)) {
2272 break
2273 }
2274 v.reset(OpAMD64ADDQconst)
2275 v.AuxInt = c
2276 v.AddArg(x)
2277 return true
2278 }
2279 // match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
2280 // cond: d==64-c
2281 // result: (ROLQconst x [c])
2282 for {
2283 _ = v.Args[1]
2284 v_0 := v.Args[0]
2285 if v_0.Op != OpAMD64SHLQconst {
2286 break
2287 }
2288 c := v_0.AuxInt
2289 x := v_0.Args[0]
2290 v_1 := v.Args[1]
2291 if v_1.Op != OpAMD64SHRQconst {
2292 break
2293 }
2294 d := v_1.AuxInt
2295 if x != v_1.Args[0] {
2296 break
2297 }
2298 if !(d == 64-c) {
2299 break
2300 }
2301 v.reset(OpAMD64ROLQconst)
2302 v.AuxInt = c
2303 v.AddArg(x)
2304 return true
2305 }
2306 // match: (ADDQ (SHRQconst x [d]) (SHLQconst x [c]))
2307 // cond: d==64-c
2308 // result: (ROLQconst x [c])
2309 for {
2310 _ = v.Args[1]
2311 v_0 := v.Args[0]
2312 if v_0.Op != OpAMD64SHRQconst {
2313 break
2314 }
2315 d := v_0.AuxInt
2316 x := v_0.Args[0]
2317 v_1 := v.Args[1]
2318 if v_1.Op != OpAMD64SHLQconst {
2319 break
2320 }
2321 c := v_1.AuxInt
2322 if x != v_1.Args[0] {
2323 break
2324 }
2325 if !(d == 64-c) {
2326 break
2327 }
2328 v.reset(OpAMD64ROLQconst)
2329 v.AuxInt = c
2330 v.AddArg(x)
2331 return true
2332 }
2333 // match: (ADDQ x (SHLQconst [3] y))
2334 // cond:
2335 // result: (LEAQ8 x y)
2336 for {
2337 _ = v.Args[1]
2338 x := v.Args[0]
2339 v_1 := v.Args[1]
2340 if v_1.Op != OpAMD64SHLQconst {
2341 break
2342 }
2343 if v_1.AuxInt != 3 {
2344 break
2345 }
2346 y := v_1.Args[0]
2347 v.reset(OpAMD64LEAQ8)
2348 v.AddArg(x)
2349 v.AddArg(y)
2350 return true
2351 }
2352 // match: (ADDQ (SHLQconst [3] y) x)
2353 // cond:
2354 // result: (LEAQ8 x y)
2355 for {
2356 x := v.Args[1]
2357 v_0 := v.Args[0]
2358 if v_0.Op != OpAMD64SHLQconst {
2359 break
2360 }
2361 if v_0.AuxInt != 3 {
2362 break
2363 }
2364 y := v_0.Args[0]
2365 v.reset(OpAMD64LEAQ8)
2366 v.AddArg(x)
2367 v.AddArg(y)
2368 return true
2369 }
2370 // match: (ADDQ x (SHLQconst [2] y))
2371 // cond:
2372 // result: (LEAQ4 x y)
2373 for {
2374 _ = v.Args[1]
2375 x := v.Args[0]
2376 v_1 := v.Args[1]
2377 if v_1.Op != OpAMD64SHLQconst {
2378 break
2379 }
2380 if v_1.AuxInt != 2 {
2381 break
2382 }
2383 y := v_1.Args[0]
2384 v.reset(OpAMD64LEAQ4)
2385 v.AddArg(x)
2386 v.AddArg(y)
2387 return true
2388 }
2389 // match: (ADDQ (SHLQconst [2] y) x)
2390 // cond:
2391 // result: (LEAQ4 x y)
2392 for {
2393 x := v.Args[1]
2394 v_0 := v.Args[0]
2395 if v_0.Op != OpAMD64SHLQconst {
2396 break
2397 }
2398 if v_0.AuxInt != 2 {
2399 break
2400 }
2401 y := v_0.Args[0]
2402 v.reset(OpAMD64LEAQ4)
2403 v.AddArg(x)
2404 v.AddArg(y)
2405 return true
2406 }
2407 // match: (ADDQ x (SHLQconst [1] y))
2408 // cond:
2409 // result: (LEAQ2 x y)
2410 for {
2411 _ = v.Args[1]
2412 x := v.Args[0]
2413 v_1 := v.Args[1]
2414 if v_1.Op != OpAMD64SHLQconst {
2415 break
2416 }
2417 if v_1.AuxInt != 1 {
2418 break
2419 }
2420 y := v_1.Args[0]
2421 v.reset(OpAMD64LEAQ2)
2422 v.AddArg(x)
2423 v.AddArg(y)
2424 return true
2425 }
2426 // match: (ADDQ (SHLQconst [1] y) x)
2427 // cond:
2428 // result: (LEAQ2 x y)
2429 for {
2430 x := v.Args[1]
2431 v_0 := v.Args[0]
2432 if v_0.Op != OpAMD64SHLQconst {
2433 break
2434 }
2435 if v_0.AuxInt != 1 {
2436 break
2437 }
2438 y := v_0.Args[0]
2439 v.reset(OpAMD64LEAQ2)
2440 v.AddArg(x)
2441 v.AddArg(y)
2442 return true
2443 }
2444 return false
2445 }
2446 func rewriteValueAMD64_OpAMD64ADDQ_10(v *Value) bool {
2447 // match: (ADDQ x (ADDQ y y))
2448 // cond:
2449 // result: (LEAQ2 x y)
2450 for {
2451 _ = v.Args[1]
2452 x := v.Args[0]
2453 v_1 := v.Args[1]
2454 if v_1.Op != OpAMD64ADDQ {
2455 break
2456 }
2457 y := v_1.Args[1]
2458 if y != v_1.Args[0] {
2459 break
2460 }
2461 v.reset(OpAMD64LEAQ2)
2462 v.AddArg(x)
2463 v.AddArg(y)
2464 return true
2465 }
2466 // match: (ADDQ (ADDQ y y) x)
2467 // cond:
2468 // result: (LEAQ2 x y)
2469 for {
2470 x := v.Args[1]
2471 v_0 := v.Args[0]
2472 if v_0.Op != OpAMD64ADDQ {
2473 break
2474 }
2475 y := v_0.Args[1]
2476 if y != v_0.Args[0] {
2477 break
2478 }
2479 v.reset(OpAMD64LEAQ2)
2480 v.AddArg(x)
2481 v.AddArg(y)
2482 return true
2483 }
2484 // match: (ADDQ x (ADDQ x y))
2485 // cond:
2486 // result: (LEAQ2 y x)
2487 for {
2488 _ = v.Args[1]
2489 x := v.Args[0]
2490 v_1 := v.Args[1]
2491 if v_1.Op != OpAMD64ADDQ {
2492 break
2493 }
2494 y := v_1.Args[1]
2495 if x != v_1.Args[0] {
2496 break
2497 }
2498 v.reset(OpAMD64LEAQ2)
2499 v.AddArg(y)
2500 v.AddArg(x)
2501 return true
2502 }
2503 // match: (ADDQ x (ADDQ y x))
2504 // cond:
2505 // result: (LEAQ2 y x)
2506 for {
2507 _ = v.Args[1]
2508 x := v.Args[0]
2509 v_1 := v.Args[1]
2510 if v_1.Op != OpAMD64ADDQ {
2511 break
2512 }
2513 _ = v_1.Args[1]
2514 y := v_1.Args[0]
2515 if x != v_1.Args[1] {
2516 break
2517 }
2518 v.reset(OpAMD64LEAQ2)
2519 v.AddArg(y)
2520 v.AddArg(x)
2521 return true
2522 }
2523 // match: (ADDQ (ADDQ x y) x)
2524 // cond:
2525 // result: (LEAQ2 y x)
2526 for {
2527 x := v.Args[1]
2528 v_0 := v.Args[0]
2529 if v_0.Op != OpAMD64ADDQ {
2530 break
2531 }
2532 y := v_0.Args[1]
2533 if x != v_0.Args[0] {
2534 break
2535 }
2536 v.reset(OpAMD64LEAQ2)
2537 v.AddArg(y)
2538 v.AddArg(x)
2539 return true
2540 }
2541 // match: (ADDQ (ADDQ y x) x)
2542 // cond:
2543 // result: (LEAQ2 y x)
2544 for {
2545 x := v.Args[1]
2546 v_0 := v.Args[0]
2547 if v_0.Op != OpAMD64ADDQ {
2548 break
2549 }
2550 _ = v_0.Args[1]
2551 y := v_0.Args[0]
2552 if x != v_0.Args[1] {
2553 break
2554 }
2555 v.reset(OpAMD64LEAQ2)
2556 v.AddArg(y)
2557 v.AddArg(x)
2558 return true
2559 }
2560 // match: (ADDQ (ADDQconst [c] x) y)
2561 // cond:
2562 // result: (LEAQ1 [c] x y)
2563 for {
2564 y := v.Args[1]
2565 v_0 := v.Args[0]
2566 if v_0.Op != OpAMD64ADDQconst {
2567 break
2568 }
2569 c := v_0.AuxInt
2570 x := v_0.Args[0]
2571 v.reset(OpAMD64LEAQ1)
2572 v.AuxInt = c
2573 v.AddArg(x)
2574 v.AddArg(y)
2575 return true
2576 }
2577 // match: (ADDQ y (ADDQconst [c] x))
2578 // cond:
2579 // result: (LEAQ1 [c] x y)
2580 for {
2581 _ = v.Args[1]
2582 y := v.Args[0]
2583 v_1 := v.Args[1]
2584 if v_1.Op != OpAMD64ADDQconst {
2585 break
2586 }
2587 c := v_1.AuxInt
2588 x := v_1.Args[0]
2589 v.reset(OpAMD64LEAQ1)
2590 v.AuxInt = c
2591 v.AddArg(x)
2592 v.AddArg(y)
2593 return true
2594 }
2595 // match: (ADDQ x (LEAQ [c] {s} y))
2596 // cond: x.Op != OpSB && y.Op != OpSB
2597 // result: (LEAQ1 [c] {s} x y)
2598 for {
2599 _ = v.Args[1]
2600 x := v.Args[0]
2601 v_1 := v.Args[1]
2602 if v_1.Op != OpAMD64LEAQ {
2603 break
2604 }
2605 c := v_1.AuxInt
2606 s := v_1.Aux
2607 y := v_1.Args[0]
2608 if !(x.Op != OpSB && y.Op != OpSB) {
2609 break
2610 }
2611 v.reset(OpAMD64LEAQ1)
2612 v.AuxInt = c
2613 v.Aux = s
2614 v.AddArg(x)
2615 v.AddArg(y)
2616 return true
2617 }
2618 // match: (ADDQ (LEAQ [c] {s} y) x)
2619 // cond: x.Op != OpSB && y.Op != OpSB
2620 // result: (LEAQ1 [c] {s} x y)
2621 for {
2622 x := v.Args[1]
2623 v_0 := v.Args[0]
2624 if v_0.Op != OpAMD64LEAQ {
2625 break
2626 }
2627 c := v_0.AuxInt
2628 s := v_0.Aux
2629 y := v_0.Args[0]
2630 if !(x.Op != OpSB && y.Op != OpSB) {
2631 break
2632 }
2633 v.reset(OpAMD64LEAQ1)
2634 v.AuxInt = c
2635 v.Aux = s
2636 v.AddArg(x)
2637 v.AddArg(y)
2638 return true
2639 }
2640 return false
2641 }
2642 func rewriteValueAMD64_OpAMD64ADDQ_20(v *Value) bool {
2643 // match: (ADDQ x (NEGQ y))
2644 // cond:
2645 // result: (SUBQ x y)
2646 for {
2647 _ = v.Args[1]
2648 x := v.Args[0]
2649 v_1 := v.Args[1]
2650 if v_1.Op != OpAMD64NEGQ {
2651 break
2652 }
2653 y := v_1.Args[0]
2654 v.reset(OpAMD64SUBQ)
2655 v.AddArg(x)
2656 v.AddArg(y)
2657 return true
2658 }
2659 // match: (ADDQ (NEGQ y) x)
2660 // cond:
2661 // result: (SUBQ x y)
2662 for {
2663 x := v.Args[1]
2664 v_0 := v.Args[0]
2665 if v_0.Op != OpAMD64NEGQ {
2666 break
2667 }
2668 y := v_0.Args[0]
2669 v.reset(OpAMD64SUBQ)
2670 v.AddArg(x)
2671 v.AddArg(y)
2672 return true
2673 }
2674 // match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
2675 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
2676 // result: (ADDQload x [off] {sym} ptr mem)
2677 for {
2678 _ = v.Args[1]
2679 x := v.Args[0]
2680 l := v.Args[1]
2681 if l.Op != OpAMD64MOVQload {
2682 break
2683 }
2684 off := l.AuxInt
2685 sym := l.Aux
2686 mem := l.Args[1]
2687 ptr := l.Args[0]
2688 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2689 break
2690 }
2691 v.reset(OpAMD64ADDQload)
2692 v.AuxInt = off
2693 v.Aux = sym
2694 v.AddArg(x)
2695 v.AddArg(ptr)
2696 v.AddArg(mem)
2697 return true
2698 }
2699 // match: (ADDQ l:(MOVQload [off] {sym} ptr mem) x)
2700 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
2701 // result: (ADDQload x [off] {sym} ptr mem)
2702 for {
2703 x := v.Args[1]
2704 l := v.Args[0]
2705 if l.Op != OpAMD64MOVQload {
2706 break
2707 }
2708 off := l.AuxInt
2709 sym := l.Aux
2710 mem := l.Args[1]
2711 ptr := l.Args[0]
2712 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2713 break
2714 }
2715 v.reset(OpAMD64ADDQload)
2716 v.AuxInt = off
2717 v.Aux = sym
2718 v.AddArg(x)
2719 v.AddArg(ptr)
2720 v.AddArg(mem)
2721 return true
2722 }
2723 return false
2724 }
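// ADDQcarry is the carry-producing add used by the intrinsified
// math/bits.Add64. Its constant operand folds into ADDQconstcarry only
// under is32Bit(c), because x86-64 ALU immediates are sign-extended
// 32-bit fields.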
2725 func rewriteValueAMD64_OpAMD64ADDQcarry_0(v *Value) bool {
2726 // match: (ADDQcarry x (MOVQconst [c]))
2727 // cond: is32Bit(c)
2728 // result: (ADDQconstcarry x [c])
2729 for {
2730 _ = v.Args[1]
2731 x := v.Args[0]
2732 v_1 := v.Args[1]
2733 if v_1.Op != OpAMD64MOVQconst {
2734 break
2735 }
2736 c := v_1.AuxInt
2737 if !(is32Bit(c)) {
2738 break
2739 }
2740 v.reset(OpAMD64ADDQconstcarry)
2741 v.AuxInt = c
2742 v.AddArg(x)
2743 return true
2744 }
2745 // match: (ADDQcarry (MOVQconst [c]) x)
2746 // cond: is32Bit(c)
2747 // result: (ADDQconstcarry x [c])
2748 for {
2749 x := v.Args[1]
2750 v_0 := v.Args[0]
2751 if v_0.Op != OpAMD64MOVQconst {
2752 break
2753 }
2754 c := v_0.AuxInt
2755 if !(is32Bit(c)) {
2756 break
2757 }
2758 v.reset(OpAMD64ADDQconstcarry)
2759 v.AuxInt = c
2760 v.AddArg(x)
2761 return true
2762 }
2763 return false
2764 }
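// ADDQconst mirrors the ADDLconst rules at 64 bits. The is32Bit(c+d)
// guards matter here: AuxInt displacements are emitted as sign-extended
// 32-bit immediates, so a fold whose combined offset leaves that range
// must be skipped.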
2765 func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool {
2766 // match: (ADDQconst [c] (ADDQ x y))
2767 // cond:
2768 // result: (LEAQ1 [c] x y)
2769 for {
2770 c := v.AuxInt
2771 v_0 := v.Args[0]
2772 if v_0.Op != OpAMD64ADDQ {
2773 break
2774 }
2775 y := v_0.Args[1]
2776 x := v_0.Args[0]
2777 v.reset(OpAMD64LEAQ1)
2778 v.AuxInt = c
2779 v.AddArg(x)
2780 v.AddArg(y)
2781 return true
2782 }
2783 // match: (ADDQconst [c] (SHLQconst [1] x))
2784 // cond:
2785 // result: (LEAQ1 [c] x x)
2786 for {
2787 c := v.AuxInt
2788 v_0 := v.Args[0]
2789 if v_0.Op != OpAMD64SHLQconst {
2790 break
2791 }
2792 if v_0.AuxInt != 1 {
2793 break
2794 }
2795 x := v_0.Args[0]
2796 v.reset(OpAMD64LEAQ1)
2797 v.AuxInt = c
2798 v.AddArg(x)
2799 v.AddArg(x)
2800 return true
2801 }
2802 // match: (ADDQconst [c] (LEAQ [d] {s} x))
2803 // cond: is32Bit(c+d)
2804 // result: (LEAQ [c+d] {s} x)
2805 for {
2806 c := v.AuxInt
2807 v_0 := v.Args[0]
2808 if v_0.Op != OpAMD64LEAQ {
2809 break
2810 }
2811 d := v_0.AuxInt
2812 s := v_0.Aux
2813 x := v_0.Args[0]
2814 if !(is32Bit(c + d)) {
2815 break
2816 }
2817 v.reset(OpAMD64LEAQ)
2818 v.AuxInt = c + d
2819 v.Aux = s
2820 v.AddArg(x)
2821 return true
2822 }
2823 // match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
2824 // cond: is32Bit(c+d)
2825 // result: (LEAQ1 [c+d] {s} x y)
2826 for {
2827 c := v.AuxInt
2828 v_0 := v.Args[0]
2829 if v_0.Op != OpAMD64LEAQ1 {
2830 break
2831 }
2832 d := v_0.AuxInt
2833 s := v_0.Aux
2834 y := v_0.Args[1]
2835 x := v_0.Args[0]
2836 if !(is32Bit(c + d)) {
2837 break
2838 }
2839 v.reset(OpAMD64LEAQ1)
2840 v.AuxInt = c + d
2841 v.Aux = s
2842 v.AddArg(x)
2843 v.AddArg(y)
2844 return true
2845 }
2846 // match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
2847 // cond: is32Bit(c+d)
2848 // result: (LEAQ2 [c+d] {s} x y)
2849 for {
2850 c := v.AuxInt
2851 v_0 := v.Args[0]
2852 if v_0.Op != OpAMD64LEAQ2 {
2853 break
2854 }
2855 d := v_0.AuxInt
2856 s := v_0.Aux
2857 y := v_0.Args[1]
2858 x := v_0.Args[0]
2859 if !(is32Bit(c + d)) {
2860 break
2861 }
2862 v.reset(OpAMD64LEAQ2)
2863 v.AuxInt = c + d
2864 v.Aux = s
2865 v.AddArg(x)
2866 v.AddArg(y)
2867 return true
2868 }
2869 // match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
2870 // cond: is32Bit(c+d)
2871 // result: (LEAQ4 [c+d] {s} x y)
2872 for {
2873 c := v.AuxInt
2874 v_0 := v.Args[0]
2875 if v_0.Op != OpAMD64LEAQ4 {
2876 break
2877 }
2878 d := v_0.AuxInt
2879 s := v_0.Aux
2880 y := v_0.Args[1]
2881 x := v_0.Args[0]
2882 if !(is32Bit(c + d)) {
2883 break
2884 }
2885 v.reset(OpAMD64LEAQ4)
2886 v.AuxInt = c + d
2887 v.Aux = s
2888 v.AddArg(x)
2889 v.AddArg(y)
2890 return true
2891 }
2892 // match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
2893 // cond: is32Bit(c+d)
2894 // result: (LEAQ8 [c+d] {s} x y)
2895 for {
2896 c := v.AuxInt
2897 v_0 := v.Args[0]
2898 if v_0.Op != OpAMD64LEAQ8 {
2899 break
2900 }
2901 d := v_0.AuxInt
2902 s := v_0.Aux
2903 y := v_0.Args[1]
2904 x := v_0.Args[0]
2905 if !(is32Bit(c + d)) {
2906 break
2907 }
2908 v.reset(OpAMD64LEAQ8)
2909 v.AuxInt = c + d
2910 v.Aux = s
2911 v.AddArg(x)
2912 v.AddArg(y)
2913 return true
2914 }
2915 // match: (ADDQconst [0] x)
2916 // cond:
2917 // result: x
2918 for {
2919 if v.AuxInt != 0 {
2920 break
2921 }
2922 x := v.Args[0]
2923 v.reset(OpCopy)
2924 v.Type = x.Type
2925 v.AddArg(x)
2926 return true
2927 }
2928 // match: (ADDQconst [c] (MOVQconst [d]))
2929 // cond:
2930 // result: (MOVQconst [c+d])
2931 for {
2932 c := v.AuxInt
2933 v_0 := v.Args[0]
2934 if v_0.Op != OpAMD64MOVQconst {
2935 break
2936 }
2937 d := v_0.AuxInt
2938 v.reset(OpAMD64MOVQconst)
2939 v.AuxInt = c + d
2940 return true
2941 }
2942 // match: (ADDQconst [c] (ADDQconst [d] x))
2943 // cond: is32Bit(c+d)
2944 // result: (ADDQconst [c+d] x)
2945 for {
2946 c := v.AuxInt
2947 v_0 := v.Args[0]
2948 if v_0.Op != OpAMD64ADDQconst {
2949 break
2950 }
2951 d := v_0.AuxInt
2952 x := v_0.Args[0]
2953 if !(is32Bit(c + d)) {
2954 break
2955 }
2956 v.reset(OpAMD64ADDQconst)
2957 v.AuxInt = c + d
2958 v.AddArg(x)
2959 return true
2960 }
2961 return false
2962 }
2963 func rewriteValueAMD64_OpAMD64ADDQconst_10(v *Value) bool {
2964 // match: (ADDQconst [off] x:(SP))
2965 // cond:
2966 // result: (LEAQ [off] x)
2967 for {
2968 off := v.AuxInt
2969 x := v.Args[0]
2970 if x.Op != OpSP {
2971 break
2972 }
2973 v.reset(OpAMD64LEAQ)
2974 v.AuxInt = off
2975 v.AddArg(x)
2976 return true
2977 }
2978 return false
2979 }
2980 func rewriteValueAMD64_OpAMD64ADDQconstmodify_0(v *Value) bool {
2981 // match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
2982 // cond: ValAndOff(valoff1).canAdd(off2)
2983 // result: (ADDQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
2984 for {
2985 valoff1 := v.AuxInt
2986 sym := v.Aux
2987 mem := v.Args[1]
2988 v_0 := v.Args[0]
2989 if v_0.Op != OpAMD64ADDQconst {
2990 break
2991 }
2992 off2 := v_0.AuxInt
2993 base := v_0.Args[0]
2994 if !(ValAndOff(valoff1).canAdd(off2)) {
2995 break
2996 }
2997 v.reset(OpAMD64ADDQconstmodify)
2998 v.AuxInt = ValAndOff(valoff1).add(off2)
2999 v.Aux = sym
3000 v.AddArg(base)
3001 v.AddArg(mem)
3002 return true
3003 }
3004 // match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
3005 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
3006 // result: (ADDQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
3007 for {
3008 valoff1 := v.AuxInt
3009 sym1 := v.Aux
3010 mem := v.Args[1]
3011 v_0 := v.Args[0]
3012 if v_0.Op != OpAMD64LEAQ {
3013 break
3014 }
3015 off2 := v_0.AuxInt
3016 sym2 := v_0.Aux
3017 base := v_0.Args[0]
3018 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
3019 break
3020 }
3021 v.reset(OpAMD64ADDQconstmodify)
3022 v.AuxInt = ValAndOff(valoff1).add(off2)
3023 v.Aux = mergeSym(sym1, sym2)
3024 v.AddArg(base)
3025 v.AddArg(mem)
3026 return true
3027 }
3028 return false
3029 }
3030 func rewriteValueAMD64_OpAMD64ADDQload_0(v *Value) bool {
3031 b := v.Block
3032 typ := &b.Func.Config.Types
3033 // match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
3034 // cond: is32Bit(off1+off2)
3035 // result: (ADDQload [off1+off2] {sym} val base mem)
3036 for {
3037 off1 := v.AuxInt
3038 sym := v.Aux
3039 mem := v.Args[2]
3040 val := v.Args[0]
3041 v_1 := v.Args[1]
3042 if v_1.Op != OpAMD64ADDQconst {
3043 break
3044 }
3045 off2 := v_1.AuxInt
3046 base := v_1.Args[0]
3047 if !(is32Bit(off1 + off2)) {
3048 break
3049 }
3050 v.reset(OpAMD64ADDQload)
3051 v.AuxInt = off1 + off2
3052 v.Aux = sym
3053 v.AddArg(val)
3054 v.AddArg(base)
3055 v.AddArg(mem)
3056 return true
3057 }
3058 // match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
3059 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
3060 // result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
3061 for {
3062 off1 := v.AuxInt
3063 sym1 := v.Aux
3064 mem := v.Args[2]
3065 val := v.Args[0]
3066 v_1 := v.Args[1]
3067 if v_1.Op != OpAMD64LEAQ {
3068 break
3069 }
3070 off2 := v_1.AuxInt
3071 sym2 := v_1.Aux
3072 base := v_1.Args[0]
3073 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
3074 break
3075 }
3076 v.reset(OpAMD64ADDQload)
3077 v.AuxInt = off1 + off2
3078 v.Aux = mergeSym(sym1, sym2)
3079 v.AddArg(val)
3080 v.AddArg(base)
3081 v.AddArg(mem)
3082 return true
3083 }
3084 // match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
3085 // cond:
3086 // result: (ADDQ x (MOVQf2i y))
3087 for {
3088 off := v.AuxInt
3089 sym := v.Aux
3090 _ = v.Args[2]
3091 x := v.Args[0]
3092 ptr := v.Args[1]
3093 v_2 := v.Args[2]
3094 if v_2.Op != OpAMD64MOVSDstore {
3095 break
3096 }
3097 if v_2.AuxInt != off {
3098 break
3099 }
3100 if v_2.Aux != sym {
3101 break
3102 }
3103 _ = v_2.Args[2]
3104 if ptr != v_2.Args[0] {
3105 break
3106 }
3107 y := v_2.Args[1]
3108 v.reset(OpAMD64ADDQ)
3109 v.AddArg(x)
3110 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
3111 v0.AddArg(y)
3112 v.AddArg(v0)
3113 return true
3114 }
3115 return false
3116 }
3117 func rewriteValueAMD64_OpAMD64ADDQmodify_0(v *Value) bool {
3118 // match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
3119 // cond: is32Bit(off1+off2)
3120 // result: (ADDQmodify [off1+off2] {sym} base val mem)
3121 for {
3122 off1 := v.AuxInt
3123 sym := v.Aux
3124 mem := v.Args[2]
3125 v_0 := v.Args[0]
3126 if v_0.Op != OpAMD64ADDQconst {
3127 break
3128 }
3129 off2 := v_0.AuxInt
3130 base := v_0.Args[0]
3131 val := v.Args[1]
3132 if !(is32Bit(off1 + off2)) {
3133 break
3134 }
3135 v.reset(OpAMD64ADDQmodify)
3136 v.AuxInt = off1 + off2
3137 v.Aux = sym
3138 v.AddArg(base)
3139 v.AddArg(val)
3140 v.AddArg(mem)
3141 return true
3142 }
3143 // match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
3144 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
3145 // result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
3146 for {
3147 off1 := v.AuxInt
3148 sym1 := v.Aux
3149 mem := v.Args[2]
3150 v_0 := v.Args[0]
3151 if v_0.Op != OpAMD64LEAQ {
3152 break
3153 }
3154 off2 := v_0.AuxInt
3155 sym2 := v_0.Aux
3156 base := v_0.Args[0]
3157 val := v.Args[1]
3158 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
3159 break
3160 }
3161 v.reset(OpAMD64ADDQmodify)
3162 v.AuxInt = off1 + off2
3163 v.Aux = mergeSym(sym1, sym2)
3164 v.AddArg(base)
3165 v.AddArg(val)
3166 v.AddArg(mem)
3167 return true
3168 }
3169 return false
3170 }
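// The SSE adds below merge a float load into the instruction's memory
// operand. canMergeLoadClobber(v, l, x) checks that the load l is used
// only by this op and can be folded given the other operand x; clobber(l)
// then marks the dead load for removal.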
3171 func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool {
3172 // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
3173 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
3174 // result: (ADDSDload x [off] {sym} ptr mem)
3175 for {
3176 _ = v.Args[1]
3177 x := v.Args[0]
3178 l := v.Args[1]
3179 if l.Op != OpAMD64MOVSDload {
3180 break
3181 }
3182 off := l.AuxInt
3183 sym := l.Aux
3184 mem := l.Args[1]
3185 ptr := l.Args[0]
3186 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
3187 break
3188 }
3189 v.reset(OpAMD64ADDSDload)
3190 v.AuxInt = off
3191 v.Aux = sym
3192 v.AddArg(x)
3193 v.AddArg(ptr)
3194 v.AddArg(mem)
3195 return true
3196 }
3197 // match: (ADDSD l:(MOVSDload [off] {sym} ptr mem) x)
3198 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
3199 // result: (ADDSDload x [off] {sym} ptr mem)
3200 for {
3201 x := v.Args[1]
3202 l := v.Args[0]
3203 if l.Op != OpAMD64MOVSDload {
3204 break
3205 }
3206 off := l.AuxInt
3207 sym := l.Aux
3208 mem := l.Args[1]
3209 ptr := l.Args[0]
3210 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
3211 break
3212 }
3213 v.reset(OpAMD64ADDSDload)
3214 v.AuxInt = off
3215 v.Aux = sym
3216 v.AddArg(x)
3217 v.AddArg(ptr)
3218 v.AddArg(mem)
3219 return true
3220 }
3221 return false
3222 }
3223 func rewriteValueAMD64_OpAMD64ADDSDload_0(v *Value) bool {
3224 b := v.Block
3225 typ := &b.Func.Config.Types
3226 // match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
3227 // cond: is32Bit(off1+off2)
3228 // result: (ADDSDload [off1+off2] {sym} val base mem)
3229 for {
3230 off1 := v.AuxInt
3231 sym := v.Aux
3232 mem := v.Args[2]
3233 val := v.Args[0]
3234 v_1 := v.Args[1]
3235 if v_1.Op != OpAMD64ADDQconst {
3236 break
3237 }
3238 off2 := v_1.AuxInt
3239 base := v_1.Args[0]
3240 if !(is32Bit(off1 + off2)) {
3241 break
3242 }
3243 v.reset(OpAMD64ADDSDload)
3244 v.AuxInt = off1 + off2
3245 v.Aux = sym
3246 v.AddArg(val)
3247 v.AddArg(base)
3248 v.AddArg(mem)
3249 return true
3250 }
3251 // match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
3252 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
3253 // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
3254 for {
3255 off1 := v.AuxInt
3256 sym1 := v.Aux
3257 mem := v.Args[2]
3258 val := v.Args[0]
3259 v_1 := v.Args[1]
3260 if v_1.Op != OpAMD64LEAQ {
3261 break
3262 }
3263 off2 := v_1.AuxInt
3264 sym2 := v_1.Aux
3265 base := v_1.Args[0]
3266 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
3267 break
3268 }
3269 v.reset(OpAMD64ADDSDload)
3270 v.AuxInt = off1 + off2
3271 v.Aux = mergeSym(sym1, sym2)
3272 v.AddArg(val)
3273 v.AddArg(base)
3274 v.AddArg(mem)
3275 return true
3276 }
3277 // match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
3278 // cond:
3279 // result: (ADDSD x (MOVQi2f y))
3280 for {
3281 off := v.AuxInt
3282 sym := v.Aux
3283 _ = v.Args[2]
3284 x := v.Args[0]
3285 ptr := v.Args[1]
3286 v_2 := v.Args[2]
3287 if v_2.Op != OpAMD64MOVQstore {
3288 break
3289 }
3290 if v_2.AuxInt != off {
3291 break
3292 }
3293 if v_2.Aux != sym {
3294 break
3295 }
3296 _ = v_2.Args[2]
3297 if ptr != v_2.Args[0] {
3298 break
3299 }
3300 y := v_2.Args[1]
3301 v.reset(OpAMD64ADDSD)
3302 v.AddArg(x)
3303 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
3304 v0.AddArg(y)
3305 v.AddArg(v0)
3306 return true
3307 }
3308 return false
3309 }
3310 func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool {
3311 // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
3312 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
3313 // result: (ADDSSload x [off] {sym} ptr mem)
3314 for {
3315 _ = v.Args[1]
3316 x := v.Args[0]
3317 l := v.Args[1]
3318 if l.Op != OpAMD64MOVSSload {
3319 break
3320 }
3321 off := l.AuxInt
3322 sym := l.Aux
3323 mem := l.Args[1]
3324 ptr := l.Args[0]
3325 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
3326 break
3327 }
3328 v.reset(OpAMD64ADDSSload)
3329 v.AuxInt = off
3330 v.Aux = sym
3331 v.AddArg(x)
3332 v.AddArg(ptr)
3333 v.AddArg(mem)
3334 return true
3335 }
3336 // match: (ADDSS l:(MOVSSload [off] {sym} ptr mem) x)
3337 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
3338 // result: (ADDSSload x [off] {sym} ptr mem)
3339 for {
3340 x := v.Args[1]
3341 l := v.Args[0]
3342 if l.Op != OpAMD64MOVSSload {
3343 break
3344 }
3345 off := l.AuxInt
3346 sym := l.Aux
3347 mem := l.Args[1]
3348 ptr := l.Args[0]
3349 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
3350 break
3351 }
3352 v.reset(OpAMD64ADDSSload)
3353 v.AuxInt = off
3354 v.Aux = sym
3355 v.AddArg(x)
3356 v.AddArg(ptr)
3357 v.AddArg(mem)
3358 return true
3359 }
3360 return false
3361 }
3362 func rewriteValueAMD64_OpAMD64ADDSSload_0(v *Value) bool {
3363 b := v.Block
3364 typ := &b.Func.Config.Types
3365 // match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
3366 // cond: is32Bit(off1+off2)
3367 // result: (ADDSSload [off1+off2] {sym} val base mem)
3368 for {
3369 off1 := v.AuxInt
3370 sym := v.Aux
3371 mem := v.Args[2]
3372 val := v.Args[0]
3373 v_1 := v.Args[1]
3374 if v_1.Op != OpAMD64ADDQconst {
3375 break
3376 }
3377 off2 := v_1.AuxInt
3378 base := v_1.Args[0]
3379 if !(is32Bit(off1 + off2)) {
3380 break
3381 }
3382 v.reset(OpAMD64ADDSSload)
3383 v.AuxInt = off1 + off2
3384 v.Aux = sym
3385 v.AddArg(val)
3386 v.AddArg(base)
3387 v.AddArg(mem)
3388 return true
3389 }
3390 // match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
3391 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
3392 // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
3393 for {
3394 off1 := v.AuxInt
3395 sym1 := v.Aux
3396 mem := v.Args[2]
3397 val := v.Args[0]
3398 v_1 := v.Args[1]
3399 if v_1.Op != OpAMD64LEAQ {
3400 break
3401 }
3402 off2 := v_1.AuxInt
3403 sym2 := v_1.Aux
3404 base := v_1.Args[0]
3405 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
3406 break
3407 }
3408 v.reset(OpAMD64ADDSSload)
3409 v.AuxInt = off1 + off2
3410 v.Aux = mergeSym(sym1, sym2)
3411 v.AddArg(val)
3412 v.AddArg(base)
3413 v.AddArg(mem)
3414 return true
3415 }
3416 // match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
3417 // cond:
3418 // result: (ADDSS x (MOVLi2f y))
3419 for {
3420 off := v.AuxInt
3421 sym := v.Aux
3422 _ = v.Args[2]
3423 x := v.Args[0]
3424 ptr := v.Args[1]
3425 v_2 := v.Args[2]
3426 if v_2.Op != OpAMD64MOVLstore {
3427 break
3428 }
3429 if v_2.AuxInt != off {
3430 break
3431 }
3432 if v_2.Aux != sym {
3433 break
3434 }
3435 _ = v_2.Args[2]
3436 if ptr != v_2.Args[0] {
3437 break
3438 }
3439 y := v_2.Args[1]
3440 v.reset(OpAMD64ADDSS)
3441 v.AddArg(x)
3442 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
3443 v0.AddArg(y)
3444 v.AddArg(v0)
3445 return true
3446 }
3447 return false
3448 }
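// The ANDL rules recognize bit-clearing idioms. x &^ (1<<y), arriving here
// as an AND with NOTL(SHLL(MOVLconst [1]) y), becomes BTRL. An AND with a
// constant whose complement is a single bit becomes BTRLconst only for
// masks >= 128: below that the AND immediate fits in a sign-extended byte
// and is at least as short as BTR. The !config.nacl guards disable the
// BT-family forms on the NaCl port.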
3449 func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool {
3450 b := v.Block
3451 config := b.Func.Config
3452 // match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
3453 // cond: !config.nacl
3454 // result: (BTRL x y)
3455 for {
3456 x := v.Args[1]
3457 v_0 := v.Args[0]
3458 if v_0.Op != OpAMD64NOTL {
3459 break
3460 }
3461 v_0_0 := v_0.Args[0]
3462 if v_0_0.Op != OpAMD64SHLL {
3463 break
3464 }
3465 y := v_0_0.Args[1]
3466 v_0_0_0 := v_0_0.Args[0]
3467 if v_0_0_0.Op != OpAMD64MOVLconst {
3468 break
3469 }
3470 if v_0_0_0.AuxInt != 1 {
3471 break
3472 }
3473 if !(!config.nacl) {
3474 break
3475 }
3476 v.reset(OpAMD64BTRL)
3477 v.AddArg(x)
3478 v.AddArg(y)
3479 return true
3480 }
3481 // match: (ANDL x (NOTL (SHLL (MOVLconst [1]) y)))
3482 // cond: !config.nacl
3483 // result: (BTRL x y)
3484 for {
3485 _ = v.Args[1]
3486 x := v.Args[0]
3487 v_1 := v.Args[1]
3488 if v_1.Op != OpAMD64NOTL {
3489 break
3490 }
3491 v_1_0 := v_1.Args[0]
3492 if v_1_0.Op != OpAMD64SHLL {
3493 break
3494 }
3495 y := v_1_0.Args[1]
3496 v_1_0_0 := v_1_0.Args[0]
3497 if v_1_0_0.Op != OpAMD64MOVLconst {
3498 break
3499 }
3500 if v_1_0_0.AuxInt != 1 {
3501 break
3502 }
3503 if !(!config.nacl) {
3504 break
3505 }
3506 v.reset(OpAMD64BTRL)
3507 v.AddArg(x)
3508 v.AddArg(y)
3509 return true
3510 }
3511 // match: (ANDL (MOVLconst [c]) x)
3512 // cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
3513 // result: (BTRLconst [log2uint32(^c)] x)
3514 for {
3515 x := v.Args[1]
3516 v_0 := v.Args[0]
3517 if v_0.Op != OpAMD64MOVLconst {
3518 break
3519 }
3520 c := v_0.AuxInt
3521 if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
3522 break
3523 }
3524 v.reset(OpAMD64BTRLconst)
3525 v.AuxInt = log2uint32(^c)
3526 v.AddArg(x)
3527 return true
3528 }
3529 // match: (ANDL x (MOVLconst [c]))
3530 // cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
3531 // result: (BTRLconst [log2uint32(^c)] x)
3532 for {
3533 _ = v.Args[1]
3534 x := v.Args[0]
3535 v_1 := v.Args[1]
3536 if v_1.Op != OpAMD64MOVLconst {
3537 break
3538 }
3539 c := v_1.AuxInt
3540 if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
3541 break
3542 }
3543 v.reset(OpAMD64BTRLconst)
3544 v.AuxInt = log2uint32(^c)
3545 v.AddArg(x)
3546 return true
3547 }
3548 // match: (ANDL x (MOVLconst [c]))
3549 // cond:
3550 // result: (ANDLconst [c] x)
3551 for {
3552 _ = v.Args[1]
3553 x := v.Args[0]
3554 v_1 := v.Args[1]
3555 if v_1.Op != OpAMD64MOVLconst {
3556 break
3557 }
3558 c := v_1.AuxInt
3559 v.reset(OpAMD64ANDLconst)
3560 v.AuxInt = c
3561 v.AddArg(x)
3562 return true
3563 }
3564 // match: (ANDL (MOVLconst [c]) x)
3565 // cond:
3566 // result: (ANDLconst [c] x)
3567 for {
3568 x := v.Args[1]
3569 v_0 := v.Args[0]
3570 if v_0.Op != OpAMD64MOVLconst {
3571 break
3572 }
3573 c := v_0.AuxInt
3574 v.reset(OpAMD64ANDLconst)
3575 v.AuxInt = c
3576 v.AddArg(x)
3577 return true
3578 }
3579 // match: (ANDL x x)
3580 // cond:
3581 // result: x
3582 for {
3583 x := v.Args[1]
3584 if x != v.Args[0] {
3585 break
3586 }
3587 v.reset(OpCopy)
3588 v.Type = x.Type
3589 v.AddArg(x)
3590 return true
3591 }
3592 // match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
3593 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
3594 // result: (ANDLload x [off] {sym} ptr mem)
3595 for {
3596 _ = v.Args[1]
3597 x := v.Args[0]
3598 l := v.Args[1]
3599 if l.Op != OpAMD64MOVLload {
3600 break
3601 }
3602 off := l.AuxInt
3603 sym := l.Aux
3604 mem := l.Args[1]
3605 ptr := l.Args[0]
3606 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
3607 break
3608 }
3609 v.reset(OpAMD64ANDLload)
3610 v.AuxInt = off
3611 v.Aux = sym
3612 v.AddArg(x)
3613 v.AddArg(ptr)
3614 v.AddArg(mem)
3615 return true
3616 }
3617 // match: (ANDL l:(MOVLload [off] {sym} ptr mem) x)
3618 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
3619 // result: (ANDLload x [off] {sym} ptr mem)
3620 for {
3621 x := v.Args[1]
3622 l := v.Args[0]
3623 if l.Op != OpAMD64MOVLload {
3624 break
3625 }
3626 off := l.AuxInt
3627 sym := l.Aux
3628 mem := l.Args[1]
3629 ptr := l.Args[0]
3630 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
3631 break
3632 }
3633 v.reset(OpAMD64ANDLload)
3634 v.AuxInt = off
3635 v.Aux = sym
3636 v.AddArg(x)
3637 v.AddArg(ptr)
3638 v.AddArg(mem)
3639 return true
3640 }
3641 return false
3642 }
3643 func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool {
3644 b := v.Block
3645 config := b.Func.Config
3646 // match: (ANDLconst [c] x)
3647 // cond: isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
3648 // result: (BTRLconst [log2uint32(^c)] x)
3649 for {
3650 c := v.AuxInt
3651 x := v.Args[0]
3652 if !(isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
3653 break
3654 }
3655 v.reset(OpAMD64BTRLconst)
3656 v.AuxInt = log2uint32(^c)
3657 v.AddArg(x)
3658 return true
3659 }
3660 // match: (ANDLconst [c] (ANDLconst [d] x))
3661 // cond:
3662 // result: (ANDLconst [c & d] x)
3663 for {
3664 c := v.AuxInt
3665 v_0 := v.Args[0]
3666 if v_0.Op != OpAMD64ANDLconst {
3667 break
3668 }
3669 d := v_0.AuxInt
3670 x := v_0.Args[0]
3671 v.reset(OpAMD64ANDLconst)
3672 v.AuxInt = c & d
3673 v.AddArg(x)
3674 return true
3675 }
3676 // match: (ANDLconst [c] (BTRLconst [d] x))
3677 // cond:
3678 // result: (ANDLconst [c &^ (1<<uint32(d))] x)
3679 for {
3680 c := v.AuxInt
3681 v_0 := v.Args[0]
3682 if v_0.Op != OpAMD64BTRLconst {
3683 break
3684 }
3685 d := v_0.AuxInt
3686 x := v_0.Args[0]
3687 v.reset(OpAMD64ANDLconst)
3688 v.AuxInt = c &^ (1 << uint32(d))
3689 v.AddArg(x)
3690 return true
3691 }
3692 // match: (ANDLconst [0xFF] x)
3693 // cond:
3694 // result: (MOVBQZX x)
3695 for {
3696 if v.AuxInt != 0xFF {
3697 break
3698 }
3699 x := v.Args[0]
3700 v.reset(OpAMD64MOVBQZX)
3701 v.AddArg(x)
3702 return true
3703 }
3704 // match: (ANDLconst [0xFFFF] x)
3705 // cond:
3706 // result: (MOVWQZX x)
3707 for {
3708 if v.AuxInt != 0xFFFF {
3709 break
3710 }
3711 x := v.Args[0]
3712 v.reset(OpAMD64MOVWQZX)
3713 v.AddArg(x)
3714 return true
3715 }
3716 // match: (ANDLconst [c] _)
3717 // cond: int32(c)==0
3718 // result: (MOVLconst [0])
3719 for {
3720 c := v.AuxInt
3721 if !(int32(c) == 0) {
3722 break
3723 }
3724 v.reset(OpAMD64MOVLconst)
3725 v.AuxInt = 0
3726 return true
3727 }
3728 // match: (ANDLconst [c] x)
3729 // cond: int32(c)==-1
3730 // result: x
3731 for {
3732 c := v.AuxInt
3733 x := v.Args[0]
3734 if !(int32(c) == -1) {
3735 break
3736 }
3737 v.reset(OpCopy)
3738 v.Type = x.Type
3739 v.AddArg(x)
3740 return true
3741 }
3742 // match: (ANDLconst [c] (MOVLconst [d]))
3743 // cond:
3744 // result: (MOVLconst [c&d])
3745 for {
3746 c := v.AuxInt
3747 v_0 := v.Args[0]
3748 if v_0.Op != OpAMD64MOVLconst {
3749 break
3750 }
3751 d := v_0.AuxInt
3752 v.reset(OpAMD64MOVLconst)
3753 v.AuxInt = c & d
3754 return true
3755 }
3756 return false
3757 }
3758 func rewriteValueAMD64_OpAMD64ANDLconstmodify_0(v *Value) bool {
3759 // match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
3760 // cond: ValAndOff(valoff1).canAdd(off2)
3761 // result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
3762 for {
3763 valoff1 := v.AuxInt
3764 sym := v.Aux
3765 mem := v.Args[1]
3766 v_0 := v.Args[0]
3767 if v_0.Op != OpAMD64ADDQconst {
3768 break
3769 }
3770 off2 := v_0.AuxInt
3771 base := v_0.Args[0]
3772 if !(ValAndOff(valoff1).canAdd(off2)) {
3773 break
3774 }
3775 v.reset(OpAMD64ANDLconstmodify)
3776 v.AuxInt = ValAndOff(valoff1).add(off2)
3777 v.Aux = sym
3778 v.AddArg(base)
3779 v.AddArg(mem)
3780 return true
3781 }
3782 // match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
3783 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
3784 // result: (ANDLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
3785 for {
3786 valoff1 := v.AuxInt
3787 sym1 := v.Aux
3788 mem := v.Args[1]
3789 v_0 := v.Args[0]
3790 if v_0.Op != OpAMD64LEAQ {
3791 break
3792 }
3793 off2 := v_0.AuxInt
3794 sym2 := v_0.Aux
3795 base := v_0.Args[0]
3796 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
3797 break
3798 }
3799 v.reset(OpAMD64ANDLconstmodify)
3800 v.AuxInt = ValAndOff(valoff1).add(off2)
3801 v.Aux = mergeSym(sym1, sym2)
3802 v.AddArg(base)
3803 v.AddArg(mem)
3804 return true
3805 }
3806 return false
3807 }
3808 func rewriteValueAMD64_OpAMD64ANDLload_0(v *Value) bool {
3809 b := v.Block
3810 typ := &b.Func.Config.Types
3811 // match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
3812 // cond: is32Bit(off1+off2)
3813 // result: (ANDLload [off1+off2] {sym} val base mem)
3814 for {
3815 off1 := v.AuxInt
3816 sym := v.Aux
3817 mem := v.Args[2]
3818 val := v.Args[0]
3819 v_1 := v.Args[1]
3820 if v_1.Op != OpAMD64ADDQconst {
3821 break
3822 }
3823 off2 := v_1.AuxInt
3824 base := v_1.Args[0]
3825 if !(is32Bit(off1 + off2)) {
3826 break
3827 }
3828 v.reset(OpAMD64ANDLload)
3829 v.AuxInt = off1 + off2
3830 v.Aux = sym
3831 v.AddArg(val)
3832 v.AddArg(base)
3833 v.AddArg(mem)
3834 return true
3835 }
3836 // match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
3837 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
3838 // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
3839 for {
3840 off1 := v.AuxInt
3841 sym1 := v.Aux
3842 mem := v.Args[2]
3843 val := v.Args[0]
3844 v_1 := v.Args[1]
3845 if v_1.Op != OpAMD64LEAQ {
3846 break
3847 }
3848 off2 := v_1.AuxInt
3849 sym2 := v_1.Aux
3850 base := v_1.Args[0]
3851 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
3852 break
3853 }
3854 v.reset(OpAMD64ANDLload)
3855 v.AuxInt = off1 + off2
3856 v.Aux = mergeSym(sym1, sym2)
3857 v.AddArg(val)
3858 v.AddArg(base)
3859 v.AddArg(mem)
3860 return true
3861 }
3862 // match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
3863 // cond:
3864 // result: (ANDL x (MOVLf2i y))
3865 for {
3866 off := v.AuxInt
3867 sym := v.Aux
3868 _ = v.Args[2]
3869 x := v.Args[0]
3870 ptr := v.Args[1]
3871 v_2 := v.Args[2]
3872 if v_2.Op != OpAMD64MOVSSstore {
3873 break
3874 }
3875 if v_2.AuxInt != off {
3876 break
3877 }
3878 if v_2.Aux != sym {
3879 break
3880 }
3881 _ = v_2.Args[2]
3882 if ptr != v_2.Args[0] {
3883 break
3884 }
3885 y := v_2.Args[1]
3886 v.reset(OpAMD64ANDL)
3887 v.AddArg(x)
3888 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
3889 v0.AddArg(y)
3890 v.AddArg(v0)
3891 return true
3892 }
3893 return false
3894 }
3895 func rewriteValueAMD64_OpAMD64ANDLmodify_0(v *Value) bool {
3896 // match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
3897 // cond: is32Bit(off1+off2)
3898 // result: (ANDLmodify [off1+off2] {sym} base val mem)
3899 for {
3900 off1 := v.AuxInt
3901 sym := v.Aux
3902 mem := v.Args[2]
3903 v_0 := v.Args[0]
3904 if v_0.Op != OpAMD64ADDQconst {
3905 break
3906 }
3907 off2 := v_0.AuxInt
3908 base := v_0.Args[0]
3909 val := v.Args[1]
3910 if !(is32Bit(off1 + off2)) {
3911 break
3912 }
3913 v.reset(OpAMD64ANDLmodify)
3914 v.AuxInt = off1 + off2
3915 v.Aux = sym
3916 v.AddArg(base)
3917 v.AddArg(val)
3918 v.AddArg(mem)
3919 return true
3920 }
3921 // match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
3922 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
3923 // result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
3924 for {
3925 off1 := v.AuxInt
3926 sym1 := v.Aux
3927 mem := v.Args[2]
3928 v_0 := v.Args[0]
3929 if v_0.Op != OpAMD64LEAQ {
3930 break
3931 }
3932 off2 := v_0.AuxInt
3933 sym2 := v_0.Aux
3934 base := v_0.Args[0]
3935 val := v.Args[1]
3936 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
3937 break
3938 }
3939 v.reset(OpAMD64ANDLmodify)
3940 v.AuxInt = off1 + off2
3941 v.Aux = mergeSym(sym1, sym2)
3942 v.AddArg(base)
3943 v.AddArg(val)
3944 v.AddArg(mem)
3945 return true
3946 }
3947 return false
3948 }
3949 func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool {
3950 b := v.Block
3951 config := b.Func.Config
3952 // match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
3953 // cond: !config.nacl
3954 // result: (BTRQ x y)
3955 for {
3956 x := v.Args[1]
3957 v_0 := v.Args[0]
3958 if v_0.Op != OpAMD64NOTQ {
3959 break
3960 }
3961 v_0_0 := v_0.Args[0]
3962 if v_0_0.Op != OpAMD64SHLQ {
3963 break
3964 }
3965 y := v_0_0.Args[1]
3966 v_0_0_0 := v_0_0.Args[0]
3967 if v_0_0_0.Op != OpAMD64MOVQconst {
3968 break
3969 }
3970 if v_0_0_0.AuxInt != 1 {
3971 break
3972 }
3973 if !(!config.nacl) {
3974 break
3975 }
3976 v.reset(OpAMD64BTRQ)
3977 v.AddArg(x)
3978 v.AddArg(y)
3979 return true
3980 }
3981
3982 // match: (ANDQ x (NOTQ (SHLQ (MOVQconst [1]) y)))
3983 // cond: !config.nacl
3984 // result: (BTRQ x y)
3985 _ = v.Args[1]
3986 x := v.Args[0]
3987 v_1 := v.Args[1]
3988 if v_1.Op != OpAMD64NOTQ {
3989 break
3990 }
3991 v_1_0 := v_1.Args[0]
3992 if v_1_0.Op != OpAMD64SHLQ {
3993 break
3994 }
3995 y := v_1_0.Args[1]
3996 v_1_0_0 := v_1_0.Args[0]
3997 if v_1_0_0.Op != OpAMD64MOVQconst {
3998 break
3999 }
4000 if v_1_0_0.AuxInt != 1 {
4001 break
4002 }
4003 if !(!config.nacl) {
4004 break
4005 }
4006 v.reset(OpAMD64BTRQ)
4007 v.AddArg(x)
4008 v.AddArg(y)
4009 return true
4010 }
4011 // match: (ANDQ (MOVQconst [c]) x)
4012 // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
4013 // result: (BTRQconst [log2(^c)] x)
4014 for {
4015 x := v.Args[1]
4016 v_0 := v.Args[0]
4017 if v_0.Op != OpAMD64MOVQconst {
4018 break
4019 }
4020 c := v_0.AuxInt
4021 if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
4022 break
4023 }
4024 v.reset(OpAMD64BTRQconst)
4025 v.AuxInt = log2(^c)
4026 v.AddArg(x)
4027 return true
4028 }
4029 // match: (ANDQ x (MOVQconst [c]))
4030 // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
4031 // result: (BTRQconst [log2(^c)] x)
4032 for {
4033 _ = v.Args[1]
4034 x := v.Args[0]
4035 v_1 := v.Args[1]
4036 if v_1.Op != OpAMD64MOVQconst {
4037 break
4038 }
4039 c := v_1.AuxInt
4040 if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
4041 break
4042 }
4043 v.reset(OpAMD64BTRQconst)
4044 v.AuxInt = log2(^c)
4045 v.AddArg(x)
4046 return true
4047 }
4048 // match: (ANDQ x (MOVQconst [c]))
4049 // cond: is32Bit(c)
4050 // result: (ANDQconst [c] x)
4051 for {
4052 _ = v.Args[1]
4053 x := v.Args[0]
4054 v_1 := v.Args[1]
4055 if v_1.Op != OpAMD64MOVQconst {
4056 break
4057 }
4058 c := v_1.AuxInt
4059 if !(is32Bit(c)) {
4060 break
4061 }
4062 v.reset(OpAMD64ANDQconst)
4063 v.AuxInt = c
4064 v.AddArg(x)
4065 return true
4066 }
4067 // match: (ANDQ (MOVQconst [c]) x)
4068 // cond: is32Bit(c)
4069 // result: (ANDQconst [c] x)
4070 for {
4071 x := v.Args[1]
4072 v_0 := v.Args[0]
4073 if v_0.Op != OpAMD64MOVQconst {
4074 break
4075 }
4076 c := v_0.AuxInt
4077 if !(is32Bit(c)) {
4078 break
4079 }
4080 v.reset(OpAMD64ANDQconst)
4081 v.AuxInt = c
4082 v.AddArg(x)
4083 return true
4084 }
4085 // match: (ANDQ x x)
4086 // cond:
4087 // result: x
4088 for {
4089 x := v.Args[1]
4090 if x != v.Args[0] {
4091 break
4092 }
4093 v.reset(OpCopy)
4094 v.Type = x.Type
4095 v.AddArg(x)
4096 return true
4097 }
4098 // match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
4099 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
4100 // result: (ANDQload x [off] {sym} ptr mem)
4101 for {
4102 _ = v.Args[1]
4103 x := v.Args[0]
4104 l := v.Args[1]
4105 if l.Op != OpAMD64MOVQload {
4106 break
4107 }
4108 off := l.AuxInt
4109 sym := l.Aux
4110 mem := l.Args[1]
4111 ptr := l.Args[0]
4112 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
4113 break
4114 }
4115 v.reset(OpAMD64ANDQload)
4116 v.AuxInt = off
4117 v.Aux = sym
4118 v.AddArg(x)
4119 v.AddArg(ptr)
4120 v.AddArg(mem)
4121 return true
4122 }
4123 // match: (ANDQ l:(MOVQload [off] {sym} ptr mem) x)
4124 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
4125 // result: (ANDQload x [off] {sym} ptr mem)
4126 for {
4127 x := v.Args[1]
4128 l := v.Args[0]
4129 if l.Op != OpAMD64MOVQload {
4130 break
4131 }
4132 off := l.AuxInt
4133 sym := l.Aux
4134 mem := l.Args[1]
4135 ptr := l.Args[0]
4136 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
4137 break
4138 }
4139 v.reset(OpAMD64ANDQload)
4140 v.AuxInt = off
4141 v.Aux = sym
4142 v.AddArg(x)
4143 v.AddArg(ptr)
4144 v.AddArg(mem)
4145 return true
4146 }
4147 return false
4148 }
4149 func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool {
4150 b := v.Block
4151 config := b.Func.Config
4152 // match: (ANDQconst [c] x)
4153 // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
4154 // result: (BTRQconst [log2(^c)] x)
4155 for {
4156 c := v.AuxInt
4157 x := v.Args[0]
4158 if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl) {
4159 break
4160 }
4161 v.reset(OpAMD64BTRQconst)
4162 v.AuxInt = log2(^c)
4163 v.AddArg(x)
4164 return true
4165 }
4166 // match: (ANDQconst [c] (ANDQconst [d] x))
4167 // cond:
4168 // result: (ANDQconst [c & d] x)
4169 for {
4170 c := v.AuxInt
4171 v_0 := v.Args[0]
4172 if v_0.Op != OpAMD64ANDQconst {
4173 break
4174 }
4175 d := v_0.AuxInt
4176 x := v_0.Args[0]
4177 v.reset(OpAMD64ANDQconst)
4178 v.AuxInt = c & d
4179 v.AddArg(x)
4180 return true
4181 }
4182 // match: (ANDQconst [c] (BTRQconst [d] x))
4183 // cond:
4184 // result: (ANDQconst [c &^ (1<<uint32(d))] x)
4185 for {
4186 c := v.AuxInt
4187 v_0 := v.Args[0]
4188 if v_0.Op != OpAMD64BTRQconst {
4189 break
4190 }
4191 d := v_0.AuxInt
4192 x := v_0.Args[0]
4193 v.reset(OpAMD64ANDQconst)
4194 v.AuxInt = c &^ (1 << uint32(d))
4195 v.AddArg(x)
4196 return true
4197 }
4198 // match: (ANDQconst [0xFF] x)
4199 // cond:
4200 // result: (MOVBQZX x)
4201 for {
4202 if v.AuxInt != 0xFF {
4203 break
4204 }
4205 x := v.Args[0]
4206 v.reset(OpAMD64MOVBQZX)
4207 v.AddArg(x)
4208 return true
4209 }
4210 // match: (ANDQconst [0xFFFF] x)
4211 // cond:
4212 // result: (MOVWQZX x)
4213 for {
4214 if v.AuxInt != 0xFFFF {
4215 break
4216 }
4217 x := v.Args[0]
4218 v.reset(OpAMD64MOVWQZX)
4219 v.AddArg(x)
4220 return true
4221 }
4222 // match: (ANDQconst [0xFFFFFFFF] x)
4223 // cond:
4224 // result: (MOVLQZX x)
4225 for {
4226 if v.AuxInt != 0xFFFFFFFF {
4227 break
4228 }
4229 x := v.Args[0]
4230 v.reset(OpAMD64MOVLQZX)
4231 v.AddArg(x)
4232 return true
4233 }
4234 // match: (ANDQconst [0] _)
4235 // cond:
4236 // result: (MOVQconst [0])
4237 for {
4238 if v.AuxInt != 0 {
4239 break
4240 }
4241 v.reset(OpAMD64MOVQconst)
4242 v.AuxInt = 0
4243 return true
4244 }
4245 // match: (ANDQconst [-1] x)
4246 // cond:
4247 // result: x
4248 for {
4249 if v.AuxInt != -1 {
4250 break
4251 }
4252 x := v.Args[0]
4253 v.reset(OpCopy)
4254 v.Type = x.Type
4255 v.AddArg(x)
4256 return true
4257 }
4258 // match: (ANDQconst [c] (MOVQconst [d]))
4259 // cond:
4260 // result: (MOVQconst [c&d])
4261 for {
4262 c := v.AuxInt
4263 v_0 := v.Args[0]
4264 if v_0.Op != OpAMD64MOVQconst {
4265 break
4266 }
4267 d := v_0.AuxInt
4268 v.reset(OpAMD64MOVQconst)
4269 v.AuxInt = c & d
4270 return true
4271 }
4272 return false
4273 }
4274 func rewriteValueAMD64_OpAMD64ANDQconstmodify_0(v *Value) bool {
4275 // match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
4276 // cond: ValAndOff(valoff1).canAdd(off2)
4277 // result: (ANDQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
4278 for {
4279 valoff1 := v.AuxInt
4280 sym := v.Aux
4281 mem := v.Args[1]
4282 v_0 := v.Args[0]
4283 if v_0.Op != OpAMD64ADDQconst {
4284 break
4285 }
4286 off2 := v_0.AuxInt
4287 base := v_0.Args[0]
4288 if !(ValAndOff(valoff1).canAdd(off2)) {
4289 break
4290 }
4291 v.reset(OpAMD64ANDQconstmodify)
4292 v.AuxInt = ValAndOff(valoff1).add(off2)
4293 v.Aux = sym
4294 v.AddArg(base)
4295 v.AddArg(mem)
4296 return true
4297 }
4298 // match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
4299 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
4300 // result: (ANDQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
4301 for {
4302 valoff1 := v.AuxInt
4303 sym1 := v.Aux
4304 mem := v.Args[1]
4305 v_0 := v.Args[0]
4306 if v_0.Op != OpAMD64LEAQ {
4307 break
4308 }
4309 off2 := v_0.AuxInt
4310 sym2 := v_0.Aux
4311 base := v_0.Args[0]
4312 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
4313 break
4314 }
4315 v.reset(OpAMD64ANDQconstmodify)
4316 v.AuxInt = ValAndOff(valoff1).add(off2)
4317 v.Aux = mergeSym(sym1, sym2)
4318 v.AddArg(base)
4319 v.AddArg(mem)
4320 return true
4321 }
4322 return false
4323 }
4324 func rewriteValueAMD64_OpAMD64ANDQload_0(v *Value) bool {
4325 b := v.Block
4326 typ := &b.Func.Config.Types
4327 // match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
4328 // cond: is32Bit(off1+off2)
4329 // result: (ANDQload [off1+off2] {sym} val base mem)
4330 for {
4331 off1 := v.AuxInt
4332 sym := v.Aux
4333 mem := v.Args[2]
4334 val := v.Args[0]
4335 v_1 := v.Args[1]
4336 if v_1.Op != OpAMD64ADDQconst {
4337 break
4338 }
4339 off2 := v_1.AuxInt
4340 base := v_1.Args[0]
4341 if !(is32Bit(off1 + off2)) {
4342 break
4343 }
4344 v.reset(OpAMD64ANDQload)
4345 v.AuxInt = off1 + off2
4346 v.Aux = sym
4347 v.AddArg(val)
4348 v.AddArg(base)
4349 v.AddArg(mem)
4350 return true
4351 }
4352 // match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
4353 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
4354 // result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
4355 for {
4356 off1 := v.AuxInt
4357 sym1 := v.Aux
4358 mem := v.Args[2]
4359 val := v.Args[0]
4360 v_1 := v.Args[1]
4361 if v_1.Op != OpAMD64LEAQ {
4362 break
4363 }
4364 off2 := v_1.AuxInt
4365 sym2 := v_1.Aux
4366 base := v_1.Args[0]
4367 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
4368 break
4369 }
4370 v.reset(OpAMD64ANDQload)
4371 v.AuxInt = off1 + off2
4372 v.Aux = mergeSym(sym1, sym2)
4373 v.AddArg(val)
4374 v.AddArg(base)
4375 v.AddArg(mem)
4376 return true
4377 }
4378 // match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
4379 // cond:
4380 // result: (ANDQ x (MOVQf2i y))
4381 for {
4382 off := v.AuxInt
4383 sym := v.Aux
4384 _ = v.Args[2]
4385 x := v.Args[0]
4386 ptr := v.Args[1]
4387 v_2 := v.Args[2]
4388 if v_2.Op != OpAMD64MOVSDstore {
4389 break
4390 }
4391 if v_2.AuxInt != off {
4392 break
4393 }
4394 if v_2.Aux != sym {
4395 break
4396 }
4397 _ = v_2.Args[2]
4398 if ptr != v_2.Args[0] {
4399 break
4400 }
4401 y := v_2.Args[1]
4402 v.reset(OpAMD64ANDQ)
4403 v.AddArg(x)
4404 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
4405 v0.AddArg(y)
4406 v.AddArg(v0)
4407 return true
4408 }
4409 return false
4410 }
4411 func rewriteValueAMD64_OpAMD64ANDQmodify_0(v *Value) bool {
4412 // match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
4413 // cond: is32Bit(off1+off2)
4414 // result: (ANDQmodify [off1+off2] {sym} base val mem)
4415 for {
4416 off1 := v.AuxInt
4417 sym := v.Aux
4418 mem := v.Args[2]
4419 v_0 := v.Args[0]
4420 if v_0.Op != OpAMD64ADDQconst {
4421 break
4422 }
4423 off2 := v_0.AuxInt
4424 base := v_0.Args[0]
4425 val := v.Args[1]
4426 if !(is32Bit(off1 + off2)) {
4427 break
4428 }
4429 v.reset(OpAMD64ANDQmodify)
4430 v.AuxInt = off1 + off2
4431 v.Aux = sym
4432 v.AddArg(base)
4433 v.AddArg(val)
4434 v.AddArg(mem)
4435 return true
4436 }
4437 // match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
4438 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
4439 // result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
4440 for {
4441 off1 := v.AuxInt
4442 sym1 := v.Aux
4443 mem := v.Args[2]
4444 v_0 := v.Args[0]
4445 if v_0.Op != OpAMD64LEAQ {
4446 break
4447 }
4448 off2 := v_0.AuxInt
4449 sym2 := v_0.Aux
4450 base := v_0.Args[0]
4451 val := v.Args[1]
4452 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
4453 break
4454 }
4455 v.reset(OpAMD64ANDQmodify)
4456 v.AuxInt = off1 + off2
4457 v.Aux = mergeSym(sym1, sym2)
4458 v.AddArg(base)
4459 v.AddArg(val)
4460 v.AddArg(mem)
4461 return true
4462 }
4463 return false
4464 }
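// The ORQconst of 1<<8 or 1<<16 seen here is a guard bit that keeps BSFQ
// from ever seeing a zero input (this shape comes from Ctz8/Ctz16-style
// lowerings). With the guard bit present, the MOVBQZX/MOVWQZX
// zero-extension underneath cannot change the result, so it is dropped.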
4465 func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool {
4466 b := v.Block
4467 // match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
4468 // cond:
4469 // result: (BSFQ (ORQconst <t> [1<<8] x))
4470 for {
4471 v_0 := v.Args[0]
4472 if v_0.Op != OpAMD64ORQconst {
4473 break
4474 }
4475 t := v_0.Type
4476 if v_0.AuxInt != 1<<8 {
4477 break
4478 }
4479 v_0_0 := v_0.Args[0]
4480 if v_0_0.Op != OpAMD64MOVBQZX {
4481 break
4482 }
4483 x := v_0_0.Args[0]
4484 v.reset(OpAMD64BSFQ)
4485 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
4486 v0.AuxInt = 1 << 8
4487 v0.AddArg(x)
4488 v.AddArg(v0)
4489 return true
4490 }
4491 // match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
4492 // cond:
4493 // result: (BSFQ (ORQconst <t> [1<<16] x))
4494 for {
4495 v_0 := v.Args[0]
4496 if v_0.Op != OpAMD64ORQconst {
4497 break
4498 }
4499 t := v_0.Type
4500 if v_0.AuxInt != 1<<16 {
4501 break
4502 }
4503 v_0_0 := v_0.Args[0]
4504 if v_0_0.Op != OpAMD64MOVWQZX {
4505 break
4506 }
4507 x := v_0_0.Args[0]
4508 v.reset(OpAMD64BSFQ)
4509 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
4510 v0.AuxInt = 1 << 16
4511 v0.AddArg(x)
4512 v.AddArg(v0)
4513 return true
4514 }
4515 return false
4516 }
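// BTC of a constant bit c is XOR with 1<<c, so BTCLconst folds with a
// neighboring XORLconst or BTCLconst into one XORLconst, and evaluates
// away completely against a MOVLconst.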
4517 func rewriteValueAMD64_OpAMD64BTCLconst_0(v *Value) bool {
4518 // match: (BTCLconst [c] (XORLconst [d] x))
4519 // cond:
4520 // result: (XORLconst [d ^ 1<<uint32(c)] x)
4521 for {
4522 c := v.AuxInt
4523 v_0 := v.Args[0]
4524 if v_0.Op != OpAMD64XORLconst {
4525 break
4526 }
4527 d := v_0.AuxInt
4528 x := v_0.Args[0]
4529 v.reset(OpAMD64XORLconst)
4530 v.AuxInt = d ^ 1<<uint32(c)
4531 v.AddArg(x)
4532 return true
4533 }
4534 // match: (BTCLconst [c] (BTCLconst [d] x))
4535 // cond:
4536 // result: (XORLconst [1<<uint32(c) ^ 1<<uint32(d)] x)
4537 for {
4538 c := v.AuxInt
4539 v_0 := v.Args[0]
4540 if v_0.Op != OpAMD64BTCLconst {
4541 break
4542 }
4543 d := v_0.AuxInt
4544 x := v_0.Args[0]
4545 v.reset(OpAMD64XORLconst)
4546 v.AuxInt = 1<<uint32(c) ^ 1<<uint32(d)
4547 v.AddArg(x)
4548 return true
4549 }
4550 // match: (BTCLconst [c] (MOVLconst [d]))
4551 // cond:
4552 // result: (MOVLconst [d^(1<<uint32(c))])
4553 for {
4554 c := v.AuxInt
4555 v_0 := v.Args[0]
4556 if v_0.Op != OpAMD64MOVLconst {
4557 break
4558 }
4559 d := v_0.AuxInt
4560 v.reset(OpAMD64MOVLconst)
4561 v.AuxInt = d ^ (1 << uint32(c))
4562 return true
4563 }
4564 return false
4565 }
4566 func rewriteValueAMD64_OpAMD64BTCLconstmodify_0(v *Value) bool {
4567 // match: (BTCLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
4568 // cond: ValAndOff(valoff1).canAdd(off2)
4569 // result: (BTCLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
4570 for {
4571 valoff1 := v.AuxInt
4572 sym := v.Aux
4573 mem := v.Args[1]
4574 v_0 := v.Args[0]
4575 if v_0.Op != OpAMD64ADDQconst {
4576 break
4577 }
4578 off2 := v_0.AuxInt
4579 base := v_0.Args[0]
4580 if !(ValAndOff(valoff1).canAdd(off2)) {
4581 break
4582 }
4583 v.reset(OpAMD64BTCLconstmodify)
4584 v.AuxInt = ValAndOff(valoff1).add(off2)
4585 v.Aux = sym
4586 v.AddArg(base)
4587 v.AddArg(mem)
4588 return true
4589 }
4590 // match: (BTCLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
4591 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
4592 // result: (BTCLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
4593 for {
4594 valoff1 := v.AuxInt
4595 sym1 := v.Aux
4596 mem := v.Args[1]
4597 v_0 := v.Args[0]
4598 if v_0.Op != OpAMD64LEAQ {
4599 break
4600 }
4601 off2 := v_0.AuxInt
4602 sym2 := v_0.Aux
4603 base := v_0.Args[0]
4604 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
4605 break
4606 }
4607 v.reset(OpAMD64BTCLconstmodify)
4608 v.AuxInt = ValAndOff(valoff1).add(off2)
4609 v.Aux = mergeSym(sym1, sym2)
4610 v.AddArg(base)
4611 v.AddArg(mem)
4612 return true
4613 }
4614 return false
4615 }
4616 func rewriteValueAMD64_OpAMD64BTCLmodify_0(v *Value) bool {
4617 // match: (BTCLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
4618 // cond: is32Bit(off1+off2)
4619 // result: (BTCLmodify [off1+off2] {sym} base val mem)
4620 for {
4621 off1 := v.AuxInt
4622 sym := v.Aux
4623 mem := v.Args[2]
4624 v_0 := v.Args[0]
4625 if v_0.Op != OpAMD64ADDQconst {
4626 break
4627 }
4628 off2 := v_0.AuxInt
4629 base := v_0.Args[0]
4630 val := v.Args[1]
4631 if !(is32Bit(off1 + off2)) {
4632 break
4633 }
4634 v.reset(OpAMD64BTCLmodify)
4635 v.AuxInt = off1 + off2
4636 v.Aux = sym
4637 v.AddArg(base)
4638 v.AddArg(val)
4639 v.AddArg(mem)
4640 return true
4641 }
4642 // match: (BTCLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
4643 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
4644 // result: (BTCLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
4645 for {
4646 off1 := v.AuxInt
4647 sym1 := v.Aux
4648 mem := v.Args[2]
4649 v_0 := v.Args[0]
4650 if v_0.Op != OpAMD64LEAQ {
4651 break
4652 }
4653 off2 := v_0.AuxInt
4654 sym2 := v_0.Aux
4655 base := v_0.Args[0]
4656 val := v.Args[1]
4657 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
4658 break
4659 }
4660 v.reset(OpAMD64BTCLmodify)
4661 v.AuxInt = off1 + off2
4662 v.Aux = mergeSym(sym1, sym2)
4663 v.AddArg(base)
4664 v.AddArg(val)
4665 v.AddArg(mem)
4666 return true
4667 }
4668 return false
4669 }
4670 func rewriteValueAMD64_OpAMD64BTCQconst_0(v *Value) bool {
4671 // match: (BTCQconst [c] (XORQconst [d] x))
4672 // cond:
4673 // result: (XORQconst [d ^ 1<<uint32(c)] x)
4674 for {
4675 c := v.AuxInt
4676 v_0 := v.Args[0]
4677 if v_0.Op != OpAMD64XORQconst {
4678 break
4679 }
4680 d := v_0.AuxInt
4681 x := v_0.Args[0]
4682 v.reset(OpAMD64XORQconst)
4683 v.AuxInt = d ^ 1<<uint32(c)
4684 v.AddArg(x)
4685 return true
4686 }
4687 // match: (BTCQconst [c] (BTCQconst [d] x))
4688 // cond:
4689 // result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
4690 for {
4691 c := v.AuxInt
4692 v_0 := v.Args[0]
4693 if v_0.Op != OpAMD64BTCQconst {
4694 break
4695 }
4696 d := v_0.AuxInt
4697 x := v_0.Args[0]
4698 v.reset(OpAMD64XORQconst)
4699 v.AuxInt = 1<<uint32(c) ^ 1<<uint32(d)
4700 v.AddArg(x)
4701 return true
4702 }
4703 // match: (BTCQconst [c] (MOVQconst [d]))
4704 // cond:
4705 // result: (MOVQconst [d^(1<<uint32(c))])
4706 for {
4707 c := v.AuxInt
4708 v_0 := v.Args[0]
4709 if v_0.Op != OpAMD64MOVQconst {
4710 break
4711 }
4712 d := v_0.AuxInt
4713 v.reset(OpAMD64MOVQconst)
4714 v.AuxInt = d ^ (1 << uint32(c))
4715 return true
4716 }
4717 return false
4718 }
4719 func rewriteValueAMD64_OpAMD64BTCQconstmodify_0(v *Value) bool {
4720 // match: (BTCQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
4721 // cond: ValAndOff(valoff1).canAdd(off2)
4722 // result: (BTCQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
4723 for {
4724 valoff1 := v.AuxInt
4725 sym := v.Aux
4726 mem := v.Args[1]
4727 v_0 := v.Args[0]
4728 if v_0.Op != OpAMD64ADDQconst {
4729 break
4730 }
4731 off2 := v_0.AuxInt
4732 base := v_0.Args[0]
4733 if !(ValAndOff(valoff1).canAdd(off2)) {
4734 break
4735 }
4736 v.reset(OpAMD64BTCQconstmodify)
4737 v.AuxInt = ValAndOff(valoff1).add(off2)
4738 v.Aux = sym
4739 v.AddArg(base)
4740 v.AddArg(mem)
4741 return true
4742 }
4743 // match: (BTCQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
4744 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
4745 // result: (BTCQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
4746 for {
4747 valoff1 := v.AuxInt
4748 sym1 := v.Aux
4749 mem := v.Args[1]
4750 v_0 := v.Args[0]
4751 if v_0.Op != OpAMD64LEAQ {
4752 break
4753 }
4754 off2 := v_0.AuxInt
4755 sym2 := v_0.Aux
4756 base := v_0.Args[0]
4757 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
4758 break
4759 }
4760 v.reset(OpAMD64BTCQconstmodify)
4761 v.AuxInt = ValAndOff(valoff1).add(off2)
4762 v.Aux = mergeSym(sym1, sym2)
4763 v.AddArg(base)
4764 v.AddArg(mem)
4765 return true
4766 }
4767 return false
4768 }
4769 func rewriteValueAMD64_OpAMD64BTCQmodify_0(v *Value) bool {
4770 // match: (BTCQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
4771 // cond: is32Bit(off1+off2)
4772 // result: (BTCQmodify [off1+off2] {sym} base val mem)
4773 for {
4774 off1 := v.AuxInt
4775 sym := v.Aux
4776 mem := v.Args[2]
4777 v_0 := v.Args[0]
4778 if v_0.Op != OpAMD64ADDQconst {
4779 break
4780 }
4781 off2 := v_0.AuxInt
4782 base := v_0.Args[0]
4783 val := v.Args[1]
4784 if !(is32Bit(off1 + off2)) {
4785 break
4786 }
4787 v.reset(OpAMD64BTCQmodify)
4788 v.AuxInt = off1 + off2
4789 v.Aux = sym
4790 v.AddArg(base)
4791 v.AddArg(val)
4792 v.AddArg(mem)
4793 return true
4794 }
4795 // match: (BTCQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
4796 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
4797 // result: (BTCQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
4798 for {
4799 off1 := v.AuxInt
4800 sym1 := v.Aux
4801 mem := v.Args[2]
4802 v_0 := v.Args[0]
4803 if v_0.Op != OpAMD64LEAQ {
4804 break
4805 }
4806 off2 := v_0.AuxInt
4807 sym2 := v_0.Aux
4808 base := v_0.Args[0]
4809 val := v.Args[1]
4810 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
4811 break
4812 }
4813 v.reset(OpAMD64BTCQmodify)
4814 v.AuxInt = off1 + off2
4815 v.Aux = mergeSym(sym1, sym2)
4816 v.AddArg(base)
4817 v.AddArg(val)
4818 v.AddArg(mem)
4819 return true
4820 }
4821 return false
4822 }
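// A bit test commutes with constant shifts: bit c of x>>d is bit c+d of x
// (while c+d stays below the operand width), and bit c of x<<d is bit c-d
// of x when c > d. Testing bit 0 of a variable shift x>>y becomes a
// variable bit test of bit y in x (BTQ/BTL).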
4823 func rewriteValueAMD64_OpAMD64BTLconst_0(v *Value) bool {
4824 // match: (BTLconst [c] (SHRQconst [d] x))
4825 // cond: (c+d)<64
4826 // result: (BTQconst [c+d] x)
4827 for {
4828 c := v.AuxInt
4829 v_0 := v.Args[0]
4830 if v_0.Op != OpAMD64SHRQconst {
4831 break
4832 }
4833 d := v_0.AuxInt
4834 x := v_0.Args[0]
4835 if !((c + d) < 64) {
4836 break
4837 }
4838 v.reset(OpAMD64BTQconst)
4839 v.AuxInt = c + d
4840 v.AddArg(x)
4841 return true
4842 }
4843 // match: (BTLconst [c] (SHLQconst [d] x))
4844 // cond: c>d
4845 // result: (BTLconst [c-d] x)
4846 for {
4847 c := v.AuxInt
4848 v_0 := v.Args[0]
4849 if v_0.Op != OpAMD64SHLQconst {
4850 break
4851 }
4852 d := v_0.AuxInt
4853 x := v_0.Args[0]
4854 if !(c > d) {
4855 break
4856 }
4857 v.reset(OpAMD64BTLconst)
4858 v.AuxInt = c - d
4859 v.AddArg(x)
4860 return true
4861 }
4862 // match: (BTLconst [0] s:(SHRQ x y))
4863 // cond:
4864 // result: (BTQ y x)
4865 for {
4866 if v.AuxInt != 0 {
4867 break
4868 }
4869 s := v.Args[0]
4870 if s.Op != OpAMD64SHRQ {
4871 break
4872 }
4873 y := s.Args[1]
4874 x := s.Args[0]
4875 v.reset(OpAMD64BTQ)
4876 v.AddArg(y)
4877 v.AddArg(x)
4878 return true
4879 }
4880 // match: (BTLconst [c] (SHRLconst [d] x))
4881 // cond: (c+d)<32
4882 // result: (BTLconst [c+d] x)
4883 for {
4884 c := v.AuxInt
4885 v_0 := v.Args[0]
4886 if v_0.Op != OpAMD64SHRLconst {
4887 break
4888 }
4889 d := v_0.AuxInt
4890 x := v_0.Args[0]
4891 if !((c + d) < 32) {
4892 break
4893 }
4894 v.reset(OpAMD64BTLconst)
4895 v.AuxInt = c + d
4896 v.AddArg(x)
4897 return true
4898 }
4899 // match: (BTLconst [c] (SHLLconst [d] x))
4900 // cond: c>d
4901 // result: (BTLconst [c-d] x)
4902 for {
4903 c := v.AuxInt
4904 v_0 := v.Args[0]
4905 if v_0.Op != OpAMD64SHLLconst {
4906 break
4907 }
4908 d := v_0.AuxInt
4909 x := v_0.Args[0]
4910 if !(c > d) {
4911 break
4912 }
4913 v.reset(OpAMD64BTLconst)
4914 v.AuxInt = c - d
4915 v.AddArg(x)
4916 return true
4917 }
4918 // match: (BTLconst [0] s:(SHRL x y))
4919 // cond:
4920 // result: (BTL y x)
4921 for {
4922 if v.AuxInt != 0 {
4923 break
4924 }
4925 s := v.Args[0]
4926 if s.Op != OpAMD64SHRL {
4927 break
4928 }
4929 y := s.Args[1]
4930 x := s.Args[0]
4931 v.reset(OpAMD64BTL)
4932 v.AddArg(y)
4933 v.AddArg(x)
4934 return true
4935 }
4936 return false
4937 }
4938 func rewriteValueAMD64_OpAMD64BTQconst_0(v *Value) bool {
4939 // match: (BTQconst [c] (SHRQconst [d] x))
4940 // cond: (c+d)<64
4941 // result: (BTQconst [c+d] x)
4942 for {
4943 c := v.AuxInt
4944 v_0 := v.Args[0]
4945 if v_0.Op != OpAMD64SHRQconst {
4946 break
4947 }
4948 d := v_0.AuxInt
4949 x := v_0.Args[0]
4950 if !((c + d) < 64) {
4951 break
4952 }
4953 v.reset(OpAMD64BTQconst)
4954 v.AuxInt = c + d
4955 v.AddArg(x)
4956 return true
4957 }
4958 // match: (BTQconst [c] (SHLQconst [d] x))
4959 // cond: c>d
4960 // result: (BTQconst [c-d] x)
4961 for {
4962 c := v.AuxInt
4963 v_0 := v.Args[0]
4964 if v_0.Op != OpAMD64SHLQconst {
4965 break
4966 }
4967 d := v_0.AuxInt
4968 x := v_0.Args[0]
4969 if !(c > d) {
4970 break
4971 }
4972 v.reset(OpAMD64BTQconst)
4973 v.AuxInt = c - d
4974 v.AddArg(x)
4975 return true
4976 }
4977 // match: (BTQconst [0] s:(SHRQ x y))
4978 // cond:
4979 // result: (BTQ y x)
4980 for {
4981 if v.AuxInt != 0 {
4982 break
4983 }
4984 s := v.Args[0]
4985 if s.Op != OpAMD64SHRQ {
4986 break
4987 }
4988 y := s.Args[1]
4989 x := s.Args[0]
4990 v.reset(OpAMD64BTQ)
4991 v.AddArg(y)
4992 v.AddArg(x)
4993 return true
4994 }
4995 return false
4996 }
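// BTR of a constant bit c is AND with ^(1<<c). It therefore absorbs a
// prior BTS or BTC of the same bit (the bit ends up cleared either way),
// and it merges with ANDLconst or another BTRLconst into a single
// ANDLconst mask.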
4997 func rewriteValueAMD64_OpAMD64BTRLconst_0(v *Value) bool {
4998 // match: (BTRLconst [c] (BTSLconst [c] x))
4999 // cond:
5000 // result: (BTRLconst [c] x)
5001 for {
5002 c := v.AuxInt
5003 v_0 := v.Args[0]
5004 if v_0.Op != OpAMD64BTSLconst {
5005 break
5006 }
5007 if v_0.AuxInt != c {
5008 break
5009 }
5010 x := v_0.Args[0]
5011 v.reset(OpAMD64BTRLconst)
5012 v.AuxInt = c
5013 v.AddArg(x)
5014 return true
5015 }
5016 // match: (BTRLconst [c] (BTCLconst [c] x))
5017 // cond:
5018 // result: (BTRLconst [c] x)
5019 for {
5020 c := v.AuxInt
5021 v_0 := v.Args[0]
5022 if v_0.Op != OpAMD64BTCLconst {
5023 break
5024 }
5025 if v_0.AuxInt != c {
5026 break
5027 }
5028 x := v_0.Args[0]
5029 v.reset(OpAMD64BTRLconst)
5030 v.AuxInt = c
5031 v.AddArg(x)
5032 return true
5033 }
5034 // match: (BTRLconst [c] (ANDLconst [d] x))
5035 // cond:
5036 // result: (ANDLconst [d &^ (1<<uint32(c))] x)
5037 for {
5038 c := v.AuxInt
5039 v_0 := v.Args[0]
5040 if v_0.Op != OpAMD64ANDLconst {
5041 break
5042 }
5043 d := v_0.AuxInt
5044 x := v_0.Args[0]
5045 v.reset(OpAMD64ANDLconst)
5046 v.AuxInt = d &^ (1 << uint32(c))
5047 v.AddArg(x)
5048 return true
5049 }
5050 // match: (BTRLconst [c] (BTRLconst [d] x))
5051 // cond:
5052 // result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
5053 for {
5054 c := v.AuxInt
5055 v_0 := v.Args[0]
5056 if v_0.Op != OpAMD64BTRLconst {
5057 break
5058 }
5059 d := v_0.AuxInt
5060 x := v_0.Args[0]
5061 v.reset(OpAMD64ANDLconst)
5062 v.AuxInt = ^(1<<uint32(c) | 1<<uint32(d))
5063 v.AddArg(x)
5064 return true
5065 }
5066 // match: (BTRLconst [c] (MOVLconst [d]))
5067 // cond:
5068 // result: (MOVLconst [d&^(1<<uint32(c))])
5069 for {
5070 c := v.AuxInt
5071 v_0 := v.Args[0]
5072 if v_0.Op != OpAMD64MOVLconst {
5073 break
5074 }
5075 d := v_0.AuxInt
5076 v.reset(OpAMD64MOVLconst)
5077 v.AuxInt = d &^ (1 << uint32(c))
5078 return true
5079 }
5080 return false
5081 }
5082 func rewriteValueAMD64_OpAMD64BTRLconstmodify_0(v *Value) bool {
5083 // match: (BTRLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
5084 // cond: ValAndOff(valoff1).canAdd(off2)
5085 // result: (BTRLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
5086 for {
5087 valoff1 := v.AuxInt
5088 sym := v.Aux
5089 mem := v.Args[1]
5090 v_0 := v.Args[0]
5091 if v_0.Op != OpAMD64ADDQconst {
5092 break
5093 }
5094 off2 := v_0.AuxInt
5095 base := v_0.Args[0]
5096 if !(ValAndOff(valoff1).canAdd(off2)) {
5097 break
5098 }
5099 v.reset(OpAMD64BTRLconstmodify)
5100 v.AuxInt = ValAndOff(valoff1).add(off2)
5101 v.Aux = sym
5102 v.AddArg(base)
5103 v.AddArg(mem)
5104 return true
5105 }
5106 // match: (BTRLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
5107 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
5108 // result: (BTRLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
5109 for {
5110 valoff1 := v.AuxInt
5111 sym1 := v.Aux
5112 mem := v.Args[1]
5113 v_0 := v.Args[0]
5114 if v_0.Op != OpAMD64LEAQ {
5115 break
5116 }
5117 off2 := v_0.AuxInt
5118 sym2 := v_0.Aux
5119 base := v_0.Args[0]
5120 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
5121 break
5122 }
5123 v.reset(OpAMD64BTRLconstmodify)
5124 v.AuxInt = ValAndOff(valoff1).add(off2)
5125 v.Aux = mergeSym(sym1, sym2)
5126 v.AddArg(base)
5127 v.AddArg(mem)
5128 return true
5129 }
5130 return false
5131 }
5132 func rewriteValueAMD64_OpAMD64BTRLmodify_0(v *Value) bool {
5133 // match: (BTRLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
5134 // cond: is32Bit(off1+off2)
5135 // result: (BTRLmodify [off1+off2] {sym} base val mem)
5136 for {
5137 off1 := v.AuxInt
5138 sym := v.Aux
5139 mem := v.Args[2]
5140 v_0 := v.Args[0]
5141 if v_0.Op != OpAMD64ADDQconst {
5142 break
5143 }
5144 off2 := v_0.AuxInt
5145 base := v_0.Args[0]
5146 val := v.Args[1]
5147 if !(is32Bit(off1 + off2)) {
5148 break
5149 }
5150 v.reset(OpAMD64BTRLmodify)
5151 v.AuxInt = off1 + off2
5152 v.Aux = sym
5153 v.AddArg(base)
5154 v.AddArg(val)
5155 v.AddArg(mem)
5156 return true
5157 }
5158 // match: (BTRLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
5159 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
5160 // result: (BTRLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
5161 for {
5162 off1 := v.AuxInt
5163 sym1 := v.Aux
5164 mem := v.Args[2]
5165 v_0 := v.Args[0]
5166 if v_0.Op != OpAMD64LEAQ {
5167 break
5168 }
5169 off2 := v_0.AuxInt
5170 sym2 := v_0.Aux
5171 base := v_0.Args[0]
5172 val := v.Args[1]
5173 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
5174 break
5175 }
5176 v.reset(OpAMD64BTRLmodify)
5177 v.AuxInt = off1 + off2
5178 v.Aux = mergeSym(sym1, sym2)
5179 v.AddArg(base)
5180 v.AddArg(val)
5181 v.AddArg(mem)
5182 return true
5183 }
5184 return false
5185 }
5186 func rewriteValueAMD64_OpAMD64BTRQconst_0(v *Value) bool {
	// match: (BTRQconst [c] (BTSQconst [c] x))
	// cond:
	// result: (BTRQconst [c] x)
5190 for {
5191 c := v.AuxInt
5192 v_0 := v.Args[0]
5193 if v_0.Op != OpAMD64BTSQconst {
5194 break
5195 }
5196 if v_0.AuxInt != c {
5197 break
5198 }
5199 x := v_0.Args[0]
5200 v.reset(OpAMD64BTRQconst)
5201 v.AuxInt = c
5202 v.AddArg(x)
5203 return true
5204 }
	// match: (BTRQconst [c] (BTCQconst [c] x))
	// cond:
	// result: (BTRQconst [c] x)
5208 for {
5209 c := v.AuxInt
5210 v_0 := v.Args[0]
5211 if v_0.Op != OpAMD64BTCQconst {
5212 break
5213 }
5214 if v_0.AuxInt != c {
5215 break
5216 }
5217 x := v_0.Args[0]
5218 v.reset(OpAMD64BTRQconst)
5219 v.AuxInt = c
5220 v.AddArg(x)
5221 return true
5222 }
	// match: (BTRQconst [c] (ANDQconst [d] x))
	// cond:
	// result: (ANDQconst [d &^ (1<<uint32(c))] x)
5226 for {
5227 c := v.AuxInt
5228 v_0 := v.Args[0]
5229 if v_0.Op != OpAMD64ANDQconst {
5230 break
5231 }
5232 d := v_0.AuxInt
5233 x := v_0.Args[0]
5234 v.reset(OpAMD64ANDQconst)
5235 v.AuxInt = d &^ (1 << uint32(c))
5236 v.AddArg(x)
5237 return true
5238 }
	// match: (BTRQconst [c] (BTRQconst [d] x))
	// cond:
	// result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
5242 for {
5243 c := v.AuxInt
5244 v_0 := v.Args[0]
5245 if v_0.Op != OpAMD64BTRQconst {
5246 break
5247 }
5248 d := v_0.AuxInt
5249 x := v_0.Args[0]
5250 v.reset(OpAMD64ANDQconst)
5251 v.AuxInt = ^(1<<uint32(c) | 1<<uint32(d))
5252 v.AddArg(x)
5253 return true
5254 }
	// match: (BTRQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d&^(1<<uint32(c))])
5258 for {
5259 c := v.AuxInt
5260 v_0 := v.Args[0]
5261 if v_0.Op != OpAMD64MOVQconst {
5262 break
5263 }
5264 d := v_0.AuxInt
5265 v.reset(OpAMD64MOVQconst)
5266 v.AuxInt = d &^ (1 << uint32(c))
5267 return true
5268 }
5269 return false
5270 }
5271 func rewriteValueAMD64_OpAMD64BTRQconstmodify_0(v *Value) bool {
	// match: (BTRQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (BTRQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
5275 for {
5276 valoff1 := v.AuxInt
5277 sym := v.Aux
5278 mem := v.Args[1]
5279 v_0 := v.Args[0]
5280 if v_0.Op != OpAMD64ADDQconst {
5281 break
5282 }
5283 off2 := v_0.AuxInt
5284 base := v_0.Args[0]
5285 if !(ValAndOff(valoff1).canAdd(off2)) {
5286 break
5287 }
5288 v.reset(OpAMD64BTRQconstmodify)
5289 v.AuxInt = ValAndOff(valoff1).add(off2)
5290 v.Aux = sym
5291 v.AddArg(base)
5292 v.AddArg(mem)
5293 return true
5294 }
	// match: (BTRQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (BTRQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
5298 for {
5299 valoff1 := v.AuxInt
5300 sym1 := v.Aux
5301 mem := v.Args[1]
5302 v_0 := v.Args[0]
5303 if v_0.Op != OpAMD64LEAQ {
5304 break
5305 }
5306 off2 := v_0.AuxInt
5307 sym2 := v_0.Aux
5308 base := v_0.Args[0]
5309 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
5310 break
5311 }
5312 v.reset(OpAMD64BTRQconstmodify)
5313 v.AuxInt = ValAndOff(valoff1).add(off2)
5314 v.Aux = mergeSym(sym1, sym2)
5315 v.AddArg(base)
5316 v.AddArg(mem)
5317 return true
5318 }
5319 return false
5320 }
5321 func rewriteValueAMD64_OpAMD64BTRQmodify_0(v *Value) bool {
	// match: (BTRQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (BTRQmodify [off1+off2] {sym} base val mem)
5325 for {
5326 off1 := v.AuxInt
5327 sym := v.Aux
5328 mem := v.Args[2]
5329 v_0 := v.Args[0]
5330 if v_0.Op != OpAMD64ADDQconst {
5331 break
5332 }
5333 off2 := v_0.AuxInt
5334 base := v_0.Args[0]
5335 val := v.Args[1]
5336 if !(is32Bit(off1 + off2)) {
5337 break
5338 }
5339 v.reset(OpAMD64BTRQmodify)
5340 v.AuxInt = off1 + off2
5341 v.Aux = sym
5342 v.AddArg(base)
5343 v.AddArg(val)
5344 v.AddArg(mem)
5345 return true
5346 }
	// match: (BTRQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (BTRQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
5350 for {
5351 off1 := v.AuxInt
5352 sym1 := v.Aux
5353 mem := v.Args[2]
5354 v_0 := v.Args[0]
5355 if v_0.Op != OpAMD64LEAQ {
5356 break
5357 }
5358 off2 := v_0.AuxInt
5359 sym2 := v_0.Aux
5360 base := v_0.Args[0]
5361 val := v.Args[1]
5362 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
5363 break
5364 }
5365 v.reset(OpAMD64BTRQmodify)
5366 v.AuxInt = off1 + off2
5367 v.Aux = mergeSym(sym1, sym2)
5368 v.AddArg(base)
5369 v.AddArg(val)
5370 v.AddArg(mem)
5371 return true
5372 }
5373 return false
5374 }
5375 func rewriteValueAMD64_OpAMD64BTSLconst_0(v *Value) bool {
	// match: (BTSLconst [c] (BTRLconst [c] x))
	// cond:
	// result: (BTSLconst [c] x)
5379 for {
5380 c := v.AuxInt
5381 v_0 := v.Args[0]
5382 if v_0.Op != OpAMD64BTRLconst {
5383 break
5384 }
5385 if v_0.AuxInt != c {
5386 break
5387 }
5388 x := v_0.Args[0]
5389 v.reset(OpAMD64BTSLconst)
5390 v.AuxInt = c
5391 v.AddArg(x)
5392 return true
5393 }
	// match: (BTSLconst [c] (BTCLconst [c] x))
	// cond:
	// result: (BTSLconst [c] x)
5397 for {
5398 c := v.AuxInt
5399 v_0 := v.Args[0]
5400 if v_0.Op != OpAMD64BTCLconst {
5401 break
5402 }
5403 if v_0.AuxInt != c {
5404 break
5405 }
5406 x := v_0.Args[0]
5407 v.reset(OpAMD64BTSLconst)
5408 v.AuxInt = c
5409 v.AddArg(x)
5410 return true
5411 }
	// match: (BTSLconst [c] (ORLconst [d] x))
	// cond:
	// result: (ORLconst [d | 1<<uint32(c)] x)
5415 for {
5416 c := v.AuxInt
5417 v_0 := v.Args[0]
5418 if v_0.Op != OpAMD64ORLconst {
5419 break
5420 }
5421 d := v_0.AuxInt
5422 x := v_0.Args[0]
5423 v.reset(OpAMD64ORLconst)
5424 v.AuxInt = d | 1<<uint32(c)
5425 v.AddArg(x)
5426 return true
5427 }
	// match: (BTSLconst [c] (BTSLconst [d] x))
	// cond:
	// result: (ORLconst [1<<uint32(d) | 1<<uint32(c)] x)
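	// Two single-bit sets collapse into one OR with the combined two-bit mask.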
5431 for {
5432 c := v.AuxInt
5433 v_0 := v.Args[0]
5434 if v_0.Op != OpAMD64BTSLconst {
5435 break
5436 }
5437 d := v_0.AuxInt
5438 x := v_0.Args[0]
5439 v.reset(OpAMD64ORLconst)
5440 v.AuxInt = 1<<uint32(d) | 1<<uint32(c)
5441 v.AddArg(x)
5442 return true
5443 }
	// match: (BTSLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [d|(1<<uint32(c))])
5447 for {
5448 c := v.AuxInt
5449 v_0 := v.Args[0]
5450 if v_0.Op != OpAMD64MOVLconst {
5451 break
5452 }
5453 d := v_0.AuxInt
5454 v.reset(OpAMD64MOVLconst)
5455 v.AuxInt = d | (1 << uint32(c))
5456 return true
5457 }
5458 return false
5459 }
5460 func rewriteValueAMD64_OpAMD64BTSLconstmodify_0(v *Value) bool {
	// match: (BTSLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (BTSLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
5464 for {
5465 valoff1 := v.AuxInt
5466 sym := v.Aux
5467 mem := v.Args[1]
5468 v_0 := v.Args[0]
5469 if v_0.Op != OpAMD64ADDQconst {
5470 break
5471 }
5472 off2 := v_0.AuxInt
5473 base := v_0.Args[0]
5474 if !(ValAndOff(valoff1).canAdd(off2)) {
5475 break
5476 }
5477 v.reset(OpAMD64BTSLconstmodify)
5478 v.AuxInt = ValAndOff(valoff1).add(off2)
5479 v.Aux = sym
5480 v.AddArg(base)
5481 v.AddArg(mem)
5482 return true
5483 }
	// match: (BTSLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (BTSLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
5487 for {
5488 valoff1 := v.AuxInt
5489 sym1 := v.Aux
5490 mem := v.Args[1]
5491 v_0 := v.Args[0]
5492 if v_0.Op != OpAMD64LEAQ {
5493 break
5494 }
5495 off2 := v_0.AuxInt
5496 sym2 := v_0.Aux
5497 base := v_0.Args[0]
5498 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
5499 break
5500 }
5501 v.reset(OpAMD64BTSLconstmodify)
5502 v.AuxInt = ValAndOff(valoff1).add(off2)
5503 v.Aux = mergeSym(sym1, sym2)
5504 v.AddArg(base)
5505 v.AddArg(mem)
5506 return true
5507 }
5508 return false
5509 }
5510 func rewriteValueAMD64_OpAMD64BTSLmodify_0(v *Value) bool {
	// match: (BTSLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (BTSLmodify [off1+off2] {sym} base val mem)
5514 for {
5515 off1 := v.AuxInt
5516 sym := v.Aux
5517 mem := v.Args[2]
5518 v_0 := v.Args[0]
5519 if v_0.Op != OpAMD64ADDQconst {
5520 break
5521 }
5522 off2 := v_0.AuxInt
5523 base := v_0.Args[0]
5524 val := v.Args[1]
5525 if !(is32Bit(off1 + off2)) {
5526 break
5527 }
5528 v.reset(OpAMD64BTSLmodify)
5529 v.AuxInt = off1 + off2
5530 v.Aux = sym
5531 v.AddArg(base)
5532 v.AddArg(val)
5533 v.AddArg(mem)
5534 return true
5535 }
	// match: (BTSLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (BTSLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
5539 for {
5540 off1 := v.AuxInt
5541 sym1 := v.Aux
5542 mem := v.Args[2]
5543 v_0 := v.Args[0]
5544 if v_0.Op != OpAMD64LEAQ {
5545 break
5546 }
5547 off2 := v_0.AuxInt
5548 sym2 := v_0.Aux
5549 base := v_0.Args[0]
5550 val := v.Args[1]
5551 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
5552 break
5553 }
5554 v.reset(OpAMD64BTSLmodify)
5555 v.AuxInt = off1 + off2
5556 v.Aux = mergeSym(sym1, sym2)
5557 v.AddArg(base)
5558 v.AddArg(val)
5559 v.AddArg(mem)
5560 return true
5561 }
5562 return false
5563 }
5564 func rewriteValueAMD64_OpAMD64BTSQconst_0(v *Value) bool {
	// match: (BTSQconst [c] (BTRQconst [c] x))
	// cond:
	// result: (BTSQconst [c] x)
5568 for {
5569 c := v.AuxInt
5570 v_0 := v.Args[0]
5571 if v_0.Op != OpAMD64BTRQconst {
5572 break
5573 }
5574 if v_0.AuxInt != c {
5575 break
5576 }
5577 x := v_0.Args[0]
5578 v.reset(OpAMD64BTSQconst)
5579 v.AuxInt = c
5580 v.AddArg(x)
5581 return true
5582 }
	// match: (BTSQconst [c] (BTCQconst [c] x))
	// cond:
	// result: (BTSQconst [c] x)
5586 for {
5587 c := v.AuxInt
5588 v_0 := v.Args[0]
5589 if v_0.Op != OpAMD64BTCQconst {
5590 break
5591 }
5592 if v_0.AuxInt != c {
5593 break
5594 }
5595 x := v_0.Args[0]
5596 v.reset(OpAMD64BTSQconst)
5597 v.AuxInt = c
5598 v.AddArg(x)
5599 return true
5600 }
	// match: (BTSQconst [c] (ORQconst [d] x))
	// cond:
	// result: (ORQconst [d | 1<<uint32(c)] x)
5604 for {
5605 c := v.AuxInt
5606 v_0 := v.Args[0]
5607 if v_0.Op != OpAMD64ORQconst {
5608 break
5609 }
5610 d := v_0.AuxInt
5611 x := v_0.Args[0]
5612 v.reset(OpAMD64ORQconst)
5613 v.AuxInt = d | 1<<uint32(c)
5614 v.AddArg(x)
5615 return true
5616 }
	// match: (BTSQconst [c] (BTSQconst [d] x))
	// cond:
	// result: (ORQconst [1<<uint32(d) | 1<<uint32(c)] x)
5620 for {
5621 c := v.AuxInt
5622 v_0 := v.Args[0]
5623 if v_0.Op != OpAMD64BTSQconst {
5624 break
5625 }
5626 d := v_0.AuxInt
5627 x := v_0.Args[0]
5628 v.reset(OpAMD64ORQconst)
5629 v.AuxInt = 1<<uint32(d) | 1<<uint32(c)
5630 v.AddArg(x)
5631 return true
5632 }
	// match: (BTSQconst [c] (MOVQconst [d]))
	// cond:
	// result: (MOVQconst [d|(1<<uint32(c))])
5636 for {
5637 c := v.AuxInt
5638 v_0 := v.Args[0]
5639 if v_0.Op != OpAMD64MOVQconst {
5640 break
5641 }
5642 d := v_0.AuxInt
5643 v.reset(OpAMD64MOVQconst)
5644 v.AuxInt = d | (1 << uint32(c))
5645 return true
5646 }
5647 return false
5648 }
5649 func rewriteValueAMD64_OpAMD64BTSQconstmodify_0(v *Value) bool {
	// match: (BTSQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (BTSQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
5653 for {
5654 valoff1 := v.AuxInt
5655 sym := v.Aux
5656 mem := v.Args[1]
5657 v_0 := v.Args[0]
5658 if v_0.Op != OpAMD64ADDQconst {
5659 break
5660 }
5661 off2 := v_0.AuxInt
5662 base := v_0.Args[0]
5663 if !(ValAndOff(valoff1).canAdd(off2)) {
5664 break
5665 }
5666 v.reset(OpAMD64BTSQconstmodify)
5667 v.AuxInt = ValAndOff(valoff1).add(off2)
5668 v.Aux = sym
5669 v.AddArg(base)
5670 v.AddArg(mem)
5671 return true
5672 }
	// match: (BTSQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (BTSQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
5676 for {
5677 valoff1 := v.AuxInt
5678 sym1 := v.Aux
5679 mem := v.Args[1]
5680 v_0 := v.Args[0]
5681 if v_0.Op != OpAMD64LEAQ {
5682 break
5683 }
5684 off2 := v_0.AuxInt
5685 sym2 := v_0.Aux
5686 base := v_0.Args[0]
5687 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
5688 break
5689 }
5690 v.reset(OpAMD64BTSQconstmodify)
5691 v.AuxInt = ValAndOff(valoff1).add(off2)
5692 v.Aux = mergeSym(sym1, sym2)
5693 v.AddArg(base)
5694 v.AddArg(mem)
5695 return true
5696 }
5697 return false
5698 }
5699 func rewriteValueAMD64_OpAMD64BTSQmodify_0(v *Value) bool {
	// match: (BTSQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (BTSQmodify [off1+off2] {sym} base val mem)
5703 for {
5704 off1 := v.AuxInt
5705 sym := v.Aux
5706 mem := v.Args[2]
5707 v_0 := v.Args[0]
5708 if v_0.Op != OpAMD64ADDQconst {
5709 break
5710 }
5711 off2 := v_0.AuxInt
5712 base := v_0.Args[0]
5713 val := v.Args[1]
5714 if !(is32Bit(off1 + off2)) {
5715 break
5716 }
5717 v.reset(OpAMD64BTSQmodify)
5718 v.AuxInt = off1 + off2
5719 v.Aux = sym
5720 v.AddArg(base)
5721 v.AddArg(val)
5722 v.AddArg(mem)
5723 return true
5724 }
	// match: (BTSQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (BTSQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
5728 for {
5729 off1 := v.AuxInt
5730 sym1 := v.Aux
5731 mem := v.Args[2]
5732 v_0 := v.Args[0]
5733 if v_0.Op != OpAMD64LEAQ {
5734 break
5735 }
5736 off2 := v_0.AuxInt
5737 sym2 := v_0.Aux
5738 base := v_0.Args[0]
5739 val := v.Args[1]
5740 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
5741 break
5742 }
5743 v.reset(OpAMD64BTSQmodify)
5744 v.AuxInt = off1 + off2
5745 v.Aux = mergeSym(sym1, sym2)
5746 v.AddArg(base)
5747 v.AddArg(val)
5748 v.AddArg(mem)
5749 return true
5750 }
5751 return false
5752 }
5753 func rewriteValueAMD64_OpAMD64CMOVLCC_0(v *Value) bool {
	// match: (CMOVLCC x y (InvertFlags cond))
	// cond:
	// result: (CMOVLLS x y cond)
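	// InvertFlags records that the flags came from a comparison with swapped
	// operands, so each CMOV condition maps to its operand-swapped dual
	// (here unsigned >= becomes unsigned <=).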
5757 for {
5758 _ = v.Args[2]
5759 x := v.Args[0]
5760 y := v.Args[1]
5761 v_2 := v.Args[2]
5762 if v_2.Op != OpAMD64InvertFlags {
5763 break
5764 }
5765 cond := v_2.Args[0]
5766 v.reset(OpAMD64CMOVLLS)
5767 v.AddArg(x)
5768 v.AddArg(y)
5769 v.AddArg(cond)
5770 return true
5771 }
	// match: (CMOVLCC _ x (FlagEQ))
	// cond:
	// result: x
5775 for {
5776 _ = v.Args[2]
5777 x := v.Args[1]
5778 v_2 := v.Args[2]
5779 if v_2.Op != OpAMD64FlagEQ {
5780 break
5781 }
5782 v.reset(OpCopy)
5783 v.Type = x.Type
5784 v.AddArg(x)
5785 return true
5786 }
	// match: (CMOVLCC _ x (FlagGT_UGT))
	// cond:
	// result: x
5790 for {
5791 _ = v.Args[2]
5792 x := v.Args[1]
5793 v_2 := v.Args[2]
5794 if v_2.Op != OpAMD64FlagGT_UGT {
5795 break
5796 }
5797 v.reset(OpCopy)
5798 v.Type = x.Type
5799 v.AddArg(x)
5800 return true
5801 }
	// match: (CMOVLCC y _ (FlagGT_ULT))
	// cond:
	// result: y
5805 for {
5806 _ = v.Args[2]
5807 y := v.Args[0]
5808 v_2 := v.Args[2]
5809 if v_2.Op != OpAMD64FlagGT_ULT {
5810 break
5811 }
5812 v.reset(OpCopy)
5813 v.Type = y.Type
5814 v.AddArg(y)
5815 return true
5816 }
	// match: (CMOVLCC y _ (FlagLT_ULT))
	// cond:
	// result: y
5820 for {
5821 _ = v.Args[2]
5822 y := v.Args[0]
5823 v_2 := v.Args[2]
5824 if v_2.Op != OpAMD64FlagLT_ULT {
5825 break
5826 }
5827 v.reset(OpCopy)
5828 v.Type = y.Type
5829 v.AddArg(y)
5830 return true
5831 }
	// match: (CMOVLCC _ x (FlagLT_UGT))
	// cond:
	// result: x
5835 for {
5836 _ = v.Args[2]
5837 x := v.Args[1]
5838 v_2 := v.Args[2]
5839 if v_2.Op != OpAMD64FlagLT_UGT {
5840 break
5841 }
5842 v.reset(OpCopy)
5843 v.Type = x.Type
5844 v.AddArg(x)
5845 return true
5846 }
5847 return false
5848 }
5849 func rewriteValueAMD64_OpAMD64CMOVLCS_0(v *Value) bool {
5850
5851
5852
5853 for {
5854 _ = v.Args[2]
5855 x := v.Args[0]
5856 y := v.Args[1]
5857 v_2 := v.Args[2]
5858 if v_2.Op != OpAMD64InvertFlags {
5859 break
5860 }
5861 cond := v_2.Args[0]
5862 v.reset(OpAMD64CMOVLHI)
5863 v.AddArg(x)
5864 v.AddArg(y)
5865 v.AddArg(cond)
5866 return true
5867 }
5868
5869
5870
5871 for {
5872 _ = v.Args[2]
5873 y := v.Args[0]
5874 v_2 := v.Args[2]
5875 if v_2.Op != OpAMD64FlagEQ {
5876 break
5877 }
5878 v.reset(OpCopy)
5879 v.Type = y.Type
5880 v.AddArg(y)
5881 return true
5882 }
5883
5884
5885
5886 for {
5887 _ = v.Args[2]
5888 y := v.Args[0]
5889 v_2 := v.Args[2]
5890 if v_2.Op != OpAMD64FlagGT_UGT {
5891 break
5892 }
5893 v.reset(OpCopy)
5894 v.Type = y.Type
5895 v.AddArg(y)
5896 return true
5897 }
5898
5899
5900
5901 for {
5902 _ = v.Args[2]
5903 x := v.Args[1]
5904 v_2 := v.Args[2]
5905 if v_2.Op != OpAMD64FlagGT_ULT {
5906 break
5907 }
5908 v.reset(OpCopy)
5909 v.Type = x.Type
5910 v.AddArg(x)
5911 return true
5912 }
5913
5914
5915
5916 for {
5917 _ = v.Args[2]
5918 x := v.Args[1]
5919 v_2 := v.Args[2]
5920 if v_2.Op != OpAMD64FlagLT_ULT {
5921 break
5922 }
5923 v.reset(OpCopy)
5924 v.Type = x.Type
5925 v.AddArg(x)
5926 return true
5927 }
5928
5929
5930
5931 for {
5932 _ = v.Args[2]
5933 y := v.Args[0]
5934 v_2 := v.Args[2]
5935 if v_2.Op != OpAMD64FlagLT_UGT {
5936 break
5937 }
5938 v.reset(OpCopy)
5939 v.Type = y.Type
5940 v.AddArg(y)
5941 return true
5942 }
5943 return false
5944 }
5945 func rewriteValueAMD64_OpAMD64CMOVLEQ_0(v *Value) bool {
5946
5947
5948
5949 for {
5950 _ = v.Args[2]
5951 x := v.Args[0]
5952 y := v.Args[1]
5953 v_2 := v.Args[2]
5954 if v_2.Op != OpAMD64InvertFlags {
5955 break
5956 }
5957 cond := v_2.Args[0]
5958 v.reset(OpAMD64CMOVLEQ)
5959 v.AddArg(x)
5960 v.AddArg(y)
5961 v.AddArg(cond)
5962 return true
5963 }
5964
5965
5966
5967 for {
5968 _ = v.Args[2]
5969 x := v.Args[1]
5970 v_2 := v.Args[2]
5971 if v_2.Op != OpAMD64FlagEQ {
5972 break
5973 }
5974 v.reset(OpCopy)
5975 v.Type = x.Type
5976 v.AddArg(x)
5977 return true
5978 }
5979
5980
5981
5982 for {
5983 _ = v.Args[2]
5984 y := v.Args[0]
5985 v_2 := v.Args[2]
5986 if v_2.Op != OpAMD64FlagGT_UGT {
5987 break
5988 }
5989 v.reset(OpCopy)
5990 v.Type = y.Type
5991 v.AddArg(y)
5992 return true
5993 }
5994
5995
5996
5997 for {
5998 _ = v.Args[2]
5999 y := v.Args[0]
6000 v_2 := v.Args[2]
6001 if v_2.Op != OpAMD64FlagGT_ULT {
6002 break
6003 }
6004 v.reset(OpCopy)
6005 v.Type = y.Type
6006 v.AddArg(y)
6007 return true
6008 }
6009
6010
6011
6012 for {
6013 _ = v.Args[2]
6014 y := v.Args[0]
6015 v_2 := v.Args[2]
6016 if v_2.Op != OpAMD64FlagLT_ULT {
6017 break
6018 }
6019 v.reset(OpCopy)
6020 v.Type = y.Type
6021 v.AddArg(y)
6022 return true
6023 }
6024
6025
6026
6027 for {
6028 _ = v.Args[2]
6029 y := v.Args[0]
6030 v_2 := v.Args[2]
6031 if v_2.Op != OpAMD64FlagLT_UGT {
6032 break
6033 }
6034 v.reset(OpCopy)
6035 v.Type = y.Type
6036 v.AddArg(y)
6037 return true
6038 }
6039 return false
6040 }
6041 func rewriteValueAMD64_OpAMD64CMOVLGE_0(v *Value) bool {
6042
6043
6044
6045 for {
6046 _ = v.Args[2]
6047 x := v.Args[0]
6048 y := v.Args[1]
6049 v_2 := v.Args[2]
6050 if v_2.Op != OpAMD64InvertFlags {
6051 break
6052 }
6053 cond := v_2.Args[0]
6054 v.reset(OpAMD64CMOVLLE)
6055 v.AddArg(x)
6056 v.AddArg(y)
6057 v.AddArg(cond)
6058 return true
6059 }
6060
6061
6062
6063 for {
6064 _ = v.Args[2]
6065 x := v.Args[1]
6066 v_2 := v.Args[2]
6067 if v_2.Op != OpAMD64FlagEQ {
6068 break
6069 }
6070 v.reset(OpCopy)
6071 v.Type = x.Type
6072 v.AddArg(x)
6073 return true
6074 }
6075
6076
6077
6078 for {
6079 _ = v.Args[2]
6080 x := v.Args[1]
6081 v_2 := v.Args[2]
6082 if v_2.Op != OpAMD64FlagGT_UGT {
6083 break
6084 }
6085 v.reset(OpCopy)
6086 v.Type = x.Type
6087 v.AddArg(x)
6088 return true
6089 }
6090
6091
6092
6093 for {
6094 _ = v.Args[2]
6095 x := v.Args[1]
6096 v_2 := v.Args[2]
6097 if v_2.Op != OpAMD64FlagGT_ULT {
6098 break
6099 }
6100 v.reset(OpCopy)
6101 v.Type = x.Type
6102 v.AddArg(x)
6103 return true
6104 }
6105
6106
6107
6108 for {
6109 _ = v.Args[2]
6110 y := v.Args[0]
6111 v_2 := v.Args[2]
6112 if v_2.Op != OpAMD64FlagLT_ULT {
6113 break
6114 }
6115 v.reset(OpCopy)
6116 v.Type = y.Type
6117 v.AddArg(y)
6118 return true
6119 }
6120
6121
6122
6123 for {
6124 _ = v.Args[2]
6125 y := v.Args[0]
6126 v_2 := v.Args[2]
6127 if v_2.Op != OpAMD64FlagLT_UGT {
6128 break
6129 }
6130 v.reset(OpCopy)
6131 v.Type = y.Type
6132 v.AddArg(y)
6133 return true
6134 }
6135 return false
6136 }
6137 func rewriteValueAMD64_OpAMD64CMOVLGT_0(v *Value) bool {
6138
6139
6140
6141 for {
6142 _ = v.Args[2]
6143 x := v.Args[0]
6144 y := v.Args[1]
6145 v_2 := v.Args[2]
6146 if v_2.Op != OpAMD64InvertFlags {
6147 break
6148 }
6149 cond := v_2.Args[0]
6150 v.reset(OpAMD64CMOVLLT)
6151 v.AddArg(x)
6152 v.AddArg(y)
6153 v.AddArg(cond)
6154 return true
6155 }
6156
6157
6158
6159 for {
6160 _ = v.Args[2]
6161 y := v.Args[0]
6162 v_2 := v.Args[2]
6163 if v_2.Op != OpAMD64FlagEQ {
6164 break
6165 }
6166 v.reset(OpCopy)
6167 v.Type = y.Type
6168 v.AddArg(y)
6169 return true
6170 }
6171
6172
6173
6174 for {
6175 _ = v.Args[2]
6176 x := v.Args[1]
6177 v_2 := v.Args[2]
6178 if v_2.Op != OpAMD64FlagGT_UGT {
6179 break
6180 }
6181 v.reset(OpCopy)
6182 v.Type = x.Type
6183 v.AddArg(x)
6184 return true
6185 }
6186
6187
6188
6189 for {
6190 _ = v.Args[2]
6191 x := v.Args[1]
6192 v_2 := v.Args[2]
6193 if v_2.Op != OpAMD64FlagGT_ULT {
6194 break
6195 }
6196 v.reset(OpCopy)
6197 v.Type = x.Type
6198 v.AddArg(x)
6199 return true
6200 }
6201
6202
6203
6204 for {
6205 _ = v.Args[2]
6206 y := v.Args[0]
6207 v_2 := v.Args[2]
6208 if v_2.Op != OpAMD64FlagLT_ULT {
6209 break
6210 }
6211 v.reset(OpCopy)
6212 v.Type = y.Type
6213 v.AddArg(y)
6214 return true
6215 }
6216
6217
6218
6219 for {
6220 _ = v.Args[2]
6221 y := v.Args[0]
6222 v_2 := v.Args[2]
6223 if v_2.Op != OpAMD64FlagLT_UGT {
6224 break
6225 }
6226 v.reset(OpCopy)
6227 v.Type = y.Type
6228 v.AddArg(y)
6229 return true
6230 }
6231 return false
6232 }
6233 func rewriteValueAMD64_OpAMD64CMOVLHI_0(v *Value) bool {
6234
6235
6236
6237 for {
6238 _ = v.Args[2]
6239 x := v.Args[0]
6240 y := v.Args[1]
6241 v_2 := v.Args[2]
6242 if v_2.Op != OpAMD64InvertFlags {
6243 break
6244 }
6245 cond := v_2.Args[0]
6246 v.reset(OpAMD64CMOVLCS)
6247 v.AddArg(x)
6248 v.AddArg(y)
6249 v.AddArg(cond)
6250 return true
6251 }
6252
6253
6254
6255 for {
6256 _ = v.Args[2]
6257 y := v.Args[0]
6258 v_2 := v.Args[2]
6259 if v_2.Op != OpAMD64FlagEQ {
6260 break
6261 }
6262 v.reset(OpCopy)
6263 v.Type = y.Type
6264 v.AddArg(y)
6265 return true
6266 }
6267
6268
6269
6270 for {
6271 _ = v.Args[2]
6272 x := v.Args[1]
6273 v_2 := v.Args[2]
6274 if v_2.Op != OpAMD64FlagGT_UGT {
6275 break
6276 }
6277 v.reset(OpCopy)
6278 v.Type = x.Type
6279 v.AddArg(x)
6280 return true
6281 }
6282
6283
6284
6285 for {
6286 _ = v.Args[2]
6287 y := v.Args[0]
6288 v_2 := v.Args[2]
6289 if v_2.Op != OpAMD64FlagGT_ULT {
6290 break
6291 }
6292 v.reset(OpCopy)
6293 v.Type = y.Type
6294 v.AddArg(y)
6295 return true
6296 }
6297
6298
6299
6300 for {
6301 _ = v.Args[2]
6302 y := v.Args[0]
6303 v_2 := v.Args[2]
6304 if v_2.Op != OpAMD64FlagLT_ULT {
6305 break
6306 }
6307 v.reset(OpCopy)
6308 v.Type = y.Type
6309 v.AddArg(y)
6310 return true
6311 }
6312
6313
6314
6315 for {
6316 _ = v.Args[2]
6317 x := v.Args[1]
6318 v_2 := v.Args[2]
6319 if v_2.Op != OpAMD64FlagLT_UGT {
6320 break
6321 }
6322 v.reset(OpCopy)
6323 v.Type = x.Type
6324 v.AddArg(x)
6325 return true
6326 }
6327 return false
6328 }
6329 func rewriteValueAMD64_OpAMD64CMOVLLE_0(v *Value) bool {
6330
6331
6332
6333 for {
6334 _ = v.Args[2]
6335 x := v.Args[0]
6336 y := v.Args[1]
6337 v_2 := v.Args[2]
6338 if v_2.Op != OpAMD64InvertFlags {
6339 break
6340 }
6341 cond := v_2.Args[0]
6342 v.reset(OpAMD64CMOVLGE)
6343 v.AddArg(x)
6344 v.AddArg(y)
6345 v.AddArg(cond)
6346 return true
6347 }
6348
6349
6350
6351 for {
6352 _ = v.Args[2]
6353 x := v.Args[1]
6354 v_2 := v.Args[2]
6355 if v_2.Op != OpAMD64FlagEQ {
6356 break
6357 }
6358 v.reset(OpCopy)
6359 v.Type = x.Type
6360 v.AddArg(x)
6361 return true
6362 }
6363
6364
6365
6366 for {
6367 _ = v.Args[2]
6368 y := v.Args[0]
6369 v_2 := v.Args[2]
6370 if v_2.Op != OpAMD64FlagGT_UGT {
6371 break
6372 }
6373 v.reset(OpCopy)
6374 v.Type = y.Type
6375 v.AddArg(y)
6376 return true
6377 }
6378
6379
6380
6381 for {
6382 _ = v.Args[2]
6383 y := v.Args[0]
6384 v_2 := v.Args[2]
6385 if v_2.Op != OpAMD64FlagGT_ULT {
6386 break
6387 }
6388 v.reset(OpCopy)
6389 v.Type = y.Type
6390 v.AddArg(y)
6391 return true
6392 }
6393
6394
6395
6396 for {
6397 _ = v.Args[2]
6398 x := v.Args[1]
6399 v_2 := v.Args[2]
6400 if v_2.Op != OpAMD64FlagLT_ULT {
6401 break
6402 }
6403 v.reset(OpCopy)
6404 v.Type = x.Type
6405 v.AddArg(x)
6406 return true
6407 }
6408
6409
6410
6411 for {
6412 _ = v.Args[2]
6413 x := v.Args[1]
6414 v_2 := v.Args[2]
6415 if v_2.Op != OpAMD64FlagLT_UGT {
6416 break
6417 }
6418 v.reset(OpCopy)
6419 v.Type = x.Type
6420 v.AddArg(x)
6421 return true
6422 }
6423 return false
6424 }
6425 func rewriteValueAMD64_OpAMD64CMOVLLS_0(v *Value) bool {
6426
6427
6428
6429 for {
6430 _ = v.Args[2]
6431 x := v.Args[0]
6432 y := v.Args[1]
6433 v_2 := v.Args[2]
6434 if v_2.Op != OpAMD64InvertFlags {
6435 break
6436 }
6437 cond := v_2.Args[0]
6438 v.reset(OpAMD64CMOVLCC)
6439 v.AddArg(x)
6440 v.AddArg(y)
6441 v.AddArg(cond)
6442 return true
6443 }
6444
6445
6446
6447 for {
6448 _ = v.Args[2]
6449 x := v.Args[1]
6450 v_2 := v.Args[2]
6451 if v_2.Op != OpAMD64FlagEQ {
6452 break
6453 }
6454 v.reset(OpCopy)
6455 v.Type = x.Type
6456 v.AddArg(x)
6457 return true
6458 }
6459
6460
6461
6462 for {
6463 _ = v.Args[2]
6464 y := v.Args[0]
6465 v_2 := v.Args[2]
6466 if v_2.Op != OpAMD64FlagGT_UGT {
6467 break
6468 }
6469 v.reset(OpCopy)
6470 v.Type = y.Type
6471 v.AddArg(y)
6472 return true
6473 }
6474
6475
6476
6477 for {
6478 _ = v.Args[2]
6479 x := v.Args[1]
6480 v_2 := v.Args[2]
6481 if v_2.Op != OpAMD64FlagGT_ULT {
6482 break
6483 }
6484 v.reset(OpCopy)
6485 v.Type = x.Type
6486 v.AddArg(x)
6487 return true
6488 }
6489
6490
6491
6492 for {
6493 _ = v.Args[2]
6494 x := v.Args[1]
6495 v_2 := v.Args[2]
6496 if v_2.Op != OpAMD64FlagLT_ULT {
6497 break
6498 }
6499 v.reset(OpCopy)
6500 v.Type = x.Type
6501 v.AddArg(x)
6502 return true
6503 }
6504
6505
6506
6507 for {
6508 _ = v.Args[2]
6509 y := v.Args[0]
6510 v_2 := v.Args[2]
6511 if v_2.Op != OpAMD64FlagLT_UGT {
6512 break
6513 }
6514 v.reset(OpCopy)
6515 v.Type = y.Type
6516 v.AddArg(y)
6517 return true
6518 }
6519 return false
6520 }
6521 func rewriteValueAMD64_OpAMD64CMOVLLT_0(v *Value) bool {
6522
6523
6524
6525 for {
6526 _ = v.Args[2]
6527 x := v.Args[0]
6528 y := v.Args[1]
6529 v_2 := v.Args[2]
6530 if v_2.Op != OpAMD64InvertFlags {
6531 break
6532 }
6533 cond := v_2.Args[0]
6534 v.reset(OpAMD64CMOVLGT)
6535 v.AddArg(x)
6536 v.AddArg(y)
6537 v.AddArg(cond)
6538 return true
6539 }
6540
6541
6542
6543 for {
6544 _ = v.Args[2]
6545 y := v.Args[0]
6546 v_2 := v.Args[2]
6547 if v_2.Op != OpAMD64FlagEQ {
6548 break
6549 }
6550 v.reset(OpCopy)
6551 v.Type = y.Type
6552 v.AddArg(y)
6553 return true
6554 }
6555
6556
6557
6558 for {
6559 _ = v.Args[2]
6560 y := v.Args[0]
6561 v_2 := v.Args[2]
6562 if v_2.Op != OpAMD64FlagGT_UGT {
6563 break
6564 }
6565 v.reset(OpCopy)
6566 v.Type = y.Type
6567 v.AddArg(y)
6568 return true
6569 }
6570
6571
6572
6573 for {
6574 _ = v.Args[2]
6575 y := v.Args[0]
6576 v_2 := v.Args[2]
6577 if v_2.Op != OpAMD64FlagGT_ULT {
6578 break
6579 }
6580 v.reset(OpCopy)
6581 v.Type = y.Type
6582 v.AddArg(y)
6583 return true
6584 }
6585
6586
6587
6588 for {
6589 _ = v.Args[2]
6590 x := v.Args[1]
6591 v_2 := v.Args[2]
6592 if v_2.Op != OpAMD64FlagLT_ULT {
6593 break
6594 }
6595 v.reset(OpCopy)
6596 v.Type = x.Type
6597 v.AddArg(x)
6598 return true
6599 }
6600
6601
6602
6603 for {
6604 _ = v.Args[2]
6605 x := v.Args[1]
6606 v_2 := v.Args[2]
6607 if v_2.Op != OpAMD64FlagLT_UGT {
6608 break
6609 }
6610 v.reset(OpCopy)
6611 v.Type = x.Type
6612 v.AddArg(x)
6613 return true
6614 }
6615 return false
6616 }
6617 func rewriteValueAMD64_OpAMD64CMOVLNE_0(v *Value) bool {
6618
6619
6620
6621 for {
6622 _ = v.Args[2]
6623 x := v.Args[0]
6624 y := v.Args[1]
6625 v_2 := v.Args[2]
6626 if v_2.Op != OpAMD64InvertFlags {
6627 break
6628 }
6629 cond := v_2.Args[0]
6630 v.reset(OpAMD64CMOVLNE)
6631 v.AddArg(x)
6632 v.AddArg(y)
6633 v.AddArg(cond)
6634 return true
6635 }
6636
6637
6638
6639 for {
6640 _ = v.Args[2]
6641 y := v.Args[0]
6642 v_2 := v.Args[2]
6643 if v_2.Op != OpAMD64FlagEQ {
6644 break
6645 }
6646 v.reset(OpCopy)
6647 v.Type = y.Type
6648 v.AddArg(y)
6649 return true
6650 }
6651
6652
6653
6654 for {
6655 _ = v.Args[2]
6656 x := v.Args[1]
6657 v_2 := v.Args[2]
6658 if v_2.Op != OpAMD64FlagGT_UGT {
6659 break
6660 }
6661 v.reset(OpCopy)
6662 v.Type = x.Type
6663 v.AddArg(x)
6664 return true
6665 }
6666
6667
6668
6669 for {
6670 _ = v.Args[2]
6671 x := v.Args[1]
6672 v_2 := v.Args[2]
6673 if v_2.Op != OpAMD64FlagGT_ULT {
6674 break
6675 }
6676 v.reset(OpCopy)
6677 v.Type = x.Type
6678 v.AddArg(x)
6679 return true
6680 }
6681
6682
6683
6684 for {
6685 _ = v.Args[2]
6686 x := v.Args[1]
6687 v_2 := v.Args[2]
6688 if v_2.Op != OpAMD64FlagLT_ULT {
6689 break
6690 }
6691 v.reset(OpCopy)
6692 v.Type = x.Type
6693 v.AddArg(x)
6694 return true
6695 }
6696
6697
6698
6699 for {
6700 _ = v.Args[2]
6701 x := v.Args[1]
6702 v_2 := v.Args[2]
6703 if v_2.Op != OpAMD64FlagLT_UGT {
6704 break
6705 }
6706 v.reset(OpCopy)
6707 v.Type = x.Type
6708 v.AddArg(x)
6709 return true
6710 }
6711 return false
6712 }
6713 func rewriteValueAMD64_OpAMD64CMOVQCC_0(v *Value) bool {
6714
6715
6716
6717 for {
6718 _ = v.Args[2]
6719 x := v.Args[0]
6720 y := v.Args[1]
6721 v_2 := v.Args[2]
6722 if v_2.Op != OpAMD64InvertFlags {
6723 break
6724 }
6725 cond := v_2.Args[0]
6726 v.reset(OpAMD64CMOVQLS)
6727 v.AddArg(x)
6728 v.AddArg(y)
6729 v.AddArg(cond)
6730 return true
6731 }
6732
6733
6734
6735 for {
6736 _ = v.Args[2]
6737 x := v.Args[1]
6738 v_2 := v.Args[2]
6739 if v_2.Op != OpAMD64FlagEQ {
6740 break
6741 }
6742 v.reset(OpCopy)
6743 v.Type = x.Type
6744 v.AddArg(x)
6745 return true
6746 }
6747
6748
6749
6750 for {
6751 _ = v.Args[2]
6752 x := v.Args[1]
6753 v_2 := v.Args[2]
6754 if v_2.Op != OpAMD64FlagGT_UGT {
6755 break
6756 }
6757 v.reset(OpCopy)
6758 v.Type = x.Type
6759 v.AddArg(x)
6760 return true
6761 }
6762
6763
6764
6765 for {
6766 _ = v.Args[2]
6767 y := v.Args[0]
6768 v_2 := v.Args[2]
6769 if v_2.Op != OpAMD64FlagGT_ULT {
6770 break
6771 }
6772 v.reset(OpCopy)
6773 v.Type = y.Type
6774 v.AddArg(y)
6775 return true
6776 }
6777
6778
6779
6780 for {
6781 _ = v.Args[2]
6782 y := v.Args[0]
6783 v_2 := v.Args[2]
6784 if v_2.Op != OpAMD64FlagLT_ULT {
6785 break
6786 }
6787 v.reset(OpCopy)
6788 v.Type = y.Type
6789 v.AddArg(y)
6790 return true
6791 }
6792
6793
6794
6795 for {
6796 _ = v.Args[2]
6797 x := v.Args[1]
6798 v_2 := v.Args[2]
6799 if v_2.Op != OpAMD64FlagLT_UGT {
6800 break
6801 }
6802 v.reset(OpCopy)
6803 v.Type = x.Type
6804 v.AddArg(x)
6805 return true
6806 }
6807 return false
6808 }
6809 func rewriteValueAMD64_OpAMD64CMOVQCS_0(v *Value) bool {
6810
6811
6812
6813 for {
6814 _ = v.Args[2]
6815 x := v.Args[0]
6816 y := v.Args[1]
6817 v_2 := v.Args[2]
6818 if v_2.Op != OpAMD64InvertFlags {
6819 break
6820 }
6821 cond := v_2.Args[0]
6822 v.reset(OpAMD64CMOVQHI)
6823 v.AddArg(x)
6824 v.AddArg(y)
6825 v.AddArg(cond)
6826 return true
6827 }
6828
6829
6830
6831 for {
6832 _ = v.Args[2]
6833 y := v.Args[0]
6834 v_2 := v.Args[2]
6835 if v_2.Op != OpAMD64FlagEQ {
6836 break
6837 }
6838 v.reset(OpCopy)
6839 v.Type = y.Type
6840 v.AddArg(y)
6841 return true
6842 }
6843
6844
6845
6846 for {
6847 _ = v.Args[2]
6848 y := v.Args[0]
6849 v_2 := v.Args[2]
6850 if v_2.Op != OpAMD64FlagGT_UGT {
6851 break
6852 }
6853 v.reset(OpCopy)
6854 v.Type = y.Type
6855 v.AddArg(y)
6856 return true
6857 }
6858
6859
6860
6861 for {
6862 _ = v.Args[2]
6863 x := v.Args[1]
6864 v_2 := v.Args[2]
6865 if v_2.Op != OpAMD64FlagGT_ULT {
6866 break
6867 }
6868 v.reset(OpCopy)
6869 v.Type = x.Type
6870 v.AddArg(x)
6871 return true
6872 }
6873
6874
6875
6876 for {
6877 _ = v.Args[2]
6878 x := v.Args[1]
6879 v_2 := v.Args[2]
6880 if v_2.Op != OpAMD64FlagLT_ULT {
6881 break
6882 }
6883 v.reset(OpCopy)
6884 v.Type = x.Type
6885 v.AddArg(x)
6886 return true
6887 }
6888
6889
6890
6891 for {
6892 _ = v.Args[2]
6893 y := v.Args[0]
6894 v_2 := v.Args[2]
6895 if v_2.Op != OpAMD64FlagLT_UGT {
6896 break
6897 }
6898 v.reset(OpCopy)
6899 v.Type = y.Type
6900 v.AddArg(y)
6901 return true
6902 }
6903 return false
6904 }
6905 func rewriteValueAMD64_OpAMD64CMOVQEQ_0(v *Value) bool {
	// match: (CMOVQEQ x y (InvertFlags cond))
	// cond:
	// result: (CMOVQEQ x y cond)
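	// Equality is symmetric, so swapping the comparison's operands leaves
	// the condition unchanged; only the InvertFlags wrapper is peeled off.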
6909 for {
6910 _ = v.Args[2]
6911 x := v.Args[0]
6912 y := v.Args[1]
6913 v_2 := v.Args[2]
6914 if v_2.Op != OpAMD64InvertFlags {
6915 break
6916 }
6917 cond := v_2.Args[0]
6918 v.reset(OpAMD64CMOVQEQ)
6919 v.AddArg(x)
6920 v.AddArg(y)
6921 v.AddArg(cond)
6922 return true
6923 }
	// match: (CMOVQEQ _ x (FlagEQ))
	// cond:
	// result: x
6927 for {
6928 _ = v.Args[2]
6929 x := v.Args[1]
6930 v_2 := v.Args[2]
6931 if v_2.Op != OpAMD64FlagEQ {
6932 break
6933 }
6934 v.reset(OpCopy)
6935 v.Type = x.Type
6936 v.AddArg(x)
6937 return true
6938 }
	// match: (CMOVQEQ y _ (FlagGT_UGT))
	// cond:
	// result: y
6942 for {
6943 _ = v.Args[2]
6944 y := v.Args[0]
6945 v_2 := v.Args[2]
6946 if v_2.Op != OpAMD64FlagGT_UGT {
6947 break
6948 }
6949 v.reset(OpCopy)
6950 v.Type = y.Type
6951 v.AddArg(y)
6952 return true
6953 }
	// match: (CMOVQEQ y _ (FlagGT_ULT))
	// cond:
	// result: y
6957 for {
6958 _ = v.Args[2]
6959 y := v.Args[0]
6960 v_2 := v.Args[2]
6961 if v_2.Op != OpAMD64FlagGT_ULT {
6962 break
6963 }
6964 v.reset(OpCopy)
6965 v.Type = y.Type
6966 v.AddArg(y)
6967 return true
6968 }
	// match: (CMOVQEQ y _ (FlagLT_ULT))
	// cond:
	// result: y
6972 for {
6973 _ = v.Args[2]
6974 y := v.Args[0]
6975 v_2 := v.Args[2]
6976 if v_2.Op != OpAMD64FlagLT_ULT {
6977 break
6978 }
6979 v.reset(OpCopy)
6980 v.Type = y.Type
6981 v.AddArg(y)
6982 return true
6983 }
	// match: (CMOVQEQ y _ (FlagLT_UGT))
	// cond:
	// result: y
6987 for {
6988 _ = v.Args[2]
6989 y := v.Args[0]
6990 v_2 := v.Args[2]
6991 if v_2.Op != OpAMD64FlagLT_UGT {
6992 break
6993 }
6994 v.reset(OpCopy)
6995 v.Type = y.Type
6996 v.AddArg(y)
6997 return true
6998 }
	// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
	// cond: c != 0
	// result: x
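	// BSFQ sets ZF only when its input is zero; ORing in a nonzero constant
	// c guarantees a nonzero input, so the EQ arm can never be taken and the
	// CMOV always yields x.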
7002 for {
7003 _ = v.Args[2]
7004 x := v.Args[0]
7005 v_2 := v.Args[2]
7006 if v_2.Op != OpSelect1 {
7007 break
7008 }
7009 v_2_0 := v_2.Args[0]
7010 if v_2_0.Op != OpAMD64BSFQ {
7011 break
7012 }
7013 v_2_0_0 := v_2_0.Args[0]
7014 if v_2_0_0.Op != OpAMD64ORQconst {
7015 break
7016 }
7017 c := v_2_0_0.AuxInt
7018 if !(c != 0) {
7019 break
7020 }
7021 v.reset(OpCopy)
7022 v.Type = x.Type
7023 v.AddArg(x)
7024 return true
7025 }
7026 return false
7027 }
7028 func rewriteValueAMD64_OpAMD64CMOVQGE_0(v *Value) bool {
7029
7030
7031
7032 for {
7033 _ = v.Args[2]
7034 x := v.Args[0]
7035 y := v.Args[1]
7036 v_2 := v.Args[2]
7037 if v_2.Op != OpAMD64InvertFlags {
7038 break
7039 }
7040 cond := v_2.Args[0]
7041 v.reset(OpAMD64CMOVQLE)
7042 v.AddArg(x)
7043 v.AddArg(y)
7044 v.AddArg(cond)
7045 return true
7046 }
7047
7048
7049
7050 for {
7051 _ = v.Args[2]
7052 x := v.Args[1]
7053 v_2 := v.Args[2]
7054 if v_2.Op != OpAMD64FlagEQ {
7055 break
7056 }
7057 v.reset(OpCopy)
7058 v.Type = x.Type
7059 v.AddArg(x)
7060 return true
7061 }
7062
7063
7064
7065 for {
7066 _ = v.Args[2]
7067 x := v.Args[1]
7068 v_2 := v.Args[2]
7069 if v_2.Op != OpAMD64FlagGT_UGT {
7070 break
7071 }
7072 v.reset(OpCopy)
7073 v.Type = x.Type
7074 v.AddArg(x)
7075 return true
7076 }
7077
7078
7079
7080 for {
7081 _ = v.Args[2]
7082 x := v.Args[1]
7083 v_2 := v.Args[2]
7084 if v_2.Op != OpAMD64FlagGT_ULT {
7085 break
7086 }
7087 v.reset(OpCopy)
7088 v.Type = x.Type
7089 v.AddArg(x)
7090 return true
7091 }
7092
7093
7094
7095 for {
7096 _ = v.Args[2]
7097 y := v.Args[0]
7098 v_2 := v.Args[2]
7099 if v_2.Op != OpAMD64FlagLT_ULT {
7100 break
7101 }
7102 v.reset(OpCopy)
7103 v.Type = y.Type
7104 v.AddArg(y)
7105 return true
7106 }
7107
7108
7109
7110 for {
7111 _ = v.Args[2]
7112 y := v.Args[0]
7113 v_2 := v.Args[2]
7114 if v_2.Op != OpAMD64FlagLT_UGT {
7115 break
7116 }
7117 v.reset(OpCopy)
7118 v.Type = y.Type
7119 v.AddArg(y)
7120 return true
7121 }
7122 return false
7123 }
7124 func rewriteValueAMD64_OpAMD64CMOVQGT_0(v *Value) bool {
7125
7126
7127
7128 for {
7129 _ = v.Args[2]
7130 x := v.Args[0]
7131 y := v.Args[1]
7132 v_2 := v.Args[2]
7133 if v_2.Op != OpAMD64InvertFlags {
7134 break
7135 }
7136 cond := v_2.Args[0]
7137 v.reset(OpAMD64CMOVQLT)
7138 v.AddArg(x)
7139 v.AddArg(y)
7140 v.AddArg(cond)
7141 return true
7142 }
7143
7144
7145
7146 for {
7147 _ = v.Args[2]
7148 y := v.Args[0]
7149 v_2 := v.Args[2]
7150 if v_2.Op != OpAMD64FlagEQ {
7151 break
7152 }
7153 v.reset(OpCopy)
7154 v.Type = y.Type
7155 v.AddArg(y)
7156 return true
7157 }
7158
7159
7160
7161 for {
7162 _ = v.Args[2]
7163 x := v.Args[1]
7164 v_2 := v.Args[2]
7165 if v_2.Op != OpAMD64FlagGT_UGT {
7166 break
7167 }
7168 v.reset(OpCopy)
7169 v.Type = x.Type
7170 v.AddArg(x)
7171 return true
7172 }
7173
7174
7175
7176 for {
7177 _ = v.Args[2]
7178 x := v.Args[1]
7179 v_2 := v.Args[2]
7180 if v_2.Op != OpAMD64FlagGT_ULT {
7181 break
7182 }
7183 v.reset(OpCopy)
7184 v.Type = x.Type
7185 v.AddArg(x)
7186 return true
7187 }
7188
7189
7190
7191 for {
7192 _ = v.Args[2]
7193 y := v.Args[0]
7194 v_2 := v.Args[2]
7195 if v_2.Op != OpAMD64FlagLT_ULT {
7196 break
7197 }
7198 v.reset(OpCopy)
7199 v.Type = y.Type
7200 v.AddArg(y)
7201 return true
7202 }
7203
7204
7205
7206 for {
7207 _ = v.Args[2]
7208 y := v.Args[0]
7209 v_2 := v.Args[2]
7210 if v_2.Op != OpAMD64FlagLT_UGT {
7211 break
7212 }
7213 v.reset(OpCopy)
7214 v.Type = y.Type
7215 v.AddArg(y)
7216 return true
7217 }
7218 return false
7219 }
7220 func rewriteValueAMD64_OpAMD64CMOVQHI_0(v *Value) bool {
7221
7222
7223
7224 for {
7225 _ = v.Args[2]
7226 x := v.Args[0]
7227 y := v.Args[1]
7228 v_2 := v.Args[2]
7229 if v_2.Op != OpAMD64InvertFlags {
7230 break
7231 }
7232 cond := v_2.Args[0]
7233 v.reset(OpAMD64CMOVQCS)
7234 v.AddArg(x)
7235 v.AddArg(y)
7236 v.AddArg(cond)
7237 return true
7238 }
7239
7240
7241
7242 for {
7243 _ = v.Args[2]
7244 y := v.Args[0]
7245 v_2 := v.Args[2]
7246 if v_2.Op != OpAMD64FlagEQ {
7247 break
7248 }
7249 v.reset(OpCopy)
7250 v.Type = y.Type
7251 v.AddArg(y)
7252 return true
7253 }
7254
7255
7256
7257 for {
7258 _ = v.Args[2]
7259 x := v.Args[1]
7260 v_2 := v.Args[2]
7261 if v_2.Op != OpAMD64FlagGT_UGT {
7262 break
7263 }
7264 v.reset(OpCopy)
7265 v.Type = x.Type
7266 v.AddArg(x)
7267 return true
7268 }
7269
7270
7271
7272 for {
7273 _ = v.Args[2]
7274 y := v.Args[0]
7275 v_2 := v.Args[2]
7276 if v_2.Op != OpAMD64FlagGT_ULT {
7277 break
7278 }
7279 v.reset(OpCopy)
7280 v.Type = y.Type
7281 v.AddArg(y)
7282 return true
7283 }
7284
7285
7286
7287 for {
7288 _ = v.Args[2]
7289 y := v.Args[0]
7290 v_2 := v.Args[2]
7291 if v_2.Op != OpAMD64FlagLT_ULT {
7292 break
7293 }
7294 v.reset(OpCopy)
7295 v.Type = y.Type
7296 v.AddArg(y)
7297 return true
7298 }
7299
7300
7301
7302 for {
7303 _ = v.Args[2]
7304 x := v.Args[1]
7305 v_2 := v.Args[2]
7306 if v_2.Op != OpAMD64FlagLT_UGT {
7307 break
7308 }
7309 v.reset(OpCopy)
7310 v.Type = x.Type
7311 v.AddArg(x)
7312 return true
7313 }
7314 return false
7315 }
7316 func rewriteValueAMD64_OpAMD64CMOVQLE_0(v *Value) bool {
7317
7318
7319
7320 for {
7321 _ = v.Args[2]
7322 x := v.Args[0]
7323 y := v.Args[1]
7324 v_2 := v.Args[2]
7325 if v_2.Op != OpAMD64InvertFlags {
7326 break
7327 }
7328 cond := v_2.Args[0]
7329 v.reset(OpAMD64CMOVQGE)
7330 v.AddArg(x)
7331 v.AddArg(y)
7332 v.AddArg(cond)
7333 return true
7334 }
7335
7336
7337
7338 for {
7339 _ = v.Args[2]
7340 x := v.Args[1]
7341 v_2 := v.Args[2]
7342 if v_2.Op != OpAMD64FlagEQ {
7343 break
7344 }
7345 v.reset(OpCopy)
7346 v.Type = x.Type
7347 v.AddArg(x)
7348 return true
7349 }
7350
7351
7352
7353 for {
7354 _ = v.Args[2]
7355 y := v.Args[0]
7356 v_2 := v.Args[2]
7357 if v_2.Op != OpAMD64FlagGT_UGT {
7358 break
7359 }
7360 v.reset(OpCopy)
7361 v.Type = y.Type
7362 v.AddArg(y)
7363 return true
7364 }
7365
7366
7367
7368 for {
7369 _ = v.Args[2]
7370 y := v.Args[0]
7371 v_2 := v.Args[2]
7372 if v_2.Op != OpAMD64FlagGT_ULT {
7373 break
7374 }
7375 v.reset(OpCopy)
7376 v.Type = y.Type
7377 v.AddArg(y)
7378 return true
7379 }
7380
7381
7382
7383 for {
7384 _ = v.Args[2]
7385 x := v.Args[1]
7386 v_2 := v.Args[2]
7387 if v_2.Op != OpAMD64FlagLT_ULT {
7388 break
7389 }
7390 v.reset(OpCopy)
7391 v.Type = x.Type
7392 v.AddArg(x)
7393 return true
7394 }
7395
7396
7397
7398 for {
7399 _ = v.Args[2]
7400 x := v.Args[1]
7401 v_2 := v.Args[2]
7402 if v_2.Op != OpAMD64FlagLT_UGT {
7403 break
7404 }
7405 v.reset(OpCopy)
7406 v.Type = x.Type
7407 v.AddArg(x)
7408 return true
7409 }
7410 return false
7411 }
7412 func rewriteValueAMD64_OpAMD64CMOVQLS_0(v *Value) bool {
7413
7414
7415
7416 for {
7417 _ = v.Args[2]
7418 x := v.Args[0]
7419 y := v.Args[1]
7420 v_2 := v.Args[2]
7421 if v_2.Op != OpAMD64InvertFlags {
7422 break
7423 }
7424 cond := v_2.Args[0]
7425 v.reset(OpAMD64CMOVQCC)
7426 v.AddArg(x)
7427 v.AddArg(y)
7428 v.AddArg(cond)
7429 return true
7430 }
7431
7432
7433
7434 for {
7435 _ = v.Args[2]
7436 x := v.Args[1]
7437 v_2 := v.Args[2]
7438 if v_2.Op != OpAMD64FlagEQ {
7439 break
7440 }
7441 v.reset(OpCopy)
7442 v.Type = x.Type
7443 v.AddArg(x)
7444 return true
7445 }
7446
7447
7448
7449 for {
7450 _ = v.Args[2]
7451 y := v.Args[0]
7452 v_2 := v.Args[2]
7453 if v_2.Op != OpAMD64FlagGT_UGT {
7454 break
7455 }
7456 v.reset(OpCopy)
7457 v.Type = y.Type
7458 v.AddArg(y)
7459 return true
7460 }
7461
7462
7463
7464 for {
7465 _ = v.Args[2]
7466 x := v.Args[1]
7467 v_2 := v.Args[2]
7468 if v_2.Op != OpAMD64FlagGT_ULT {
7469 break
7470 }
7471 v.reset(OpCopy)
7472 v.Type = x.Type
7473 v.AddArg(x)
7474 return true
7475 }
7476
7477
7478
7479 for {
7480 _ = v.Args[2]
7481 x := v.Args[1]
7482 v_2 := v.Args[2]
7483 if v_2.Op != OpAMD64FlagLT_ULT {
7484 break
7485 }
7486 v.reset(OpCopy)
7487 v.Type = x.Type
7488 v.AddArg(x)
7489 return true
7490 }
7491
7492
7493
7494 for {
7495 _ = v.Args[2]
7496 y := v.Args[0]
7497 v_2 := v.Args[2]
7498 if v_2.Op != OpAMD64FlagLT_UGT {
7499 break
7500 }
7501 v.reset(OpCopy)
7502 v.Type = y.Type
7503 v.AddArg(y)
7504 return true
7505 }
7506 return false
7507 }
7508 func rewriteValueAMD64_OpAMD64CMOVQLT_0(v *Value) bool {
7509
7510
7511
7512 for {
7513 _ = v.Args[2]
7514 x := v.Args[0]
7515 y := v.Args[1]
7516 v_2 := v.Args[2]
7517 if v_2.Op != OpAMD64InvertFlags {
7518 break
7519 }
7520 cond := v_2.Args[0]
7521 v.reset(OpAMD64CMOVQGT)
7522 v.AddArg(x)
7523 v.AddArg(y)
7524 v.AddArg(cond)
7525 return true
7526 }
7527
7528
7529
7530 for {
7531 _ = v.Args[2]
7532 y := v.Args[0]
7533 v_2 := v.Args[2]
7534 if v_2.Op != OpAMD64FlagEQ {
7535 break
7536 }
7537 v.reset(OpCopy)
7538 v.Type = y.Type
7539 v.AddArg(y)
7540 return true
7541 }
7542
7543
7544
7545 for {
7546 _ = v.Args[2]
7547 y := v.Args[0]
7548 v_2 := v.Args[2]
7549 if v_2.Op != OpAMD64FlagGT_UGT {
7550 break
7551 }
7552 v.reset(OpCopy)
7553 v.Type = y.Type
7554 v.AddArg(y)
7555 return true
7556 }
7557
7558
7559
7560 for {
7561 _ = v.Args[2]
7562 y := v.Args[0]
7563 v_2 := v.Args[2]
7564 if v_2.Op != OpAMD64FlagGT_ULT {
7565 break
7566 }
7567 v.reset(OpCopy)
7568 v.Type = y.Type
7569 v.AddArg(y)
7570 return true
7571 }
7572
7573
7574
7575 for {
7576 _ = v.Args[2]
7577 x := v.Args[1]
7578 v_2 := v.Args[2]
7579 if v_2.Op != OpAMD64FlagLT_ULT {
7580 break
7581 }
7582 v.reset(OpCopy)
7583 v.Type = x.Type
7584 v.AddArg(x)
7585 return true
7586 }
7587
7588
7589
7590 for {
7591 _ = v.Args[2]
7592 x := v.Args[1]
7593 v_2 := v.Args[2]
7594 if v_2.Op != OpAMD64FlagLT_UGT {
7595 break
7596 }
7597 v.reset(OpCopy)
7598 v.Type = x.Type
7599 v.AddArg(x)
7600 return true
7601 }
7602 return false
7603 }
7604 func rewriteValueAMD64_OpAMD64CMOVQNE_0(v *Value) bool {
7605
7606
7607
7608 for {
7609 _ = v.Args[2]
7610 x := v.Args[0]
7611 y := v.Args[1]
7612 v_2 := v.Args[2]
7613 if v_2.Op != OpAMD64InvertFlags {
7614 break
7615 }
7616 cond := v_2.Args[0]
7617 v.reset(OpAMD64CMOVQNE)
7618 v.AddArg(x)
7619 v.AddArg(y)
7620 v.AddArg(cond)
7621 return true
7622 }
7623
7624
7625
7626 for {
7627 _ = v.Args[2]
7628 y := v.Args[0]
7629 v_2 := v.Args[2]
7630 if v_2.Op != OpAMD64FlagEQ {
7631 break
7632 }
7633 v.reset(OpCopy)
7634 v.Type = y.Type
7635 v.AddArg(y)
7636 return true
7637 }
7638
7639
7640
7641 for {
7642 _ = v.Args[2]
7643 x := v.Args[1]
7644 v_2 := v.Args[2]
7645 if v_2.Op != OpAMD64FlagGT_UGT {
7646 break
7647 }
7648 v.reset(OpCopy)
7649 v.Type = x.Type
7650 v.AddArg(x)
7651 return true
7652 }
7653
7654
7655
7656 for {
7657 _ = v.Args[2]
7658 x := v.Args[1]
7659 v_2 := v.Args[2]
7660 if v_2.Op != OpAMD64FlagGT_ULT {
7661 break
7662 }
7663 v.reset(OpCopy)
7664 v.Type = x.Type
7665 v.AddArg(x)
7666 return true
7667 }
7668
7669
7670
7671 for {
7672 _ = v.Args[2]
7673 x := v.Args[1]
7674 v_2 := v.Args[2]
7675 if v_2.Op != OpAMD64FlagLT_ULT {
7676 break
7677 }
7678 v.reset(OpCopy)
7679 v.Type = x.Type
7680 v.AddArg(x)
7681 return true
7682 }
7683
7684
7685
7686 for {
7687 _ = v.Args[2]
7688 x := v.Args[1]
7689 v_2 := v.Args[2]
7690 if v_2.Op != OpAMD64FlagLT_UGT {
7691 break
7692 }
7693 v.reset(OpCopy)
7694 v.Type = x.Type
7695 v.AddArg(x)
7696 return true
7697 }
7698 return false
7699 }
7700 func rewriteValueAMD64_OpAMD64CMOVWCC_0(v *Value) bool {
7701
7702
7703
7704 for {
7705 _ = v.Args[2]
7706 x := v.Args[0]
7707 y := v.Args[1]
7708 v_2 := v.Args[2]
7709 if v_2.Op != OpAMD64InvertFlags {
7710 break
7711 }
7712 cond := v_2.Args[0]
7713 v.reset(OpAMD64CMOVWLS)
7714 v.AddArg(x)
7715 v.AddArg(y)
7716 v.AddArg(cond)
7717 return true
7718 }
7719
7720
7721
7722 for {
7723 _ = v.Args[2]
7724 x := v.Args[1]
7725 v_2 := v.Args[2]
7726 if v_2.Op != OpAMD64FlagEQ {
7727 break
7728 }
7729 v.reset(OpCopy)
7730 v.Type = x.Type
7731 v.AddArg(x)
7732 return true
7733 }
7734
7735
7736
7737 for {
7738 _ = v.Args[2]
7739 x := v.Args[1]
7740 v_2 := v.Args[2]
7741 if v_2.Op != OpAMD64FlagGT_UGT {
7742 break
7743 }
7744 v.reset(OpCopy)
7745 v.Type = x.Type
7746 v.AddArg(x)
7747 return true
7748 }
7749
7750
7751
7752 for {
7753 _ = v.Args[2]
7754 y := v.Args[0]
7755 v_2 := v.Args[2]
7756 if v_2.Op != OpAMD64FlagGT_ULT {
7757 break
7758 }
7759 v.reset(OpCopy)
7760 v.Type = y.Type
7761 v.AddArg(y)
7762 return true
7763 }
7764
7765
7766
7767 for {
7768 _ = v.Args[2]
7769 y := v.Args[0]
7770 v_2 := v.Args[2]
7771 if v_2.Op != OpAMD64FlagLT_ULT {
7772 break
7773 }
7774 v.reset(OpCopy)
7775 v.Type = y.Type
7776 v.AddArg(y)
7777 return true
7778 }
7779
7780
7781
7782 for {
7783 _ = v.Args[2]
7784 x := v.Args[1]
7785 v_2 := v.Args[2]
7786 if v_2.Op != OpAMD64FlagLT_UGT {
7787 break
7788 }
7789 v.reset(OpCopy)
7790 v.Type = x.Type
7791 v.AddArg(x)
7792 return true
7793 }
7794 return false
7795 }
7796 func rewriteValueAMD64_OpAMD64CMOVWCS_0(v *Value) bool {
7797
7798
7799
7800 for {
7801 _ = v.Args[2]
7802 x := v.Args[0]
7803 y := v.Args[1]
7804 v_2 := v.Args[2]
7805 if v_2.Op != OpAMD64InvertFlags {
7806 break
7807 }
7808 cond := v_2.Args[0]
7809 v.reset(OpAMD64CMOVWHI)
7810 v.AddArg(x)
7811 v.AddArg(y)
7812 v.AddArg(cond)
7813 return true
7814 }
7815
7816
7817
7818 for {
7819 _ = v.Args[2]
7820 y := v.Args[0]
7821 v_2 := v.Args[2]
7822 if v_2.Op != OpAMD64FlagEQ {
7823 break
7824 }
7825 v.reset(OpCopy)
7826 v.Type = y.Type
7827 v.AddArg(y)
7828 return true
7829 }
7830
7831
7832
7833 for {
7834 _ = v.Args[2]
7835 y := v.Args[0]
7836 v_2 := v.Args[2]
7837 if v_2.Op != OpAMD64FlagGT_UGT {
7838 break
7839 }
7840 v.reset(OpCopy)
7841 v.Type = y.Type
7842 v.AddArg(y)
7843 return true
7844 }
7845
7846
7847
7848 for {
7849 _ = v.Args[2]
7850 x := v.Args[1]
7851 v_2 := v.Args[2]
7852 if v_2.Op != OpAMD64FlagGT_ULT {
7853 break
7854 }
7855 v.reset(OpCopy)
7856 v.Type = x.Type
7857 v.AddArg(x)
7858 return true
7859 }
7860
7861
7862
7863 for {
7864 _ = v.Args[2]
7865 x := v.Args[1]
7866 v_2 := v.Args[2]
7867 if v_2.Op != OpAMD64FlagLT_ULT {
7868 break
7869 }
7870 v.reset(OpCopy)
7871 v.Type = x.Type
7872 v.AddArg(x)
7873 return true
7874 }
7875
7876
7877
7878 for {
7879 _ = v.Args[2]
7880 y := v.Args[0]
7881 v_2 := v.Args[2]
7882 if v_2.Op != OpAMD64FlagLT_UGT {
7883 break
7884 }
7885 v.reset(OpCopy)
7886 v.Type = y.Type
7887 v.AddArg(y)
7888 return true
7889 }
7890 return false
7891 }
7892 func rewriteValueAMD64_OpAMD64CMOVWEQ_0(v *Value) bool {
7893
7894
7895
7896 for {
7897 _ = v.Args[2]
7898 x := v.Args[0]
7899 y := v.Args[1]
7900 v_2 := v.Args[2]
7901 if v_2.Op != OpAMD64InvertFlags {
7902 break
7903 }
7904 cond := v_2.Args[0]
7905 v.reset(OpAMD64CMOVWEQ)
7906 v.AddArg(x)
7907 v.AddArg(y)
7908 v.AddArg(cond)
7909 return true
7910 }
7911
7912
7913
7914 for {
7915 _ = v.Args[2]
7916 x := v.Args[1]
7917 v_2 := v.Args[2]
7918 if v_2.Op != OpAMD64FlagEQ {
7919 break
7920 }
7921 v.reset(OpCopy)
7922 v.Type = x.Type
7923 v.AddArg(x)
7924 return true
7925 }
7926
7927
7928
7929 for {
7930 _ = v.Args[2]
7931 y := v.Args[0]
7932 v_2 := v.Args[2]
7933 if v_2.Op != OpAMD64FlagGT_UGT {
7934 break
7935 }
7936 v.reset(OpCopy)
7937 v.Type = y.Type
7938 v.AddArg(y)
7939 return true
7940 }
7941
7942
7943
7944 for {
7945 _ = v.Args[2]
7946 y := v.Args[0]
7947 v_2 := v.Args[2]
7948 if v_2.Op != OpAMD64FlagGT_ULT {
7949 break
7950 }
7951 v.reset(OpCopy)
7952 v.Type = y.Type
7953 v.AddArg(y)
7954 return true
7955 }
7956
7957
7958
7959 for {
7960 _ = v.Args[2]
7961 y := v.Args[0]
7962 v_2 := v.Args[2]
7963 if v_2.Op != OpAMD64FlagLT_ULT {
7964 break
7965 }
7966 v.reset(OpCopy)
7967 v.Type = y.Type
7968 v.AddArg(y)
7969 return true
7970 }
7971
7972
7973
7974 for {
7975 _ = v.Args[2]
7976 y := v.Args[0]
7977 v_2 := v.Args[2]
7978 if v_2.Op != OpAMD64FlagLT_UGT {
7979 break
7980 }
7981 v.reset(OpCopy)
7982 v.Type = y.Type
7983 v.AddArg(y)
7984 return true
7985 }
7986 return false
7987 }
7988 func rewriteValueAMD64_OpAMD64CMOVWGE_0(v *Value) bool {
7989
7990
7991
7992 for {
7993 _ = v.Args[2]
7994 x := v.Args[0]
7995 y := v.Args[1]
7996 v_2 := v.Args[2]
7997 if v_2.Op != OpAMD64InvertFlags {
7998 break
7999 }
8000 cond := v_2.Args[0]
8001 v.reset(OpAMD64CMOVWLE)
8002 v.AddArg(x)
8003 v.AddArg(y)
8004 v.AddArg(cond)
8005 return true
8006 }
8007
8008
8009
8010 for {
8011 _ = v.Args[2]
8012 x := v.Args[1]
8013 v_2 := v.Args[2]
8014 if v_2.Op != OpAMD64FlagEQ {
8015 break
8016 }
8017 v.reset(OpCopy)
8018 v.Type = x.Type
8019 v.AddArg(x)
8020 return true
8021 }
8022
8023
8024
8025 for {
8026 _ = v.Args[2]
8027 x := v.Args[1]
8028 v_2 := v.Args[2]
8029 if v_2.Op != OpAMD64FlagGT_UGT {
8030 break
8031 }
8032 v.reset(OpCopy)
8033 v.Type = x.Type
8034 v.AddArg(x)
8035 return true
8036 }
8037
8038
8039
8040 for {
8041 _ = v.Args[2]
8042 x := v.Args[1]
8043 v_2 := v.Args[2]
8044 if v_2.Op != OpAMD64FlagGT_ULT {
8045 break
8046 }
8047 v.reset(OpCopy)
8048 v.Type = x.Type
8049 v.AddArg(x)
8050 return true
8051 }
8052
8053
8054
8055 for {
8056 _ = v.Args[2]
8057 y := v.Args[0]
8058 v_2 := v.Args[2]
8059 if v_2.Op != OpAMD64FlagLT_ULT {
8060 break
8061 }
8062 v.reset(OpCopy)
8063 v.Type = y.Type
8064 v.AddArg(y)
8065 return true
8066 }
8067
8068
8069
8070 for {
8071 _ = v.Args[2]
8072 y := v.Args[0]
8073 v_2 := v.Args[2]
8074 if v_2.Op != OpAMD64FlagLT_UGT {
8075 break
8076 }
8077 v.reset(OpCopy)
8078 v.Type = y.Type
8079 v.AddArg(y)
8080 return true
8081 }
8082 return false
8083 }
8084 func rewriteValueAMD64_OpAMD64CMOVWGT_0(v *Value) bool {
8085
8086
8087
8088 for {
8089 _ = v.Args[2]
8090 x := v.Args[0]
8091 y := v.Args[1]
8092 v_2 := v.Args[2]
8093 if v_2.Op != OpAMD64InvertFlags {
8094 break
8095 }
8096 cond := v_2.Args[0]
8097 v.reset(OpAMD64CMOVWLT)
8098 v.AddArg(x)
8099 v.AddArg(y)
8100 v.AddArg(cond)
8101 return true
8102 }
8103
8104
8105
8106 for {
8107 _ = v.Args[2]
8108 y := v.Args[0]
8109 v_2 := v.Args[2]
8110 if v_2.Op != OpAMD64FlagEQ {
8111 break
8112 }
8113 v.reset(OpCopy)
8114 v.Type = y.Type
8115 v.AddArg(y)
8116 return true
8117 }
8118
8119
8120
8121 for {
8122 _ = v.Args[2]
8123 x := v.Args[1]
8124 v_2 := v.Args[2]
8125 if v_2.Op != OpAMD64FlagGT_UGT {
8126 break
8127 }
8128 v.reset(OpCopy)
8129 v.Type = x.Type
8130 v.AddArg(x)
8131 return true
8132 }
8133
8134
8135
8136 for {
8137 _ = v.Args[2]
8138 x := v.Args[1]
8139 v_2 := v.Args[2]
8140 if v_2.Op != OpAMD64FlagGT_ULT {
8141 break
8142 }
8143 v.reset(OpCopy)
8144 v.Type = x.Type
8145 v.AddArg(x)
8146 return true
8147 }
8148
8149
8150
8151 for {
8152 _ = v.Args[2]
8153 y := v.Args[0]
8154 v_2 := v.Args[2]
8155 if v_2.Op != OpAMD64FlagLT_ULT {
8156 break
8157 }
8158 v.reset(OpCopy)
8159 v.Type = y.Type
8160 v.AddArg(y)
8161 return true
8162 }
8163
8164
8165
8166 for {
8167 _ = v.Args[2]
8168 y := v.Args[0]
8169 v_2 := v.Args[2]
8170 if v_2.Op != OpAMD64FlagLT_UGT {
8171 break
8172 }
8173 v.reset(OpCopy)
8174 v.Type = y.Type
8175 v.AddArg(y)
8176 return true
8177 }
8178 return false
8179 }
8180 func rewriteValueAMD64_OpAMD64CMOVWHI_0(v *Value) bool {
8181
8182
8183
8184 for {
8185 _ = v.Args[2]
8186 x := v.Args[0]
8187 y := v.Args[1]
8188 v_2 := v.Args[2]
8189 if v_2.Op != OpAMD64InvertFlags {
8190 break
8191 }
8192 cond := v_2.Args[0]
8193 v.reset(OpAMD64CMOVWCS)
8194 v.AddArg(x)
8195 v.AddArg(y)
8196 v.AddArg(cond)
8197 return true
8198 }
8199
8200
8201
8202 for {
8203 _ = v.Args[2]
8204 y := v.Args[0]
8205 v_2 := v.Args[2]
8206 if v_2.Op != OpAMD64FlagEQ {
8207 break
8208 }
8209 v.reset(OpCopy)
8210 v.Type = y.Type
8211 v.AddArg(y)
8212 return true
8213 }
8214
8215
8216
8217 for {
8218 _ = v.Args[2]
8219 x := v.Args[1]
8220 v_2 := v.Args[2]
8221 if v_2.Op != OpAMD64FlagGT_UGT {
8222 break
8223 }
8224 v.reset(OpCopy)
8225 v.Type = x.Type
8226 v.AddArg(x)
8227 return true
8228 }
8229
8230
8231
8232 for {
8233 _ = v.Args[2]
8234 y := v.Args[0]
8235 v_2 := v.Args[2]
8236 if v_2.Op != OpAMD64FlagGT_ULT {
8237 break
8238 }
8239 v.reset(OpCopy)
8240 v.Type = y.Type
8241 v.AddArg(y)
8242 return true
8243 }
8244
8245
8246
8247 for {
8248 _ = v.Args[2]
8249 y := v.Args[0]
8250 v_2 := v.Args[2]
8251 if v_2.Op != OpAMD64FlagLT_ULT {
8252 break
8253 }
8254 v.reset(OpCopy)
8255 v.Type = y.Type
8256 v.AddArg(y)
8257 return true
8258 }
8259
8260
8261
8262 for {
8263 _ = v.Args[2]
8264 x := v.Args[1]
8265 v_2 := v.Args[2]
8266 if v_2.Op != OpAMD64FlagLT_UGT {
8267 break
8268 }
8269 v.reset(OpCopy)
8270 v.Type = x.Type
8271 v.AddArg(x)
8272 return true
8273 }
8274 return false
8275 }
8276 func rewriteValueAMD64_OpAMD64CMOVWLE_0(v *Value) bool {
8277
8278
8279
8280 for {
8281 _ = v.Args[2]
8282 x := v.Args[0]
8283 y := v.Args[1]
8284 v_2 := v.Args[2]
8285 if v_2.Op != OpAMD64InvertFlags {
8286 break
8287 }
8288 cond := v_2.Args[0]
8289 v.reset(OpAMD64CMOVWGE)
8290 v.AddArg(x)
8291 v.AddArg(y)
8292 v.AddArg(cond)
8293 return true
8294 }
8295
8296
8297
8298 for {
8299 _ = v.Args[2]
8300 x := v.Args[1]
8301 v_2 := v.Args[2]
8302 if v_2.Op != OpAMD64FlagEQ {
8303 break
8304 }
8305 v.reset(OpCopy)
8306 v.Type = x.Type
8307 v.AddArg(x)
8308 return true
8309 }
8310
8311
8312
8313 for {
8314 _ = v.Args[2]
8315 y := v.Args[0]
8316 v_2 := v.Args[2]
8317 if v_2.Op != OpAMD64FlagGT_UGT {
8318 break
8319 }
8320 v.reset(OpCopy)
8321 v.Type = y.Type
8322 v.AddArg(y)
8323 return true
8324 }
8325
8326
8327
8328 for {
8329 _ = v.Args[2]
8330 y := v.Args[0]
8331 v_2 := v.Args[2]
8332 if v_2.Op != OpAMD64FlagGT_ULT {
8333 break
8334 }
8335 v.reset(OpCopy)
8336 v.Type = y.Type
8337 v.AddArg(y)
8338 return true
8339 }
8340
8341
8342
8343 for {
8344 _ = v.Args[2]
8345 x := v.Args[1]
8346 v_2 := v.Args[2]
8347 if v_2.Op != OpAMD64FlagLT_ULT {
8348 break
8349 }
8350 v.reset(OpCopy)
8351 v.Type = x.Type
8352 v.AddArg(x)
8353 return true
8354 }
8355
8356
8357
8358 for {
8359 _ = v.Args[2]
8360 x := v.Args[1]
8361 v_2 := v.Args[2]
8362 if v_2.Op != OpAMD64FlagLT_UGT {
8363 break
8364 }
8365 v.reset(OpCopy)
8366 v.Type = x.Type
8367 v.AddArg(x)
8368 return true
8369 }
8370 return false
8371 }
8372 func rewriteValueAMD64_OpAMD64CMOVWLS_0(v *Value) bool {
8373
8374
8375
8376 for {
8377 _ = v.Args[2]
8378 x := v.Args[0]
8379 y := v.Args[1]
8380 v_2 := v.Args[2]
8381 if v_2.Op != OpAMD64InvertFlags {
8382 break
8383 }
8384 cond := v_2.Args[0]
8385 v.reset(OpAMD64CMOVWCC)
8386 v.AddArg(x)
8387 v.AddArg(y)
8388 v.AddArg(cond)
8389 return true
8390 }
8391
8392
8393
8394 for {
8395 _ = v.Args[2]
8396 x := v.Args[1]
8397 v_2 := v.Args[2]
8398 if v_2.Op != OpAMD64FlagEQ {
8399 break
8400 }
8401 v.reset(OpCopy)
8402 v.Type = x.Type
8403 v.AddArg(x)
8404 return true
8405 }
8406
8407
8408
8409 for {
8410 _ = v.Args[2]
8411 y := v.Args[0]
8412 v_2 := v.Args[2]
8413 if v_2.Op != OpAMD64FlagGT_UGT {
8414 break
8415 }
8416 v.reset(OpCopy)
8417 v.Type = y.Type
8418 v.AddArg(y)
8419 return true
8420 }
8421
8422
8423
8424 for {
8425 _ = v.Args[2]
8426 x := v.Args[1]
8427 v_2 := v.Args[2]
8428 if v_2.Op != OpAMD64FlagGT_ULT {
8429 break
8430 }
8431 v.reset(OpCopy)
8432 v.Type = x.Type
8433 v.AddArg(x)
8434 return true
8435 }
8436
8437
8438
8439 for {
8440 _ = v.Args[2]
8441 x := v.Args[1]
8442 v_2 := v.Args[2]
8443 if v_2.Op != OpAMD64FlagLT_ULT {
8444 break
8445 }
8446 v.reset(OpCopy)
8447 v.Type = x.Type
8448 v.AddArg(x)
8449 return true
8450 }
8451
8452
8453
8454 for {
8455 _ = v.Args[2]
8456 y := v.Args[0]
8457 v_2 := v.Args[2]
8458 if v_2.Op != OpAMD64FlagLT_UGT {
8459 break
8460 }
8461 v.reset(OpCopy)
8462 v.Type = y.Type
8463 v.AddArg(y)
8464 return true
8465 }
8466 return false
8467 }
8468 func rewriteValueAMD64_OpAMD64CMOVWLT_0(v *Value) bool {
8469
8470
8471
8472 for {
8473 _ = v.Args[2]
8474 x := v.Args[0]
8475 y := v.Args[1]
8476 v_2 := v.Args[2]
8477 if v_2.Op != OpAMD64InvertFlags {
8478 break
8479 }
8480 cond := v_2.Args[0]
8481 v.reset(OpAMD64CMOVWGT)
8482 v.AddArg(x)
8483 v.AddArg(y)
8484 v.AddArg(cond)
8485 return true
8486 }
8487
8488
8489
8490 for {
8491 _ = v.Args[2]
8492 y := v.Args[0]
8493 v_2 := v.Args[2]
8494 if v_2.Op != OpAMD64FlagEQ {
8495 break
8496 }
8497 v.reset(OpCopy)
8498 v.Type = y.Type
8499 v.AddArg(y)
8500 return true
8501 }
8502
8503
8504
8505 for {
8506 _ = v.Args[2]
8507 y := v.Args[0]
8508 v_2 := v.Args[2]
8509 if v_2.Op != OpAMD64FlagGT_UGT {
8510 break
8511 }
8512 v.reset(OpCopy)
8513 v.Type = y.Type
8514 v.AddArg(y)
8515 return true
8516 }
8517
8518
8519
8520 for {
8521 _ = v.Args[2]
8522 y := v.Args[0]
8523 v_2 := v.Args[2]
8524 if v_2.Op != OpAMD64FlagGT_ULT {
8525 break
8526 }
8527 v.reset(OpCopy)
8528 v.Type = y.Type
8529 v.AddArg(y)
8530 return true
8531 }
8532
8533
8534
8535 for {
8536 _ = v.Args[2]
8537 x := v.Args[1]
8538 v_2 := v.Args[2]
8539 if v_2.Op != OpAMD64FlagLT_ULT {
8540 break
8541 }
8542 v.reset(OpCopy)
8543 v.Type = x.Type
8544 v.AddArg(x)
8545 return true
8546 }
8547
8548
8549
8550 for {
8551 _ = v.Args[2]
8552 x := v.Args[1]
8553 v_2 := v.Args[2]
8554 if v_2.Op != OpAMD64FlagLT_UGT {
8555 break
8556 }
8557 v.reset(OpCopy)
8558 v.Type = x.Type
8559 v.AddArg(x)
8560 return true
8561 }
8562 return false
8563 }
8564 func rewriteValueAMD64_OpAMD64CMOVWNE_0(v *Value) bool {
8565
8566
8567
8568 for {
8569 _ = v.Args[2]
8570 x := v.Args[0]
8571 y := v.Args[1]
8572 v_2 := v.Args[2]
8573 if v_2.Op != OpAMD64InvertFlags {
8574 break
8575 }
8576 cond := v_2.Args[0]
8577 v.reset(OpAMD64CMOVWNE)
8578 v.AddArg(x)
8579 v.AddArg(y)
8580 v.AddArg(cond)
8581 return true
8582 }
8583
8584
8585
8586 for {
8587 _ = v.Args[2]
8588 y := v.Args[0]
8589 v_2 := v.Args[2]
8590 if v_2.Op != OpAMD64FlagEQ {
8591 break
8592 }
8593 v.reset(OpCopy)
8594 v.Type = y.Type
8595 v.AddArg(y)
8596 return true
8597 }
8598
8599
8600
8601 for {
8602 _ = v.Args[2]
8603 x := v.Args[1]
8604 v_2 := v.Args[2]
8605 if v_2.Op != OpAMD64FlagGT_UGT {
8606 break
8607 }
8608 v.reset(OpCopy)
8609 v.Type = x.Type
8610 v.AddArg(x)
8611 return true
8612 }
8613
8614
8615
8616 for {
8617 _ = v.Args[2]
8618 x := v.Args[1]
8619 v_2 := v.Args[2]
8620 if v_2.Op != OpAMD64FlagGT_ULT {
8621 break
8622 }
8623 v.reset(OpCopy)
8624 v.Type = x.Type
8625 v.AddArg(x)
8626 return true
8627 }
8628
8629
8630
8631 for {
8632 _ = v.Args[2]
8633 x := v.Args[1]
8634 v_2 := v.Args[2]
8635 if v_2.Op != OpAMD64FlagLT_ULT {
8636 break
8637 }
8638 v.reset(OpCopy)
8639 v.Type = x.Type
8640 v.AddArg(x)
8641 return true
8642 }
8643
8644
8645
8646 for {
8647 _ = v.Args[2]
8648 x := v.Args[1]
8649 v_2 := v.Args[2]
8650 if v_2.Op != OpAMD64FlagLT_UGT {
8651 break
8652 }
8653 v.reset(OpCopy)
8654 v.Type = x.Type
8655 v.AddArg(x)
8656 return true
8657 }
8658 return false
8659 }
8660 func rewriteValueAMD64_OpAMD64CMPB_0(v *Value) bool {
8661 b := v.Block
	// match: (CMPB x (MOVLconst [c]))
	// cond:
	// result: (CMPBconst x [int64(int8(c))])
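	// The constant operand is narrowed to its low 8 bits (int64(int8(c)))
	// before becoming the immediate of the byte compare.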
8665 for {
8666 _ = v.Args[1]
8667 x := v.Args[0]
8668 v_1 := v.Args[1]
8669 if v_1.Op != OpAMD64MOVLconst {
8670 break
8671 }
8672 c := v_1.AuxInt
8673 v.reset(OpAMD64CMPBconst)
8674 v.AuxInt = int64(int8(c))
8675 v.AddArg(x)
8676 return true
8677 }
// match: (CMPB (MOVLconst [c]) x)
// cond:
// result: (InvertFlags (CMPBconst x [int64(int8(c))]))
8681 for {
8682 x := v.Args[1]
8683 v_0 := v.Args[0]
8684 if v_0.Op != OpAMD64MOVLconst {
8685 break
8686 }
8687 c := v_0.AuxInt
8688 v.reset(OpAMD64InvertFlags)
8689 v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
8690 v0.AuxInt = int64(int8(c))
8691 v0.AddArg(x)
8692 v.AddArg(v0)
8693 return true
8694 }
// match: (CMPB l:(MOVBload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l) && clobber(l)
// result: (CMPBload [off] {sym} ptr x mem)
8698 for {
8699 x := v.Args[1]
8700 l := v.Args[0]
8701 if l.Op != OpAMD64MOVBload {
8702 break
8703 }
8704 off := l.AuxInt
8705 sym := l.Aux
8706 mem := l.Args[1]
8707 ptr := l.Args[0]
8708 if !(canMergeLoad(v, l) && clobber(l)) {
8709 break
8710 }
8711 v.reset(OpAMD64CMPBload)
8712 v.AuxInt = off
8713 v.Aux = sym
8714 v.AddArg(ptr)
8715 v.AddArg(x)
8716 v.AddArg(mem)
8717 return true
8718 }
// match: (CMPB x l:(MOVBload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l) && clobber(l)
// result: (InvertFlags (CMPBload [off] {sym} ptr x mem))
8722 for {
8723 _ = v.Args[1]
8724 x := v.Args[0]
8725 l := v.Args[1]
8726 if l.Op != OpAMD64MOVBload {
8727 break
8728 }
8729 off := l.AuxInt
8730 sym := l.Aux
8731 mem := l.Args[1]
8732 ptr := l.Args[0]
8733 if !(canMergeLoad(v, l) && clobber(l)) {
8734 break
8735 }
8736 v.reset(OpAMD64InvertFlags)
8737 v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
8738 v0.AuxInt = off
8739 v0.Aux = sym
8740 v0.AddArg(ptr)
8741 v0.AddArg(x)
8742 v0.AddArg(mem)
8743 v.AddArg(v0)
8744 return true
8745 }
8746 return false
8747 }
8748 func rewriteValueAMD64_OpAMD64CMPBconst_0(v *Value) bool {
8749 b := v.Block
// match: (CMPBconst (MOVLconst [x]) [y])
// cond: int8(x)==int8(y)
// result: (FlagEQ)
8753 for {
8754 y := v.AuxInt
8755 v_0 := v.Args[0]
8756 if v_0.Op != OpAMD64MOVLconst {
8757 break
8758 }
8759 x := v_0.AuxInt
8760 if !(int8(x) == int8(y)) {
8761 break
8762 }
8763 v.reset(OpAMD64FlagEQ)
8764 return true
8765 }
// match: (CMPBconst (MOVLconst [x]) [y])
// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
// result: (FlagLT_ULT)
8769 for {
8770 y := v.AuxInt
8771 v_0 := v.Args[0]
8772 if v_0.Op != OpAMD64MOVLconst {
8773 break
8774 }
8775 x := v_0.AuxInt
8776 if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
8777 break
8778 }
8779 v.reset(OpAMD64FlagLT_ULT)
8780 return true
8781 }
// match: (CMPBconst (MOVLconst [x]) [y])
// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
// result: (FlagLT_UGT)
8785 for {
8786 y := v.AuxInt
8787 v_0 := v.Args[0]
8788 if v_0.Op != OpAMD64MOVLconst {
8789 break
8790 }
8791 x := v_0.AuxInt
8792 if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
8793 break
8794 }
8795 v.reset(OpAMD64FlagLT_UGT)
8796 return true
8797 }
// match: (CMPBconst (MOVLconst [x]) [y])
// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
// result: (FlagGT_ULT)
8801 for {
8802 y := v.AuxInt
8803 v_0 := v.Args[0]
8804 if v_0.Op != OpAMD64MOVLconst {
8805 break
8806 }
8807 x := v_0.AuxInt
8808 if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
8809 break
8810 }
8811 v.reset(OpAMD64FlagGT_ULT)
8812 return true
8813 }
// match: (CMPBconst (MOVLconst [x]) [y])
// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
// result: (FlagGT_UGT)
8817 for {
8818 y := v.AuxInt
8819 v_0 := v.Args[0]
8820 if v_0.Op != OpAMD64MOVLconst {
8821 break
8822 }
8823 x := v_0.AuxInt
8824 if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
8825 break
8826 }
8827 v.reset(OpAMD64FlagGT_UGT)
8828 return true
8829 }
// match: (CMPBconst (ANDLconst _ [m]) [n])
// cond: 0 <= int8(m) && int8(m) < int8(n)
// result: (FlagLT_ULT)
8833 for {
8834 n := v.AuxInt
8835 v_0 := v.Args[0]
8836 if v_0.Op != OpAMD64ANDLconst {
8837 break
8838 }
8839 m := v_0.AuxInt
8840 if !(0 <= int8(m) && int8(m) < int8(n)) {
8841 break
8842 }
8843 v.reset(OpAMD64FlagLT_ULT)
8844 return true
8845 }
// match: (CMPBconst (ANDL x y) [0])
// cond:
// result: (TESTB x y)
8849 for {
8850 if v.AuxInt != 0 {
8851 break
8852 }
8853 v_0 := v.Args[0]
8854 if v_0.Op != OpAMD64ANDL {
8855 break
8856 }
8857 y := v_0.Args[1]
8858 x := v_0.Args[0]
8859 v.reset(OpAMD64TESTB)
8860 v.AddArg(x)
8861 v.AddArg(y)
8862 return true
8863 }
// match: (CMPBconst (ANDLconst [c] x) [0])
// cond:
// result: (TESTBconst [int64(int8(c))] x)
8867 for {
8868 if v.AuxInt != 0 {
8869 break
8870 }
8871 v_0 := v.Args[0]
8872 if v_0.Op != OpAMD64ANDLconst {
8873 break
8874 }
8875 c := v_0.AuxInt
8876 x := v_0.Args[0]
8877 v.reset(OpAMD64TESTBconst)
8878 v.AuxInt = int64(int8(c))
8879 v.AddArg(x)
8880 return true
8881 }
// match: (CMPBconst x [0])
// cond:
// result: (TESTB x x)
8885 for {
8886 if v.AuxInt != 0 {
8887 break
8888 }
8889 x := v.Args[0]
8890 v.reset(OpAMD64TESTB)
8891 v.AddArg(x)
8892 v.AddArg(x)
8893 return true
8894 }
// match: (CMPBconst l:(MOVBload [off] {sym} ptr mem) [c])
// cond: l.Uses == 1 && validValAndOff(c, off) && clobber(l)
// result: @l.Block (CMPBconstload [makeValAndOff(c,off)] {sym} ptr mem)
8898 for {
8899 c := v.AuxInt
8900 l := v.Args[0]
8901 if l.Op != OpAMD64MOVBload {
8902 break
8903 }
8904 off := l.AuxInt
8905 sym := l.Aux
8906 mem := l.Args[1]
8907 ptr := l.Args[0]
8908 if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
8909 break
8910 }
8911 b = l.Block
8912 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
8913 v.reset(OpCopy)
8914 v.AddArg(v0)
8915 v0.AuxInt = makeValAndOff(c, off)
8916 v0.Aux = sym
8917 v0.AddArg(ptr)
8918 v0.AddArg(mem)
8919 return true
8920 }
8921 return false
8922 }
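// CMPBconstload carries both the compare constant and the load offset in a
// single AuxInt, packed as a ValAndOff. A minimal sketch of that packing,
// assuming the usual layout of the value in the high 32 bits and the offset
// in the low 32 bits (packValAndOff is an illustrative name, not the
// compiler's helper):
//
//	func packValAndOff(val, off int32) int64 {
//		return int64(val)<<32 | int64(uint32(off))
//	}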
8923 func rewriteValueAMD64_OpAMD64CMPBconstload_0(v *Value) bool {
// match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
// cond: ValAndOff(valoff1).canAdd(off2)
// result: (CMPBconstload [ValAndOff(valoff1).add(off2)] {sym} base mem)
8927 for {
8928 valoff1 := v.AuxInt
8929 sym := v.Aux
8930 mem := v.Args[1]
8931 v_0 := v.Args[0]
8932 if v_0.Op != OpAMD64ADDQconst {
8933 break
8934 }
8935 off2 := v_0.AuxInt
8936 base := v_0.Args[0]
8937 if !(ValAndOff(valoff1).canAdd(off2)) {
8938 break
8939 }
8940 v.reset(OpAMD64CMPBconstload)
8941 v.AuxInt = ValAndOff(valoff1).add(off2)
8942 v.Aux = sym
8943 v.AddArg(base)
8944 v.AddArg(mem)
8945 return true
8946 }
// match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
// result: (CMPBconstload [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
8950 for {
8951 valoff1 := v.AuxInt
8952 sym1 := v.Aux
8953 mem := v.Args[1]
8954 v_0 := v.Args[0]
8955 if v_0.Op != OpAMD64LEAQ {
8956 break
8957 }
8958 off2 := v_0.AuxInt
8959 sym2 := v_0.Aux
8960 base := v_0.Args[0]
8961 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
8962 break
8963 }
8964 v.reset(OpAMD64CMPBconstload)
8965 v.AuxInt = ValAndOff(valoff1).add(off2)
8966 v.Aux = mergeSym(sym1, sym2)
8967 v.AddArg(base)
8968 v.AddArg(mem)
8969 return true
8970 }
8971 return false
8972 }
8973 func rewriteValueAMD64_OpAMD64CMPBload_0(v *Value) bool {
// match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(off1+off2)
// result: (CMPBload [off1+off2] {sym} base val mem)
8977 for {
8978 off1 := v.AuxInt
8979 sym := v.Aux
8980 mem := v.Args[2]
8981 v_0 := v.Args[0]
8982 if v_0.Op != OpAMD64ADDQconst {
8983 break
8984 }
8985 off2 := v_0.AuxInt
8986 base := v_0.Args[0]
8987 val := v.Args[1]
8988 if !(is32Bit(off1 + off2)) {
8989 break
8990 }
8991 v.reset(OpAMD64CMPBload)
8992 v.AuxInt = off1 + off2
8993 v.Aux = sym
8994 v.AddArg(base)
8995 v.AddArg(val)
8996 v.AddArg(mem)
8997 return true
8998 }
// match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
9002 for {
9003 off1 := v.AuxInt
9004 sym1 := v.Aux
9005 mem := v.Args[2]
9006 v_0 := v.Args[0]
9007 if v_0.Op != OpAMD64LEAQ {
9008 break
9009 }
9010 off2 := v_0.AuxInt
9011 sym2 := v_0.Aux
9012 base := v_0.Args[0]
9013 val := v.Args[1]
9014 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
9015 break
9016 }
9017 v.reset(OpAMD64CMPBload)
9018 v.AuxInt = off1 + off2
9019 v.Aux = mergeSym(sym1, sym2)
9020 v.AddArg(base)
9021 v.AddArg(val)
9022 v.AddArg(mem)
9023 return true
9024 }
// match: (CMPBload [off] {sym} ptr (MOVLconst [c]) mem)
// cond: validValAndOff(int64(int8(c)), off)
// result: (CMPBconstload [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem)
9028 for {
9029 off := v.AuxInt
9030 sym := v.Aux
9031 mem := v.Args[2]
9032 ptr := v.Args[0]
9033 v_1 := v.Args[1]
9034 if v_1.Op != OpAMD64MOVLconst {
9035 break
9036 }
9037 c := v_1.AuxInt
9038 if !(validValAndOff(int64(int8(c)), off)) {
9039 break
9040 }
9041 v.reset(OpAMD64CMPBconstload)
9042 v.AuxInt = makeValAndOff(int64(int8(c)), off)
9043 v.Aux = sym
9044 v.AddArg(ptr)
9045 v.AddArg(mem)
9046 return true
9047 }
9048 return false
9049 }
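// The compare-with-memory opcodes above absorb ADDQconst and LEAQ address
// arithmetic into their [off] {sym} fields, guarded by is32Bit because an
// x86 addressing mode only has a signed 32-bit displacement. A sketch of
// that guard, assuming the standard overflow check (fitsInDisplacement is
// an illustrative name):
//
//	func fitsInDisplacement(off1, off2 int64) bool {
//		sum := off1 + off2
//		return sum == int64(int32(sum))
//	}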
9050 func rewriteValueAMD64_OpAMD64CMPL_0(v *Value) bool {
9051 b := v.Block
// match: (CMPL x (MOVLconst [c]))
// cond:
// result: (CMPLconst x [c])
9055 for {
9056 _ = v.Args[1]
9057 x := v.Args[0]
9058 v_1 := v.Args[1]
9059 if v_1.Op != OpAMD64MOVLconst {
9060 break
9061 }
9062 c := v_1.AuxInt
9063 v.reset(OpAMD64CMPLconst)
9064 v.AuxInt = c
9065 v.AddArg(x)
9066 return true
9067 }
// match: (CMPL (MOVLconst [c]) x)
// cond:
// result: (InvertFlags (CMPLconst x [c]))
9071 for {
9072 x := v.Args[1]
9073 v_0 := v.Args[0]
9074 if v_0.Op != OpAMD64MOVLconst {
9075 break
9076 }
9077 c := v_0.AuxInt
9078 v.reset(OpAMD64InvertFlags)
9079 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
9080 v0.AuxInt = c
9081 v0.AddArg(x)
9082 v.AddArg(v0)
9083 return true
9084 }
// match: (CMPL l:(MOVLload [off] {sym} ptr mem) x)
// cond: canMergeLoad(v, l) && clobber(l)
// result: (CMPLload [off] {sym} ptr x mem)
9088 for {
9089 x := v.Args[1]
9090 l := v.Args[0]
9091 if l.Op != OpAMD64MOVLload {
9092 break
9093 }
9094 off := l.AuxInt
9095 sym := l.Aux
9096 mem := l.Args[1]
9097 ptr := l.Args[0]
9098 if !(canMergeLoad(v, l) && clobber(l)) {
9099 break
9100 }
9101 v.reset(OpAMD64CMPLload)
9102 v.AuxInt = off
9103 v.Aux = sym
9104 v.AddArg(ptr)
9105 v.AddArg(x)
9106 v.AddArg(mem)
9107 return true
9108 }
// match: (CMPL x l:(MOVLload [off] {sym} ptr mem))
// cond: canMergeLoad(v, l) && clobber(l)
// result: (InvertFlags (CMPLload [off] {sym} ptr x mem))
9112 for {
9113 _ = v.Args[1]
9114 x := v.Args[0]
9115 l := v.Args[1]
9116 if l.Op != OpAMD64MOVLload {
9117 break
9118 }
9119 off := l.AuxInt
9120 sym := l.Aux
9121 mem := l.Args[1]
9122 ptr := l.Args[0]
9123 if !(canMergeLoad(v, l) && clobber(l)) {
9124 break
9125 }
9126 v.reset(OpAMD64InvertFlags)
9127 v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
9128 v0.AuxInt = off
9129 v0.Aux = sym
9130 v0.AddArg(ptr)
9131 v0.AddArg(x)
9132 v0.AddArg(mem)
9133 v.AddArg(v0)
9134 return true
9135 }
9136 return false
9137 }
9138 func rewriteValueAMD64_OpAMD64CMPLconst_0(v *Value) bool {
9139
9140
9141
9142 for {
9143 y := v.AuxInt
9144 v_0 := v.Args[0]
9145 if v_0.Op != OpAMD64MOVLconst {
9146 break
9147 }
9148 x := v_0.AuxInt
9149 if !(int32(x) == int32(y)) {
9150 break
9151 }
9152 v.reset(OpAMD64FlagEQ)
9153 return true
9154 }
9155
9156
9157
9158 for {
9159 y := v.AuxInt
9160 v_0 := v.Args[0]
9161 if v_0.Op != OpAMD64MOVLconst {
9162 break
9163 }
9164 x := v_0.AuxInt
9165 if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
9166 break
9167 }
9168 v.reset(OpAMD64FlagLT_ULT)
9169 return true
9170 }
9171
9172
9173
9174 for {
9175 y := v.AuxInt
9176 v_0 := v.Args[0]
9177 if v_0.Op != OpAMD64MOVLconst {
9178 break
9179 }
9180 x := v_0.AuxInt
9181 if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
9182 break
9183 }
9184 v.reset(OpAMD64FlagLT_UGT)
9185 return true
9186 }
9187
9188
9189
9190 for {
9191 y := v.AuxInt
9192 v_0 := v.Args[0]
9193 if v_0.Op != OpAMD64MOVLconst {
9194 break
9195 }
9196 x := v_0.AuxInt
9197 if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
9198 break
9199 }
9200 v.reset(OpAMD64FlagGT_ULT)
9201 return true
9202 }
9203
9204
9205
9206 for {
9207 y := v.AuxInt
9208 v_0 := v.Args[0]
9209 if v_0.Op != OpAMD64MOVLconst {
9210 break
9211 }
9212 x := v_0.AuxInt
9213 if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
9214 break
9215 }
9216 v.reset(OpAMD64FlagGT_UGT)
9217 return true
9218 }
// match: (CMPLconst (SHRLconst _ [c]) [n])
// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
// result: (FlagLT_ULT)
9222 for {
9223 n := v.AuxInt
9224 v_0 := v.Args[0]
9225 if v_0.Op != OpAMD64SHRLconst {
9226 break
9227 }
9228 c := v_0.AuxInt
9229 if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
9230 break
9231 }
9232 v.reset(OpAMD64FlagLT_ULT)
9233 return true
9234 }
9235
9236
9237
9238 for {
9239 n := v.AuxInt
9240 v_0 := v.Args[0]
9241 if v_0.Op != OpAMD64ANDLconst {
9242 break
9243 }
9244 m := v_0.AuxInt
9245 if !(0 <= int32(m) && int32(m) < int32(n)) {
9246 break
9247 }
9248 v.reset(OpAMD64FlagLT_ULT)
9249 return true
9250 }
9251
9252
9253
9254 for {
9255 if v.AuxInt != 0 {
9256 break
9257 }
9258 v_0 := v.Args[0]
9259 if v_0.Op != OpAMD64ANDL {
9260 break
9261 }
9262 y := v_0.Args[1]
9263 x := v_0.Args[0]
9264 v.reset(OpAMD64TESTL)
9265 v.AddArg(x)
9266 v.AddArg(y)
9267 return true
9268 }
9269
9270
9271
9272 for {
9273 if v.AuxInt != 0 {
9274 break
9275 }
9276 v_0 := v.Args[0]
9277 if v_0.Op != OpAMD64ANDLconst {
9278 break
9279 }
9280 c := v_0.AuxInt
9281 x := v_0.Args[0]
9282 v.reset(OpAMD64TESTLconst)
9283 v.AuxInt = c
9284 v.AddArg(x)
9285 return true
9286 }
9287
9288
9289
9290 for {
9291 if v.AuxInt != 0 {
9292 break
9293 }
9294 x := v.Args[0]
9295 v.reset(OpAMD64TESTL)
9296 v.AddArg(x)
9297 v.AddArg(x)
9298 return true
9299 }
9300 return false
9301 }
9302 func rewriteValueAMD64_OpAMD64CMPLconst_10(v *Value) bool {
9303 b := v.Block
9304
9305
9306
9307 for {
9308 c := v.AuxInt
9309 l := v.Args[0]
9310 if l.Op != OpAMD64MOVLload {
9311 break
9312 }
9313 off := l.AuxInt
9314 sym := l.Aux
9315 mem := l.Args[1]
9316 ptr := l.Args[0]
9317 if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
9318 break
9319 }
9320 b = l.Block
9321 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
9322 v.reset(OpCopy)
9323 v.AddArg(v0)
9324 v0.AuxInt = makeValAndOff(c, off)
9325 v0.Aux = sym
9326 v0.AddArg(ptr)
9327 v0.AddArg(mem)
9328 return true
9329 }
9330 return false
9331 }
9332 func rewriteValueAMD64_OpAMD64CMPLconstload_0(v *Value) bool {
9333
9334
9335
9336 for {
9337 valoff1 := v.AuxInt
9338 sym := v.Aux
9339 mem := v.Args[1]
9340 v_0 := v.Args[0]
9341 if v_0.Op != OpAMD64ADDQconst {
9342 break
9343 }
9344 off2 := v_0.AuxInt
9345 base := v_0.Args[0]
9346 if !(ValAndOff(valoff1).canAdd(off2)) {
9347 break
9348 }
9349 v.reset(OpAMD64CMPLconstload)
9350 v.AuxInt = ValAndOff(valoff1).add(off2)
9351 v.Aux = sym
9352 v.AddArg(base)
9353 v.AddArg(mem)
9354 return true
9355 }
9356
9357
9358
9359 for {
9360 valoff1 := v.AuxInt
9361 sym1 := v.Aux
9362 mem := v.Args[1]
9363 v_0 := v.Args[0]
9364 if v_0.Op != OpAMD64LEAQ {
9365 break
9366 }
9367 off2 := v_0.AuxInt
9368 sym2 := v_0.Aux
9369 base := v_0.Args[0]
9370 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
9371 break
9372 }
9373 v.reset(OpAMD64CMPLconstload)
9374 v.AuxInt = ValAndOff(valoff1).add(off2)
9375 v.Aux = mergeSym(sym1, sym2)
9376 v.AddArg(base)
9377 v.AddArg(mem)
9378 return true
9379 }
9380 return false
9381 }
9382 func rewriteValueAMD64_OpAMD64CMPLload_0(v *Value) bool {
9383
9384
9385
9386 for {
9387 off1 := v.AuxInt
9388 sym := v.Aux
9389 mem := v.Args[2]
9390 v_0 := v.Args[0]
9391 if v_0.Op != OpAMD64ADDQconst {
9392 break
9393 }
9394 off2 := v_0.AuxInt
9395 base := v_0.Args[0]
9396 val := v.Args[1]
9397 if !(is32Bit(off1 + off2)) {
9398 break
9399 }
9400 v.reset(OpAMD64CMPLload)
9401 v.AuxInt = off1 + off2
9402 v.Aux = sym
9403 v.AddArg(base)
9404 v.AddArg(val)
9405 v.AddArg(mem)
9406 return true
9407 }
9408
9409
9410
9411 for {
9412 off1 := v.AuxInt
9413 sym1 := v.Aux
9414 mem := v.Args[2]
9415 v_0 := v.Args[0]
9416 if v_0.Op != OpAMD64LEAQ {
9417 break
9418 }
9419 off2 := v_0.AuxInt
9420 sym2 := v_0.Aux
9421 base := v_0.Args[0]
9422 val := v.Args[1]
9423 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
9424 break
9425 }
9426 v.reset(OpAMD64CMPLload)
9427 v.AuxInt = off1 + off2
9428 v.Aux = mergeSym(sym1, sym2)
9429 v.AddArg(base)
9430 v.AddArg(val)
9431 v.AddArg(mem)
9432 return true
9433 }
9434
9435
9436
9437 for {
9438 off := v.AuxInt
9439 sym := v.Aux
9440 mem := v.Args[2]
9441 ptr := v.Args[0]
9442 v_1 := v.Args[1]
9443 if v_1.Op != OpAMD64MOVLconst {
9444 break
9445 }
9446 c := v_1.AuxInt
9447 if !(validValAndOff(c, off)) {
9448 break
9449 }
9450 v.reset(OpAMD64CMPLconstload)
9451 v.AuxInt = makeValAndOff(c, off)
9452 v.Aux = sym
9453 v.AddArg(ptr)
9454 v.AddArg(mem)
9455 return true
9456 }
9457 return false
9458 }
9459 func rewriteValueAMD64_OpAMD64CMPQ_0(v *Value) bool {
9460 b := v.Block
9461
9462
9463
9464 for {
9465 _ = v.Args[1]
9466 x := v.Args[0]
9467 v_1 := v.Args[1]
9468 if v_1.Op != OpAMD64MOVQconst {
9469 break
9470 }
9471 c := v_1.AuxInt
9472 if !(is32Bit(c)) {
9473 break
9474 }
9475 v.reset(OpAMD64CMPQconst)
9476 v.AuxInt = c
9477 v.AddArg(x)
9478 return true
9479 }
9480
9481
9482
9483 for {
9484 x := v.Args[1]
9485 v_0 := v.Args[0]
9486 if v_0.Op != OpAMD64MOVQconst {
9487 break
9488 }
9489 c := v_0.AuxInt
9490 if !(is32Bit(c)) {
9491 break
9492 }
9493 v.reset(OpAMD64InvertFlags)
9494 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
9495 v0.AuxInt = c
9496 v0.AddArg(x)
9497 v.AddArg(v0)
9498 return true
9499 }
9500
9501
9502
9503 for {
9504 x := v.Args[1]
9505 l := v.Args[0]
9506 if l.Op != OpAMD64MOVQload {
9507 break
9508 }
9509 off := l.AuxInt
9510 sym := l.Aux
9511 mem := l.Args[1]
9512 ptr := l.Args[0]
9513 if !(canMergeLoad(v, l) && clobber(l)) {
9514 break
9515 }
9516 v.reset(OpAMD64CMPQload)
9517 v.AuxInt = off
9518 v.Aux = sym
9519 v.AddArg(ptr)
9520 v.AddArg(x)
9521 v.AddArg(mem)
9522 return true
9523 }
9524
9525
9526
9527 for {
9528 _ = v.Args[1]
9529 x := v.Args[0]
9530 l := v.Args[1]
9531 if l.Op != OpAMD64MOVQload {
9532 break
9533 }
9534 off := l.AuxInt
9535 sym := l.Aux
9536 mem := l.Args[1]
9537 ptr := l.Args[0]
9538 if !(canMergeLoad(v, l) && clobber(l)) {
9539 break
9540 }
9541 v.reset(OpAMD64InvertFlags)
9542 v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
9543 v0.AuxInt = off
9544 v0.Aux = sym
9545 v0.AddArg(ptr)
9546 v0.AddArg(x)
9547 v0.AddArg(mem)
9548 v.AddArg(v0)
9549 return true
9550 }
9551 return false
9552 }
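// The first two CMPQconst rules below recognize the shift-amount pattern
// (NEGQ (ADDQconst [-16] (ANDQconst [15] x))), which computes 16-(x&15).
// That value always lies in [1,16], so comparing it against 32 is a
// compile-time-known unsigned less-than and folds to FlagLT_ULT; the
// [-8]/[7] variant computes 8-(x&7) in [1,8] the same way. A sketch of the
// range argument (negAddAnd16 is an illustrative name):
//
//	func negAddAnd16(x int64) int64 {
//		// x&15 is in [0,15], so 16-(x&15) is in [1,16] and hence < 32.
//		return -((x & 15) - 16)
//	}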
9553 func rewriteValueAMD64_OpAMD64CMPQconst_0(v *Value) bool {
// match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
// cond:
// result: (FlagLT_ULT)
9557 for {
9558 if v.AuxInt != 32 {
9559 break
9560 }
9561 v_0 := v.Args[0]
9562 if v_0.Op != OpAMD64NEGQ {
9563 break
9564 }
9565 v_0_0 := v_0.Args[0]
9566 if v_0_0.Op != OpAMD64ADDQconst {
9567 break
9568 }
9569 if v_0_0.AuxInt != -16 {
9570 break
9571 }
9572 v_0_0_0 := v_0_0.Args[0]
9573 if v_0_0_0.Op != OpAMD64ANDQconst {
9574 break
9575 }
9576 if v_0_0_0.AuxInt != 15 {
9577 break
9578 }
9579 v.reset(OpAMD64FlagLT_ULT)
9580 return true
9581 }
// match: (CMPQconst (NEGQ (ADDQconst [-8] (ANDQconst [7] _))) [32])
// cond:
// result: (FlagLT_ULT)
9585 for {
9586 if v.AuxInt != 32 {
9587 break
9588 }
9589 v_0 := v.Args[0]
9590 if v_0.Op != OpAMD64NEGQ {
9591 break
9592 }
9593 v_0_0 := v_0.Args[0]
9594 if v_0_0.Op != OpAMD64ADDQconst {
9595 break
9596 }
9597 if v_0_0.AuxInt != -8 {
9598 break
9599 }
9600 v_0_0_0 := v_0_0.Args[0]
9601 if v_0_0_0.Op != OpAMD64ANDQconst {
9602 break
9603 }
9604 if v_0_0_0.AuxInt != 7 {
9605 break
9606 }
9607 v.reset(OpAMD64FlagLT_ULT)
9608 return true
9609 }
9610
9611
9612
9613 for {
9614 y := v.AuxInt
9615 v_0 := v.Args[0]
9616 if v_0.Op != OpAMD64MOVQconst {
9617 break
9618 }
9619 x := v_0.AuxInt
9620 if !(x == y) {
9621 break
9622 }
9623 v.reset(OpAMD64FlagEQ)
9624 return true
9625 }
9626
9627
9628
9629 for {
9630 y := v.AuxInt
9631 v_0 := v.Args[0]
9632 if v_0.Op != OpAMD64MOVQconst {
9633 break
9634 }
9635 x := v_0.AuxInt
9636 if !(x < y && uint64(x) < uint64(y)) {
9637 break
9638 }
9639 v.reset(OpAMD64FlagLT_ULT)
9640 return true
9641 }
9642
9643
9644
9645 for {
9646 y := v.AuxInt
9647 v_0 := v.Args[0]
9648 if v_0.Op != OpAMD64MOVQconst {
9649 break
9650 }
9651 x := v_0.AuxInt
9652 if !(x < y && uint64(x) > uint64(y)) {
9653 break
9654 }
9655 v.reset(OpAMD64FlagLT_UGT)
9656 return true
9657 }
9658
9659
9660
9661 for {
9662 y := v.AuxInt
9663 v_0 := v.Args[0]
9664 if v_0.Op != OpAMD64MOVQconst {
9665 break
9666 }
9667 x := v_0.AuxInt
9668 if !(x > y && uint64(x) < uint64(y)) {
9669 break
9670 }
9671 v.reset(OpAMD64FlagGT_ULT)
9672 return true
9673 }
9674
9675
9676
9677 for {
9678 y := v.AuxInt
9679 v_0 := v.Args[0]
9680 if v_0.Op != OpAMD64MOVQconst {
9681 break
9682 }
9683 x := v_0.AuxInt
9684 if !(x > y && uint64(x) > uint64(y)) {
9685 break
9686 }
9687 v.reset(OpAMD64FlagGT_UGT)
9688 return true
9689 }
// match: (CMPQconst (MOVBQZX _) [c])
// cond: 0xFF < c
// result: (FlagLT_ULT)
9693 for {
9694 c := v.AuxInt
9695 v_0 := v.Args[0]
9696 if v_0.Op != OpAMD64MOVBQZX {
9697 break
9698 }
9699 if !(0xFF < c) {
9700 break
9701 }
9702 v.reset(OpAMD64FlagLT_ULT)
9703 return true
9704 }
// match: (CMPQconst (MOVWQZX _) [c])
// cond: 0xFFFF < c
// result: (FlagLT_ULT)
9708 for {
9709 c := v.AuxInt
9710 v_0 := v.Args[0]
9711 if v_0.Op != OpAMD64MOVWQZX {
9712 break
9713 }
9714 if !(0xFFFF < c) {
9715 break
9716 }
9717 v.reset(OpAMD64FlagLT_ULT)
9718 return true
9719 }
// match: (CMPQconst (MOVLQZX _) [c])
// cond: 0xFFFFFFFF < c
// result: (FlagLT_ULT)
9723 for {
9724 c := v.AuxInt
9725 v_0 := v.Args[0]
9726 if v_0.Op != OpAMD64MOVLQZX {
9727 break
9728 }
9729 if !(0xFFFFFFFF < c) {
9730 break
9731 }
9732 v.reset(OpAMD64FlagLT_ULT)
9733 return true
9734 }
9735 return false
9736 }
9737 func rewriteValueAMD64_OpAMD64CMPQconst_10(v *Value) bool {
9738 b := v.Block
// match: (CMPQconst (SHRQconst _ [c]) [n])
// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
// result: (FlagLT_ULT)
9742 for {
9743 n := v.AuxInt
9744 v_0 := v.Args[0]
9745 if v_0.Op != OpAMD64SHRQconst {
9746 break
9747 }
9748 c := v_0.AuxInt
9749 if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
9750 break
9751 }
9752 v.reset(OpAMD64FlagLT_ULT)
9753 return true
9754 }
9755
9756
9757
9758 for {
9759 n := v.AuxInt
9760 v_0 := v.Args[0]
9761 if v_0.Op != OpAMD64ANDQconst {
9762 break
9763 }
9764 m := v_0.AuxInt
9765 if !(0 <= m && m < n) {
9766 break
9767 }
9768 v.reset(OpAMD64FlagLT_ULT)
9769 return true
9770 }
9771
9772
9773
9774 for {
9775 n := v.AuxInt
9776 v_0 := v.Args[0]
9777 if v_0.Op != OpAMD64ANDLconst {
9778 break
9779 }
9780 m := v_0.AuxInt
9781 if !(0 <= m && m < n) {
9782 break
9783 }
9784 v.reset(OpAMD64FlagLT_ULT)
9785 return true
9786 }
9787
9788
9789
9790 for {
9791 if v.AuxInt != 0 {
9792 break
9793 }
9794 v_0 := v.Args[0]
9795 if v_0.Op != OpAMD64ANDQ {
9796 break
9797 }
9798 y := v_0.Args[1]
9799 x := v_0.Args[0]
9800 v.reset(OpAMD64TESTQ)
9801 v.AddArg(x)
9802 v.AddArg(y)
9803 return true
9804 }
9805
9806
9807
9808 for {
9809 if v.AuxInt != 0 {
9810 break
9811 }
9812 v_0 := v.Args[0]
9813 if v_0.Op != OpAMD64ANDQconst {
9814 break
9815 }
9816 c := v_0.AuxInt
9817 x := v_0.Args[0]
9818 v.reset(OpAMD64TESTQconst)
9819 v.AuxInt = c
9820 v.AddArg(x)
9821 return true
9822 }
9823
9824
9825
9826 for {
9827 if v.AuxInt != 0 {
9828 break
9829 }
9830 x := v.Args[0]
9831 v.reset(OpAMD64TESTQ)
9832 v.AddArg(x)
9833 v.AddArg(x)
9834 return true
9835 }
9836
9837
9838
9839 for {
9840 c := v.AuxInt
9841 l := v.Args[0]
9842 if l.Op != OpAMD64MOVQload {
9843 break
9844 }
9845 off := l.AuxInt
9846 sym := l.Aux
9847 mem := l.Args[1]
9848 ptr := l.Args[0]
9849 if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
9850 break
9851 }
9852 b = l.Block
9853 v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
9854 v.reset(OpCopy)
9855 v.AddArg(v0)
9856 v0.AuxInt = makeValAndOff(c, off)
9857 v0.Aux = sym
9858 v0.AddArg(ptr)
9859 v0.AddArg(mem)
9860 return true
9861 }
9862 return false
9863 }
9864 func rewriteValueAMD64_OpAMD64CMPQconstload_0(v *Value) bool {
9865
9866
9867
9868 for {
9869 valoff1 := v.AuxInt
9870 sym := v.Aux
9871 mem := v.Args[1]
9872 v_0 := v.Args[0]
9873 if v_0.Op != OpAMD64ADDQconst {
9874 break
9875 }
9876 off2 := v_0.AuxInt
9877 base := v_0.Args[0]
9878 if !(ValAndOff(valoff1).canAdd(off2)) {
9879 break
9880 }
9881 v.reset(OpAMD64CMPQconstload)
9882 v.AuxInt = ValAndOff(valoff1).add(off2)
9883 v.Aux = sym
9884 v.AddArg(base)
9885 v.AddArg(mem)
9886 return true
9887 }
9888
9889
9890
9891 for {
9892 valoff1 := v.AuxInt
9893 sym1 := v.Aux
9894 mem := v.Args[1]
9895 v_0 := v.Args[0]
9896 if v_0.Op != OpAMD64LEAQ {
9897 break
9898 }
9899 off2 := v_0.AuxInt
9900 sym2 := v_0.Aux
9901 base := v_0.Args[0]
9902 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
9903 break
9904 }
9905 v.reset(OpAMD64CMPQconstload)
9906 v.AuxInt = ValAndOff(valoff1).add(off2)
9907 v.Aux = mergeSym(sym1, sym2)
9908 v.AddArg(base)
9909 v.AddArg(mem)
9910 return true
9911 }
9912 return false
9913 }
9914 func rewriteValueAMD64_OpAMD64CMPQload_0(v *Value) bool {
9915
9916
9917
9918 for {
9919 off1 := v.AuxInt
9920 sym := v.Aux
9921 mem := v.Args[2]
9922 v_0 := v.Args[0]
9923 if v_0.Op != OpAMD64ADDQconst {
9924 break
9925 }
9926 off2 := v_0.AuxInt
9927 base := v_0.Args[0]
9928 val := v.Args[1]
9929 if !(is32Bit(off1 + off2)) {
9930 break
9931 }
9932 v.reset(OpAMD64CMPQload)
9933 v.AuxInt = off1 + off2
9934 v.Aux = sym
9935 v.AddArg(base)
9936 v.AddArg(val)
9937 v.AddArg(mem)
9938 return true
9939 }
9940
9941
9942
9943 for {
9944 off1 := v.AuxInt
9945 sym1 := v.Aux
9946 mem := v.Args[2]
9947 v_0 := v.Args[0]
9948 if v_0.Op != OpAMD64LEAQ {
9949 break
9950 }
9951 off2 := v_0.AuxInt
9952 sym2 := v_0.Aux
9953 base := v_0.Args[0]
9954 val := v.Args[1]
9955 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
9956 break
9957 }
9958 v.reset(OpAMD64CMPQload)
9959 v.AuxInt = off1 + off2
9960 v.Aux = mergeSym(sym1, sym2)
9961 v.AddArg(base)
9962 v.AddArg(val)
9963 v.AddArg(mem)
9964 return true
9965 }
9966
9967
9968
9969 for {
9970 off := v.AuxInt
9971 sym := v.Aux
9972 mem := v.Args[2]
9973 ptr := v.Args[0]
9974 v_1 := v.Args[1]
9975 if v_1.Op != OpAMD64MOVQconst {
9976 break
9977 }
9978 c := v_1.AuxInt
9979 if !(validValAndOff(c, off)) {
9980 break
9981 }
9982 v.reset(OpAMD64CMPQconstload)
9983 v.AuxInt = makeValAndOff(c, off)
9984 v.Aux = sym
9985 v.AddArg(ptr)
9986 v.AddArg(mem)
9987 return true
9988 }
9989 return false
9990 }
9991 func rewriteValueAMD64_OpAMD64CMPW_0(v *Value) bool {
9992 b := v.Block
9993
9994
9995
9996 for {
9997 _ = v.Args[1]
9998 x := v.Args[0]
9999 v_1 := v.Args[1]
10000 if v_1.Op != OpAMD64MOVLconst {
10001 break
10002 }
10003 c := v_1.AuxInt
10004 v.reset(OpAMD64CMPWconst)
10005 v.AuxInt = int64(int16(c))
10006 v.AddArg(x)
10007 return true
10008 }
10009
10010
10011
10012 for {
10013 x := v.Args[1]
10014 v_0 := v.Args[0]
10015 if v_0.Op != OpAMD64MOVLconst {
10016 break
10017 }
10018 c := v_0.AuxInt
10019 v.reset(OpAMD64InvertFlags)
10020 v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
10021 v0.AuxInt = int64(int16(c))
10022 v0.AddArg(x)
10023 v.AddArg(v0)
10024 return true
10025 }
10026
10027
10028
10029 for {
10030 x := v.Args[1]
10031 l := v.Args[0]
10032 if l.Op != OpAMD64MOVWload {
10033 break
10034 }
10035 off := l.AuxInt
10036 sym := l.Aux
10037 mem := l.Args[1]
10038 ptr := l.Args[0]
10039 if !(canMergeLoad(v, l) && clobber(l)) {
10040 break
10041 }
10042 v.reset(OpAMD64CMPWload)
10043 v.AuxInt = off
10044 v.Aux = sym
10045 v.AddArg(ptr)
10046 v.AddArg(x)
10047 v.AddArg(mem)
10048 return true
10049 }
10050
10051
10052
10053 for {
10054 _ = v.Args[1]
10055 x := v.Args[0]
10056 l := v.Args[1]
10057 if l.Op != OpAMD64MOVWload {
10058 break
10059 }
10060 off := l.AuxInt
10061 sym := l.Aux
10062 mem := l.Args[1]
10063 ptr := l.Args[0]
10064 if !(canMergeLoad(v, l) && clobber(l)) {
10065 break
10066 }
10067 v.reset(OpAMD64InvertFlags)
10068 v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
10069 v0.AuxInt = off
10070 v0.Aux = sym
10071 v0.AddArg(ptr)
10072 v0.AddArg(x)
10073 v0.AddArg(mem)
10074 v.AddArg(v0)
10075 return true
10076 }
10077 return false
10078 }
10079 func rewriteValueAMD64_OpAMD64CMPWconst_0(v *Value) bool {
10080 b := v.Block
10081
10082
10083
10084 for {
10085 y := v.AuxInt
10086 v_0 := v.Args[0]
10087 if v_0.Op != OpAMD64MOVLconst {
10088 break
10089 }
10090 x := v_0.AuxInt
10091 if !(int16(x) == int16(y)) {
10092 break
10093 }
10094 v.reset(OpAMD64FlagEQ)
10095 return true
10096 }
10097
10098
10099
10100 for {
10101 y := v.AuxInt
10102 v_0 := v.Args[0]
10103 if v_0.Op != OpAMD64MOVLconst {
10104 break
10105 }
10106 x := v_0.AuxInt
10107 if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
10108 break
10109 }
10110 v.reset(OpAMD64FlagLT_ULT)
10111 return true
10112 }
10113
10114
10115
10116 for {
10117 y := v.AuxInt
10118 v_0 := v.Args[0]
10119 if v_0.Op != OpAMD64MOVLconst {
10120 break
10121 }
10122 x := v_0.AuxInt
10123 if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
10124 break
10125 }
10126 v.reset(OpAMD64FlagLT_UGT)
10127 return true
10128 }
10129
10130
10131
10132 for {
10133 y := v.AuxInt
10134 v_0 := v.Args[0]
10135 if v_0.Op != OpAMD64MOVLconst {
10136 break
10137 }
10138 x := v_0.AuxInt
10139 if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
10140 break
10141 }
10142 v.reset(OpAMD64FlagGT_ULT)
10143 return true
10144 }
10145
10146
10147
10148 for {
10149 y := v.AuxInt
10150 v_0 := v.Args[0]
10151 if v_0.Op != OpAMD64MOVLconst {
10152 break
10153 }
10154 x := v_0.AuxInt
10155 if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
10156 break
10157 }
10158 v.reset(OpAMD64FlagGT_UGT)
10159 return true
10160 }
10161
10162
10163
10164 for {
10165 n := v.AuxInt
10166 v_0 := v.Args[0]
10167 if v_0.Op != OpAMD64ANDLconst {
10168 break
10169 }
10170 m := v_0.AuxInt
10171 if !(0 <= int16(m) && int16(m) < int16(n)) {
10172 break
10173 }
10174 v.reset(OpAMD64FlagLT_ULT)
10175 return true
10176 }
10177
10178
10179
10180 for {
10181 if v.AuxInt != 0 {
10182 break
10183 }
10184 v_0 := v.Args[0]
10185 if v_0.Op != OpAMD64ANDL {
10186 break
10187 }
10188 y := v_0.Args[1]
10189 x := v_0.Args[0]
10190 v.reset(OpAMD64TESTW)
10191 v.AddArg(x)
10192 v.AddArg(y)
10193 return true
10194 }
10195
10196
10197
10198 for {
10199 if v.AuxInt != 0 {
10200 break
10201 }
10202 v_0 := v.Args[0]
10203 if v_0.Op != OpAMD64ANDLconst {
10204 break
10205 }
10206 c := v_0.AuxInt
10207 x := v_0.Args[0]
10208 v.reset(OpAMD64TESTWconst)
10209 v.AuxInt = int64(int16(c))
10210 v.AddArg(x)
10211 return true
10212 }
10213
10214
10215
10216 for {
10217 if v.AuxInt != 0 {
10218 break
10219 }
10220 x := v.Args[0]
10221 v.reset(OpAMD64TESTW)
10222 v.AddArg(x)
10223 v.AddArg(x)
10224 return true
10225 }
10226
10227
10228
10229 for {
10230 c := v.AuxInt
10231 l := v.Args[0]
10232 if l.Op != OpAMD64MOVWload {
10233 break
10234 }
10235 off := l.AuxInt
10236 sym := l.Aux
10237 mem := l.Args[1]
10238 ptr := l.Args[0]
10239 if !(l.Uses == 1 && validValAndOff(c, off) && clobber(l)) {
10240 break
10241 }
10242 b = l.Block
10243 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
10244 v.reset(OpCopy)
10245 v.AddArg(v0)
10246 v0.AuxInt = makeValAndOff(c, off)
10247 v0.Aux = sym
10248 v0.AddArg(ptr)
10249 v0.AddArg(mem)
10250 return true
10251 }
10252 return false
10253 }
10254 func rewriteValueAMD64_OpAMD64CMPWconstload_0(v *Value) bool {
10255
10256
10257
10258 for {
10259 valoff1 := v.AuxInt
10260 sym := v.Aux
10261 mem := v.Args[1]
10262 v_0 := v.Args[0]
10263 if v_0.Op != OpAMD64ADDQconst {
10264 break
10265 }
10266 off2 := v_0.AuxInt
10267 base := v_0.Args[0]
10268 if !(ValAndOff(valoff1).canAdd(off2)) {
10269 break
10270 }
10271 v.reset(OpAMD64CMPWconstload)
10272 v.AuxInt = ValAndOff(valoff1).add(off2)
10273 v.Aux = sym
10274 v.AddArg(base)
10275 v.AddArg(mem)
10276 return true
10277 }
10278
10279
10280
10281 for {
10282 valoff1 := v.AuxInt
10283 sym1 := v.Aux
10284 mem := v.Args[1]
10285 v_0 := v.Args[0]
10286 if v_0.Op != OpAMD64LEAQ {
10287 break
10288 }
10289 off2 := v_0.AuxInt
10290 sym2 := v_0.Aux
10291 base := v_0.Args[0]
10292 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
10293 break
10294 }
10295 v.reset(OpAMD64CMPWconstload)
10296 v.AuxInt = ValAndOff(valoff1).add(off2)
10297 v.Aux = mergeSym(sym1, sym2)
10298 v.AddArg(base)
10299 v.AddArg(mem)
10300 return true
10301 }
10302 return false
10303 }
10304 func rewriteValueAMD64_OpAMD64CMPWload_0(v *Value) bool {
10305
10306
10307
10308 for {
10309 off1 := v.AuxInt
10310 sym := v.Aux
10311 mem := v.Args[2]
10312 v_0 := v.Args[0]
10313 if v_0.Op != OpAMD64ADDQconst {
10314 break
10315 }
10316 off2 := v_0.AuxInt
10317 base := v_0.Args[0]
10318 val := v.Args[1]
10319 if !(is32Bit(off1 + off2)) {
10320 break
10321 }
10322 v.reset(OpAMD64CMPWload)
10323 v.AuxInt = off1 + off2
10324 v.Aux = sym
10325 v.AddArg(base)
10326 v.AddArg(val)
10327 v.AddArg(mem)
10328 return true
10329 }
10330
10331
10332
10333 for {
10334 off1 := v.AuxInt
10335 sym1 := v.Aux
10336 mem := v.Args[2]
10337 v_0 := v.Args[0]
10338 if v_0.Op != OpAMD64LEAQ {
10339 break
10340 }
10341 off2 := v_0.AuxInt
10342 sym2 := v_0.Aux
10343 base := v_0.Args[0]
10344 val := v.Args[1]
10345 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
10346 break
10347 }
10348 v.reset(OpAMD64CMPWload)
10349 v.AuxInt = off1 + off2
10350 v.Aux = mergeSym(sym1, sym2)
10351 v.AddArg(base)
10352 v.AddArg(val)
10353 v.AddArg(mem)
10354 return true
10355 }
10356
10357
10358
10359 for {
10360 off := v.AuxInt
10361 sym := v.Aux
10362 mem := v.Args[2]
10363 ptr := v.Args[0]
10364 v_1 := v.Args[1]
10365 if v_1.Op != OpAMD64MOVLconst {
10366 break
10367 }
10368 c := v_1.AuxInt
10369 if !(validValAndOff(int64(int16(c)), off)) {
10370 break
10371 }
10372 v.reset(OpAMD64CMPWconstload)
10373 v.AuxInt = makeValAndOff(int64(int16(c)), off)
10374 v.Aux = sym
10375 v.AddArg(ptr)
10376 v.AddArg(mem)
10377 return true
10378 }
10379 return false
10380 }
10381 func rewriteValueAMD64_OpAMD64CMPXCHGLlock_0(v *Value) bool {
10382
// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
// cond: is32Bit(off1+off2)
// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
10386 off1 := v.AuxInt
10387 sym := v.Aux
10388 mem := v.Args[3]
10389 v_0 := v.Args[0]
10390 if v_0.Op != OpAMD64ADDQconst {
10391 break
10392 }
10393 off2 := v_0.AuxInt
10394 ptr := v_0.Args[0]
10395 old := v.Args[1]
10396 new_ := v.Args[2]
10397 if !(is32Bit(off1 + off2)) {
10398 break
10399 }
10400 v.reset(OpAMD64CMPXCHGLlock)
10401 v.AuxInt = off1 + off2
10402 v.Aux = sym
10403 v.AddArg(ptr)
10404 v.AddArg(old)
10405 v.AddArg(new_)
10406 v.AddArg(mem)
10407 return true
10408 }
10409 return false
10410 }
10411 func rewriteValueAMD64_OpAMD64CMPXCHGQlock_0(v *Value) bool {
10412
// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
// cond: is32Bit(off1+off2)
// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
10416 off1 := v.AuxInt
10417 sym := v.Aux
10418 mem := v.Args[3]
10419 v_0 := v.Args[0]
10420 if v_0.Op != OpAMD64ADDQconst {
10421 break
10422 }
10423 off2 := v_0.AuxInt
10424 ptr := v_0.Args[0]
10425 old := v.Args[1]
10426 new_ := v.Args[2]
10427 if !(is32Bit(off1 + off2)) {
10428 break
10429 }
10430 v.reset(OpAMD64CMPXCHGQlock)
10431 v.AuxInt = off1 + off2
10432 v.Aux = sym
10433 v.AddArg(ptr)
10434 v.AddArg(old)
10435 v.AddArg(new_)
10436 v.AddArg(mem)
10437 return true
10438 }
10439 return false
10440 }
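// Like the compare-with-memory opcodes, the CMPXCHG lock ops above fold an
// ADDQconst base-pointer adjustment into their displacement, again guarded
// by is32Bit(off1+off2) so the combined offset still fits the addressing
// mode.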
10441 func rewriteValueAMD64_OpAMD64DIVSD_0(v *Value) bool {
10442
// match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
// cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (DIVSDload x [off] {sym} ptr mem)
10446 _ = v.Args[1]
10447 x := v.Args[0]
10448 l := v.Args[1]
10449 if l.Op != OpAMD64MOVSDload {
10450 break
10451 }
10452 off := l.AuxInt
10453 sym := l.Aux
10454 mem := l.Args[1]
10455 ptr := l.Args[0]
10456 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
10457 break
10458 }
10459 v.reset(OpAMD64DIVSDload)
10460 v.AuxInt = off
10461 v.Aux = sym
10462 v.AddArg(x)
10463 v.AddArg(ptr)
10464 v.AddArg(mem)
10465 return true
10466 }
10467 return false
10468 }
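// DIVSD only merges a load of the divisor, since division is not
// commutative, and it uses canMergeLoadClobber instead of plain
// canMergeLoad: the fused form can clobber the register holding x, so the
// extra argument lets the check confirm that is safe. A sketch of what the
// fused form computes (divsdLoad is an illustrative name):
//
//	func divsdLoad(x float64, mem []float64, off int) float64 {
//		return x / mem[off] // DIVSDload x [off] {sym} ptr mem
//	}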
10469 func rewriteValueAMD64_OpAMD64DIVSDload_0(v *Value) bool {
// match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(off1+off2)
// result: (DIVSDload [off1+off2] {sym} val base mem)
10473 for {
10474 off1 := v.AuxInt
10475 sym := v.Aux
10476 mem := v.Args[2]
10477 val := v.Args[0]
10478 v_1 := v.Args[1]
10479 if v_1.Op != OpAMD64ADDQconst {
10480 break
10481 }
10482 off2 := v_1.AuxInt
10483 base := v_1.Args[0]
10484 if !(is32Bit(off1 + off2)) {
10485 break
10486 }
10487 v.reset(OpAMD64DIVSDload)
10488 v.AuxInt = off1 + off2
10489 v.Aux = sym
10490 v.AddArg(val)
10491 v.AddArg(base)
10492 v.AddArg(mem)
10493 return true
10494 }
// match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
10498 for {
10499 off1 := v.AuxInt
10500 sym1 := v.Aux
10501 mem := v.Args[2]
10502 val := v.Args[0]
10503 v_1 := v.Args[1]
10504 if v_1.Op != OpAMD64LEAQ {
10505 break
10506 }
10507 off2 := v_1.AuxInt
10508 sym2 := v_1.Aux
10509 base := v_1.Args[0]
10510 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
10511 break
10512 }
10513 v.reset(OpAMD64DIVSDload)
10514 v.AuxInt = off1 + off2
10515 v.Aux = mergeSym(sym1, sym2)
10516 v.AddArg(val)
10517 v.AddArg(base)
10518 v.AddArg(mem)
10519 return true
10520 }
10521 return false
10522 }
10523 func rewriteValueAMD64_OpAMD64DIVSS_0(v *Value) bool {
10524
// match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
// cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (DIVSSload x [off] {sym} ptr mem)
10528 _ = v.Args[1]
10529 x := v.Args[0]
10530 l := v.Args[1]
10531 if l.Op != OpAMD64MOVSSload {
10532 break
10533 }
10534 off := l.AuxInt
10535 sym := l.Aux
10536 mem := l.Args[1]
10537 ptr := l.Args[0]
10538 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
10539 break
10540 }
10541 v.reset(OpAMD64DIVSSload)
10542 v.AuxInt = off
10543 v.Aux = sym
10544 v.AddArg(x)
10545 v.AddArg(ptr)
10546 v.AddArg(mem)
10547 return true
10548 }
10549 return false
10550 }
10551 func rewriteValueAMD64_OpAMD64DIVSSload_0(v *Value) bool {
10552
10553
10554
10555 for {
10556 off1 := v.AuxInt
10557 sym := v.Aux
10558 mem := v.Args[2]
10559 val := v.Args[0]
10560 v_1 := v.Args[1]
10561 if v_1.Op != OpAMD64ADDQconst {
10562 break
10563 }
10564 off2 := v_1.AuxInt
10565 base := v_1.Args[0]
10566 if !(is32Bit(off1 + off2)) {
10567 break
10568 }
10569 v.reset(OpAMD64DIVSSload)
10570 v.AuxInt = off1 + off2
10571 v.Aux = sym
10572 v.AddArg(val)
10573 v.AddArg(base)
10574 v.AddArg(mem)
10575 return true
10576 }
10577
10578
10579
10580 for {
10581 off1 := v.AuxInt
10582 sym1 := v.Aux
10583 mem := v.Args[2]
10584 val := v.Args[0]
10585 v_1 := v.Args[1]
10586 if v_1.Op != OpAMD64LEAQ {
10587 break
10588 }
10589 off2 := v_1.AuxInt
10590 sym2 := v_1.Aux
10591 base := v_1.Args[0]
10592 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
10593 break
10594 }
10595 v.reset(OpAMD64DIVSSload)
10596 v.AuxInt = off1 + off2
10597 v.Aux = mergeSym(sym1, sym2)
10598 v.AddArg(val)
10599 v.AddArg(base)
10600 v.AddArg(mem)
10601 return true
10602 }
10603 return false
10604 }
10605 func rewriteValueAMD64_OpAMD64HMULL_0(v *Value) bool {
// match: (HMULL x y)
// cond: !x.rematerializeable() && y.rematerializeable()
// result: (HMULL y x)
10609 for {
10610 y := v.Args[1]
10611 x := v.Args[0]
10612 if !(!x.rematerializeable() && y.rematerializeable()) {
10613 break
10614 }
10615 v.reset(OpAMD64HMULL)
10616 v.AddArg(y)
10617 v.AddArg(x)
10618 return true
10619 }
10620 return false
10621 }
10622 func rewriteValueAMD64_OpAMD64HMULLU_0(v *Value) bool {
// match: (HMULLU x y)
// cond: !x.rematerializeable() && y.rematerializeable()
// result: (HMULLU y x)
10626 for {
10627 y := v.Args[1]
10628 x := v.Args[0]
10629 if !(!x.rematerializeable() && y.rematerializeable()) {
10630 break
10631 }
10632 v.reset(OpAMD64HMULLU)
10633 v.AddArg(y)
10634 v.AddArg(x)
10635 return true
10636 }
10637 return false
10638 }
10639 func rewriteValueAMD64_OpAMD64HMULQ_0(v *Value) bool {
// match: (HMULQ x y)
// cond: !x.rematerializeable() && y.rematerializeable()
// result: (HMULQ y x)
10643 for {
10644 y := v.Args[1]
10645 x := v.Args[0]
10646 if !(!x.rematerializeable() && y.rematerializeable()) {
10647 break
10648 }
10649 v.reset(OpAMD64HMULQ)
10650 v.AddArg(y)
10651 v.AddArg(x)
10652 return true
10653 }
10654 return false
10655 }
10656 func rewriteValueAMD64_OpAMD64HMULQU_0(v *Value) bool {
// match: (HMULQU x y)
// cond: !x.rematerializeable() && y.rematerializeable()
// result: (HMULQU y x)
10660 for {
10661 y := v.Args[1]
10662 x := v.Args[0]
10663 if !(!x.rematerializeable() && y.rematerializeable()) {
10664 break
10665 }
10666 v.reset(OpAMD64HMULQU)
10667 v.AddArg(y)
10668 v.AddArg(x)
10669 return true
10670 }
10671 return false
10672 }
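// The HMUL rewrites above exploit that high-multiply is commutative while
// its register constraints are not: one operand wants to live in AX.
// Swapping so that a rematerializeable value (one that is cheap to
// recompute, such as a constant) takes the constrained slot gives the
// register allocator more freedom. This reading is inferred from the
// condition !x.rematerializeable() && y.rematerializeable().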
10673 func rewriteValueAMD64_OpAMD64LEAL_0(v *Value) bool {
// match: (LEAL [c] {s} (ADDLconst [d] x))
// cond: is32Bit(c+d)
// result: (LEAL [c+d] {s} x)
10677 for {
10678 c := v.AuxInt
10679 s := v.Aux
10680 v_0 := v.Args[0]
10681 if v_0.Op != OpAMD64ADDLconst {
10682 break
10683 }
10684 d := v_0.AuxInt
10685 x := v_0.Args[0]
10686 if !(is32Bit(c + d)) {
10687 break
10688 }
10689 v.reset(OpAMD64LEAL)
10690 v.AuxInt = c + d
10691 v.Aux = s
10692 v.AddArg(x)
10693 return true
10694 }
// match: (LEAL [c] {s} (ADDL x y))
// cond: x.Op != OpSB && y.Op != OpSB
// result: (LEAL1 [c] {s} x y)
10698 for {
10699 c := v.AuxInt
10700 s := v.Aux
10701 v_0 := v.Args[0]
10702 if v_0.Op != OpAMD64ADDL {
10703 break
10704 }
10705 y := v_0.Args[1]
10706 x := v_0.Args[0]
10707 if !(x.Op != OpSB && y.Op != OpSB) {
10708 break
10709 }
10710 v.reset(OpAMD64LEAL1)
10711 v.AuxInt = c
10712 v.Aux = s
10713 v.AddArg(x)
10714 v.AddArg(y)
10715 return true
10716 }
10717 return false
10718 }
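// LEAL1/LEAL2/LEAL4/LEAL8 correspond to the x86 scaled-index addressing
// modes base + index*scale + disp, so the rules that follow turn shifts by
// 1, 2, and 3 into scale factors of 2, 4, and 8. A sketch of the address a
// scale-4 form computes (leal4 is an illustrative name):
//
//	func leal4(x, y, c int32) int32 {
//		return x + 4*y + c // LEAL4 [c] {s} x y
//	}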
10719 func rewriteValueAMD64_OpAMD64LEAL1_0(v *Value) bool {
10720
10721
10722
10723 for {
10724 c := v.AuxInt
10725 s := v.Aux
10726 y := v.Args[1]
10727 v_0 := v.Args[0]
10728 if v_0.Op != OpAMD64ADDLconst {
10729 break
10730 }
10731 d := v_0.AuxInt
10732 x := v_0.Args[0]
10733 if !(is32Bit(c+d) && x.Op != OpSB) {
10734 break
10735 }
10736 v.reset(OpAMD64LEAL1)
10737 v.AuxInt = c + d
10738 v.Aux = s
10739 v.AddArg(x)
10740 v.AddArg(y)
10741 return true
10742 }
10743
10744
10745
10746 for {
10747 c := v.AuxInt
10748 s := v.Aux
10749 _ = v.Args[1]
10750 y := v.Args[0]
10751 v_1 := v.Args[1]
10752 if v_1.Op != OpAMD64ADDLconst {
10753 break
10754 }
10755 d := v_1.AuxInt
10756 x := v_1.Args[0]
10757 if !(is32Bit(c+d) && x.Op != OpSB) {
10758 break
10759 }
10760 v.reset(OpAMD64LEAL1)
10761 v.AuxInt = c + d
10762 v.Aux = s
10763 v.AddArg(x)
10764 v.AddArg(y)
10765 return true
10766 }
10767
10768
10769
10770 for {
10771 c := v.AuxInt
10772 s := v.Aux
10773 _ = v.Args[1]
10774 x := v.Args[0]
10775 v_1 := v.Args[1]
10776 if v_1.Op != OpAMD64SHLLconst {
10777 break
10778 }
10779 if v_1.AuxInt != 1 {
10780 break
10781 }
10782 y := v_1.Args[0]
10783 v.reset(OpAMD64LEAL2)
10784 v.AuxInt = c
10785 v.Aux = s
10786 v.AddArg(x)
10787 v.AddArg(y)
10788 return true
10789 }
10790
10791
10792
10793 for {
10794 c := v.AuxInt
10795 s := v.Aux
10796 x := v.Args[1]
10797 v_0 := v.Args[0]
10798 if v_0.Op != OpAMD64SHLLconst {
10799 break
10800 }
10801 if v_0.AuxInt != 1 {
10802 break
10803 }
10804 y := v_0.Args[0]
10805 v.reset(OpAMD64LEAL2)
10806 v.AuxInt = c
10807 v.Aux = s
10808 v.AddArg(x)
10809 v.AddArg(y)
10810 return true
10811 }
10812
10813
10814
10815 for {
10816 c := v.AuxInt
10817 s := v.Aux
10818 _ = v.Args[1]
10819 x := v.Args[0]
10820 v_1 := v.Args[1]
10821 if v_1.Op != OpAMD64SHLLconst {
10822 break
10823 }
10824 if v_1.AuxInt != 2 {
10825 break
10826 }
10827 y := v_1.Args[0]
10828 v.reset(OpAMD64LEAL4)
10829 v.AuxInt = c
10830 v.Aux = s
10831 v.AddArg(x)
10832 v.AddArg(y)
10833 return true
10834 }
10835
10836
10837
10838 for {
10839 c := v.AuxInt
10840 s := v.Aux
10841 x := v.Args[1]
10842 v_0 := v.Args[0]
10843 if v_0.Op != OpAMD64SHLLconst {
10844 break
10845 }
10846 if v_0.AuxInt != 2 {
10847 break
10848 }
10849 y := v_0.Args[0]
10850 v.reset(OpAMD64LEAL4)
10851 v.AuxInt = c
10852 v.Aux = s
10853 v.AddArg(x)
10854 v.AddArg(y)
10855 return true
10856 }
10857
10858
10859
10860 for {
10861 c := v.AuxInt
10862 s := v.Aux
10863 _ = v.Args[1]
10864 x := v.Args[0]
10865 v_1 := v.Args[1]
10866 if v_1.Op != OpAMD64SHLLconst {
10867 break
10868 }
10869 if v_1.AuxInt != 3 {
10870 break
10871 }
10872 y := v_1.Args[0]
10873 v.reset(OpAMD64LEAL8)
10874 v.AuxInt = c
10875 v.Aux = s
10876 v.AddArg(x)
10877 v.AddArg(y)
10878 return true
10879 }
10880
10881
10882
10883 for {
10884 c := v.AuxInt
10885 s := v.Aux
10886 x := v.Args[1]
10887 v_0 := v.Args[0]
10888 if v_0.Op != OpAMD64SHLLconst {
10889 break
10890 }
10891 if v_0.AuxInt != 3 {
10892 break
10893 }
10894 y := v_0.Args[0]
10895 v.reset(OpAMD64LEAL8)
10896 v.AuxInt = c
10897 v.Aux = s
10898 v.AddArg(x)
10899 v.AddArg(y)
10900 return true
10901 }
10902 return false
10903 }
10904 func rewriteValueAMD64_OpAMD64LEAL2_0(v *Value) bool {
10905
10906
10907
10908 for {
10909 c := v.AuxInt
10910 s := v.Aux
10911 y := v.Args[1]
10912 v_0 := v.Args[0]
10913 if v_0.Op != OpAMD64ADDLconst {
10914 break
10915 }
10916 d := v_0.AuxInt
10917 x := v_0.Args[0]
10918 if !(is32Bit(c+d) && x.Op != OpSB) {
10919 break
10920 }
10921 v.reset(OpAMD64LEAL2)
10922 v.AuxInt = c + d
10923 v.Aux = s
10924 v.AddArg(x)
10925 v.AddArg(y)
10926 return true
10927 }
10928
10929
10930
10931 for {
10932 c := v.AuxInt
10933 s := v.Aux
10934 _ = v.Args[1]
10935 x := v.Args[0]
10936 v_1 := v.Args[1]
10937 if v_1.Op != OpAMD64ADDLconst {
10938 break
10939 }
10940 d := v_1.AuxInt
10941 y := v_1.Args[0]
10942 if !(is32Bit(c+2*d) && y.Op != OpSB) {
10943 break
10944 }
10945 v.reset(OpAMD64LEAL2)
10946 v.AuxInt = c + 2*d
10947 v.Aux = s
10948 v.AddArg(x)
10949 v.AddArg(y)
10950 return true
10951 }
10952
10953
10954
10955 for {
10956 c := v.AuxInt
10957 s := v.Aux
10958 _ = v.Args[1]
10959 x := v.Args[0]
10960 v_1 := v.Args[1]
10961 if v_1.Op != OpAMD64SHLLconst {
10962 break
10963 }
10964 if v_1.AuxInt != 1 {
10965 break
10966 }
10967 y := v_1.Args[0]
10968 v.reset(OpAMD64LEAL4)
10969 v.AuxInt = c
10970 v.Aux = s
10971 v.AddArg(x)
10972 v.AddArg(y)
10973 return true
10974 }
10975
10976
10977
10978 for {
10979 c := v.AuxInt
10980 s := v.Aux
10981 _ = v.Args[1]
10982 x := v.Args[0]
10983 v_1 := v.Args[1]
10984 if v_1.Op != OpAMD64SHLLconst {
10985 break
10986 }
10987 if v_1.AuxInt != 2 {
10988 break
10989 }
10990 y := v_1.Args[0]
10991 v.reset(OpAMD64LEAL8)
10992 v.AuxInt = c
10993 v.Aux = s
10994 v.AddArg(x)
10995 v.AddArg(y)
10996 return true
10997 }
10998 return false
10999 }
11000 func rewriteValueAMD64_OpAMD64LEAL4_0(v *Value) bool {
11001
11002
11003
11004 for {
11005 c := v.AuxInt
11006 s := v.Aux
11007 y := v.Args[1]
11008 v_0 := v.Args[0]
11009 if v_0.Op != OpAMD64ADDLconst {
11010 break
11011 }
11012 d := v_0.AuxInt
11013 x := v_0.Args[0]
11014 if !(is32Bit(c+d) && x.Op != OpSB) {
11015 break
11016 }
11017 v.reset(OpAMD64LEAL4)
11018 v.AuxInt = c + d
11019 v.Aux = s
11020 v.AddArg(x)
11021 v.AddArg(y)
11022 return true
11023 }
11024
11025
11026
11027 for {
11028 c := v.AuxInt
11029 s := v.Aux
11030 _ = v.Args[1]
11031 x := v.Args[0]
11032 v_1 := v.Args[1]
11033 if v_1.Op != OpAMD64ADDLconst {
11034 break
11035 }
11036 d := v_1.AuxInt
11037 y := v_1.Args[0]
11038 if !(is32Bit(c+4*d) && y.Op != OpSB) {
11039 break
11040 }
11041 v.reset(OpAMD64LEAL4)
11042 v.AuxInt = c + 4*d
11043 v.Aux = s
11044 v.AddArg(x)
11045 v.AddArg(y)
11046 return true
11047 }
11048
11049
11050
11051 for {
11052 c := v.AuxInt
11053 s := v.Aux
11054 _ = v.Args[1]
11055 x := v.Args[0]
11056 v_1 := v.Args[1]
11057 if v_1.Op != OpAMD64SHLLconst {
11058 break
11059 }
11060 if v_1.AuxInt != 1 {
11061 break
11062 }
11063 y := v_1.Args[0]
11064 v.reset(OpAMD64LEAL8)
11065 v.AuxInt = c
11066 v.Aux = s
11067 v.AddArg(x)
11068 v.AddArg(y)
11069 return true
11070 }
11071 return false
11072 }
11073 func rewriteValueAMD64_OpAMD64LEAL8_0(v *Value) bool {
11074
11075
11076
11077 for {
11078 c := v.AuxInt
11079 s := v.Aux
11080 y := v.Args[1]
11081 v_0 := v.Args[0]
11082 if v_0.Op != OpAMD64ADDLconst {
11083 break
11084 }
11085 d := v_0.AuxInt
11086 x := v_0.Args[0]
11087 if !(is32Bit(c+d) && x.Op != OpSB) {
11088 break
11089 }
11090 v.reset(OpAMD64LEAL8)
11091 v.AuxInt = c + d
11092 v.Aux = s
11093 v.AddArg(x)
11094 v.AddArg(y)
11095 return true
11096 }
11097
11098
11099
11100 for {
11101 c := v.AuxInt
11102 s := v.Aux
11103 _ = v.Args[1]
11104 x := v.Args[0]
11105 v_1 := v.Args[1]
11106 if v_1.Op != OpAMD64ADDLconst {
11107 break
11108 }
11109 d := v_1.AuxInt
11110 y := v_1.Args[0]
11111 if !(is32Bit(c+8*d) && y.Op != OpSB) {
11112 break
11113 }
11114 v.reset(OpAMD64LEAL8)
11115 v.AuxInt = c + 8*d
11116 v.Aux = s
11117 v.AddArg(x)
11118 v.AddArg(y)
11119 return true
11120 }
11121 return false
11122 }
11123 func rewriteValueAMD64_OpAMD64LEAQ_0(v *Value) bool {
11124
// match: (LEAQ [c] {s} (ADDQconst [d] x))
// cond: is32Bit(c+d)
// result: (LEAQ [c+d] {s} x)
11128 c := v.AuxInt
11129 s := v.Aux
11130 v_0 := v.Args[0]
11131 if v_0.Op != OpAMD64ADDQconst {
11132 break
11133 }
11134 d := v_0.AuxInt
11135 x := v_0.Args[0]
11136 if !(is32Bit(c + d)) {
11137 break
11138 }
11139 v.reset(OpAMD64LEAQ)
11140 v.AuxInt = c + d
11141 v.Aux = s
11142 v.AddArg(x)
11143 return true
11144 }
// match: (LEAQ [c] {s} (ADDQ x y))
// cond: x.Op != OpSB && y.Op != OpSB
// result: (LEAQ1 [c] {s} x y)
11148 for {
11149 c := v.AuxInt
11150 s := v.Aux
11151 v_0 := v.Args[0]
11152 if v_0.Op != OpAMD64ADDQ {
11153 break
11154 }
11155 y := v_0.Args[1]
11156 x := v_0.Args[0]
11157 if !(x.Op != OpSB && y.Op != OpSB) {
11158 break
11159 }
11160 v.reset(OpAMD64LEAQ1)
11161 v.AuxInt = c
11162 v.Aux = s
11163 v.AddArg(x)
11164 v.AddArg(y)
11165 return true
11166 }
11167
// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
11171 off1 := v.AuxInt
11172 sym1 := v.Aux
11173 v_0 := v.Args[0]
11174 if v_0.Op != OpAMD64LEAQ {
11175 break
11176 }
11177 off2 := v_0.AuxInt
11178 sym2 := v_0.Aux
11179 x := v_0.Args[0]
11180 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
11181 break
11182 }
11183 v.reset(OpAMD64LEAQ)
11184 v.AuxInt = off1 + off2
11185 v.Aux = mergeSym(sym1, sym2)
11186 v.AddArg(x)
11187 return true
11188 }
11189
// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
11193 off1 := v.AuxInt
11194 sym1 := v.Aux
11195 v_0 := v.Args[0]
11196 if v_0.Op != OpAMD64LEAQ1 {
11197 break
11198 }
11199 off2 := v_0.AuxInt
11200 sym2 := v_0.Aux
11201 y := v_0.Args[1]
11202 x := v_0.Args[0]
11203 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
11204 break
11205 }
11206 v.reset(OpAMD64LEAQ1)
11207 v.AuxInt = off1 + off2
11208 v.Aux = mergeSym(sym1, sym2)
11209 v.AddArg(x)
11210 v.AddArg(y)
11211 return true
11212 }
11213
// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
11217 off1 := v.AuxInt
11218 sym1 := v.Aux
11219 v_0 := v.Args[0]
11220 if v_0.Op != OpAMD64LEAQ2 {
11221 break
11222 }
11223 off2 := v_0.AuxInt
11224 sym2 := v_0.Aux
11225 y := v_0.Args[1]
11226 x := v_0.Args[0]
11227 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
11228 break
11229 }
11230 v.reset(OpAMD64LEAQ2)
11231 v.AuxInt = off1 + off2
11232 v.Aux = mergeSym(sym1, sym2)
11233 v.AddArg(x)
11234 v.AddArg(y)
11235 return true
11236 }
11237
// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
11241 off1 := v.AuxInt
11242 sym1 := v.Aux
11243 v_0 := v.Args[0]
11244 if v_0.Op != OpAMD64LEAQ4 {
11245 break
11246 }
11247 off2 := v_0.AuxInt
11248 sym2 := v_0.Aux
11249 y := v_0.Args[1]
11250 x := v_0.Args[0]
11251 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
11252 break
11253 }
11254 v.reset(OpAMD64LEAQ4)
11255 v.AuxInt = off1 + off2
11256 v.Aux = mergeSym(sym1, sym2)
11257 v.AddArg(x)
11258 v.AddArg(y)
11259 return true
11260 }
11261
// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
// result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
11265 off1 := v.AuxInt
11266 sym1 := v.Aux
11267 v_0 := v.Args[0]
11268 if v_0.Op != OpAMD64LEAQ8 {
11269 break
11270 }
11271 off2 := v_0.AuxInt
11272 sym2 := v_0.Aux
11273 y := v_0.Args[1]
11274 x := v_0.Args[0]
11275 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
11276 break
11277 }
11278 v.reset(OpAMD64LEAQ8)
11279 v.AuxInt = off1 + off2
11280 v.Aux = mergeSym(sym1, sym2)
11281 v.AddArg(x)
11282 v.AddArg(y)
11283 return true
11284 }
11285 return false
11286 }
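// The LEAQ merging rules keep the invariant that the pseudo-register SB
// (the static base) may only appear as the base of an addressing mode,
// never as the index; that is why the indexed forms require x.Op != OpSB.
// The is32Bit and canMergeSym guards ensure the combined displacement and
// symbol are still representable in a single addressing mode.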
11287 func rewriteValueAMD64_OpAMD64LEAQ1_0(v *Value) bool {
11288
11289
11290
11291 for {
11292 c := v.AuxInt
11293 s := v.Aux
11294 y := v.Args[1]
11295 v_0 := v.Args[0]
11296 if v_0.Op != OpAMD64ADDQconst {
11297 break
11298 }
11299 d := v_0.AuxInt
11300 x := v_0.Args[0]
11301 if !(is32Bit(c+d) && x.Op != OpSB) {
11302 break
11303 }
11304 v.reset(OpAMD64LEAQ1)
11305 v.AuxInt = c + d
11306 v.Aux = s
11307 v.AddArg(x)
11308 v.AddArg(y)
11309 return true
11310 }
11311
11312
11313
11314 for {
11315 c := v.AuxInt
11316 s := v.Aux
11317 _ = v.Args[1]
11318 y := v.Args[0]
11319 v_1 := v.Args[1]
11320 if v_1.Op != OpAMD64ADDQconst {
11321 break
11322 }
11323 d := v_1.AuxInt
11324 x := v_1.Args[0]
11325 if !(is32Bit(c+d) && x.Op != OpSB) {
11326 break
11327 }
11328 v.reset(OpAMD64LEAQ1)
11329 v.AuxInt = c + d
11330 v.Aux = s
11331 v.AddArg(x)
11332 v.AddArg(y)
11333 return true
11334 }
11335
11336
11337
11338 for {
11339 c := v.AuxInt
11340 s := v.Aux
11341 _ = v.Args[1]
11342 x := v.Args[0]
11343 v_1 := v.Args[1]
11344 if v_1.Op != OpAMD64SHLQconst {
11345 break
11346 }
11347 if v_1.AuxInt != 1 {
11348 break
11349 }
11350 y := v_1.Args[0]
11351 v.reset(OpAMD64LEAQ2)
11352 v.AuxInt = c
11353 v.Aux = s
11354 v.AddArg(x)
11355 v.AddArg(y)
11356 return true
11357 }
11358
11359
11360
11361 for {
11362 c := v.AuxInt
11363 s := v.Aux
11364 x := v.Args[1]
11365 v_0 := v.Args[0]
11366 if v_0.Op != OpAMD64SHLQconst {
11367 break
11368 }
11369 if v_0.AuxInt != 1 {
11370 break
11371 }
11372 y := v_0.Args[0]
11373 v.reset(OpAMD64LEAQ2)
11374 v.AuxInt = c
11375 v.Aux = s
11376 v.AddArg(x)
11377 v.AddArg(y)
11378 return true
11379 }
11380
11381
11382
11383 for {
11384 c := v.AuxInt
11385 s := v.Aux
11386 _ = v.Args[1]
11387 x := v.Args[0]
11388 v_1 := v.Args[1]
11389 if v_1.Op != OpAMD64SHLQconst {
11390 break
11391 }
11392 if v_1.AuxInt != 2 {
11393 break
11394 }
11395 y := v_1.Args[0]
11396 v.reset(OpAMD64LEAQ4)
11397 v.AuxInt = c
11398 v.Aux = s
11399 v.AddArg(x)
11400 v.AddArg(y)
11401 return true
11402 }
11403
11404
11405
11406 for {
11407 c := v.AuxInt
11408 s := v.Aux
11409 x := v.Args[1]
11410 v_0 := v.Args[0]
11411 if v_0.Op != OpAMD64SHLQconst {
11412 break
11413 }
11414 if v_0.AuxInt != 2 {
11415 break
11416 }
11417 y := v_0.Args[0]
11418 v.reset(OpAMD64LEAQ4)
11419 v.AuxInt = c
11420 v.Aux = s
11421 v.AddArg(x)
11422 v.AddArg(y)
11423 return true
11424 }
11425
11426
11427
11428 for {
11429 c := v.AuxInt
11430 s := v.Aux
11431 _ = v.Args[1]
11432 x := v.Args[0]
11433 v_1 := v.Args[1]
11434 if v_1.Op != OpAMD64SHLQconst {
11435 break
11436 }
11437 if v_1.AuxInt != 3 {
11438 break
11439 }
11440 y := v_1.Args[0]
11441 v.reset(OpAMD64LEAQ8)
11442 v.AuxInt = c
11443 v.Aux = s
11444 v.AddArg(x)
11445 v.AddArg(y)
11446 return true
11447 }
11448
11449
11450
11451 for {
11452 c := v.AuxInt
11453 s := v.Aux
11454 x := v.Args[1]
11455 v_0 := v.Args[0]
11456 if v_0.Op != OpAMD64SHLQconst {
11457 break
11458 }
11459 if v_0.AuxInt != 3 {
11460 break
11461 }
11462 y := v_0.Args[0]
11463 v.reset(OpAMD64LEAQ8)
11464 v.AuxInt = c
11465 v.Aux = s
11466 v.AddArg(x)
11467 v.AddArg(y)
11468 return true
11469 }
11470
11471
11472
11473 for {
11474 off1 := v.AuxInt
11475 sym1 := v.Aux
11476 y := v.Args[1]
11477 v_0 := v.Args[0]
11478 if v_0.Op != OpAMD64LEAQ {
11479 break
11480 }
11481 off2 := v_0.AuxInt
11482 sym2 := v_0.Aux
11483 x := v_0.Args[0]
11484 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
11485 break
11486 }
11487 v.reset(OpAMD64LEAQ1)
11488 v.AuxInt = off1 + off2
11489 v.Aux = mergeSym(sym1, sym2)
11490 v.AddArg(x)
11491 v.AddArg(y)
11492 return true
11493 }
11494
11495
11496
11497 for {
11498 off1 := v.AuxInt
11499 sym1 := v.Aux
11500 _ = v.Args[1]
11501 y := v.Args[0]
11502 v_1 := v.Args[1]
11503 if v_1.Op != OpAMD64LEAQ {
11504 break
11505 }
11506 off2 := v_1.AuxInt
11507 sym2 := v_1.Aux
11508 x := v_1.Args[0]
11509 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
11510 break
11511 }
11512 v.reset(OpAMD64LEAQ1)
11513 v.AuxInt = off1 + off2
11514 v.Aux = mergeSym(sym1, sym2)
11515 v.AddArg(x)
11516 v.AddArg(y)
11517 return true
11518 }
11519 return false
11520 }
11521 func rewriteValueAMD64_OpAMD64LEAQ2_0(v *Value) bool {
11522
11523
11524
11525 for {
11526 c := v.AuxInt
11527 s := v.Aux
11528 y := v.Args[1]
11529 v_0 := v.Args[0]
11530 if v_0.Op != OpAMD64ADDQconst {
11531 break
11532 }
11533 d := v_0.AuxInt
11534 x := v_0.Args[0]
11535 if !(is32Bit(c+d) && x.Op != OpSB) {
11536 break
11537 }
11538 v.reset(OpAMD64LEAQ2)
11539 v.AuxInt = c + d
11540 v.Aux = s
11541 v.AddArg(x)
11542 v.AddArg(y)
11543 return true
11544 }
11545
11546
11547
11548 for {
11549 c := v.AuxInt
11550 s := v.Aux
11551 _ = v.Args[1]
11552 x := v.Args[0]
11553 v_1 := v.Args[1]
11554 if v_1.Op != OpAMD64ADDQconst {
11555 break
11556 }
11557 d := v_1.AuxInt
11558 y := v_1.Args[0]
11559 if !(is32Bit(c+2*d) && y.Op != OpSB) {
11560 break
11561 }
11562 v.reset(OpAMD64LEAQ2)
11563 v.AuxInt = c + 2*d
11564 v.Aux = s
11565 v.AddArg(x)
11566 v.AddArg(y)
11567 return true
11568 }
11569
11570
11571
11572 for {
11573 c := v.AuxInt
11574 s := v.Aux
11575 _ = v.Args[1]
11576 x := v.Args[0]
11577 v_1 := v.Args[1]
11578 if v_1.Op != OpAMD64SHLQconst {
11579 break
11580 }
11581 if v_1.AuxInt != 1 {
11582 break
11583 }
11584 y := v_1.Args[0]
11585 v.reset(OpAMD64LEAQ4)
11586 v.AuxInt = c
11587 v.Aux = s
11588 v.AddArg(x)
11589 v.AddArg(y)
11590 return true
11591 }
11592
11593
11594
11595 for {
11596 c := v.AuxInt
11597 s := v.Aux
11598 _ = v.Args[1]
11599 x := v.Args[0]
11600 v_1 := v.Args[1]
11601 if v_1.Op != OpAMD64SHLQconst {
11602 break
11603 }
11604 if v_1.AuxInt != 2 {
11605 break
11606 }
11607 y := v_1.Args[0]
11608 v.reset(OpAMD64LEAQ8)
11609 v.AuxInt = c
11610 v.Aux = s
11611 v.AddArg(x)
11612 v.AddArg(y)
11613 return true
11614 }
11615
11616
11617
11618 for {
11619 off1 := v.AuxInt
11620 sym1 := v.Aux
11621 y := v.Args[1]
11622 v_0 := v.Args[0]
11623 if v_0.Op != OpAMD64LEAQ {
11624 break
11625 }
11626 off2 := v_0.AuxInt
11627 sym2 := v_0.Aux
11628 x := v_0.Args[0]
11629 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
11630 break
11631 }
11632 v.reset(OpAMD64LEAQ2)
11633 v.AuxInt = off1 + off2
11634 v.Aux = mergeSym(sym1, sym2)
11635 v.AddArg(x)
11636 v.AddArg(y)
11637 return true
11638 }
11639 return false
11640 }
11641 func rewriteValueAMD64_OpAMD64LEAQ4_0(v *Value) bool {
11642
11643
11644
11645 for {
11646 c := v.AuxInt
11647 s := v.Aux
11648 y := v.Args[1]
11649 v_0 := v.Args[0]
11650 if v_0.Op != OpAMD64ADDQconst {
11651 break
11652 }
11653 d := v_0.AuxInt
11654 x := v_0.Args[0]
11655 if !(is32Bit(c+d) && x.Op != OpSB) {
11656 break
11657 }
11658 v.reset(OpAMD64LEAQ4)
11659 v.AuxInt = c + d
11660 v.Aux = s
11661 v.AddArg(x)
11662 v.AddArg(y)
11663 return true
11664 }
11665
11666
11667
11668 for {
11669 c := v.AuxInt
11670 s := v.Aux
11671 _ = v.Args[1]
11672 x := v.Args[0]
11673 v_1 := v.Args[1]
11674 if v_1.Op != OpAMD64ADDQconst {
11675 break
11676 }
11677 d := v_1.AuxInt
11678 y := v_1.Args[0]
11679 if !(is32Bit(c+4*d) && y.Op != OpSB) {
11680 break
11681 }
11682 v.reset(OpAMD64LEAQ4)
11683 v.AuxInt = c + 4*d
11684 v.Aux = s
11685 v.AddArg(x)
11686 v.AddArg(y)
11687 return true
11688 }
11689
11690
11691
11692 for {
11693 c := v.AuxInt
11694 s := v.Aux
11695 _ = v.Args[1]
11696 x := v.Args[0]
11697 v_1 := v.Args[1]
11698 if v_1.Op != OpAMD64SHLQconst {
11699 break
11700 }
11701 if v_1.AuxInt != 1 {
11702 break
11703 }
11704 y := v_1.Args[0]
11705 v.reset(OpAMD64LEAQ8)
11706 v.AuxInt = c
11707 v.Aux = s
11708 v.AddArg(x)
11709 v.AddArg(y)
11710 return true
11711 }
11712
11713
11714
11715 for {
11716 off1 := v.AuxInt
11717 sym1 := v.Aux
11718 y := v.Args[1]
11719 v_0 := v.Args[0]
11720 if v_0.Op != OpAMD64LEAQ {
11721 break
11722 }
11723 off2 := v_0.AuxInt
11724 sym2 := v_0.Aux
11725 x := v_0.Args[0]
11726 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
11727 break
11728 }
11729 v.reset(OpAMD64LEAQ4)
11730 v.AuxInt = off1 + off2
11731 v.Aux = mergeSym(sym1, sym2)
11732 v.AddArg(x)
11733 v.AddArg(y)
11734 return true
11735 }
11736 return false
11737 }
11738 func rewriteValueAMD64_OpAMD64LEAQ8_0(v *Value) bool {
11739
11740
11741
11742 for {
11743 c := v.AuxInt
11744 s := v.Aux
11745 y := v.Args[1]
11746 v_0 := v.Args[0]
11747 if v_0.Op != OpAMD64ADDQconst {
11748 break
11749 }
11750 d := v_0.AuxInt
11751 x := v_0.Args[0]
11752 if !(is32Bit(c+d) && x.Op != OpSB) {
11753 break
11754 }
11755 v.reset(OpAMD64LEAQ8)
11756 v.AuxInt = c + d
11757 v.Aux = s
11758 v.AddArg(x)
11759 v.AddArg(y)
11760 return true
11761 }
11762
11763
11764
11765 for {
11766 c := v.AuxInt
11767 s := v.Aux
11768 _ = v.Args[1]
11769 x := v.Args[0]
11770 v_1 := v.Args[1]
11771 if v_1.Op != OpAMD64ADDQconst {
11772 break
11773 }
11774 d := v_1.AuxInt
11775 y := v_1.Args[0]
11776 if !(is32Bit(c+8*d) && y.Op != OpSB) {
11777 break
11778 }
11779 v.reset(OpAMD64LEAQ8)
11780 v.AuxInt = c + 8*d
11781 v.Aux = s
11782 v.AddArg(x)
11783 v.AddArg(y)
11784 return true
11785 }
11786 // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
11787 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB
11788 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
11789 for {
11790 off1 := v.AuxInt
11791 sym1 := v.Aux
11792 y := v.Args[1]
11793 v_0 := v.Args[0]
11794 if v_0.Op != OpAMD64LEAQ {
11795 break
11796 }
11797 off2 := v_0.AuxInt
11798 sym2 := v_0.Aux
11799 x := v_0.Args[0]
11800 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
11801 break
11802 }
11803 v.reset(OpAMD64LEAQ8)
11804 v.AuxInt = off1 + off2
11805 v.Aux = mergeSym(sym1, sym2)
11806 v.AddArg(x)
11807 v.AddArg(y)
11808 return true
11809 }
11810 return false
11811 }
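// Editor's note (not generated code): the LEAQ2/LEAQ4/LEAQ8 rules above fold
// constant displacements and shifts into scaled-index addressing. The scale
// matters when the constant sits on the index operand: for LEAQ8, an
// ADDQconst [d] on the index contributes 8*d to the displacement. A minimal
// Go sketch of source code that exercises this shape (illustrative only):
//
//	func elem(a []int64, i int) *int64 {
//		return &a[i+3] // base + 8*(i+3) = (base+24) + 8*i: one LEAQ8 [24]
//	}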
11812 func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool {
11813 b := v.Block
11814 // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
11815 // cond: x.Uses == 1 && clobber(x)
11816 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
11817 for {
11818 x := v.Args[0]
11819 if x.Op != OpAMD64MOVBload {
11820 break
11821 }
11822 off := x.AuxInt
11823 sym := x.Aux
11824 mem := x.Args[1]
11825 ptr := x.Args[0]
11826 if !(x.Uses == 1 && clobber(x)) {
11827 break
11828 }
11829 b = x.Block
11830 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
11831 v.reset(OpCopy)
11832 v.AddArg(v0)
11833 v0.AuxInt = off
11834 v0.Aux = sym
11835 v0.AddArg(ptr)
11836 v0.AddArg(mem)
11837 return true
11838 }
11839 // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
11840 // cond: x.Uses == 1 && clobber(x)
11841 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
11842 for {
11843 x := v.Args[0]
11844 if x.Op != OpAMD64MOVWload {
11845 break
11846 }
11847 off := x.AuxInt
11848 sym := x.Aux
11849 mem := x.Args[1]
11850 ptr := x.Args[0]
11851 if !(x.Uses == 1 && clobber(x)) {
11852 break
11853 }
11854 b = x.Block
11855 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
11856 v.reset(OpCopy)
11857 v.AddArg(v0)
11858 v0.AuxInt = off
11859 v0.Aux = sym
11860 v0.AddArg(ptr)
11861 v0.AddArg(mem)
11862 return true
11863 }
11864 // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
11865 // cond: x.Uses == 1 && clobber(x)
11866 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
11867 for {
11868 x := v.Args[0]
11869 if x.Op != OpAMD64MOVLload {
11870 break
11871 }
11872 off := x.AuxInt
11873 sym := x.Aux
11874 mem := x.Args[1]
11875 ptr := x.Args[0]
11876 if !(x.Uses == 1 && clobber(x)) {
11877 break
11878 }
11879 b = x.Block
11880 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
11881 v.reset(OpCopy)
11882 v.AddArg(v0)
11883 v0.AuxInt = off
11884 v0.Aux = sym
11885 v0.AddArg(ptr)
11886 v0.AddArg(mem)
11887 return true
11888 }
11889 // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
11890 // cond: x.Uses == 1 && clobber(x)
11891 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
11892 for {
11893 x := v.Args[0]
11894 if x.Op != OpAMD64MOVQload {
11895 break
11896 }
11897 off := x.AuxInt
11898 sym := x.Aux
11899 mem := x.Args[1]
11900 ptr := x.Args[0]
11901 if !(x.Uses == 1 && clobber(x)) {
11902 break
11903 }
11904 b = x.Block
11905 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
11906 v.reset(OpCopy)
11907 v.AddArg(v0)
11908 v0.AuxInt = off
11909 v0.Aux = sym
11910 v0.AddArg(ptr)
11911 v0.AddArg(mem)
11912 return true
11913 }
11914 // match: (MOVBQSX (ANDLconst [c] x))
11915 // cond: c & 0x80 == 0
11916 // result: (ANDLconst [c & 0x7f] x)
11917 for {
11918 v_0 := v.Args[0]
11919 if v_0.Op != OpAMD64ANDLconst {
11920 break
11921 }
11922 c := v_0.AuxInt
11923 x := v_0.Args[0]
11924 if !(c&0x80 == 0) {
11925 break
11926 }
11927 v.reset(OpAMD64ANDLconst)
11928 v.AuxInt = c & 0x7f
11929 v.AddArg(x)
11930 return true
11931 }
11932 // match: (MOVBQSX (MOVBQSX x))
11933 // cond:
11934 // result: (MOVBQSX x)
11935 for {
11936 v_0 := v.Args[0]
11937 if v_0.Op != OpAMD64MOVBQSX {
11938 break
11939 }
11940 x := v_0.Args[0]
11941 v.reset(OpAMD64MOVBQSX)
11942 v.AddArg(x)
11943 return true
11944 }
11945 return false
11946 }
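// Editor's note (not generated code): when a narrow load feeds MOVBQSX and
// has no other use, the pair is fused into the dedicated sign-extending load
// MOVBQSXload, saving an instruction. A sketch of Go source producing this
// shape (illustrative only):
//
//	func signExtByte(b []int8) int64 {
//		return int64(b[0]) // load + extend fuse into one sign-extending load
//	}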
11947 func rewriteValueAMD64_OpAMD64MOVBQSXload_0(v *Value) bool {
11948 // match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
11949 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
11950 // result: (MOVBQSX x)
11951 for {
11952 off := v.AuxInt
11953 sym := v.Aux
11954 _ = v.Args[1]
11955 ptr := v.Args[0]
11956 v_1 := v.Args[1]
11957 if v_1.Op != OpAMD64MOVBstore {
11958 break
11959 }
11960 off2 := v_1.AuxInt
11961 sym2 := v_1.Aux
11962 _ = v_1.Args[2]
11963 ptr2 := v_1.Args[0]
11964 x := v_1.Args[1]
11965 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
11966 break
11967 }
11968 v.reset(OpAMD64MOVBQSX)
11969 v.AddArg(x)
11970 return true
11971 }
11972 // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
11973 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
11974 // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
11975 for {
11976 off1 := v.AuxInt
11977 sym1 := v.Aux
11978 mem := v.Args[1]
11979 v_0 := v.Args[0]
11980 if v_0.Op != OpAMD64LEAQ {
11981 break
11982 }
11983 off2 := v_0.AuxInt
11984 sym2 := v_0.Aux
11985 base := v_0.Args[0]
11986 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
11987 break
11988 }
11989 v.reset(OpAMD64MOVBQSXload)
11990 v.AuxInt = off1 + off2
11991 v.Aux = mergeSym(sym1, sym2)
11992 v.AddArg(base)
11993 v.AddArg(mem)
11994 return true
11995 }
11996 return false
11997 }
11998 func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool {
11999 b := v.Block
12000 // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
12001 // cond: x.Uses == 1 && clobber(x)
12002 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
12003 for {
12004 x := v.Args[0]
12005 if x.Op != OpAMD64MOVBload {
12006 break
12007 }
12008 off := x.AuxInt
12009 sym := x.Aux
12010 mem := x.Args[1]
12011 ptr := x.Args[0]
12012 if !(x.Uses == 1 && clobber(x)) {
12013 break
12014 }
12015 b = x.Block
12016 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
12017 v.reset(OpCopy)
12018 v.AddArg(v0)
12019 v0.AuxInt = off
12020 v0.Aux = sym
12021 v0.AddArg(ptr)
12022 v0.AddArg(mem)
12023 return true
12024 }
12025 // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
12026 // cond: x.Uses == 1 && clobber(x)
12027 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
12028 for {
12029 x := v.Args[0]
12030 if x.Op != OpAMD64MOVWload {
12031 break
12032 }
12033 off := x.AuxInt
12034 sym := x.Aux
12035 mem := x.Args[1]
12036 ptr := x.Args[0]
12037 if !(x.Uses == 1 && clobber(x)) {
12038 break
12039 }
12040 b = x.Block
12041 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
12042 v.reset(OpCopy)
12043 v.AddArg(v0)
12044 v0.AuxInt = off
12045 v0.Aux = sym
12046 v0.AddArg(ptr)
12047 v0.AddArg(mem)
12048 return true
12049 }
12050 // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
12051 // cond: x.Uses == 1 && clobber(x)
12052 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
12053 for {
12054 x := v.Args[0]
12055 if x.Op != OpAMD64MOVLload {
12056 break
12057 }
12058 off := x.AuxInt
12059 sym := x.Aux
12060 mem := x.Args[1]
12061 ptr := x.Args[0]
12062 if !(x.Uses == 1 && clobber(x)) {
12063 break
12064 }
12065 b = x.Block
12066 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
12067 v.reset(OpCopy)
12068 v.AddArg(v0)
12069 v0.AuxInt = off
12070 v0.Aux = sym
12071 v0.AddArg(ptr)
12072 v0.AddArg(mem)
12073 return true
12074 }
12075 // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
12076 // cond: x.Uses == 1 && clobber(x)
12077 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
12078 for {
12079 x := v.Args[0]
12080 if x.Op != OpAMD64MOVQload {
12081 break
12082 }
12083 off := x.AuxInt
12084 sym := x.Aux
12085 mem := x.Args[1]
12086 ptr := x.Args[0]
12087 if !(x.Uses == 1 && clobber(x)) {
12088 break
12089 }
12090 b = x.Block
12091 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
12092 v.reset(OpCopy)
12093 v.AddArg(v0)
12094 v0.AuxInt = off
12095 v0.Aux = sym
12096 v0.AddArg(ptr)
12097 v0.AddArg(mem)
12098 return true
12099 }
12100 // match: (MOVBQZX x)
12101 // cond: zeroUpper56Bits(x,3)
12102 // result: x
12103 for {
12104 x := v.Args[0]
12105 if !(zeroUpper56Bits(x, 3)) {
12106 break
12107 }
12108 v.reset(OpCopy)
12109 v.Type = x.Type
12110 v.AddArg(x)
12111 return true
12112 }
12113 // match: (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem))
12114 // cond: x.Uses == 1 && clobber(x)
12115 // result: @x.Block (MOVBloadidx1 <v.Type> [off] {sym} ptr idx mem)
12116 for {
12117 x := v.Args[0]
12118 if x.Op != OpAMD64MOVBloadidx1 {
12119 break
12120 }
12121 off := x.AuxInt
12122 sym := x.Aux
12123 mem := x.Args[2]
12124 ptr := x.Args[0]
12125 idx := x.Args[1]
12126 if !(x.Uses == 1 && clobber(x)) {
12127 break
12128 }
12129 b = x.Block
12130 v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, v.Type)
12131 v.reset(OpCopy)
12132 v.AddArg(v0)
12133 v0.AuxInt = off
12134 v0.Aux = sym
12135 v0.AddArg(ptr)
12136 v0.AddArg(idx)
12137 v0.AddArg(mem)
12138 return true
12139 }
12140 // match: (MOVBQZX (ANDLconst [c] x))
12141 // cond:
12142 // result: (ANDLconst [c & 0xff] x)
12143 for {
12144 v_0 := v.Args[0]
12145 if v_0.Op != OpAMD64ANDLconst {
12146 break
12147 }
12148 c := v_0.AuxInt
12149 x := v_0.Args[0]
12150 v.reset(OpAMD64ANDLconst)
12151 v.AuxInt = c & 0xff
12152 v.AddArg(x)
12153 return true
12154 }
12155 // match: (MOVBQZX (MOVBQZX x))
12156 // cond:
12157 // result: (MOVBQZX x)
12158 for {
12159 v_0 := v.Args[0]
12160 if v_0.Op != OpAMD64MOVBQZX {
12161 break
12162 }
12163 x := v_0.Args[0]
12164 v.reset(OpAMD64MOVBQZX)
12165 v.AddArg(x)
12166 return true
12167 }
12168 return false
12169 }
12170 func rewriteValueAMD64_OpAMD64MOVBatomicload_0(v *Value) bool {
12171 // match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
12172 // cond: is32Bit(off1+off2)
12173 // result: (MOVBatomicload [off1+off2] {sym} ptr mem)
12174 for {
12175 off1 := v.AuxInt
12176 sym := v.Aux
12177 mem := v.Args[1]
12178 v_0 := v.Args[0]
12179 if v_0.Op != OpAMD64ADDQconst {
12180 break
12181 }
12182 off2 := v_0.AuxInt
12183 ptr := v_0.Args[0]
12184 if !(is32Bit(off1 + off2)) {
12185 break
12186 }
12187 v.reset(OpAMD64MOVBatomicload)
12188 v.AuxInt = off1 + off2
12189 v.Aux = sym
12190 v.AddArg(ptr)
12191 v.AddArg(mem)
12192 return true
12193 }
12194 // match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
12195 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
12196 // result: (MOVBatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
12197 for {
12198 off1 := v.AuxInt
12199 sym1 := v.Aux
12200 mem := v.Args[1]
12201 v_0 := v.Args[0]
12202 if v_0.Op != OpAMD64LEAQ {
12203 break
12204 }
12205 off2 := v_0.AuxInt
12206 sym2 := v_0.Aux
12207 ptr := v_0.Args[0]
12208 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
12209 break
12210 }
12211 v.reset(OpAMD64MOVBatomicload)
12212 v.AuxInt = off1 + off2
12213 v.Aux = mergeSym(sym1, sym2)
12214 v.AddArg(ptr)
12215 v.AddArg(mem)
12216 return true
12217 }
12218 return false
12219 }
12220 func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool {
12221 // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
12222 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
12223 // result: (MOVBQZX x)
12224 for {
12225 off := v.AuxInt
12226 sym := v.Aux
12227 _ = v.Args[1]
12228 ptr := v.Args[0]
12229 v_1 := v.Args[1]
12230 if v_1.Op != OpAMD64MOVBstore {
12231 break
12232 }
12233 off2 := v_1.AuxInt
12234 sym2 := v_1.Aux
12235 _ = v_1.Args[2]
12236 ptr2 := v_1.Args[0]
12237 x := v_1.Args[1]
12238 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
12239 break
12240 }
12241 v.reset(OpAMD64MOVBQZX)
12242 v.AddArg(x)
12243 return true
12244 }
12245 // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
12246 // cond: is32Bit(off1+off2)
12247 // result: (MOVBload [off1+off2] {sym} ptr mem)
12248 for {
12249 off1 := v.AuxInt
12250 sym := v.Aux
12251 mem := v.Args[1]
12252 v_0 := v.Args[0]
12253 if v_0.Op != OpAMD64ADDQconst {
12254 break
12255 }
12256 off2 := v_0.AuxInt
12257 ptr := v_0.Args[0]
12258 if !(is32Bit(off1 + off2)) {
12259 break
12260 }
12261 v.reset(OpAMD64MOVBload)
12262 v.AuxInt = off1 + off2
12263 v.Aux = sym
12264 v.AddArg(ptr)
12265 v.AddArg(mem)
12266 return true
12267 }
12268 // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
12269 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
12270 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
12271 for {
12272 off1 := v.AuxInt
12273 sym1 := v.Aux
12274 mem := v.Args[1]
12275 v_0 := v.Args[0]
12276 if v_0.Op != OpAMD64LEAQ {
12277 break
12278 }
12279 off2 := v_0.AuxInt
12280 sym2 := v_0.Aux
12281 base := v_0.Args[0]
12282 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
12283 break
12284 }
12285 v.reset(OpAMD64MOVBload)
12286 v.AuxInt = off1 + off2
12287 v.Aux = mergeSym(sym1, sym2)
12288 v.AddArg(base)
12289 v.AddArg(mem)
12290 return true
12291 }
12292 // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
12293 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
12294 // result: (MOVBloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
12295 for {
12296 off1 := v.AuxInt
12297 sym1 := v.Aux
12298 mem := v.Args[1]
12299 v_0 := v.Args[0]
12300 if v_0.Op != OpAMD64LEAQ1 {
12301 break
12302 }
12303 off2 := v_0.AuxInt
12304 sym2 := v_0.Aux
12305 idx := v_0.Args[1]
12306 ptr := v_0.Args[0]
12307 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
12308 break
12309 }
12310 v.reset(OpAMD64MOVBloadidx1)
12311 v.AuxInt = off1 + off2
12312 v.Aux = mergeSym(sym1, sym2)
12313 v.AddArg(ptr)
12314 v.AddArg(idx)
12315 v.AddArg(mem)
12316 return true
12317 }
12318 // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem)
12319 // cond: ptr.Op != OpSB
12320 // result: (MOVBloadidx1 [off] {sym} ptr idx mem)
12321 for {
12322 off := v.AuxInt
12323 sym := v.Aux
12324 mem := v.Args[1]
12325 v_0 := v.Args[0]
12326 if v_0.Op != OpAMD64ADDQ {
12327 break
12328 }
12329 idx := v_0.Args[1]
12330 ptr := v_0.Args[0]
12331 if !(ptr.Op != OpSB) {
12332 break
12333 }
12334 v.reset(OpAMD64MOVBloadidx1)
12335 v.AuxInt = off
12336 v.Aux = sym
12337 v.AddArg(ptr)
12338 v.AddArg(idx)
12339 v.AddArg(mem)
12340 return true
12341 }
12342 // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
12343 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
12344 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
12345 for {
12346 off1 := v.AuxInt
12347 sym1 := v.Aux
12348 mem := v.Args[1]
12349 v_0 := v.Args[0]
12350 if v_0.Op != OpAMD64LEAL {
12351 break
12352 }
12353 off2 := v_0.AuxInt
12354 sym2 := v_0.Aux
12355 base := v_0.Args[0]
12356 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
12357 break
12358 }
12359 v.reset(OpAMD64MOVBload)
12360 v.AuxInt = off1 + off2
12361 v.Aux = mergeSym(sym1, sym2)
12362 v.AddArg(base)
12363 v.AddArg(mem)
12364 return true
12365 }
12366 // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
12367 // cond: is32Bit(off1+off2)
12368 // result: (MOVBload [off1+off2] {sym} ptr mem)
12369 for {
12370 off1 := v.AuxInt
12371 sym := v.Aux
12372 mem := v.Args[1]
12373 v_0 := v.Args[0]
12374 if v_0.Op != OpAMD64ADDLconst {
12375 break
12376 }
12377 off2 := v_0.AuxInt
12378 ptr := v_0.Args[0]
12379 if !(is32Bit(off1 + off2)) {
12380 break
12381 }
12382 v.reset(OpAMD64MOVBload)
12383 v.AuxInt = off1 + off2
12384 v.Aux = sym
12385 v.AddArg(ptr)
12386 v.AddArg(mem)
12387 return true
12388 }
12389 // match: (MOVBload [off] {sym} (SB) _)
12390 // cond: symIsRO(sym)
12391 // result: (MOVLconst [int64(read8(sym, off))])
12392 for {
12393 off := v.AuxInt
12394 sym := v.Aux
12395 _ = v.Args[1]
12396 v_0 := v.Args[0]
12397 if v_0.Op != OpSB {
12398 break
12399 }
12400 if !(symIsRO(sym)) {
12401 break
12402 }
12403 v.reset(OpAMD64MOVLconst)
12404 v.AuxInt = int64(read8(sym, off))
12405 return true
12406 }
12407 return false
12408 }
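// Editor's note (not generated code): besides address folding, MOVBload has
// two notable rules above: store-to-load forwarding (a byte loaded from the
// address just stored to becomes MOVBQZX of the stored value, skipping
// memory), and compile-time reads from read-only data (base SB with
// symIsRO(sym)), which fold to a MOVLconst via read8. A sketch of the
// forwarding case (illustrative only):
//
//	func roundTrip(p *byte, v byte) byte {
//		*p = v
//		return *p // forwarded: becomes a zero-extension of v, no reload
//	}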
12409 func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool {
12410 // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
12411 // cond: is32Bit(c+d)
12412 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
12413 for {
12414 c := v.AuxInt
12415 sym := v.Aux
12416 mem := v.Args[2]
12417 v_0 := v.Args[0]
12418 if v_0.Op != OpAMD64ADDQconst {
12419 break
12420 }
12421 d := v_0.AuxInt
12422 ptr := v_0.Args[0]
12423 idx := v.Args[1]
12424 if !(is32Bit(c + d)) {
12425 break
12426 }
12427 v.reset(OpAMD64MOVBloadidx1)
12428 v.AuxInt = c + d
12429 v.Aux = sym
12430 v.AddArg(ptr)
12431 v.AddArg(idx)
12432 v.AddArg(mem)
12433 return true
12434 }
12435 // match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
12436 // cond: is32Bit(c+d)
12437 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
12438 for {
12439 c := v.AuxInt
12440 sym := v.Aux
12441 mem := v.Args[2]
12442 idx := v.Args[0]
12443 v_1 := v.Args[1]
12444 if v_1.Op != OpAMD64ADDQconst {
12445 break
12446 }
12447 d := v_1.AuxInt
12448 ptr := v_1.Args[0]
12449 if !(is32Bit(c + d)) {
12450 break
12451 }
12452 v.reset(OpAMD64MOVBloadidx1)
12453 v.AuxInt = c + d
12454 v.Aux = sym
12455 v.AddArg(ptr)
12456 v.AddArg(idx)
12457 v.AddArg(mem)
12458 return true
12459 }
12460 // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
12461 // cond: is32Bit(c+d)
12462 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
12463 for {
12464 c := v.AuxInt
12465 sym := v.Aux
12466 mem := v.Args[2]
12467 ptr := v.Args[0]
12468 v_1 := v.Args[1]
12469 if v_1.Op != OpAMD64ADDQconst {
12470 break
12471 }
12472 d := v_1.AuxInt
12473 idx := v_1.Args[0]
12474 if !(is32Bit(c + d)) {
12475 break
12476 }
12477 v.reset(OpAMD64MOVBloadidx1)
12478 v.AuxInt = c + d
12479 v.Aux = sym
12480 v.AddArg(ptr)
12481 v.AddArg(idx)
12482 v.AddArg(mem)
12483 return true
12484 }
12485 // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
12486 // cond: is32Bit(c+d)
12487 // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem)
12488 for {
12489 c := v.AuxInt
12490 sym := v.Aux
12491 mem := v.Args[2]
12492 v_0 := v.Args[0]
12493 if v_0.Op != OpAMD64ADDQconst {
12494 break
12495 }
12496 d := v_0.AuxInt
12497 idx := v_0.Args[0]
12498 ptr := v.Args[1]
12499 if !(is32Bit(c + d)) {
12500 break
12501 }
12502 v.reset(OpAMD64MOVBloadidx1)
12503 v.AuxInt = c + d
12504 v.Aux = sym
12505 v.AddArg(ptr)
12506 v.AddArg(idx)
12507 v.AddArg(mem)
12508 return true
12509 }
12510 // match: (MOVBloadidx1 [i] {s} p (MOVQconst [c]) mem)
12511 // cond: is32Bit(i+c)
12512 // result: (MOVBload [i+c] {s} p mem)
12513 for {
12514 i := v.AuxInt
12515 s := v.Aux
12516 mem := v.Args[2]
12517 p := v.Args[0]
12518 v_1 := v.Args[1]
12519 if v_1.Op != OpAMD64MOVQconst {
12520 break
12521 }
12522 c := v_1.AuxInt
12523 if !(is32Bit(i + c)) {
12524 break
12525 }
12526 v.reset(OpAMD64MOVBload)
12527 v.AuxInt = i + c
12528 v.Aux = s
12529 v.AddArg(p)
12530 v.AddArg(mem)
12531 return true
12532 }
12533 // match: (MOVBloadidx1 [i] {s} (MOVQconst [c]) p mem)
12534 // cond: is32Bit(i+c)
12535 // result: (MOVBload [i+c] {s} p mem)
12536 for {
12537 i := v.AuxInt
12538 s := v.Aux
12539 mem := v.Args[2]
12540 v_0 := v.Args[0]
12541 if v_0.Op != OpAMD64MOVQconst {
12542 break
12543 }
12544 c := v_0.AuxInt
12545 p := v.Args[1]
12546 if !(is32Bit(i + c)) {
12547 break
12548 }
12549 v.reset(OpAMD64MOVBload)
12550 v.AuxInt = i + c
12551 v.Aux = s
12552 v.AddArg(p)
12553 v.AddArg(mem)
12554 return true
12555 }
12556 return false
12557 }
12558 func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool {
12559 // match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
12560 // cond: y.Uses == 1
12561 // result: (SETLstore [off] {sym} ptr x mem)
12562 for {
12563 off := v.AuxInt
12564 sym := v.Aux
12565 mem := v.Args[2]
12566 ptr := v.Args[0]
12567 y := v.Args[1]
12568 if y.Op != OpAMD64SETL {
12569 break
12570 }
12571 x := y.Args[0]
12572 if !(y.Uses == 1) {
12573 break
12574 }
12575 v.reset(OpAMD64SETLstore)
12576 v.AuxInt = off
12577 v.Aux = sym
12578 v.AddArg(ptr)
12579 v.AddArg(x)
12580 v.AddArg(mem)
12581 return true
12582 }
12583 // match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
12584 // cond: y.Uses == 1
12585 // result: (SETLEstore [off] {sym} ptr x mem)
12586 for {
12587 off := v.AuxInt
12588 sym := v.Aux
12589 mem := v.Args[2]
12590 ptr := v.Args[0]
12591 y := v.Args[1]
12592 if y.Op != OpAMD64SETLE {
12593 break
12594 }
12595 x := y.Args[0]
12596 if !(y.Uses == 1) {
12597 break
12598 }
12599 v.reset(OpAMD64SETLEstore)
12600 v.AuxInt = off
12601 v.Aux = sym
12602 v.AddArg(ptr)
12603 v.AddArg(x)
12604 v.AddArg(mem)
12605 return true
12606 }
12607 // match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
12608 // cond: y.Uses == 1
12609 // result: (SETGstore [off] {sym} ptr x mem)
12610 for {
12611 off := v.AuxInt
12612 sym := v.Aux
12613 mem := v.Args[2]
12614 ptr := v.Args[0]
12615 y := v.Args[1]
12616 if y.Op != OpAMD64SETG {
12617 break
12618 }
12619 x := y.Args[0]
12620 if !(y.Uses == 1) {
12621 break
12622 }
12623 v.reset(OpAMD64SETGstore)
12624 v.AuxInt = off
12625 v.Aux = sym
12626 v.AddArg(ptr)
12627 v.AddArg(x)
12628 v.AddArg(mem)
12629 return true
12630 }
12631 // match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
12632 // cond: y.Uses == 1
12633 // result: (SETGEstore [off] {sym} ptr x mem)
12634 for {
12635 off := v.AuxInt
12636 sym := v.Aux
12637 mem := v.Args[2]
12638 ptr := v.Args[0]
12639 y := v.Args[1]
12640 if y.Op != OpAMD64SETGE {
12641 break
12642 }
12643 x := y.Args[0]
12644 if !(y.Uses == 1) {
12645 break
12646 }
12647 v.reset(OpAMD64SETGEstore)
12648 v.AuxInt = off
12649 v.Aux = sym
12650 v.AddArg(ptr)
12651 v.AddArg(x)
12652 v.AddArg(mem)
12653 return true
12654 }
12655 // match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
12656 // cond: y.Uses == 1
12657 // result: (SETEQstore [off] {sym} ptr x mem)
12658 for {
12659 off := v.AuxInt
12660 sym := v.Aux
12661 mem := v.Args[2]
12662 ptr := v.Args[0]
12663 y := v.Args[1]
12664 if y.Op != OpAMD64SETEQ {
12665 break
12666 }
12667 x := y.Args[0]
12668 if !(y.Uses == 1) {
12669 break
12670 }
12671 v.reset(OpAMD64SETEQstore)
12672 v.AuxInt = off
12673 v.Aux = sym
12674 v.AddArg(ptr)
12675 v.AddArg(x)
12676 v.AddArg(mem)
12677 return true
12678 }
12679 // match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
12680 // cond: y.Uses == 1
12681 // result: (SETNEstore [off] {sym} ptr x mem)
12682 for {
12683 off := v.AuxInt
12684 sym := v.Aux
12685 mem := v.Args[2]
12686 ptr := v.Args[0]
12687 y := v.Args[1]
12688 if y.Op != OpAMD64SETNE {
12689 break
12690 }
12691 x := y.Args[0]
12692 if !(y.Uses == 1) {
12693 break
12694 }
12695 v.reset(OpAMD64SETNEstore)
12696 v.AuxInt = off
12697 v.Aux = sym
12698 v.AddArg(ptr)
12699 v.AddArg(x)
12700 v.AddArg(mem)
12701 return true
12702 }
12703 // match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
12704 // cond: y.Uses == 1
12705 // result: (SETBstore [off] {sym} ptr x mem)
12706 for {
12707 off := v.AuxInt
12708 sym := v.Aux
12709 mem := v.Args[2]
12710 ptr := v.Args[0]
12711 y := v.Args[1]
12712 if y.Op != OpAMD64SETB {
12713 break
12714 }
12715 x := y.Args[0]
12716 if !(y.Uses == 1) {
12717 break
12718 }
12719 v.reset(OpAMD64SETBstore)
12720 v.AuxInt = off
12721 v.Aux = sym
12722 v.AddArg(ptr)
12723 v.AddArg(x)
12724 v.AddArg(mem)
12725 return true
12726 }
12727 // match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
12728 // cond: y.Uses == 1
12729 // result: (SETBEstore [off] {sym} ptr x mem)
12730 for {
12731 off := v.AuxInt
12732 sym := v.Aux
12733 mem := v.Args[2]
12734 ptr := v.Args[0]
12735 y := v.Args[1]
12736 if y.Op != OpAMD64SETBE {
12737 break
12738 }
12739 x := y.Args[0]
12740 if !(y.Uses == 1) {
12741 break
12742 }
12743 v.reset(OpAMD64SETBEstore)
12744 v.AuxInt = off
12745 v.Aux = sym
12746 v.AddArg(ptr)
12747 v.AddArg(x)
12748 v.AddArg(mem)
12749 return true
12750 }
12751 // match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
12752 // cond: y.Uses == 1
12753 // result: (SETAstore [off] {sym} ptr x mem)
12754 for {
12755 off := v.AuxInt
12756 sym := v.Aux
12757 mem := v.Args[2]
12758 ptr := v.Args[0]
12759 y := v.Args[1]
12760 if y.Op != OpAMD64SETA {
12761 break
12762 }
12763 x := y.Args[0]
12764 if !(y.Uses == 1) {
12765 break
12766 }
12767 v.reset(OpAMD64SETAstore)
12768 v.AuxInt = off
12769 v.Aux = sym
12770 v.AddArg(ptr)
12771 v.AddArg(x)
12772 v.AddArg(mem)
12773 return true
12774 }
12775 // match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
12776 // cond: y.Uses == 1
12777 // result: (SETAEstore [off] {sym} ptr x mem)
12778 for {
12779 off := v.AuxInt
12780 sym := v.Aux
12781 mem := v.Args[2]
12782 ptr := v.Args[0]
12783 y := v.Args[1]
12784 if y.Op != OpAMD64SETAE {
12785 break
12786 }
12787 x := y.Args[0]
12788 if !(y.Uses == 1) {
12789 break
12790 }
12791 v.reset(OpAMD64SETAEstore)
12792 v.AuxInt = off
12793 v.Aux = sym
12794 v.AddArg(ptr)
12795 v.AddArg(x)
12796 v.AddArg(mem)
12797 return true
12798 }
12799 return false
12800 }
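// Editor's note (not generated code): the ten rules above fuse a single-use
// SETcc (flag materialization) into the byte store itself, producing
// SETLstore, SETEQstore, etc., so a boolean that is only stored never needs
// a register. A sketch of the triggering pattern (illustrative only):
//
//	func storeLess(dst *bool, x, y int64) {
//		*dst = x < y // compare, then SETL straight to memory: one fused op
//	}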
12801 func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool {
12802 b := v.Block
12803 // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
12804 // cond:
12805 // result: (MOVBstore [off] {sym} ptr x mem)
12806 for {
12807 off := v.AuxInt
12808 sym := v.Aux
12809 mem := v.Args[2]
12810 ptr := v.Args[0]
12811 v_1 := v.Args[1]
12812 if v_1.Op != OpAMD64MOVBQSX {
12813 break
12814 }
12815 x := v_1.Args[0]
12816 v.reset(OpAMD64MOVBstore)
12817 v.AuxInt = off
12818 v.Aux = sym
12819 v.AddArg(ptr)
12820 v.AddArg(x)
12821 v.AddArg(mem)
12822 return true
12823 }
12824 // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
12825 // cond:
12826 // result: (MOVBstore [off] {sym} ptr x mem)
12827 for {
12828 off := v.AuxInt
12829 sym := v.Aux
12830 mem := v.Args[2]
12831 ptr := v.Args[0]
12832 v_1 := v.Args[1]
12833 if v_1.Op != OpAMD64MOVBQZX {
12834 break
12835 }
12836 x := v_1.Args[0]
12837 v.reset(OpAMD64MOVBstore)
12838 v.AuxInt = off
12839 v.Aux = sym
12840 v.AddArg(ptr)
12841 v.AddArg(x)
12842 v.AddArg(mem)
12843 return true
12844 }
12845 // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
12846 // cond: is32Bit(off1+off2)
12847 // result: (MOVBstore [off1+off2] {sym} ptr val mem)
12848 for {
12849 off1 := v.AuxInt
12850 sym := v.Aux
12851 mem := v.Args[2]
12852 v_0 := v.Args[0]
12853 if v_0.Op != OpAMD64ADDQconst {
12854 break
12855 }
12856 off2 := v_0.AuxInt
12857 ptr := v_0.Args[0]
12858 val := v.Args[1]
12859 if !(is32Bit(off1 + off2)) {
12860 break
12861 }
12862 v.reset(OpAMD64MOVBstore)
12863 v.AuxInt = off1 + off2
12864 v.Aux = sym
12865 v.AddArg(ptr)
12866 v.AddArg(val)
12867 v.AddArg(mem)
12868 return true
12869 }
12870 // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
12871 // cond: validOff(off)
12872 // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)), off)] {sym} ptr mem)
12873 for {
12874 off := v.AuxInt
12875 sym := v.Aux
12876 mem := v.Args[2]
12877 ptr := v.Args[0]
12878 v_1 := v.Args[1]
12879 if v_1.Op != OpAMD64MOVLconst {
12880 break
12881 }
12882 c := v_1.AuxInt
12883 if !(validOff(off)) {
12884 break
12885 }
12886 v.reset(OpAMD64MOVBstoreconst)
12887 v.AuxInt = makeValAndOff(int64(int8(c)), off)
12888 v.Aux = sym
12889 v.AddArg(ptr)
12890 v.AddArg(mem)
12891 return true
12892 }
12893 // match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
12894 // cond: validOff(off)
12895 // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)), off)] {sym} ptr mem)
12896 for {
12897 off := v.AuxInt
12898 sym := v.Aux
12899 mem := v.Args[2]
12900 ptr := v.Args[0]
12901 v_1 := v.Args[1]
12902 if v_1.Op != OpAMD64MOVQconst {
12903 break
12904 }
12905 c := v_1.AuxInt
12906 if !(validOff(off)) {
12907 break
12908 }
12909 v.reset(OpAMD64MOVBstoreconst)
12910 v.AuxInt = makeValAndOff(int64(int8(c)), off)
12911 v.Aux = sym
12912 v.AddArg(ptr)
12913 v.AddArg(mem)
12914 return true
12915 }
12916 // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
12917 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
12918 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
12919 for {
12920 off1 := v.AuxInt
12921 sym1 := v.Aux
12922 mem := v.Args[2]
12923 v_0 := v.Args[0]
12924 if v_0.Op != OpAMD64LEAQ {
12925 break
12926 }
12927 off2 := v_0.AuxInt
12928 sym2 := v_0.Aux
12929 base := v_0.Args[0]
12930 val := v.Args[1]
12931 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
12932 break
12933 }
12934 v.reset(OpAMD64MOVBstore)
12935 v.AuxInt = off1 + off2
12936 v.Aux = mergeSym(sym1, sym2)
12937 v.AddArg(base)
12938 v.AddArg(val)
12939 v.AddArg(mem)
12940 return true
12941 }
12942 // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
12943 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
12944 // result: (MOVBstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
12945 for {
12946 off1 := v.AuxInt
12947 sym1 := v.Aux
12948 mem := v.Args[2]
12949 v_0 := v.Args[0]
12950 if v_0.Op != OpAMD64LEAQ1 {
12951 break
12952 }
12953 off2 := v_0.AuxInt
12954 sym2 := v_0.Aux
12955 idx := v_0.Args[1]
12956 ptr := v_0.Args[0]
12957 val := v.Args[1]
12958 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
12959 break
12960 }
12961 v.reset(OpAMD64MOVBstoreidx1)
12962 v.AuxInt = off1 + off2
12963 v.Aux = mergeSym(sym1, sym2)
12964 v.AddArg(ptr)
12965 v.AddArg(idx)
12966 v.AddArg(val)
12967 v.AddArg(mem)
12968 return true
12969 }
12970 // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem)
12971 // cond: ptr.Op != OpSB
12972 // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem)
12973 for {
12974 off := v.AuxInt
12975 sym := v.Aux
12976 mem := v.Args[2]
12977 v_0 := v.Args[0]
12978 if v_0.Op != OpAMD64ADDQ {
12979 break
12980 }
12981 idx := v_0.Args[1]
12982 ptr := v_0.Args[0]
12983 val := v.Args[1]
12984 if !(ptr.Op != OpSB) {
12985 break
12986 }
12987 v.reset(OpAMD64MOVBstoreidx1)
12988 v.AuxInt = off
12989 v.Aux = sym
12990 v.AddArg(ptr)
12991 v.AddArg(idx)
12992 v.AddArg(val)
12993 v.AddArg(mem)
12994 return true
12995 }
12996 // match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
12997 // cond: x0.Uses == 1 && clobber(x0)
12998 // result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
12999 for {
13000 i := v.AuxInt
13001 s := v.Aux
13002 _ = v.Args[2]
13003 p := v.Args[0]
13004 w := v.Args[1]
13005 x0 := v.Args[2]
13006 if x0.Op != OpAMD64MOVBstore {
13007 break
13008 }
13009 if x0.AuxInt != i-1 {
13010 break
13011 }
13012 if x0.Aux != s {
13013 break
13014 }
13015 mem := x0.Args[2]
13016 if p != x0.Args[0] {
13017 break
13018 }
13019 x0_1 := x0.Args[1]
13020 if x0_1.Op != OpAMD64SHRWconst {
13021 break
13022 }
13023 if x0_1.AuxInt != 8 {
13024 break
13025 }
13026 if w != x0_1.Args[0] {
13027 break
13028 }
13029 if !(x0.Uses == 1 && clobber(x0)) {
13030 break
13031 }
13032 v.reset(OpAMD64MOVWstore)
13033 v.AuxInt = i - 1
13034 v.Aux = s
13035 v.AddArg(p)
13036 v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
13037 v0.AuxInt = 8
13038 v0.AddArg(w)
13039 v.AddArg(v0)
13040 v.AddArg(mem)
13041 return true
13042 }
13043 // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
13044 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
13045 // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
13046 for {
13047 i := v.AuxInt
13048 s := v.Aux
13049 _ = v.Args[2]
13050 p := v.Args[0]
13051 w := v.Args[1]
13052 x2 := v.Args[2]
13053 if x2.Op != OpAMD64MOVBstore {
13054 break
13055 }
13056 if x2.AuxInt != i-1 {
13057 break
13058 }
13059 if x2.Aux != s {
13060 break
13061 }
13062 _ = x2.Args[2]
13063 if p != x2.Args[0] {
13064 break
13065 }
13066 x2_1 := x2.Args[1]
13067 if x2_1.Op != OpAMD64SHRLconst {
13068 break
13069 }
13070 if x2_1.AuxInt != 8 {
13071 break
13072 }
13073 if w != x2_1.Args[0] {
13074 break
13075 }
13076 x1 := x2.Args[2]
13077 if x1.Op != OpAMD64MOVBstore {
13078 break
13079 }
13080 if x1.AuxInt != i-2 {
13081 break
13082 }
13083 if x1.Aux != s {
13084 break
13085 }
13086 _ = x1.Args[2]
13087 if p != x1.Args[0] {
13088 break
13089 }
13090 x1_1 := x1.Args[1]
13091 if x1_1.Op != OpAMD64SHRLconst {
13092 break
13093 }
13094 if x1_1.AuxInt != 16 {
13095 break
13096 }
13097 if w != x1_1.Args[0] {
13098 break
13099 }
13100 x0 := x1.Args[2]
13101 if x0.Op != OpAMD64MOVBstore {
13102 break
13103 }
13104 if x0.AuxInt != i-3 {
13105 break
13106 }
13107 if x0.Aux != s {
13108 break
13109 }
13110 mem := x0.Args[2]
13111 if p != x0.Args[0] {
13112 break
13113 }
13114 x0_1 := x0.Args[1]
13115 if x0_1.Op != OpAMD64SHRLconst {
13116 break
13117 }
13118 if x0_1.AuxInt != 24 {
13119 break
13120 }
13121 if w != x0_1.Args[0] {
13122 break
13123 }
13124 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) {
13125 break
13126 }
13127 v.reset(OpAMD64MOVLstore)
13128 v.AuxInt = i - 3
13129 v.Aux = s
13130 v.AddArg(p)
13131 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
13132 v0.AddArg(w)
13133 v.AddArg(v0)
13134 v.AddArg(mem)
13135 return true
13136 }
13137 return false
13138 }
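// Editor's note (not generated code): the last two rules above collapse runs
// of byte stores of w, w>>8, ... at descending offsets into one wide store of
// a byte-swapped value (ROLWconst for 2 bytes, BSWAPL for 4). This is the
// big-endian store idiom; a hand-written equivalent of
// binary.BigEndian.PutUint32 is the canonical trigger (illustrative only):
//
//	func putUint32BE(b []byte, w uint32) {
//		b[3] = byte(w)
//		b[2] = byte(w >> 8)
//		b[1] = byte(w >> 16)
//		b[0] = byte(w >> 24) // all four fold into BSWAPL + one MOVLstore
//	}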
13139 func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool {
13140 b := v.Block
13141 typ := &b.Func.Config.Types
13142 // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
13143 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
13144 // result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
13145 for {
13146 i := v.AuxInt
13147 s := v.Aux
13148 _ = v.Args[2]
13149 p := v.Args[0]
13150 w := v.Args[1]
13151 x6 := v.Args[2]
13152 if x6.Op != OpAMD64MOVBstore {
13153 break
13154 }
13155 if x6.AuxInt != i-1 {
13156 break
13157 }
13158 if x6.Aux != s {
13159 break
13160 }
13161 _ = x6.Args[2]
13162 if p != x6.Args[0] {
13163 break
13164 }
13165 x6_1 := x6.Args[1]
13166 if x6_1.Op != OpAMD64SHRQconst {
13167 break
13168 }
13169 if x6_1.AuxInt != 8 {
13170 break
13171 }
13172 if w != x6_1.Args[0] {
13173 break
13174 }
13175 x5 := x6.Args[2]
13176 if x5.Op != OpAMD64MOVBstore {
13177 break
13178 }
13179 if x5.AuxInt != i-2 {
13180 break
13181 }
13182 if x5.Aux != s {
13183 break
13184 }
13185 _ = x5.Args[2]
13186 if p != x5.Args[0] {
13187 break
13188 }
13189 x5_1 := x5.Args[1]
13190 if x5_1.Op != OpAMD64SHRQconst {
13191 break
13192 }
13193 if x5_1.AuxInt != 16 {
13194 break
13195 }
13196 if w != x5_1.Args[0] {
13197 break
13198 }
13199 x4 := x5.Args[2]
13200 if x4.Op != OpAMD64MOVBstore {
13201 break
13202 }
13203 if x4.AuxInt != i-3 {
13204 break
13205 }
13206 if x4.Aux != s {
13207 break
13208 }
13209 _ = x4.Args[2]
13210 if p != x4.Args[0] {
13211 break
13212 }
13213 x4_1 := x4.Args[1]
13214 if x4_1.Op != OpAMD64SHRQconst {
13215 break
13216 }
13217 if x4_1.AuxInt != 24 {
13218 break
13219 }
13220 if w != x4_1.Args[0] {
13221 break
13222 }
13223 x3 := x4.Args[2]
13224 if x3.Op != OpAMD64MOVBstore {
13225 break
13226 }
13227 if x3.AuxInt != i-4 {
13228 break
13229 }
13230 if x3.Aux != s {
13231 break
13232 }
13233 _ = x3.Args[2]
13234 if p != x3.Args[0] {
13235 break
13236 }
13237 x3_1 := x3.Args[1]
13238 if x3_1.Op != OpAMD64SHRQconst {
13239 break
13240 }
13241 if x3_1.AuxInt != 32 {
13242 break
13243 }
13244 if w != x3_1.Args[0] {
13245 break
13246 }
13247 x2 := x3.Args[2]
13248 if x2.Op != OpAMD64MOVBstore {
13249 break
13250 }
13251 if x2.AuxInt != i-5 {
13252 break
13253 }
13254 if x2.Aux != s {
13255 break
13256 }
13257 _ = x2.Args[2]
13258 if p != x2.Args[0] {
13259 break
13260 }
13261 x2_1 := x2.Args[1]
13262 if x2_1.Op != OpAMD64SHRQconst {
13263 break
13264 }
13265 if x2_1.AuxInt != 40 {
13266 break
13267 }
13268 if w != x2_1.Args[0] {
13269 break
13270 }
13271 x1 := x2.Args[2]
13272 if x1.Op != OpAMD64MOVBstore {
13273 break
13274 }
13275 if x1.AuxInt != i-6 {
13276 break
13277 }
13278 if x1.Aux != s {
13279 break
13280 }
13281 _ = x1.Args[2]
13282 if p != x1.Args[0] {
13283 break
13284 }
13285 x1_1 := x1.Args[1]
13286 if x1_1.Op != OpAMD64SHRQconst {
13287 break
13288 }
13289 if x1_1.AuxInt != 48 {
13290 break
13291 }
13292 if w != x1_1.Args[0] {
13293 break
13294 }
13295 x0 := x1.Args[2]
13296 if x0.Op != OpAMD64MOVBstore {
13297 break
13298 }
13299 if x0.AuxInt != i-7 {
13300 break
13301 }
13302 if x0.Aux != s {
13303 break
13304 }
13305 mem := x0.Args[2]
13306 if p != x0.Args[0] {
13307 break
13308 }
13309 x0_1 := x0.Args[1]
13310 if x0_1.Op != OpAMD64SHRQconst {
13311 break
13312 }
13313 if x0_1.AuxInt != 56 {
13314 break
13315 }
13316 if w != x0_1.Args[0] {
13317 break
13318 }
13319 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) {
13320 break
13321 }
13322 v.reset(OpAMD64MOVQstore)
13323 v.AuxInt = i - 7
13324 v.Aux = s
13325 v.AddArg(p)
13326 v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
13327 v0.AddArg(w)
13328 v.AddArg(v0)
13329 v.AddArg(mem)
13330 return true
13331 }
13332 // match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
13333 // cond: x.Uses == 1 && clobber(x)
13334 // result: (MOVWstore [i-1] {s} p w mem)
13335 for {
13336 i := v.AuxInt
13337 s := v.Aux
13338 _ = v.Args[2]
13339 p := v.Args[0]
13340 v_1 := v.Args[1]
13341 if v_1.Op != OpAMD64SHRWconst {
13342 break
13343 }
13344 if v_1.AuxInt != 8 {
13345 break
13346 }
13347 w := v_1.Args[0]
13348 x := v.Args[2]
13349 if x.Op != OpAMD64MOVBstore {
13350 break
13351 }
13352 if x.AuxInt != i-1 {
13353 break
13354 }
13355 if x.Aux != s {
13356 break
13357 }
13358 mem := x.Args[2]
13359 if p != x.Args[0] {
13360 break
13361 }
13362 if w != x.Args[1] {
13363 break
13364 }
13365 if !(x.Uses == 1 && clobber(x)) {
13366 break
13367 }
13368 v.reset(OpAMD64MOVWstore)
13369 v.AuxInt = i - 1
13370 v.Aux = s
13371 v.AddArg(p)
13372 v.AddArg(w)
13373 v.AddArg(mem)
13374 return true
13375 }
13376 // match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
13377 // cond: x.Uses == 1 && clobber(x)
13378 // result: (MOVWstore [i-1] {s} p w mem)
13379 for {
13380 i := v.AuxInt
13381 s := v.Aux
13382 _ = v.Args[2]
13383 p := v.Args[0]
13384 v_1 := v.Args[1]
13385 if v_1.Op != OpAMD64SHRLconst {
13386 break
13387 }
13388 if v_1.AuxInt != 8 {
13389 break
13390 }
13391 w := v_1.Args[0]
13392 x := v.Args[2]
13393 if x.Op != OpAMD64MOVBstore {
13394 break
13395 }
13396 if x.AuxInt != i-1 {
13397 break
13398 }
13399 if x.Aux != s {
13400 break
13401 }
13402 mem := x.Args[2]
13403 if p != x.Args[0] {
13404 break
13405 }
13406 if w != x.Args[1] {
13407 break
13408 }
13409 if !(x.Uses == 1 && clobber(x)) {
13410 break
13411 }
13412 v.reset(OpAMD64MOVWstore)
13413 v.AuxInt = i - 1
13414 v.Aux = s
13415 v.AddArg(p)
13416 v.AddArg(w)
13417 v.AddArg(mem)
13418 return true
13419 }
13420 // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
13421 // cond: x.Uses == 1 && clobber(x)
13422 // result: (MOVWstore [i-1] {s} p w mem)
13423 for {
13424 i := v.AuxInt
13425 s := v.Aux
13426 _ = v.Args[2]
13427 p := v.Args[0]
13428 v_1 := v.Args[1]
13429 if v_1.Op != OpAMD64SHRQconst {
13430 break
13431 }
13432 if v_1.AuxInt != 8 {
13433 break
13434 }
13435 w := v_1.Args[0]
13436 x := v.Args[2]
13437 if x.Op != OpAMD64MOVBstore {
13438 break
13439 }
13440 if x.AuxInt != i-1 {
13441 break
13442 }
13443 if x.Aux != s {
13444 break
13445 }
13446 mem := x.Args[2]
13447 if p != x.Args[0] {
13448 break
13449 }
13450 if w != x.Args[1] {
13451 break
13452 }
13453 if !(x.Uses == 1 && clobber(x)) {
13454 break
13455 }
13456 v.reset(OpAMD64MOVWstore)
13457 v.AuxInt = i - 1
13458 v.Aux = s
13459 v.AddArg(p)
13460 v.AddArg(w)
13461 v.AddArg(mem)
13462 return true
13463 }
13464 // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRWconst [8] w) mem))
13465 // cond: x.Uses == 1 && clobber(x)
13466 // result: (MOVWstore [i] {s} p w mem)
13467 for {
13468 i := v.AuxInt
13469 s := v.Aux
13470 _ = v.Args[2]
13471 p := v.Args[0]
13472 w := v.Args[1]
13473 x := v.Args[2]
13474 if x.Op != OpAMD64MOVBstore {
13475 break
13476 }
13477 if x.AuxInt != i+1 {
13478 break
13479 }
13480 if x.Aux != s {
13481 break
13482 }
13483 mem := x.Args[2]
13484 if p != x.Args[0] {
13485 break
13486 }
13487 x_1 := x.Args[1]
13488 if x_1.Op != OpAMD64SHRWconst {
13489 break
13490 }
13491 if x_1.AuxInt != 8 {
13492 break
13493 }
13494 if w != x_1.Args[0] {
13495 break
13496 }
13497 if !(x.Uses == 1 && clobber(x)) {
13498 break
13499 }
13500 v.reset(OpAMD64MOVWstore)
13501 v.AuxInt = i
13502 v.Aux = s
13503 v.AddArg(p)
13504 v.AddArg(w)
13505 v.AddArg(mem)
13506 return true
13507 }
13508 // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRLconst [8] w) mem))
13509 // cond: x.Uses == 1 && clobber(x)
13510 // result: (MOVWstore [i] {s} p w mem)
13511 for {
13512 i := v.AuxInt
13513 s := v.Aux
13514 _ = v.Args[2]
13515 p := v.Args[0]
13516 w := v.Args[1]
13517 x := v.Args[2]
13518 if x.Op != OpAMD64MOVBstore {
13519 break
13520 }
13521 if x.AuxInt != i+1 {
13522 break
13523 }
13524 if x.Aux != s {
13525 break
13526 }
13527 mem := x.Args[2]
13528 if p != x.Args[0] {
13529 break
13530 }
13531 x_1 := x.Args[1]
13532 if x_1.Op != OpAMD64SHRLconst {
13533 break
13534 }
13535 if x_1.AuxInt != 8 {
13536 break
13537 }
13538 if w != x_1.Args[0] {
13539 break
13540 }
13541 if !(x.Uses == 1 && clobber(x)) {
13542 break
13543 }
13544 v.reset(OpAMD64MOVWstore)
13545 v.AuxInt = i
13546 v.Aux = s
13547 v.AddArg(p)
13548 v.AddArg(w)
13549 v.AddArg(mem)
13550 return true
13551 }
13552 // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRQconst [8] w) mem))
13553 // cond: x.Uses == 1 && clobber(x)
13554 // result: (MOVWstore [i] {s} p w mem)
13555 for {
13556 i := v.AuxInt
13557 s := v.Aux
13558 _ = v.Args[2]
13559 p := v.Args[0]
13560 w := v.Args[1]
13561 x := v.Args[2]
13562 if x.Op != OpAMD64MOVBstore {
13563 break
13564 }
13565 if x.AuxInt != i+1 {
13566 break
13567 }
13568 if x.Aux != s {
13569 break
13570 }
13571 mem := x.Args[2]
13572 if p != x.Args[0] {
13573 break
13574 }
13575 x_1 := x.Args[1]
13576 if x_1.Op != OpAMD64SHRQconst {
13577 break
13578 }
13579 if x_1.AuxInt != 8 {
13580 break
13581 }
13582 if w != x_1.Args[0] {
13583 break
13584 }
13585 if !(x.Uses == 1 && clobber(x)) {
13586 break
13587 }
13588 v.reset(OpAMD64MOVWstore)
13589 v.AuxInt = i
13590 v.Aux = s
13591 v.AddArg(p)
13592 v.AddArg(w)
13593 v.AddArg(mem)
13594 return true
13595 }
13596 // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
13597 // cond: x.Uses == 1 && clobber(x)
13598 // result: (MOVWstore [i-1] {s} p w0 mem)
13599 for {
13600 i := v.AuxInt
13601 s := v.Aux
13602 _ = v.Args[2]
13603 p := v.Args[0]
13604 v_1 := v.Args[1]
13605 if v_1.Op != OpAMD64SHRLconst {
13606 break
13607 }
13608 j := v_1.AuxInt
13609 w := v_1.Args[0]
13610 x := v.Args[2]
13611 if x.Op != OpAMD64MOVBstore {
13612 break
13613 }
13614 if x.AuxInt != i-1 {
13615 break
13616 }
13617 if x.Aux != s {
13618 break
13619 }
13620 mem := x.Args[2]
13621 if p != x.Args[0] {
13622 break
13623 }
13624 w0 := x.Args[1]
13625 if w0.Op != OpAMD64SHRLconst {
13626 break
13627 }
13628 if w0.AuxInt != j-8 {
13629 break
13630 }
13631 if w != w0.Args[0] {
13632 break
13633 }
13634 if !(x.Uses == 1 && clobber(x)) {
13635 break
13636 }
13637 v.reset(OpAMD64MOVWstore)
13638 v.AuxInt = i - 1
13639 v.Aux = s
13640 v.AddArg(p)
13641 v.AddArg(w0)
13642 v.AddArg(mem)
13643 return true
13644 }
13645 // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
13646 // cond: x.Uses == 1 && clobber(x)
13647 // result: (MOVWstore [i-1] {s} p w0 mem)
13648 for {
13649 i := v.AuxInt
13650 s := v.Aux
13651 _ = v.Args[2]
13652 p := v.Args[0]
13653 v_1 := v.Args[1]
13654 if v_1.Op != OpAMD64SHRQconst {
13655 break
13656 }
13657 j := v_1.AuxInt
13658 w := v_1.Args[0]
13659 x := v.Args[2]
13660 if x.Op != OpAMD64MOVBstore {
13661 break
13662 }
13663 if x.AuxInt != i-1 {
13664 break
13665 }
13666 if x.Aux != s {
13667 break
13668 }
13669 mem := x.Args[2]
13670 if p != x.Args[0] {
13671 break
13672 }
13673 w0 := x.Args[1]
13674 if w0.Op != OpAMD64SHRQconst {
13675 break
13676 }
13677 if w0.AuxInt != j-8 {
13678 break
13679 }
13680 if w != w0.Args[0] {
13681 break
13682 }
13683 if !(x.Uses == 1 && clobber(x)) {
13684 break
13685 }
13686 v.reset(OpAMD64MOVWstore)
13687 v.AuxInt = i - 1
13688 v.Aux = s
13689 v.AddArg(p)
13690 v.AddArg(w0)
13691 v.AddArg(mem)
13692 return true
13693 }
13694 // match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem))
13695 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)
13696 // result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
13697 for {
13698 i := v.AuxInt
13699 s := v.Aux
13700 _ = v.Args[2]
13701 p := v.Args[0]
13702 x1 := v.Args[1]
13703 if x1.Op != OpAMD64MOVBload {
13704 break
13705 }
13706 j := x1.AuxInt
13707 s2 := x1.Aux
13708 mem := x1.Args[1]
13709 p2 := x1.Args[0]
13710 mem2 := v.Args[2]
13711 if mem2.Op != OpAMD64MOVBstore {
13712 break
13713 }
13714 if mem2.AuxInt != i-1 {
13715 break
13716 }
13717 if mem2.Aux != s {
13718 break
13719 }
13720 _ = mem2.Args[2]
13721 if p != mem2.Args[0] {
13722 break
13723 }
13724 x2 := mem2.Args[1]
13725 if x2.Op != OpAMD64MOVBload {
13726 break
13727 }
13728 if x2.AuxInt != j-1 {
13729 break
13730 }
13731 if x2.Aux != s2 {
13732 break
13733 }
13734 _ = x2.Args[1]
13735 if p2 != x2.Args[0] {
13736 break
13737 }
13738 if mem != x2.Args[1] {
13739 break
13740 }
13741 if mem != mem2.Args[2] {
13742 break
13743 }
13744 if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) {
13745 break
13746 }
13747 v.reset(OpAMD64MOVWstore)
13748 v.AuxInt = i - 1
13749 v.Aux = s
13750 v.AddArg(p)
13751 v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16)
13752 v0.AuxInt = j - 1
13753 v0.Aux = s2
13754 v0.AddArg(p2)
13755 v0.AddArg(mem)
13756 v.AddArg(v0)
13757 v.AddArg(mem)
13758 return true
13759 }
13760 return false
13761 }
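// Editor's note (not generated code): the final rule above pairs two adjacent
// byte load/store copies into a single 16-bit MOVWload/MOVWstore, provided
// each intermediate value has exactly one use. A sketch of the triggering
// pattern (illustrative only):
//
//	func copyPair(dst, src []byte) {
//		dst[0] = src[0]
//		dst[1] = src[1] // both byte copies merge into one word copy
//	}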
13762 func rewriteValueAMD64_OpAMD64MOVBstore_30(v *Value) bool {
13763 // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
13764 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
13765 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
13766 for {
13767 off1 := v.AuxInt
13768 sym1 := v.Aux
13769 mem := v.Args[2]
13770 v_0 := v.Args[0]
13771 if v_0.Op != OpAMD64LEAL {
13772 break
13773 }
13774 off2 := v_0.AuxInt
13775 sym2 := v_0.Aux
13776 base := v_0.Args[0]
13777 val := v.Args[1]
13778 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
13779 break
13780 }
13781 v.reset(OpAMD64MOVBstore)
13782 v.AuxInt = off1 + off2
13783 v.Aux = mergeSym(sym1, sym2)
13784 v.AddArg(base)
13785 v.AddArg(val)
13786 v.AddArg(mem)
13787 return true
13788 }
13789 // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
13790 // cond: is32Bit(off1+off2)
13791 // result: (MOVBstore [off1+off2] {sym} ptr val mem)
13792 for {
13793 off1 := v.AuxInt
13794 sym := v.Aux
13795 mem := v.Args[2]
13796 v_0 := v.Args[0]
13797 if v_0.Op != OpAMD64ADDLconst {
13798 break
13799 }
13800 off2 := v_0.AuxInt
13801 ptr := v_0.Args[0]
13802 val := v.Args[1]
13803 if !(is32Bit(off1 + off2)) {
13804 break
13805 }
13806 v.reset(OpAMD64MOVBstore)
13807 v.AuxInt = off1 + off2
13808 v.Aux = sym
13809 v.AddArg(ptr)
13810 v.AddArg(val)
13811 v.AddArg(mem)
13812 return true
13813 }
13814 return false
13815 }
13816 func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool {
13817 // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
13818 // cond: ValAndOff(sc).canAdd(off)
13819 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
13820 for {
13821 sc := v.AuxInt
13822 s := v.Aux
13823 mem := v.Args[1]
13824 v_0 := v.Args[0]
13825 if v_0.Op != OpAMD64ADDQconst {
13826 break
13827 }
13828 off := v_0.AuxInt
13829 ptr := v_0.Args[0]
13830 if !(ValAndOff(sc).canAdd(off)) {
13831 break
13832 }
13833 v.reset(OpAMD64MOVBstoreconst)
13834 v.AuxInt = ValAndOff(sc).add(off)
13835 v.Aux = s
13836 v.AddArg(ptr)
13837 v.AddArg(mem)
13838 return true
13839 }
13840 // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
13841 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
13842 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
13843 for {
13844 sc := v.AuxInt
13845 sym1 := v.Aux
13846 mem := v.Args[1]
13847 v_0 := v.Args[0]
13848 if v_0.Op != OpAMD64LEAQ {
13849 break
13850 }
13851 off := v_0.AuxInt
13852 sym2 := v_0.Aux
13853 ptr := v_0.Args[0]
13854 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
13855 break
13856 }
13857 v.reset(OpAMD64MOVBstoreconst)
13858 v.AuxInt = ValAndOff(sc).add(off)
13859 v.Aux = mergeSym(sym1, sym2)
13860 v.AddArg(ptr)
13861 v.AddArg(mem)
13862 return true
13863 }
13864 // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
13865 // cond: canMergeSym(sym1, sym2)
13866 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
13867 for {
13868 x := v.AuxInt
13869 sym1 := v.Aux
13870 mem := v.Args[1]
13871 v_0 := v.Args[0]
13872 if v_0.Op != OpAMD64LEAQ1 {
13873 break
13874 }
13875 off := v_0.AuxInt
13876 sym2 := v_0.Aux
13877 idx := v_0.Args[1]
13878 ptr := v_0.Args[0]
13879 if !(canMergeSym(sym1, sym2)) {
13880 break
13881 }
13882 v.reset(OpAMD64MOVBstoreconstidx1)
13883 v.AuxInt = ValAndOff(x).add(off)
13884 v.Aux = mergeSym(sym1, sym2)
13885 v.AddArg(ptr)
13886 v.AddArg(idx)
13887 v.AddArg(mem)
13888 return true
13889 }
13890 // match: (MOVBstoreconst [x] {sym} (ADDQ ptr idx) mem)
13891 // cond:
13892 // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem)
13893 for {
13894 x := v.AuxInt
13895 sym := v.Aux
13896 mem := v.Args[1]
13897 v_0 := v.Args[0]
13898 if v_0.Op != OpAMD64ADDQ {
13899 break
13900 }
13901 idx := v_0.Args[1]
13902 ptr := v_0.Args[0]
13903 v.reset(OpAMD64MOVBstoreconstidx1)
13904 v.AuxInt = x
13905 v.Aux = sym
13906 v.AddArg(ptr)
13907 v.AddArg(idx)
13908 v.AddArg(mem)
13909 return true
13910 }
13911 // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
13912 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
13913 // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
13914 for {
13915 c := v.AuxInt
13916 s := v.Aux
13917 _ = v.Args[1]
13918 p := v.Args[0]
13919 x := v.Args[1]
13920 if x.Op != OpAMD64MOVBstoreconst {
13921 break
13922 }
13923 a := x.AuxInt
13924 if x.Aux != s {
13925 break
13926 }
13927 mem := x.Args[1]
13928 if p != x.Args[0] {
13929 break
13930 }
13931 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
13932 break
13933 }
13934 v.reset(OpAMD64MOVWstoreconst)
13935 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
13936 v.Aux = s
13937 v.AddArg(p)
13938 v.AddArg(mem)
13939 return true
13940 }
13941 // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
13942 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
13943 // result: (MOVWstoreconst [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p mem)
13944 for {
13945 a := v.AuxInt
13946 s := v.Aux
13947 _ = v.Args[1]
13948 p := v.Args[0]
13949 x := v.Args[1]
13950 if x.Op != OpAMD64MOVBstoreconst {
13951 break
13952 }
13953 c := x.AuxInt
13954 if x.Aux != s {
13955 break
13956 }
13957 mem := x.Args[1]
13958 if p != x.Args[0] {
13959 break
13960 }
13961 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
13962 break
13963 }
13964 v.reset(OpAMD64MOVWstoreconst)
13965 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
13966 v.Aux = s
13967 v.AddArg(p)
13968 v.AddArg(mem)
13969 return true
13970 }
13971 // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
13972 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
13973 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
13974 for {
13975 sc := v.AuxInt
13976 sym1 := v.Aux
13977 mem := v.Args[1]
13978 v_0 := v.Args[0]
13979 if v_0.Op != OpAMD64LEAL {
13980 break
13981 }
13982 off := v_0.AuxInt
13983 sym2 := v_0.Aux
13984 ptr := v_0.Args[0]
13985 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
13986 break
13987 }
13988 v.reset(OpAMD64MOVBstoreconst)
13989 v.AuxInt = ValAndOff(sc).add(off)
13990 v.Aux = mergeSym(sym1, sym2)
13991 v.AddArg(ptr)
13992 v.AddArg(mem)
13993 return true
13994 }
13995 // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
13996 // cond: ValAndOff(sc).canAdd(off)
13997 // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
13998 for {
13999 sc := v.AuxInt
14000 s := v.Aux
14001 mem := v.Args[1]
14002 v_0 := v.Args[0]
14003 if v_0.Op != OpAMD64ADDLconst {
14004 break
14005 }
14006 off := v_0.AuxInt
14007 ptr := v_0.Args[0]
14008 if !(ValAndOff(sc).canAdd(off)) {
14009 break
14010 }
14011 v.reset(OpAMD64MOVBstoreconst)
14012 v.AuxInt = ValAndOff(sc).add(off)
14013 v.Aux = s
14014 v.AddArg(ptr)
14015 v.AddArg(mem)
14016 return true
14017 }
14018 return false
14019 }
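// Editor's note (not generated code): MOVBstoreconst carries both the
// immediate and the displacement in one AuxInt through the ValAndOff helper,
// which is why the rules above manipulate it with canAdd/add, and why two
// byte-constant stores at adjacent offsets can merge into a MOVWstoreconst
// whose value is a.Val()&0xff | c.Val()<<8. A simplified model of the packing
// (an assumption for illustration, not the ssa package's exact code):
//
//	func packValAndOff(val, off int32) int64 {
//		// value in the high 32 bits, offset in the low 32 bits
//		return int64(val)<<32 | int64(uint32(off))
//	}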
14020 func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool {
14021 // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
14022 // cond: ValAndOff(x).canAdd(c)
14023 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
14024 for {
14025 x := v.AuxInt
14026 sym := v.Aux
14027 mem := v.Args[2]
14028 v_0 := v.Args[0]
14029 if v_0.Op != OpAMD64ADDQconst {
14030 break
14031 }
14032 c := v_0.AuxInt
14033 ptr := v_0.Args[0]
14034 idx := v.Args[1]
14035 if !(ValAndOff(x).canAdd(c)) {
14036 break
14037 }
14038 v.reset(OpAMD64MOVBstoreconstidx1)
14039 v.AuxInt = ValAndOff(x).add(c)
14040 v.Aux = sym
14041 v.AddArg(ptr)
14042 v.AddArg(idx)
14043 v.AddArg(mem)
14044 return true
14045 }
14046 // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
14047 // cond: ValAndOff(x).canAdd(c)
14048 // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
14049 for {
14050 x := v.AuxInt
14051 sym := v.Aux
14052 mem := v.Args[2]
14053 ptr := v.Args[0]
14054 v_1 := v.Args[1]
14055 if v_1.Op != OpAMD64ADDQconst {
14056 break
14057 }
14058 c := v_1.AuxInt
14059 idx := v_1.Args[0]
14060 if !(ValAndOff(x).canAdd(c)) {
14061 break
14062 }
14063 v.reset(OpAMD64MOVBstoreconstidx1)
14064 v.AuxInt = ValAndOff(x).add(c)
14065 v.Aux = sym
14066 v.AddArg(ptr)
14067 v.AddArg(idx)
14068 v.AddArg(mem)
14069 return true
14070 }
14071 // match: (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem))
14072 // cond: x.Uses == 1 && ValAndOff(a).Off() + 1 == ValAndOff(c).Off() && clobber(x)
14073 // result: (MOVWstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xff | ValAndOff(c).Val()<<8, ValAndOff(a).Off())] {s} p i mem)
14074 for {
14075 c := v.AuxInt
14076 s := v.Aux
14077 _ = v.Args[2]
14078 p := v.Args[0]
14079 i := v.Args[1]
14080 x := v.Args[2]
14081 if x.Op != OpAMD64MOVBstoreconstidx1 {
14082 break
14083 }
14084 a := x.AuxInt
14085 if x.Aux != s {
14086 break
14087 }
14088 mem := x.Args[2]
14089 if p != x.Args[0] {
14090 break
14091 }
14092 if i != x.Args[1] {
14093 break
14094 }
14095 if !(x.Uses == 1 && ValAndOff(a).Off()+1 == ValAndOff(c).Off() && clobber(x)) {
14096 break
14097 }
14098 v.reset(OpAMD64MOVWstoreconstidx1)
14099 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xff|ValAndOff(c).Val()<<8, ValAndOff(a).Off())
14100 v.Aux = s
14101 v.AddArg(p)
14102 v.AddArg(i)
14103 v.AddArg(mem)
14104 return true
14105 }
14106 return false
14107 }
14108 func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool {
14109 b := v.Block
14110 // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
14111 // cond: is32Bit(c+d)
14112 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
14113 for {
14114 c := v.AuxInt
14115 sym := v.Aux
14116 mem := v.Args[3]
14117 v_0 := v.Args[0]
14118 if v_0.Op != OpAMD64ADDQconst {
14119 break
14120 }
14121 d := v_0.AuxInt
14122 ptr := v_0.Args[0]
14123 idx := v.Args[1]
14124 val := v.Args[2]
14125 if !(is32Bit(c + d)) {
14126 break
14127 }
14128 v.reset(OpAMD64MOVBstoreidx1)
14129 v.AuxInt = c + d
14130 v.Aux = sym
14131 v.AddArg(ptr)
14132 v.AddArg(idx)
14133 v.AddArg(val)
14134 v.AddArg(mem)
14135 return true
14136 }
14137 // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
14138 // cond: is32Bit(c+d)
14139 // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem)
14140 for {
14141 c := v.AuxInt
14142 sym := v.Aux
14143 mem := v.Args[3]
14144 ptr := v.Args[0]
14145 v_1 := v.Args[1]
14146 if v_1.Op != OpAMD64ADDQconst {
14147 break
14148 }
14149 d := v_1.AuxInt
14150 idx := v_1.Args[0]
14151 val := v.Args[2]
14152 if !(is32Bit(c + d)) {
14153 break
14154 }
14155 v.reset(OpAMD64MOVBstoreidx1)
14156 v.AuxInt = c + d
14157 v.Aux = sym
14158 v.AddArg(ptr)
14159 v.AddArg(idx)
14160 v.AddArg(val)
14161 v.AddArg(mem)
14162 return true
14163 }
14164 // match: (MOVBstoreidx1 [i] {s} p idx w x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem))
14165 // cond: x0.Uses == 1 && clobber(x0)
14166 // result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst <w.Type> [8] w) mem)
14167 for {
14168 i := v.AuxInt
14169 s := v.Aux
14170 _ = v.Args[3]
14171 p := v.Args[0]
14172 idx := v.Args[1]
14173 w := v.Args[2]
14174 x0 := v.Args[3]
14175 if x0.Op != OpAMD64MOVBstoreidx1 {
14176 break
14177 }
14178 if x0.AuxInt != i-1 {
14179 break
14180 }
14181 if x0.Aux != s {
14182 break
14183 }
14184 mem := x0.Args[3]
14185 if p != x0.Args[0] {
14186 break
14187 }
14188 if idx != x0.Args[1] {
14189 break
14190 }
14191 x0_2 := x0.Args[2]
14192 if x0_2.Op != OpAMD64SHRWconst {
14193 break
14194 }
14195 if x0_2.AuxInt != 8 {
14196 break
14197 }
14198 if w != x0_2.Args[0] {
14199 break
14200 }
14201 if !(x0.Uses == 1 && clobber(x0)) {
14202 break
14203 }
14204 v.reset(OpAMD64MOVWstoreidx1)
14205 v.AuxInt = i - 1
14206 v.Aux = s
14207 v.AddArg(p)
14208 v.AddArg(idx)
14209 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type)
14210 v0.AuxInt = 8
14211 v0.AddArg(w)
14212 v.AddArg(v0)
14213 v.AddArg(mem)
14214 return true
14215 }
14216 // match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem))))
14217 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)
14218 // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL <w.Type> w) mem)
14219 for {
14220 i := v.AuxInt
14221 s := v.Aux
14222 _ = v.Args[3]
14223 p := v.Args[0]
14224 idx := v.Args[1]
14225 w := v.Args[2]
14226 x2 := v.Args[3]
14227 if x2.Op != OpAMD64MOVBstoreidx1 {
14228 break
14229 }
14230 if x2.AuxInt != i-1 {
14231 break
14232 }
14233 if x2.Aux != s {
14234 break
14235 }
14236 _ = x2.Args[3]
14237 if p != x2.Args[0] {
14238 break
14239 }
14240 if idx != x2.Args[1] {
14241 break
14242 }
14243 x2_2 := x2.Args[2]
14244 if x2_2.Op != OpAMD64SHRLconst {
14245 break
14246 }
14247 if x2_2.AuxInt != 8 {
14248 break
14249 }
14250 if w != x2_2.Args[0] {
14251 break
14252 }
14253 x1 := x2.Args[3]
14254 if x1.Op != OpAMD64MOVBstoreidx1 {
14255 break
14256 }
14257 if x1.AuxInt != i-2 {
14258 break
14259 }
14260 if x1.Aux != s {
14261 break
14262 }
14263 _ = x1.Args[3]
14264 if p != x1.Args[0] {
14265 break
14266 }
14267 if idx != x1.Args[1] {
14268 break
14269 }
14270 x1_2 := x1.Args[2]
14271 if x1_2.Op != OpAMD64SHRLconst {
14272 break
14273 }
14274 if x1_2.AuxInt != 16 {
14275 break
14276 }
14277 if w != x1_2.Args[0] {
14278 break
14279 }
14280 x0 := x1.Args[3]
14281 if x0.Op != OpAMD64MOVBstoreidx1 {
14282 break
14283 }
14284 if x0.AuxInt != i-3 {
14285 break
14286 }
14287 if x0.Aux != s {
14288 break
14289 }
14290 mem := x0.Args[3]
14291 if p != x0.Args[0] {
14292 break
14293 }
14294 if idx != x0.Args[1] {
14295 break
14296 }
14297 x0_2 := x0.Args[2]
14298 if x0_2.Op != OpAMD64SHRLconst {
14299 break
14300 }
14301 if x0_2.AuxInt != 24 {
14302 break
14303 }
14304 if w != x0_2.Args[0] {
14305 break
14306 }
14307 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) {
14308 break
14309 }
14310 v.reset(OpAMD64MOVLstoreidx1)
14311 v.AuxInt = i - 3
14312 v.Aux = s
14313 v.AddArg(p)
14314 v.AddArg(idx)
14315 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type)
14316 v0.AddArg(w)
14317 v.AddArg(v0)
14318 v.AddArg(mem)
14319 return true
14320 }
14321 // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem))))))))
14322 // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)
14323 // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ <w.Type> w) mem)
14324 for {
14325 i := v.AuxInt
14326 s := v.Aux
14327 _ = v.Args[3]
14328 p := v.Args[0]
14329 idx := v.Args[1]
14330 w := v.Args[2]
14331 x6 := v.Args[3]
14332 if x6.Op != OpAMD64MOVBstoreidx1 {
14333 break
14334 }
14335 if x6.AuxInt != i-1 {
14336 break
14337 }
14338 if x6.Aux != s {
14339 break
14340 }
14341 _ = x6.Args[3]
14342 if p != x6.Args[0] {
14343 break
14344 }
14345 if idx != x6.Args[1] {
14346 break
14347 }
14348 x6_2 := x6.Args[2]
14349 if x6_2.Op != OpAMD64SHRQconst {
14350 break
14351 }
14352 if x6_2.AuxInt != 8 {
14353 break
14354 }
14355 if w != x6_2.Args[0] {
14356 break
14357 }
14358 x5 := x6.Args[3]
14359 if x5.Op != OpAMD64MOVBstoreidx1 {
14360 break
14361 }
14362 if x5.AuxInt != i-2 {
14363 break
14364 }
14365 if x5.Aux != s {
14366 break
14367 }
14368 _ = x5.Args[3]
14369 if p != x5.Args[0] {
14370 break
14371 }
14372 if idx != x5.Args[1] {
14373 break
14374 }
14375 x5_2 := x5.Args[2]
14376 if x5_2.Op != OpAMD64SHRQconst {
14377 break
14378 }
14379 if x5_2.AuxInt != 16 {
14380 break
14381 }
14382 if w != x5_2.Args[0] {
14383 break
14384 }
14385 x4 := x5.Args[3]
14386 if x4.Op != OpAMD64MOVBstoreidx1 {
14387 break
14388 }
14389 if x4.AuxInt != i-3 {
14390 break
14391 }
14392 if x4.Aux != s {
14393 break
14394 }
14395 _ = x4.Args[3]
14396 if p != x4.Args[0] {
14397 break
14398 }
14399 if idx != x4.Args[1] {
14400 break
14401 }
14402 x4_2 := x4.Args[2]
14403 if x4_2.Op != OpAMD64SHRQconst {
14404 break
14405 }
14406 if x4_2.AuxInt != 24 {
14407 break
14408 }
14409 if w != x4_2.Args[0] {
14410 break
14411 }
14412 x3 := x4.Args[3]
14413 if x3.Op != OpAMD64MOVBstoreidx1 {
14414 break
14415 }
14416 if x3.AuxInt != i-4 {
14417 break
14418 }
14419 if x3.Aux != s {
14420 break
14421 }
14422 _ = x3.Args[3]
14423 if p != x3.Args[0] {
14424 break
14425 }
14426 if idx != x3.Args[1] {
14427 break
14428 }
14429 x3_2 := x3.Args[2]
14430 if x3_2.Op != OpAMD64SHRQconst {
14431 break
14432 }
14433 if x3_2.AuxInt != 32 {
14434 break
14435 }
14436 if w != x3_2.Args[0] {
14437 break
14438 }
14439 x2 := x3.Args[3]
14440 if x2.Op != OpAMD64MOVBstoreidx1 {
14441 break
14442 }
14443 if x2.AuxInt != i-5 {
14444 break
14445 }
14446 if x2.Aux != s {
14447 break
14448 }
14449 _ = x2.Args[3]
14450 if p != x2.Args[0] {
14451 break
14452 }
14453 if idx != x2.Args[1] {
14454 break
14455 }
14456 x2_2 := x2.Args[2]
14457 if x2_2.Op != OpAMD64SHRQconst {
14458 break
14459 }
14460 if x2_2.AuxInt != 40 {
14461 break
14462 }
14463 if w != x2_2.Args[0] {
14464 break
14465 }
14466 x1 := x2.Args[3]
14467 if x1.Op != OpAMD64MOVBstoreidx1 {
14468 break
14469 }
14470 if x1.AuxInt != i-6 {
14471 break
14472 }
14473 if x1.Aux != s {
14474 break
14475 }
14476 _ = x1.Args[3]
14477 if p != x1.Args[0] {
14478 break
14479 }
14480 if idx != x1.Args[1] {
14481 break
14482 }
14483 x1_2 := x1.Args[2]
14484 if x1_2.Op != OpAMD64SHRQconst {
14485 break
14486 }
14487 if x1_2.AuxInt != 48 {
14488 break
14489 }
14490 if w != x1_2.Args[0] {
14491 break
14492 }
14493 x0 := x1.Args[3]
14494 if x0.Op != OpAMD64MOVBstoreidx1 {
14495 break
14496 }
14497 if x0.AuxInt != i-7 {
14498 break
14499 }
14500 if x0.Aux != s {
14501 break
14502 }
14503 mem := x0.Args[3]
14504 if p != x0.Args[0] {
14505 break
14506 }
14507 if idx != x0.Args[1] {
14508 break
14509 }
14510 x0_2 := x0.Args[2]
14511 if x0_2.Op != OpAMD64SHRQconst {
14512 break
14513 }
14514 if x0_2.AuxInt != 56 {
14515 break
14516 }
14517 if w != x0_2.Args[0] {
14518 break
14519 }
14520 if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) {
14521 break
14522 }
14523 v.reset(OpAMD64MOVQstoreidx1)
14524 v.AuxInt = i - 7
14525 v.Aux = s
14526 v.AddArg(p)
14527 v.AddArg(idx)
14528 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type)
14529 v0.AddArg(w)
14530 v.AddArg(v0)
14531 v.AddArg(mem)
14532 return true
14533 }
14534 // match: (MOVBstoreidx1 [i] {s} p idx (SHRWconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
14535 // cond: x.Uses == 1 && clobber(x)
14536 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
14537 for {
14538 i := v.AuxInt
14539 s := v.Aux
14540 _ = v.Args[3]
14541 p := v.Args[0]
14542 idx := v.Args[1]
14543 v_2 := v.Args[2]
14544 if v_2.Op != OpAMD64SHRWconst {
14545 break
14546 }
14547 if v_2.AuxInt != 8 {
14548 break
14549 }
14550 w := v_2.Args[0]
14551 x := v.Args[3]
14552 if x.Op != OpAMD64MOVBstoreidx1 {
14553 break
14554 }
14555 if x.AuxInt != i-1 {
14556 break
14557 }
14558 if x.Aux != s {
14559 break
14560 }
14561 mem := x.Args[3]
14562 if p != x.Args[0] {
14563 break
14564 }
14565 if idx != x.Args[1] {
14566 break
14567 }
14568 if w != x.Args[2] {
14569 break
14570 }
14571 if !(x.Uses == 1 && clobber(x)) {
14572 break
14573 }
14574 v.reset(OpAMD64MOVWstoreidx1)
14575 v.AuxInt = i - 1
14576 v.Aux = s
14577 v.AddArg(p)
14578 v.AddArg(idx)
14579 v.AddArg(w)
14580 v.AddArg(mem)
14581 return true
14582 }
14583 // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
14584 // cond: x.Uses == 1 && clobber(x)
14585 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
14586 for {
14587 i := v.AuxInt
14588 s := v.Aux
14589 _ = v.Args[3]
14590 p := v.Args[0]
14591 idx := v.Args[1]
14592 v_2 := v.Args[2]
14593 if v_2.Op != OpAMD64SHRLconst {
14594 break
14595 }
14596 if v_2.AuxInt != 8 {
14597 break
14598 }
14599 w := v_2.Args[0]
14600 x := v.Args[3]
14601 if x.Op != OpAMD64MOVBstoreidx1 {
14602 break
14603 }
14604 if x.AuxInt != i-1 {
14605 break
14606 }
14607 if x.Aux != s {
14608 break
14609 }
14610 mem := x.Args[3]
14611 if p != x.Args[0] {
14612 break
14613 }
14614 if idx != x.Args[1] {
14615 break
14616 }
14617 if w != x.Args[2] {
14618 break
14619 }
14620 if !(x.Uses == 1 && clobber(x)) {
14621 break
14622 }
14623 v.reset(OpAMD64MOVWstoreidx1)
14624 v.AuxInt = i - 1
14625 v.Aux = s
14626 v.AddArg(p)
14627 v.AddArg(idx)
14628 v.AddArg(w)
14629 v.AddArg(mem)
14630 return true
14631 }
14632 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem))
14633 // cond: x.Uses == 1 && clobber(x)
14634 // result: (MOVWstoreidx1 [i-1] {s} p idx w mem)
14635 for {
14636 i := v.AuxInt
14637 s := v.Aux
14638 _ = v.Args[3]
14639 p := v.Args[0]
14640 idx := v.Args[1]
14641 v_2 := v.Args[2]
14642 if v_2.Op != OpAMD64SHRQconst {
14643 break
14644 }
14645 if v_2.AuxInt != 8 {
14646 break
14647 }
14648 w := v_2.Args[0]
14649 x := v.Args[3]
14650 if x.Op != OpAMD64MOVBstoreidx1 {
14651 break
14652 }
14653 if x.AuxInt != i-1 {
14654 break
14655 }
14656 if x.Aux != s {
14657 break
14658 }
14659 mem := x.Args[3]
14660 if p != x.Args[0] {
14661 break
14662 }
14663 if idx != x.Args[1] {
14664 break
14665 }
14666 if w != x.Args[2] {
14667 break
14668 }
14669 if !(x.Uses == 1 && clobber(x)) {
14670 break
14671 }
14672 v.reset(OpAMD64MOVWstoreidx1)
14673 v.AuxInt = i - 1
14674 v.Aux = s
14675 v.AddArg(p)
14676 v.AddArg(idx)
14677 v.AddArg(w)
14678 v.AddArg(mem)
14679 return true
14680 }
14681 // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem))
14682 // cond: x.Uses == 1 && clobber(x)
14683 // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
14684 for {
14685 i := v.AuxInt
14686 s := v.Aux
14687 _ = v.Args[3]
14688 p := v.Args[0]
14689 idx := v.Args[1]
14690 v_2 := v.Args[2]
14691 if v_2.Op != OpAMD64SHRLconst {
14692 break
14693 }
14694 j := v_2.AuxInt
14695 w := v_2.Args[0]
14696 x := v.Args[3]
14697 if x.Op != OpAMD64MOVBstoreidx1 {
14698 break
14699 }
14700 if x.AuxInt != i-1 {
14701 break
14702 }
14703 if x.Aux != s {
14704 break
14705 }
14706 mem := x.Args[3]
14707 if p != x.Args[0] {
14708 break
14709 }
14710 if idx != x.Args[1] {
14711 break
14712 }
14713 w0 := x.Args[2]
14714 if w0.Op != OpAMD64SHRLconst {
14715 break
14716 }
14717 if w0.AuxInt != j-8 {
14718 break
14719 }
14720 if w != w0.Args[0] {
14721 break
14722 }
14723 if !(x.Uses == 1 && clobber(x)) {
14724 break
14725 }
14726 v.reset(OpAMD64MOVWstoreidx1)
14727 v.AuxInt = i - 1
14728 v.Aux = s
14729 v.AddArg(p)
14730 v.AddArg(idx)
14731 v.AddArg(w0)
14732 v.AddArg(mem)
14733 return true
14734 }
14735 // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem))
14736 // cond: x.Uses == 1 && clobber(x)
14737 // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem)
14738 for {
14739 i := v.AuxInt
14740 s := v.Aux
14741 _ = v.Args[3]
14742 p := v.Args[0]
14743 idx := v.Args[1]
14744 v_2 := v.Args[2]
14745 if v_2.Op != OpAMD64SHRQconst {
14746 break
14747 }
14748 j := v_2.AuxInt
14749 w := v_2.Args[0]
14750 x := v.Args[3]
14751 if x.Op != OpAMD64MOVBstoreidx1 {
14752 break
14753 }
14754 if x.AuxInt != i-1 {
14755 break
14756 }
14757 if x.Aux != s {
14758 break
14759 }
14760 mem := x.Args[3]
14761 if p != x.Args[0] {
14762 break
14763 }
14764 if idx != x.Args[1] {
14765 break
14766 }
14767 w0 := x.Args[2]
14768 if w0.Op != OpAMD64SHRQconst {
14769 break
14770 }
14771 if w0.AuxInt != j-8 {
14772 break
14773 }
14774 if w != w0.Args[0] {
14775 break
14776 }
14777 if !(x.Uses == 1 && clobber(x)) {
14778 break
14779 }
14780 v.reset(OpAMD64MOVWstoreidx1)
14781 v.AuxInt = i - 1
14782 v.Aux = s
14783 v.AddArg(p)
14784 v.AddArg(idx)
14785 v.AddArg(w0)
14786 v.AddArg(mem)
14787 return true
14788 }
14789 return false
14790 }
14791 func rewriteValueAMD64_OpAMD64MOVBstoreidx1_10(v *Value) bool {
14792 // match: (MOVBstoreidx1 [i] {s} p (MOVQconst [c]) w mem)
14793 // cond: is32Bit(i+c)
14794 // result: (MOVBstore [i+c] {s} p w mem)
14795 for {
14796 i := v.AuxInt
14797 s := v.Aux
14798 mem := v.Args[3]
14799 p := v.Args[0]
14800 v_1 := v.Args[1]
14801 if v_1.Op != OpAMD64MOVQconst {
14802 break
14803 }
14804 c := v_1.AuxInt
14805 w := v.Args[2]
14806 if !(is32Bit(i + c)) {
14807 break
14808 }
14809 v.reset(OpAMD64MOVBstore)
14810 v.AuxInt = i + c
14811 v.Aux = s
14812 v.AddArg(p)
14813 v.AddArg(w)
14814 v.AddArg(mem)
14815 return true
14816 }
14817 return false
14818 }
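// Editor's note (not generated code): when the index of MOVBstoreidx1 turns
// out to be a MOVQconst, the rule above demotes the indexed store back to a
// plain MOVBstore with the constant folded into the displacement, mirroring
// the MOVBloadidx1 constant-index rules earlier. Sketch (illustrative only):
//
//	func setByte(p *[16]byte, b byte) {
//		p[5] = b // the constant index folds into the displacement
//	}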
14819 func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool {
14820 b := v.Block
14821 // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
14822 // cond: x.Uses == 1 && clobber(x)
14823 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
14824 for {
14825 x := v.Args[0]
14826 if x.Op != OpAMD64MOVLload {
14827 break
14828 }
14829 off := x.AuxInt
14830 sym := x.Aux
14831 mem := x.Args[1]
14832 ptr := x.Args[0]
14833 if !(x.Uses == 1 && clobber(x)) {
14834 break
14835 }
14836 b = x.Block
14837 v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
14838 v.reset(OpCopy)
14839 v.AddArg(v0)
14840 v0.AuxInt = off
14841 v0.Aux = sym
14842 v0.AddArg(ptr)
14843 v0.AddArg(mem)
14844 return true
14845 }
14846 // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
14847 // cond: x.Uses == 1 && clobber(x)
14848 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
14849 for {
14850 x := v.Args[0]
14851 if x.Op != OpAMD64MOVQload {
14852 break
14853 }
14854 off := x.AuxInt
14855 sym := x.Aux
14856 mem := x.Args[1]
14857 ptr := x.Args[0]
14858 if !(x.Uses == 1 && clobber(x)) {
14859 break
14860 }
14861 b = x.Block
14862 v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
14863 v.reset(OpCopy)
14864 v.AddArg(v0)
14865 v0.AuxInt = off
14866 v0.Aux = sym
14867 v0.AddArg(ptr)
14868 v0.AddArg(mem)
14869 return true
14870 }
14871 // match: (MOVLQSX (ANDLconst [c] x))
14872 // cond: c & 0x80000000 == 0
14873 // result: (ANDLconst [c & 0x7fffffff] x)
14874 for {
14875 v_0 := v.Args[0]
14876 if v_0.Op != OpAMD64ANDLconst {
14877 break
14878 }
14879 c := v_0.AuxInt
14880 x := v_0.Args[0]
14881 if !(c&0x80000000 == 0) {
14882 break
14883 }
14884 v.reset(OpAMD64ANDLconst)
14885 v.AuxInt = c & 0x7fffffff
14886 v.AddArg(x)
14887 return true
14888 }
14889 // match: (MOVLQSX (MOVLQSX x))
14890 // cond:
14891 // result: (MOVLQSX x)
14892 for {
14893 v_0 := v.Args[0]
14894 if v_0.Op != OpAMD64MOVLQSX {
14895 break
14896 }
14897 x := v_0.Args[0]
14898 v.reset(OpAMD64MOVLQSX)
14899 v.AddArg(x)
14900 return true
14901 }
14902 // match: (MOVLQSX (MOVWQSX x))
14903 // cond:
14904 // result: (MOVWQSX x)
14905 for {
14906 v_0 := v.Args[0]
14907 if v_0.Op != OpAMD64MOVWQSX {
14908 break
14909 }
14910 x := v_0.Args[0]
14911 v.reset(OpAMD64MOVWQSX)
14912 v.AddArg(x)
14913 return true
14914 }
14915 // match: (MOVLQSX (MOVBQSX x))
14916 // cond:
14917 // result: (MOVBQSX x)
14918 for {
14919 v_0 := v.Args[0]
14920 if v_0.Op != OpAMD64MOVBQSX {
14921 break
14922 }
14923 x := v_0.Args[0]
14924 v.reset(OpAMD64MOVBQSX)
14925 v.AddArg(x)
14926 return true
14927 }
14928 return false
14929 }
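// rewriteValueAMD64_OpAMD64MOVLQSXload_0 forwards a just-stored value through the load and folds LEAQ address arithmetic into the load's offset and symbol.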
14930 func rewriteValueAMD64_OpAMD64MOVLQSXload_0(v *Value) bool {
14931 // match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
14932 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
14933 // result: (MOVLQSX x)
14934 for {
14935 off := v.AuxInt
14936 sym := v.Aux
14937 _ = v.Args[1]
14938 ptr := v.Args[0]
14939 v_1 := v.Args[1]
14940 if v_1.Op != OpAMD64MOVLstore {
14941 break
14942 }
14943 off2 := v_1.AuxInt
14944 sym2 := v_1.Aux
14945 _ = v_1.Args[2]
14946 ptr2 := v_1.Args[0]
14947 x := v_1.Args[1]
14948 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
14949 break
14950 }
14951 v.reset(OpAMD64MOVLQSX)
14952 v.AddArg(x)
14953 return true
14954 }
14955 // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
14956 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
14957 // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
14958 for {
14959 off1 := v.AuxInt
14960 sym1 := v.Aux
14961 mem := v.Args[1]
14962 v_0 := v.Args[0]
14963 if v_0.Op != OpAMD64LEAQ {
14964 break
14965 }
14966 off2 := v_0.AuxInt
14967 sym2 := v_0.Aux
14968 base := v_0.Args[0]
14969 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
14970 break
14971 }
14972 v.reset(OpAMD64MOVLQSXload)
14973 v.AuxInt = off1 + off2
14974 v.Aux = mergeSym(sym1, sym2)
14975 v.AddArg(base)
14976 v.AddArg(mem)
14977 return true
14978 }
14979 return false
14980 }
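// rewriteValueAMD64_OpAMD64MOVLQZX_0 removes zero extensions that are redundant — of loads, masked values, and values whose upper 32 bits are already zero — by rewriting to plain loads or copies.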
14981 func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool {
14982 b := v.Block
14983 // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
14984 // cond: x.Uses == 1 && clobber(x)
14985 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
14986 for {
14987 x := v.Args[0]
14988 if x.Op != OpAMD64MOVLload {
14989 break
14990 }
14991 off := x.AuxInt
14992 sym := x.Aux
14993 mem := x.Args[1]
14994 ptr := x.Args[0]
14995 if !(x.Uses == 1 && clobber(x)) {
14996 break
14997 }
14998 b = x.Block
14999 v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
15000 v.reset(OpCopy)
15001 v.AddArg(v0)
15002 v0.AuxInt = off
15003 v0.Aux = sym
15004 v0.AddArg(ptr)
15005 v0.AddArg(mem)
15006 return true
15007 }
15008 // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
15009 // cond: x.Uses == 1 && clobber(x)
15010 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
15011 for {
15012 x := v.Args[0]
15013 if x.Op != OpAMD64MOVQload {
15014 break
15015 }
15016 off := x.AuxInt
15017 sym := x.Aux
15018 mem := x.Args[1]
15019 ptr := x.Args[0]
15020 if !(x.Uses == 1 && clobber(x)) {
15021 break
15022 }
15023 b = x.Block
15024 v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
15025 v.reset(OpCopy)
15026 v.AddArg(v0)
15027 v0.AuxInt = off
15028 v0.Aux = sym
15029 v0.AddArg(ptr)
15030 v0.AddArg(mem)
15031 return true
15032 }
15033 // match: (MOVLQZX x)
15034 // cond: zeroUpper32Bits(x,3)
15035 // result: x
15036 for {
15037 x := v.Args[0]
15038 if !(zeroUpper32Bits(x, 3)) {
15039 break
15040 }
15041 v.reset(OpCopy)
15042 v.Type = x.Type
15043 v.AddArg(x)
15044 return true
15045 }
15046 // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem))
15047 // cond: x.Uses == 1 && clobber(x)
15048 // result: @x.Block (MOVLloadidx1 <v.Type> [off] {sym} ptr idx mem)
15049 for {
15050 x := v.Args[0]
15051 if x.Op != OpAMD64MOVLloadidx1 {
15052 break
15053 }
15054 off := x.AuxInt
15055 sym := x.Aux
15056 mem := x.Args[2]
15057 ptr := x.Args[0]
15058 idx := x.Args[1]
15059 if !(x.Uses == 1 && clobber(x)) {
15060 break
15061 }
15062 b = x.Block
15063 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type)
15064 v.reset(OpCopy)
15065 v.AddArg(v0)
15066 v0.AuxInt = off
15067 v0.Aux = sym
15068 v0.AddArg(ptr)
15069 v0.AddArg(idx)
15070 v0.AddArg(mem)
15071 return true
15072 }
15073 // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem))
15074 // cond: x.Uses == 1 && clobber(x)
15075 // result: @x.Block (MOVLloadidx4 <v.Type> [off] {sym} ptr idx mem)
15076 for {
15077 x := v.Args[0]
15078 if x.Op != OpAMD64MOVLloadidx4 {
15079 break
15080 }
15081 off := x.AuxInt
15082 sym := x.Aux
15083 mem := x.Args[2]
15084 ptr := x.Args[0]
15085 idx := x.Args[1]
15086 if !(x.Uses == 1 && clobber(x)) {
15087 break
15088 }
15089 b = x.Block
15090 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type)
15091 v.reset(OpCopy)
15092 v.AddArg(v0)
15093 v0.AuxInt = off
15094 v0.Aux = sym
15095 v0.AddArg(ptr)
15096 v0.AddArg(idx)
15097 v0.AddArg(mem)
15098 return true
15099 }
15100 // match: (MOVLQZX (ANDLconst [c] x))
15101 // cond:
15102 // result: (ANDLconst [c] x)
15103 for {
15104 v_0 := v.Args[0]
15105 if v_0.Op != OpAMD64ANDLconst {
15106 break
15107 }
15108 c := v_0.AuxInt
15109 x := v_0.Args[0]
15110 v.reset(OpAMD64ANDLconst)
15111 v.AuxInt = c
15112 v.AddArg(x)
15113 return true
15114 }
15115 // match: (MOVLQZX (MOVLQZX x))
15116 // cond:
15117 // result: (MOVLQZX x)
15118 for {
15119 v_0 := v.Args[0]
15120 if v_0.Op != OpAMD64MOVLQZX {
15121 break
15122 }
15123 x := v_0.Args[0]
15124 v.reset(OpAMD64MOVLQZX)
15125 v.AddArg(x)
15126 return true
15127 }
15128 // match: (MOVLQZX (MOVWQZX x))
15129 // cond:
15130 // result: (MOVWQZX x)
15131 for {
15132 v_0 := v.Args[0]
15133 if v_0.Op != OpAMD64MOVWQZX {
15134 break
15135 }
15136 x := v_0.Args[0]
15137 v.reset(OpAMD64MOVWQZX)
15138 v.AddArg(x)
15139 return true
15140 }
15141 // match: (MOVLQZX (MOVBQZX x))
15142 // cond:
15143 // result: (MOVBQZX x)
15144 for {
15145 v_0 := v.Args[0]
15146 if v_0.Op != OpAMD64MOVBQZX {
15147 break
15148 }
15149 x := v_0.Args[0]
15150 v.reset(OpAMD64MOVBQZX)
15151 v.AddArg(x)
15152 return true
15153 }
15154 return false
15155 }
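// rewriteValueAMD64_OpAMD64MOVLatomicload_0 folds ADDQconst and LEAQ address computations into the atomic load's offset and symbol.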
15156 func rewriteValueAMD64_OpAMD64MOVLatomicload_0(v *Value) bool {
15157 // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
15158 // cond: is32Bit(off1+off2)
15159 // result: (MOVLatomicload [off1+off2] {sym} ptr mem)
15160 for {
15161 off1 := v.AuxInt
15162 sym := v.Aux
15163 mem := v.Args[1]
15164 v_0 := v.Args[0]
15165 if v_0.Op != OpAMD64ADDQconst {
15166 break
15167 }
15168 off2 := v_0.AuxInt
15169 ptr := v_0.Args[0]
15170 if !(is32Bit(off1 + off2)) {
15171 break
15172 }
15173 v.reset(OpAMD64MOVLatomicload)
15174 v.AuxInt = off1 + off2
15175 v.Aux = sym
15176 v.AddArg(ptr)
15177 v.AddArg(mem)
15178 return true
15179 }
15180 // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
15181 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
15182 // result: (MOVLatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
15183 for {
15184 off1 := v.AuxInt
15185 sym1 := v.Aux
15186 mem := v.Args[1]
15187 v_0 := v.Args[0]
15188 if v_0.Op != OpAMD64LEAQ {
15189 break
15190 }
15191 off2 := v_0.AuxInt
15192 sym2 := v_0.Aux
15193 ptr := v_0.Args[0]
15194 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
15195 break
15196 }
15197 v.reset(OpAMD64MOVLatomicload)
15198 v.AuxInt = off1 + off2
15199 v.Aux = mergeSym(sym1, sym2)
15200 v.AddArg(ptr)
15201 v.AddArg(mem)
15202 return true
15203 }
15204 return false
15205 }
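// rewriteValueAMD64_OpAMD64MOVLf2i_0 rewrites a float-to-int bit move of a same-size argument into a re-typed Arg in the entry block.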
15206 func rewriteValueAMD64_OpAMD64MOVLf2i_0(v *Value) bool {
15207 b := v.Block
15208 // match: (MOVLf2i <t> (Arg <u> [off] {sym}))
15209 // cond: t.Size() == u.Size()
15210 // result: @b.Func.Entry (Arg <t> [off] {sym})
15211 for {
15212 t := v.Type
15213 v_0 := v.Args[0]
15214 if v_0.Op != OpArg {
15215 break
15216 }
15217 u := v_0.Type
15218 off := v_0.AuxInt
15219 sym := v_0.Aux
15220 if !(t.Size() == u.Size()) {
15221 break
15222 }
15223 b = b.Func.Entry
15224 v0 := b.NewValue0(v.Pos, OpArg, t)
15225 v.reset(OpCopy)
15226 v.AddArg(v0)
15227 v0.AuxInt = off
15228 v0.Aux = sym
15229 return true
15230 }
15231 return false
15232 }
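// rewriteValueAMD64_OpAMD64MOVLi2f_0 rewrites an int-to-float bit move of a same-size argument into a re-typed Arg in the entry block.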
15233 func rewriteValueAMD64_OpAMD64MOVLi2f_0(v *Value) bool {
15234 b := v.Block
15235 // match: (MOVLi2f <t> (Arg <u> [off] {sym}))
15236 // cond: t.Size() == u.Size()
15237 // result: @b.Func.Entry (Arg <t> [off] {sym})
15238 for {
15239 t := v.Type
15240 v_0 := v.Args[0]
15241 if v_0.Op != OpArg {
15242 break
15243 }
15244 u := v_0.Type
15245 off := v_0.AuxInt
15246 sym := v_0.Aux
15247 if !(t.Size() == u.Size()) {
15248 break
15249 }
15250 b = b.Func.Entry
15251 v0 := b.NewValue0(v.Pos, OpArg, t)
15252 v.reset(OpCopy)
15253 v.AddArg(v0)
15254 v0.AuxInt = off
15255 v0.Aux = sym
15256 return true
15257 }
15258 return false
15259 }
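// rewriteValueAMD64_OpAMD64MOVLload_0 performs store-to-load forwarding and folds constant offsets and LEAQ/LEAL/ADDQ address forms into the load, selecting indexed variants where possible.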
15260 func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool {
15261 // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
15262 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
15263 // result: (MOVLQZX x)
15264 for {
15265 off := v.AuxInt
15266 sym := v.Aux
15267 _ = v.Args[1]
15268 ptr := v.Args[0]
15269 v_1 := v.Args[1]
15270 if v_1.Op != OpAMD64MOVLstore {
15271 break
15272 }
15273 off2 := v_1.AuxInt
15274 sym2 := v_1.Aux
15275 _ = v_1.Args[2]
15276 ptr2 := v_1.Args[0]
15277 x := v_1.Args[1]
15278 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
15279 break
15280 }
15281 v.reset(OpAMD64MOVLQZX)
15282 v.AddArg(x)
15283 return true
15284 }
15285 // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
15286 // cond: is32Bit(off1+off2)
15287 // result: (MOVLload [off1+off2] {sym} ptr mem)
15288 for {
15289 off1 := v.AuxInt
15290 sym := v.Aux
15291 mem := v.Args[1]
15292 v_0 := v.Args[0]
15293 if v_0.Op != OpAMD64ADDQconst {
15294 break
15295 }
15296 off2 := v_0.AuxInt
15297 ptr := v_0.Args[0]
15298 if !(is32Bit(off1 + off2)) {
15299 break
15300 }
15301 v.reset(OpAMD64MOVLload)
15302 v.AuxInt = off1 + off2
15303 v.Aux = sym
15304 v.AddArg(ptr)
15305 v.AddArg(mem)
15306 return true
15307 }
15308 // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
15309 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
15310 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
15311 for {
15312 off1 := v.AuxInt
15313 sym1 := v.Aux
15314 mem := v.Args[1]
15315 v_0 := v.Args[0]
15316 if v_0.Op != OpAMD64LEAQ {
15317 break
15318 }
15319 off2 := v_0.AuxInt
15320 sym2 := v_0.Aux
15321 base := v_0.Args[0]
15322 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
15323 break
15324 }
15325 v.reset(OpAMD64MOVLload)
15326 v.AuxInt = off1 + off2
15327 v.Aux = mergeSym(sym1, sym2)
15328 v.AddArg(base)
15329 v.AddArg(mem)
15330 return true
15331 }
15332 // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
15333 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
15334 // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
15335 for {
15336 off1 := v.AuxInt
15337 sym1 := v.Aux
15338 mem := v.Args[1]
15339 v_0 := v.Args[0]
15340 if v_0.Op != OpAMD64LEAQ1 {
15341 break
15342 }
15343 off2 := v_0.AuxInt
15344 sym2 := v_0.Aux
15345 idx := v_0.Args[1]
15346 ptr := v_0.Args[0]
15347 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
15348 break
15349 }
15350 v.reset(OpAMD64MOVLloadidx1)
15351 v.AuxInt = off1 + off2
15352 v.Aux = mergeSym(sym1, sym2)
15353 v.AddArg(ptr)
15354 v.AddArg(idx)
15355 v.AddArg(mem)
15356 return true
15357 }
15358 // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
15359 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
15360 // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
15361 for {
15362 off1 := v.AuxInt
15363 sym1 := v.Aux
15364 mem := v.Args[1]
15365 v_0 := v.Args[0]
15366 if v_0.Op != OpAMD64LEAQ4 {
15367 break
15368 }
15369 off2 := v_0.AuxInt
15370 sym2 := v_0.Aux
15371 idx := v_0.Args[1]
15372 ptr := v_0.Args[0]
15373 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
15374 break
15375 }
15376 v.reset(OpAMD64MOVLloadidx4)
15377 v.AuxInt = off1 + off2
15378 v.Aux = mergeSym(sym1, sym2)
15379 v.AddArg(ptr)
15380 v.AddArg(idx)
15381 v.AddArg(mem)
15382 return true
15383 }
15384 // match: (MOVLload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
15385 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
15386 // result: (MOVLloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
15387 for {
15388 off1 := v.AuxInt
15389 sym1 := v.Aux
15390 mem := v.Args[1]
15391 v_0 := v.Args[0]
15392 if v_0.Op != OpAMD64LEAQ8 {
15393 break
15394 }
15395 off2 := v_0.AuxInt
15396 sym2 := v_0.Aux
15397 idx := v_0.Args[1]
15398 ptr := v_0.Args[0]
15399 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
15400 break
15401 }
15402 v.reset(OpAMD64MOVLloadidx8)
15403 v.AuxInt = off1 + off2
15404 v.Aux = mergeSym(sym1, sym2)
15405 v.AddArg(ptr)
15406 v.AddArg(idx)
15407 v.AddArg(mem)
15408 return true
15409 }
15410 // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem)
15411 // cond: ptr.Op != OpSB
15412 // result: (MOVLloadidx1 [off] {sym} ptr idx mem)
15413 for {
15414 off := v.AuxInt
15415 sym := v.Aux
15416 mem := v.Args[1]
15417 v_0 := v.Args[0]
15418 if v_0.Op != OpAMD64ADDQ {
15419 break
15420 }
15421 idx := v_0.Args[1]
15422 ptr := v_0.Args[0]
15423 if !(ptr.Op != OpSB) {
15424 break
15425 }
15426 v.reset(OpAMD64MOVLloadidx1)
15427 v.AuxInt = off
15428 v.Aux = sym
15429 v.AddArg(ptr)
15430 v.AddArg(idx)
15431 v.AddArg(mem)
15432 return true
15433 }
15434 // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
15435 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
15436 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
15437 for {
15438 off1 := v.AuxInt
15439 sym1 := v.Aux
15440 mem := v.Args[1]
15441 v_0 := v.Args[0]
15442 if v_0.Op != OpAMD64LEAL {
15443 break
15444 }
15445 off2 := v_0.AuxInt
15446 sym2 := v_0.Aux
15447 base := v_0.Args[0]
15448 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
15449 break
15450 }
15451 v.reset(OpAMD64MOVLload)
15452 v.AuxInt = off1 + off2
15453 v.Aux = mergeSym(sym1, sym2)
15454 v.AddArg(base)
15455 v.AddArg(mem)
15456 return true
15457 }
15458 // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
15459 // cond: is32Bit(off1+off2)
15460 // result: (MOVLload [off1+off2] {sym} ptr mem)
15461 for {
15462 off1 := v.AuxInt
15463 sym := v.Aux
15464 mem := v.Args[1]
15465 v_0 := v.Args[0]
15466 if v_0.Op != OpAMD64ADDLconst {
15467 break
15468 }
15469 off2 := v_0.AuxInt
15470 ptr := v_0.Args[0]
15471 if !(is32Bit(off1 + off2)) {
15472 break
15473 }
15474 v.reset(OpAMD64MOVLload)
15475 v.AuxInt = off1 + off2
15476 v.Aux = sym
15477 v.AddArg(ptr)
15478 v.AddArg(mem)
15479 return true
15480 }
15481 // match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
15482 // cond:
15483 // result: (MOVLf2i val)
15484 for {
15485 off := v.AuxInt
15486 sym := v.Aux
15487 _ = v.Args[1]
15488 ptr := v.Args[0]
15489 v_1 := v.Args[1]
15490 if v_1.Op != OpAMD64MOVSSstore {
15491 break
15492 }
15493 if v_1.AuxInt != off {
15494 break
15495 }
15496 if v_1.Aux != sym {
15497 break
15498 }
15499 _ = v_1.Args[2]
15500 if ptr != v_1.Args[0] {
15501 break
15502 }
15503 val := v_1.Args[1]
15504 v.reset(OpAMD64MOVLf2i)
15505 v.AddArg(val)
15506 return true
15507 }
15508 return false
15509 }
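// rewriteValueAMD64_OpAMD64MOVLload_10 constant-folds loads from read-only symbols into MOVQconst.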
15510 func rewriteValueAMD64_OpAMD64MOVLload_10(v *Value) bool {
15511 b := v.Block
15512 config := b.Func.Config
15513 // match: (MOVLload [off] {sym} (SB) _)
15514 // cond: symIsRO(sym)
15515 // result: (MOVQconst [int64(read32(sym, off, config.BigEndian))])
15516 for {
15517 off := v.AuxInt
15518 sym := v.Aux
15519 _ = v.Args[1]
15520 v_0 := v.Args[0]
15521 if v_0.Op != OpSB {
15522 break
15523 }
15524 if !(symIsRO(sym)) {
15525 break
15526 }
15527 v.reset(OpAMD64MOVQconst)
15528 v.AuxInt = int64(read32(sym, off, config.BigEndian))
15529 return true
15530 }
15531 return false
15532 }
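// rewriteValueAMD64_OpAMD64MOVLloadidx1_0 strength-reduces shifted indices into the idx4/idx8 forms and folds constant displacements into the load offset.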
15533 func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool {
15534 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
15535 // cond:
15536 // result: (MOVLloadidx4 [c] {sym} ptr idx mem)
15537 for {
15538 c := v.AuxInt
15539 sym := v.Aux
15540 mem := v.Args[2]
15541 ptr := v.Args[0]
15542 v_1 := v.Args[1]
15543 if v_1.Op != OpAMD64SHLQconst {
15544 break
15545 }
15546 if v_1.AuxInt != 2 {
15547 break
15548 }
15549 idx := v_1.Args[0]
15550 v.reset(OpAMD64MOVLloadidx4)
15551 v.AuxInt = c
15552 v.Aux = sym
15553 v.AddArg(ptr)
15554 v.AddArg(idx)
15555 v.AddArg(mem)
15556 return true
15557 }
15558 // match: (MOVLloadidx1 [c] {sym} (SHLQconst [2] idx) ptr mem)
15559 // cond:
15560 // result: (MOVLloadidx4 [c] {sym} ptr idx mem)
15561 for {
15562 c := v.AuxInt
15563 sym := v.Aux
15564 mem := v.Args[2]
15565 v_0 := v.Args[0]
15566 if v_0.Op != OpAMD64SHLQconst {
15567 break
15568 }
15569 if v_0.AuxInt != 2 {
15570 break
15571 }
15572 idx := v_0.Args[0]
15573 ptr := v.Args[1]
15574 v.reset(OpAMD64MOVLloadidx4)
15575 v.AuxInt = c
15576 v.Aux = sym
15577 v.AddArg(ptr)
15578 v.AddArg(idx)
15579 v.AddArg(mem)
15580 return true
15581 }
15582 // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
15583 // cond:
15584 // result: (MOVLloadidx8 [c] {sym} ptr idx mem)
15585 for {
15586 c := v.AuxInt
15587 sym := v.Aux
15588 mem := v.Args[2]
15589 ptr := v.Args[0]
15590 v_1 := v.Args[1]
15591 if v_1.Op != OpAMD64SHLQconst {
15592 break
15593 }
15594 if v_1.AuxInt != 3 {
15595 break
15596 }
15597 idx := v_1.Args[0]
15598 v.reset(OpAMD64MOVLloadidx8)
15599 v.AuxInt = c
15600 v.Aux = sym
15601 v.AddArg(ptr)
15602 v.AddArg(idx)
15603 v.AddArg(mem)
15604 return true
15605 }
15606 // match: (MOVLloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem)
15607 // cond:
15608 // result: (MOVLloadidx8 [c] {sym} ptr idx mem)
15609 for {
15610 c := v.AuxInt
15611 sym := v.Aux
15612 mem := v.Args[2]
15613 v_0 := v.Args[0]
15614 if v_0.Op != OpAMD64SHLQconst {
15615 break
15616 }
15617 if v_0.AuxInt != 3 {
15618 break
15619 }
15620 idx := v_0.Args[0]
15621 ptr := v.Args[1]
15622 v.reset(OpAMD64MOVLloadidx8)
15623 v.AuxInt = c
15624 v.Aux = sym
15625 v.AddArg(ptr)
15626 v.AddArg(idx)
15627 v.AddArg(mem)
15628 return true
15629 }
15630 // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
15631 // cond: is32Bit(c+d)
15632 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
15633 for {
15634 c := v.AuxInt
15635 sym := v.Aux
15636 mem := v.Args[2]
15637 v_0 := v.Args[0]
15638 if v_0.Op != OpAMD64ADDQconst {
15639 break
15640 }
15641 d := v_0.AuxInt
15642 ptr := v_0.Args[0]
15643 idx := v.Args[1]
15644 if !(is32Bit(c + d)) {
15645 break
15646 }
15647 v.reset(OpAMD64MOVLloadidx1)
15648 v.AuxInt = c + d
15649 v.Aux = sym
15650 v.AddArg(ptr)
15651 v.AddArg(idx)
15652 v.AddArg(mem)
15653 return true
15654 }
15655 // match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
15656 // cond: is32Bit(c+d)
15657 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
15658 for {
15659 c := v.AuxInt
15660 sym := v.Aux
15661 mem := v.Args[2]
15662 idx := v.Args[0]
15663 v_1 := v.Args[1]
15664 if v_1.Op != OpAMD64ADDQconst {
15665 break
15666 }
15667 d := v_1.AuxInt
15668 ptr := v_1.Args[0]
15669 if !(is32Bit(c + d)) {
15670 break
15671 }
15672 v.reset(OpAMD64MOVLloadidx1)
15673 v.AuxInt = c + d
15674 v.Aux = sym
15675 v.AddArg(ptr)
15676 v.AddArg(idx)
15677 v.AddArg(mem)
15678 return true
15679 }
15680 // match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
15681 // cond: is32Bit(c+d)
15682 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
15683 for {
15684 c := v.AuxInt
15685 sym := v.Aux
15686 mem := v.Args[2]
15687 ptr := v.Args[0]
15688 v_1 := v.Args[1]
15689 if v_1.Op != OpAMD64ADDQconst {
15690 break
15691 }
15692 d := v_1.AuxInt
15693 idx := v_1.Args[0]
15694 if !(is32Bit(c + d)) {
15695 break
15696 }
15697 v.reset(OpAMD64MOVLloadidx1)
15698 v.AuxInt = c + d
15699 v.Aux = sym
15700 v.AddArg(ptr)
15701 v.AddArg(idx)
15702 v.AddArg(mem)
15703 return true
15704 }
15705 // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
15706 // cond: is32Bit(c+d)
15707 // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem)
15708 for {
15709 c := v.AuxInt
15710 sym := v.Aux
15711 mem := v.Args[2]
15712 v_0 := v.Args[0]
15713 if v_0.Op != OpAMD64ADDQconst {
15714 break
15715 }
15716 d := v_0.AuxInt
15717 idx := v_0.Args[0]
15718 ptr := v.Args[1]
15719 if !(is32Bit(c + d)) {
15720 break
15721 }
15722 v.reset(OpAMD64MOVLloadidx1)
15723 v.AuxInt = c + d
15724 v.Aux = sym
15725 v.AddArg(ptr)
15726 v.AddArg(idx)
15727 v.AddArg(mem)
15728 return true
15729 }
15730 // match: (MOVLloadidx1 [i] {s} p (MOVQconst [c]) mem)
15731 // cond: is32Bit(i+c)
15732 // result: (MOVLload [i+c] {s} p mem)
15733 for {
15734 i := v.AuxInt
15735 s := v.Aux
15736 mem := v.Args[2]
15737 p := v.Args[0]
15738 v_1 := v.Args[1]
15739 if v_1.Op != OpAMD64MOVQconst {
15740 break
15741 }
15742 c := v_1.AuxInt
15743 if !(is32Bit(i + c)) {
15744 break
15745 }
15746 v.reset(OpAMD64MOVLload)
15747 v.AuxInt = i + c
15748 v.Aux = s
15749 v.AddArg(p)
15750 v.AddArg(mem)
15751 return true
15752 }
15753 // match: (MOVLloadidx1 [i] {s} (MOVQconst [c]) p mem)
15754 // cond: is32Bit(i+c)
15755 // result: (MOVLload [i+c] {s} p mem)
15756 for {
15757 i := v.AuxInt
15758 s := v.Aux
15759 mem := v.Args[2]
15760 v_0 := v.Args[0]
15761 if v_0.Op != OpAMD64MOVQconst {
15762 break
15763 }
15764 c := v_0.AuxInt
15765 p := v.Args[1]
15766 if !(is32Bit(i + c)) {
15767 break
15768 }
15769 v.reset(OpAMD64MOVLload)
15770 v.AuxInt = i + c
15771 v.Aux = s
15772 v.AddArg(p)
15773 v.AddArg(mem)
15774 return true
15775 }
15776 return false
15777 }
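// rewriteValueAMD64_OpAMD64MOVLloadidx4_0 folds constant pointer and index displacements (the index scaled by 4) into the load offset.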
15778 func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool {
15779 // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
15780 // cond: is32Bit(c+d)
15781 // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem)
15782 for {
15783 c := v.AuxInt
15784 sym := v.Aux
15785 mem := v.Args[2]
15786 v_0 := v.Args[0]
15787 if v_0.Op != OpAMD64ADDQconst {
15788 break
15789 }
15790 d := v_0.AuxInt
15791 ptr := v_0.Args[0]
15792 idx := v.Args[1]
15793 if !(is32Bit(c + d)) {
15794 break
15795 }
15796 v.reset(OpAMD64MOVLloadidx4)
15797 v.AuxInt = c + d
15798 v.Aux = sym
15799 v.AddArg(ptr)
15800 v.AddArg(idx)
15801 v.AddArg(mem)
15802 return true
15803 }
15804 // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
15805 // cond: is32Bit(c+4*d)
15806 // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem)
15807 for {
15808 c := v.AuxInt
15809 sym := v.Aux
15810 mem := v.Args[2]
15811 ptr := v.Args[0]
15812 v_1 := v.Args[1]
15813 if v_1.Op != OpAMD64ADDQconst {
15814 break
15815 }
15816 d := v_1.AuxInt
15817 idx := v_1.Args[0]
15818 if !(is32Bit(c + 4*d)) {
15819 break
15820 }
15821 v.reset(OpAMD64MOVLloadidx4)
15822 v.AuxInt = c + 4*d
15823 v.Aux = sym
15824 v.AddArg(ptr)
15825 v.AddArg(idx)
15826 v.AddArg(mem)
15827 return true
15828 }
15829 // match: (MOVLloadidx4 [i] {s} p (MOVQconst [c]) mem)
15830 // cond: is32Bit(i+4*c)
15831 // result: (MOVLload [i+4*c] {s} p mem)
15832 for {
15833 i := v.AuxInt
15834 s := v.Aux
15835 mem := v.Args[2]
15836 p := v.Args[0]
15837 v_1 := v.Args[1]
15838 if v_1.Op != OpAMD64MOVQconst {
15839 break
15840 }
15841 c := v_1.AuxInt
15842 if !(is32Bit(i + 4*c)) {
15843 break
15844 }
15845 v.reset(OpAMD64MOVLload)
15846 v.AuxInt = i + 4*c
15847 v.Aux = s
15848 v.AddArg(p)
15849 v.AddArg(mem)
15850 return true
15851 }
15852 return false
15853 }
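// rewriteValueAMD64_OpAMD64MOVLloadidx8_0 folds constant pointer and index displacements (the index scaled by 8) into the load offset.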
15854 func rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v *Value) bool {
15855 // match: (MOVLloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
15856 // cond: is32Bit(c+d)
15857 // result: (MOVLloadidx8 [c+d] {sym} ptr idx mem)
15858 for {
15859 c := v.AuxInt
15860 sym := v.Aux
15861 mem := v.Args[2]
15862 v_0 := v.Args[0]
15863 if v_0.Op != OpAMD64ADDQconst {
15864 break
15865 }
15866 d := v_0.AuxInt
15867 ptr := v_0.Args[0]
15868 idx := v.Args[1]
15869 if !(is32Bit(c + d)) {
15870 break
15871 }
15872 v.reset(OpAMD64MOVLloadidx8)
15873 v.AuxInt = c + d
15874 v.Aux = sym
15875 v.AddArg(ptr)
15876 v.AddArg(idx)
15877 v.AddArg(mem)
15878 return true
15879 }
15880 // match: (MOVLloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
15881 // cond: is32Bit(c+8*d)
15882 // result: (MOVLloadidx8 [c+8*d] {sym} ptr idx mem)
15883 for {
15884 c := v.AuxInt
15885 sym := v.Aux
15886 mem := v.Args[2]
15887 ptr := v.Args[0]
15888 v_1 := v.Args[1]
15889 if v_1.Op != OpAMD64ADDQconst {
15890 break
15891 }
15892 d := v_1.AuxInt
15893 idx := v_1.Args[0]
15894 if !(is32Bit(c + 8*d)) {
15895 break
15896 }
15897 v.reset(OpAMD64MOVLloadidx8)
15898 v.AuxInt = c + 8*d
15899 v.Aux = sym
15900 v.AddArg(ptr)
15901 v.AddArg(idx)
15902 v.AddArg(mem)
15903 return true
15904 }
15905 // match: (MOVLloadidx8 [i] {s} p (MOVQconst [c]) mem)
15906 // cond: is32Bit(i+8*c)
15907 // result: (MOVLload [i+8*c] {s} p mem)
15908 for {
15909 i := v.AuxInt
15910 s := v.Aux
15911 mem := v.Args[2]
15912 p := v.Args[0]
15913 v_1 := v.Args[1]
15914 if v_1.Op != OpAMD64MOVQconst {
15915 break
15916 }
15917 c := v_1.AuxInt
15918 if !(is32Bit(i + 8*c)) {
15919 break
15920 }
15921 v.reset(OpAMD64MOVLload)
15922 v.AuxInt = i + 8*c
15923 v.Aux = s
15924 v.AddArg(p)
15925 v.AddArg(mem)
15926 return true
15927 }
15928 return false
15929 }
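// rewriteValueAMD64_OpAMD64MOVLstore_0 drops redundant extensions of the stored value, folds address arithmetic into the store, and turns constant stores into MOVLstoreconst.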
15930 func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool {
15931 // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
15932 // cond:
15933 // result: (MOVLstore [off] {sym} ptr x mem)
15934 for {
15935 off := v.AuxInt
15936 sym := v.Aux
15937 mem := v.Args[2]
15938 ptr := v.Args[0]
15939 v_1 := v.Args[1]
15940 if v_1.Op != OpAMD64MOVLQSX {
15941 break
15942 }
15943 x := v_1.Args[0]
15944 v.reset(OpAMD64MOVLstore)
15945 v.AuxInt = off
15946 v.Aux = sym
15947 v.AddArg(ptr)
15948 v.AddArg(x)
15949 v.AddArg(mem)
15950 return true
15951 }
15952 // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
15953 // cond:
15954 // result: (MOVLstore [off] {sym} ptr x mem)
15955 for {
15956 off := v.AuxInt
15957 sym := v.Aux
15958 mem := v.Args[2]
15959 ptr := v.Args[0]
15960 v_1 := v.Args[1]
15961 if v_1.Op != OpAMD64MOVLQZX {
15962 break
15963 }
15964 x := v_1.Args[0]
15965 v.reset(OpAMD64MOVLstore)
15966 v.AuxInt = off
15967 v.Aux = sym
15968 v.AddArg(ptr)
15969 v.AddArg(x)
15970 v.AddArg(mem)
15971 return true
15972 }
15973 // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
15974 // cond: is32Bit(off1+off2)
15975 // result: (MOVLstore [off1+off2] {sym} ptr val mem)
15976 for {
15977 off1 := v.AuxInt
15978 sym := v.Aux
15979 mem := v.Args[2]
15980 v_0 := v.Args[0]
15981 if v_0.Op != OpAMD64ADDQconst {
15982 break
15983 }
15984 off2 := v_0.AuxInt
15985 ptr := v_0.Args[0]
15986 val := v.Args[1]
15987 if !(is32Bit(off1 + off2)) {
15988 break
15989 }
15990 v.reset(OpAMD64MOVLstore)
15991 v.AuxInt = off1 + off2
15992 v.Aux = sym
15993 v.AddArg(ptr)
15994 v.AddArg(val)
15995 v.AddArg(mem)
15996 return true
15997 }
15998 // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
15999 // cond: validOff(off)
16000 // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
16001 for {
16002 off := v.AuxInt
16003 sym := v.Aux
16004 mem := v.Args[2]
16005 ptr := v.Args[0]
16006 v_1 := v.Args[1]
16007 if v_1.Op != OpAMD64MOVLconst {
16008 break
16009 }
16010 c := v_1.AuxInt
16011 if !(validOff(off)) {
16012 break
16013 }
16014 v.reset(OpAMD64MOVLstoreconst)
16015 v.AuxInt = makeValAndOff(int64(int32(c)), off)
16016 v.Aux = sym
16017 v.AddArg(ptr)
16018 v.AddArg(mem)
16019 return true
16020 }
16021 // match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem)
16022 // cond: validOff(off)
16023 // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem)
16024 for {
16025 off := v.AuxInt
16026 sym := v.Aux
16027 mem := v.Args[2]
16028 ptr := v.Args[0]
16029 v_1 := v.Args[1]
16030 if v_1.Op != OpAMD64MOVQconst {
16031 break
16032 }
16033 c := v_1.AuxInt
16034 if !(validOff(off)) {
16035 break
16036 }
16037 v.reset(OpAMD64MOVLstoreconst)
16038 v.AuxInt = makeValAndOff(int64(int32(c)), off)
16039 v.Aux = sym
16040 v.AddArg(ptr)
16041 v.AddArg(mem)
16042 return true
16043 }
16044 // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
16045 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
16046 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
16047 for {
16048 off1 := v.AuxInt
16049 sym1 := v.Aux
16050 mem := v.Args[2]
16051 v_0 := v.Args[0]
16052 if v_0.Op != OpAMD64LEAQ {
16053 break
16054 }
16055 off2 := v_0.AuxInt
16056 sym2 := v_0.Aux
16057 base := v_0.Args[0]
16058 val := v.Args[1]
16059 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
16060 break
16061 }
16062 v.reset(OpAMD64MOVLstore)
16063 v.AuxInt = off1 + off2
16064 v.Aux = mergeSym(sym1, sym2)
16065 v.AddArg(base)
16066 v.AddArg(val)
16067 v.AddArg(mem)
16068 return true
16069 }
16070 // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
16071 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
16072 // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
16073 for {
16074 off1 := v.AuxInt
16075 sym1 := v.Aux
16076 mem := v.Args[2]
16077 v_0 := v.Args[0]
16078 if v_0.Op != OpAMD64LEAQ1 {
16079 break
16080 }
16081 off2 := v_0.AuxInt
16082 sym2 := v_0.Aux
16083 idx := v_0.Args[1]
16084 ptr := v_0.Args[0]
16085 val := v.Args[1]
16086 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
16087 break
16088 }
16089 v.reset(OpAMD64MOVLstoreidx1)
16090 v.AuxInt = off1 + off2
16091 v.Aux = mergeSym(sym1, sym2)
16092 v.AddArg(ptr)
16093 v.AddArg(idx)
16094 v.AddArg(val)
16095 v.AddArg(mem)
16096 return true
16097 }
16098 // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
16099 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
16100 // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
16101 for {
16102 off1 := v.AuxInt
16103 sym1 := v.Aux
16104 mem := v.Args[2]
16105 v_0 := v.Args[0]
16106 if v_0.Op != OpAMD64LEAQ4 {
16107 break
16108 }
16109 off2 := v_0.AuxInt
16110 sym2 := v_0.Aux
16111 idx := v_0.Args[1]
16112 ptr := v_0.Args[0]
16113 val := v.Args[1]
16114 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
16115 break
16116 }
16117 v.reset(OpAMD64MOVLstoreidx4)
16118 v.AuxInt = off1 + off2
16119 v.Aux = mergeSym(sym1, sym2)
16120 v.AddArg(ptr)
16121 v.AddArg(idx)
16122 v.AddArg(val)
16123 v.AddArg(mem)
16124 return true
16125 }
16126 // match: (MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
16127 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
16128 // result: (MOVLstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
16129 for {
16130 off1 := v.AuxInt
16131 sym1 := v.Aux
16132 mem := v.Args[2]
16133 v_0 := v.Args[0]
16134 if v_0.Op != OpAMD64LEAQ8 {
16135 break
16136 }
16137 off2 := v_0.AuxInt
16138 sym2 := v_0.Aux
16139 idx := v_0.Args[1]
16140 ptr := v_0.Args[0]
16141 val := v.Args[1]
16142 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
16143 break
16144 }
16145 v.reset(OpAMD64MOVLstoreidx8)
16146 v.AuxInt = off1 + off2
16147 v.Aux = mergeSym(sym1, sym2)
16148 v.AddArg(ptr)
16149 v.AddArg(idx)
16150 v.AddArg(val)
16151 v.AddArg(mem)
16152 return true
16153 }
16154 // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem)
16155 // cond: ptr.Op != OpSB
16156 // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem)
16157 for {
16158 off := v.AuxInt
16159 sym := v.Aux
16160 mem := v.Args[2]
16161 v_0 := v.Args[0]
16162 if v_0.Op != OpAMD64ADDQ {
16163 break
16164 }
16165 idx := v_0.Args[1]
16166 ptr := v_0.Args[0]
16167 val := v.Args[1]
16168 if !(ptr.Op != OpSB) {
16169 break
16170 }
16171 v.reset(OpAMD64MOVLstoreidx1)
16172 v.AuxInt = off
16173 v.Aux = sym
16174 v.AddArg(ptr)
16175 v.AddArg(idx)
16176 v.AddArg(val)
16177 v.AddArg(mem)
16178 return true
16179 }
16180 return false
16181 }
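// rewriteValueAMD64_OpAMD64MOVLstore_10 combines adjacent 32-bit stores into 64-bit stores and begins fusing read-modify-write sequences into the *modify ops.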
16182 func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool {
16183 b := v.Block
16184 typ := &b.Func.Config.Types
16185 // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
16186 // cond: x.Uses == 1 && clobber(x)
16187 // result: (MOVQstore [i-4] {s} p w mem)
16188 for {
16189 i := v.AuxInt
16190 s := v.Aux
16191 _ = v.Args[2]
16192 p := v.Args[0]
16193 v_1 := v.Args[1]
16194 if v_1.Op != OpAMD64SHRQconst {
16195 break
16196 }
16197 if v_1.AuxInt != 32 {
16198 break
16199 }
16200 w := v_1.Args[0]
16201 x := v.Args[2]
16202 if x.Op != OpAMD64MOVLstore {
16203 break
16204 }
16205 if x.AuxInt != i-4 {
16206 break
16207 }
16208 if x.Aux != s {
16209 break
16210 }
16211 mem := x.Args[2]
16212 if p != x.Args[0] {
16213 break
16214 }
16215 if w != x.Args[1] {
16216 break
16217 }
16218 if !(x.Uses == 1 && clobber(x)) {
16219 break
16220 }
16221 v.reset(OpAMD64MOVQstore)
16222 v.AuxInt = i - 4
16223 v.Aux = s
16224 v.AddArg(p)
16225 v.AddArg(w)
16226 v.AddArg(mem)
16227 return true
16228 }
16229 // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
16230 // cond: x.Uses == 1 && clobber(x)
16231 // result: (MOVQstore [i-4] {s} p w0 mem)
16232 for {
16233 i := v.AuxInt
16234 s := v.Aux
16235 _ = v.Args[2]
16236 p := v.Args[0]
16237 v_1 := v.Args[1]
16238 if v_1.Op != OpAMD64SHRQconst {
16239 break
16240 }
16241 j := v_1.AuxInt
16242 w := v_1.Args[0]
16243 x := v.Args[2]
16244 if x.Op != OpAMD64MOVLstore {
16245 break
16246 }
16247 if x.AuxInt != i-4 {
16248 break
16249 }
16250 if x.Aux != s {
16251 break
16252 }
16253 mem := x.Args[2]
16254 if p != x.Args[0] {
16255 break
16256 }
16257 w0 := x.Args[1]
16258 if w0.Op != OpAMD64SHRQconst {
16259 break
16260 }
16261 if w0.AuxInt != j-32 {
16262 break
16263 }
16264 if w != w0.Args[0] {
16265 break
16266 }
16267 if !(x.Uses == 1 && clobber(x)) {
16268 break
16269 }
16270 v.reset(OpAMD64MOVQstore)
16271 v.AuxInt = i - 4
16272 v.Aux = s
16273 v.AddArg(p)
16274 v.AddArg(w0)
16275 v.AddArg(mem)
16276 return true
16277 }
16278 // match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem))
16279 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)
16280 // result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
16281 for {
16282 i := v.AuxInt
16283 s := v.Aux
16284 _ = v.Args[2]
16285 p := v.Args[0]
16286 x1 := v.Args[1]
16287 if x1.Op != OpAMD64MOVLload {
16288 break
16289 }
16290 j := x1.AuxInt
16291 s2 := x1.Aux
16292 mem := x1.Args[1]
16293 p2 := x1.Args[0]
16294 mem2 := v.Args[2]
16295 if mem2.Op != OpAMD64MOVLstore {
16296 break
16297 }
16298 if mem2.AuxInt != i-4 {
16299 break
16300 }
16301 if mem2.Aux != s {
16302 break
16303 }
16304 _ = mem2.Args[2]
16305 if p != mem2.Args[0] {
16306 break
16307 }
16308 x2 := mem2.Args[1]
16309 if x2.Op != OpAMD64MOVLload {
16310 break
16311 }
16312 if x2.AuxInt != j-4 {
16313 break
16314 }
16315 if x2.Aux != s2 {
16316 break
16317 }
16318 _ = x2.Args[1]
16319 if p2 != x2.Args[0] {
16320 break
16321 }
16322 if mem != x2.Args[1] {
16323 break
16324 }
16325 if mem != mem2.Args[2] {
16326 break
16327 }
16328 if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) {
16329 break
16330 }
16331 v.reset(OpAMD64MOVQstore)
16332 v.AuxInt = i - 4
16333 v.Aux = s
16334 v.AddArg(p)
16335 v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64)
16336 v0.AuxInt = j - 4
16337 v0.Aux = s2
16338 v0.AddArg(p2)
16339 v0.AddArg(mem)
16340 v.AddArg(v0)
16341 v.AddArg(mem)
16342 return true
16343 }
16344 // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
16345 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
16346 // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
16347 for {
16348 off1 := v.AuxInt
16349 sym1 := v.Aux
16350 mem := v.Args[2]
16351 v_0 := v.Args[0]
16352 if v_0.Op != OpAMD64LEAL {
16353 break
16354 }
16355 off2 := v_0.AuxInt
16356 sym2 := v_0.Aux
16357 base := v_0.Args[0]
16358 val := v.Args[1]
16359 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
16360 break
16361 }
16362 v.reset(OpAMD64MOVLstore)
16363 v.AuxInt = off1 + off2
16364 v.Aux = mergeSym(sym1, sym2)
16365 v.AddArg(base)
16366 v.AddArg(val)
16367 v.AddArg(mem)
16368 return true
16369 }
16370 // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
16371 // cond: is32Bit(off1+off2)
16372 // result: (MOVLstore [off1+off2] {sym} ptr val mem)
16373 for {
16374 off1 := v.AuxInt
16375 sym := v.Aux
16376 mem := v.Args[2]
16377 v_0 := v.Args[0]
16378 if v_0.Op != OpAMD64ADDLconst {
16379 break
16380 }
16381 off2 := v_0.AuxInt
16382 ptr := v_0.Args[0]
16383 val := v.Args[1]
16384 if !(is32Bit(off1 + off2)) {
16385 break
16386 }
16387 v.reset(OpAMD64MOVLstore)
16388 v.AuxInt = off1 + off2
16389 v.Aux = sym
16390 v.AddArg(ptr)
16391 v.AddArg(val)
16392 v.AddArg(mem)
16393 return true
16394 }
16395 // match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
16396 // cond: y.Uses==1 && clobber(y)
16397 // result: (ADDLmodify [off] {sym} ptr x mem)
16398 for {
16399 off := v.AuxInt
16400 sym := v.Aux
16401 mem := v.Args[2]
16402 ptr := v.Args[0]
16403 y := v.Args[1]
16404 if y.Op != OpAMD64ADDLload {
16405 break
16406 }
16407 if y.AuxInt != off {
16408 break
16409 }
16410 if y.Aux != sym {
16411 break
16412 }
16413 _ = y.Args[2]
16414 x := y.Args[0]
16415 if ptr != y.Args[1] {
16416 break
16417 }
16418 if mem != y.Args[2] {
16419 break
16420 }
16421 if !(y.Uses == 1 && clobber(y)) {
16422 break
16423 }
16424 v.reset(OpAMD64ADDLmodify)
16425 v.AuxInt = off
16426 v.Aux = sym
16427 v.AddArg(ptr)
16428 v.AddArg(x)
16429 v.AddArg(mem)
16430 return true
16431 }
16432 // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
16433 // cond: y.Uses==1 && clobber(y)
16434 // result: (ANDLmodify [off] {sym} ptr x mem)
16435 for {
16436 off := v.AuxInt
16437 sym := v.Aux
16438 mem := v.Args[2]
16439 ptr := v.Args[0]
16440 y := v.Args[1]
16441 if y.Op != OpAMD64ANDLload {
16442 break
16443 }
16444 if y.AuxInt != off {
16445 break
16446 }
16447 if y.Aux != sym {
16448 break
16449 }
16450 _ = y.Args[2]
16451 x := y.Args[0]
16452 if ptr != y.Args[1] {
16453 break
16454 }
16455 if mem != y.Args[2] {
16456 break
16457 }
16458 if !(y.Uses == 1 && clobber(y)) {
16459 break
16460 }
16461 v.reset(OpAMD64ANDLmodify)
16462 v.AuxInt = off
16463 v.Aux = sym
16464 v.AddArg(ptr)
16465 v.AddArg(x)
16466 v.AddArg(mem)
16467 return true
16468 }
16469 // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
16470 // cond: y.Uses==1 && clobber(y)
16471 // result: (ORLmodify [off] {sym} ptr x mem)
16472 for {
16473 off := v.AuxInt
16474 sym := v.Aux
16475 mem := v.Args[2]
16476 ptr := v.Args[0]
16477 y := v.Args[1]
16478 if y.Op != OpAMD64ORLload {
16479 break
16480 }
16481 if y.AuxInt != off {
16482 break
16483 }
16484 if y.Aux != sym {
16485 break
16486 }
16487 _ = y.Args[2]
16488 x := y.Args[0]
16489 if ptr != y.Args[1] {
16490 break
16491 }
16492 if mem != y.Args[2] {
16493 break
16494 }
16495 if !(y.Uses == 1 && clobber(y)) {
16496 break
16497 }
16498 v.reset(OpAMD64ORLmodify)
16499 v.AuxInt = off
16500 v.Aux = sym
16501 v.AddArg(ptr)
16502 v.AddArg(x)
16503 v.AddArg(mem)
16504 return true
16505 }
16506 // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
16507 // cond: y.Uses==1 && clobber(y)
16508 // result: (XORLmodify [off] {sym} ptr x mem)
16509 for {
16510 off := v.AuxInt
16511 sym := v.Aux
16512 mem := v.Args[2]
16513 ptr := v.Args[0]
16514 y := v.Args[1]
16515 if y.Op != OpAMD64XORLload {
16516 break
16517 }
16518 if y.AuxInt != off {
16519 break
16520 }
16521 if y.Aux != sym {
16522 break
16523 }
16524 _ = y.Args[2]
16525 x := y.Args[0]
16526 if ptr != y.Args[1] {
16527 break
16528 }
16529 if mem != y.Args[2] {
16530 break
16531 }
16532 if !(y.Uses == 1 && clobber(y)) {
16533 break
16534 }
16535 v.reset(OpAMD64XORLmodify)
16536 v.AuxInt = off
16537 v.Aux = sym
16538 v.AddArg(ptr)
16539 v.AddArg(x)
16540 v.AddArg(mem)
16541 return true
16542 }
16543 // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
16544 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
16545 // result: (ADDLmodify [off] {sym} ptr x mem)
16546 for {
16547 off := v.AuxInt
16548 sym := v.Aux
16549 mem := v.Args[2]
16550 ptr := v.Args[0]
16551 y := v.Args[1]
16552 if y.Op != OpAMD64ADDL {
16553 break
16554 }
16555 x := y.Args[1]
16556 l := y.Args[0]
16557 if l.Op != OpAMD64MOVLload {
16558 break
16559 }
16560 if l.AuxInt != off {
16561 break
16562 }
16563 if l.Aux != sym {
16564 break
16565 }
16566 _ = l.Args[1]
16567 if ptr != l.Args[0] {
16568 break
16569 }
16570 if mem != l.Args[1] {
16571 break
16572 }
16573 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
16574 break
16575 }
16576 v.reset(OpAMD64ADDLmodify)
16577 v.AuxInt = off
16578 v.Aux = sym
16579 v.AddArg(ptr)
16580 v.AddArg(x)
16581 v.AddArg(mem)
16582 return true
16583 }
16584 return false
16585 }
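// rewriteValueAMD64_OpAMD64MOVLstore_20 continues fusing stores of binary ops on a just-loaded value into ADDL/SUBL/ANDL/ORL/XORL/BTCL/BTRLmodify instructions.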
16586 func rewriteValueAMD64_OpAMD64MOVLstore_20(v *Value) bool {
16587 // match: (MOVLstore {sym} [off] ptr y:(ADDL x l:(MOVLload [off] {sym} ptr mem)) mem)
16588 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
16589 // result: (ADDLmodify [off] {sym} ptr x mem)
16590 for {
16591 off := v.AuxInt
16592 sym := v.Aux
16593 mem := v.Args[2]
16594 ptr := v.Args[0]
16595 y := v.Args[1]
16596 if y.Op != OpAMD64ADDL {
16597 break
16598 }
16599 _ = y.Args[1]
16600 x := y.Args[0]
16601 l := y.Args[1]
16602 if l.Op != OpAMD64MOVLload {
16603 break
16604 }
16605 if l.AuxInt != off {
16606 break
16607 }
16608 if l.Aux != sym {
16609 break
16610 }
16611 _ = l.Args[1]
16612 if ptr != l.Args[0] {
16613 break
16614 }
16615 if mem != l.Args[1] {
16616 break
16617 }
16618 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
16619 break
16620 }
16621 v.reset(OpAMD64ADDLmodify)
16622 v.AuxInt = off
16623 v.Aux = sym
16624 v.AddArg(ptr)
16625 v.AddArg(x)
16626 v.AddArg(mem)
16627 return true
16628 }
16629 // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
16630 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
16631 // result: (SUBLmodify [off] {sym} ptr x mem)
16632 for {
16633 off := v.AuxInt
16634 sym := v.Aux
16635 mem := v.Args[2]
16636 ptr := v.Args[0]
16637 y := v.Args[1]
16638 if y.Op != OpAMD64SUBL {
16639 break
16640 }
16641 x := y.Args[1]
16642 l := y.Args[0]
16643 if l.Op != OpAMD64MOVLload {
16644 break
16645 }
16646 if l.AuxInt != off {
16647 break
16648 }
16649 if l.Aux != sym {
16650 break
16651 }
16652 _ = l.Args[1]
16653 if ptr != l.Args[0] {
16654 break
16655 }
16656 if mem != l.Args[1] {
16657 break
16658 }
16659 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
16660 break
16661 }
16662 v.reset(OpAMD64SUBLmodify)
16663 v.AuxInt = off
16664 v.Aux = sym
16665 v.AddArg(ptr)
16666 v.AddArg(x)
16667 v.AddArg(mem)
16668 return true
16669 }
16670 // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
16671 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
16672 // result: (ANDLmodify [off] {sym} ptr x mem)
16673 for {
16674 off := v.AuxInt
16675 sym := v.Aux
16676 mem := v.Args[2]
16677 ptr := v.Args[0]
16678 y := v.Args[1]
16679 if y.Op != OpAMD64ANDL {
16680 break
16681 }
16682 x := y.Args[1]
16683 l := y.Args[0]
16684 if l.Op != OpAMD64MOVLload {
16685 break
16686 }
16687 if l.AuxInt != off {
16688 break
16689 }
16690 if l.Aux != sym {
16691 break
16692 }
16693 _ = l.Args[1]
16694 if ptr != l.Args[0] {
16695 break
16696 }
16697 if mem != l.Args[1] {
16698 break
16699 }
16700 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
16701 break
16702 }
16703 v.reset(OpAMD64ANDLmodify)
16704 v.AuxInt = off
16705 v.Aux = sym
16706 v.AddArg(ptr)
16707 v.AddArg(x)
16708 v.AddArg(mem)
16709 return true
16710 }
16711 // match: (MOVLstore {sym} [off] ptr y:(ANDL x l:(MOVLload [off] {sym} ptr mem)) mem)
16712 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
16713 // result: (ANDLmodify [off] {sym} ptr x mem)
16714 for {
16715 off := v.AuxInt
16716 sym := v.Aux
16717 mem := v.Args[2]
16718 ptr := v.Args[0]
16719 y := v.Args[1]
16720 if y.Op != OpAMD64ANDL {
16721 break
16722 }
16723 _ = y.Args[1]
16724 x := y.Args[0]
16725 l := y.Args[1]
16726 if l.Op != OpAMD64MOVLload {
16727 break
16728 }
16729 if l.AuxInt != off {
16730 break
16731 }
16732 if l.Aux != sym {
16733 break
16734 }
16735 _ = l.Args[1]
16736 if ptr != l.Args[0] {
16737 break
16738 }
16739 if mem != l.Args[1] {
16740 break
16741 }
16742 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
16743 break
16744 }
16745 v.reset(OpAMD64ANDLmodify)
16746 v.AuxInt = off
16747 v.Aux = sym
16748 v.AddArg(ptr)
16749 v.AddArg(x)
16750 v.AddArg(mem)
16751 return true
16752 }
16753 // match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
16754 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
16755 // result: (ORLmodify [off] {sym} ptr x mem)
16756 for {
16757 off := v.AuxInt
16758 sym := v.Aux
16759 mem := v.Args[2]
16760 ptr := v.Args[0]
16761 y := v.Args[1]
16762 if y.Op != OpAMD64ORL {
16763 break
16764 }
16765 x := y.Args[1]
16766 l := y.Args[0]
16767 if l.Op != OpAMD64MOVLload {
16768 break
16769 }
16770 if l.AuxInt != off {
16771 break
16772 }
16773 if l.Aux != sym {
16774 break
16775 }
16776 _ = l.Args[1]
16777 if ptr != l.Args[0] {
16778 break
16779 }
16780 if mem != l.Args[1] {
16781 break
16782 }
16783 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
16784 break
16785 }
16786 v.reset(OpAMD64ORLmodify)
16787 v.AuxInt = off
16788 v.Aux = sym
16789 v.AddArg(ptr)
16790 v.AddArg(x)
16791 v.AddArg(mem)
16792 return true
16793 }
16794 // match: (MOVLstore {sym} [off] ptr y:(ORL x l:(MOVLload [off] {sym} ptr mem)) mem)
16795 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
16796 // result: (ORLmodify [off] {sym} ptr x mem)
16797 for {
16798 off := v.AuxInt
16799 sym := v.Aux
16800 mem := v.Args[2]
16801 ptr := v.Args[0]
16802 y := v.Args[1]
16803 if y.Op != OpAMD64ORL {
16804 break
16805 }
16806 _ = y.Args[1]
16807 x := y.Args[0]
16808 l := y.Args[1]
16809 if l.Op != OpAMD64MOVLload {
16810 break
16811 }
16812 if l.AuxInt != off {
16813 break
16814 }
16815 if l.Aux != sym {
16816 break
16817 }
16818 _ = l.Args[1]
16819 if ptr != l.Args[0] {
16820 break
16821 }
16822 if mem != l.Args[1] {
16823 break
16824 }
16825 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
16826 break
16827 }
16828 v.reset(OpAMD64ORLmodify)
16829 v.AuxInt = off
16830 v.Aux = sym
16831 v.AddArg(ptr)
16832 v.AddArg(x)
16833 v.AddArg(mem)
16834 return true
16835 }
16836 // match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
16837 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
16838 // result: (XORLmodify [off] {sym} ptr x mem)
16839 for {
16840 off := v.AuxInt
16841 sym := v.Aux
16842 mem := v.Args[2]
16843 ptr := v.Args[0]
16844 y := v.Args[1]
16845 if y.Op != OpAMD64XORL {
16846 break
16847 }
16848 x := y.Args[1]
16849 l := y.Args[0]
16850 if l.Op != OpAMD64MOVLload {
16851 break
16852 }
16853 if l.AuxInt != off {
16854 break
16855 }
16856 if l.Aux != sym {
16857 break
16858 }
16859 _ = l.Args[1]
16860 if ptr != l.Args[0] {
16861 break
16862 }
16863 if mem != l.Args[1] {
16864 break
16865 }
16866 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
16867 break
16868 }
16869 v.reset(OpAMD64XORLmodify)
16870 v.AuxInt = off
16871 v.Aux = sym
16872 v.AddArg(ptr)
16873 v.AddArg(x)
16874 v.AddArg(mem)
16875 return true
16876 }
16877 // match: (MOVLstore {sym} [off] ptr y:(XORL x l:(MOVLload [off] {sym} ptr mem)) mem)
16878 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
16879 // result: (XORLmodify [off] {sym} ptr x mem)
16880 for {
16881 off := v.AuxInt
16882 sym := v.Aux
16883 mem := v.Args[2]
16884 ptr := v.Args[0]
16885 y := v.Args[1]
16886 if y.Op != OpAMD64XORL {
16887 break
16888 }
16889 _ = y.Args[1]
16890 x := y.Args[0]
16891 l := y.Args[1]
16892 if l.Op != OpAMD64MOVLload {
16893 break
16894 }
16895 if l.AuxInt != off {
16896 break
16897 }
16898 if l.Aux != sym {
16899 break
16900 }
16901 _ = l.Args[1]
16902 if ptr != l.Args[0] {
16903 break
16904 }
16905 if mem != l.Args[1] {
16906 break
16907 }
16908 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
16909 break
16910 }
16911 v.reset(OpAMD64XORLmodify)
16912 v.AuxInt = off
16913 v.Aux = sym
16914 v.AddArg(ptr)
16915 v.AddArg(x)
16916 v.AddArg(mem)
16917 return true
16918 }
16919 // match: (MOVLstore {sym} [off] ptr y:(BTCL l:(MOVLload [off] {sym} ptr mem) x) mem)
16920 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
16921 // result: (BTCLmodify [off] {sym} ptr x mem)
16922 for {
16923 off := v.AuxInt
16924 sym := v.Aux
16925 mem := v.Args[2]
16926 ptr := v.Args[0]
16927 y := v.Args[1]
16928 if y.Op != OpAMD64BTCL {
16929 break
16930 }
16931 x := y.Args[1]
16932 l := y.Args[0]
16933 if l.Op != OpAMD64MOVLload {
16934 break
16935 }
16936 if l.AuxInt != off {
16937 break
16938 }
16939 if l.Aux != sym {
16940 break
16941 }
16942 _ = l.Args[1]
16943 if ptr != l.Args[0] {
16944 break
16945 }
16946 if mem != l.Args[1] {
16947 break
16948 }
16949 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
16950 break
16951 }
16952 v.reset(OpAMD64BTCLmodify)
16953 v.AuxInt = off
16954 v.Aux = sym
16955 v.AddArg(ptr)
16956 v.AddArg(x)
16957 v.AddArg(mem)
16958 return true
16959 }
16960 // match: (MOVLstore {sym} [off] ptr y:(BTRL l:(MOVLload [off] {sym} ptr mem) x) mem)
16961 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
16962 // result: (BTRLmodify [off] {sym} ptr x mem)
16963 for {
16964 off := v.AuxInt
16965 sym := v.Aux
16966 mem := v.Args[2]
16967 ptr := v.Args[0]
16968 y := v.Args[1]
16969 if y.Op != OpAMD64BTRL {
16970 break
16971 }
16972 x := y.Args[1]
16973 l := y.Args[0]
16974 if l.Op != OpAMD64MOVLload {
16975 break
16976 }
16977 if l.AuxInt != off {
16978 break
16979 }
16980 if l.Aux != sym {
16981 break
16982 }
16983 _ = l.Args[1]
16984 if ptr != l.Args[0] {
16985 break
16986 }
16987 if mem != l.Args[1] {
16988 break
16989 }
16990 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
16991 break
16992 }
16993 v.reset(OpAMD64BTRLmodify)
16994 v.AuxInt = off
16995 v.Aux = sym
16996 v.AddArg(ptr)
16997 v.AddArg(x)
16998 v.AddArg(mem)
16999 return true
17000 }
17001 return false
17002 }
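// rewriteValueAMD64_OpAMD64MOVLstore_30 fuses BTSL and constant-operand ops into BTSLmodify and the *constmodify instructions, and rewrites stores of MOVLf2i values as MOVSSstore.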
17003 func rewriteValueAMD64_OpAMD64MOVLstore_30(v *Value) bool {
17004 // match: (MOVLstore {sym} [off] ptr y:(BTSL l:(MOVLload [off] {sym} ptr mem) x) mem)
17005 // cond: y.Uses==1 && l.Uses==1 && clobber(y) && clobber(l)
17006 // result: (BTSLmodify [off] {sym} ptr x mem)
17007 for {
17008 off := v.AuxInt
17009 sym := v.Aux
17010 mem := v.Args[2]
17011 ptr := v.Args[0]
17012 y := v.Args[1]
17013 if y.Op != OpAMD64BTSL {
17014 break
17015 }
17016 x := y.Args[1]
17017 l := y.Args[0]
17018 if l.Op != OpAMD64MOVLload {
17019 break
17020 }
17021 if l.AuxInt != off {
17022 break
17023 }
17024 if l.Aux != sym {
17025 break
17026 }
17027 _ = l.Args[1]
17028 if ptr != l.Args[0] {
17029 break
17030 }
17031 if mem != l.Args[1] {
17032 break
17033 }
17034 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
17035 break
17036 }
17037 v.reset(OpAMD64BTSLmodify)
17038 v.AuxInt = off
17039 v.Aux = sym
17040 v.AddArg(ptr)
17041 v.AddArg(x)
17042 v.AddArg(mem)
17043 return true
17044 }
17045 // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
17046 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
17047 // result: (ADDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
17048 for {
17049 off := v.AuxInt
17050 sym := v.Aux
17051 mem := v.Args[2]
17052 ptr := v.Args[0]
17053 a := v.Args[1]
17054 if a.Op != OpAMD64ADDLconst {
17055 break
17056 }
17057 c := a.AuxInt
17058 l := a.Args[0]
17059 if l.Op != OpAMD64MOVLload {
17060 break
17061 }
17062 if l.AuxInt != off {
17063 break
17064 }
17065 if l.Aux != sym {
17066 break
17067 }
17068 _ = l.Args[1]
17069 ptr2 := l.Args[0]
17070 if mem != l.Args[1] {
17071 break
17072 }
17073 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
17074 break
17075 }
17076 v.reset(OpAMD64ADDLconstmodify)
17077 v.AuxInt = makeValAndOff(c, off)
17078 v.Aux = sym
17079 v.AddArg(ptr)
17080 v.AddArg(mem)
17081 return true
17082 }
17083 // match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
17084 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
17085 // result: (ANDLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
17086 for {
17087 off := v.AuxInt
17088 sym := v.Aux
17089 mem := v.Args[2]
17090 ptr := v.Args[0]
17091 a := v.Args[1]
17092 if a.Op != OpAMD64ANDLconst {
17093 break
17094 }
17095 c := a.AuxInt
17096 l := a.Args[0]
17097 if l.Op != OpAMD64MOVLload {
17098 break
17099 }
17100 if l.AuxInt != off {
17101 break
17102 }
17103 if l.Aux != sym {
17104 break
17105 }
17106 _ = l.Args[1]
17107 ptr2 := l.Args[0]
17108 if mem != l.Args[1] {
17109 break
17110 }
17111 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
17112 break
17113 }
17114 v.reset(OpAMD64ANDLconstmodify)
17115 v.AuxInt = makeValAndOff(c, off)
17116 v.Aux = sym
17117 v.AddArg(ptr)
17118 v.AddArg(mem)
17119 return true
17120 }
17121 // match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
17122 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
17123 // result: (ORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
17124 for {
17125 off := v.AuxInt
17126 sym := v.Aux
17127 mem := v.Args[2]
17128 ptr := v.Args[0]
17129 a := v.Args[1]
17130 if a.Op != OpAMD64ORLconst {
17131 break
17132 }
17133 c := a.AuxInt
17134 l := a.Args[0]
17135 if l.Op != OpAMD64MOVLload {
17136 break
17137 }
17138 if l.AuxInt != off {
17139 break
17140 }
17141 if l.Aux != sym {
17142 break
17143 }
17144 _ = l.Args[1]
17145 ptr2 := l.Args[0]
17146 if mem != l.Args[1] {
17147 break
17148 }
17149 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
17150 break
17151 }
17152 v.reset(OpAMD64ORLconstmodify)
17153 v.AuxInt = makeValAndOff(c, off)
17154 v.Aux = sym
17155 v.AddArg(ptr)
17156 v.AddArg(mem)
17157 return true
17158 }
17159 // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
17160 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
17161 // result: (XORLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
17162 for {
17163 off := v.AuxInt
17164 sym := v.Aux
17165 mem := v.Args[2]
17166 ptr := v.Args[0]
17167 a := v.Args[1]
17168 if a.Op != OpAMD64XORLconst {
17169 break
17170 }
17171 c := a.AuxInt
17172 l := a.Args[0]
17173 if l.Op != OpAMD64MOVLload {
17174 break
17175 }
17176 if l.AuxInt != off {
17177 break
17178 }
17179 if l.Aux != sym {
17180 break
17181 }
17182 _ = l.Args[1]
17183 ptr2 := l.Args[0]
17184 if mem != l.Args[1] {
17185 break
17186 }
17187 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
17188 break
17189 }
17190 v.reset(OpAMD64XORLconstmodify)
17191 v.AuxInt = makeValAndOff(c, off)
17192 v.Aux = sym
17193 v.AddArg(ptr)
17194 v.AddArg(mem)
17195 return true
17196 }
17197 // match: (MOVLstore [off] {sym} ptr a:(BTCLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
17198 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
17199 // result: (BTCLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
17200 for {
17201 off := v.AuxInt
17202 sym := v.Aux
17203 mem := v.Args[2]
17204 ptr := v.Args[0]
17205 a := v.Args[1]
17206 if a.Op != OpAMD64BTCLconst {
17207 break
17208 }
17209 c := a.AuxInt
17210 l := a.Args[0]
17211 if l.Op != OpAMD64MOVLload {
17212 break
17213 }
17214 if l.AuxInt != off {
17215 break
17216 }
17217 if l.Aux != sym {
17218 break
17219 }
17220 _ = l.Args[1]
17221 ptr2 := l.Args[0]
17222 if mem != l.Args[1] {
17223 break
17224 }
17225 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
17226 break
17227 }
17228 v.reset(OpAMD64BTCLconstmodify)
17229 v.AuxInt = makeValAndOff(c, off)
17230 v.Aux = sym
17231 v.AddArg(ptr)
17232 v.AddArg(mem)
17233 return true
17234 }
17235 // match: (MOVLstore [off] {sym} ptr a:(BTRLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
17236 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
17237 // result: (BTRLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
17238 for {
17239 off := v.AuxInt
17240 sym := v.Aux
17241 mem := v.Args[2]
17242 ptr := v.Args[0]
17243 a := v.Args[1]
17244 if a.Op != OpAMD64BTRLconst {
17245 break
17246 }
17247 c := a.AuxInt
17248 l := a.Args[0]
17249 if l.Op != OpAMD64MOVLload {
17250 break
17251 }
17252 if l.AuxInt != off {
17253 break
17254 }
17255 if l.Aux != sym {
17256 break
17257 }
17258 _ = l.Args[1]
17259 ptr2 := l.Args[0]
17260 if mem != l.Args[1] {
17261 break
17262 }
17263 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
17264 break
17265 }
17266 v.reset(OpAMD64BTRLconstmodify)
17267 v.AuxInt = makeValAndOff(c, off)
17268 v.Aux = sym
17269 v.AddArg(ptr)
17270 v.AddArg(mem)
17271 return true
17272 }
17273 // match: (MOVLstore [off] {sym} ptr a:(BTSLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
17274 // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) && clobber(l) && clobber(a)
17275 // result: (BTSLconstmodify {sym} [makeValAndOff(c,off)] ptr mem)
17276 for {
17277 off := v.AuxInt
17278 sym := v.Aux
17279 mem := v.Args[2]
17280 ptr := v.Args[0]
17281 a := v.Args[1]
17282 if a.Op != OpAMD64BTSLconst {
17283 break
17284 }
17285 c := a.AuxInt
17286 l := a.Args[0]
17287 if l.Op != OpAMD64MOVLload {
17288 break
17289 }
17290 if l.AuxInt != off {
17291 break
17292 }
17293 if l.Aux != sym {
17294 break
17295 }
17296 _ = l.Args[1]
17297 ptr2 := l.Args[0]
17298 if mem != l.Args[1] {
17299 break
17300 }
17301 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
17302 break
17303 }
17304 v.reset(OpAMD64BTSLconstmodify)
17305 v.AuxInt = makeValAndOff(c, off)
17306 v.Aux = sym
17307 v.AddArg(ptr)
17308 v.AddArg(mem)
17309 return true
17310 }
17311 // match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
17312 // cond:
17313 // result: (MOVSSstore [off] {sym} ptr val mem)
17314 for {
17315 off := v.AuxInt
17316 sym := v.Aux
17317 mem := v.Args[2]
17318 ptr := v.Args[0]
17319 v_1 := v.Args[1]
17320 if v_1.Op != OpAMD64MOVLf2i {
17321 break
17322 }
17323 val := v_1.Args[0]
17324 v.reset(OpAMD64MOVSSstore)
17325 v.AuxInt = off
17326 v.Aux = sym
17327 v.AddArg(ptr)
17328 v.AddArg(val)
17329 v.AddArg(mem)
17330 return true
17331 }
17332 return false
17333 }
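// rewriteValueAMD64_OpAMD64MOVLstoreconst_0 folds address arithmetic into the constant store and merges two adjacent 32-bit constant stores into one MOVQstore.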
17334 func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool {
17335 b := v.Block
17336 typ := &b.Func.Config.Types
17337 // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
17338 // cond: ValAndOff(sc).canAdd(off)
17339 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
17340 for {
17341 sc := v.AuxInt
17342 s := v.Aux
17343 mem := v.Args[1]
17344 v_0 := v.Args[0]
17345 if v_0.Op != OpAMD64ADDQconst {
17346 break
17347 }
17348 off := v_0.AuxInt
17349 ptr := v_0.Args[0]
17350 if !(ValAndOff(sc).canAdd(off)) {
17351 break
17352 }
17353 v.reset(OpAMD64MOVLstoreconst)
17354 v.AuxInt = ValAndOff(sc).add(off)
17355 v.Aux = s
17356 v.AddArg(ptr)
17357 v.AddArg(mem)
17358 return true
17359 }
17360 // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
17361 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
17362 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
17363 for {
17364 sc := v.AuxInt
17365 sym1 := v.Aux
17366 mem := v.Args[1]
17367 v_0 := v.Args[0]
17368 if v_0.Op != OpAMD64LEAQ {
17369 break
17370 }
17371 off := v_0.AuxInt
17372 sym2 := v_0.Aux
17373 ptr := v_0.Args[0]
17374 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
17375 break
17376 }
17377 v.reset(OpAMD64MOVLstoreconst)
17378 v.AuxInt = ValAndOff(sc).add(off)
17379 v.Aux = mergeSym(sym1, sym2)
17380 v.AddArg(ptr)
17381 v.AddArg(mem)
17382 return true
17383 }
17384 // match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
17385 // cond: canMergeSym(sym1, sym2)
17386 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
17387 for {
17388 x := v.AuxInt
17389 sym1 := v.Aux
17390 mem := v.Args[1]
17391 v_0 := v.Args[0]
17392 if v_0.Op != OpAMD64LEAQ1 {
17393 break
17394 }
17395 off := v_0.AuxInt
17396 sym2 := v_0.Aux
17397 idx := v_0.Args[1]
17398 ptr := v_0.Args[0]
17399 if !(canMergeSym(sym1, sym2)) {
17400 break
17401 }
17402 v.reset(OpAMD64MOVLstoreconstidx1)
17403 v.AuxInt = ValAndOff(x).add(off)
17404 v.Aux = mergeSym(sym1, sym2)
17405 v.AddArg(ptr)
17406 v.AddArg(idx)
17407 v.AddArg(mem)
17408 return true
17409 }
17410 // match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem)
17411 // cond: canMergeSym(sym1, sym2)
17412 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
17413 for {
17414 x := v.AuxInt
17415 sym1 := v.Aux
17416 mem := v.Args[1]
17417 v_0 := v.Args[0]
17418 if v_0.Op != OpAMD64LEAQ4 {
17419 break
17420 }
17421 off := v_0.AuxInt
17422 sym2 := v_0.Aux
17423 idx := v_0.Args[1]
17424 ptr := v_0.Args[0]
17425 if !(canMergeSym(sym1, sym2)) {
17426 break
17427 }
17428 v.reset(OpAMD64MOVLstoreconstidx4)
17429 v.AuxInt = ValAndOff(x).add(off)
17430 v.Aux = mergeSym(sym1, sym2)
17431 v.AddArg(ptr)
17432 v.AddArg(idx)
17433 v.AddArg(mem)
17434 return true
17435 }
17436 // match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem)
17437 // cond:
17438 // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem)
17439 for {
17440 x := v.AuxInt
17441 sym := v.Aux
17442 mem := v.Args[1]
17443 v_0 := v.Args[0]
17444 if v_0.Op != OpAMD64ADDQ {
17445 break
17446 }
17447 idx := v_0.Args[1]
17448 ptr := v_0.Args[0]
17449 v.reset(OpAMD64MOVLstoreconstidx1)
17450 v.AuxInt = x
17451 v.Aux = sym
17452 v.AddArg(ptr)
17453 v.AddArg(idx)
17454 v.AddArg(mem)
17455 return true
17456 }
17457 // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
17458 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
17459 // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
17460 for {
17461 c := v.AuxInt
17462 s := v.Aux
17463 _ = v.Args[1]
17464 p := v.Args[0]
17465 x := v.Args[1]
17466 if x.Op != OpAMD64MOVLstoreconst {
17467 break
17468 }
17469 a := x.AuxInt
17470 if x.Aux != s {
17471 break
17472 }
17473 mem := x.Args[1]
17474 if p != x.Args[0] {
17475 break
17476 }
17477 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
17478 break
17479 }
17480 v.reset(OpAMD64MOVQstore)
17481 v.AuxInt = ValAndOff(a).Off()
17482 v.Aux = s
17483 v.AddArg(p)
17484 v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
17485 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
17486 v.AddArg(v0)
17487 v.AddArg(mem)
17488 return true
17489 }
17490 // match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
17491 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
17492 // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
17493 for {
17494 a := v.AuxInt
17495 s := v.Aux
17496 _ = v.Args[1]
17497 p := v.Args[0]
17498 x := v.Args[1]
17499 if x.Op != OpAMD64MOVLstoreconst {
17500 break
17501 }
17502 c := x.AuxInt
17503 if x.Aux != s {
17504 break
17505 }
17506 mem := x.Args[1]
17507 if p != x.Args[0] {
17508 break
17509 }
17510 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
17511 break
17512 }
17513 v.reset(OpAMD64MOVQstore)
17514 v.AuxInt = ValAndOff(a).Off()
17515 v.Aux = s
17516 v.AddArg(p)
17517 v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
17518 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
17519 v.AddArg(v0)
17520 v.AddArg(mem)
17521 return true
17522 }
17523 // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
17524 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
17525 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
17526 for {
17527 sc := v.AuxInt
17528 sym1 := v.Aux
17529 mem := v.Args[1]
17530 v_0 := v.Args[0]
17531 if v_0.Op != OpAMD64LEAL {
17532 break
17533 }
17534 off := v_0.AuxInt
17535 sym2 := v_0.Aux
17536 ptr := v_0.Args[0]
17537 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
17538 break
17539 }
17540 v.reset(OpAMD64MOVLstoreconst)
17541 v.AuxInt = ValAndOff(sc).add(off)
17542 v.Aux = mergeSym(sym1, sym2)
17543 v.AddArg(ptr)
17544 v.AddArg(mem)
17545 return true
17546 }
17547 // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
17548 // cond: ValAndOff(sc).canAdd(off)
17549 // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
17550 for {
17551 sc := v.AuxInt
17552 s := v.Aux
17553 mem := v.Args[1]
17554 v_0 := v.Args[0]
17555 if v_0.Op != OpAMD64ADDLconst {
17556 break
17557 }
17558 off := v_0.AuxInt
17559 ptr := v_0.Args[0]
17560 if !(ValAndOff(sc).canAdd(off)) {
17561 break
17562 }
17563 v.reset(OpAMD64MOVLstoreconst)
17564 v.AuxInt = ValAndOff(sc).add(off)
17565 v.Aux = s
17566 v.AddArg(ptr)
17567 v.AddArg(mem)
17568 return true
17569 }
17570 return false
17571 }
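// rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0 rescales shifted indices, folds constant displacements, and pairs adjacent constant stores into MOVQstoreidx1.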
17572 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool {
17573 b := v.Block
17574 typ := &b.Func.Config.Types
17575 // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
17576 // cond:
17577 // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)
17578 for {
17579 c := v.AuxInt
17580 sym := v.Aux
17581 mem := v.Args[2]
17582 ptr := v.Args[0]
17583 v_1 := v.Args[1]
17584 if v_1.Op != OpAMD64SHLQconst {
17585 break
17586 }
17587 if v_1.AuxInt != 2 {
17588 break
17589 }
17590 idx := v_1.Args[0]
17591 v.reset(OpAMD64MOVLstoreconstidx4)
17592 v.AuxInt = c
17593 v.Aux = sym
17594 v.AddArg(ptr)
17595 v.AddArg(idx)
17596 v.AddArg(mem)
17597 return true
17598 }
17599 // match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
17600 // cond: ValAndOff(x).canAdd(c)
17601 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
17602 for {
17603 x := v.AuxInt
17604 sym := v.Aux
17605 mem := v.Args[2]
17606 v_0 := v.Args[0]
17607 if v_0.Op != OpAMD64ADDQconst {
17608 break
17609 }
17610 c := v_0.AuxInt
17611 ptr := v_0.Args[0]
17612 idx := v.Args[1]
17613 if !(ValAndOff(x).canAdd(c)) {
17614 break
17615 }
17616 v.reset(OpAMD64MOVLstoreconstidx1)
17617 v.AuxInt = ValAndOff(x).add(c)
17618 v.Aux = sym
17619 v.AddArg(ptr)
17620 v.AddArg(idx)
17621 v.AddArg(mem)
17622 return true
17623 }
17624 // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
17625 // cond: ValAndOff(x).canAdd(c)
17626 // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
17627 for {
17628 x := v.AuxInt
17629 sym := v.Aux
17630 mem := v.Args[2]
17631 ptr := v.Args[0]
17632 v_1 := v.Args[1]
17633 if v_1.Op != OpAMD64ADDQconst {
17634 break
17635 }
17636 c := v_1.AuxInt
17637 idx := v_1.Args[0]
17638 if !(ValAndOff(x).canAdd(c)) {
17639 break
17640 }
17641 v.reset(OpAMD64MOVLstoreconstidx1)
17642 v.AuxInt = ValAndOff(x).add(c)
17643 v.Aux = sym
17644 v.AddArg(ptr)
17645 v.AddArg(idx)
17646 v.AddArg(mem)
17647 return true
17648 }
17649 // match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem))
17650 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
17651 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
17652 for {
17653 c := v.AuxInt
17654 s := v.Aux
17655 _ = v.Args[2]
17656 p := v.Args[0]
17657 i := v.Args[1]
17658 x := v.Args[2]
17659 if x.Op != OpAMD64MOVLstoreconstidx1 {
17660 break
17661 }
17662 a := x.AuxInt
17663 if x.Aux != s {
17664 break
17665 }
17666 mem := x.Args[2]
17667 if p != x.Args[0] {
17668 break
17669 }
17670 if i != x.Args[1] {
17671 break
17672 }
17673 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
17674 break
17675 }
17676 v.reset(OpAMD64MOVQstoreidx1)
17677 v.AuxInt = ValAndOff(a).Off()
17678 v.Aux = s
17679 v.AddArg(p)
17680 v.AddArg(i)
17681 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
17682 v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
17683 v.AddArg(v0)
17684 v.AddArg(mem)
17685 return true
17686 }
17687 return false
17688 }
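// rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0 folds constant displacements (the index scaled by 4) and pairs adjacent constant stores into MOVQstoreidx1.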
17689 func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool {
17690 b := v.Block
17691 typ := &b.Func.Config.Types
17692 // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
17693 // cond: ValAndOff(x).canAdd(c)
17694 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)
17695 for {
17696 x := v.AuxInt
17697 sym := v.Aux
17698 mem := v.Args[2]
17699 v_0 := v.Args[0]
17700 if v_0.Op != OpAMD64ADDQconst {
17701 break
17702 }
17703 c := v_0.AuxInt
17704 ptr := v_0.Args[0]
17705 idx := v.Args[1]
17706 if !(ValAndOff(x).canAdd(c)) {
17707 break
17708 }
17709 v.reset(OpAMD64MOVLstoreconstidx4)
17710 v.AuxInt = ValAndOff(x).add(c)
17711 v.Aux = sym
17712 v.AddArg(ptr)
17713 v.AddArg(idx)
17714 v.AddArg(mem)
17715 return true
17716 }
17717 // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem)
17718 // cond: ValAndOff(x).canAdd(4*c)
17719 // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem)
17720 for {
17721 x := v.AuxInt
17722 sym := v.Aux
17723 mem := v.Args[2]
17724 ptr := v.Args[0]
17725 v_1 := v.Args[1]
17726 if v_1.Op != OpAMD64ADDQconst {
17727 break
17728 }
17729 c := v_1.AuxInt
17730 idx := v_1.Args[0]
17731 if !(ValAndOff(x).canAdd(4 * c)) {
17732 break
17733 }
17734 v.reset(OpAMD64MOVLstoreconstidx4)
17735 v.AuxInt = ValAndOff(x).add(4 * c)
17736 v.Aux = sym
17737 v.AddArg(ptr)
17738 v.AddArg(idx)
17739 v.AddArg(mem)
17740 return true
17741 }
17742 // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem))
17743 // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x)
17744 // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst <i.Type> [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem)
17745 for {
17746 c := v.AuxInt
17747 s := v.Aux
17748 _ = v.Args[2]
17749 p := v.Args[0]
17750 i := v.Args[1]
17751 x := v.Args[2]
17752 if x.Op != OpAMD64MOVLstoreconstidx4 {
17753 break
17754 }
17755 a := x.AuxInt
17756 if x.Aux != s {
17757 break
17758 }
17759 mem := x.Args[2]
17760 if p != x.Args[0] {
17761 break
17762 }
17763 if i != x.Args[1] {
17764 break
17765 }
17766 if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) {
17767 break
17768 }
17769 v.reset(OpAMD64MOVQstoreidx1)
17770 v.AuxInt = ValAndOff(a).Off()
17771 v.Aux = s
17772 v.AddArg(p)
17773 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type)
17774 v0.AuxInt = 2
17775 v0.AddArg(i)
17776 v.AddArg(v0)
17777 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
17778 v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
17779 v.AddArg(v1)
17780 v.AddArg(mem)
17781 return true
17782 }
17783 return false
17784 }
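// rewriteValueAMD64_OpAMD64MOVLstoreidx1_0 rescales shifted indices to the idx4/idx8 forms, folds constant displacements, and combines adjacent 32-bit indexed stores into MOVQstoreidx1.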
17785 func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool {
17786 // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
17787 // cond:
17788 // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem)
17789 for {
17790 c := v.AuxInt
17791 sym := v.Aux
17792 mem := v.Args[3]
17793 ptr := v.Args[0]
17794 v_1 := v.Args[1]
17795 if v_1.Op != OpAMD64SHLQconst {
17796 break
17797 }
17798 if v_1.AuxInt != 2 {
17799 break
17800 }
17801 idx := v_1.Args[0]
17802 val := v.Args[2]
17803 v.reset(OpAMD64MOVLstoreidx4)
17804 v.AuxInt = c
17805 v.Aux = sym
17806 v.AddArg(ptr)
17807 v.AddArg(idx)
17808 v.AddArg(val)
17809 v.AddArg(mem)
17810 return true
17811 }
17812
17813
17814
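	// match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVLstoreidx8 [c] {sym} ptr idx val mem)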
17815 for {
17816 c := v.AuxInt
17817 sym := v.Aux
17818 mem := v.Args[3]
17819 ptr := v.Args[0]
17820 v_1 := v.Args[1]
17821 if v_1.Op != OpAMD64SHLQconst {
17822 break
17823 }
17824 if v_1.AuxInt != 3 {
17825 break
17826 }
17827 idx := v_1.Args[0]
17828 val := v.Args[2]
17829 v.reset(OpAMD64MOVLstoreidx8)
17830 v.AuxInt = c
17831 v.Aux = sym
17832 v.AddArg(ptr)
17833 v.AddArg(idx)
17834 v.AddArg(val)
17835 v.AddArg(mem)
17836 return true
17837 }
17838
17839
17840
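	// match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)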
17841 for {
17842 c := v.AuxInt
17843 sym := v.Aux
17844 mem := v.Args[3]
17845 v_0 := v.Args[0]
17846 if v_0.Op != OpAMD64ADDQconst {
17847 break
17848 }
17849 d := v_0.AuxInt
17850 ptr := v_0.Args[0]
17851 idx := v.Args[1]
17852 val := v.Args[2]
17853 if !(is32Bit(c + d)) {
17854 break
17855 }
17856 v.reset(OpAMD64MOVLstoreidx1)
17857 v.AuxInt = c + d
17858 v.Aux = sym
17859 v.AddArg(ptr)
17860 v.AddArg(idx)
17861 v.AddArg(val)
17862 v.AddArg(mem)
17863 return true
17864 }
17865
17866
17867
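	// match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem)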
17868 for {
17869 c := v.AuxInt
17870 sym := v.Aux
17871 mem := v.Args[3]
17872 ptr := v.Args[0]
17873 v_1 := v.Args[1]
17874 if v_1.Op != OpAMD64ADDQconst {
17875 break
17876 }
17877 d := v_1.AuxInt
17878 idx := v_1.Args[0]
17879 val := v.Args[2]
17880 if !(is32Bit(c + d)) {
17881 break
17882 }
17883 v.reset(OpAMD64MOVLstoreidx1)
17884 v.AuxInt = c + d
17885 v.Aux = sym
17886 v.AddArg(ptr)
17887 v.AddArg(idx)
17888 v.AddArg(val)
17889 v.AddArg(mem)
17890 return true
17891 }
17892
17893
17894
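	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p idx w mem)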
17895 for {
17896 i := v.AuxInt
17897 s := v.Aux
17898 _ = v.Args[3]
17899 p := v.Args[0]
17900 idx := v.Args[1]
17901 v_2 := v.Args[2]
17902 if v_2.Op != OpAMD64SHRQconst {
17903 break
17904 }
17905 if v_2.AuxInt != 32 {
17906 break
17907 }
17908 w := v_2.Args[0]
17909 x := v.Args[3]
17910 if x.Op != OpAMD64MOVLstoreidx1 {
17911 break
17912 }
17913 if x.AuxInt != i-4 {
17914 break
17915 }
17916 if x.Aux != s {
17917 break
17918 }
17919 mem := x.Args[3]
17920 if p != x.Args[0] {
17921 break
17922 }
17923 if idx != x.Args[1] {
17924 break
17925 }
17926 if w != x.Args[2] {
17927 break
17928 }
17929 if !(x.Uses == 1 && clobber(x)) {
17930 break
17931 }
17932 v.reset(OpAMD64MOVQstoreidx1)
17933 v.AuxInt = i - 4
17934 v.Aux = s
17935 v.AddArg(p)
17936 v.AddArg(idx)
17937 v.AddArg(w)
17938 v.AddArg(mem)
17939 return true
17940 }
17941
17942
17943
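	// match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem)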
17944 for {
17945 i := v.AuxInt
17946 s := v.Aux
17947 _ = v.Args[3]
17948 p := v.Args[0]
17949 idx := v.Args[1]
17950 v_2 := v.Args[2]
17951 if v_2.Op != OpAMD64SHRQconst {
17952 break
17953 }
17954 j := v_2.AuxInt
17955 w := v_2.Args[0]
17956 x := v.Args[3]
17957 if x.Op != OpAMD64MOVLstoreidx1 {
17958 break
17959 }
17960 if x.AuxInt != i-4 {
17961 break
17962 }
17963 if x.Aux != s {
17964 break
17965 }
17966 mem := x.Args[3]
17967 if p != x.Args[0] {
17968 break
17969 }
17970 if idx != x.Args[1] {
17971 break
17972 }
17973 w0 := x.Args[2]
17974 if w0.Op != OpAMD64SHRQconst {
17975 break
17976 }
17977 if w0.AuxInt != j-32 {
17978 break
17979 }
17980 if w != w0.Args[0] {
17981 break
17982 }
17983 if !(x.Uses == 1 && clobber(x)) {
17984 break
17985 }
17986 v.reset(OpAMD64MOVQstoreidx1)
17987 v.AuxInt = i - 4
17988 v.Aux = s
17989 v.AddArg(p)
17990 v.AddArg(idx)
17991 v.AddArg(w0)
17992 v.AddArg(mem)
17993 return true
17994 }
17995
17996
17997
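	// match: (MOVLstoreidx1 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+c)
	// result: (MOVLstore [i+c] {s} p w mem)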
17998 for {
17999 i := v.AuxInt
18000 s := v.Aux
18001 mem := v.Args[3]
18002 p := v.Args[0]
18003 v_1 := v.Args[1]
18004 if v_1.Op != OpAMD64MOVQconst {
18005 break
18006 }
18007 c := v_1.AuxInt
18008 w := v.Args[2]
18009 if !(is32Bit(i + c)) {
18010 break
18011 }
18012 v.reset(OpAMD64MOVLstore)
18013 v.AuxInt = i + c
18014 v.Aux = s
18015 v.AddArg(p)
18016 v.AddArg(w)
18017 v.AddArg(mem)
18018 return true
18019 }
18020 return false
18021 }
18022 func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool {
18023 b := v.Block
18024
18025
18026
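	// match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem)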
18027 for {
18028 c := v.AuxInt
18029 sym := v.Aux
18030 mem := v.Args[3]
18031 v_0 := v.Args[0]
18032 if v_0.Op != OpAMD64ADDQconst {
18033 break
18034 }
18035 d := v_0.AuxInt
18036 ptr := v_0.Args[0]
18037 idx := v.Args[1]
18038 val := v.Args[2]
18039 if !(is32Bit(c + d)) {
18040 break
18041 }
18042 v.reset(OpAMD64MOVLstoreidx4)
18043 v.AuxInt = c + d
18044 v.Aux = sym
18045 v.AddArg(ptr)
18046 v.AddArg(idx)
18047 v.AddArg(val)
18048 v.AddArg(mem)
18049 return true
18050 }
18051
18052
18053
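	// match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+4*d)
	// result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem)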
18054 for {
18055 c := v.AuxInt
18056 sym := v.Aux
18057 mem := v.Args[3]
18058 ptr := v.Args[0]
18059 v_1 := v.Args[1]
18060 if v_1.Op != OpAMD64ADDQconst {
18061 break
18062 }
18063 d := v_1.AuxInt
18064 idx := v_1.Args[0]
18065 val := v.Args[2]
18066 if !(is32Bit(c + 4*d)) {
18067 break
18068 }
18069 v.reset(OpAMD64MOVLstoreidx4)
18070 v.AuxInt = c + 4*d
18071 v.Aux = sym
18072 v.AddArg(ptr)
18073 v.AddArg(idx)
18074 v.AddArg(val)
18075 v.AddArg(mem)
18076 return true
18077 }
18078
18079
18080
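	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w mem)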
18081 for {
18082 i := v.AuxInt
18083 s := v.Aux
18084 _ = v.Args[3]
18085 p := v.Args[0]
18086 idx := v.Args[1]
18087 v_2 := v.Args[2]
18088 if v_2.Op != OpAMD64SHRQconst {
18089 break
18090 }
18091 if v_2.AuxInt != 32 {
18092 break
18093 }
18094 w := v_2.Args[0]
18095 x := v.Args[3]
18096 if x.Op != OpAMD64MOVLstoreidx4 {
18097 break
18098 }
18099 if x.AuxInt != i-4 {
18100 break
18101 }
18102 if x.Aux != s {
18103 break
18104 }
18105 mem := x.Args[3]
18106 if p != x.Args[0] {
18107 break
18108 }
18109 if idx != x.Args[1] {
18110 break
18111 }
18112 if w != x.Args[2] {
18113 break
18114 }
18115 if !(x.Uses == 1 && clobber(x)) {
18116 break
18117 }
18118 v.reset(OpAMD64MOVQstoreidx1)
18119 v.AuxInt = i - 4
18120 v.Aux = s
18121 v.AddArg(p)
18122 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
18123 v0.AuxInt = 2
18124 v0.AddArg(idx)
18125 v.AddArg(v0)
18126 v.AddArg(w)
18127 v.AddArg(mem)
18128 return true
18129 }
18130
18131
18132
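	// match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem))
	// cond: x.Uses == 1 && clobber(x)
	// result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst <idx.Type> [2] idx) w0 mem)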
18133 for {
18134 i := v.AuxInt
18135 s := v.Aux
18136 _ = v.Args[3]
18137 p := v.Args[0]
18138 idx := v.Args[1]
18139 v_2 := v.Args[2]
18140 if v_2.Op != OpAMD64SHRQconst {
18141 break
18142 }
18143 j := v_2.AuxInt
18144 w := v_2.Args[0]
18145 x := v.Args[3]
18146 if x.Op != OpAMD64MOVLstoreidx4 {
18147 break
18148 }
18149 if x.AuxInt != i-4 {
18150 break
18151 }
18152 if x.Aux != s {
18153 break
18154 }
18155 mem := x.Args[3]
18156 if p != x.Args[0] {
18157 break
18158 }
18159 if idx != x.Args[1] {
18160 break
18161 }
18162 w0 := x.Args[2]
18163 if w0.Op != OpAMD64SHRQconst {
18164 break
18165 }
18166 if w0.AuxInt != j-32 {
18167 break
18168 }
18169 if w != w0.Args[0] {
18170 break
18171 }
18172 if !(x.Uses == 1 && clobber(x)) {
18173 break
18174 }
18175 v.reset(OpAMD64MOVQstoreidx1)
18176 v.AuxInt = i - 4
18177 v.Aux = s
18178 v.AddArg(p)
18179 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
18180 v0.AuxInt = 2
18181 v0.AddArg(idx)
18182 v.AddArg(v0)
18183 v.AddArg(w0)
18184 v.AddArg(mem)
18185 return true
18186 }
18187
18188
18189
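	// match: (MOVLstoreidx4 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+4*c)
	// result: (MOVLstore [i+4*c] {s} p w mem)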
18190 for {
18191 i := v.AuxInt
18192 s := v.Aux
18193 mem := v.Args[3]
18194 p := v.Args[0]
18195 v_1 := v.Args[1]
18196 if v_1.Op != OpAMD64MOVQconst {
18197 break
18198 }
18199 c := v_1.AuxInt
18200 w := v.Args[2]
18201 if !(is32Bit(i + 4*c)) {
18202 break
18203 }
18204 v.reset(OpAMD64MOVLstore)
18205 v.AuxInt = i + 4*c
18206 v.Aux = s
18207 v.AddArg(p)
18208 v.AddArg(w)
18209 v.AddArg(mem)
18210 return true
18211 }
18212 return false
18213 }
18214 func rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v *Value) bool {
18215
18216
18217
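	// match: (MOVLstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVLstoreidx8 [c+d] {sym} ptr idx val mem)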
18218 for {
18219 c := v.AuxInt
18220 sym := v.Aux
18221 mem := v.Args[3]
18222 v_0 := v.Args[0]
18223 if v_0.Op != OpAMD64ADDQconst {
18224 break
18225 }
18226 d := v_0.AuxInt
18227 ptr := v_0.Args[0]
18228 idx := v.Args[1]
18229 val := v.Args[2]
18230 if !(is32Bit(c + d)) {
18231 break
18232 }
18233 v.reset(OpAMD64MOVLstoreidx8)
18234 v.AuxInt = c + d
18235 v.Aux = sym
18236 v.AddArg(ptr)
18237 v.AddArg(idx)
18238 v.AddArg(val)
18239 v.AddArg(mem)
18240 return true
18241 }
18242
18243
18244
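	// match: (MOVLstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVLstoreidx8 [c+8*d] {sym} ptr idx val mem)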
18245 for {
18246 c := v.AuxInt
18247 sym := v.Aux
18248 mem := v.Args[3]
18249 ptr := v.Args[0]
18250 v_1 := v.Args[1]
18251 if v_1.Op != OpAMD64ADDQconst {
18252 break
18253 }
18254 d := v_1.AuxInt
18255 idx := v_1.Args[0]
18256 val := v.Args[2]
18257 if !(is32Bit(c + 8*d)) {
18258 break
18259 }
18260 v.reset(OpAMD64MOVLstoreidx8)
18261 v.AuxInt = c + 8*d
18262 v.Aux = sym
18263 v.AddArg(ptr)
18264 v.AddArg(idx)
18265 v.AddArg(val)
18266 v.AddArg(mem)
18267 return true
18268 }
18269
18270
18271
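	// match: (MOVLstoreidx8 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+8*c)
	// result: (MOVLstore [i+8*c] {s} p w mem)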
18272 for {
18273 i := v.AuxInt
18274 s := v.Aux
18275 mem := v.Args[3]
18276 p := v.Args[0]
18277 v_1 := v.Args[1]
18278 if v_1.Op != OpAMD64MOVQconst {
18279 break
18280 }
18281 c := v_1.AuxInt
18282 w := v.Args[2]
18283 if !(is32Bit(i + 8*c)) {
18284 break
18285 }
18286 v.reset(OpAMD64MOVLstore)
18287 v.AuxInt = i + 8*c
18288 v.Aux = s
18289 v.AddArg(p)
18290 v.AddArg(w)
18291 v.AddArg(mem)
18292 return true
18293 }
18294 return false
18295 }
18296 func rewriteValueAMD64_OpAMD64MOVOload_0(v *Value) bool {
18297
18298
18299
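	// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVOload [off1+off2] {sym} ptr mem)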
18300 for {
18301 off1 := v.AuxInt
18302 sym := v.Aux
18303 mem := v.Args[1]
18304 v_0 := v.Args[0]
18305 if v_0.Op != OpAMD64ADDQconst {
18306 break
18307 }
18308 off2 := v_0.AuxInt
18309 ptr := v_0.Args[0]
18310 if !(is32Bit(off1 + off2)) {
18311 break
18312 }
18313 v.reset(OpAMD64MOVOload)
18314 v.AuxInt = off1 + off2
18315 v.Aux = sym
18316 v.AddArg(ptr)
18317 v.AddArg(mem)
18318 return true
18319 }
18320
18321
18322
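	// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVOload [off1+off2] {mergeSym(sym1, sym2)} base mem)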
18323 for {
18324 off1 := v.AuxInt
18325 sym1 := v.Aux
18326 mem := v.Args[1]
18327 v_0 := v.Args[0]
18328 if v_0.Op != OpAMD64LEAQ {
18329 break
18330 }
18331 off2 := v_0.AuxInt
18332 sym2 := v_0.Aux
18333 base := v_0.Args[0]
18334 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
18335 break
18336 }
18337 v.reset(OpAMD64MOVOload)
18338 v.AuxInt = off1 + off2
18339 v.Aux = mergeSym(sym1, sym2)
18340 v.AddArg(base)
18341 v.AddArg(mem)
18342 return true
18343 }
18344 return false
18345 }
18346 func rewriteValueAMD64_OpAMD64MOVOstore_0(v *Value) bool {
18347
18348
18349
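	// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVOstore [off1+off2] {sym} ptr val mem)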
18350 for {
18351 off1 := v.AuxInt
18352 sym := v.Aux
18353 mem := v.Args[2]
18354 v_0 := v.Args[0]
18355 if v_0.Op != OpAMD64ADDQconst {
18356 break
18357 }
18358 off2 := v_0.AuxInt
18359 ptr := v_0.Args[0]
18360 val := v.Args[1]
18361 if !(is32Bit(off1 + off2)) {
18362 break
18363 }
18364 v.reset(OpAMD64MOVOstore)
18365 v.AuxInt = off1 + off2
18366 v.Aux = sym
18367 v.AddArg(ptr)
18368 v.AddArg(val)
18369 v.AddArg(mem)
18370 return true
18371 }
18372
18373
18374
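	// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVOstore [off1+off2] {mergeSym(sym1, sym2)} base val mem)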
18375 for {
18376 off1 := v.AuxInt
18377 sym1 := v.Aux
18378 mem := v.Args[2]
18379 v_0 := v.Args[0]
18380 if v_0.Op != OpAMD64LEAQ {
18381 break
18382 }
18383 off2 := v_0.AuxInt
18384 sym2 := v_0.Aux
18385 base := v_0.Args[0]
18386 val := v.Args[1]
18387 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
18388 break
18389 }
18390 v.reset(OpAMD64MOVOstore)
18391 v.AuxInt = off1 + off2
18392 v.Aux = mergeSym(sym1, sym2)
18393 v.AddArg(base)
18394 v.AddArg(val)
18395 v.AddArg(mem)
18396 return true
18397 }
18398 return false
18399 }
18400 func rewriteValueAMD64_OpAMD64MOVQatomicload_0(v *Value) bool {
18401
18402
18403
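	// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQatomicload [off1+off2] {sym} ptr mem)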
18404 for {
18405 off1 := v.AuxInt
18406 sym := v.Aux
18407 mem := v.Args[1]
18408 v_0 := v.Args[0]
18409 if v_0.Op != OpAMD64ADDQconst {
18410 break
18411 }
18412 off2 := v_0.AuxInt
18413 ptr := v_0.Args[0]
18414 if !(is32Bit(off1 + off2)) {
18415 break
18416 }
18417 v.reset(OpAMD64MOVQatomicload)
18418 v.AuxInt = off1 + off2
18419 v.Aux = sym
18420 v.AddArg(ptr)
18421 v.AddArg(mem)
18422 return true
18423 }
18424
18425
18426
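	// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)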
18427 for {
18428 off1 := v.AuxInt
18429 sym1 := v.Aux
18430 mem := v.Args[1]
18431 v_0 := v.Args[0]
18432 if v_0.Op != OpAMD64LEAQ {
18433 break
18434 }
18435 off2 := v_0.AuxInt
18436 sym2 := v_0.Aux
18437 ptr := v_0.Args[0]
18438 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
18439 break
18440 }
18441 v.reset(OpAMD64MOVQatomicload)
18442 v.AuxInt = off1 + off2
18443 v.Aux = mergeSym(sym1, sym2)
18444 v.AddArg(ptr)
18445 v.AddArg(mem)
18446 return true
18447 }
18448 return false
18449 }
18450 func rewriteValueAMD64_OpAMD64MOVQf2i_0(v *Value) bool {
18451 b := v.Block
18452
18453
18454
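	// match: (MOVQf2i <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})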
18455 for {
18456 t := v.Type
18457 v_0 := v.Args[0]
18458 if v_0.Op != OpArg {
18459 break
18460 }
18461 u := v_0.Type
18462 off := v_0.AuxInt
18463 sym := v_0.Aux
18464 if !(t.Size() == u.Size()) {
18465 break
18466 }
18467 b = b.Func.Entry
18468 v0 := b.NewValue0(v.Pos, OpArg, t)
18469 v.reset(OpCopy)
18470 v.AddArg(v0)
18471 v0.AuxInt = off
18472 v0.Aux = sym
18473 return true
18474 }
18475 return false
18476 }
18477 func rewriteValueAMD64_OpAMD64MOVQi2f_0(v *Value) bool {
18478 b := v.Block
18479
18480
18481
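	// match: (MOVQi2f <t> (Arg <u> [off] {sym}))
	// cond: t.Size() == u.Size()
	// result: @b.Func.Entry (Arg <t> [off] {sym})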
18482 for {
18483 t := v.Type
18484 v_0 := v.Args[0]
18485 if v_0.Op != OpArg {
18486 break
18487 }
18488 u := v_0.Type
18489 off := v_0.AuxInt
18490 sym := v_0.Aux
18491 if !(t.Size() == u.Size()) {
18492 break
18493 }
18494 b = b.Func.Entry
18495 v0 := b.NewValue0(v.Pos, OpArg, t)
18496 v.reset(OpCopy)
18497 v.AddArg(v0)
18498 v0.AuxInt = off
18499 v0.Aux = sym
18500 return true
18501 }
18502 return false
18503 }
18504 func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool {
18505 b := v.Block
18506 config := b.Func.Config
18507
18508
18509
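	// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
	// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
	// result: x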
18510 for {
18511 off := v.AuxInt
18512 sym := v.Aux
18513 _ = v.Args[1]
18514 ptr := v.Args[0]
18515 v_1 := v.Args[1]
18516 if v_1.Op != OpAMD64MOVQstore {
18517 break
18518 }
18519 off2 := v_1.AuxInt
18520 sym2 := v_1.Aux
18521 _ = v_1.Args[2]
18522 ptr2 := v_1.Args[0]
18523 x := v_1.Args[1]
18524 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
18525 break
18526 }
18527 v.reset(OpCopy)
18528 v.Type = x.Type
18529 v.AddArg(x)
18530 return true
18531 }
18532
18533
18534
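	// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {sym} ptr mem)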
18535 for {
18536 off1 := v.AuxInt
18537 sym := v.Aux
18538 mem := v.Args[1]
18539 v_0 := v.Args[0]
18540 if v_0.Op != OpAMD64ADDQconst {
18541 break
18542 }
18543 off2 := v_0.AuxInt
18544 ptr := v_0.Args[0]
18545 if !(is32Bit(off1 + off2)) {
18546 break
18547 }
18548 v.reset(OpAMD64MOVQload)
18549 v.AuxInt = off1 + off2
18550 v.Aux = sym
18551 v.AddArg(ptr)
18552 v.AddArg(mem)
18553 return true
18554 }
18555
18556
18557
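	// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1, sym2)} base mem)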
18558 for {
18559 off1 := v.AuxInt
18560 sym1 := v.Aux
18561 mem := v.Args[1]
18562 v_0 := v.Args[0]
18563 if v_0.Op != OpAMD64LEAQ {
18564 break
18565 }
18566 off2 := v_0.AuxInt
18567 sym2 := v_0.Aux
18568 base := v_0.Args[0]
18569 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
18570 break
18571 }
18572 v.reset(OpAMD64MOVQload)
18573 v.AuxInt = off1 + off2
18574 v.Aux = mergeSym(sym1, sym2)
18575 v.AddArg(base)
18576 v.AddArg(mem)
18577 return true
18578 }
18579
18580
18581
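	// match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1, sym2)} ptr idx mem)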
18582 for {
18583 off1 := v.AuxInt
18584 sym1 := v.Aux
18585 mem := v.Args[1]
18586 v_0 := v.Args[0]
18587 if v_0.Op != OpAMD64LEAQ1 {
18588 break
18589 }
18590 off2 := v_0.AuxInt
18591 sym2 := v_0.Aux
18592 idx := v_0.Args[1]
18593 ptr := v_0.Args[0]
18594 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
18595 break
18596 }
18597 v.reset(OpAMD64MOVQloadidx1)
18598 v.AuxInt = off1 + off2
18599 v.Aux = mergeSym(sym1, sym2)
18600 v.AddArg(ptr)
18601 v.AddArg(idx)
18602 v.AddArg(mem)
18603 return true
18604 }
18605
18606
18607
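	// match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1, sym2)} ptr idx mem)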
18608 for {
18609 off1 := v.AuxInt
18610 sym1 := v.Aux
18611 mem := v.Args[1]
18612 v_0 := v.Args[0]
18613 if v_0.Op != OpAMD64LEAQ8 {
18614 break
18615 }
18616 off2 := v_0.AuxInt
18617 sym2 := v_0.Aux
18618 idx := v_0.Args[1]
18619 ptr := v_0.Args[0]
18620 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
18621 break
18622 }
18623 v.reset(OpAMD64MOVQloadidx8)
18624 v.AuxInt = off1 + off2
18625 v.Aux = mergeSym(sym1, sym2)
18626 v.AddArg(ptr)
18627 v.AddArg(idx)
18628 v.AddArg(mem)
18629 return true
18630 }
18631
18632
18633
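	// match: (MOVQload [off] {sym} (ADDQ ptr idx) mem)
	// cond: ptr.Op != OpSB
	// result: (MOVQloadidx1 [off] {sym} ptr idx mem)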
18634 for {
18635 off := v.AuxInt
18636 sym := v.Aux
18637 mem := v.Args[1]
18638 v_0 := v.Args[0]
18639 if v_0.Op != OpAMD64ADDQ {
18640 break
18641 }
18642 idx := v_0.Args[1]
18643 ptr := v_0.Args[0]
18644 if !(ptr.Op != OpSB) {
18645 break
18646 }
18647 v.reset(OpAMD64MOVQloadidx1)
18648 v.AuxInt = off
18649 v.Aux = sym
18650 v.AddArg(ptr)
18651 v.AddArg(idx)
18652 v.AddArg(mem)
18653 return true
18654 }
18655
18656
18657
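	// match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {mergeSym(sym1, sym2)} base mem)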
18658 for {
18659 off1 := v.AuxInt
18660 sym1 := v.Aux
18661 mem := v.Args[1]
18662 v_0 := v.Args[0]
18663 if v_0.Op != OpAMD64LEAL {
18664 break
18665 }
18666 off2 := v_0.AuxInt
18667 sym2 := v_0.Aux
18668 base := v_0.Args[0]
18669 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
18670 break
18671 }
18672 v.reset(OpAMD64MOVQload)
18673 v.AuxInt = off1 + off2
18674 v.Aux = mergeSym(sym1, sym2)
18675 v.AddArg(base)
18676 v.AddArg(mem)
18677 return true
18678 }
18679
18680
18681
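	// match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQload [off1+off2] {sym} ptr mem)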
18682 for {
18683 off1 := v.AuxInt
18684 sym := v.Aux
18685 mem := v.Args[1]
18686 v_0 := v.Args[0]
18687 if v_0.Op != OpAMD64ADDLconst {
18688 break
18689 }
18690 off2 := v_0.AuxInt
18691 ptr := v_0.Args[0]
18692 if !(is32Bit(off1 + off2)) {
18693 break
18694 }
18695 v.reset(OpAMD64MOVQload)
18696 v.AuxInt = off1 + off2
18697 v.Aux = sym
18698 v.AddArg(ptr)
18699 v.AddArg(mem)
18700 return true
18701 }
18702
18703
18704
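	// match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
	// cond:
	// result: (MOVQf2i val)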
18705 for {
18706 off := v.AuxInt
18707 sym := v.Aux
18708 _ = v.Args[1]
18709 ptr := v.Args[0]
18710 v_1 := v.Args[1]
18711 if v_1.Op != OpAMD64MOVSDstore {
18712 break
18713 }
18714 if v_1.AuxInt != off {
18715 break
18716 }
18717 if v_1.Aux != sym {
18718 break
18719 }
18720 _ = v_1.Args[2]
18721 if ptr != v_1.Args[0] {
18722 break
18723 }
18724 val := v_1.Args[1]
18725 v.reset(OpAMD64MOVQf2i)
18726 v.AddArg(val)
18727 return true
18728 }
18729
18730
18731
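	// match: (MOVQload [off] {sym} (SB) _)
	// cond: symIsRO(sym)
	// result: (MOVQconst [int64(read64(sym, off, config.BigEndian))])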
18732 for {
18733 off := v.AuxInt
18734 sym := v.Aux
18735 _ = v.Args[1]
18736 v_0 := v.Args[0]
18737 if v_0.Op != OpSB {
18738 break
18739 }
18740 if !(symIsRO(sym)) {
18741 break
18742 }
18743 v.reset(OpAMD64MOVQconst)
18744 v.AuxInt = int64(read64(sym, off, config.BigEndian))
18745 return true
18746 }
18747 return false
18748 }
18749 func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool {
18750
18751
18752
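	// match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVQloadidx8 [c] {sym} ptr idx mem)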
18753 for {
18754 c := v.AuxInt
18755 sym := v.Aux
18756 mem := v.Args[2]
18757 ptr := v.Args[0]
18758 v_1 := v.Args[1]
18759 if v_1.Op != OpAMD64SHLQconst {
18760 break
18761 }
18762 if v_1.AuxInt != 3 {
18763 break
18764 }
18765 idx := v_1.Args[0]
18766 v.reset(OpAMD64MOVQloadidx8)
18767 v.AuxInt = c
18768 v.Aux = sym
18769 v.AddArg(ptr)
18770 v.AddArg(idx)
18771 v.AddArg(mem)
18772 return true
18773 }
18774
18775
18776
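	// match: (MOVQloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem)
	// cond:
	// result: (MOVQloadidx8 [c] {sym} ptr idx mem)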
18777 for {
18778 c := v.AuxInt
18779 sym := v.Aux
18780 mem := v.Args[2]
18781 v_0 := v.Args[0]
18782 if v_0.Op != OpAMD64SHLQconst {
18783 break
18784 }
18785 if v_0.AuxInt != 3 {
18786 break
18787 }
18788 idx := v_0.Args[0]
18789 ptr := v.Args[1]
18790 v.reset(OpAMD64MOVQloadidx8)
18791 v.AuxInt = c
18792 v.Aux = sym
18793 v.AddArg(ptr)
18794 v.AddArg(idx)
18795 v.AddArg(mem)
18796 return true
18797 }
18798
18799
18800
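	// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)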
18801 for {
18802 c := v.AuxInt
18803 sym := v.Aux
18804 mem := v.Args[2]
18805 v_0 := v.Args[0]
18806 if v_0.Op != OpAMD64ADDQconst {
18807 break
18808 }
18809 d := v_0.AuxInt
18810 ptr := v_0.Args[0]
18811 idx := v.Args[1]
18812 if !(is32Bit(c + d)) {
18813 break
18814 }
18815 v.reset(OpAMD64MOVQloadidx1)
18816 v.AuxInt = c + d
18817 v.Aux = sym
18818 v.AddArg(ptr)
18819 v.AddArg(idx)
18820 v.AddArg(mem)
18821 return true
18822 }
18823
18824
18825
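	// match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
	// cond: is32Bit(c+d)
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)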
18826 for {
18827 c := v.AuxInt
18828 sym := v.Aux
18829 mem := v.Args[2]
18830 idx := v.Args[0]
18831 v_1 := v.Args[1]
18832 if v_1.Op != OpAMD64ADDQconst {
18833 break
18834 }
18835 d := v_1.AuxInt
18836 ptr := v_1.Args[0]
18837 if !(is32Bit(c + d)) {
18838 break
18839 }
18840 v.reset(OpAMD64MOVQloadidx1)
18841 v.AuxInt = c + d
18842 v.Aux = sym
18843 v.AddArg(ptr)
18844 v.AddArg(idx)
18845 v.AddArg(mem)
18846 return true
18847 }
18848
18849
18850
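	// match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+d)
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)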
18851 for {
18852 c := v.AuxInt
18853 sym := v.Aux
18854 mem := v.Args[2]
18855 ptr := v.Args[0]
18856 v_1 := v.Args[1]
18857 if v_1.Op != OpAMD64ADDQconst {
18858 break
18859 }
18860 d := v_1.AuxInt
18861 idx := v_1.Args[0]
18862 if !(is32Bit(c + d)) {
18863 break
18864 }
18865 v.reset(OpAMD64MOVQloadidx1)
18866 v.AuxInt = c + d
18867 v.Aux = sym
18868 v.AddArg(ptr)
18869 v.AddArg(idx)
18870 v.AddArg(mem)
18871 return true
18872 }
18873
18874
18875
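	// match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
	// cond: is32Bit(c+d)
	// result: (MOVQloadidx1 [c+d] {sym} ptr idx mem)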
18876 for {
18877 c := v.AuxInt
18878 sym := v.Aux
18879 mem := v.Args[2]
18880 v_0 := v.Args[0]
18881 if v_0.Op != OpAMD64ADDQconst {
18882 break
18883 }
18884 d := v_0.AuxInt
18885 idx := v_0.Args[0]
18886 ptr := v.Args[1]
18887 if !(is32Bit(c + d)) {
18888 break
18889 }
18890 v.reset(OpAMD64MOVQloadidx1)
18891 v.AuxInt = c + d
18892 v.Aux = sym
18893 v.AddArg(ptr)
18894 v.AddArg(idx)
18895 v.AddArg(mem)
18896 return true
18897 }
18898
18899
18900
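	// match: (MOVQloadidx1 [i] {s} p (MOVQconst [c]) mem)
	// cond: is32Bit(i+c)
	// result: (MOVQload [i+c] {s} p mem)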
18901 for {
18902 i := v.AuxInt
18903 s := v.Aux
18904 mem := v.Args[2]
18905 p := v.Args[0]
18906 v_1 := v.Args[1]
18907 if v_1.Op != OpAMD64MOVQconst {
18908 break
18909 }
18910 c := v_1.AuxInt
18911 if !(is32Bit(i + c)) {
18912 break
18913 }
18914 v.reset(OpAMD64MOVQload)
18915 v.AuxInt = i + c
18916 v.Aux = s
18917 v.AddArg(p)
18918 v.AddArg(mem)
18919 return true
18920 }
18921
18922
18923
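	// match: (MOVQloadidx1 [i] {s} (MOVQconst [c]) p mem)
	// cond: is32Bit(i+c)
	// result: (MOVQload [i+c] {s} p mem)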
18924 for {
18925 i := v.AuxInt
18926 s := v.Aux
18927 mem := v.Args[2]
18928 v_0 := v.Args[0]
18929 if v_0.Op != OpAMD64MOVQconst {
18930 break
18931 }
18932 c := v_0.AuxInt
18933 p := v.Args[1]
18934 if !(is32Bit(i + c)) {
18935 break
18936 }
18937 v.reset(OpAMD64MOVQload)
18938 v.AuxInt = i + c
18939 v.Aux = s
18940 v.AddArg(p)
18941 v.AddArg(mem)
18942 return true
18943 }
18944 return false
18945 }
18946 func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool {
18947
18948
18949
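	// match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
	// cond: is32Bit(c+d)
	// result: (MOVQloadidx8 [c+d] {sym} ptr idx mem)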
18950 for {
18951 c := v.AuxInt
18952 sym := v.Aux
18953 mem := v.Args[2]
18954 v_0 := v.Args[0]
18955 if v_0.Op != OpAMD64ADDQconst {
18956 break
18957 }
18958 d := v_0.AuxInt
18959 ptr := v_0.Args[0]
18960 idx := v.Args[1]
18961 if !(is32Bit(c + d)) {
18962 break
18963 }
18964 v.reset(OpAMD64MOVQloadidx8)
18965 v.AuxInt = c + d
18966 v.Aux = sym
18967 v.AddArg(ptr)
18968 v.AddArg(idx)
18969 v.AddArg(mem)
18970 return true
18971 }
18972
18973
18974
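	// match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem)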
18975 for {
18976 c := v.AuxInt
18977 sym := v.Aux
18978 mem := v.Args[2]
18979 ptr := v.Args[0]
18980 v_1 := v.Args[1]
18981 if v_1.Op != OpAMD64ADDQconst {
18982 break
18983 }
18984 d := v_1.AuxInt
18985 idx := v_1.Args[0]
18986 if !(is32Bit(c + 8*d)) {
18987 break
18988 }
18989 v.reset(OpAMD64MOVQloadidx8)
18990 v.AuxInt = c + 8*d
18991 v.Aux = sym
18992 v.AddArg(ptr)
18993 v.AddArg(idx)
18994 v.AddArg(mem)
18995 return true
18996 }
18997
18998
18999
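	// match: (MOVQloadidx8 [i] {s} p (MOVQconst [c]) mem)
	// cond: is32Bit(i+8*c)
	// result: (MOVQload [i+8*c] {s} p mem)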
19000 for {
19001 i := v.AuxInt
19002 s := v.Aux
19003 mem := v.Args[2]
19004 p := v.Args[0]
19005 v_1 := v.Args[1]
19006 if v_1.Op != OpAMD64MOVQconst {
19007 break
19008 }
19009 c := v_1.AuxInt
19010 if !(is32Bit(i + 8*c)) {
19011 break
19012 }
19013 v.reset(OpAMD64MOVQload)
19014 v.AuxInt = i + 8*c
19015 v.Aux = s
19016 v.AddArg(p)
19017 v.AddArg(mem)
19018 return true
19019 }
19020 return false
19021 }
19022 func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool {
19023
19024
19025
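	// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)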
19026 for {
19027 off1 := v.AuxInt
19028 sym := v.Aux
19029 mem := v.Args[2]
19030 v_0 := v.Args[0]
19031 if v_0.Op != OpAMD64ADDQconst {
19032 break
19033 }
19034 off2 := v_0.AuxInt
19035 ptr := v_0.Args[0]
19036 val := v.Args[1]
19037 if !(is32Bit(off1 + off2)) {
19038 break
19039 }
19040 v.reset(OpAMD64MOVQstore)
19041 v.AuxInt = off1 + off2
19042 v.Aux = sym
19043 v.AddArg(ptr)
19044 v.AddArg(val)
19045 v.AddArg(mem)
19046 return true
19047 }
19048
19049
19050
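	// match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
	// cond: validValAndOff(c, off)
	// result: (MOVQstoreconst [makeValAndOff(c, off)] {sym} ptr mem)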
19051 for {
19052 off := v.AuxInt
19053 sym := v.Aux
19054 mem := v.Args[2]
19055 ptr := v.Args[0]
19056 v_1 := v.Args[1]
19057 if v_1.Op != OpAMD64MOVQconst {
19058 break
19059 }
19060 c := v_1.AuxInt
19061 if !(validValAndOff(c, off)) {
19062 break
19063 }
19064 v.reset(OpAMD64MOVQstoreconst)
19065 v.AuxInt = makeValAndOff(c, off)
19066 v.Aux = sym
19067 v.AddArg(ptr)
19068 v.AddArg(mem)
19069 return true
19070 }
19071
19072
19073
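	// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1, sym2)} base val mem)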
19074 for {
19075 off1 := v.AuxInt
19076 sym1 := v.Aux
19077 mem := v.Args[2]
19078 v_0 := v.Args[0]
19079 if v_0.Op != OpAMD64LEAQ {
19080 break
19081 }
19082 off2 := v_0.AuxInt
19083 sym2 := v_0.Aux
19084 base := v_0.Args[0]
19085 val := v.Args[1]
19086 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
19087 break
19088 }
19089 v.reset(OpAMD64MOVQstore)
19090 v.AuxInt = off1 + off2
19091 v.Aux = mergeSym(sym1, sym2)
19092 v.AddArg(base)
19093 v.AddArg(val)
19094 v.AddArg(mem)
19095 return true
19096 }
19097
19098
19099
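	// match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1, sym2)} ptr idx val mem)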
19100 for {
19101 off1 := v.AuxInt
19102 sym1 := v.Aux
19103 mem := v.Args[2]
19104 v_0 := v.Args[0]
19105 if v_0.Op != OpAMD64LEAQ1 {
19106 break
19107 }
19108 off2 := v_0.AuxInt
19109 sym2 := v_0.Aux
19110 idx := v_0.Args[1]
19111 ptr := v_0.Args[0]
19112 val := v.Args[1]
19113 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
19114 break
19115 }
19116 v.reset(OpAMD64MOVQstoreidx1)
19117 v.AuxInt = off1 + off2
19118 v.Aux = mergeSym(sym1, sym2)
19119 v.AddArg(ptr)
19120 v.AddArg(idx)
19121 v.AddArg(val)
19122 v.AddArg(mem)
19123 return true
19124 }
19125
19126
19127
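	// match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1, sym2)} ptr idx val mem)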
19128 for {
19129 off1 := v.AuxInt
19130 sym1 := v.Aux
19131 mem := v.Args[2]
19132 v_0 := v.Args[0]
19133 if v_0.Op != OpAMD64LEAQ8 {
19134 break
19135 }
19136 off2 := v_0.AuxInt
19137 sym2 := v_0.Aux
19138 idx := v_0.Args[1]
19139 ptr := v_0.Args[0]
19140 val := v.Args[1]
19141 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
19142 break
19143 }
19144 v.reset(OpAMD64MOVQstoreidx8)
19145 v.AuxInt = off1 + off2
19146 v.Aux = mergeSym(sym1, sym2)
19147 v.AddArg(ptr)
19148 v.AddArg(idx)
19149 v.AddArg(val)
19150 v.AddArg(mem)
19151 return true
19152 }
19153
19154
19155
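	// match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem)
	// cond: ptr.Op != OpSB
	// result: (MOVQstoreidx1 [off] {sym} ptr idx val mem)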
19156 for {
19157 off := v.AuxInt
19158 sym := v.Aux
19159 mem := v.Args[2]
19160 v_0 := v.Args[0]
19161 if v_0.Op != OpAMD64ADDQ {
19162 break
19163 }
19164 idx := v_0.Args[1]
19165 ptr := v_0.Args[0]
19166 val := v.Args[1]
19167 if !(ptr.Op != OpSB) {
19168 break
19169 }
19170 v.reset(OpAMD64MOVQstoreidx1)
19171 v.AuxInt = off
19172 v.Aux = sym
19173 v.AddArg(ptr)
19174 v.AddArg(idx)
19175 v.AddArg(val)
19176 v.AddArg(mem)
19177 return true
19178 }
19179
19180
19181
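	// match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
	// cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {mergeSym(sym1, sym2)} base val mem)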
19182 for {
19183 off1 := v.AuxInt
19184 sym1 := v.Aux
19185 mem := v.Args[2]
19186 v_0 := v.Args[0]
19187 if v_0.Op != OpAMD64LEAL {
19188 break
19189 }
19190 off2 := v_0.AuxInt
19191 sym2 := v_0.Aux
19192 base := v_0.Args[0]
19193 val := v.Args[1]
19194 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
19195 break
19196 }
19197 v.reset(OpAMD64MOVQstore)
19198 v.AuxInt = off1 + off2
19199 v.Aux = mergeSym(sym1, sym2)
19200 v.AddArg(base)
19201 v.AddArg(val)
19202 v.AddArg(mem)
19203 return true
19204 }
19205
19206
19207
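	// match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVQstore [off1+off2] {sym} ptr val mem)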
19208 for {
19209 off1 := v.AuxInt
19210 sym := v.Aux
19211 mem := v.Args[2]
19212 v_0 := v.Args[0]
19213 if v_0.Op != OpAMD64ADDLconst {
19214 break
19215 }
19216 off2 := v_0.AuxInt
19217 ptr := v_0.Args[0]
19218 val := v.Args[1]
19219 if !(is32Bit(off1 + off2)) {
19220 break
19221 }
19222 v.reset(OpAMD64MOVQstore)
19223 v.AuxInt = off1 + off2
19224 v.Aux = sym
19225 v.AddArg(ptr)
19226 v.AddArg(val)
19227 v.AddArg(mem)
19228 return true
19229 }
19230
19231
19232
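	// match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) mem)
	// cond: y.Uses == 1 && clobber(y)
	// result: (ADDQmodify [off] {sym} ptr x mem)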
19233 for {
19234 off := v.AuxInt
19235 sym := v.Aux
19236 mem := v.Args[2]
19237 ptr := v.Args[0]
19238 y := v.Args[1]
19239 if y.Op != OpAMD64ADDQload {
19240 break
19241 }
19242 if y.AuxInt != off {
19243 break
19244 }
19245 if y.Aux != sym {
19246 break
19247 }
19248 _ = y.Args[2]
19249 x := y.Args[0]
19250 if ptr != y.Args[1] {
19251 break
19252 }
19253 if mem != y.Args[2] {
19254 break
19255 }
19256 if !(y.Uses == 1 && clobber(y)) {
19257 break
19258 }
19259 v.reset(OpAMD64ADDQmodify)
19260 v.AuxInt = off
19261 v.Aux = sym
19262 v.AddArg(ptr)
19263 v.AddArg(x)
19264 v.AddArg(mem)
19265 return true
19266 }
19267
19268
19269
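	// match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem)
	// cond: y.Uses == 1 && clobber(y)
	// result: (ANDQmodify [off] {sym} ptr x mem)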
19270 for {
19271 off := v.AuxInt
19272 sym := v.Aux
19273 mem := v.Args[2]
19274 ptr := v.Args[0]
19275 y := v.Args[1]
19276 if y.Op != OpAMD64ANDQload {
19277 break
19278 }
19279 if y.AuxInt != off {
19280 break
19281 }
19282 if y.Aux != sym {
19283 break
19284 }
19285 _ = y.Args[2]
19286 x := y.Args[0]
19287 if ptr != y.Args[1] {
19288 break
19289 }
19290 if mem != y.Args[2] {
19291 break
19292 }
19293 if !(y.Uses == 1 && clobber(y)) {
19294 break
19295 }
19296 v.reset(OpAMD64ANDQmodify)
19297 v.AuxInt = off
19298 v.Aux = sym
19299 v.AddArg(ptr)
19300 v.AddArg(x)
19301 v.AddArg(mem)
19302 return true
19303 }
19304 return false
19305 }
19306 func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool {
19307
19308
19309
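	// match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem)
	// cond: y.Uses == 1 && clobber(y)
	// result: (ORQmodify [off] {sym} ptr x mem)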
19310 for {
19311 off := v.AuxInt
19312 sym := v.Aux
19313 mem := v.Args[2]
19314 ptr := v.Args[0]
19315 y := v.Args[1]
19316 if y.Op != OpAMD64ORQload {
19317 break
19318 }
19319 if y.AuxInt != off {
19320 break
19321 }
19322 if y.Aux != sym {
19323 break
19324 }
19325 _ = y.Args[2]
19326 x := y.Args[0]
19327 if ptr != y.Args[1] {
19328 break
19329 }
19330 if mem != y.Args[2] {
19331 break
19332 }
19333 if !(y.Uses == 1 && clobber(y)) {
19334 break
19335 }
19336 v.reset(OpAMD64ORQmodify)
19337 v.AuxInt = off
19338 v.Aux = sym
19339 v.AddArg(ptr)
19340 v.AddArg(x)
19341 v.AddArg(mem)
19342 return true
19343 }
19344
19345
19346
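	// match: (MOVQstore {sym} [off] ptr y:(XORQload x [off] {sym} ptr mem) mem)
	// cond: y.Uses == 1 && clobber(y)
	// result: (XORQmodify [off] {sym} ptr x mem)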
19347 for {
19348 off := v.AuxInt
19349 sym := v.Aux
19350 mem := v.Args[2]
19351 ptr := v.Args[0]
19352 y := v.Args[1]
19353 if y.Op != OpAMD64XORQload {
19354 break
19355 }
19356 if y.AuxInt != off {
19357 break
19358 }
19359 if y.Aux != sym {
19360 break
19361 }
19362 _ = y.Args[2]
19363 x := y.Args[0]
19364 if ptr != y.Args[1] {
19365 break
19366 }
19367 if mem != y.Args[2] {
19368 break
19369 }
19370 if !(y.Uses == 1 && clobber(y)) {
19371 break
19372 }
19373 v.reset(OpAMD64XORQmodify)
19374 v.AuxInt = off
19375 v.Aux = sym
19376 v.AddArg(ptr)
19377 v.AddArg(x)
19378 v.AddArg(mem)
19379 return true
19380 }
19381
19382
19383
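	// match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)
	// result: (ADDQmodify [off] {sym} ptr x mem)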
19384 for {
19385 off := v.AuxInt
19386 sym := v.Aux
19387 mem := v.Args[2]
19388 ptr := v.Args[0]
19389 y := v.Args[1]
19390 if y.Op != OpAMD64ADDQ {
19391 break
19392 }
19393 x := y.Args[1]
19394 l := y.Args[0]
19395 if l.Op != OpAMD64MOVQload {
19396 break
19397 }
19398 if l.AuxInt != off {
19399 break
19400 }
19401 if l.Aux != sym {
19402 break
19403 }
19404 _ = l.Args[1]
19405 if ptr != l.Args[0] {
19406 break
19407 }
19408 if mem != l.Args[1] {
19409 break
19410 }
19411 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
19412 break
19413 }
19414 v.reset(OpAMD64ADDQmodify)
19415 v.AuxInt = off
19416 v.Aux = sym
19417 v.AddArg(ptr)
19418 v.AddArg(x)
19419 v.AddArg(mem)
19420 return true
19421 }
19422
19423
19424
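	// match: (MOVQstore {sym} [off] ptr y:(ADDQ x l:(MOVQload [off] {sym} ptr mem)) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)
	// result: (ADDQmodify [off] {sym} ptr x mem)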
19425 for {
19426 off := v.AuxInt
19427 sym := v.Aux
19428 mem := v.Args[2]
19429 ptr := v.Args[0]
19430 y := v.Args[1]
19431 if y.Op != OpAMD64ADDQ {
19432 break
19433 }
19434 _ = y.Args[1]
19435 x := y.Args[0]
19436 l := y.Args[1]
19437 if l.Op != OpAMD64MOVQload {
19438 break
19439 }
19440 if l.AuxInt != off {
19441 break
19442 }
19443 if l.Aux != sym {
19444 break
19445 }
19446 _ = l.Args[1]
19447 if ptr != l.Args[0] {
19448 break
19449 }
19450 if mem != l.Args[1] {
19451 break
19452 }
19453 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
19454 break
19455 }
19456 v.reset(OpAMD64ADDQmodify)
19457 v.AuxInt = off
19458 v.Aux = sym
19459 v.AddArg(ptr)
19460 v.AddArg(x)
19461 v.AddArg(mem)
19462 return true
19463 }
19464
19465
19466
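	// match: (MOVQstore {sym} [off] ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)
	// result: (SUBQmodify [off] {sym} ptr x mem)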
19467 for {
19468 off := v.AuxInt
19469 sym := v.Aux
19470 mem := v.Args[2]
19471 ptr := v.Args[0]
19472 y := v.Args[1]
19473 if y.Op != OpAMD64SUBQ {
19474 break
19475 }
19476 x := y.Args[1]
19477 l := y.Args[0]
19478 if l.Op != OpAMD64MOVQload {
19479 break
19480 }
19481 if l.AuxInt != off {
19482 break
19483 }
19484 if l.Aux != sym {
19485 break
19486 }
19487 _ = l.Args[1]
19488 if ptr != l.Args[0] {
19489 break
19490 }
19491 if mem != l.Args[1] {
19492 break
19493 }
19494 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
19495 break
19496 }
19497 v.reset(OpAMD64SUBQmodify)
19498 v.AuxInt = off
19499 v.Aux = sym
19500 v.AddArg(ptr)
19501 v.AddArg(x)
19502 v.AddArg(mem)
19503 return true
19504 }
19505
19506
19507
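	// match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)
	// result: (ANDQmodify [off] {sym} ptr x mem)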
19508 for {
19509 off := v.AuxInt
19510 sym := v.Aux
19511 mem := v.Args[2]
19512 ptr := v.Args[0]
19513 y := v.Args[1]
19514 if y.Op != OpAMD64ANDQ {
19515 break
19516 }
19517 x := y.Args[1]
19518 l := y.Args[0]
19519 if l.Op != OpAMD64MOVQload {
19520 break
19521 }
19522 if l.AuxInt != off {
19523 break
19524 }
19525 if l.Aux != sym {
19526 break
19527 }
19528 _ = l.Args[1]
19529 if ptr != l.Args[0] {
19530 break
19531 }
19532 if mem != l.Args[1] {
19533 break
19534 }
19535 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
19536 break
19537 }
19538 v.reset(OpAMD64ANDQmodify)
19539 v.AuxInt = off
19540 v.Aux = sym
19541 v.AddArg(ptr)
19542 v.AddArg(x)
19543 v.AddArg(mem)
19544 return true
19545 }
19546
19547
19548
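	// match: (MOVQstore {sym} [off] ptr y:(ANDQ x l:(MOVQload [off] {sym} ptr mem)) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)
	// result: (ANDQmodify [off] {sym} ptr x mem)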
19549 for {
19550 off := v.AuxInt
19551 sym := v.Aux
19552 mem := v.Args[2]
19553 ptr := v.Args[0]
19554 y := v.Args[1]
19555 if y.Op != OpAMD64ANDQ {
19556 break
19557 }
19558 _ = y.Args[1]
19559 x := y.Args[0]
19560 l := y.Args[1]
19561 if l.Op != OpAMD64MOVQload {
19562 break
19563 }
19564 if l.AuxInt != off {
19565 break
19566 }
19567 if l.Aux != sym {
19568 break
19569 }
19570 _ = l.Args[1]
19571 if ptr != l.Args[0] {
19572 break
19573 }
19574 if mem != l.Args[1] {
19575 break
19576 }
19577 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
19578 break
19579 }
19580 v.reset(OpAMD64ANDQmodify)
19581 v.AuxInt = off
19582 v.Aux = sym
19583 v.AddArg(ptr)
19584 v.AddArg(x)
19585 v.AddArg(mem)
19586 return true
19587 }
19588
19589
19590
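	// match: (MOVQstore {sym} [off] ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)
	// result: (ORQmodify [off] {sym} ptr x mem)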
19591 for {
19592 off := v.AuxInt
19593 sym := v.Aux
19594 mem := v.Args[2]
19595 ptr := v.Args[0]
19596 y := v.Args[1]
19597 if y.Op != OpAMD64ORQ {
19598 break
19599 }
19600 x := y.Args[1]
19601 l := y.Args[0]
19602 if l.Op != OpAMD64MOVQload {
19603 break
19604 }
19605 if l.AuxInt != off {
19606 break
19607 }
19608 if l.Aux != sym {
19609 break
19610 }
19611 _ = l.Args[1]
19612 if ptr != l.Args[0] {
19613 break
19614 }
19615 if mem != l.Args[1] {
19616 break
19617 }
19618 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
19619 break
19620 }
19621 v.reset(OpAMD64ORQmodify)
19622 v.AuxInt = off
19623 v.Aux = sym
19624 v.AddArg(ptr)
19625 v.AddArg(x)
19626 v.AddArg(mem)
19627 return true
19628 }
19629
19630
19631
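	// match: (MOVQstore {sym} [off] ptr y:(ORQ x l:(MOVQload [off] {sym} ptr mem)) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)
	// result: (ORQmodify [off] {sym} ptr x mem)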
19632 for {
19633 off := v.AuxInt
19634 sym := v.Aux
19635 mem := v.Args[2]
19636 ptr := v.Args[0]
19637 y := v.Args[1]
19638 if y.Op != OpAMD64ORQ {
19639 break
19640 }
19641 _ = y.Args[1]
19642 x := y.Args[0]
19643 l := y.Args[1]
19644 if l.Op != OpAMD64MOVQload {
19645 break
19646 }
19647 if l.AuxInt != off {
19648 break
19649 }
19650 if l.Aux != sym {
19651 break
19652 }
19653 _ = l.Args[1]
19654 if ptr != l.Args[0] {
19655 break
19656 }
19657 if mem != l.Args[1] {
19658 break
19659 }
19660 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
19661 break
19662 }
19663 v.reset(OpAMD64ORQmodify)
19664 v.AuxInt = off
19665 v.Aux = sym
19666 v.AddArg(ptr)
19667 v.AddArg(x)
19668 v.AddArg(mem)
19669 return true
19670 }
19671
19672
19673
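	// match: (MOVQstore {sym} [off] ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)
	// result: (XORQmodify [off] {sym} ptr x mem)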
19674 for {
19675 off := v.AuxInt
19676 sym := v.Aux
19677 mem := v.Args[2]
19678 ptr := v.Args[0]
19679 y := v.Args[1]
19680 if y.Op != OpAMD64XORQ {
19681 break
19682 }
19683 x := y.Args[1]
19684 l := y.Args[0]
19685 if l.Op != OpAMD64MOVQload {
19686 break
19687 }
19688 if l.AuxInt != off {
19689 break
19690 }
19691 if l.Aux != sym {
19692 break
19693 }
19694 _ = l.Args[1]
19695 if ptr != l.Args[0] {
19696 break
19697 }
19698 if mem != l.Args[1] {
19699 break
19700 }
19701 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
19702 break
19703 }
19704 v.reset(OpAMD64XORQmodify)
19705 v.AuxInt = off
19706 v.Aux = sym
19707 v.AddArg(ptr)
19708 v.AddArg(x)
19709 v.AddArg(mem)
19710 return true
19711 }
19712 return false
19713 }
19714 func rewriteValueAMD64_OpAMD64MOVQstore_20(v *Value) bool {
19715
19716
19717
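	// match: (MOVQstore {sym} [off] ptr y:(XORQ x l:(MOVQload [off] {sym} ptr mem)) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)
	// result: (XORQmodify [off] {sym} ptr x mem)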
19718 for {
19719 off := v.AuxInt
19720 sym := v.Aux
19721 mem := v.Args[2]
19722 ptr := v.Args[0]
19723 y := v.Args[1]
19724 if y.Op != OpAMD64XORQ {
19725 break
19726 }
19727 _ = y.Args[1]
19728 x := y.Args[0]
19729 l := y.Args[1]
19730 if l.Op != OpAMD64MOVQload {
19731 break
19732 }
19733 if l.AuxInt != off {
19734 break
19735 }
19736 if l.Aux != sym {
19737 break
19738 }
19739 _ = l.Args[1]
19740 if ptr != l.Args[0] {
19741 break
19742 }
19743 if mem != l.Args[1] {
19744 break
19745 }
19746 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
19747 break
19748 }
19749 v.reset(OpAMD64XORQmodify)
19750 v.AuxInt = off
19751 v.Aux = sym
19752 v.AddArg(ptr)
19753 v.AddArg(x)
19754 v.AddArg(mem)
19755 return true
19756 }
19757
19758
19759
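	// match: (MOVQstore {sym} [off] ptr y:(BTCQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)
	// result: (BTCQmodify [off] {sym} ptr x mem)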
19760 for {
19761 off := v.AuxInt
19762 sym := v.Aux
19763 mem := v.Args[2]
19764 ptr := v.Args[0]
19765 y := v.Args[1]
19766 if y.Op != OpAMD64BTCQ {
19767 break
19768 }
19769 x := y.Args[1]
19770 l := y.Args[0]
19771 if l.Op != OpAMD64MOVQload {
19772 break
19773 }
19774 if l.AuxInt != off {
19775 break
19776 }
19777 if l.Aux != sym {
19778 break
19779 }
19780 _ = l.Args[1]
19781 if ptr != l.Args[0] {
19782 break
19783 }
19784 if mem != l.Args[1] {
19785 break
19786 }
19787 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
19788 break
19789 }
19790 v.reset(OpAMD64BTCQmodify)
19791 v.AuxInt = off
19792 v.Aux = sym
19793 v.AddArg(ptr)
19794 v.AddArg(x)
19795 v.AddArg(mem)
19796 return true
19797 }
19798
19799
19800
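	// match: (MOVQstore {sym} [off] ptr y:(BTRQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)
	// result: (BTRQmodify [off] {sym} ptr x mem)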
19801 for {
19802 off := v.AuxInt
19803 sym := v.Aux
19804 mem := v.Args[2]
19805 ptr := v.Args[0]
19806 y := v.Args[1]
19807 if y.Op != OpAMD64BTRQ {
19808 break
19809 }
19810 x := y.Args[1]
19811 l := y.Args[0]
19812 if l.Op != OpAMD64MOVQload {
19813 break
19814 }
19815 if l.AuxInt != off {
19816 break
19817 }
19818 if l.Aux != sym {
19819 break
19820 }
19821 _ = l.Args[1]
19822 if ptr != l.Args[0] {
19823 break
19824 }
19825 if mem != l.Args[1] {
19826 break
19827 }
19828 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
19829 break
19830 }
19831 v.reset(OpAMD64BTRQmodify)
19832 v.AuxInt = off
19833 v.Aux = sym
19834 v.AddArg(ptr)
19835 v.AddArg(x)
19836 v.AddArg(mem)
19837 return true
19838 }
19839
19840
19841
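	// match: (MOVQstore {sym} [off] ptr y:(BTSQ l:(MOVQload [off] {sym} ptr mem) x) mem)
	// cond: y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)
	// result: (BTSQmodify [off] {sym} ptr x mem)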
19842 for {
19843 off := v.AuxInt
19844 sym := v.Aux
19845 mem := v.Args[2]
19846 ptr := v.Args[0]
19847 y := v.Args[1]
19848 if y.Op != OpAMD64BTSQ {
19849 break
19850 }
19851 x := y.Args[1]
19852 l := y.Args[0]
19853 if l.Op != OpAMD64MOVQload {
19854 break
19855 }
19856 if l.AuxInt != off {
19857 break
19858 }
19859 if l.Aux != sym {
19860 break
19861 }
19862 _ = l.Args[1]
19863 if ptr != l.Args[0] {
19864 break
19865 }
19866 if mem != l.Args[1] {
19867 break
19868 }
19869 if !(y.Uses == 1 && l.Uses == 1 && clobber(y) && clobber(l)) {
19870 break
19871 }
19872 v.reset(OpAMD64BTSQmodify)
19873 v.AuxInt = off
19874 v.Aux = sym
19875 v.AddArg(ptr)
19876 v.AddArg(x)
19877 v.AddArg(mem)
19878 return true
19879 }
19880
19881
19882
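	// match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)
	// result: (ADDQconstmodify {sym} [makeValAndOff(c, off)] ptr mem)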
19883 for {
19884 off := v.AuxInt
19885 sym := v.Aux
19886 mem := v.Args[2]
19887 ptr := v.Args[0]
19888 a := v.Args[1]
19889 if a.Op != OpAMD64ADDQconst {
19890 break
19891 }
19892 c := a.AuxInt
19893 l := a.Args[0]
19894 if l.Op != OpAMD64MOVQload {
19895 break
19896 }
19897 if l.AuxInt != off {
19898 break
19899 }
19900 if l.Aux != sym {
19901 break
19902 }
19903 _ = l.Args[1]
19904 ptr2 := l.Args[0]
19905 if mem != l.Args[1] {
19906 break
19907 }
19908 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
19909 break
19910 }
19911 v.reset(OpAMD64ADDQconstmodify)
19912 v.AuxInt = makeValAndOff(c, off)
19913 v.Aux = sym
19914 v.AddArg(ptr)
19915 v.AddArg(mem)
19916 return true
19917 }
19918
19919
19920
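	// match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)
	// result: (ANDQconstmodify {sym} [makeValAndOff(c, off)] ptr mem)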
19921 for {
19922 off := v.AuxInt
19923 sym := v.Aux
19924 mem := v.Args[2]
19925 ptr := v.Args[0]
19926 a := v.Args[1]
19927 if a.Op != OpAMD64ANDQconst {
19928 break
19929 }
19930 c := a.AuxInt
19931 l := a.Args[0]
19932 if l.Op != OpAMD64MOVQload {
19933 break
19934 }
19935 if l.AuxInt != off {
19936 break
19937 }
19938 if l.Aux != sym {
19939 break
19940 }
19941 _ = l.Args[1]
19942 ptr2 := l.Args[0]
19943 if mem != l.Args[1] {
19944 break
19945 }
19946 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
19947 break
19948 }
19949 v.reset(OpAMD64ANDQconstmodify)
19950 v.AuxInt = makeValAndOff(c, off)
19951 v.Aux = sym
19952 v.AddArg(ptr)
19953 v.AddArg(mem)
19954 return true
19955 }
19956
19957
19958
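	// match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)
	// result: (ORQconstmodify {sym} [makeValAndOff(c, off)] ptr mem)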
19959 for {
19960 off := v.AuxInt
19961 sym := v.Aux
19962 mem := v.Args[2]
19963 ptr := v.Args[0]
19964 a := v.Args[1]
19965 if a.Op != OpAMD64ORQconst {
19966 break
19967 }
19968 c := a.AuxInt
19969 l := a.Args[0]
19970 if l.Op != OpAMD64MOVQload {
19971 break
19972 }
19973 if l.AuxInt != off {
19974 break
19975 }
19976 if l.Aux != sym {
19977 break
19978 }
19979 _ = l.Args[1]
19980 ptr2 := l.Args[0]
19981 if mem != l.Args[1] {
19982 break
19983 }
19984 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
19985 break
19986 }
19987 v.reset(OpAMD64ORQconstmodify)
19988 v.AuxInt = makeValAndOff(c, off)
19989 v.Aux = sym
19990 v.AddArg(ptr)
19991 v.AddArg(mem)
19992 return true
19993 }
19994
19995
19996
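	// match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)
	// result: (XORQconstmodify {sym} [makeValAndOff(c, off)] ptr mem)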
19997 for {
19998 off := v.AuxInt
19999 sym := v.Aux
20000 mem := v.Args[2]
20001 ptr := v.Args[0]
20002 a := v.Args[1]
20003 if a.Op != OpAMD64XORQconst {
20004 break
20005 }
20006 c := a.AuxInt
20007 l := a.Args[0]
20008 if l.Op != OpAMD64MOVQload {
20009 break
20010 }
20011 if l.AuxInt != off {
20012 break
20013 }
20014 if l.Aux != sym {
20015 break
20016 }
20017 _ = l.Args[1]
20018 ptr2 := l.Args[0]
20019 if mem != l.Args[1] {
20020 break
20021 }
20022 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
20023 break
20024 }
20025 v.reset(OpAMD64XORQconstmodify)
20026 v.AuxInt = makeValAndOff(c, off)
20027 v.Aux = sym
20028 v.AddArg(ptr)
20029 v.AddArg(mem)
20030 return true
20031 }
20032
20033
20034
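	// match: (MOVQstore [off] {sym} ptr a:(BTCQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)
	// result: (BTCQconstmodify {sym} [makeValAndOff(c, off)] ptr mem)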
20035 for {
20036 off := v.AuxInt
20037 sym := v.Aux
20038 mem := v.Args[2]
20039 ptr := v.Args[0]
20040 a := v.Args[1]
20041 if a.Op != OpAMD64BTCQconst {
20042 break
20043 }
20044 c := a.AuxInt
20045 l := a.Args[0]
20046 if l.Op != OpAMD64MOVQload {
20047 break
20048 }
20049 if l.AuxInt != off {
20050 break
20051 }
20052 if l.Aux != sym {
20053 break
20054 }
20055 _ = l.Args[1]
20056 ptr2 := l.Args[0]
20057 if mem != l.Args[1] {
20058 break
20059 }
20060 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
20061 break
20062 }
20063 v.reset(OpAMD64BTCQconstmodify)
20064 v.AuxInt = makeValAndOff(c, off)
20065 v.Aux = sym
20066 v.AddArg(ptr)
20067 v.AddArg(mem)
20068 return true
20069 }
20070
20071
20072
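	// match: (MOVQstore [off] {sym} ptr a:(BTRQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)
	// result: (BTRQconstmodify {sym} [makeValAndOff(c, off)] ptr mem)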
20073 for {
20074 off := v.AuxInt
20075 sym := v.Aux
20076 mem := v.Args[2]
20077 ptr := v.Args[0]
20078 a := v.Args[1]
20079 if a.Op != OpAMD64BTRQconst {
20080 break
20081 }
20082 c := a.AuxInt
20083 l := a.Args[0]
20084 if l.Op != OpAMD64MOVQload {
20085 break
20086 }
20087 if l.AuxInt != off {
20088 break
20089 }
20090 if l.Aux != sym {
20091 break
20092 }
20093 _ = l.Args[1]
20094 ptr2 := l.Args[0]
20095 if mem != l.Args[1] {
20096 break
20097 }
20098 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
20099 break
20100 }
20101 v.reset(OpAMD64BTRQconstmodify)
20102 v.AuxInt = makeValAndOff(c, off)
20103 v.Aux = sym
20104 v.AddArg(ptr)
20105 v.AddArg(mem)
20106 return true
20107 }
20108 return false
20109 }
20110 func rewriteValueAMD64_OpAMD64MOVQstore_30(v *Value) bool {
20111
20112
20113
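	// match: (MOVQstore [off] {sym} ptr a:(BTSQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
	// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)
	// result: (BTSQconstmodify {sym} [makeValAndOff(c, off)] ptr mem)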
20114 for {
20115 off := v.AuxInt
20116 sym := v.Aux
20117 mem := v.Args[2]
20118 ptr := v.Args[0]
20119 a := v.Args[1]
20120 if a.Op != OpAMD64BTSQconst {
20121 break
20122 }
20123 c := a.AuxInt
20124 l := a.Args[0]
20125 if l.Op != OpAMD64MOVQload {
20126 break
20127 }
20128 if l.AuxInt != off {
20129 break
20130 }
20131 if l.Aux != sym {
20132 break
20133 }
20134 _ = l.Args[1]
20135 ptr2 := l.Args[0]
20136 if mem != l.Args[1] {
20137 break
20138 }
20139 if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off) && clobber(l) && clobber(a)) {
20140 break
20141 }
20142 v.reset(OpAMD64BTSQconstmodify)
20143 v.AuxInt = makeValAndOff(c, off)
20144 v.Aux = sym
20145 v.AddArg(ptr)
20146 v.AddArg(mem)
20147 return true
20148 }
20149
20150
20151
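	// match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
	// cond:
	// result: (MOVSDstore [off] {sym} ptr val mem)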
20152 for {
20153 off := v.AuxInt
20154 sym := v.Aux
20155 mem := v.Args[2]
20156 ptr := v.Args[0]
20157 v_1 := v.Args[1]
20158 if v_1.Op != OpAMD64MOVQf2i {
20159 break
20160 }
20161 val := v_1.Args[0]
20162 v.reset(OpAMD64MOVSDstore)
20163 v.AuxInt = off
20164 v.Aux = sym
20165 v.AddArg(ptr)
20166 v.AddArg(val)
20167 v.AddArg(mem)
20168 return true
20169 }
20170 return false
20171 }
20172 func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool {
20173 b := v.Block
20174 config := b.Func.Config
20175
20176
20177
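	// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)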
20178 for {
20179 sc := v.AuxInt
20180 s := v.Aux
20181 mem := v.Args[1]
20182 v_0 := v.Args[0]
20183 if v_0.Op != OpAMD64ADDQconst {
20184 break
20185 }
20186 off := v_0.AuxInt
20187 ptr := v_0.Args[0]
20188 if !(ValAndOff(sc).canAdd(off)) {
20189 break
20190 }
20191 v.reset(OpAMD64MOVQstoreconst)
20192 v.AuxInt = ValAndOff(sc).add(off)
20193 v.Aux = s
20194 v.AddArg(ptr)
20195 v.AddArg(mem)
20196 return true
20197 }
20198
20199
20200
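	// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)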
20201 for {
20202 sc := v.AuxInt
20203 sym1 := v.Aux
20204 mem := v.Args[1]
20205 v_0 := v.Args[0]
20206 if v_0.Op != OpAMD64LEAQ {
20207 break
20208 }
20209 off := v_0.AuxInt
20210 sym2 := v_0.Aux
20211 ptr := v_0.Args[0]
20212 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
20213 break
20214 }
20215 v.reset(OpAMD64MOVQstoreconst)
20216 v.AuxInt = ValAndOff(sc).add(off)
20217 v.Aux = mergeSym(sym1, sym2)
20218 v.AddArg(ptr)
20219 v.AddArg(mem)
20220 return true
20221 }
20222
20223
20224
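	// match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1, sym2)} ptr idx mem)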
20225 for {
20226 x := v.AuxInt
20227 sym1 := v.Aux
20228 mem := v.Args[1]
20229 v_0 := v.Args[0]
20230 if v_0.Op != OpAMD64LEAQ1 {
20231 break
20232 }
20233 off := v_0.AuxInt
20234 sym2 := v_0.Aux
20235 idx := v_0.Args[1]
20236 ptr := v_0.Args[0]
20237 if !(canMergeSym(sym1, sym2)) {
20238 break
20239 }
20240 v.reset(OpAMD64MOVQstoreconstidx1)
20241 v.AuxInt = ValAndOff(x).add(off)
20242 v.Aux = mergeSym(sym1, sym2)
20243 v.AddArg(ptr)
20244 v.AddArg(idx)
20245 v.AddArg(mem)
20246 return true
20247 }
20248
20249
20250
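	// match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem)
	// cond: canMergeSym(sym1, sym2)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1, sym2)} ptr idx mem)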
20251 for {
20252 x := v.AuxInt
20253 sym1 := v.Aux
20254 mem := v.Args[1]
20255 v_0 := v.Args[0]
20256 if v_0.Op != OpAMD64LEAQ8 {
20257 break
20258 }
20259 off := v_0.AuxInt
20260 sym2 := v_0.Aux
20261 idx := v_0.Args[1]
20262 ptr := v_0.Args[0]
20263 if !(canMergeSym(sym1, sym2)) {
20264 break
20265 }
20266 v.reset(OpAMD64MOVQstoreconstidx8)
20267 v.AuxInt = ValAndOff(x).add(off)
20268 v.Aux = mergeSym(sym1, sym2)
20269 v.AddArg(ptr)
20270 v.AddArg(idx)
20271 v.AddArg(mem)
20272 return true
20273 }
20274
20275
20276
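	// match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem)
	// cond:
	// result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem)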
20277 for {
20278 x := v.AuxInt
20279 sym := v.Aux
20280 mem := v.Args[1]
20281 v_0 := v.Args[0]
20282 if v_0.Op != OpAMD64ADDQ {
20283 break
20284 }
20285 idx := v_0.Args[1]
20286 ptr := v_0.Args[0]
20287 v.reset(OpAMD64MOVQstoreconstidx1)
20288 v.AuxInt = x
20289 v.Aux = sym
20290 v.AddArg(ptr)
20291 v.AddArg(idx)
20292 v.AddArg(mem)
20293 return true
20294 }
20295
20296
20297
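	// match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem))
	// cond: config.useSSE && x.Uses == 1 && ValAndOff(c2).Off()+8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)
	// result: (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem)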
20298 for {
20299 c := v.AuxInt
20300 s := v.Aux
20301 _ = v.Args[1]
20302 p := v.Args[0]
20303 x := v.Args[1]
20304 if x.Op != OpAMD64MOVQstoreconst {
20305 break
20306 }
20307 c2 := x.AuxInt
20308 if x.Aux != s {
20309 break
20310 }
20311 mem := x.Args[1]
20312 if p != x.Args[0] {
20313 break
20314 }
20315 if !(config.useSSE && x.Uses == 1 && ValAndOff(c2).Off()+8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)) {
20316 break
20317 }
20318 v.reset(OpAMD64MOVOstore)
20319 v.AuxInt = ValAndOff(c2).Off()
20320 v.Aux = s
20321 v.AddArg(p)
20322 v0 := b.NewValue0(x.Pos, OpAMD64MOVOconst, types.TypeInt128)
20323 v0.AuxInt = 0
20324 v.AddArg(v0)
20325 v.AddArg(mem)
20326 return true
20327 }
20328
20329
20330
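	// match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
	// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)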
20331 for {
20332 sc := v.AuxInt
20333 sym1 := v.Aux
20334 mem := v.Args[1]
20335 v_0 := v.Args[0]
20336 if v_0.Op != OpAMD64LEAL {
20337 break
20338 }
20339 off := v_0.AuxInt
20340 sym2 := v_0.Aux
20341 ptr := v_0.Args[0]
20342 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
20343 break
20344 }
20345 v.reset(OpAMD64MOVQstoreconst)
20346 v.AuxInt = ValAndOff(sc).add(off)
20347 v.Aux = mergeSym(sym1, sym2)
20348 v.AddArg(ptr)
20349 v.AddArg(mem)
20350 return true
20351 }
20352
20353
20354
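	// match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
	// cond: ValAndOff(sc).canAdd(off)
	// result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)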
20355 for {
20356 sc := v.AuxInt
20357 s := v.Aux
20358 mem := v.Args[1]
20359 v_0 := v.Args[0]
20360 if v_0.Op != OpAMD64ADDLconst {
20361 break
20362 }
20363 off := v_0.AuxInt
20364 ptr := v_0.Args[0]
20365 if !(ValAndOff(sc).canAdd(off)) {
20366 break
20367 }
20368 v.reset(OpAMD64MOVQstoreconst)
20369 v.AuxInt = ValAndOff(sc).add(off)
20370 v.Aux = s
20371 v.AddArg(ptr)
20372 v.AddArg(mem)
20373 return true
20374 }
20375 return false
20376 }
20377 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool {
20378
20379
20380
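	// match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
	// cond:
	// result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem)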
20381 for {
20382 c := v.AuxInt
20383 sym := v.Aux
20384 mem := v.Args[2]
20385 ptr := v.Args[0]
20386 v_1 := v.Args[1]
20387 if v_1.Op != OpAMD64SHLQconst {
20388 break
20389 }
20390 if v_1.AuxInt != 3 {
20391 break
20392 }
20393 idx := v_1.Args[0]
20394 v.reset(OpAMD64MOVQstoreconstidx8)
20395 v.AuxInt = c
20396 v.Aux = sym
20397 v.AddArg(ptr)
20398 v.AddArg(idx)
20399 v.AddArg(mem)
20400 return true
20401 }
20402
20403
20404
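	// match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)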
20405 for {
20406 x := v.AuxInt
20407 sym := v.Aux
20408 mem := v.Args[2]
20409 v_0 := v.Args[0]
20410 if v_0.Op != OpAMD64ADDQconst {
20411 break
20412 }
20413 c := v_0.AuxInt
20414 ptr := v_0.Args[0]
20415 idx := v.Args[1]
20416 if !(ValAndOff(x).canAdd(c)) {
20417 break
20418 }
20419 v.reset(OpAMD64MOVQstoreconstidx1)
20420 v.AuxInt = ValAndOff(x).add(c)
20421 v.Aux = sym
20422 v.AddArg(ptr)
20423 v.AddArg(idx)
20424 v.AddArg(mem)
20425 return true
20426 }
20427
20428
20429
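	// match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)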
20430 for {
20431 x := v.AuxInt
20432 sym := v.Aux
20433 mem := v.Args[2]
20434 ptr := v.Args[0]
20435 v_1 := v.Args[1]
20436 if v_1.Op != OpAMD64ADDQconst {
20437 break
20438 }
20439 c := v_1.AuxInt
20440 idx := v_1.Args[0]
20441 if !(ValAndOff(x).canAdd(c)) {
20442 break
20443 }
20444 v.reset(OpAMD64MOVQstoreconstidx1)
20445 v.AuxInt = ValAndOff(x).add(c)
20446 v.Aux = sym
20447 v.AddArg(ptr)
20448 v.AddArg(idx)
20449 v.AddArg(mem)
20450 return true
20451 }
20452 return false
20453 }
20454 func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool {
20455
20456
20457
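	// match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem)
	// cond: ValAndOff(x).canAdd(c)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem)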
20458 for {
20459 x := v.AuxInt
20460 sym := v.Aux
20461 mem := v.Args[2]
20462 v_0 := v.Args[0]
20463 if v_0.Op != OpAMD64ADDQconst {
20464 break
20465 }
20466 c := v_0.AuxInt
20467 ptr := v_0.Args[0]
20468 idx := v.Args[1]
20469 if !(ValAndOff(x).canAdd(c)) {
20470 break
20471 }
20472 v.reset(OpAMD64MOVQstoreconstidx8)
20473 v.AuxInt = ValAndOff(x).add(c)
20474 v.Aux = sym
20475 v.AddArg(ptr)
20476 v.AddArg(idx)
20477 v.AddArg(mem)
20478 return true
20479 }
20480
20481
20482
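	// match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem)
	// cond: ValAndOff(x).canAdd(8*c)
	// result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem)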
20483 for {
20484 x := v.AuxInt
20485 sym := v.Aux
20486 mem := v.Args[2]
20487 ptr := v.Args[0]
20488 v_1 := v.Args[1]
20489 if v_1.Op != OpAMD64ADDQconst {
20490 break
20491 }
20492 c := v_1.AuxInt
20493 idx := v_1.Args[0]
20494 if !(ValAndOff(x).canAdd(8 * c)) {
20495 break
20496 }
20497 v.reset(OpAMD64MOVQstoreconstidx8)
20498 v.AuxInt = ValAndOff(x).add(8 * c)
20499 v.Aux = sym
20500 v.AddArg(ptr)
20501 v.AddArg(idx)
20502 v.AddArg(mem)
20503 return true
20504 }
20505 return false
20506 }
20507 func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool {
20508
20509
20510
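	// match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
	// cond:
	// result: (MOVQstoreidx8 [c] {sym} ptr idx val mem)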
20511 for {
20512 c := v.AuxInt
20513 sym := v.Aux
20514 mem := v.Args[3]
20515 ptr := v.Args[0]
20516 v_1 := v.Args[1]
20517 if v_1.Op != OpAMD64SHLQconst {
20518 break
20519 }
20520 if v_1.AuxInt != 3 {
20521 break
20522 }
20523 idx := v_1.Args[0]
20524 val := v.Args[2]
20525 v.reset(OpAMD64MOVQstoreidx8)
20526 v.AuxInt = c
20527 v.Aux = sym
20528 v.AddArg(ptr)
20529 v.AddArg(idx)
20530 v.AddArg(val)
20531 v.AddArg(mem)
20532 return true
20533 }
20534
20535
20536
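	// match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)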
20537 for {
20538 c := v.AuxInt
20539 sym := v.Aux
20540 mem := v.Args[3]
20541 v_0 := v.Args[0]
20542 if v_0.Op != OpAMD64ADDQconst {
20543 break
20544 }
20545 d := v_0.AuxInt
20546 ptr := v_0.Args[0]
20547 idx := v.Args[1]
20548 val := v.Args[2]
20549 if !(is32Bit(c + d)) {
20550 break
20551 }
20552 v.reset(OpAMD64MOVQstoreidx1)
20553 v.AuxInt = c + d
20554 v.Aux = sym
20555 v.AddArg(ptr)
20556 v.AddArg(idx)
20557 v.AddArg(val)
20558 v.AddArg(mem)
20559 return true
20560 }
20561
20562
20563
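	// match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+d)
	// result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem)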
20564 for {
20565 c := v.AuxInt
20566 sym := v.Aux
20567 mem := v.Args[3]
20568 ptr := v.Args[0]
20569 v_1 := v.Args[1]
20570 if v_1.Op != OpAMD64ADDQconst {
20571 break
20572 }
20573 d := v_1.AuxInt
20574 idx := v_1.Args[0]
20575 val := v.Args[2]
20576 if !(is32Bit(c + d)) {
20577 break
20578 }
20579 v.reset(OpAMD64MOVQstoreidx1)
20580 v.AuxInt = c + d
20581 v.Aux = sym
20582 v.AddArg(ptr)
20583 v.AddArg(idx)
20584 v.AddArg(val)
20585 v.AddArg(mem)
20586 return true
20587 }
20588
20589
20590
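	// match: (MOVQstoreidx1 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+c)
	// result: (MOVQstore [i+c] {s} p w mem)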
20591 for {
20592 i := v.AuxInt
20593 s := v.Aux
20594 mem := v.Args[3]
20595 p := v.Args[0]
20596 v_1 := v.Args[1]
20597 if v_1.Op != OpAMD64MOVQconst {
20598 break
20599 }
20600 c := v_1.AuxInt
20601 w := v.Args[2]
20602 if !(is32Bit(i + c)) {
20603 break
20604 }
20605 v.reset(OpAMD64MOVQstore)
20606 v.AuxInt = i + c
20607 v.Aux = s
20608 v.AddArg(p)
20609 v.AddArg(w)
20610 v.AddArg(mem)
20611 return true
20612 }
20613 return false
20614 }
20615 func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool {
20616
20617
20618
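	// match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
	// cond: is32Bit(c+d)
	// result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem)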
20619 for {
20620 c := v.AuxInt
20621 sym := v.Aux
20622 mem := v.Args[3]
20623 v_0 := v.Args[0]
20624 if v_0.Op != OpAMD64ADDQconst {
20625 break
20626 }
20627 d := v_0.AuxInt
20628 ptr := v_0.Args[0]
20629 idx := v.Args[1]
20630 val := v.Args[2]
20631 if !(is32Bit(c + d)) {
20632 break
20633 }
20634 v.reset(OpAMD64MOVQstoreidx8)
20635 v.AuxInt = c + d
20636 v.Aux = sym
20637 v.AddArg(ptr)
20638 v.AddArg(idx)
20639 v.AddArg(val)
20640 v.AddArg(mem)
20641 return true
20642 }
20643
20644
20645
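	// match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
	// cond: is32Bit(c+8*d)
	// result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem)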
20646 for {
20647 c := v.AuxInt
20648 sym := v.Aux
20649 mem := v.Args[3]
20650 ptr := v.Args[0]
20651 v_1 := v.Args[1]
20652 if v_1.Op != OpAMD64ADDQconst {
20653 break
20654 }
20655 d := v_1.AuxInt
20656 idx := v_1.Args[0]
20657 val := v.Args[2]
20658 if !(is32Bit(c + 8*d)) {
20659 break
20660 }
20661 v.reset(OpAMD64MOVQstoreidx8)
20662 v.AuxInt = c + 8*d
20663 v.Aux = sym
20664 v.AddArg(ptr)
20665 v.AddArg(idx)
20666 v.AddArg(val)
20667 v.AddArg(mem)
20668 return true
20669 }
20670
20671
20672
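	// match: (MOVQstoreidx8 [i] {s} p (MOVQconst [c]) w mem)
	// cond: is32Bit(i+8*c)
	// result: (MOVQstore [i+8*c] {s} p w mem)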
20673 for {
20674 i := v.AuxInt
20675 s := v.Aux
20676 mem := v.Args[3]
20677 p := v.Args[0]
20678 v_1 := v.Args[1]
20679 if v_1.Op != OpAMD64MOVQconst {
20680 break
20681 }
20682 c := v_1.AuxInt
20683 w := v.Args[2]
20684 if !(is32Bit(i + 8*c)) {
20685 break
20686 }
20687 v.reset(OpAMD64MOVQstore)
20688 v.AuxInt = i + 8*c
20689 v.Aux = s
20690 v.AddArg(p)
20691 v.AddArg(w)
20692 v.AddArg(mem)
20693 return true
20694 }
20695 return false
20696 }
20697 func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool {
20698
20699
20700
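	// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
	// cond: is32Bit(off1+off2)
	// result: (MOVSDload [off1+off2] {sym} ptr mem)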
20701 for {
20702 off1 := v.AuxInt
20703 sym := v.Aux
20704 mem := v.Args[1]
20705 v_0 := v.Args[0]
20706 if v_0.Op != OpAMD64ADDQconst {
20707 break
20708 }
20709 off2 := v_0.AuxInt
20710 ptr := v_0.Args[0]
20711 if !(is32Bit(off1 + off2)) {
20712 break
20713 }
20714 v.reset(OpAMD64MOVSDload)
20715 v.AuxInt = off1 + off2
20716 v.Aux = sym
20717 v.AddArg(ptr)
20718 v.AddArg(mem)
20719 return true
20720 }
20721
20722
20723
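	// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDload [off1+off2] {mergeSym(sym1, sym2)} base mem)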
20724 for {
20725 off1 := v.AuxInt
20726 sym1 := v.Aux
20727 mem := v.Args[1]
20728 v_0 := v.Args[0]
20729 if v_0.Op != OpAMD64LEAQ {
20730 break
20731 }
20732 off2 := v_0.AuxInt
20733 sym2 := v_0.Aux
20734 base := v_0.Args[0]
20735 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
20736 break
20737 }
20738 v.reset(OpAMD64MOVSDload)
20739 v.AuxInt = off1 + off2
20740 v.Aux = mergeSym(sym1, sym2)
20741 v.AddArg(base)
20742 v.AddArg(mem)
20743 return true
20744 }
20745
20746
20747
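	// match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1, sym2)} ptr idx mem)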
20748 for {
20749 off1 := v.AuxInt
20750 sym1 := v.Aux
20751 mem := v.Args[1]
20752 v_0 := v.Args[0]
20753 if v_0.Op != OpAMD64LEAQ1 {
20754 break
20755 }
20756 off2 := v_0.AuxInt
20757 sym2 := v_0.Aux
20758 idx := v_0.Args[1]
20759 ptr := v_0.Args[0]
20760 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
20761 break
20762 }
20763 v.reset(OpAMD64MOVSDloadidx1)
20764 v.AuxInt = off1 + off2
20765 v.Aux = mergeSym(sym1, sym2)
20766 v.AddArg(ptr)
20767 v.AddArg(idx)
20768 v.AddArg(mem)
20769 return true
20770 }
20771
20772
20773
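	// match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1, sym2)} ptr idx mem)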
20774 for {
20775 off1 := v.AuxInt
20776 sym1 := v.Aux
20777 mem := v.Args[1]
20778 v_0 := v.Args[0]
20779 if v_0.Op != OpAMD64LEAQ8 {
20780 break
20781 }
20782 off2 := v_0.AuxInt
20783 sym2 := v_0.Aux
20784 idx := v_0.Args[1]
20785 ptr := v_0.Args[0]
20786 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
20787 break
20788 }
20789 v.reset(OpAMD64MOVSDloadidx8)
20790 v.AuxInt = off1 + off2
20791 v.Aux = mergeSym(sym1, sym2)
20792 v.AddArg(ptr)
20793 v.AddArg(idx)
20794 v.AddArg(mem)
20795 return true
20796 }
20797 // match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem)
20798 // cond: ptr.Op != OpSB
20799 // result: (MOVSDloadidx1 [off] {sym} ptr idx mem)
20800 for {
20801 off := v.AuxInt
20802 sym := v.Aux
20803 mem := v.Args[1]
20804 v_0 := v.Args[0]
20805 if v_0.Op != OpAMD64ADDQ {
20806 break
20807 }
20808 idx := v_0.Args[1]
20809 ptr := v_0.Args[0]
20810 if !(ptr.Op != OpSB) {
20811 break
20812 }
20813 v.reset(OpAMD64MOVSDloadidx1)
20814 v.AuxInt = off
20815 v.Aux = sym
20816 v.AddArg(ptr)
20817 v.AddArg(idx)
20818 v.AddArg(mem)
20819 return true
20820 }
20821 // match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _))
20822 // cond:
20823 // result: (MOVQi2f val)
20824 for {
20825 off := v.AuxInt
20826 sym := v.Aux
20827 _ = v.Args[1]
20828 ptr := v.Args[0]
20829 v_1 := v.Args[1]
20830 if v_1.Op != OpAMD64MOVQstore {
20831 break
20832 }
20833 if v_1.AuxInt != off {
20834 break
20835 }
20836 if v_1.Aux != sym {
20837 break
20838 }
20839 _ = v_1.Args[2]
20840 if ptr != v_1.Args[0] {
20841 break
20842 }
20843 val := v_1.Args[1]
20844 v.reset(OpAMD64MOVQi2f)
20845 v.AddArg(val)
20846 return true
20847 }
20848 return false
20849 }
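// Note on the final MOVSDload rule above: a load that reads back exactly the
// eight bytes just written by a MOVQstore to the same pointer, offset, and
// symbol never needs to touch memory; it becomes MOVQi2f, a direct move from
// an integer register into an XMM register.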
20850 func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool {
20851 // match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem)
20852 // cond:
20853 // result: (MOVSDloadidx8 [c] {sym} ptr idx mem)
20854 for {
20855 c := v.AuxInt
20856 sym := v.Aux
20857 mem := v.Args[2]
20858 ptr := v.Args[0]
20859 v_1 := v.Args[1]
20860 if v_1.Op != OpAMD64SHLQconst {
20861 break
20862 }
20863 if v_1.AuxInt != 3 {
20864 break
20865 }
20866 idx := v_1.Args[0]
20867 v.reset(OpAMD64MOVSDloadidx8)
20868 v.AuxInt = c
20869 v.Aux = sym
20870 v.AddArg(ptr)
20871 v.AddArg(idx)
20872 v.AddArg(mem)
20873 return true
20874 }
20875 // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
20876 // cond: is32Bit(c+d)
20877 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
20878 for {
20879 c := v.AuxInt
20880 sym := v.Aux
20881 mem := v.Args[2]
20882 v_0 := v.Args[0]
20883 if v_0.Op != OpAMD64ADDQconst {
20884 break
20885 }
20886 d := v_0.AuxInt
20887 ptr := v_0.Args[0]
20888 idx := v.Args[1]
20889 if !(is32Bit(c + d)) {
20890 break
20891 }
20892 v.reset(OpAMD64MOVSDloadidx1)
20893 v.AuxInt = c + d
20894 v.Aux = sym
20895 v.AddArg(ptr)
20896 v.AddArg(idx)
20897 v.AddArg(mem)
20898 return true
20899 }
20900 // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
20901 // cond: is32Bit(c+d)
20902 // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem)
20903 for {
20904 c := v.AuxInt
20905 sym := v.Aux
20906 mem := v.Args[2]
20907 ptr := v.Args[0]
20908 v_1 := v.Args[1]
20909 if v_1.Op != OpAMD64ADDQconst {
20910 break
20911 }
20912 d := v_1.AuxInt
20913 idx := v_1.Args[0]
20914 if !(is32Bit(c + d)) {
20915 break
20916 }
20917 v.reset(OpAMD64MOVSDloadidx1)
20918 v.AuxInt = c + d
20919 v.Aux = sym
20920 v.AddArg(ptr)
20921 v.AddArg(idx)
20922 v.AddArg(mem)
20923 return true
20924 }
20925 // match: (MOVSDloadidx1 [i] {s} p (MOVQconst [c]) mem)
20926 // cond: is32Bit(i+c)
20927 // result: (MOVSDload [i+c] {s} p mem)
20928 for {
20929 i := v.AuxInt
20930 s := v.Aux
20931 mem := v.Args[2]
20932 p := v.Args[0]
20933 v_1 := v.Args[1]
20934 if v_1.Op != OpAMD64MOVQconst {
20935 break
20936 }
20937 c := v_1.AuxInt
20938 if !(is32Bit(i + c)) {
20939 break
20940 }
20941 v.reset(OpAMD64MOVSDload)
20942 v.AuxInt = i + c
20943 v.Aux = s
20944 v.AddArg(p)
20945 v.AddArg(mem)
20946 return true
20947 }
20948 return false
20949 }
20950 func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool {
20951 // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem)
20952 // cond: is32Bit(c+d)
20953 // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem)
20954 for {
20955 c := v.AuxInt
20956 sym := v.Aux
20957 mem := v.Args[2]
20958 v_0 := v.Args[0]
20959 if v_0.Op != OpAMD64ADDQconst {
20960 break
20961 }
20962 d := v_0.AuxInt
20963 ptr := v_0.Args[0]
20964 idx := v.Args[1]
20965 if !(is32Bit(c + d)) {
20966 break
20967 }
20968 v.reset(OpAMD64MOVSDloadidx8)
20969 v.AuxInt = c + d
20970 v.Aux = sym
20971 v.AddArg(ptr)
20972 v.AddArg(idx)
20973 v.AddArg(mem)
20974 return true
20975 }
20976 // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem)
20977 // cond: is32Bit(c+8*d)
20978 // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem)
20979 for {
20980 c := v.AuxInt
20981 sym := v.Aux
20982 mem := v.Args[2]
20983 ptr := v.Args[0]
20984 v_1 := v.Args[1]
20985 if v_1.Op != OpAMD64ADDQconst {
20986 break
20987 }
20988 d := v_1.AuxInt
20989 idx := v_1.Args[0]
20990 if !(is32Bit(c + 8*d)) {
20991 break
20992 }
20993 v.reset(OpAMD64MOVSDloadidx8)
20994 v.AuxInt = c + 8*d
20995 v.Aux = sym
20996 v.AddArg(ptr)
20997 v.AddArg(idx)
20998 v.AddArg(mem)
20999 return true
21000 }
21001 // match: (MOVSDloadidx8 [i] {s} p (MOVQconst [c]) mem)
21002 // cond: is32Bit(i+8*c)
21003 // result: (MOVSDload [i+8*c] {s} p mem)
21004 for {
21005 i := v.AuxInt
21006 s := v.Aux
21007 mem := v.Args[2]
21008 p := v.Args[0]
21009 v_1 := v.Args[1]
21010 if v_1.Op != OpAMD64MOVQconst {
21011 break
21012 }
21013 c := v_1.AuxInt
21014 if !(is32Bit(i + 8*c)) {
21015 break
21016 }
21017 v.reset(OpAMD64MOVSDload)
21018 v.AuxInt = i + 8*c
21019 v.Aux = s
21020 v.AddArg(p)
21021 v.AddArg(mem)
21022 return true
21023 }
21024 return false
21025 }
21026 func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool {
21027 // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
21028 // cond: is32Bit(off1+off2)
21029 // result: (MOVSDstore [off1+off2] {sym} ptr val mem)
21030 for {
21031 off1 := v.AuxInt
21032 sym := v.Aux
21033 mem := v.Args[2]
21034 v_0 := v.Args[0]
21035 if v_0.Op != OpAMD64ADDQconst {
21036 break
21037 }
21038 off2 := v_0.AuxInt
21039 ptr := v_0.Args[0]
21040 val := v.Args[1]
21041 if !(is32Bit(off1 + off2)) {
21042 break
21043 }
21044 v.reset(OpAMD64MOVSDstore)
21045 v.AuxInt = off1 + off2
21046 v.Aux = sym
21047 v.AddArg(ptr)
21048 v.AddArg(val)
21049 v.AddArg(mem)
21050 return true
21051 }
21052 // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
21053 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
21054 // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
21055 for {
21056 off1 := v.AuxInt
21057 sym1 := v.Aux
21058 mem := v.Args[2]
21059 v_0 := v.Args[0]
21060 if v_0.Op != OpAMD64LEAQ {
21061 break
21062 }
21063 off2 := v_0.AuxInt
21064 sym2 := v_0.Aux
21065 base := v_0.Args[0]
21066 val := v.Args[1]
21067 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
21068 break
21069 }
21070 v.reset(OpAMD64MOVSDstore)
21071 v.AuxInt = off1 + off2
21072 v.Aux = mergeSym(sym1, sym2)
21073 v.AddArg(base)
21074 v.AddArg(val)
21075 v.AddArg(mem)
21076 return true
21077 }
21078 // match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
21079 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
21080 // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
21081 for {
21082 off1 := v.AuxInt
21083 sym1 := v.Aux
21084 mem := v.Args[2]
21085 v_0 := v.Args[0]
21086 if v_0.Op != OpAMD64LEAQ1 {
21087 break
21088 }
21089 off2 := v_0.AuxInt
21090 sym2 := v_0.Aux
21091 idx := v_0.Args[1]
21092 ptr := v_0.Args[0]
21093 val := v.Args[1]
21094 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
21095 break
21096 }
21097 v.reset(OpAMD64MOVSDstoreidx1)
21098 v.AuxInt = off1 + off2
21099 v.Aux = mergeSym(sym1, sym2)
21100 v.AddArg(ptr)
21101 v.AddArg(idx)
21102 v.AddArg(val)
21103 v.AddArg(mem)
21104 return true
21105 }
21106 // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem)
21107 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
21108 // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
21109 for {
21110 off1 := v.AuxInt
21111 sym1 := v.Aux
21112 mem := v.Args[2]
21113 v_0 := v.Args[0]
21114 if v_0.Op != OpAMD64LEAQ8 {
21115 break
21116 }
21117 off2 := v_0.AuxInt
21118 sym2 := v_0.Aux
21119 idx := v_0.Args[1]
21120 ptr := v_0.Args[0]
21121 val := v.Args[1]
21122 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
21123 break
21124 }
21125 v.reset(OpAMD64MOVSDstoreidx8)
21126 v.AuxInt = off1 + off2
21127 v.Aux = mergeSym(sym1, sym2)
21128 v.AddArg(ptr)
21129 v.AddArg(idx)
21130 v.AddArg(val)
21131 v.AddArg(mem)
21132 return true
21133 }
21134 // match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem)
21135 // cond: ptr.Op != OpSB
21136 // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem)
21137 for {
21138 off := v.AuxInt
21139 sym := v.Aux
21140 mem := v.Args[2]
21141 v_0 := v.Args[0]
21142 if v_0.Op != OpAMD64ADDQ {
21143 break
21144 }
21145 idx := v_0.Args[1]
21146 ptr := v_0.Args[0]
21147 val := v.Args[1]
21148 if !(ptr.Op != OpSB) {
21149 break
21150 }
21151 v.reset(OpAMD64MOVSDstoreidx1)
21152 v.AuxInt = off
21153 v.Aux = sym
21154 v.AddArg(ptr)
21155 v.AddArg(idx)
21156 v.AddArg(val)
21157 v.AddArg(mem)
21158 return true
21159 }
21160 // match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem)
21161 // cond:
21162 // result: (MOVQstore [off] {sym} ptr val mem)
21163 for {
21164 off := v.AuxInt
21165 sym := v.Aux
21166 mem := v.Args[2]
21167 ptr := v.Args[0]
21168 v_1 := v.Args[1]
21169 if v_1.Op != OpAMD64MOVQi2f {
21170 break
21171 }
21172 val := v_1.Args[0]
21173 v.reset(OpAMD64MOVQstore)
21174 v.AuxInt = off
21175 v.Aux = sym
21176 v.AddArg(ptr)
21177 v.AddArg(val)
21178 v.AddArg(mem)
21179 return true
21180 }
21181 return false
21182 }
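// The final MOVSDstore rule above is the store-side mirror: storing a value
// that was just produced by MOVQi2f reduces to a plain MOVQstore of the
// original integer, so the data never has to pass through an XMM register.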
21183 func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool {
21184 // match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem)
21185 // cond:
21186 // result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem)
21187 for {
21188 c := v.AuxInt
21189 sym := v.Aux
21190 mem := v.Args[3]
21191 ptr := v.Args[0]
21192 v_1 := v.Args[1]
21193 if v_1.Op != OpAMD64SHLQconst {
21194 break
21195 }
21196 if v_1.AuxInt != 3 {
21197 break
21198 }
21199 idx := v_1.Args[0]
21200 val := v.Args[2]
21201 v.reset(OpAMD64MOVSDstoreidx8)
21202 v.AuxInt = c
21203 v.Aux = sym
21204 v.AddArg(ptr)
21205 v.AddArg(idx)
21206 v.AddArg(val)
21207 v.AddArg(mem)
21208 return true
21209 }
21210 // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
21211 // cond: is32Bit(c+d)
21212 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
21213 for {
21214 c := v.AuxInt
21215 sym := v.Aux
21216 mem := v.Args[3]
21217 v_0 := v.Args[0]
21218 if v_0.Op != OpAMD64ADDQconst {
21219 break
21220 }
21221 d := v_0.AuxInt
21222 ptr := v_0.Args[0]
21223 idx := v.Args[1]
21224 val := v.Args[2]
21225 if !(is32Bit(c + d)) {
21226 break
21227 }
21228 v.reset(OpAMD64MOVSDstoreidx1)
21229 v.AuxInt = c + d
21230 v.Aux = sym
21231 v.AddArg(ptr)
21232 v.AddArg(idx)
21233 v.AddArg(val)
21234 v.AddArg(mem)
21235 return true
21236 }
21237 // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
21238 // cond: is32Bit(c+d)
21239 // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem)
21240 for {
21241 c := v.AuxInt
21242 sym := v.Aux
21243 mem := v.Args[3]
21244 ptr := v.Args[0]
21245 v_1 := v.Args[1]
21246 if v_1.Op != OpAMD64ADDQconst {
21247 break
21248 }
21249 d := v_1.AuxInt
21250 idx := v_1.Args[0]
21251 val := v.Args[2]
21252 if !(is32Bit(c + d)) {
21253 break
21254 }
21255 v.reset(OpAMD64MOVSDstoreidx1)
21256 v.AuxInt = c + d
21257 v.Aux = sym
21258 v.AddArg(ptr)
21259 v.AddArg(idx)
21260 v.AddArg(val)
21261 v.AddArg(mem)
21262 return true
21263 }
21264 // match: (MOVSDstoreidx1 [i] {s} p (MOVQconst [c]) w mem)
21265 // cond: is32Bit(i+c)
21266 // result: (MOVSDstore [i+c] {s} p w mem)
21267 for {
21268 i := v.AuxInt
21269 s := v.Aux
21270 mem := v.Args[3]
21271 p := v.Args[0]
21272 v_1 := v.Args[1]
21273 if v_1.Op != OpAMD64MOVQconst {
21274 break
21275 }
21276 c := v_1.AuxInt
21277 w := v.Args[2]
21278 if !(is32Bit(i + c)) {
21279 break
21280 }
21281 v.reset(OpAMD64MOVSDstore)
21282 v.AuxInt = i + c
21283 v.Aux = s
21284 v.AddArg(p)
21285 v.AddArg(w)
21286 v.AddArg(mem)
21287 return true
21288 }
21289 return false
21290 }
21291 func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool {
21292 // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem)
21293 // cond: is32Bit(c+d)
21294 // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem)
21295 for {
21296 c := v.AuxInt
21297 sym := v.Aux
21298 mem := v.Args[3]
21299 v_0 := v.Args[0]
21300 if v_0.Op != OpAMD64ADDQconst {
21301 break
21302 }
21303 d := v_0.AuxInt
21304 ptr := v_0.Args[0]
21305 idx := v.Args[1]
21306 val := v.Args[2]
21307 if !(is32Bit(c + d)) {
21308 break
21309 }
21310 v.reset(OpAMD64MOVSDstoreidx8)
21311 v.AuxInt = c + d
21312 v.Aux = sym
21313 v.AddArg(ptr)
21314 v.AddArg(idx)
21315 v.AddArg(val)
21316 v.AddArg(mem)
21317 return true
21318 }
21319 // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem)
21320 // cond: is32Bit(c+8*d)
21321 // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem)
21322 for {
21323 c := v.AuxInt
21324 sym := v.Aux
21325 mem := v.Args[3]
21326 ptr := v.Args[0]
21327 v_1 := v.Args[1]
21328 if v_1.Op != OpAMD64ADDQconst {
21329 break
21330 }
21331 d := v_1.AuxInt
21332 idx := v_1.Args[0]
21333 val := v.Args[2]
21334 if !(is32Bit(c + 8*d)) {
21335 break
21336 }
21337 v.reset(OpAMD64MOVSDstoreidx8)
21338 v.AuxInt = c + 8*d
21339 v.Aux = sym
21340 v.AddArg(ptr)
21341 v.AddArg(idx)
21342 v.AddArg(val)
21343 v.AddArg(mem)
21344 return true
21345 }
21346 // match: (MOVSDstoreidx8 [i] {s} p (MOVQconst [c]) w mem)
21347 // cond: is32Bit(i+8*c)
21348 // result: (MOVSDstore [i+8*c] {s} p w mem)
21349 for {
21350 i := v.AuxInt
21351 s := v.Aux
21352 mem := v.Args[3]
21353 p := v.Args[0]
21354 v_1 := v.Args[1]
21355 if v_1.Op != OpAMD64MOVQconst {
21356 break
21357 }
21358 c := v_1.AuxInt
21359 w := v.Args[2]
21360 if !(is32Bit(i + 8*c)) {
21361 break
21362 }
21363 v.reset(OpAMD64MOVSDstore)
21364 v.AuxInt = i + 8*c
21365 v.Aux = s
21366 v.AddArg(p)
21367 v.AddArg(w)
21368 v.AddArg(mem)
21369 return true
21370 }
21371 return false
21372 }
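// The MOVSS rules that follow are the 4-byte single-precision analogues of
// the MOVSD rules above: the natural index scale becomes 4 (LEAQ4, loadidx4,
// storeidx4) and the register-move cancellation uses MOVLi2f and MOVLstore.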
21373 func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool {
21374 // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
21375 // cond: is32Bit(off1+off2)
21376 // result: (MOVSSload [off1+off2] {sym} ptr mem)
21377 for {
21378 off1 := v.AuxInt
21379 sym := v.Aux
21380 mem := v.Args[1]
21381 v_0 := v.Args[0]
21382 if v_0.Op != OpAMD64ADDQconst {
21383 break
21384 }
21385 off2 := v_0.AuxInt
21386 ptr := v_0.Args[0]
21387 if !(is32Bit(off1 + off2)) {
21388 break
21389 }
21390 v.reset(OpAMD64MOVSSload)
21391 v.AuxInt = off1 + off2
21392 v.Aux = sym
21393 v.AddArg(ptr)
21394 v.AddArg(mem)
21395 return true
21396 }
21397 // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
21398 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
21399 // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
21400 for {
21401 off1 := v.AuxInt
21402 sym1 := v.Aux
21403 mem := v.Args[1]
21404 v_0 := v.Args[0]
21405 if v_0.Op != OpAMD64LEAQ {
21406 break
21407 }
21408 off2 := v_0.AuxInt
21409 sym2 := v_0.Aux
21410 base := v_0.Args[0]
21411 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
21412 break
21413 }
21414 v.reset(OpAMD64MOVSSload)
21415 v.AuxInt = off1 + off2
21416 v.Aux = mergeSym(sym1, sym2)
21417 v.AddArg(base)
21418 v.AddArg(mem)
21419 return true
21420 }
21421 // match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
21422 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
21423 // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
21424 for {
21425 off1 := v.AuxInt
21426 sym1 := v.Aux
21427 mem := v.Args[1]
21428 v_0 := v.Args[0]
21429 if v_0.Op != OpAMD64LEAQ1 {
21430 break
21431 }
21432 off2 := v_0.AuxInt
21433 sym2 := v_0.Aux
21434 idx := v_0.Args[1]
21435 ptr := v_0.Args[0]
21436 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
21437 break
21438 }
21439 v.reset(OpAMD64MOVSSloadidx1)
21440 v.AuxInt = off1 + off2
21441 v.Aux = mergeSym(sym1, sym2)
21442 v.AddArg(ptr)
21443 v.AddArg(idx)
21444 v.AddArg(mem)
21445 return true
21446 }
21447 // match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem)
21448 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
21449 // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
21450 for {
21451 off1 := v.AuxInt
21452 sym1 := v.Aux
21453 mem := v.Args[1]
21454 v_0 := v.Args[0]
21455 if v_0.Op != OpAMD64LEAQ4 {
21456 break
21457 }
21458 off2 := v_0.AuxInt
21459 sym2 := v_0.Aux
21460 idx := v_0.Args[1]
21461 ptr := v_0.Args[0]
21462 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
21463 break
21464 }
21465 v.reset(OpAMD64MOVSSloadidx4)
21466 v.AuxInt = off1 + off2
21467 v.Aux = mergeSym(sym1, sym2)
21468 v.AddArg(ptr)
21469 v.AddArg(idx)
21470 v.AddArg(mem)
21471 return true
21472 }
21473 // match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem)
21474 // cond: ptr.Op != OpSB
21475 // result: (MOVSSloadidx1 [off] {sym} ptr idx mem)
21476 for {
21477 off := v.AuxInt
21478 sym := v.Aux
21479 mem := v.Args[1]
21480 v_0 := v.Args[0]
21481 if v_0.Op != OpAMD64ADDQ {
21482 break
21483 }
21484 idx := v_0.Args[1]
21485 ptr := v_0.Args[0]
21486 if !(ptr.Op != OpSB) {
21487 break
21488 }
21489 v.reset(OpAMD64MOVSSloadidx1)
21490 v.AuxInt = off
21491 v.Aux = sym
21492 v.AddArg(ptr)
21493 v.AddArg(idx)
21494 v.AddArg(mem)
21495 return true
21496 }
21497 // match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _))
21498 // cond:
21499 // result: (MOVLi2f val)
21500 for {
21501 off := v.AuxInt
21502 sym := v.Aux
21503 _ = v.Args[1]
21504 ptr := v.Args[0]
21505 v_1 := v.Args[1]
21506 if v_1.Op != OpAMD64MOVLstore {
21507 break
21508 }
21509 if v_1.AuxInt != off {
21510 break
21511 }
21512 if v_1.Aux != sym {
21513 break
21514 }
21515 _ = v_1.Args[2]
21516 if ptr != v_1.Args[0] {
21517 break
21518 }
21519 val := v_1.Args[1]
21520 v.reset(OpAMD64MOVLi2f)
21521 v.AddArg(val)
21522 return true
21523 }
21524 return false
21525 }
21526 func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool {
21527 // match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
21528 // cond:
21529 // result: (MOVSSloadidx4 [c] {sym} ptr idx mem)
21530 for {
21531 c := v.AuxInt
21532 sym := v.Aux
21533 mem := v.Args[2]
21534 ptr := v.Args[0]
21535 v_1 := v.Args[1]
21536 if v_1.Op != OpAMD64SHLQconst {
21537 break
21538 }
21539 if v_1.AuxInt != 2 {
21540 break
21541 }
21542 idx := v_1.Args[0]
21543 v.reset(OpAMD64MOVSSloadidx4)
21544 v.AuxInt = c
21545 v.Aux = sym
21546 v.AddArg(ptr)
21547 v.AddArg(idx)
21548 v.AddArg(mem)
21549 return true
21550 }
21551 // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
21552 // cond: is32Bit(c+d)
21553 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
21554 for {
21555 c := v.AuxInt
21556 sym := v.Aux
21557 mem := v.Args[2]
21558 v_0 := v.Args[0]
21559 if v_0.Op != OpAMD64ADDQconst {
21560 break
21561 }
21562 d := v_0.AuxInt
21563 ptr := v_0.Args[0]
21564 idx := v.Args[1]
21565 if !(is32Bit(c + d)) {
21566 break
21567 }
21568 v.reset(OpAMD64MOVSSloadidx1)
21569 v.AuxInt = c + d
21570 v.Aux = sym
21571 v.AddArg(ptr)
21572 v.AddArg(idx)
21573 v.AddArg(mem)
21574 return true
21575 }
21576 // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
21577 // cond: is32Bit(c+d)
21578 // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem)
21579 for {
21580 c := v.AuxInt
21581 sym := v.Aux
21582 mem := v.Args[2]
21583 ptr := v.Args[0]
21584 v_1 := v.Args[1]
21585 if v_1.Op != OpAMD64ADDQconst {
21586 break
21587 }
21588 d := v_1.AuxInt
21589 idx := v_1.Args[0]
21590 if !(is32Bit(c + d)) {
21591 break
21592 }
21593 v.reset(OpAMD64MOVSSloadidx1)
21594 v.AuxInt = c + d
21595 v.Aux = sym
21596 v.AddArg(ptr)
21597 v.AddArg(idx)
21598 v.AddArg(mem)
21599 return true
21600 }
21601 // match: (MOVSSloadidx1 [i] {s} p (MOVQconst [c]) mem)
21602 // cond: is32Bit(i+c)
21603 // result: (MOVSSload [i+c] {s} p mem)
21604 for {
21605 i := v.AuxInt
21606 s := v.Aux
21607 mem := v.Args[2]
21608 p := v.Args[0]
21609 v_1 := v.Args[1]
21610 if v_1.Op != OpAMD64MOVQconst {
21611 break
21612 }
21613 c := v_1.AuxInt
21614 if !(is32Bit(i + c)) {
21615 break
21616 }
21617 v.reset(OpAMD64MOVSSload)
21618 v.AuxInt = i + c
21619 v.Aux = s
21620 v.AddArg(p)
21621 v.AddArg(mem)
21622 return true
21623 }
21624 return false
21625 }
21626 func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool {
21627 // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem)
21628 // cond: is32Bit(c+d)
21629 // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem)
21630 for {
21631 c := v.AuxInt
21632 sym := v.Aux
21633 mem := v.Args[2]
21634 v_0 := v.Args[0]
21635 if v_0.Op != OpAMD64ADDQconst {
21636 break
21637 }
21638 d := v_0.AuxInt
21639 ptr := v_0.Args[0]
21640 idx := v.Args[1]
21641 if !(is32Bit(c + d)) {
21642 break
21643 }
21644 v.reset(OpAMD64MOVSSloadidx4)
21645 v.AuxInt = c + d
21646 v.Aux = sym
21647 v.AddArg(ptr)
21648 v.AddArg(idx)
21649 v.AddArg(mem)
21650 return true
21651 }
21652 // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem)
21653 // cond: is32Bit(c+4*d)
21654 // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem)
21655 for {
21656 c := v.AuxInt
21657 sym := v.Aux
21658 mem := v.Args[2]
21659 ptr := v.Args[0]
21660 v_1 := v.Args[1]
21661 if v_1.Op != OpAMD64ADDQconst {
21662 break
21663 }
21664 d := v_1.AuxInt
21665 idx := v_1.Args[0]
21666 if !(is32Bit(c + 4*d)) {
21667 break
21668 }
21669 v.reset(OpAMD64MOVSSloadidx4)
21670 v.AuxInt = c + 4*d
21671 v.Aux = sym
21672 v.AddArg(ptr)
21673 v.AddArg(idx)
21674 v.AddArg(mem)
21675 return true
21676 }
21677 // match: (MOVSSloadidx4 [i] {s} p (MOVQconst [c]) mem)
21678 // cond: is32Bit(i+4*c)
21679 // result: (MOVSSload [i+4*c] {s} p mem)
21680 for {
21681 i := v.AuxInt
21682 s := v.Aux
21683 mem := v.Args[2]
21684 p := v.Args[0]
21685 v_1 := v.Args[1]
21686 if v_1.Op != OpAMD64MOVQconst {
21687 break
21688 }
21689 c := v_1.AuxInt
21690 if !(is32Bit(i + 4*c)) {
21691 break
21692 }
21693 v.reset(OpAMD64MOVSSload)
21694 v.AuxInt = i + 4*c
21695 v.Aux = s
21696 v.AddArg(p)
21697 v.AddArg(mem)
21698 return true
21699 }
21700 return false
21701 }
21702 func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool {
21703 // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
21704 // cond: is32Bit(off1+off2)
21705 // result: (MOVSSstore [off1+off2] {sym} ptr val mem)
21706 for {
21707 off1 := v.AuxInt
21708 sym := v.Aux
21709 mem := v.Args[2]
21710 v_0 := v.Args[0]
21711 if v_0.Op != OpAMD64ADDQconst {
21712 break
21713 }
21714 off2 := v_0.AuxInt
21715 ptr := v_0.Args[0]
21716 val := v.Args[1]
21717 if !(is32Bit(off1 + off2)) {
21718 break
21719 }
21720 v.reset(OpAMD64MOVSSstore)
21721 v.AuxInt = off1 + off2
21722 v.Aux = sym
21723 v.AddArg(ptr)
21724 v.AddArg(val)
21725 v.AddArg(mem)
21726 return true
21727 }
21728 // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
21729 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
21730 // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
21731 for {
21732 off1 := v.AuxInt
21733 sym1 := v.Aux
21734 mem := v.Args[2]
21735 v_0 := v.Args[0]
21736 if v_0.Op != OpAMD64LEAQ {
21737 break
21738 }
21739 off2 := v_0.AuxInt
21740 sym2 := v_0.Aux
21741 base := v_0.Args[0]
21742 val := v.Args[1]
21743 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
21744 break
21745 }
21746 v.reset(OpAMD64MOVSSstore)
21747 v.AuxInt = off1 + off2
21748 v.Aux = mergeSym(sym1, sym2)
21749 v.AddArg(base)
21750 v.AddArg(val)
21751 v.AddArg(mem)
21752 return true
21753 }
21754 // match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
21755 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
21756 // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
21757 for {
21758 off1 := v.AuxInt
21759 sym1 := v.Aux
21760 mem := v.Args[2]
21761 v_0 := v.Args[0]
21762 if v_0.Op != OpAMD64LEAQ1 {
21763 break
21764 }
21765 off2 := v_0.AuxInt
21766 sym2 := v_0.Aux
21767 idx := v_0.Args[1]
21768 ptr := v_0.Args[0]
21769 val := v.Args[1]
21770 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
21771 break
21772 }
21773 v.reset(OpAMD64MOVSSstoreidx1)
21774 v.AuxInt = off1 + off2
21775 v.Aux = mergeSym(sym1, sym2)
21776 v.AddArg(ptr)
21777 v.AddArg(idx)
21778 v.AddArg(val)
21779 v.AddArg(mem)
21780 return true
21781 }
21782 // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem)
21783 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
21784 // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
21785 for {
21786 off1 := v.AuxInt
21787 sym1 := v.Aux
21788 mem := v.Args[2]
21789 v_0 := v.Args[0]
21790 if v_0.Op != OpAMD64LEAQ4 {
21791 break
21792 }
21793 off2 := v_0.AuxInt
21794 sym2 := v_0.Aux
21795 idx := v_0.Args[1]
21796 ptr := v_0.Args[0]
21797 val := v.Args[1]
21798 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
21799 break
21800 }
21801 v.reset(OpAMD64MOVSSstoreidx4)
21802 v.AuxInt = off1 + off2
21803 v.Aux = mergeSym(sym1, sym2)
21804 v.AddArg(ptr)
21805 v.AddArg(idx)
21806 v.AddArg(val)
21807 v.AddArg(mem)
21808 return true
21809 }
21810 // match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem)
21811 // cond: ptr.Op != OpSB
21812 // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem)
21813 for {
21814 off := v.AuxInt
21815 sym := v.Aux
21816 mem := v.Args[2]
21817 v_0 := v.Args[0]
21818 if v_0.Op != OpAMD64ADDQ {
21819 break
21820 }
21821 idx := v_0.Args[1]
21822 ptr := v_0.Args[0]
21823 val := v.Args[1]
21824 if !(ptr.Op != OpSB) {
21825 break
21826 }
21827 v.reset(OpAMD64MOVSSstoreidx1)
21828 v.AuxInt = off
21829 v.Aux = sym
21830 v.AddArg(ptr)
21831 v.AddArg(idx)
21832 v.AddArg(val)
21833 v.AddArg(mem)
21834 return true
21835 }
21836 // match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem)
21837 // cond:
21838 // result: (MOVLstore [off] {sym} ptr val mem)
21839 for {
21840 off := v.AuxInt
21841 sym := v.Aux
21842 mem := v.Args[2]
21843 ptr := v.Args[0]
21844 v_1 := v.Args[1]
21845 if v_1.Op != OpAMD64MOVLi2f {
21846 break
21847 }
21848 val := v_1.Args[0]
21849 v.reset(OpAMD64MOVLstore)
21850 v.AuxInt = off
21851 v.Aux = sym
21852 v.AddArg(ptr)
21853 v.AddArg(val)
21854 v.AddArg(mem)
21855 return true
21856 }
21857 return false
21858 }
21859 func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool {
21860 // match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem)
21861 // cond:
21862 // result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem)
21863 for {
21864 c := v.AuxInt
21865 sym := v.Aux
21866 mem := v.Args[3]
21867 ptr := v.Args[0]
21868 v_1 := v.Args[1]
21869 if v_1.Op != OpAMD64SHLQconst {
21870 break
21871 }
21872 if v_1.AuxInt != 2 {
21873 break
21874 }
21875 idx := v_1.Args[0]
21876 val := v.Args[2]
21877 v.reset(OpAMD64MOVSSstoreidx4)
21878 v.AuxInt = c
21879 v.Aux = sym
21880 v.AddArg(ptr)
21881 v.AddArg(idx)
21882 v.AddArg(val)
21883 v.AddArg(mem)
21884 return true
21885 }
21886 // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
21887 // cond: is32Bit(c+d)
21888 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
21889 for {
21890 c := v.AuxInt
21891 sym := v.Aux
21892 mem := v.Args[3]
21893 v_0 := v.Args[0]
21894 if v_0.Op != OpAMD64ADDQconst {
21895 break
21896 }
21897 d := v_0.AuxInt
21898 ptr := v_0.Args[0]
21899 idx := v.Args[1]
21900 val := v.Args[2]
21901 if !(is32Bit(c + d)) {
21902 break
21903 }
21904 v.reset(OpAMD64MOVSSstoreidx1)
21905 v.AuxInt = c + d
21906 v.Aux = sym
21907 v.AddArg(ptr)
21908 v.AddArg(idx)
21909 v.AddArg(val)
21910 v.AddArg(mem)
21911 return true
21912 }
21913 // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
21914 // cond: is32Bit(c+d)
21915 // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem)
21916 for {
21917 c := v.AuxInt
21918 sym := v.Aux
21919 mem := v.Args[3]
21920 ptr := v.Args[0]
21921 v_1 := v.Args[1]
21922 if v_1.Op != OpAMD64ADDQconst {
21923 break
21924 }
21925 d := v_1.AuxInt
21926 idx := v_1.Args[0]
21927 val := v.Args[2]
21928 if !(is32Bit(c + d)) {
21929 break
21930 }
21931 v.reset(OpAMD64MOVSSstoreidx1)
21932 v.AuxInt = c + d
21933 v.Aux = sym
21934 v.AddArg(ptr)
21935 v.AddArg(idx)
21936 v.AddArg(val)
21937 v.AddArg(mem)
21938 return true
21939 }
21940 // match: (MOVSSstoreidx1 [i] {s} p (MOVQconst [c]) w mem)
21941 // cond: is32Bit(i+c)
21942 // result: (MOVSSstore [i+c] {s} p w mem)
21943 for {
21944 i := v.AuxInt
21945 s := v.Aux
21946 mem := v.Args[3]
21947 p := v.Args[0]
21948 v_1 := v.Args[1]
21949 if v_1.Op != OpAMD64MOVQconst {
21950 break
21951 }
21952 c := v_1.AuxInt
21953 w := v.Args[2]
21954 if !(is32Bit(i + c)) {
21955 break
21956 }
21957 v.reset(OpAMD64MOVSSstore)
21958 v.AuxInt = i + c
21959 v.Aux = s
21960 v.AddArg(p)
21961 v.AddArg(w)
21962 v.AddArg(mem)
21963 return true
21964 }
21965 return false
21966 }
21967 func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool {
21968 // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem)
21969 // cond: is32Bit(c+d)
21970 // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem)
21971 for {
21972 c := v.AuxInt
21973 sym := v.Aux
21974 mem := v.Args[3]
21975 v_0 := v.Args[0]
21976 if v_0.Op != OpAMD64ADDQconst {
21977 break
21978 }
21979 d := v_0.AuxInt
21980 ptr := v_0.Args[0]
21981 idx := v.Args[1]
21982 val := v.Args[2]
21983 if !(is32Bit(c + d)) {
21984 break
21985 }
21986 v.reset(OpAMD64MOVSSstoreidx4)
21987 v.AuxInt = c + d
21988 v.Aux = sym
21989 v.AddArg(ptr)
21990 v.AddArg(idx)
21991 v.AddArg(val)
21992 v.AddArg(mem)
21993 return true
21994 }
21995 // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem)
21996 // cond: is32Bit(c+4*d)
21997 // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem)
21998 for {
21999 c := v.AuxInt
22000 sym := v.Aux
22001 mem := v.Args[3]
22002 ptr := v.Args[0]
22003 v_1 := v.Args[1]
22004 if v_1.Op != OpAMD64ADDQconst {
22005 break
22006 }
22007 d := v_1.AuxInt
22008 idx := v_1.Args[0]
22009 val := v.Args[2]
22010 if !(is32Bit(c + 4*d)) {
22011 break
22012 }
22013 v.reset(OpAMD64MOVSSstoreidx4)
22014 v.AuxInt = c + 4*d
22015 v.Aux = sym
22016 v.AddArg(ptr)
22017 v.AddArg(idx)
22018 v.AddArg(val)
22019 v.AddArg(mem)
22020 return true
22021 }
22022 // match: (MOVSSstoreidx4 [i] {s} p (MOVQconst [c]) w mem)
22023 // cond: is32Bit(i+4*c)
22024 // result: (MOVSSstore [i+4*c] {s} p w mem)
22025 for {
22026 i := v.AuxInt
22027 s := v.Aux
22028 mem := v.Args[3]
22029 p := v.Args[0]
22030 v_1 := v.Args[1]
22031 if v_1.Op != OpAMD64MOVQconst {
22032 break
22033 }
22034 c := v_1.AuxInt
22035 w := v.Args[2]
22036 if !(is32Bit(i + 4*c)) {
22037 break
22038 }
22039 v.reset(OpAMD64MOVSSstore)
22040 v.AuxInt = i + 4*c
22041 v.Aux = s
22042 v.AddArg(p)
22043 v.AddArg(w)
22044 v.AddArg(mem)
22045 return true
22046 }
22047 return false
22048 }
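// The MOVWQSX rules below remove redundant 16-bit sign extensions. The
// ANDLconst case rests on a small arithmetic fact: when c&0x8000 == 0, bit 15
// of x&c is always clear, so the 16-bit result is non-negative and sign
// extension is a no-op; masking with c&0x7fff yields the identical value. For
// example (illustrative values only): with c = 0x70ff, x&c <= 0x70ff < 0x8000
// for every x, so MOVWQSX (ANDLconst [0x70ff] x) == ANDLconst [0x70ff] x.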
22049 func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool {
22050 b := v.Block
22051 // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
22052 // cond: x.Uses == 1 && clobber(x)
22053 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
22054 for {
22055 x := v.Args[0]
22056 if x.Op != OpAMD64MOVWload {
22057 break
22058 }
22059 off := x.AuxInt
22060 sym := x.Aux
22061 mem := x.Args[1]
22062 ptr := x.Args[0]
22063 if !(x.Uses == 1 && clobber(x)) {
22064 break
22065 }
22066 b = x.Block
22067 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
22068 v.reset(OpCopy)
22069 v.AddArg(v0)
22070 v0.AuxInt = off
22071 v0.Aux = sym
22072 v0.AddArg(ptr)
22073 v0.AddArg(mem)
22074 return true
22075 }
22076 // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
22077 // cond: x.Uses == 1 && clobber(x)
22078 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
22079 for {
22080 x := v.Args[0]
22081 if x.Op != OpAMD64MOVLload {
22082 break
22083 }
22084 off := x.AuxInt
22085 sym := x.Aux
22086 mem := x.Args[1]
22087 ptr := x.Args[0]
22088 if !(x.Uses == 1 && clobber(x)) {
22089 break
22090 }
22091 b = x.Block
22092 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
22093 v.reset(OpCopy)
22094 v.AddArg(v0)
22095 v0.AuxInt = off
22096 v0.Aux = sym
22097 v0.AddArg(ptr)
22098 v0.AddArg(mem)
22099 return true
22100 }
22101 // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
22102 // cond: x.Uses == 1 && clobber(x)
22103 // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
22104 for {
22105 x := v.Args[0]
22106 if x.Op != OpAMD64MOVQload {
22107 break
22108 }
22109 off := x.AuxInt
22110 sym := x.Aux
22111 mem := x.Args[1]
22112 ptr := x.Args[0]
22113 if !(x.Uses == 1 && clobber(x)) {
22114 break
22115 }
22116 b = x.Block
22117 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
22118 v.reset(OpCopy)
22119 v.AddArg(v0)
22120 v0.AuxInt = off
22121 v0.Aux = sym
22122 v0.AddArg(ptr)
22123 v0.AddArg(mem)
22124 return true
22125 }
22126 // match: (MOVWQSX (ANDLconst [c] x))
22127 // cond: c & 0x8000 == 0
22128 // result: (ANDLconst [c & 0x7fff] x)
22129 for {
22130 v_0 := v.Args[0]
22131 if v_0.Op != OpAMD64ANDLconst {
22132 break
22133 }
22134 c := v_0.AuxInt
22135 x := v_0.Args[0]
22136 if !(c&0x8000 == 0) {
22137 break
22138 }
22139 v.reset(OpAMD64ANDLconst)
22140 v.AuxInt = c & 0x7fff
22141 v.AddArg(x)
22142 return true
22143 }
22144 // match: (MOVWQSX (MOVWQSX x))
22145 // cond:
22146 // result: (MOVWQSX x)
22147 for {
22148 v_0 := v.Args[0]
22149 if v_0.Op != OpAMD64MOVWQSX {
22150 break
22151 }
22152 x := v_0.Args[0]
22153 v.reset(OpAMD64MOVWQSX)
22154 v.AddArg(x)
22155 return true
22156 }
22157 // match: (MOVWQSX (MOVBQSX x))
22158 // cond:
22159 // result: (MOVBQSX x)
22160 for {
22161 v_0 := v.Args[0]
22162 if v_0.Op != OpAMD64MOVBQSX {
22163 break
22164 }
22165 x := v_0.Args[0]
22166 v.reset(OpAMD64MOVBQSX)
22167 v.AddArg(x)
22168 return true
22169 }
22170 return false
22171 }
22172 func rewriteValueAMD64_OpAMD64MOVWQSXload_0(v *Value) bool {
22173 // match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
22174 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
22175 // result: (MOVWQSX x)
22176 for {
22177 off := v.AuxInt
22178 sym := v.Aux
22179 _ = v.Args[1]
22180 ptr := v.Args[0]
22181 v_1 := v.Args[1]
22182 if v_1.Op != OpAMD64MOVWstore {
22183 break
22184 }
22185 off2 := v_1.AuxInt
22186 sym2 := v_1.Aux
22187 _ = v_1.Args[2]
22188 ptr2 := v_1.Args[0]
22189 x := v_1.Args[1]
22190 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
22191 break
22192 }
22193 v.reset(OpAMD64MOVWQSX)
22194 v.AddArg(x)
22195 return true
22196 }
22197 // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
22198 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
22199 // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
22200 for {
22201 off1 := v.AuxInt
22202 sym1 := v.Aux
22203 mem := v.Args[1]
22204 v_0 := v.Args[0]
22205 if v_0.Op != OpAMD64LEAQ {
22206 break
22207 }
22208 off2 := v_0.AuxInt
22209 sym2 := v_0.Aux
22210 base := v_0.Args[0]
22211 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
22212 break
22213 }
22214 v.reset(OpAMD64MOVWQSXload)
22215 v.AuxInt = off1 + off2
22216 v.Aux = mergeSym(sym1, sym2)
22217 v.AddArg(base)
22218 v.AddArg(mem)
22219 return true
22220 }
22221 return false
22222 }
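// The MOVWQZX rules below drop redundant 16-bit zero extensions: a
// single-use load feeding the extension is narrowed in place, and any value
// already known to have its upper 48 bits clear (zeroUpper48Bits) is reused
// directly via OpCopy.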
22223 func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool {
22224 b := v.Block
22225 // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
22226 // cond: x.Uses == 1 && clobber(x)
22227 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
22228 for {
22229 x := v.Args[0]
22230 if x.Op != OpAMD64MOVWload {
22231 break
22232 }
22233 off := x.AuxInt
22234 sym := x.Aux
22235 mem := x.Args[1]
22236 ptr := x.Args[0]
22237 if !(x.Uses == 1 && clobber(x)) {
22238 break
22239 }
22240 b = x.Block
22241 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
22242 v.reset(OpCopy)
22243 v.AddArg(v0)
22244 v0.AuxInt = off
22245 v0.Aux = sym
22246 v0.AddArg(ptr)
22247 v0.AddArg(mem)
22248 return true
22249 }
22250 // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
22251 // cond: x.Uses == 1 && clobber(x)
22252 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
22253 for {
22254 x := v.Args[0]
22255 if x.Op != OpAMD64MOVLload {
22256 break
22257 }
22258 off := x.AuxInt
22259 sym := x.Aux
22260 mem := x.Args[1]
22261 ptr := x.Args[0]
22262 if !(x.Uses == 1 && clobber(x)) {
22263 break
22264 }
22265 b = x.Block
22266 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
22267 v.reset(OpCopy)
22268 v.AddArg(v0)
22269 v0.AuxInt = off
22270 v0.Aux = sym
22271 v0.AddArg(ptr)
22272 v0.AddArg(mem)
22273 return true
22274 }
22275 // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
22276 // cond: x.Uses == 1 && clobber(x)
22277 // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
22278 for {
22279 x := v.Args[0]
22280 if x.Op != OpAMD64MOVQload {
22281 break
22282 }
22283 off := x.AuxInt
22284 sym := x.Aux
22285 mem := x.Args[1]
22286 ptr := x.Args[0]
22287 if !(x.Uses == 1 && clobber(x)) {
22288 break
22289 }
22290 b = x.Block
22291 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
22292 v.reset(OpCopy)
22293 v.AddArg(v0)
22294 v0.AuxInt = off
22295 v0.Aux = sym
22296 v0.AddArg(ptr)
22297 v0.AddArg(mem)
22298 return true
22299 }
22300 // match: (MOVWQZX x)
22301 // cond: zeroUpper48Bits(x,3)
22302 // result: x
22303 for {
22304 x := v.Args[0]
22305 if !(zeroUpper48Bits(x, 3)) {
22306 break
22307 }
22308 v.reset(OpCopy)
22309 v.Type = x.Type
22310 v.AddArg(x)
22311 return true
22312 }
22313 // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem))
22314 // cond: x.Uses == 1 && clobber(x)
22315 // result: @x.Block (MOVWloadidx1 <v.Type> [off] {sym} ptr idx mem)
22316 for {
22317 x := v.Args[0]
22318 if x.Op != OpAMD64MOVWloadidx1 {
22319 break
22320 }
22321 off := x.AuxInt
22322 sym := x.Aux
22323 mem := x.Args[2]
22324 ptr := x.Args[0]
22325 idx := x.Args[1]
22326 if !(x.Uses == 1 && clobber(x)) {
22327 break
22328 }
22329 b = x.Block
22330 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
22331 v.reset(OpCopy)
22332 v.AddArg(v0)
22333 v0.AuxInt = off
22334 v0.Aux = sym
22335 v0.AddArg(ptr)
22336 v0.AddArg(idx)
22337 v0.AddArg(mem)
22338 return true
22339 }
22340 // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem))
22341 // cond: x.Uses == 1 && clobber(x)
22342 // result: @x.Block (MOVWloadidx2 <v.Type> [off] {sym} ptr idx mem)
22343 for {
22344 x := v.Args[0]
22345 if x.Op != OpAMD64MOVWloadidx2 {
22346 break
22347 }
22348 off := x.AuxInt
22349 sym := x.Aux
22350 mem := x.Args[2]
22351 ptr := x.Args[0]
22352 idx := x.Args[1]
22353 if !(x.Uses == 1 && clobber(x)) {
22354 break
22355 }
22356 b = x.Block
22357 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type)
22358 v.reset(OpCopy)
22359 v.AddArg(v0)
22360 v0.AuxInt = off
22361 v0.Aux = sym
22362 v0.AddArg(ptr)
22363 v0.AddArg(idx)
22364 v0.AddArg(mem)
22365 return true
22366 }
22367 // match: (MOVWQZX (ANDLconst [c] x))
22368 // cond:
22369 // result: (ANDLconst [c & 0xffff] x)
22370 for {
22371 v_0 := v.Args[0]
22372 if v_0.Op != OpAMD64ANDLconst {
22373 break
22374 }
22375 c := v_0.AuxInt
22376 x := v_0.Args[0]
22377 v.reset(OpAMD64ANDLconst)
22378 v.AuxInt = c & 0xffff
22379 v.AddArg(x)
22380 return true
22381 }
22382 // match: (MOVWQZX (MOVWQZX x))
22383 // cond:
22384 // result: (MOVWQZX x)
22385 for {
22386 v_0 := v.Args[0]
22387 if v_0.Op != OpAMD64MOVWQZX {
22388 break
22389 }
22390 x := v_0.Args[0]
22391 v.reset(OpAMD64MOVWQZX)
22392 v.AddArg(x)
22393 return true
22394 }
22395 // match: (MOVWQZX (MOVBQZX x))
22396 // cond:
22397 // result: (MOVBQZX x)
22398 for {
22399 v_0 := v.Args[0]
22400 if v_0.Op != OpAMD64MOVBQZX {
22401 break
22402 }
22403 x := v_0.Args[0]
22404 v.reset(OpAMD64MOVBQZX)
22405 v.AddArg(x)
22406 return true
22407 }
22408 return false
22409 }
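// Besides the usual offset and symbol folding, MOVWload has one
// constant-folding rule: a load at a fixed offset from the static base (SB)
// of a read-only symbol is evaluated at compile time with read16 and replaced
// by a MOVLconst.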
22410 func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool {
22411 b := v.Block
22412 config := b.Func.Config
22413 // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
22414 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
22415 // result: (MOVWQZX x)
22416 for {
22417 off := v.AuxInt
22418 sym := v.Aux
22419 _ = v.Args[1]
22420 ptr := v.Args[0]
22421 v_1 := v.Args[1]
22422 if v_1.Op != OpAMD64MOVWstore {
22423 break
22424 }
22425 off2 := v_1.AuxInt
22426 sym2 := v_1.Aux
22427 _ = v_1.Args[2]
22428 ptr2 := v_1.Args[0]
22429 x := v_1.Args[1]
22430 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
22431 break
22432 }
22433 v.reset(OpAMD64MOVWQZX)
22434 v.AddArg(x)
22435 return true
22436 }
22437 // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
22438 // cond: is32Bit(off1+off2)
22439 // result: (MOVWload [off1+off2] {sym} ptr mem)
22440 for {
22441 off1 := v.AuxInt
22442 sym := v.Aux
22443 mem := v.Args[1]
22444 v_0 := v.Args[0]
22445 if v_0.Op != OpAMD64ADDQconst {
22446 break
22447 }
22448 off2 := v_0.AuxInt
22449 ptr := v_0.Args[0]
22450 if !(is32Bit(off1 + off2)) {
22451 break
22452 }
22453 v.reset(OpAMD64MOVWload)
22454 v.AuxInt = off1 + off2
22455 v.Aux = sym
22456 v.AddArg(ptr)
22457 v.AddArg(mem)
22458 return true
22459 }
22460 // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
22461 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
22462 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
22463 for {
22464 off1 := v.AuxInt
22465 sym1 := v.Aux
22466 mem := v.Args[1]
22467 v_0 := v.Args[0]
22468 if v_0.Op != OpAMD64LEAQ {
22469 break
22470 }
22471 off2 := v_0.AuxInt
22472 sym2 := v_0.Aux
22473 base := v_0.Args[0]
22474 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
22475 break
22476 }
22477 v.reset(OpAMD64MOVWload)
22478 v.AuxInt = off1 + off2
22479 v.Aux = mergeSym(sym1, sym2)
22480 v.AddArg(base)
22481 v.AddArg(mem)
22482 return true
22483 }
22484 // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem)
22485 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
22486 // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
22487 for {
22488 off1 := v.AuxInt
22489 sym1 := v.Aux
22490 mem := v.Args[1]
22491 v_0 := v.Args[0]
22492 if v_0.Op != OpAMD64LEAQ1 {
22493 break
22494 }
22495 off2 := v_0.AuxInt
22496 sym2 := v_0.Aux
22497 idx := v_0.Args[1]
22498 ptr := v_0.Args[0]
22499 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
22500 break
22501 }
22502 v.reset(OpAMD64MOVWloadidx1)
22503 v.AuxInt = off1 + off2
22504 v.Aux = mergeSym(sym1, sym2)
22505 v.AddArg(ptr)
22506 v.AddArg(idx)
22507 v.AddArg(mem)
22508 return true
22509 }
22510 // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem)
22511 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
22512 // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem)
22513 for {
22514 off1 := v.AuxInt
22515 sym1 := v.Aux
22516 mem := v.Args[1]
22517 v_0 := v.Args[0]
22518 if v_0.Op != OpAMD64LEAQ2 {
22519 break
22520 }
22521 off2 := v_0.AuxInt
22522 sym2 := v_0.Aux
22523 idx := v_0.Args[1]
22524 ptr := v_0.Args[0]
22525 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
22526 break
22527 }
22528 v.reset(OpAMD64MOVWloadidx2)
22529 v.AuxInt = off1 + off2
22530 v.Aux = mergeSym(sym1, sym2)
22531 v.AddArg(ptr)
22532 v.AddArg(idx)
22533 v.AddArg(mem)
22534 return true
22535 }
22536 // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem)
22537 // cond: ptr.Op != OpSB
22538 // result: (MOVWloadidx1 [off] {sym} ptr idx mem)
22539 for {
22540 off := v.AuxInt
22541 sym := v.Aux
22542 mem := v.Args[1]
22543 v_0 := v.Args[0]
22544 if v_0.Op != OpAMD64ADDQ {
22545 break
22546 }
22547 idx := v_0.Args[1]
22548 ptr := v_0.Args[0]
22549 if !(ptr.Op != OpSB) {
22550 break
22551 }
22552 v.reset(OpAMD64MOVWloadidx1)
22553 v.AuxInt = off
22554 v.Aux = sym
22555 v.AddArg(ptr)
22556 v.AddArg(idx)
22557 v.AddArg(mem)
22558 return true
22559 }
22560 // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
22561 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
22562 // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
22563 for {
22564 off1 := v.AuxInt
22565 sym1 := v.Aux
22566 mem := v.Args[1]
22567 v_0 := v.Args[0]
22568 if v_0.Op != OpAMD64LEAL {
22569 break
22570 }
22571 off2 := v_0.AuxInt
22572 sym2 := v_0.Aux
22573 base := v_0.Args[0]
22574 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
22575 break
22576 }
22577 v.reset(OpAMD64MOVWload)
22578 v.AuxInt = off1 + off2
22579 v.Aux = mergeSym(sym1, sym2)
22580 v.AddArg(base)
22581 v.AddArg(mem)
22582 return true
22583 }
22584 // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
22585 // cond: is32Bit(off1+off2)
22586 // result: (MOVWload [off1+off2] {sym} ptr mem)
22587 for {
22588 off1 := v.AuxInt
22589 sym := v.Aux
22590 mem := v.Args[1]
22591 v_0 := v.Args[0]
22592 if v_0.Op != OpAMD64ADDLconst {
22593 break
22594 }
22595 off2 := v_0.AuxInt
22596 ptr := v_0.Args[0]
22597 if !(is32Bit(off1 + off2)) {
22598 break
22599 }
22600 v.reset(OpAMD64MOVWload)
22601 v.AuxInt = off1 + off2
22602 v.Aux = sym
22603 v.AddArg(ptr)
22604 v.AddArg(mem)
22605 return true
22606 }
22607 // match: (MOVWload [off] {sym} (SB) _)
22608 // cond: symIsRO(sym)
22609 // result: (MOVLconst [int64(read16(sym, off, config.BigEndian))])
22610 for {
22611 off := v.AuxInt
22612 sym := v.Aux
22613 _ = v.Args[1]
22614 v_0 := v.Args[0]
22615 if v_0.Op != OpSB {
22616 break
22617 }
22618 if !(symIsRO(sym)) {
22619 break
22620 }
22621 v.reset(OpAMD64MOVLconst)
22622 v.AuxInt = int64(read16(sym, off, config.BigEndian))
22623 return true
22624 }
22625 return false
22626 }
22627 func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool {
22628 // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
22629 // cond:
22630 // result: (MOVWloadidx2 [c] {sym} ptr idx mem)
22631 for {
22632 c := v.AuxInt
22633 sym := v.Aux
22634 mem := v.Args[2]
22635 ptr := v.Args[0]
22636 v_1 := v.Args[1]
22637 if v_1.Op != OpAMD64SHLQconst {
22638 break
22639 }
22640 if v_1.AuxInt != 1 {
22641 break
22642 }
22643 idx := v_1.Args[0]
22644 v.reset(OpAMD64MOVWloadidx2)
22645 v.AuxInt = c
22646 v.Aux = sym
22647 v.AddArg(ptr)
22648 v.AddArg(idx)
22649 v.AddArg(mem)
22650 return true
22651 }
22652 // match: (MOVWloadidx1 [c] {sym} (SHLQconst [1] idx) ptr mem)
22653 // cond:
22654 // result: (MOVWloadidx2 [c] {sym} ptr idx mem)
22655 for {
22656 c := v.AuxInt
22657 sym := v.Aux
22658 mem := v.Args[2]
22659 v_0 := v.Args[0]
22660 if v_0.Op != OpAMD64SHLQconst {
22661 break
22662 }
22663 if v_0.AuxInt != 1 {
22664 break
22665 }
22666 idx := v_0.Args[0]
22667 ptr := v.Args[1]
22668 v.reset(OpAMD64MOVWloadidx2)
22669 v.AuxInt = c
22670 v.Aux = sym
22671 v.AddArg(ptr)
22672 v.AddArg(idx)
22673 v.AddArg(mem)
22674 return true
22675 }
22676 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
22677 // cond: is32Bit(c+d)
22678 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
22679 for {
22680 c := v.AuxInt
22681 sym := v.Aux
22682 mem := v.Args[2]
22683 v_0 := v.Args[0]
22684 if v_0.Op != OpAMD64ADDQconst {
22685 break
22686 }
22687 d := v_0.AuxInt
22688 ptr := v_0.Args[0]
22689 idx := v.Args[1]
22690 if !(is32Bit(c + d)) {
22691 break
22692 }
22693 v.reset(OpAMD64MOVWloadidx1)
22694 v.AuxInt = c + d
22695 v.Aux = sym
22696 v.AddArg(ptr)
22697 v.AddArg(idx)
22698 v.AddArg(mem)
22699 return true
22700 }
22701 // match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
22702 // cond: is32Bit(c+d)
22703 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
22704 for {
22705 c := v.AuxInt
22706 sym := v.Aux
22707 mem := v.Args[2]
22708 idx := v.Args[0]
22709 v_1 := v.Args[1]
22710 if v_1.Op != OpAMD64ADDQconst {
22711 break
22712 }
22713 d := v_1.AuxInt
22714 ptr := v_1.Args[0]
22715 if !(is32Bit(c + d)) {
22716 break
22717 }
22718 v.reset(OpAMD64MOVWloadidx1)
22719 v.AuxInt = c + d
22720 v.Aux = sym
22721 v.AddArg(ptr)
22722 v.AddArg(idx)
22723 v.AddArg(mem)
22724 return true
22725 }
22726 // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem)
22727 // cond: is32Bit(c+d)
22728 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
22729 for {
22730 c := v.AuxInt
22731 sym := v.Aux
22732 mem := v.Args[2]
22733 ptr := v.Args[0]
22734 v_1 := v.Args[1]
22735 if v_1.Op != OpAMD64ADDQconst {
22736 break
22737 }
22738 d := v_1.AuxInt
22739 idx := v_1.Args[0]
22740 if !(is32Bit(c + d)) {
22741 break
22742 }
22743 v.reset(OpAMD64MOVWloadidx1)
22744 v.AuxInt = c + d
22745 v.Aux = sym
22746 v.AddArg(ptr)
22747 v.AddArg(idx)
22748 v.AddArg(mem)
22749 return true
22750 }
22751 // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem)
22752 // cond: is32Bit(c+d)
22753 // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem)
22754 for {
22755 c := v.AuxInt
22756 sym := v.Aux
22757 mem := v.Args[2]
22758 v_0 := v.Args[0]
22759 if v_0.Op != OpAMD64ADDQconst {
22760 break
22761 }
22762 d := v_0.AuxInt
22763 idx := v_0.Args[0]
22764 ptr := v.Args[1]
22765 if !(is32Bit(c + d)) {
22766 break
22767 }
22768 v.reset(OpAMD64MOVWloadidx1)
22769 v.AuxInt = c + d
22770 v.Aux = sym
22771 v.AddArg(ptr)
22772 v.AddArg(idx)
22773 v.AddArg(mem)
22774 return true
22775 }
22776 // match: (MOVWloadidx1 [i] {s} p (MOVQconst [c]) mem)
22777 // cond: is32Bit(i+c)
22778 // result: (MOVWload [i+c] {s} p mem)
22779 for {
22780 i := v.AuxInt
22781 s := v.Aux
22782 mem := v.Args[2]
22783 p := v.Args[0]
22784 v_1 := v.Args[1]
22785 if v_1.Op != OpAMD64MOVQconst {
22786 break
22787 }
22788 c := v_1.AuxInt
22789 if !(is32Bit(i + c)) {
22790 break
22791 }
22792 v.reset(OpAMD64MOVWload)
22793 v.AuxInt = i + c
22794 v.Aux = s
22795 v.AddArg(p)
22796 v.AddArg(mem)
22797 return true
22798 }
22799 // match: (MOVWloadidx1 [i] {s} (MOVQconst [c]) p mem)
22800 // cond: is32Bit(i+c)
22801 // result: (MOVWload [i+c] {s} p mem)
22802 for {
22803 i := v.AuxInt
22804 s := v.Aux
22805 mem := v.Args[2]
22806 v_0 := v.Args[0]
22807 if v_0.Op != OpAMD64MOVQconst {
22808 break
22809 }
22810 c := v_0.AuxInt
22811 p := v.Args[1]
22812 if !(is32Bit(i + c)) {
22813 break
22814 }
22815 v.reset(OpAMD64MOVWload)
22816 v.AuxInt = i + c
22817 v.Aux = s
22818 v.AddArg(p)
22819 v.AddArg(mem)
22820 return true
22821 }
22822 return false
22823 }
22824 func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool {
22825 // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem)
22826 // cond: is32Bit(c+d)
22827 // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem)
22828 for {
22829 c := v.AuxInt
22830 sym := v.Aux
22831 mem := v.Args[2]
22832 v_0 := v.Args[0]
22833 if v_0.Op != OpAMD64ADDQconst {
22834 break
22835 }
22836 d := v_0.AuxInt
22837 ptr := v_0.Args[0]
22838 idx := v.Args[1]
22839 if !(is32Bit(c + d)) {
22840 break
22841 }
22842 v.reset(OpAMD64MOVWloadidx2)
22843 v.AuxInt = c + d
22844 v.Aux = sym
22845 v.AddArg(ptr)
22846 v.AddArg(idx)
22847 v.AddArg(mem)
22848 return true
22849 }
22850 // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem)
22851 // cond: is32Bit(c+2*d)
22852 // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem)
22853 for {
22854 c := v.AuxInt
22855 sym := v.Aux
22856 mem := v.Args[2]
22857 ptr := v.Args[0]
22858 v_1 := v.Args[1]
22859 if v_1.Op != OpAMD64ADDQconst {
22860 break
22861 }
22862 d := v_1.AuxInt
22863 idx := v_1.Args[0]
22864 if !(is32Bit(c + 2*d)) {
22865 break
22866 }
22867 v.reset(OpAMD64MOVWloadidx2)
22868 v.AuxInt = c + 2*d
22869 v.Aux = sym
22870 v.AddArg(ptr)
22871 v.AddArg(idx)
22872 v.AddArg(mem)
22873 return true
22874 }
22875 // match: (MOVWloadidx2 [i] {s} p (MOVQconst [c]) mem)
22876 // cond: is32Bit(i+2*c)
22877 // result: (MOVWload [i+2*c] {s} p mem)
22878 for {
22879 i := v.AuxInt
22880 s := v.Aux
22881 mem := v.Args[2]
22882 p := v.Args[0]
22883 v_1 := v.Args[1]
22884 if v_1.Op != OpAMD64MOVQconst {
22885 break
22886 }
22887 c := v_1.AuxInt
22888 if !(is32Bit(i + 2*c)) {
22889 break
22890 }
22891 v.reset(OpAMD64MOVWload)
22892 v.AuxInt = i + 2*c
22893 v.Aux = s
22894 v.AddArg(p)
22895 v.AddArg(mem)
22896 return true
22897 }
22898 return false
22899 }
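// The MOVWstore rules below include narrow-store merging: two adjacent
// 16-bit stores of the halves of one value fuse into a single 32-bit store.
// A minimal sketch of the Go-level pattern they target (storeHalves is a
// hypothetical example, not part of this file):
func storeHalves(p *[2]uint16, w uint32) {
	p[0] = uint16(w)       // MOVWstore [i-2] {s} ptr w mem
	p[1] = uint16(w >> 16) // MOVWstore [i] {s} ptr (SHRLconst [16] w) mem
}
// After rewriting, both assignments become one MOVLstore of w at offset i-2.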
22900 func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool {
22901 // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
22902 // cond:
22903 // result: (MOVWstore [off] {sym} ptr x mem)
22904 for {
22905 off := v.AuxInt
22906 sym := v.Aux
22907 mem := v.Args[2]
22908 ptr := v.Args[0]
22909 v_1 := v.Args[1]
22910 if v_1.Op != OpAMD64MOVWQSX {
22911 break
22912 }
22913 x := v_1.Args[0]
22914 v.reset(OpAMD64MOVWstore)
22915 v.AuxInt = off
22916 v.Aux = sym
22917 v.AddArg(ptr)
22918 v.AddArg(x)
22919 v.AddArg(mem)
22920 return true
22921 }
22922 // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
22923 // cond:
22924 // result: (MOVWstore [off] {sym} ptr x mem)
22925 for {
22926 off := v.AuxInt
22927 sym := v.Aux
22928 mem := v.Args[2]
22929 ptr := v.Args[0]
22930 v_1 := v.Args[1]
22931 if v_1.Op != OpAMD64MOVWQZX {
22932 break
22933 }
22934 x := v_1.Args[0]
22935 v.reset(OpAMD64MOVWstore)
22936 v.AuxInt = off
22937 v.Aux = sym
22938 v.AddArg(ptr)
22939 v.AddArg(x)
22940 v.AddArg(mem)
22941 return true
22942 }
22943 // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
22944 // cond: is32Bit(off1+off2)
22945 // result: (MOVWstore [off1+off2] {sym} ptr val mem)
22946 for {
22947 off1 := v.AuxInt
22948 sym := v.Aux
22949 mem := v.Args[2]
22950 v_0 := v.Args[0]
22951 if v_0.Op != OpAMD64ADDQconst {
22952 break
22953 }
22954 off2 := v_0.AuxInt
22955 ptr := v_0.Args[0]
22956 val := v.Args[1]
22957 if !(is32Bit(off1 + off2)) {
22958 break
22959 }
22960 v.reset(OpAMD64MOVWstore)
22961 v.AuxInt = off1 + off2
22962 v.Aux = sym
22963 v.AddArg(ptr)
22964 v.AddArg(val)
22965 v.AddArg(mem)
22966 return true
22967 }
22968 // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
22969 // cond: validOff(off)
22970 // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
22971 for {
22972 off := v.AuxInt
22973 sym := v.Aux
22974 mem := v.Args[2]
22975 ptr := v.Args[0]
22976 v_1 := v.Args[1]
22977 if v_1.Op != OpAMD64MOVLconst {
22978 break
22979 }
22980 c := v_1.AuxInt
22981 if !(validOff(off)) {
22982 break
22983 }
22984 v.reset(OpAMD64MOVWstoreconst)
22985 v.AuxInt = makeValAndOff(int64(int16(c)), off)
22986 v.Aux = sym
22987 v.AddArg(ptr)
22988 v.AddArg(mem)
22989 return true
22990 }
22991 // match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem)
22992 // cond: validOff(off)
22993 // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem)
22994 for {
22995 off := v.AuxInt
22996 sym := v.Aux
22997 mem := v.Args[2]
22998 ptr := v.Args[0]
22999 v_1 := v.Args[1]
23000 if v_1.Op != OpAMD64MOVQconst {
23001 break
23002 }
23003 c := v_1.AuxInt
23004 if !(validOff(off)) {
23005 break
23006 }
23007 v.reset(OpAMD64MOVWstoreconst)
23008 v.AuxInt = makeValAndOff(int64(int16(c)), off)
23009 v.Aux = sym
23010 v.AddArg(ptr)
23011 v.AddArg(mem)
23012 return true
23013 }
23014 // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
23015 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
23016 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
23017 for {
23018 off1 := v.AuxInt
23019 sym1 := v.Aux
23020 mem := v.Args[2]
23021 v_0 := v.Args[0]
23022 if v_0.Op != OpAMD64LEAQ {
23023 break
23024 }
23025 off2 := v_0.AuxInt
23026 sym2 := v_0.Aux
23027 base := v_0.Args[0]
23028 val := v.Args[1]
23029 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
23030 break
23031 }
23032 v.reset(OpAMD64MOVWstore)
23033 v.AuxInt = off1 + off2
23034 v.Aux = mergeSym(sym1, sym2)
23035 v.AddArg(base)
23036 v.AddArg(val)
23037 v.AddArg(mem)
23038 return true
23039 }
23040 // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem)
23041 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
23042 // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
23043 for {
23044 off1 := v.AuxInt
23045 sym1 := v.Aux
23046 mem := v.Args[2]
23047 v_0 := v.Args[0]
23048 if v_0.Op != OpAMD64LEAQ1 {
23049 break
23050 }
23051 off2 := v_0.AuxInt
23052 sym2 := v_0.Aux
23053 idx := v_0.Args[1]
23054 ptr := v_0.Args[0]
23055 val := v.Args[1]
23056 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
23057 break
23058 }
23059 v.reset(OpAMD64MOVWstoreidx1)
23060 v.AuxInt = off1 + off2
23061 v.Aux = mergeSym(sym1, sym2)
23062 v.AddArg(ptr)
23063 v.AddArg(idx)
23064 v.AddArg(val)
23065 v.AddArg(mem)
23066 return true
23067 }
23068 // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem)
23069 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
23070 // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem)
23071 for {
23072 off1 := v.AuxInt
23073 sym1 := v.Aux
23074 mem := v.Args[2]
23075 v_0 := v.Args[0]
23076 if v_0.Op != OpAMD64LEAQ2 {
23077 break
23078 }
23079 off2 := v_0.AuxInt
23080 sym2 := v_0.Aux
23081 idx := v_0.Args[1]
23082 ptr := v_0.Args[0]
23083 val := v.Args[1]
23084 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
23085 break
23086 }
23087 v.reset(OpAMD64MOVWstoreidx2)
23088 v.AuxInt = off1 + off2
23089 v.Aux = mergeSym(sym1, sym2)
23090 v.AddArg(ptr)
23091 v.AddArg(idx)
23092 v.AddArg(val)
23093 v.AddArg(mem)
23094 return true
23095 }
23096 // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem)
23097 // cond: ptr.Op != OpSB
23098 // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem)
23099 for {
23100 off := v.AuxInt
23101 sym := v.Aux
23102 mem := v.Args[2]
23103 v_0 := v.Args[0]
23104 if v_0.Op != OpAMD64ADDQ {
23105 break
23106 }
23107 idx := v_0.Args[1]
23108 ptr := v_0.Args[0]
23109 val := v.Args[1]
23110 if !(ptr.Op != OpSB) {
23111 break
23112 }
23113 v.reset(OpAMD64MOVWstoreidx1)
23114 v.AuxInt = off
23115 v.Aux = sym
23116 v.AddArg(ptr)
23117 v.AddArg(idx)
23118 v.AddArg(val)
23119 v.AddArg(mem)
23120 return true
23121 }
23122 // match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
23123 // cond: x.Uses == 1 && clobber(x)
23124 // result: (MOVLstore [i-2] {s} p w mem)
23125 for {
23126 i := v.AuxInt
23127 s := v.Aux
23128 _ = v.Args[2]
23129 p := v.Args[0]
23130 v_1 := v.Args[1]
23131 if v_1.Op != OpAMD64SHRLconst {
23132 break
23133 }
23134 if v_1.AuxInt != 16 {
23135 break
23136 }
23137 w := v_1.Args[0]
23138 x := v.Args[2]
23139 if x.Op != OpAMD64MOVWstore {
23140 break
23141 }
23142 if x.AuxInt != i-2 {
23143 break
23144 }
23145 if x.Aux != s {
23146 break
23147 }
23148 mem := x.Args[2]
23149 if p != x.Args[0] {
23150 break
23151 }
23152 if w != x.Args[1] {
23153 break
23154 }
23155 if !(x.Uses == 1 && clobber(x)) {
23156 break
23157 }
23158 v.reset(OpAMD64MOVLstore)
23159 v.AuxInt = i - 2
23160 v.Aux = s
23161 v.AddArg(p)
23162 v.AddArg(w)
23163 v.AddArg(mem)
23164 return true
23165 }
23166 return false
23167 }
23168 func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool {
23169 b := v.Block
23170 typ := &b.Func.Config.Types
23171 // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
23172 // cond: x.Uses == 1 && clobber(x)
23173 // result: (MOVLstore [i-2] {s} p w mem)
23174 for {
23175 i := v.AuxInt
23176 s := v.Aux
23177 _ = v.Args[2]
23178 p := v.Args[0]
23179 v_1 := v.Args[1]
23180 if v_1.Op != OpAMD64SHRQconst {
23181 break
23182 }
23183 if v_1.AuxInt != 16 {
23184 break
23185 }
23186 w := v_1.Args[0]
23187 x := v.Args[2]
23188 if x.Op != OpAMD64MOVWstore {
23189 break
23190 }
23191 if x.AuxInt != i-2 {
23192 break
23193 }
23194 if x.Aux != s {
23195 break
23196 }
23197 mem := x.Args[2]
23198 if p != x.Args[0] {
23199 break
23200 }
23201 if w != x.Args[1] {
23202 break
23203 }
23204 if !(x.Uses == 1 && clobber(x)) {
23205 break
23206 }
23207 v.reset(OpAMD64MOVLstore)
23208 v.AuxInt = i - 2
23209 v.Aux = s
23210 v.AddArg(p)
23211 v.AddArg(w)
23212 v.AddArg(mem)
23213 return true
23214 }
23215 // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
23216 // cond: x.Uses == 1 && clobber(x)
23217 // result: (MOVLstore [i-2] {s} p w0 mem)
23218 for {
23219 i := v.AuxInt
23220 s := v.Aux
23221 _ = v.Args[2]
23222 p := v.Args[0]
23223 v_1 := v.Args[1]
23224 if v_1.Op != OpAMD64SHRLconst {
23225 break
23226 }
23227 j := v_1.AuxInt
23228 w := v_1.Args[0]
23229 x := v.Args[2]
23230 if x.Op != OpAMD64MOVWstore {
23231 break
23232 }
23233 if x.AuxInt != i-2 {
23234 break
23235 }
23236 if x.Aux != s {
23237 break
23238 }
23239 mem := x.Args[2]
23240 if p != x.Args[0] {
23241 break
23242 }
23243 w0 := x.Args[1]
23244 if w0.Op != OpAMD64SHRLconst {
23245 break
23246 }
23247 if w0.AuxInt != j-16 {
23248 break
23249 }
23250 if w != w0.Args[0] {
23251 break
23252 }
23253 if !(x.Uses == 1 && clobber(x)) {
23254 break
23255 }
23256 v.reset(OpAMD64MOVLstore)
23257 v.AuxInt = i - 2
23258 v.Aux = s
23259 v.AddArg(p)
23260 v.AddArg(w0)
23261 v.AddArg(mem)
23262 return true
23263 }
23264 // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
23265 // cond: x.Uses == 1 && clobber(x)
23266 // result: (MOVLstore [i-2] {s} p w0 mem)
23267 for {
23268 i := v.AuxInt
23269 s := v.Aux
23270 _ = v.Args[2]
23271 p := v.Args[0]
23272 v_1 := v.Args[1]
23273 if v_1.Op != OpAMD64SHRQconst {
23274 break
23275 }
23276 j := v_1.AuxInt
23277 w := v_1.Args[0]
23278 x := v.Args[2]
23279 if x.Op != OpAMD64MOVWstore {
23280 break
23281 }
23282 if x.AuxInt != i-2 {
23283 break
23284 }
23285 if x.Aux != s {
23286 break
23287 }
23288 mem := x.Args[2]
23289 if p != x.Args[0] {
23290 break
23291 }
23292 w0 := x.Args[1]
23293 if w0.Op != OpAMD64SHRQconst {
23294 break
23295 }
23296 if w0.AuxInt != j-16 {
23297 break
23298 }
23299 if w != w0.Args[0] {
23300 break
23301 }
23302 if !(x.Uses == 1 && clobber(x)) {
23303 break
23304 }
23305 v.reset(OpAMD64MOVLstore)
23306 v.AuxInt = i - 2
23307 v.Aux = s
23308 v.AddArg(p)
23309 v.AddArg(w0)
23310 v.AddArg(mem)
23311 return true
23312 }
23313 // match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem))
23314 // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)
23315 // result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
23316 for {
23317 i := v.AuxInt
23318 s := v.Aux
23319 _ = v.Args[2]
23320 p := v.Args[0]
23321 x1 := v.Args[1]
23322 if x1.Op != OpAMD64MOVWload {
23323 break
23324 }
23325 j := x1.AuxInt
23326 s2 := x1.Aux
23327 mem := x1.Args[1]
23328 p2 := x1.Args[0]
23329 mem2 := v.Args[2]
23330 if mem2.Op != OpAMD64MOVWstore {
23331 break
23332 }
23333 if mem2.AuxInt != i-2 {
23334 break
23335 }
23336 if mem2.Aux != s {
23337 break
23338 }
23339 _ = mem2.Args[2]
23340 if p != mem2.Args[0] {
23341 break
23342 }
23343 x2 := mem2.Args[1]
23344 if x2.Op != OpAMD64MOVWload {
23345 break
23346 }
23347 if x2.AuxInt != j-2 {
23348 break
23349 }
23350 if x2.Aux != s2 {
23351 break
23352 }
23353 _ = x2.Args[1]
23354 if p2 != x2.Args[0] {
23355 break
23356 }
23357 if mem != x2.Args[1] {
23358 break
23359 }
23360 if mem != mem2.Args[2] {
23361 break
23362 }
23363 if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) {
23364 break
23365 }
23366 v.reset(OpAMD64MOVLstore)
23367 v.AuxInt = i - 2
23368 v.Aux = s
23369 v.AddArg(p)
23370 v0 := b.NewValue0(x2.Pos, OpAMD64MOVLload, typ.UInt32)
23371 v0.AuxInt = j - 2
23372 v0.Aux = s2
23373 v0.AddArg(p2)
23374 v0.AddArg(mem)
23375 v.AddArg(v0)
23376 v.AddArg(mem)
23377 return true
23378 }
23379 // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
23380 // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2)
23381 // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
23382 for {
23383 off1 := v.AuxInt
23384 sym1 := v.Aux
23385 mem := v.Args[2]
23386 v_0 := v.Args[0]
23387 if v_0.Op != OpAMD64LEAL {
23388 break
23389 }
23390 off2 := v_0.AuxInt
23391 sym2 := v_0.Aux
23392 base := v_0.Args[0]
23393 val := v.Args[1]
23394 if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
23395 break
23396 }
23397 v.reset(OpAMD64MOVWstore)
23398 v.AuxInt = off1 + off2
23399 v.Aux = mergeSym(sym1, sym2)
23400 v.AddArg(base)
23401 v.AddArg(val)
23402 v.AddArg(mem)
23403 return true
23404 }
23405 // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
23406 // cond: is32Bit(off1+off2)
23407 // result: (MOVWstore [off1+off2] {sym} ptr val mem)
23408 for {
23409 off1 := v.AuxInt
23410 sym := v.Aux
23411 mem := v.Args[2]
23412 v_0 := v.Args[0]
23413 if v_0.Op != OpAMD64ADDLconst {
23414 break
23415 }
23416 off2 := v_0.AuxInt
23417 ptr := v_0.Args[0]
23418 val := v.Args[1]
23419 if !(is32Bit(off1 + off2)) {
23420 break
23421 }
23422 v.reset(OpAMD64MOVWstore)
23423 v.AuxInt = off1 + off2
23424 v.Aux = sym
23425 v.AddArg(ptr)
23426 v.AddArg(val)
23427 v.AddArg(mem)
23428 return true
23429 }
23430 return false
23431 }
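// MOVWstoreconst carries a ValAndOff in its AuxInt: a constant value and a
// memory offset packed into one int64. A sketch of the packing assumed by the
// canAdd/add calls below, under the assumption that it mirrors the real
// makeValAndOff in rewrite.go (the Sketch name is added only for
// illustration):
func makeValAndOffSketch(val, off int64) int64 {
	// Value in the high 32 bits, offset bits (as uint32) in the low 32.
	return val<<32 + int64(uint32(off))
}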
23432 func rewriteValueAMD64_OpAMD64MOVWstoreconst_0(v *Value) bool {
23433 // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
23434 // cond: ValAndOff(sc).canAdd(off)
23435 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
23436 for {
23437 sc := v.AuxInt
23438 s := v.Aux
23439 mem := v.Args[1]
23440 v_0 := v.Args[0]
23441 if v_0.Op != OpAMD64ADDQconst {
23442 break
23443 }
23444 off := v_0.AuxInt
23445 ptr := v_0.Args[0]
23446 if !(ValAndOff(sc).canAdd(off)) {
23447 break
23448 }
23449 v.reset(OpAMD64MOVWstoreconst)
23450 v.AuxInt = ValAndOff(sc).add(off)
23451 v.Aux = s
23452 v.AddArg(ptr)
23453 v.AddArg(mem)
23454 return true
23455 }
23456 // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
23457 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
23458 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
23459 for {
23460 sc := v.AuxInt
23461 sym1 := v.Aux
23462 mem := v.Args[1]
23463 v_0 := v.Args[0]
23464 if v_0.Op != OpAMD64LEAQ {
23465 break
23466 }
23467 off := v_0.AuxInt
23468 sym2 := v_0.Aux
23469 ptr := v_0.Args[0]
23470 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
23471 break
23472 }
23473 v.reset(OpAMD64MOVWstoreconst)
23474 v.AuxInt = ValAndOff(sc).add(off)
23475 v.Aux = mergeSym(sym1, sym2)
23476 v.AddArg(ptr)
23477 v.AddArg(mem)
23478 return true
23479 }
23480 // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem)
23481 // cond: canMergeSym(sym1, sym2)
23482 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
23483 for {
23484 x := v.AuxInt
23485 sym1 := v.Aux
23486 mem := v.Args[1]
23487 v_0 := v.Args[0]
23488 if v_0.Op != OpAMD64LEAQ1 {
23489 break
23490 }
23491 off := v_0.AuxInt
23492 sym2 := v_0.Aux
23493 idx := v_0.Args[1]
23494 ptr := v_0.Args[0]
23495 if !(canMergeSym(sym1, sym2)) {
23496 break
23497 }
23498 v.reset(OpAMD64MOVWstoreconstidx1)
23499 v.AuxInt = ValAndOff(x).add(off)
23500 v.Aux = mergeSym(sym1, sym2)
23501 v.AddArg(ptr)
23502 v.AddArg(idx)
23503 v.AddArg(mem)
23504 return true
23505 }
23506 // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem)
23507 // cond: canMergeSym(sym1, sym2)
23508 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem)
23509 for {
23510 x := v.AuxInt
23511 sym1 := v.Aux
23512 mem := v.Args[1]
23513 v_0 := v.Args[0]
23514 if v_0.Op != OpAMD64LEAQ2 {
23515 break
23516 }
23517 off := v_0.AuxInt
23518 sym2 := v_0.Aux
23519 idx := v_0.Args[1]
23520 ptr := v_0.Args[0]
23521 if !(canMergeSym(sym1, sym2)) {
23522 break
23523 }
23524 v.reset(OpAMD64MOVWstoreconstidx2)
23525 v.AuxInt = ValAndOff(x).add(off)
23526 v.Aux = mergeSym(sym1, sym2)
23527 v.AddArg(ptr)
23528 v.AddArg(idx)
23529 v.AddArg(mem)
23530 return true
23531 }
23532 // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem)
23533 // cond:
23534 // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem)
23535 for {
23536 x := v.AuxInt
23537 sym := v.Aux
23538 mem := v.Args[1]
23539 v_0 := v.Args[0]
23540 if v_0.Op != OpAMD64ADDQ {
23541 break
23542 }
23543 idx := v_0.Args[1]
23544 ptr := v_0.Args[0]
23545 v.reset(OpAMD64MOVWstoreconstidx1)
23546 v.AuxInt = x
23547 v.Aux = sym
23548 v.AddArg(ptr)
23549 v.AddArg(idx)
23550 v.AddArg(mem)
23551 return true
23552 }
23553 // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
23554 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
23555 // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
23556 for {
23557 c := v.AuxInt
23558 s := v.Aux
23559 _ = v.Args[1]
23560 p := v.Args[0]
23561 x := v.Args[1]
23562 if x.Op != OpAMD64MOVWstoreconst {
23563 break
23564 }
23565 a := x.AuxInt
23566 if x.Aux != s {
23567 break
23568 }
23569 mem := x.Args[1]
23570 if p != x.Args[0] {
23571 break
23572 }
23573 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
23574 break
23575 }
23576 v.reset(OpAMD64MOVLstoreconst)
23577 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
23578 v.Aux = s
23579 v.AddArg(p)
23580 v.AddArg(mem)
23581 return true
23582 }
23583 // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
23584 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
23585 // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem)
23586 for {
23587 a := v.AuxInt
23588 s := v.Aux
23589 _ = v.Args[1]
23590 p := v.Args[0]
23591 x := v.Args[1]
23592 if x.Op != OpAMD64MOVWstoreconst {
23593 break
23594 }
23595 c := x.AuxInt
23596 if x.Aux != s {
23597 break
23598 }
23599 mem := x.Args[1]
23600 if p != x.Args[0] {
23601 break
23602 }
23603 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
23604 break
23605 }
23606 v.reset(OpAMD64MOVLstoreconst)
23607 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
23608 v.Aux = s
23609 v.AddArg(p)
23610 v.AddArg(mem)
23611 return true
23612 }
23613 // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
23614 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)
23615 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem)
23616 for {
23617 sc := v.AuxInt
23618 sym1 := v.Aux
23619 mem := v.Args[1]
23620 v_0 := v.Args[0]
23621 if v_0.Op != OpAMD64LEAL {
23622 break
23623 }
23624 off := v_0.AuxInt
23625 sym2 := v_0.Aux
23626 ptr := v_0.Args[0]
23627 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) {
23628 break
23629 }
23630 v.reset(OpAMD64MOVWstoreconst)
23631 v.AuxInt = ValAndOff(sc).add(off)
23632 v.Aux = mergeSym(sym1, sym2)
23633 v.AddArg(ptr)
23634 v.AddArg(mem)
23635 return true
23636 }
23637 // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
23638 // cond: ValAndOff(sc).canAdd(off)
23639 // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)
23640 for {
23641 sc := v.AuxInt
23642 s := v.Aux
23643 mem := v.Args[1]
23644 v_0 := v.Args[0]
23645 if v_0.Op != OpAMD64ADDLconst {
23646 break
23647 }
23648 off := v_0.AuxInt
23649 ptr := v_0.Args[0]
23650 if !(ValAndOff(sc).canAdd(off)) {
23651 break
23652 }
23653 v.reset(OpAMD64MOVWstoreconst)
23654 v.AuxInt = ValAndOff(sc).add(off)
23655 v.Aux = s
23656 v.AddArg(ptr)
23657 v.AddArg(mem)
23658 return true
23659 }
23660 return false
23661 }
23662 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool {
23663 // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem)
23664 // cond:
23665 // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem)
23666 for {
23667 c := v.AuxInt
23668 sym := v.Aux
23669 mem := v.Args[2]
23670 ptr := v.Args[0]
23671 v_1 := v.Args[1]
23672 if v_1.Op != OpAMD64SHLQconst {
23673 break
23674 }
23675 if v_1.AuxInt != 1 {
23676 break
23677 }
23678 idx := v_1.Args[0]
23679 v.reset(OpAMD64MOVWstoreconstidx2)
23680 v.AuxInt = c
23681 v.Aux = sym
23682 v.AddArg(ptr)
23683 v.AddArg(idx)
23684 v.AddArg(mem)
23685 return true
23686 }
23687 // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem)
23688 // cond: ValAndOff(x).canAdd(c)
23689 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
23690 for {
23691 x := v.AuxInt
23692 sym := v.Aux
23693 mem := v.Args[2]
23694 v_0 := v.Args[0]
23695 if v_0.Op != OpAMD64ADDQconst {
23696 break
23697 }
23698 c := v_0.AuxInt
23699 ptr := v_0.Args[0]
23700 idx := v.Args[1]
23701 if !(ValAndOff(x).canAdd(c)) {
23702 break
23703 }
23704 v.reset(OpAMD64MOVWstoreconstidx1)
23705 v.AuxInt = ValAndOff(x).add(c)
23706 v.Aux = sym
23707 v.AddArg(ptr)
23708 v.AddArg(idx)
23709 v.AddArg(mem)
23710 return true
23711 }
23712 // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem)
23713 // cond: ValAndOff(x).canAdd(c)
23714 // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem)
23715 for {
23716 x := v.AuxInt
23717 sym := v.Aux
23718 mem := v.Args[2]
23719 ptr := v.Args[0]
23720 v_1 := v.Args[1]
23721 if v_1.Op != OpAMD64ADDQconst {
23722 break
23723 }
23724 c := v_1.AuxInt
23725 idx := v_1.Args[0]
23726 if !(ValAndOff(x).canAdd(c)) {
23727 break
23728 }
23729 v.reset(OpAMD64MOVWstoreconstidx1)
23730 v.AuxInt = ValAndOff(x).add(c)
23731 v.Aux = sym
23732 v.AddArg(ptr)
23733 v.AddArg(idx)
23734 v.AddArg(mem)
23735 return true
23736 }
23737 // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem))
23738 // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
23739 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem)
23740 for {
23741 c := v.AuxInt
23742 s := v.Aux
23743 _ = v.Args[2]
23744 p := v.Args[0]
23745 i := v.Args[1]
23746 x := v.Args[2]
23747 if x.Op != OpAMD64MOVWstoreconstidx1 {
23748 break
23749 }
23750 a := x.AuxInt
23751 if x.Aux != s {
23752 break
23753 }
23754 mem := x.Args[2]
23755 if p != x.Args[0] {
23756 break
23757 }
23758 if i != x.Args[1] {
23759 break
23760 }
23761 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
23762 break
23763 }
23764 v.reset(OpAMD64MOVLstoreconstidx1)
23765 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
23766 v.Aux = s
23767 v.AddArg(p)
23768 v.AddArg(i)
23769 v.AddArg(mem)
23770 return true
23771 }
23772 return false
23773 }
23774 func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool {
23775 b := v.Block
23776 // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem)
23777 // cond: ValAndOff(x).canAdd(c)
23778 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem)
23779 for {
23780 x := v.AuxInt
23781 sym := v.Aux
23782 mem := v.Args[2]
23783 v_0 := v.Args[0]
23784 if v_0.Op != OpAMD64ADDQconst {
23785 break
23786 }
23787 c := v_0.AuxInt
23788 ptr := v_0.Args[0]
23789 idx := v.Args[1]
23790 if !(ValAndOff(x).canAdd(c)) {
23791 break
23792 }
23793 v.reset(OpAMD64MOVWstoreconstidx2)
23794 v.AuxInt = ValAndOff(x).add(c)
23795 v.Aux = sym
23796 v.AddArg(ptr)
23797 v.AddArg(idx)
23798 v.AddArg(mem)
23799 return true
23800 }
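23801 // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem)
23802 // cond: ValAndOff(x).canAdd(2*c)
23803 // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem)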
23804 for {
23805 x := v.AuxInt
23806 sym := v.Aux
23807 mem := v.Args[2]
23808 ptr := v.Args[0]
23809 v_1 := v.Args[1]
23810 if v_1.Op != OpAMD64ADDQconst {
23811 break
23812 }
23813 c := v_1.AuxInt
23814 idx := v_1.Args[0]
23815 if !(ValAndOff(x).canAdd(2 * c)) {
23816 break
23817 }
23818 v.reset(OpAMD64MOVWstoreconstidx2)
23819 v.AuxInt = ValAndOff(x).add(2 * c)
23820 v.Aux = sym
23821 v.AddArg(ptr)
23822 v.AddArg(idx)
23823 v.AddArg(mem)
23824 return true
23825 }
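23826 // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem))
23827 // cond: x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)
23828 // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst <i.Type> [1] i) mem)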
23829 for {
23830 c := v.AuxInt
23831 s := v.Aux
23832 _ = v.Args[2]
23833 p := v.Args[0]
23834 i := v.Args[1]
23835 x := v.Args[2]
23836 if x.Op != OpAMD64MOVWstoreconstidx2 {
23837 break
23838 }
23839 a := x.AuxInt
23840 if x.Aux != s {
23841 break
23842 }
23843 mem := x.Args[2]
23844 if p != x.Args[0] {
23845 break
23846 }
23847 if i != x.Args[1] {
23848 break
23849 }
23850 if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
23851 break
23852 }
23853 v.reset(OpAMD64MOVLstoreconstidx1)
23854 v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off())
23855 v.Aux = s
23856 v.AddArg(p)
23857 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type)
23858 v0.AuxInt = 1
23859 v0.AddArg(i)
23860 v.AddArg(v0)
23861 v.AddArg(mem)
23862 return true
23863 }
23864 return false
23865 }
23866 func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool {
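23867 // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem)
23868 // cond:
23869 // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem)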
23870 for {
23871 c := v.AuxInt
23872 sym := v.Aux
23873 mem := v.Args[3]
23874 ptr := v.Args[0]
23875 v_1 := v.Args[1]
23876 if v_1.Op != OpAMD64SHLQconst {
23877 break
23878 }
23879 if v_1.AuxInt != 1 {
23880 break
23881 }
23882 idx := v_1.Args[0]
23883 val := v.Args[2]
23884 v.reset(OpAMD64MOVWstoreidx2)
23885 v.AuxInt = c
23886 v.Aux = sym
23887 v.AddArg(ptr)
23888 v.AddArg(idx)
23889 v.AddArg(val)
23890 v.AddArg(mem)
23891 return true
23892 }
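23893 // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem)
23894 // cond: is32Bit(c+d)
23895 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)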
23896 for {
23897 c := v.AuxInt
23898 sym := v.Aux
23899 mem := v.Args[3]
23900 v_0 := v.Args[0]
23901 if v_0.Op != OpAMD64ADDQconst {
23902 break
23903 }
23904 d := v_0.AuxInt
23905 ptr := v_0.Args[0]
23906 idx := v.Args[1]
23907 val := v.Args[2]
23908 if !(is32Bit(c + d)) {
23909 break
23910 }
23911 v.reset(OpAMD64MOVWstoreidx1)
23912 v.AuxInt = c + d
23913 v.Aux = sym
23914 v.AddArg(ptr)
23915 v.AddArg(idx)
23916 v.AddArg(val)
23917 v.AddArg(mem)
23918 return true
23919 }
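23920 // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem)
23921 // cond: is32Bit(c+d)
23922 // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem)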
23923 for {
23924 c := v.AuxInt
23925 sym := v.Aux
23926 mem := v.Args[3]
23927 ptr := v.Args[0]
23928 v_1 := v.Args[1]
23929 if v_1.Op != OpAMD64ADDQconst {
23930 break
23931 }
23932 d := v_1.AuxInt
23933 idx := v_1.Args[0]
23934 val := v.Args[2]
23935 if !(is32Bit(c + d)) {
23936 break
23937 }
23938 v.reset(OpAMD64MOVWstoreidx1)
23939 v.AuxInt = c + d
23940 v.Aux = sym
23941 v.AddArg(ptr)
23942 v.AddArg(idx)
23943 v.AddArg(val)
23944 v.AddArg(mem)
23945 return true
23946 }
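23947 // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
23948 // cond: x.Uses == 1 && clobber(x)
23949 // result: (MOVLstoreidx1 [i-2] {s} p idx w mem)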
23950 for {
23951 i := v.AuxInt
23952 s := v.Aux
23953 _ = v.Args[3]
23954 p := v.Args[0]
23955 idx := v.Args[1]
23956 v_2 := v.Args[2]
23957 if v_2.Op != OpAMD64SHRLconst {
23958 break
23959 }
23960 if v_2.AuxInt != 16 {
23961 break
23962 }
23963 w := v_2.Args[0]
23964 x := v.Args[3]
23965 if x.Op != OpAMD64MOVWstoreidx1 {
23966 break
23967 }
23968 if x.AuxInt != i-2 {
23969 break
23970 }
23971 if x.Aux != s {
23972 break
23973 }
23974 mem := x.Args[3]
23975 if p != x.Args[0] {
23976 break
23977 }
23978 if idx != x.Args[1] {
23979 break
23980 }
23981 if w != x.Args[2] {
23982 break
23983 }
23984 if !(x.Uses == 1 && clobber(x)) {
23985 break
23986 }
23987 v.reset(OpAMD64MOVLstoreidx1)
23988 v.AuxInt = i - 2
23989 v.Aux = s
23990 v.AddArg(p)
23991 v.AddArg(idx)
23992 v.AddArg(w)
23993 v.AddArg(mem)
23994 return true
23995 }
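23996 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem))
23997 // cond: x.Uses == 1 && clobber(x)
23998 // result: (MOVLstoreidx1 [i-2] {s} p idx w mem)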
23999 for {
24000 i := v.AuxInt
24001 s := v.Aux
24002 _ = v.Args[3]
24003 p := v.Args[0]
24004 idx := v.Args[1]
24005 v_2 := v.Args[2]
24006 if v_2.Op != OpAMD64SHRQconst {
24007 break
24008 }
24009 if v_2.AuxInt != 16 {
24010 break
24011 }
24012 w := v_2.Args[0]
24013 x := v.Args[3]
24014 if x.Op != OpAMD64MOVWstoreidx1 {
24015 break
24016 }
24017 if x.AuxInt != i-2 {
24018 break
24019 }
24020 if x.Aux != s {
24021 break
24022 }
24023 mem := x.Args[3]
24024 if p != x.Args[0] {
24025 break
24026 }
24027 if idx != x.Args[1] {
24028 break
24029 }
24030 if w != x.Args[2] {
24031 break
24032 }
24033 if !(x.Uses == 1 && clobber(x)) {
24034 break
24035 }
24036 v.reset(OpAMD64MOVLstoreidx1)
24037 v.AuxInt = i - 2
24038 v.Aux = s
24039 v.AddArg(p)
24040 v.AddArg(idx)
24041 v.AddArg(w)
24042 v.AddArg(mem)
24043 return true
24044 }
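24045 // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem))
24046 // cond: x.Uses == 1 && clobber(x)
24047 // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)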
24048 for {
24049 i := v.AuxInt
24050 s := v.Aux
24051 _ = v.Args[3]
24052 p := v.Args[0]
24053 idx := v.Args[1]
24054 v_2 := v.Args[2]
24055 if v_2.Op != OpAMD64SHRLconst {
24056 break
24057 }
24058 j := v_2.AuxInt
24059 w := v_2.Args[0]
24060 x := v.Args[3]
24061 if x.Op != OpAMD64MOVWstoreidx1 {
24062 break
24063 }
24064 if x.AuxInt != i-2 {
24065 break
24066 }
24067 if x.Aux != s {
24068 break
24069 }
24070 mem := x.Args[3]
24071 if p != x.Args[0] {
24072 break
24073 }
24074 if idx != x.Args[1] {
24075 break
24076 }
24077 w0 := x.Args[2]
24078 if w0.Op != OpAMD64SHRLconst {
24079 break
24080 }
24081 if w0.AuxInt != j-16 {
24082 break
24083 }
24084 if w != w0.Args[0] {
24085 break
24086 }
24087 if !(x.Uses == 1 && clobber(x)) {
24088 break
24089 }
24090 v.reset(OpAMD64MOVLstoreidx1)
24091 v.AuxInt = i - 2
24092 v.Aux = s
24093 v.AddArg(p)
24094 v.AddArg(idx)
24095 v.AddArg(w0)
24096 v.AddArg(mem)
24097 return true
24098 }
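24099 // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
24100 // cond: x.Uses == 1 && clobber(x)
24101 // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem)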
24102 for {
24103 i := v.AuxInt
24104 s := v.Aux
24105 _ = v.Args[3]
24106 p := v.Args[0]
24107 idx := v.Args[1]
24108 v_2 := v.Args[2]
24109 if v_2.Op != OpAMD64SHRQconst {
24110 break
24111 }
24112 j := v_2.AuxInt
24113 w := v_2.Args[0]
24114 x := v.Args[3]
24115 if x.Op != OpAMD64MOVWstoreidx1 {
24116 break
24117 }
24118 if x.AuxInt != i-2 {
24119 break
24120 }
24121 if x.Aux != s {
24122 break
24123 }
24124 mem := x.Args[3]
24125 if p != x.Args[0] {
24126 break
24127 }
24128 if idx != x.Args[1] {
24129 break
24130 }
24131 w0 := x.Args[2]
24132 if w0.Op != OpAMD64SHRQconst {
24133 break
24134 }
24135 if w0.AuxInt != j-16 {
24136 break
24137 }
24138 if w != w0.Args[0] {
24139 break
24140 }
24141 if !(x.Uses == 1 && clobber(x)) {
24142 break
24143 }
24144 v.reset(OpAMD64MOVLstoreidx1)
24145 v.AuxInt = i - 2
24146 v.Aux = s
24147 v.AddArg(p)
24148 v.AddArg(idx)
24149 v.AddArg(w0)
24150 v.AddArg(mem)
24151 return true
24152 }
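24153 // match: (MOVWstoreidx1 [i] {s} p (MOVQconst [c]) w mem)
24154 // cond: is32Bit(i+c)
24155 // result: (MOVWstore [i+c] {s} p w mem)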
24156 for {
24157 i := v.AuxInt
24158 s := v.Aux
24159 mem := v.Args[3]
24160 p := v.Args[0]
24161 v_1 := v.Args[1]
24162 if v_1.Op != OpAMD64MOVQconst {
24163 break
24164 }
24165 c := v_1.AuxInt
24166 w := v.Args[2]
24167 if !(is32Bit(i + c)) {
24168 break
24169 }
24170 v.reset(OpAMD64MOVWstore)
24171 v.AuxInt = i + c
24172 v.Aux = s
24173 v.AddArg(p)
24174 v.AddArg(w)
24175 v.AddArg(mem)
24176 return true
24177 }
24178 return false
24179 }
24180 func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool {
24181 b := v.Block
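24182 // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem)
24183 // cond: is32Bit(c+d)
24184 // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem)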
24185 for {
24186 c := v.AuxInt
24187 sym := v.Aux
24188 mem := v.Args[3]
24189 v_0 := v.Args[0]
24190 if v_0.Op != OpAMD64ADDQconst {
24191 break
24192 }
24193 d := v_0.AuxInt
24194 ptr := v_0.Args[0]
24195 idx := v.Args[1]
24196 val := v.Args[2]
24197 if !(is32Bit(c + d)) {
24198 break
24199 }
24200 v.reset(OpAMD64MOVWstoreidx2)
24201 v.AuxInt = c + d
24202 v.Aux = sym
24203 v.AddArg(ptr)
24204 v.AddArg(idx)
24205 v.AddArg(val)
24206 v.AddArg(mem)
24207 return true
24208 }
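24209 // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem)
24210 // cond: is32Bit(c+2*d)
24211 // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem)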
24212 for {
24213 c := v.AuxInt
24214 sym := v.Aux
24215 mem := v.Args[3]
24216 ptr := v.Args[0]
24217 v_1 := v.Args[1]
24218 if v_1.Op != OpAMD64ADDQconst {
24219 break
24220 }
24221 d := v_1.AuxInt
24222 idx := v_1.Args[0]
24223 val := v.Args[2]
24224 if !(is32Bit(c + 2*d)) {
24225 break
24226 }
24227 v.reset(OpAMD64MOVWstoreidx2)
24228 v.AuxInt = c + 2*d
24229 v.Aux = sym
24230 v.AddArg(ptr)
24231 v.AddArg(idx)
24232 v.AddArg(val)
24233 v.AddArg(mem)
24234 return true
24235 }
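24236 // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
24237 // cond: x.Uses == 1 && clobber(x)
24238 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)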
24239 for {
24240 i := v.AuxInt
24241 s := v.Aux
24242 _ = v.Args[3]
24243 p := v.Args[0]
24244 idx := v.Args[1]
24245 v_2 := v.Args[2]
24246 if v_2.Op != OpAMD64SHRLconst {
24247 break
24248 }
24249 if v_2.AuxInt != 16 {
24250 break
24251 }
24252 w := v_2.Args[0]
24253 x := v.Args[3]
24254 if x.Op != OpAMD64MOVWstoreidx2 {
24255 break
24256 }
24257 if x.AuxInt != i-2 {
24258 break
24259 }
24260 if x.Aux != s {
24261 break
24262 }
24263 mem := x.Args[3]
24264 if p != x.Args[0] {
24265 break
24266 }
24267 if idx != x.Args[1] {
24268 break
24269 }
24270 if w != x.Args[2] {
24271 break
24272 }
24273 if !(x.Uses == 1 && clobber(x)) {
24274 break
24275 }
24276 v.reset(OpAMD64MOVLstoreidx1)
24277 v.AuxInt = i - 2
24278 v.Aux = s
24279 v.AddArg(p)
24280 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
24281 v0.AuxInt = 1
24282 v0.AddArg(idx)
24283 v.AddArg(v0)
24284 v.AddArg(w)
24285 v.AddArg(mem)
24286 return true
24287 }
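24288 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem))
24289 // cond: x.Uses == 1 && clobber(x)
24290 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w mem)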
24291 for {
24292 i := v.AuxInt
24293 s := v.Aux
24294 _ = v.Args[3]
24295 p := v.Args[0]
24296 idx := v.Args[1]
24297 v_2 := v.Args[2]
24298 if v_2.Op != OpAMD64SHRQconst {
24299 break
24300 }
24301 if v_2.AuxInt != 16 {
24302 break
24303 }
24304 w := v_2.Args[0]
24305 x := v.Args[3]
24306 if x.Op != OpAMD64MOVWstoreidx2 {
24307 break
24308 }
24309 if x.AuxInt != i-2 {
24310 break
24311 }
24312 if x.Aux != s {
24313 break
24314 }
24315 mem := x.Args[3]
24316 if p != x.Args[0] {
24317 break
24318 }
24319 if idx != x.Args[1] {
24320 break
24321 }
24322 if w != x.Args[2] {
24323 break
24324 }
24325 if !(x.Uses == 1 && clobber(x)) {
24326 break
24327 }
24328 v.reset(OpAMD64MOVLstoreidx1)
24329 v.AuxInt = i - 2
24330 v.Aux = s
24331 v.AddArg(p)
24332 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
24333 v0.AuxInt = 1
24334 v0.AddArg(idx)
24335 v.AddArg(v0)
24336 v.AddArg(w)
24337 v.AddArg(mem)
24338 return true
24339 }
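24340 // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem))
24341 // cond: x.Uses == 1 && clobber(x)
24342 // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst <idx.Type> [1] idx) w0 mem)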
24343 for {
24344 i := v.AuxInt
24345 s := v.Aux
24346 _ = v.Args[3]
24347 p := v.Args[0]
24348 idx := v.Args[1]
24349 v_2 := v.Args[2]
24350 if v_2.Op != OpAMD64SHRQconst {
24351 break
24352 }
24353 j := v_2.AuxInt
24354 w := v_2.Args[0]
24355 x := v.Args[3]
24356 if x.Op != OpAMD64MOVWstoreidx2 {
24357 break
24358 }
24359 if x.AuxInt != i-2 {
24360 break
24361 }
24362 if x.Aux != s {
24363 break
24364 }
24365 mem := x.Args[3]
24366 if p != x.Args[0] {
24367 break
24368 }
24369 if idx != x.Args[1] {
24370 break
24371 }
24372 w0 := x.Args[2]
24373 if w0.Op != OpAMD64SHRQconst {
24374 break
24375 }
24376 if w0.AuxInt != j-16 {
24377 break
24378 }
24379 if w != w0.Args[0] {
24380 break
24381 }
24382 if !(x.Uses == 1 && clobber(x)) {
24383 break
24384 }
24385 v.reset(OpAMD64MOVLstoreidx1)
24386 v.AuxInt = i - 2
24387 v.Aux = s
24388 v.AddArg(p)
24389 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type)
24390 v0.AuxInt = 1
24391 v0.AddArg(idx)
24392 v.AddArg(v0)
24393 v.AddArg(w0)
24394 v.AddArg(mem)
24395 return true
24396 }
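24397 // match: (MOVWstoreidx2 [i] {s} p (MOVQconst [c]) w mem)
24398 // cond: is32Bit(i+2*c)
24399 // result: (MOVWstore [i+2*c] {s} p w mem)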
24400 for {
24401 i := v.AuxInt
24402 s := v.Aux
24403 mem := v.Args[3]
24404 p := v.Args[0]
24405 v_1 := v.Args[1]
24406 if v_1.Op != OpAMD64MOVQconst {
24407 break
24408 }
24409 c := v_1.AuxInt
24410 w := v.Args[2]
24411 if !(is32Bit(i + 2*c)) {
24412 break
24413 }
24414 v.reset(OpAMD64MOVWstore)
24415 v.AuxInt = i + 2*c
24416 v.Aux = s
24417 v.AddArg(p)
24418 v.AddArg(w)
24419 v.AddArg(mem)
24420 return true
24421 }
24422 return false
24423 }
24424 func rewriteValueAMD64_OpAMD64MULL_0(v *Value) bool {
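24425 // match: (MULL x (MOVLconst [c]))
24426 // cond:
24427 // result: (MULLconst [c] x)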
24428 for {
24429 _ = v.Args[1]
24430 x := v.Args[0]
24431 v_1 := v.Args[1]
24432 if v_1.Op != OpAMD64MOVLconst {
24433 break
24434 }
24435 c := v_1.AuxInt
24436 v.reset(OpAMD64MULLconst)
24437 v.AuxInt = c
24438 v.AddArg(x)
24439 return true
24440 }
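24441 // match: (MULL (MOVLconst [c]) x)
24442 // cond:
24443 // result: (MULLconst [c] x)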
24444 for {
24445 x := v.Args[1]
24446 v_0 := v.Args[0]
24447 if v_0.Op != OpAMD64MOVLconst {
24448 break
24449 }
24450 c := v_0.AuxInt
24451 v.reset(OpAMD64MULLconst)
24452 v.AuxInt = c
24453 v.AddArg(x)
24454 return true
24455 }
24456 return false
24457 }
24458 func rewriteValueAMD64_OpAMD64MULLconst_0(v *Value) bool {
24459 b := v.Block
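24460 // match: (MULLconst [c] (MULLconst [d] x))
24461 // cond:
24462 // result: (MULLconst [int64(int32(c*d))] x)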
24463 for {
24464 c := v.AuxInt
24465 v_0 := v.Args[0]
24466 if v_0.Op != OpAMD64MULLconst {
24467 break
24468 }
24469 d := v_0.AuxInt
24470 x := v_0.Args[0]
24471 v.reset(OpAMD64MULLconst)
24472 v.AuxInt = int64(int32(c * d))
24473 v.AddArg(x)
24474 return true
24475 }
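24476 // match: (MULLconst [-9] x)
24477 // cond:
24478 // result: (NEGL (LEAL8 <v.Type> x x))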
24479 for {
24480 if v.AuxInt != -9 {
24481 break
24482 }
24483 x := v.Args[0]
24484 v.reset(OpAMD64NEGL)
24485 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
24486 v0.AddArg(x)
24487 v0.AddArg(x)
24488 v.AddArg(v0)
24489 return true
24490 }
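24491 // match: (MULLconst [-5] x)
24492 // cond:
24493 // result: (NEGL (LEAL4 <v.Type> x x))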
24494 for {
24495 if v.AuxInt != -5 {
24496 break
24497 }
24498 x := v.Args[0]
24499 v.reset(OpAMD64NEGL)
24500 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
24501 v0.AddArg(x)
24502 v0.AddArg(x)
24503 v.AddArg(v0)
24504 return true
24505 }
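24506 // match: (MULLconst [-3] x)
24507 // cond:
24508 // result: (NEGL (LEAL2 <v.Type> x x))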
24509 for {
24510 if v.AuxInt != -3 {
24511 break
24512 }
24513 x := v.Args[0]
24514 v.reset(OpAMD64NEGL)
24515 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
24516 v0.AddArg(x)
24517 v0.AddArg(x)
24518 v.AddArg(v0)
24519 return true
24520 }
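24521 // match: (MULLconst [-1] x)
24522 // cond:
24523 // result: (NEGL x)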
24524 for {
24525 if v.AuxInt != -1 {
24526 break
24527 }
24528 x := v.Args[0]
24529 v.reset(OpAMD64NEGL)
24530 v.AddArg(x)
24531 return true
24532 }
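24533 // match: (MULLconst [0] _)
24534 // cond:
24535 // result: (MOVLconst [0])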
24536 for {
24537 if v.AuxInt != 0 {
24538 break
24539 }
24540 v.reset(OpAMD64MOVLconst)
24541 v.AuxInt = 0
24542 return true
24543 }
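24544 // match: (MULLconst [1] x)
24545 // cond:
24546 // result: x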
24547 for {
24548 if v.AuxInt != 1 {
24549 break
24550 }
24551 x := v.Args[0]
24552 v.reset(OpCopy)
24553 v.Type = x.Type
24554 v.AddArg(x)
24555 return true
24556 }
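24557 // match: (MULLconst [3] x)
24558 // cond:
24559 // result: (LEAL2 x x)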
24560 for {
24561 if v.AuxInt != 3 {
24562 break
24563 }
24564 x := v.Args[0]
24565 v.reset(OpAMD64LEAL2)
24566 v.AddArg(x)
24567 v.AddArg(x)
24568 return true
24569 }
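24570 // match: (MULLconst [5] x)
24571 // cond:
24572 // result: (LEAL4 x x)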
24573 for {
24574 if v.AuxInt != 5 {
24575 break
24576 }
24577 x := v.Args[0]
24578 v.reset(OpAMD64LEAL4)
24579 v.AddArg(x)
24580 v.AddArg(x)
24581 return true
24582 }
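24583 // match: (MULLconst [7] x)
24584 // cond:
24585 // result: (LEAL2 x (LEAL2 <v.Type> x x))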
24586 for {
24587 if v.AuxInt != 7 {
24588 break
24589 }
24590 x := v.Args[0]
24591 v.reset(OpAMD64LEAL2)
24592 v.AddArg(x)
24593 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
24594 v0.AddArg(x)
24595 v0.AddArg(x)
24596 v.AddArg(v0)
24597 return true
24598 }
24599 return false
24600 }
24601 func rewriteValueAMD64_OpAMD64MULLconst_10(v *Value) bool {
24602 b := v.Block
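24603 // match: (MULLconst [9] x)
24604 // cond:
24605 // result: (LEAL8 x x)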
24606 for {
24607 if v.AuxInt != 9 {
24608 break
24609 }
24610 x := v.Args[0]
24611 v.reset(OpAMD64LEAL8)
24612 v.AddArg(x)
24613 v.AddArg(x)
24614 return true
24615 }
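24616 // match: (MULLconst [11] x)
24617 // cond:
24618 // result: (LEAL2 x (LEAL4 <v.Type> x x))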
24619 for {
24620 if v.AuxInt != 11 {
24621 break
24622 }
24623 x := v.Args[0]
24624 v.reset(OpAMD64LEAL2)
24625 v.AddArg(x)
24626 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
24627 v0.AddArg(x)
24628 v0.AddArg(x)
24629 v.AddArg(v0)
24630 return true
24631 }
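24632 // match: (MULLconst [13] x)
24633 // cond:
24634 // result: (LEAL4 x (LEAL2 <v.Type> x x))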
24635 for {
24636 if v.AuxInt != 13 {
24637 break
24638 }
24639 x := v.Args[0]
24640 v.reset(OpAMD64LEAL4)
24641 v.AddArg(x)
24642 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
24643 v0.AddArg(x)
24644 v0.AddArg(x)
24645 v.AddArg(v0)
24646 return true
24647 }
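24648 // match: (MULLconst [19] x)
24649 // cond:
24650 // result: (LEAL2 x (LEAL8 <v.Type> x x))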
24651 for {
24652 if v.AuxInt != 19 {
24653 break
24654 }
24655 x := v.Args[0]
24656 v.reset(OpAMD64LEAL2)
24657 v.AddArg(x)
24658 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
24659 v0.AddArg(x)
24660 v0.AddArg(x)
24661 v.AddArg(v0)
24662 return true
24663 }
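24664 // match: (MULLconst [21] x)
24665 // cond:
24666 // result: (LEAL4 x (LEAL4 <v.Type> x x))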
24667 for {
24668 if v.AuxInt != 21 {
24669 break
24670 }
24671 x := v.Args[0]
24672 v.reset(OpAMD64LEAL4)
24673 v.AddArg(x)
24674 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
24675 v0.AddArg(x)
24676 v0.AddArg(x)
24677 v.AddArg(v0)
24678 return true
24679 }
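24680 // match: (MULLconst [25] x)
24681 // cond:
24682 // result: (LEAL8 x (LEAL2 <v.Type> x x))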
24683 for {
24684 if v.AuxInt != 25 {
24685 break
24686 }
24687 x := v.Args[0]
24688 v.reset(OpAMD64LEAL8)
24689 v.AddArg(x)
24690 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
24691 v0.AddArg(x)
24692 v0.AddArg(x)
24693 v.AddArg(v0)
24694 return true
24695 }
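24696 // match: (MULLconst [27] x)
24697 // cond:
24698 // result: (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x))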
24699 for {
24700 if v.AuxInt != 27 {
24701 break
24702 }
24703 x := v.Args[0]
24704 v.reset(OpAMD64LEAL8)
24705 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
24706 v0.AddArg(x)
24707 v0.AddArg(x)
24708 v.AddArg(v0)
24709 v1 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
24710 v1.AddArg(x)
24711 v1.AddArg(x)
24712 v.AddArg(v1)
24713 return true
24714 }
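24715 // match: (MULLconst [37] x)
24716 // cond:
24717 // result: (LEAL4 x (LEAL8 <v.Type> x x))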
24718 for {
24719 if v.AuxInt != 37 {
24720 break
24721 }
24722 x := v.Args[0]
24723 v.reset(OpAMD64LEAL4)
24724 v.AddArg(x)
24725 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
24726 v0.AddArg(x)
24727 v0.AddArg(x)
24728 v.AddArg(v0)
24729 return true
24730 }
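24731 // match: (MULLconst [41] x)
24732 // cond:
24733 // result: (LEAL8 x (LEAL4 <v.Type> x x))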
24734 for {
24735 if v.AuxInt != 41 {
24736 break
24737 }
24738 x := v.Args[0]
24739 v.reset(OpAMD64LEAL8)
24740 v.AddArg(x)
24741 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
24742 v0.AddArg(x)
24743 v0.AddArg(x)
24744 v.AddArg(v0)
24745 return true
24746 }
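24747 // match: (MULLconst [45] x)
24748 // cond:
24749 // result: (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x))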
24750 for {
24751 if v.AuxInt != 45 {
24752 break
24753 }
24754 x := v.Args[0]
24755 v.reset(OpAMD64LEAL8)
24756 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
24757 v0.AddArg(x)
24758 v0.AddArg(x)
24759 v.AddArg(v0)
24760 v1 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
24761 v1.AddArg(x)
24762 v1.AddArg(x)
24763 v.AddArg(v1)
24764 return true
24765 }
24766 return false
24767 }
24768 func rewriteValueAMD64_OpAMD64MULLconst_20(v *Value) bool {
24769 b := v.Block
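24770 // match: (MULLconst [73] x)
24771 // cond:
24772 // result: (LEAL8 x (LEAL8 <v.Type> x x))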
24773 for {
24774 if v.AuxInt != 73 {
24775 break
24776 }
24777 x := v.Args[0]
24778 v.reset(OpAMD64LEAL8)
24779 v.AddArg(x)
24780 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
24781 v0.AddArg(x)
24782 v0.AddArg(x)
24783 v.AddArg(v0)
24784 return true
24785 }
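24786 // match: (MULLconst [81] x)
24787 // cond:
24788 // result: (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x))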
24789 for {
24790 if v.AuxInt != 81 {
24791 break
24792 }
24793 x := v.Args[0]
24794 v.reset(OpAMD64LEAL8)
24795 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
24796 v0.AddArg(x)
24797 v0.AddArg(x)
24798 v.AddArg(v0)
24799 v1 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
24800 v1.AddArg(x)
24801 v1.AddArg(x)
24802 v.AddArg(v1)
24803 return true
24804 }
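24805 // match: (MULLconst [c] x)
24806 // cond: isPowerOfTwo(c+1) && c >= 15
24807 // result: (SUBL (SHLLconst <v.Type> [log2(c+1)] x) x)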
24808 for {
24809 c := v.AuxInt
24810 x := v.Args[0]
24811 if !(isPowerOfTwo(c+1) && c >= 15) {
24812 break
24813 }
24814 v.reset(OpAMD64SUBL)
24815 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
24816 v0.AuxInt = log2(c + 1)
24817 v0.AddArg(x)
24818 v.AddArg(v0)
24819 v.AddArg(x)
24820 return true
24821 }
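24822 // match: (MULLconst [c] x)
24823 // cond: isPowerOfTwo(c-1) && c >= 17
24824 // result: (LEAL1 (SHLLconst <v.Type> [log2(c-1)] x) x)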
24825 for {
24826 c := v.AuxInt
24827 x := v.Args[0]
24828 if !(isPowerOfTwo(c-1) && c >= 17) {
24829 break
24830 }
24831 v.reset(OpAMD64LEAL1)
24832 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
24833 v0.AuxInt = log2(c - 1)
24834 v0.AddArg(x)
24835 v.AddArg(v0)
24836 v.AddArg(x)
24837 return true
24838 }
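24839 // match: (MULLconst [c] x)
24840 // cond: isPowerOfTwo(c-2) && c >= 34
24841 // result: (LEAL2 (SHLLconst <v.Type> [log2(c-2)] x) x)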
24842 for {
24843 c := v.AuxInt
24844 x := v.Args[0]
24845 if !(isPowerOfTwo(c-2) && c >= 34) {
24846 break
24847 }
24848 v.reset(OpAMD64LEAL2)
24849 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
24850 v0.AuxInt = log2(c - 2)
24851 v0.AddArg(x)
24852 v.AddArg(v0)
24853 v.AddArg(x)
24854 return true
24855 }
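24856 // match: (MULLconst [c] x)
24857 // cond: isPowerOfTwo(c-4) && c >= 68
24858 // result: (LEAL4 (SHLLconst <v.Type> [log2(c-4)] x) x)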
24859 for {
24860 c := v.AuxInt
24861 x := v.Args[0]
24862 if !(isPowerOfTwo(c-4) && c >= 68) {
24863 break
24864 }
24865 v.reset(OpAMD64LEAL4)
24866 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
24867 v0.AuxInt = log2(c - 4)
24868 v0.AddArg(x)
24869 v.AddArg(v0)
24870 v.AddArg(x)
24871 return true
24872 }
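24873 // match: (MULLconst [c] x)
24874 // cond: isPowerOfTwo(c-8) && c >= 136
24875 // result: (LEAL8 (SHLLconst <v.Type> [log2(c-8)] x) x)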
24876 for {
24877 c := v.AuxInt
24878 x := v.Args[0]
24879 if !(isPowerOfTwo(c-8) && c >= 136) {
24880 break
24881 }
24882 v.reset(OpAMD64LEAL8)
24883 v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
24884 v0.AuxInt = log2(c - 8)
24885 v0.AddArg(x)
24886 v.AddArg(v0)
24887 v.AddArg(x)
24888 return true
24889 }
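24890 // match: (MULLconst [c] x)
24891 // cond: c%3 == 0 && isPowerOfTwo(c/3)
24892 // result: (SHLLconst [log2(c/3)] (LEAL2 <v.Type> x x))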
24893 for {
24894 c := v.AuxInt
24895 x := v.Args[0]
24896 if !(c%3 == 0 && isPowerOfTwo(c/3)) {
24897 break
24898 }
24899 v.reset(OpAMD64SHLLconst)
24900 v.AuxInt = log2(c / 3)
24901 v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
24902 v0.AddArg(x)
24903 v0.AddArg(x)
24904 v.AddArg(v0)
24905 return true
24906 }
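24907 // match: (MULLconst [c] x)
24908 // cond: c%5 == 0 && isPowerOfTwo(c/5)
24909 // result: (SHLLconst [log2(c/5)] (LEAL4 <v.Type> x x))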
24910 for {
24911 c := v.AuxInt
24912 x := v.Args[0]
24913 if !(c%5 == 0 && isPowerOfTwo(c/5)) {
24914 break
24915 }
24916 v.reset(OpAMD64SHLLconst)
24917 v.AuxInt = log2(c / 5)
24918 v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
24919 v0.AddArg(x)
24920 v0.AddArg(x)
24921 v.AddArg(v0)
24922 return true
24923 }
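24924 // match: (MULLconst [c] x)
24925 // cond: c%9 == 0 && isPowerOfTwo(c/9)
24926 // result: (SHLLconst [log2(c/9)] (LEAL8 <v.Type> x x))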
24927 for {
24928 c := v.AuxInt
24929 x := v.Args[0]
24930 if !(c%9 == 0 && isPowerOfTwo(c/9)) {
24931 break
24932 }
24933 v.reset(OpAMD64SHLLconst)
24934 v.AuxInt = log2(c / 9)
24935 v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
24936 v0.AddArg(x)
24937 v0.AddArg(x)
24938 v.AddArg(v0)
24939 return true
24940 }
24941 return false
24942 }
24943 func rewriteValueAMD64_OpAMD64MULLconst_30(v *Value) bool {
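24944 // match: (MULLconst [c] (MOVLconst [d]))
24945 // cond:
24946 // result: (MOVLconst [int64(int32(c*d))])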
24947 for {
24948 c := v.AuxInt
24949 v_0 := v.Args[0]
24950 if v_0.Op != OpAMD64MOVLconst {
24951 break
24952 }
24953 d := v_0.AuxInt
24954 v.reset(OpAMD64MOVLconst)
24955 v.AuxInt = int64(int32(c * d))
24956 return true
24957 }
24958 return false
24959 }
24960 func rewriteValueAMD64_OpAMD64MULQ_0(v *Value) bool {
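24961 // match: (MULQ x (MOVQconst [c]))
24962 // cond: is32Bit(c)
24963 // result: (MULQconst [c] x)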
24964 for {
24965 _ = v.Args[1]
24966 x := v.Args[0]
24967 v_1 := v.Args[1]
24968 if v_1.Op != OpAMD64MOVQconst {
24969 break
24970 }
24971 c := v_1.AuxInt
24972 if !(is32Bit(c)) {
24973 break
24974 }
24975 v.reset(OpAMD64MULQconst)
24976 v.AuxInt = c
24977 v.AddArg(x)
24978 return true
24979 }
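24980 // match: (MULQ (MOVQconst [c]) x)
24981 // cond: is32Bit(c)
24982 // result: (MULQconst [c] x)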
24983 for {
24984 x := v.Args[1]
24985 v_0 := v.Args[0]
24986 if v_0.Op != OpAMD64MOVQconst {
24987 break
24988 }
24989 c := v_0.AuxInt
24990 if !(is32Bit(c)) {
24991 break
24992 }
24993 v.reset(OpAMD64MULQconst)
24994 v.AuxInt = c
24995 v.AddArg(x)
24996 return true
24997 }
24998 return false
24999 }
25000 func rewriteValueAMD64_OpAMD64MULQconst_0(v *Value) bool {
25001 b := v.Block
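25002 // match: (MULQconst [c] (MULQconst [d] x))
25003 // cond: is32Bit(c*d)
25004 // result: (MULQconst [c*d] x)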
25005 for {
25006 c := v.AuxInt
25007 v_0 := v.Args[0]
25008 if v_0.Op != OpAMD64MULQconst {
25009 break
25010 }
25011 d := v_0.AuxInt
25012 x := v_0.Args[0]
25013 if !(is32Bit(c * d)) {
25014 break
25015 }
25016 v.reset(OpAMD64MULQconst)
25017 v.AuxInt = c * d
25018 v.AddArg(x)
25019 return true
25020 }
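25021 // match: (MULQconst [-9] x)
25022 // cond:
25023 // result: (NEGQ (LEAQ8 <v.Type> x x))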
25024 for {
25025 if v.AuxInt != -9 {
25026 break
25027 }
25028 x := v.Args[0]
25029 v.reset(OpAMD64NEGQ)
25030 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
25031 v0.AddArg(x)
25032 v0.AddArg(x)
25033 v.AddArg(v0)
25034 return true
25035 }
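25036 // match: (MULQconst [-5] x)
25037 // cond:
25038 // result: (NEGQ (LEAQ4 <v.Type> x x))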
25039 for {
25040 if v.AuxInt != -5 {
25041 break
25042 }
25043 x := v.Args[0]
25044 v.reset(OpAMD64NEGQ)
25045 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
25046 v0.AddArg(x)
25047 v0.AddArg(x)
25048 v.AddArg(v0)
25049 return true
25050 }
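25051 // match: (MULQconst [-3] x)
25052 // cond:
25053 // result: (NEGQ (LEAQ2 <v.Type> x x))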
25054 for {
25055 if v.AuxInt != -3 {
25056 break
25057 }
25058 x := v.Args[0]
25059 v.reset(OpAMD64NEGQ)
25060 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
25061 v0.AddArg(x)
25062 v0.AddArg(x)
25063 v.AddArg(v0)
25064 return true
25065 }
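25066 // match: (MULQconst [-1] x)
25067 // cond:
25068 // result: (NEGQ x)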
25069 for {
25070 if v.AuxInt != -1 {
25071 break
25072 }
25073 x := v.Args[0]
25074 v.reset(OpAMD64NEGQ)
25075 v.AddArg(x)
25076 return true
25077 }
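25078 // match: (MULQconst [0] _)
25079 // cond:
25080 // result: (MOVQconst [0])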
25081 for {
25082 if v.AuxInt != 0 {
25083 break
25084 }
25085 v.reset(OpAMD64MOVQconst)
25086 v.AuxInt = 0
25087 return true
25088 }
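25089 // match: (MULQconst [1] x)
25090 // cond:
25091 // result: x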
25092 for {
25093 if v.AuxInt != 1 {
25094 break
25095 }
25096 x := v.Args[0]
25097 v.reset(OpCopy)
25098 v.Type = x.Type
25099 v.AddArg(x)
25100 return true
25101 }
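25102 // match: (MULQconst [3] x)
25103 // cond:
25104 // result: (LEAQ2 x x)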
25105 for {
25106 if v.AuxInt != 3 {
25107 break
25108 }
25109 x := v.Args[0]
25110 v.reset(OpAMD64LEAQ2)
25111 v.AddArg(x)
25112 v.AddArg(x)
25113 return true
25114 }
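25115 // match: (MULQconst [5] x)
25116 // cond:
25117 // result: (LEAQ4 x x)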
25118 for {
25119 if v.AuxInt != 5 {
25120 break
25121 }
25122 x := v.Args[0]
25123 v.reset(OpAMD64LEAQ4)
25124 v.AddArg(x)
25125 v.AddArg(x)
25126 return true
25127 }
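25128 // match: (MULQconst [7] x)
25129 // cond:
25130 // result: (LEAQ2 x (LEAQ2 <v.Type> x x))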
25131 for {
25132 if v.AuxInt != 7 {
25133 break
25134 }
25135 x := v.Args[0]
25136 v.reset(OpAMD64LEAQ2)
25137 v.AddArg(x)
25138 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
25139 v0.AddArg(x)
25140 v0.AddArg(x)
25141 v.AddArg(v0)
25142 return true
25143 }
25144 return false
25145 }
25146 func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool {
25147 b := v.Block
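25148 // match: (MULQconst [9] x)
25149 // cond:
25150 // result: (LEAQ8 x x)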
25151 for {
25152 if v.AuxInt != 9 {
25153 break
25154 }
25155 x := v.Args[0]
25156 v.reset(OpAMD64LEAQ8)
25157 v.AddArg(x)
25158 v.AddArg(x)
25159 return true
25160 }
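25161 // match: (MULQconst [11] x)
25162 // cond:
25163 // result: (LEAQ2 x (LEAQ4 <v.Type> x x))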
25164 for {
25165 if v.AuxInt != 11 {
25166 break
25167 }
25168 x := v.Args[0]
25169 v.reset(OpAMD64LEAQ2)
25170 v.AddArg(x)
25171 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
25172 v0.AddArg(x)
25173 v0.AddArg(x)
25174 v.AddArg(v0)
25175 return true
25176 }
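25177 // match: (MULQconst [13] x)
25178 // cond:
25179 // result: (LEAQ4 x (LEAQ2 <v.Type> x x))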
25180 for {
25181 if v.AuxInt != 13 {
25182 break
25183 }
25184 x := v.Args[0]
25185 v.reset(OpAMD64LEAQ4)
25186 v.AddArg(x)
25187 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
25188 v0.AddArg(x)
25189 v0.AddArg(x)
25190 v.AddArg(v0)
25191 return true
25192 }
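25193 // match: (MULQconst [19] x)
25194 // cond:
25195 // result: (LEAQ2 x (LEAQ8 <v.Type> x x))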
25196 for {
25197 if v.AuxInt != 19 {
25198 break
25199 }
25200 x := v.Args[0]
25201 v.reset(OpAMD64LEAQ2)
25202 v.AddArg(x)
25203 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
25204 v0.AddArg(x)
25205 v0.AddArg(x)
25206 v.AddArg(v0)
25207 return true
25208 }
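25209 // match: (MULQconst [21] x)
25210 // cond:
25211 // result: (LEAQ4 x (LEAQ4 <v.Type> x x))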
25212 for {
25213 if v.AuxInt != 21 {
25214 break
25215 }
25216 x := v.Args[0]
25217 v.reset(OpAMD64LEAQ4)
25218 v.AddArg(x)
25219 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
25220 v0.AddArg(x)
25221 v0.AddArg(x)
25222 v.AddArg(v0)
25223 return true
25224 }
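25225 // match: (MULQconst [25] x)
25226 // cond:
25227 // result: (LEAQ8 x (LEAQ2 <v.Type> x x))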
25228 for {
25229 if v.AuxInt != 25 {
25230 break
25231 }
25232 x := v.Args[0]
25233 v.reset(OpAMD64LEAQ8)
25234 v.AddArg(x)
25235 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
25236 v0.AddArg(x)
25237 v0.AddArg(x)
25238 v.AddArg(v0)
25239 return true
25240 }
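25241 // match: (MULQconst [27] x)
25242 // cond:
25243 // result: (LEAQ8 (LEAQ2 <v.Type> x x) (LEAQ2 <v.Type> x x))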
25244 for {
25245 if v.AuxInt != 27 {
25246 break
25247 }
25248 x := v.Args[0]
25249 v.reset(OpAMD64LEAQ8)
25250 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
25251 v0.AddArg(x)
25252 v0.AddArg(x)
25253 v.AddArg(v0)
25254 v1 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
25255 v1.AddArg(x)
25256 v1.AddArg(x)
25257 v.AddArg(v1)
25258 return true
25259 }
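25260 // match: (MULQconst [37] x)
25261 // cond:
25262 // result: (LEAQ4 x (LEAQ8 <v.Type> x x))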
25263 for {
25264 if v.AuxInt != 37 {
25265 break
25266 }
25267 x := v.Args[0]
25268 v.reset(OpAMD64LEAQ4)
25269 v.AddArg(x)
25270 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
25271 v0.AddArg(x)
25272 v0.AddArg(x)
25273 v.AddArg(v0)
25274 return true
25275 }
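25276 // match: (MULQconst [41] x)
25277 // cond:
25278 // result: (LEAQ8 x (LEAQ4 <v.Type> x x))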
25279 for {
25280 if v.AuxInt != 41 {
25281 break
25282 }
25283 x := v.Args[0]
25284 v.reset(OpAMD64LEAQ8)
25285 v.AddArg(x)
25286 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
25287 v0.AddArg(x)
25288 v0.AddArg(x)
25289 v.AddArg(v0)
25290 return true
25291 }
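25292 // match: (MULQconst [45] x)
25293 // cond:
25294 // result: (LEAQ8 (LEAQ4 <v.Type> x x) (LEAQ4 <v.Type> x x))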
25295 for {
25296 if v.AuxInt != 45 {
25297 break
25298 }
25299 x := v.Args[0]
25300 v.reset(OpAMD64LEAQ8)
25301 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
25302 v0.AddArg(x)
25303 v0.AddArg(x)
25304 v.AddArg(v0)
25305 v1 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
25306 v1.AddArg(x)
25307 v1.AddArg(x)
25308 v.AddArg(v1)
25309 return true
25310 }
25311 return false
25312 }
25313 func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool {
25314 b := v.Block
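25315 // match: (MULQconst [73] x)
25316 // cond:
25317 // result: (LEAQ8 x (LEAQ8 <v.Type> x x))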
25318 for {
25319 if v.AuxInt != 73 {
25320 break
25321 }
25322 x := v.Args[0]
25323 v.reset(OpAMD64LEAQ8)
25324 v.AddArg(x)
25325 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
25326 v0.AddArg(x)
25327 v0.AddArg(x)
25328 v.AddArg(v0)
25329 return true
25330 }
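25331 // match: (MULQconst [81] x)
25332 // cond:
25333 // result: (LEAQ8 (LEAQ8 <v.Type> x x) (LEAQ8 <v.Type> x x))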
25334 for {
25335 if v.AuxInt != 81 {
25336 break
25337 }
25338 x := v.Args[0]
25339 v.reset(OpAMD64LEAQ8)
25340 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
25341 v0.AddArg(x)
25342 v0.AddArg(x)
25343 v.AddArg(v0)
25344 v1 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
25345 v1.AddArg(x)
25346 v1.AddArg(x)
25347 v.AddArg(v1)
25348 return true
25349 }
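25350 // match: (MULQconst [c] x)
25351 // cond: isPowerOfTwo(c+1) && c >= 15
25352 // result: (SUBQ (SHLQconst <v.Type> [log2(c+1)] x) x)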
25353 for {
25354 c := v.AuxInt
25355 x := v.Args[0]
25356 if !(isPowerOfTwo(c+1) && c >= 15) {
25357 break
25358 }
25359 v.reset(OpAMD64SUBQ)
25360 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
25361 v0.AuxInt = log2(c + 1)
25362 v0.AddArg(x)
25363 v.AddArg(v0)
25364 v.AddArg(x)
25365 return true
25366 }
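25367 // match: (MULQconst [c] x)
25368 // cond: isPowerOfTwo(c-1) && c >= 17
25369 // result: (LEAQ1 (SHLQconst <v.Type> [log2(c-1)] x) x)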
25370 for {
25371 c := v.AuxInt
25372 x := v.Args[0]
25373 if !(isPowerOfTwo(c-1) && c >= 17) {
25374 break
25375 }
25376 v.reset(OpAMD64LEAQ1)
25377 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
25378 v0.AuxInt = log2(c - 1)
25379 v0.AddArg(x)
25380 v.AddArg(v0)
25381 v.AddArg(x)
25382 return true
25383 }
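25384 // match: (MULQconst [c] x)
25385 // cond: isPowerOfTwo(c-2) && c >= 34
25386 // result: (LEAQ2 (SHLQconst <v.Type> [log2(c-2)] x) x)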
25387 for {
25388 c := v.AuxInt
25389 x := v.Args[0]
25390 if !(isPowerOfTwo(c-2) && c >= 34) {
25391 break
25392 }
25393 v.reset(OpAMD64LEAQ2)
25394 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
25395 v0.AuxInt = log2(c - 2)
25396 v0.AddArg(x)
25397 v.AddArg(v0)
25398 v.AddArg(x)
25399 return true
25400 }
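25401 // match: (MULQconst [c] x)
25402 // cond: isPowerOfTwo(c-4) && c >= 68
25403 // result: (LEAQ4 (SHLQconst <v.Type> [log2(c-4)] x) x)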
25404 for {
25405 c := v.AuxInt
25406 x := v.Args[0]
25407 if !(isPowerOfTwo(c-4) && c >= 68) {
25408 break
25409 }
25410 v.reset(OpAMD64LEAQ4)
25411 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
25412 v0.AuxInt = log2(c - 4)
25413 v0.AddArg(x)
25414 v.AddArg(v0)
25415 v.AddArg(x)
25416 return true
25417 }
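25418 // match: (MULQconst [c] x)
25419 // cond: isPowerOfTwo(c-8) && c >= 136
25420 // result: (LEAQ8 (SHLQconst <v.Type> [log2(c-8)] x) x)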
25421 for {
25422 c := v.AuxInt
25423 x := v.Args[0]
25424 if !(isPowerOfTwo(c-8) && c >= 136) {
25425 break
25426 }
25427 v.reset(OpAMD64LEAQ8)
25428 v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
25429 v0.AuxInt = log2(c - 8)
25430 v0.AddArg(x)
25431 v.AddArg(v0)
25432 v.AddArg(x)
25433 return true
25434 }
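25435 // match: (MULQconst [c] x)
25436 // cond: c%3 == 0 && isPowerOfTwo(c/3)
25437 // result: (SHLQconst [log2(c/3)] (LEAQ2 <v.Type> x x))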
25438 for {
25439 c := v.AuxInt
25440 x := v.Args[0]
25441 if !(c%3 == 0 && isPowerOfTwo(c/3)) {
25442 break
25443 }
25444 v.reset(OpAMD64SHLQconst)
25445 v.AuxInt = log2(c / 3)
25446 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
25447 v0.AddArg(x)
25448 v0.AddArg(x)
25449 v.AddArg(v0)
25450 return true
25451 }
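25452 // match: (MULQconst [c] x)
25453 // cond: c%5 == 0 && isPowerOfTwo(c/5)
25454 // result: (SHLQconst [log2(c/5)] (LEAQ4 <v.Type> x x))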
25455 for {
25456 c := v.AuxInt
25457 x := v.Args[0]
25458 if !(c%5 == 0 && isPowerOfTwo(c/5)) {
25459 break
25460 }
25461 v.reset(OpAMD64SHLQconst)
25462 v.AuxInt = log2(c / 5)
25463 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
25464 v0.AddArg(x)
25465 v0.AddArg(x)
25466 v.AddArg(v0)
25467 return true
25468 }
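25469 // match: (MULQconst [c] x)
25470 // cond: c%9 == 0 && isPowerOfTwo(c/9)
25471 // result: (SHLQconst [log2(c/9)] (LEAQ8 <v.Type> x x))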
25472 for {
25473 c := v.AuxInt
25474 x := v.Args[0]
25475 if !(c%9 == 0 && isPowerOfTwo(c/9)) {
25476 break
25477 }
25478 v.reset(OpAMD64SHLQconst)
25479 v.AuxInt = log2(c / 9)
25480 v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
25481 v0.AddArg(x)
25482 v0.AddArg(x)
25483 v.AddArg(v0)
25484 return true
25485 }
25486 return false
25487 }
25488 func rewriteValueAMD64_OpAMD64MULQconst_30(v *Value) bool {
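25489 // match: (MULQconst [c] (MOVQconst [d]))
25490 // cond:
25491 // result: (MOVQconst [c*d])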
25492 for {
25493 c := v.AuxInt
25494 v_0 := v.Args[0]
25495 if v_0.Op != OpAMD64MOVQconst {
25496 break
25497 }
25498 d := v_0.AuxInt
25499 v.reset(OpAMD64MOVQconst)
25500 v.AuxInt = c * d
25501 return true
25502 }
25503 return false
25504 }
25505 func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool {
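25506 // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
25507 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
25508 // result: (MULSDload x [off] {sym} ptr mem)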
25509 for {
25510 _ = v.Args[1]
25511 x := v.Args[0]
25512 l := v.Args[1]
25513 if l.Op != OpAMD64MOVSDload {
25514 break
25515 }
25516 off := l.AuxInt
25517 sym := l.Aux
25518 mem := l.Args[1]
25519 ptr := l.Args[0]
25520 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
25521 break
25522 }
25523 v.reset(OpAMD64MULSDload)
25524 v.AuxInt = off
25525 v.Aux = sym
25526 v.AddArg(x)
25527 v.AddArg(ptr)
25528 v.AddArg(mem)
25529 return true
25530 }
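25531 // match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x)
25532 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
25533 // result: (MULSDload x [off] {sym} ptr mem)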
25534 for {
25535 x := v.Args[1]
25536 l := v.Args[0]
25537 if l.Op != OpAMD64MOVSDload {
25538 break
25539 }
25540 off := l.AuxInt
25541 sym := l.Aux
25542 mem := l.Args[1]
25543 ptr := l.Args[0]
25544 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
25545 break
25546 }
25547 v.reset(OpAMD64MULSDload)
25548 v.AuxInt = off
25549 v.Aux = sym
25550 v.AddArg(x)
25551 v.AddArg(ptr)
25552 v.AddArg(mem)
25553 return true
25554 }
25555 return false
25556 }
25557 func rewriteValueAMD64_OpAMD64MULSDload_0(v *Value) bool {
25558 b := v.Block
25559 typ := &b.Func.Config.Types
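25560 // match: (MULSDload [off1] {sym} val (ADDQconst [off2] base) mem)
25561 // cond: is32Bit(off1+off2)
25562 // result: (MULSDload [off1+off2] {sym} val base mem)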
25563 for {
25564 off1 := v.AuxInt
25565 sym := v.Aux
25566 mem := v.Args[2]
25567 val := v.Args[0]
25568 v_1 := v.Args[1]
25569 if v_1.Op != OpAMD64ADDQconst {
25570 break
25571 }
25572 off2 := v_1.AuxInt
25573 base := v_1.Args[0]
25574 if !(is32Bit(off1 + off2)) {
25575 break
25576 }
25577 v.reset(OpAMD64MULSDload)
25578 v.AuxInt = off1 + off2
25579 v.Aux = sym
25580 v.AddArg(val)
25581 v.AddArg(base)
25582 v.AddArg(mem)
25583 return true
25584 }
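25585 // match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
25586 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
25587 // result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)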
25588 for {
25589 off1 := v.AuxInt
25590 sym1 := v.Aux
25591 mem := v.Args[2]
25592 val := v.Args[0]
25593 v_1 := v.Args[1]
25594 if v_1.Op != OpAMD64LEAQ {
25595 break
25596 }
25597 off2 := v_1.AuxInt
25598 sym2 := v_1.Aux
25599 base := v_1.Args[0]
25600 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
25601 break
25602 }
25603 v.reset(OpAMD64MULSDload)
25604 v.AuxInt = off1 + off2
25605 v.Aux = mergeSym(sym1, sym2)
25606 v.AddArg(val)
25607 v.AddArg(base)
25608 v.AddArg(mem)
25609 return true
25610 }
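25611 // match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
25612 // cond:
25613 // result: (MULSD x (MOVQi2f y))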
25614 for {
25615 off := v.AuxInt
25616 sym := v.Aux
25617 _ = v.Args[2]
25618 x := v.Args[0]
25619 ptr := v.Args[1]
25620 v_2 := v.Args[2]
25621 if v_2.Op != OpAMD64MOVQstore {
25622 break
25623 }
25624 if v_2.AuxInt != off {
25625 break
25626 }
25627 if v_2.Aux != sym {
25628 break
25629 }
25630 _ = v_2.Args[2]
25631 if ptr != v_2.Args[0] {
25632 break
25633 }
25634 y := v_2.Args[1]
25635 v.reset(OpAMD64MULSD)
25636 v.AddArg(x)
25637 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
25638 v0.AddArg(y)
25639 v.AddArg(v0)
25640 return true
25641 }
25642 return false
25643 }
25644 func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool {
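25645 // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
25646 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
25647 // result: (MULSSload x [off] {sym} ptr mem)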
25648 for {
25649 _ = v.Args[1]
25650 x := v.Args[0]
25651 l := v.Args[1]
25652 if l.Op != OpAMD64MOVSSload {
25653 break
25654 }
25655 off := l.AuxInt
25656 sym := l.Aux
25657 mem := l.Args[1]
25658 ptr := l.Args[0]
25659 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
25660 break
25661 }
25662 v.reset(OpAMD64MULSSload)
25663 v.AuxInt = off
25664 v.Aux = sym
25665 v.AddArg(x)
25666 v.AddArg(ptr)
25667 v.AddArg(mem)
25668 return true
25669 }
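25670 // match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x)
25671 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
25672 // result: (MULSSload x [off] {sym} ptr mem)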
25673 for {
25674 x := v.Args[1]
25675 l := v.Args[0]
25676 if l.Op != OpAMD64MOVSSload {
25677 break
25678 }
25679 off := l.AuxInt
25680 sym := l.Aux
25681 mem := l.Args[1]
25682 ptr := l.Args[0]
25683 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
25684 break
25685 }
25686 v.reset(OpAMD64MULSSload)
25687 v.AuxInt = off
25688 v.Aux = sym
25689 v.AddArg(x)
25690 v.AddArg(ptr)
25691 v.AddArg(mem)
25692 return true
25693 }
25694 return false
25695 }
25696 func rewriteValueAMD64_OpAMD64MULSSload_0(v *Value) bool {
25697 b := v.Block
25698 typ := &b.Func.Config.Types
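25699 // match: (MULSSload [off1] {sym} val (ADDQconst [off2] base) mem)
25700 // cond: is32Bit(off1+off2)
25701 // result: (MULSSload [off1+off2] {sym} val base mem)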
25702 for {
25703 off1 := v.AuxInt
25704 sym := v.Aux
25705 mem := v.Args[2]
25706 val := v.Args[0]
25707 v_1 := v.Args[1]
25708 if v_1.Op != OpAMD64ADDQconst {
25709 break
25710 }
25711 off2 := v_1.AuxInt
25712 base := v_1.Args[0]
25713 if !(is32Bit(off1 + off2)) {
25714 break
25715 }
25716 v.reset(OpAMD64MULSSload)
25717 v.AuxInt = off1 + off2
25718 v.Aux = sym
25719 v.AddArg(val)
25720 v.AddArg(base)
25721 v.AddArg(mem)
25722 return true
25723 }
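25724 // match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
25725 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
25726 // result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)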
25727 for {
25728 off1 := v.AuxInt
25729 sym1 := v.Aux
25730 mem := v.Args[2]
25731 val := v.Args[0]
25732 v_1 := v.Args[1]
25733 if v_1.Op != OpAMD64LEAQ {
25734 break
25735 }
25736 off2 := v_1.AuxInt
25737 sym2 := v_1.Aux
25738 base := v_1.Args[0]
25739 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
25740 break
25741 }
25742 v.reset(OpAMD64MULSSload)
25743 v.AuxInt = off1 + off2
25744 v.Aux = mergeSym(sym1, sym2)
25745 v.AddArg(val)
25746 v.AddArg(base)
25747 v.AddArg(mem)
25748 return true
25749 }
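25750 // match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
25751 // cond:
25752 // result: (MULSS x (MOVLi2f y))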
25753 for {
25754 off := v.AuxInt
25755 sym := v.Aux
25756 _ = v.Args[2]
25757 x := v.Args[0]
25758 ptr := v.Args[1]
25759 v_2 := v.Args[2]
25760 if v_2.Op != OpAMD64MOVLstore {
25761 break
25762 }
25763 if v_2.AuxInt != off {
25764 break
25765 }
25766 if v_2.Aux != sym {
25767 break
25768 }
25769 _ = v_2.Args[2]
25770 if ptr != v_2.Args[0] {
25771 break
25772 }
25773 y := v_2.Args[1]
25774 v.reset(OpAMD64MULSS)
25775 v.AddArg(x)
25776 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
25777 v0.AddArg(y)
25778 v.AddArg(v0)
25779 return true
25780 }
25781 return false
25782 }
25783 func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool {
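25784 // match: (NEGL (NEGL x))
25785 // cond:
25786 // result: x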
25787 for {
25788 v_0 := v.Args[0]
25789 if v_0.Op != OpAMD64NEGL {
25790 break
25791 }
25792 x := v_0.Args[0]
25793 v.reset(OpCopy)
25794 v.Type = x.Type
25795 v.AddArg(x)
25796 return true
25797 }
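25798 // match: (NEGL (MOVLconst [c]))
25799 // cond:
25800 // result: (MOVLconst [int64(int32(-c))])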
25801 for {
25802 v_0 := v.Args[0]
25803 if v_0.Op != OpAMD64MOVLconst {
25804 break
25805 }
25806 c := v_0.AuxInt
25807 v.reset(OpAMD64MOVLconst)
25808 v.AuxInt = int64(int32(-c))
25809 return true
25810 }
25811 return false
25812 }
25813 func rewriteValueAMD64_OpAMD64NEGQ_0(v *Value) bool {
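25814 // match: (NEGQ (NEGQ x))
25815 // cond:
25816 // result: x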
25817 for {
25818 v_0 := v.Args[0]
25819 if v_0.Op != OpAMD64NEGQ {
25820 break
25821 }
25822 x := v_0.Args[0]
25823 v.reset(OpCopy)
25824 v.Type = x.Type
25825 v.AddArg(x)
25826 return true
25827 }
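25828 // match: (NEGQ (MOVQconst [c]))
25829 // cond:
25830 // result: (MOVQconst [-c])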
25831 for {
25832 v_0 := v.Args[0]
25833 if v_0.Op != OpAMD64MOVQconst {
25834 break
25835 }
25836 c := v_0.AuxInt
25837 v.reset(OpAMD64MOVQconst)
25838 v.AuxInt = -c
25839 return true
25840 }
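25841 // match: (NEGQ (ADDQconst [c] (NEGQ x)))
25842 // cond: c != -(1<<31)
25843 // result: (ADDQconst [-c] x)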
25844 for {
25845 v_0 := v.Args[0]
25846 if v_0.Op != OpAMD64ADDQconst {
25847 break
25848 }
25849 c := v_0.AuxInt
25850 v_0_0 := v_0.Args[0]
25851 if v_0_0.Op != OpAMD64NEGQ {
25852 break
25853 }
25854 x := v_0_0.Args[0]
25855 if !(c != -(1 << 31)) {
25856 break
25857 }
25858 v.reset(OpAMD64ADDQconst)
25859 v.AuxInt = -c
25860 v.AddArg(x)
25861 return true
25862 }
25863 return false
25864 }
25865 func rewriteValueAMD64_OpAMD64NOTL_0(v *Value) bool {
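25866 // match: (NOTL (MOVLconst [c]))
25867 // cond:
25868 // result: (MOVLconst [^c])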
25869 for {
25870 v_0 := v.Args[0]
25871 if v_0.Op != OpAMD64MOVLconst {
25872 break
25873 }
25874 c := v_0.AuxInt
25875 v.reset(OpAMD64MOVLconst)
25876 v.AuxInt = ^c
25877 return true
25878 }
25879 return false
25880 }
25881 func rewriteValueAMD64_OpAMD64NOTQ_0(v *Value) bool {
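25882 // match: (NOTQ (MOVQconst [c]))
25883 // cond:
25884 // result: (MOVQconst [^c])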
25885 for {
25886 v_0 := v.Args[0]
25887 if v_0.Op != OpAMD64MOVQconst {
25888 break
25889 }
25890 c := v_0.AuxInt
25891 v.reset(OpAMD64MOVQconst)
25892 v.AuxInt = ^c
25893 return true
25894 }
25895 return false
25896 }
25897 func rewriteValueAMD64_OpAMD64ORL_0(v *Value) bool {
25898 b := v.Block
25899 config := b.Func.Config
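25900 // match: (ORL (SHLL (MOVLconst [1]) y) x)
25901 // cond: !config.nacl
25902 // result: (BTSL x y)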
25903 for {
25904 x := v.Args[1]
25905 v_0 := v.Args[0]
25906 if v_0.Op != OpAMD64SHLL {
25907 break
25908 }
25909 y := v_0.Args[1]
25910 v_0_0 := v_0.Args[0]
25911 if v_0_0.Op != OpAMD64MOVLconst {
25912 break
25913 }
25914 if v_0_0.AuxInt != 1 {
25915 break
25916 }
25917 if !(!config.nacl) {
25918 break
25919 }
25920 v.reset(OpAMD64BTSL)
25921 v.AddArg(x)
25922 v.AddArg(y)
25923 return true
25924 }
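25925 // match: (ORL x (SHLL (MOVLconst [1]) y))
25926 // cond: !config.nacl
25927 // result: (BTSL x y)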
25928 for {
25929 _ = v.Args[1]
25930 x := v.Args[0]
25931 v_1 := v.Args[1]
25932 if v_1.Op != OpAMD64SHLL {
25933 break
25934 }
25935 y := v_1.Args[1]
25936 v_1_0 := v_1.Args[0]
25937 if v_1_0.Op != OpAMD64MOVLconst {
25938 break
25939 }
25940 if v_1_0.AuxInt != 1 {
25941 break
25942 }
25943 if !(!config.nacl) {
25944 break
25945 }
25946 v.reset(OpAMD64BTSL)
25947 v.AddArg(x)
25948 v.AddArg(y)
25949 return true
25950 }
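25951 // match: (ORL (MOVLconst [c]) x)
25952 // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
25953 // result: (BTSLconst [log2uint32(c)] x)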
25954 for {
25955 x := v.Args[1]
25956 v_0 := v.Args[0]
25957 if v_0.Op != OpAMD64MOVLconst {
25958 break
25959 }
25960 c := v_0.AuxInt
25961 if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
25962 break
25963 }
25964 v.reset(OpAMD64BTSLconst)
25965 v.AuxInt = log2uint32(c)
25966 v.AddArg(x)
25967 return true
25968 }
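25969 // match: (ORL x (MOVLconst [c]))
25970 // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
25971 // result: (BTSLconst [log2uint32(c)] x)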
25972 for {
25973 _ = v.Args[1]
25974 x := v.Args[0]
25975 v_1 := v.Args[1]
25976 if v_1.Op != OpAMD64MOVLconst {
25977 break
25978 }
25979 c := v_1.AuxInt
25980 if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
25981 break
25982 }
25983 v.reset(OpAMD64BTSLconst)
25984 v.AuxInt = log2uint32(c)
25985 v.AddArg(x)
25986 return true
25987 }
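25988 // match: (ORL x (MOVLconst [c]))
25989 // cond:
25990 // result: (ORLconst [c] x)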
25991 for {
25992 _ = v.Args[1]
25993 x := v.Args[0]
25994 v_1 := v.Args[1]
25995 if v_1.Op != OpAMD64MOVLconst {
25996 break
25997 }
25998 c := v_1.AuxInt
25999 v.reset(OpAMD64ORLconst)
26000 v.AuxInt = c
26001 v.AddArg(x)
26002 return true
26003 }
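26004 // match: (ORL (MOVLconst [c]) x)
26005 // cond:
26006 // result: (ORLconst [c] x)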
26007 for {
26008 x := v.Args[1]
26009 v_0 := v.Args[0]
26010 if v_0.Op != OpAMD64MOVLconst {
26011 break
26012 }
26013 c := v_0.AuxInt
26014 v.reset(OpAMD64ORLconst)
26015 v.AuxInt = c
26016 v.AddArg(x)
26017 return true
26018 }
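26019 // match: (ORL (SHLLconst x [c]) (SHRLconst x [d]))
26020 // cond: d==32-c
26021 // result: (ROLLconst x [c])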
26022 for {
26023 _ = v.Args[1]
26024 v_0 := v.Args[0]
26025 if v_0.Op != OpAMD64SHLLconst {
26026 break
26027 }
26028 c := v_0.AuxInt
26029 x := v_0.Args[0]
26030 v_1 := v.Args[1]
26031 if v_1.Op != OpAMD64SHRLconst {
26032 break
26033 }
26034 d := v_1.AuxInt
26035 if x != v_1.Args[0] {
26036 break
26037 }
26038 if !(d == 32-c) {
26039 break
26040 }
26041 v.reset(OpAMD64ROLLconst)
26042 v.AuxInt = c
26043 v.AddArg(x)
26044 return true
26045 }
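26046 // match: (ORL (SHRLconst x [d]) (SHLLconst x [c]))
26047 // cond: d==32-c
26048 // result: (ROLLconst x [c])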
26049 for {
26050 _ = v.Args[1]
26051 v_0 := v.Args[0]
26052 if v_0.Op != OpAMD64SHRLconst {
26053 break
26054 }
26055 d := v_0.AuxInt
26056 x := v_0.Args[0]
26057 v_1 := v.Args[1]
26058 if v_1.Op != OpAMD64SHLLconst {
26059 break
26060 }
26061 c := v_1.AuxInt
26062 if x != v_1.Args[0] {
26063 break
26064 }
26065 if !(d == 32-c) {
26066 break
26067 }
26068 v.reset(OpAMD64ROLLconst)
26069 v.AuxInt = c
26070 v.AddArg(x)
26071 return true
26072 }
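26073 // match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
26074 // cond: d==16-c && c < 16 && t.Size() == 2
26075 // result: (ROLWconst x [c])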
26076 for {
26077 t := v.Type
26078 _ = v.Args[1]
26079 v_0 := v.Args[0]
26080 if v_0.Op != OpAMD64SHLLconst {
26081 break
26082 }
26083 c := v_0.AuxInt
26084 x := v_0.Args[0]
26085 v_1 := v.Args[1]
26086 if v_1.Op != OpAMD64SHRWconst {
26087 break
26088 }
26089 d := v_1.AuxInt
26090 if x != v_1.Args[0] {
26091 break
26092 }
26093 if !(d == 16-c && c < 16 && t.Size() == 2) {
26094 break
26095 }
26096 v.reset(OpAMD64ROLWconst)
26097 v.AuxInt = c
26098 v.AddArg(x)
26099 return true
26100 }
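26101 // match: (ORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
26102 // cond: d==16-c && c < 16 && t.Size() == 2
26103 // result: (ROLWconst x [c])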
26104 for {
26105 t := v.Type
26106 _ = v.Args[1]
26107 v_0 := v.Args[0]
26108 if v_0.Op != OpAMD64SHRWconst {
26109 break
26110 }
26111 d := v_0.AuxInt
26112 x := v_0.Args[0]
26113 v_1 := v.Args[1]
26114 if v_1.Op != OpAMD64SHLLconst {
26115 break
26116 }
26117 c := v_1.AuxInt
26118 if x != v_1.Args[0] {
26119 break
26120 }
26121 if !(d == 16-c && c < 16 && t.Size() == 2) {
26122 break
26123 }
26124 v.reset(OpAMD64ROLWconst)
26125 v.AuxInt = c
26126 v.AddArg(x)
26127 return true
26128 }
26129 return false
26130 }
26131 func rewriteValueAMD64_OpAMD64ORL_10(v *Value) bool {
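26132 // match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
26133 // cond: d==8-c && c < 8 && t.Size() == 1
26134 // result: (ROLBconst x [c])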
26135 for {
26136 t := v.Type
26137 _ = v.Args[1]
26138 v_0 := v.Args[0]
26139 if v_0.Op != OpAMD64SHLLconst {
26140 break
26141 }
26142 c := v_0.AuxInt
26143 x := v_0.Args[0]
26144 v_1 := v.Args[1]
26145 if v_1.Op != OpAMD64SHRBconst {
26146 break
26147 }
26148 d := v_1.AuxInt
26149 if x != v_1.Args[0] {
26150 break
26151 }
26152 if !(d == 8-c && c < 8 && t.Size() == 1) {
26153 break
26154 }
26155 v.reset(OpAMD64ROLBconst)
26156 v.AuxInt = c
26157 v.AddArg(x)
26158 return true
26159 }
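26160 // match: (ORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
26161 // cond: d==8-c && c < 8 && t.Size() == 1
26162 // result: (ROLBconst x [c])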
26163 for {
26164 t := v.Type
26165 _ = v.Args[1]
26166 v_0 := v.Args[0]
26167 if v_0.Op != OpAMD64SHRBconst {
26168 break
26169 }
26170 d := v_0.AuxInt
26171 x := v_0.Args[0]
26172 v_1 := v.Args[1]
26173 if v_1.Op != OpAMD64SHLLconst {
26174 break
26175 }
26176 c := v_1.AuxInt
26177 if x != v_1.Args[0] {
26178 break
26179 }
26180 if !(d == 8-c && c < 8 && t.Size() == 1) {
26181 break
26182 }
26183 v.reset(OpAMD64ROLBconst)
26184 v.AuxInt = c
26185 v.AddArg(x)
26186 return true
26187 }
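26188 // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
26189 // cond:
26190 // result: (ROLL x y)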
26191 for {
26192 _ = v.Args[1]
26193 v_0 := v.Args[0]
26194 if v_0.Op != OpAMD64SHLL {
26195 break
26196 }
26197 y := v_0.Args[1]
26198 x := v_0.Args[0]
26199 v_1 := v.Args[1]
26200 if v_1.Op != OpAMD64ANDL {
26201 break
26202 }
26203 _ = v_1.Args[1]
26204 v_1_0 := v_1.Args[0]
26205 if v_1_0.Op != OpAMD64SHRL {
26206 break
26207 }
26208 _ = v_1_0.Args[1]
26209 if x != v_1_0.Args[0] {
26210 break
26211 }
26212 v_1_0_1 := v_1_0.Args[1]
26213 if v_1_0_1.Op != OpAMD64NEGQ {
26214 break
26215 }
26216 if y != v_1_0_1.Args[0] {
26217 break
26218 }
26219 v_1_1 := v_1.Args[1]
26220 if v_1_1.Op != OpAMD64SBBLcarrymask {
26221 break
26222 }
26223 v_1_1_0 := v_1_1.Args[0]
26224 if v_1_1_0.Op != OpAMD64CMPQconst {
26225 break
26226 }
26227 if v_1_1_0.AuxInt != 32 {
26228 break
26229 }
26230 v_1_1_0_0 := v_1_1_0.Args[0]
26231 if v_1_1_0_0.Op != OpAMD64NEGQ {
26232 break
26233 }
26234 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
26235 if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
26236 break
26237 }
26238 if v_1_1_0_0_0.AuxInt != -32 {
26239 break
26240 }
26241 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
26242 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
26243 break
26244 }
26245 if v_1_1_0_0_0_0.AuxInt != 31 {
26246 break
26247 }
26248 if y != v_1_1_0_0_0_0.Args[0] {
26249 break
26250 }
26251 v.reset(OpAMD64ROLL)
26252 v.AddArg(x)
26253 v.AddArg(y)
26254 return true
26255 }
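26256 // match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))))
26257 // cond:
26258 // result: (ROLL x y)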
26259 for {
26260 _ = v.Args[1]
26261 v_0 := v.Args[0]
26262 if v_0.Op != OpAMD64SHLL {
26263 break
26264 }
26265 y := v_0.Args[1]
26266 x := v_0.Args[0]
26267 v_1 := v.Args[1]
26268 if v_1.Op != OpAMD64ANDL {
26269 break
26270 }
26271 _ = v_1.Args[1]
26272 v_1_0 := v_1.Args[0]
26273 if v_1_0.Op != OpAMD64SBBLcarrymask {
26274 break
26275 }
26276 v_1_0_0 := v_1_0.Args[0]
26277 if v_1_0_0.Op != OpAMD64CMPQconst {
26278 break
26279 }
26280 if v_1_0_0.AuxInt != 32 {
26281 break
26282 }
26283 v_1_0_0_0 := v_1_0_0.Args[0]
26284 if v_1_0_0_0.Op != OpAMD64NEGQ {
26285 break
26286 }
26287 v_1_0_0_0_0 := v_1_0_0_0.Args[0]
26288 if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
26289 break
26290 }
26291 if v_1_0_0_0_0.AuxInt != -32 {
26292 break
26293 }
26294 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
26295 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
26296 break
26297 }
26298 if v_1_0_0_0_0_0.AuxInt != 31 {
26299 break
26300 }
26301 if y != v_1_0_0_0_0_0.Args[0] {
26302 break
26303 }
26304 v_1_1 := v_1.Args[1]
26305 if v_1_1.Op != OpAMD64SHRL {
26306 break
26307 }
26308 _ = v_1_1.Args[1]
26309 if x != v_1_1.Args[0] {
26310 break
26311 }
26312 v_1_1_1 := v_1_1.Args[1]
26313 if v_1_1_1.Op != OpAMD64NEGQ {
26314 break
26315 }
26316 if y != v_1_1_1.Args[0] {
26317 break
26318 }
26319 v.reset(OpAMD64ROLL)
26320 v.AddArg(x)
26321 v.AddArg(y)
26322 return true
26323 }
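26324 // match: (ORL (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))) (SHLL x y))
26325 // cond:
26326 // result: (ROLL x y)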
26327 for {
26328 _ = v.Args[1]
26329 v_0 := v.Args[0]
26330 if v_0.Op != OpAMD64ANDL {
26331 break
26332 }
26333 _ = v_0.Args[1]
26334 v_0_0 := v_0.Args[0]
26335 if v_0_0.Op != OpAMD64SHRL {
26336 break
26337 }
26338 _ = v_0_0.Args[1]
26339 x := v_0_0.Args[0]
26340 v_0_0_1 := v_0_0.Args[1]
26341 if v_0_0_1.Op != OpAMD64NEGQ {
26342 break
26343 }
26344 y := v_0_0_1.Args[0]
26345 v_0_1 := v_0.Args[1]
26346 if v_0_1.Op != OpAMD64SBBLcarrymask {
26347 break
26348 }
26349 v_0_1_0 := v_0_1.Args[0]
26350 if v_0_1_0.Op != OpAMD64CMPQconst {
26351 break
26352 }
26353 if v_0_1_0.AuxInt != 32 {
26354 break
26355 }
26356 v_0_1_0_0 := v_0_1_0.Args[0]
26357 if v_0_1_0_0.Op != OpAMD64NEGQ {
26358 break
26359 }
26360 v_0_1_0_0_0 := v_0_1_0_0.Args[0]
26361 if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
26362 break
26363 }
26364 if v_0_1_0_0_0.AuxInt != -32 {
26365 break
26366 }
26367 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
26368 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
26369 break
26370 }
26371 if v_0_1_0_0_0_0.AuxInt != 31 {
26372 break
26373 }
26374 if y != v_0_1_0_0_0_0.Args[0] {
26375 break
26376 }
26377 v_1 := v.Args[1]
26378 if v_1.Op != OpAMD64SHLL {
26379 break
26380 }
26381 _ = v_1.Args[1]
26382 if x != v_1.Args[0] {
26383 break
26384 }
26385 if y != v_1.Args[1] {
26386 break
26387 }
26388 v.reset(OpAMD64ROLL)
26389 v.AddArg(x)
26390 v.AddArg(y)
26391 return true
26392 }
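26393 // match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHRL x (NEGQ y))) (SHLL x y))
26394 // cond:
26395 // result: (ROLL x y)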
26396 for {
26397 _ = v.Args[1]
26398 v_0 := v.Args[0]
26399 if v_0.Op != OpAMD64ANDL {
26400 break
26401 }
26402 _ = v_0.Args[1]
26403 v_0_0 := v_0.Args[0]
26404 if v_0_0.Op != OpAMD64SBBLcarrymask {
26405 break
26406 }
26407 v_0_0_0 := v_0_0.Args[0]
26408 if v_0_0_0.Op != OpAMD64CMPQconst {
26409 break
26410 }
26411 if v_0_0_0.AuxInt != 32 {
26412 break
26413 }
26414 v_0_0_0_0 := v_0_0_0.Args[0]
26415 if v_0_0_0_0.Op != OpAMD64NEGQ {
26416 break
26417 }
26418 v_0_0_0_0_0 := v_0_0_0_0.Args[0]
26419 if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
26420 break
26421 }
26422 if v_0_0_0_0_0.AuxInt != -32 {
26423 break
26424 }
26425 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
26426 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
26427 break
26428 }
26429 if v_0_0_0_0_0_0.AuxInt != 31 {
26430 break
26431 }
26432 y := v_0_0_0_0_0_0.Args[0]
26433 v_0_1 := v_0.Args[1]
26434 if v_0_1.Op != OpAMD64SHRL {
26435 break
26436 }
26437 _ = v_0_1.Args[1]
26438 x := v_0_1.Args[0]
26439 v_0_1_1 := v_0_1.Args[1]
26440 if v_0_1_1.Op != OpAMD64NEGQ {
26441 break
26442 }
26443 if y != v_0_1_1.Args[0] {
26444 break
26445 }
26446 v_1 := v.Args[1]
26447 if v_1.Op != OpAMD64SHLL {
26448 break
26449 }
26450 _ = v_1.Args[1]
26451 if x != v_1.Args[0] {
26452 break
26453 }
26454 if y != v_1.Args[1] {
26455 break
26456 }
26457 v.reset(OpAMD64ROLL)
26458 v.AddArg(x)
26459 v.AddArg(y)
26460 return true
26461 }
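26462 // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
26463 // cond:
26464 // result: (ROLL x y)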
26465 for {
26466 _ = v.Args[1]
26467 v_0 := v.Args[0]
26468 if v_0.Op != OpAMD64SHLL {
26469 break
26470 }
26471 y := v_0.Args[1]
26472 x := v_0.Args[0]
26473 v_1 := v.Args[1]
26474 if v_1.Op != OpAMD64ANDL {
26475 break
26476 }
26477 _ = v_1.Args[1]
26478 v_1_0 := v_1.Args[0]
26479 if v_1_0.Op != OpAMD64SHRL {
26480 break
26481 }
26482 _ = v_1_0.Args[1]
26483 if x != v_1_0.Args[0] {
26484 break
26485 }
26486 v_1_0_1 := v_1_0.Args[1]
26487 if v_1_0_1.Op != OpAMD64NEGL {
26488 break
26489 }
26490 if y != v_1_0_1.Args[0] {
26491 break
26492 }
26493 v_1_1 := v_1.Args[1]
26494 if v_1_1.Op != OpAMD64SBBLcarrymask {
26495 break
26496 }
26497 v_1_1_0 := v_1_1.Args[0]
26498 if v_1_1_0.Op != OpAMD64CMPLconst {
26499 break
26500 }
26501 if v_1_1_0.AuxInt != 32 {
26502 break
26503 }
26504 v_1_1_0_0 := v_1_1_0.Args[0]
26505 if v_1_1_0_0.Op != OpAMD64NEGL {
26506 break
26507 }
26508 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
26509 if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
26510 break
26511 }
26512 if v_1_1_0_0_0.AuxInt != -32 {
26513 break
26514 }
26515 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
26516 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
26517 break
26518 }
26519 if v_1_1_0_0_0_0.AuxInt != 31 {
26520 break
26521 }
26522 if y != v_1_1_0_0_0_0.Args[0] {
26523 break
26524 }
26525 v.reset(OpAMD64ROLL)
26526 v.AddArg(x)
26527 v.AddArg(y)
26528 return true
26529 }
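26530 // match: (ORL (SHLL x y) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))))
26531 // cond:
26532 // result: (ROLL x y)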
26533 for {
26534 _ = v.Args[1]
26535 v_0 := v.Args[0]
26536 if v_0.Op != OpAMD64SHLL {
26537 break
26538 }
26539 y := v_0.Args[1]
26540 x := v_0.Args[0]
26541 v_1 := v.Args[1]
26542 if v_1.Op != OpAMD64ANDL {
26543 break
26544 }
26545 _ = v_1.Args[1]
26546 v_1_0 := v_1.Args[0]
26547 if v_1_0.Op != OpAMD64SBBLcarrymask {
26548 break
26549 }
26550 v_1_0_0 := v_1_0.Args[0]
26551 if v_1_0_0.Op != OpAMD64CMPLconst {
26552 break
26553 }
26554 if v_1_0_0.AuxInt != 32 {
26555 break
26556 }
26557 v_1_0_0_0 := v_1_0_0.Args[0]
26558 if v_1_0_0_0.Op != OpAMD64NEGL {
26559 break
26560 }
26561 v_1_0_0_0_0 := v_1_0_0_0.Args[0]
26562 if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
26563 break
26564 }
26565 if v_1_0_0_0_0.AuxInt != -32 {
26566 break
26567 }
26568 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
26569 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
26570 break
26571 }
26572 if v_1_0_0_0_0_0.AuxInt != 31 {
26573 break
26574 }
26575 if y != v_1_0_0_0_0_0.Args[0] {
26576 break
26577 }
26578 v_1_1 := v_1.Args[1]
26579 if v_1_1.Op != OpAMD64SHRL {
26580 break
26581 }
26582 _ = v_1_1.Args[1]
26583 if x != v_1_1.Args[0] {
26584 break
26585 }
26586 v_1_1_1 := v_1_1.Args[1]
26587 if v_1_1_1.Op != OpAMD64NEGL {
26588 break
26589 }
26590 if y != v_1_1_1.Args[0] {
26591 break
26592 }
26593 v.reset(OpAMD64ROLL)
26594 v.AddArg(x)
26595 v.AddArg(y)
26596 return true
26597 }
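26598 // match: (ORL (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))) (SHLL x y))
26599 // cond:
26600 // result: (ROLL x y)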
26601 for {
26602 _ = v.Args[1]
26603 v_0 := v.Args[0]
26604 if v_0.Op != OpAMD64ANDL {
26605 break
26606 }
26607 _ = v_0.Args[1]
26608 v_0_0 := v_0.Args[0]
26609 if v_0_0.Op != OpAMD64SHRL {
26610 break
26611 }
26612 _ = v_0_0.Args[1]
26613 x := v_0_0.Args[0]
26614 v_0_0_1 := v_0_0.Args[1]
26615 if v_0_0_1.Op != OpAMD64NEGL {
26616 break
26617 }
26618 y := v_0_0_1.Args[0]
26619 v_0_1 := v_0.Args[1]
26620 if v_0_1.Op != OpAMD64SBBLcarrymask {
26621 break
26622 }
26623 v_0_1_0 := v_0_1.Args[0]
26624 if v_0_1_0.Op != OpAMD64CMPLconst {
26625 break
26626 }
26627 if v_0_1_0.AuxInt != 32 {
26628 break
26629 }
26630 v_0_1_0_0 := v_0_1_0.Args[0]
26631 if v_0_1_0_0.Op != OpAMD64NEGL {
26632 break
26633 }
26634 v_0_1_0_0_0 := v_0_1_0_0.Args[0]
26635 if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
26636 break
26637 }
26638 if v_0_1_0_0_0.AuxInt != -32 {
26639 break
26640 }
26641 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
26642 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
26643 break
26644 }
26645 if v_0_1_0_0_0_0.AuxInt != 31 {
26646 break
26647 }
26648 if y != v_0_1_0_0_0_0.Args[0] {
26649 break
26650 }
26651 v_1 := v.Args[1]
26652 if v_1.Op != OpAMD64SHLL {
26653 break
26654 }
26655 _ = v_1.Args[1]
26656 if x != v_1.Args[0] {
26657 break
26658 }
26659 if y != v_1.Args[1] {
26660 break
26661 }
26662 v.reset(OpAMD64ROLL)
26663 v.AddArg(x)
26664 v.AddArg(y)
26665 return true
26666 }
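26667 // match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])) (SHRL x (NEGL y))) (SHLL x y))
26668 // cond:
26669 // result: (ROLL x y)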
26670 for {
26671 _ = v.Args[1]
26672 v_0 := v.Args[0]
26673 if v_0.Op != OpAMD64ANDL {
26674 break
26675 }
26676 _ = v_0.Args[1]
26677 v_0_0 := v_0.Args[0]
26678 if v_0_0.Op != OpAMD64SBBLcarrymask {
26679 break
26680 }
26681 v_0_0_0 := v_0_0.Args[0]
26682 if v_0_0_0.Op != OpAMD64CMPLconst {
26683 break
26684 }
26685 if v_0_0_0.AuxInt != 32 {
26686 break
26687 }
26688 v_0_0_0_0 := v_0_0_0.Args[0]
26689 if v_0_0_0_0.Op != OpAMD64NEGL {
26690 break
26691 }
26692 v_0_0_0_0_0 := v_0_0_0_0.Args[0]
26693 if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
26694 break
26695 }
26696 if v_0_0_0_0_0.AuxInt != -32 {
26697 break
26698 }
26699 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
26700 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
26701 break
26702 }
26703 if v_0_0_0_0_0_0.AuxInt != 31 {
26704 break
26705 }
26706 y := v_0_0_0_0_0_0.Args[0]
26707 v_0_1 := v_0.Args[1]
26708 if v_0_1.Op != OpAMD64SHRL {
26709 break
26710 }
26711 _ = v_0_1.Args[1]
26712 x := v_0_1.Args[0]
26713 v_0_1_1 := v_0_1.Args[1]
26714 if v_0_1_1.Op != OpAMD64NEGL {
26715 break
26716 }
26717 if y != v_0_1_1.Args[0] {
26718 break
26719 }
26720 v_1 := v.Args[1]
26721 if v_1.Op != OpAMD64SHLL {
26722 break
26723 }
26724 _ = v_1.Args[1]
26725 if x != v_1.Args[0] {
26726 break
26727 }
26728 if y != v_1.Args[1] {
26729 break
26730 }
26731 v.reset(OpAMD64ROLL)
26732 v.AddArg(x)
26733 v.AddArg(y)
26734 return true
26735 }
26736 return false
26737 }
26738 func rewriteValueAMD64_OpAMD64ORL_20(v *Value) bool {
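26739 // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
26740 // cond:
26741 // result: (RORL x y)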
26742 for {
26743 _ = v.Args[1]
26744 v_0 := v.Args[0]
26745 if v_0.Op != OpAMD64SHRL {
26746 break
26747 }
26748 y := v_0.Args[1]
26749 x := v_0.Args[0]
26750 v_1 := v.Args[1]
26751 if v_1.Op != OpAMD64ANDL {
26752 break
26753 }
26754 _ = v_1.Args[1]
26755 v_1_0 := v_1.Args[0]
26756 if v_1_0.Op != OpAMD64SHLL {
26757 break
26758 }
26759 _ = v_1_0.Args[1]
26760 if x != v_1_0.Args[0] {
26761 break
26762 }
26763 v_1_0_1 := v_1_0.Args[1]
26764 if v_1_0_1.Op != OpAMD64NEGQ {
26765 break
26766 }
26767 if y != v_1_0_1.Args[0] {
26768 break
26769 }
26770 v_1_1 := v_1.Args[1]
26771 if v_1_1.Op != OpAMD64SBBLcarrymask {
26772 break
26773 }
26774 v_1_1_0 := v_1_1.Args[0]
26775 if v_1_1_0.Op != OpAMD64CMPQconst {
26776 break
26777 }
26778 if v_1_1_0.AuxInt != 32 {
26779 break
26780 }
26781 v_1_1_0_0 := v_1_1_0.Args[0]
26782 if v_1_1_0_0.Op != OpAMD64NEGQ {
26783 break
26784 }
26785 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
26786 if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
26787 break
26788 }
26789 if v_1_1_0_0_0.AuxInt != -32 {
26790 break
26791 }
26792 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
26793 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
26794 break
26795 }
26796 if v_1_1_0_0_0_0.AuxInt != 31 {
26797 break
26798 }
26799 if y != v_1_1_0_0_0_0.Args[0] {
26800 break
26801 }
26802 v.reset(OpAMD64RORL)
26803 v.AddArg(x)
26804 v.AddArg(y)
26805 return true
26806 }
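26807 // match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])) (SHLL x (NEGQ y))))
26808 // cond:
26809 // result: (RORL x y)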
26810 for {
26811 _ = v.Args[1]
26812 v_0 := v.Args[0]
26813 if v_0.Op != OpAMD64SHRL {
26814 break
26815 }
26816 y := v_0.Args[1]
26817 x := v_0.Args[0]
26818 v_1 := v.Args[1]
26819 if v_1.Op != OpAMD64ANDL {
26820 break
26821 }
26822 _ = v_1.Args[1]
26823 v_1_0 := v_1.Args[0]
26824 if v_1_0.Op != OpAMD64SBBLcarrymask {
26825 break
26826 }
26827 v_1_0_0 := v_1_0.Args[0]
26828 if v_1_0_0.Op != OpAMD64CMPQconst {
26829 break
26830 }
26831 if v_1_0_0.AuxInt != 32 {
26832 break
26833 }
26834 v_1_0_0_0 := v_1_0_0.Args[0]
26835 if v_1_0_0_0.Op != OpAMD64NEGQ {
26836 break
26837 }
26838 v_1_0_0_0_0 := v_1_0_0_0.Args[0]
26839 if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
26840 break
26841 }
26842 if v_1_0_0_0_0.AuxInt != -32 {
26843 break
26844 }
26845 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
26846 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
26847 break
26848 }
26849 if v_1_0_0_0_0_0.AuxInt != 31 {
26850 break
26851 }
26852 if y != v_1_0_0_0_0_0.Args[0] {
26853 break
26854 }
26855 v_1_1 := v_1.Args[1]
26856 if v_1_1.Op != OpAMD64SHLL {
26857 break
26858 }
26859 _ = v_1_1.Args[1]
26860 if x != v_1_1.Args[0] {
26861 break
26862 }
26863 v_1_1_1 := v_1_1.Args[1]
26864 if v_1_1_1.Op != OpAMD64NEGQ {
26865 break
26866 }
26867 if y != v_1_1_1.Args[0] {
26868 break
26869 }
26870 v.reset(OpAMD64RORL)
26871 v.AddArg(x)
26872 v.AddArg(y)
26873 return true
26874 }
// match: (ORL (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst [32] (NEGQ (ADDQconst [-32] (ANDQconst [31] y)))))) (SHRL x y))
// cond:
// result: (RORL x y)
26878 for {
26879 _ = v.Args[1]
26880 v_0 := v.Args[0]
26881 if v_0.Op != OpAMD64ANDL {
26882 break
26883 }
26884 _ = v_0.Args[1]
26885 v_0_0 := v_0.Args[0]
26886 if v_0_0.Op != OpAMD64SHLL {
26887 break
26888 }
26889 _ = v_0_0.Args[1]
26890 x := v_0_0.Args[0]
26891 v_0_0_1 := v_0_0.Args[1]
26892 if v_0_0_1.Op != OpAMD64NEGQ {
26893 break
26894 }
26895 y := v_0_0_1.Args[0]
26896 v_0_1 := v_0.Args[1]
26897 if v_0_1.Op != OpAMD64SBBLcarrymask {
26898 break
26899 }
26900 v_0_1_0 := v_0_1.Args[0]
26901 if v_0_1_0.Op != OpAMD64CMPQconst {
26902 break
26903 }
26904 if v_0_1_0.AuxInt != 32 {
26905 break
26906 }
26907 v_0_1_0_0 := v_0_1_0.Args[0]
26908 if v_0_1_0_0.Op != OpAMD64NEGQ {
26909 break
26910 }
26911 v_0_1_0_0_0 := v_0_1_0_0.Args[0]
26912 if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
26913 break
26914 }
26915 if v_0_1_0_0_0.AuxInt != -32 {
26916 break
26917 }
26918 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
26919 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
26920 break
26921 }
26922 if v_0_1_0_0_0_0.AuxInt != 31 {
26923 break
26924 }
26925 if y != v_0_1_0_0_0_0.Args[0] {
26926 break
26927 }
26928 v_1 := v.Args[1]
26929 if v_1.Op != OpAMD64SHRL {
26930 break
26931 }
26932 _ = v_1.Args[1]
26933 if x != v_1.Args[0] {
26934 break
26935 }
26936 if y != v_1.Args[1] {
26937 break
26938 }
26939 v.reset(OpAMD64RORL)
26940 v.AddArg(x)
26941 v.AddArg(y)
26942 return true
26943 }
// match: (ORL (ANDL (SBBLcarrymask (CMPQconst [32] (NEGQ (ADDQconst [-32] (ANDQconst [31] y))))) (SHLL x (NEGQ y))) (SHRL x y))
// cond:
// result: (RORL x y)
26947 for {
26948 _ = v.Args[1]
26949 v_0 := v.Args[0]
26950 if v_0.Op != OpAMD64ANDL {
26951 break
26952 }
26953 _ = v_0.Args[1]
26954 v_0_0 := v_0.Args[0]
26955 if v_0_0.Op != OpAMD64SBBLcarrymask {
26956 break
26957 }
26958 v_0_0_0 := v_0_0.Args[0]
26959 if v_0_0_0.Op != OpAMD64CMPQconst {
26960 break
26961 }
26962 if v_0_0_0.AuxInt != 32 {
26963 break
26964 }
26965 v_0_0_0_0 := v_0_0_0.Args[0]
26966 if v_0_0_0_0.Op != OpAMD64NEGQ {
26967 break
26968 }
26969 v_0_0_0_0_0 := v_0_0_0_0.Args[0]
26970 if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
26971 break
26972 }
26973 if v_0_0_0_0_0.AuxInt != -32 {
26974 break
26975 }
26976 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
26977 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
26978 break
26979 }
26980 if v_0_0_0_0_0_0.AuxInt != 31 {
26981 break
26982 }
26983 y := v_0_0_0_0_0_0.Args[0]
26984 v_0_1 := v_0.Args[1]
26985 if v_0_1.Op != OpAMD64SHLL {
26986 break
26987 }
26988 _ = v_0_1.Args[1]
26989 x := v_0_1.Args[0]
26990 v_0_1_1 := v_0_1.Args[1]
26991 if v_0_1_1.Op != OpAMD64NEGQ {
26992 break
26993 }
26994 if y != v_0_1_1.Args[0] {
26995 break
26996 }
26997 v_1 := v.Args[1]
26998 if v_1.Op != OpAMD64SHRL {
26999 break
27000 }
27001 _ = v_1.Args[1]
27002 if x != v_1.Args[0] {
27003 break
27004 }
27005 if y != v_1.Args[1] {
27006 break
27007 }
27008 v.reset(OpAMD64RORL)
27009 v.AddArg(x)
27010 v.AddArg(y)
27011 return true
27012 }
// match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst [32] (NEGL (ADDLconst [-32] (ANDLconst [31] y)))))))
// cond:
// result: (RORL x y)
27016 for {
27017 _ = v.Args[1]
27018 v_0 := v.Args[0]
27019 if v_0.Op != OpAMD64SHRL {
27020 break
27021 }
27022 y := v_0.Args[1]
27023 x := v_0.Args[0]
27024 v_1 := v.Args[1]
27025 if v_1.Op != OpAMD64ANDL {
27026 break
27027 }
27028 _ = v_1.Args[1]
27029 v_1_0 := v_1.Args[0]
27030 if v_1_0.Op != OpAMD64SHLL {
27031 break
27032 }
27033 _ = v_1_0.Args[1]
27034 if x != v_1_0.Args[0] {
27035 break
27036 }
27037 v_1_0_1 := v_1_0.Args[1]
27038 if v_1_0_1.Op != OpAMD64NEGL {
27039 break
27040 }
27041 if y != v_1_0_1.Args[0] {
27042 break
27043 }
27044 v_1_1 := v_1.Args[1]
27045 if v_1_1.Op != OpAMD64SBBLcarrymask {
27046 break
27047 }
27048 v_1_1_0 := v_1_1.Args[0]
27049 if v_1_1_0.Op != OpAMD64CMPLconst {
27050 break
27051 }
27052 if v_1_1_0.AuxInt != 32 {
27053 break
27054 }
27055 v_1_1_0_0 := v_1_1_0.Args[0]
27056 if v_1_1_0_0.Op != OpAMD64NEGL {
27057 break
27058 }
27059 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
27060 if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
27061 break
27062 }
27063 if v_1_1_0_0_0.AuxInt != -32 {
27064 break
27065 }
27066 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
27067 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
27068 break
27069 }
27070 if v_1_1_0_0_0_0.AuxInt != 31 {
27071 break
27072 }
27073 if y != v_1_1_0_0_0_0.Args[0] {
27074 break
27075 }
27076 v.reset(OpAMD64RORL)
27077 v.AddArg(x)
27078 v.AddArg(y)
27079 return true
27080 }
// match: (ORL (SHRL x y) (ANDL (SBBLcarrymask (CMPLconst [32] (NEGL (ADDLconst [-32] (ANDLconst [31] y))))) (SHLL x (NEGL y))))
// cond:
// result: (RORL x y)
27084 for {
27085 _ = v.Args[1]
27086 v_0 := v.Args[0]
27087 if v_0.Op != OpAMD64SHRL {
27088 break
27089 }
27090 y := v_0.Args[1]
27091 x := v_0.Args[0]
27092 v_1 := v.Args[1]
27093 if v_1.Op != OpAMD64ANDL {
27094 break
27095 }
27096 _ = v_1.Args[1]
27097 v_1_0 := v_1.Args[0]
27098 if v_1_0.Op != OpAMD64SBBLcarrymask {
27099 break
27100 }
27101 v_1_0_0 := v_1_0.Args[0]
27102 if v_1_0_0.Op != OpAMD64CMPLconst {
27103 break
27104 }
27105 if v_1_0_0.AuxInt != 32 {
27106 break
27107 }
27108 v_1_0_0_0 := v_1_0_0.Args[0]
27109 if v_1_0_0_0.Op != OpAMD64NEGL {
27110 break
27111 }
27112 v_1_0_0_0_0 := v_1_0_0_0.Args[0]
27113 if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
27114 break
27115 }
27116 if v_1_0_0_0_0.AuxInt != -32 {
27117 break
27118 }
27119 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
27120 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
27121 break
27122 }
27123 if v_1_0_0_0_0_0.AuxInt != 31 {
27124 break
27125 }
27126 if y != v_1_0_0_0_0_0.Args[0] {
27127 break
27128 }
27129 v_1_1 := v_1.Args[1]
27130 if v_1_1.Op != OpAMD64SHLL {
27131 break
27132 }
27133 _ = v_1_1.Args[1]
27134 if x != v_1_1.Args[0] {
27135 break
27136 }
27137 v_1_1_1 := v_1_1.Args[1]
27138 if v_1_1_1.Op != OpAMD64NEGL {
27139 break
27140 }
27141 if y != v_1_1_1.Args[0] {
27142 break
27143 }
27144 v.reset(OpAMD64RORL)
27145 v.AddArg(x)
27146 v.AddArg(y)
27147 return true
27148 }
// match: (ORL (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst [32] (NEGL (ADDLconst [-32] (ANDLconst [31] y)))))) (SHRL x y))
// cond:
// result: (RORL x y)
27152 for {
27153 _ = v.Args[1]
27154 v_0 := v.Args[0]
27155 if v_0.Op != OpAMD64ANDL {
27156 break
27157 }
27158 _ = v_0.Args[1]
27159 v_0_0 := v_0.Args[0]
27160 if v_0_0.Op != OpAMD64SHLL {
27161 break
27162 }
27163 _ = v_0_0.Args[1]
27164 x := v_0_0.Args[0]
27165 v_0_0_1 := v_0_0.Args[1]
27166 if v_0_0_1.Op != OpAMD64NEGL {
27167 break
27168 }
27169 y := v_0_0_1.Args[0]
27170 v_0_1 := v_0.Args[1]
27171 if v_0_1.Op != OpAMD64SBBLcarrymask {
27172 break
27173 }
27174 v_0_1_0 := v_0_1.Args[0]
27175 if v_0_1_0.Op != OpAMD64CMPLconst {
27176 break
27177 }
27178 if v_0_1_0.AuxInt != 32 {
27179 break
27180 }
27181 v_0_1_0_0 := v_0_1_0.Args[0]
27182 if v_0_1_0_0.Op != OpAMD64NEGL {
27183 break
27184 }
27185 v_0_1_0_0_0 := v_0_1_0_0.Args[0]
27186 if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
27187 break
27188 }
27189 if v_0_1_0_0_0.AuxInt != -32 {
27190 break
27191 }
27192 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
27193 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
27194 break
27195 }
27196 if v_0_1_0_0_0_0.AuxInt != 31 {
27197 break
27198 }
27199 if y != v_0_1_0_0_0_0.Args[0] {
27200 break
27201 }
27202 v_1 := v.Args[1]
27203 if v_1.Op != OpAMD64SHRL {
27204 break
27205 }
27206 _ = v_1.Args[1]
27207 if x != v_1.Args[0] {
27208 break
27209 }
27210 if y != v_1.Args[1] {
27211 break
27212 }
27213 v.reset(OpAMD64RORL)
27214 v.AddArg(x)
27215 v.AddArg(y)
27216 return true
27217 }
// match: (ORL (ANDL (SBBLcarrymask (CMPLconst [32] (NEGL (ADDLconst [-32] (ANDLconst [31] y))))) (SHLL x (NEGL y))) (SHRL x y))
// cond:
// result: (RORL x y)
27221 for {
27222 _ = v.Args[1]
27223 v_0 := v.Args[0]
27224 if v_0.Op != OpAMD64ANDL {
27225 break
27226 }
27227 _ = v_0.Args[1]
27228 v_0_0 := v_0.Args[0]
27229 if v_0_0.Op != OpAMD64SBBLcarrymask {
27230 break
27231 }
27232 v_0_0_0 := v_0_0.Args[0]
27233 if v_0_0_0.Op != OpAMD64CMPLconst {
27234 break
27235 }
27236 if v_0_0_0.AuxInt != 32 {
27237 break
27238 }
27239 v_0_0_0_0 := v_0_0_0.Args[0]
27240 if v_0_0_0_0.Op != OpAMD64NEGL {
27241 break
27242 }
27243 v_0_0_0_0_0 := v_0_0_0_0.Args[0]
27244 if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
27245 break
27246 }
27247 if v_0_0_0_0_0.AuxInt != -32 {
27248 break
27249 }
27250 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
27251 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
27252 break
27253 }
27254 if v_0_0_0_0_0_0.AuxInt != 31 {
27255 break
27256 }
27257 y := v_0_0_0_0_0_0.Args[0]
27258 v_0_1 := v_0.Args[1]
27259 if v_0_1.Op != OpAMD64SHLL {
27260 break
27261 }
27262 _ = v_0_1.Args[1]
27263 x := v_0_1.Args[0]
27264 v_0_1_1 := v_0_1.Args[1]
27265 if v_0_1_1.Op != OpAMD64NEGL {
27266 break
27267 }
27268 if y != v_0_1_1.Args[0] {
27269 break
27270 }
27271 v_1 := v.Args[1]
27272 if v_1.Op != OpAMD64SHRL {
27273 break
27274 }
27275 _ = v_1.Args[1]
27276 if x != v_1.Args[0] {
27277 break
27278 }
27279 if y != v_1.Args[1] {
27280 break
27281 }
27282 v.reset(OpAMD64RORL)
27283 v.AddArg(x)
27284 v.AddArg(y)
27285 return true
27286 }
// match: (ORL (SHLL x (ANDQconst [15] y)) (ANDL (SHRW x (NEGQ (ADDQconst [-16] (ANDQconst [15] y)))) (SBBLcarrymask (CMPQconst [16] (NEGQ (ADDQconst [-16] (ANDQconst [15] y)))))))
// cond: v.Type.Size() == 2
// result: (ROLW x y)
27290 for {
27291 _ = v.Args[1]
27292 v_0 := v.Args[0]
27293 if v_0.Op != OpAMD64SHLL {
27294 break
27295 }
27296 _ = v_0.Args[1]
27297 x := v_0.Args[0]
27298 v_0_1 := v_0.Args[1]
27299 if v_0_1.Op != OpAMD64ANDQconst {
27300 break
27301 }
27302 if v_0_1.AuxInt != 15 {
27303 break
27304 }
27305 y := v_0_1.Args[0]
27306 v_1 := v.Args[1]
27307 if v_1.Op != OpAMD64ANDL {
27308 break
27309 }
27310 _ = v_1.Args[1]
27311 v_1_0 := v_1.Args[0]
27312 if v_1_0.Op != OpAMD64SHRW {
27313 break
27314 }
27315 _ = v_1_0.Args[1]
27316 if x != v_1_0.Args[0] {
27317 break
27318 }
27319 v_1_0_1 := v_1_0.Args[1]
27320 if v_1_0_1.Op != OpAMD64NEGQ {
27321 break
27322 }
27323 v_1_0_1_0 := v_1_0_1.Args[0]
27324 if v_1_0_1_0.Op != OpAMD64ADDQconst {
27325 break
27326 }
27327 if v_1_0_1_0.AuxInt != -16 {
27328 break
27329 }
27330 v_1_0_1_0_0 := v_1_0_1_0.Args[0]
27331 if v_1_0_1_0_0.Op != OpAMD64ANDQconst {
27332 break
27333 }
27334 if v_1_0_1_0_0.AuxInt != 15 {
27335 break
27336 }
27337 if y != v_1_0_1_0_0.Args[0] {
27338 break
27339 }
27340 v_1_1 := v_1.Args[1]
27341 if v_1_1.Op != OpAMD64SBBLcarrymask {
27342 break
27343 }
27344 v_1_1_0 := v_1_1.Args[0]
27345 if v_1_1_0.Op != OpAMD64CMPQconst {
27346 break
27347 }
27348 if v_1_1_0.AuxInt != 16 {
27349 break
27350 }
27351 v_1_1_0_0 := v_1_1_0.Args[0]
27352 if v_1_1_0_0.Op != OpAMD64NEGQ {
27353 break
27354 }
27355 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
27356 if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
27357 break
27358 }
27359 if v_1_1_0_0_0.AuxInt != -16 {
27360 break
27361 }
27362 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
27363 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
27364 break
27365 }
27366 if v_1_1_0_0_0_0.AuxInt != 15 {
27367 break
27368 }
27369 if y != v_1_1_0_0_0_0.Args[0] {
27370 break
27371 }
27372 if !(v.Type.Size() == 2) {
27373 break
27374 }
27375 v.reset(OpAMD64ROLW)
27376 v.AddArg(x)
27377 v.AddArg(y)
27378 return true
27379 }
// match: (ORL (SHLL x (ANDQconst [15] y)) (ANDL (SBBLcarrymask (CMPQconst [16] (NEGQ (ADDQconst [-16] (ANDQconst [15] y))))) (SHRW x (NEGQ (ADDQconst [-16] (ANDQconst [15] y))))))
// cond: v.Type.Size() == 2
// result: (ROLW x y)
27383 for {
27384 _ = v.Args[1]
27385 v_0 := v.Args[0]
27386 if v_0.Op != OpAMD64SHLL {
27387 break
27388 }
27389 _ = v_0.Args[1]
27390 x := v_0.Args[0]
27391 v_0_1 := v_0.Args[1]
27392 if v_0_1.Op != OpAMD64ANDQconst {
27393 break
27394 }
27395 if v_0_1.AuxInt != 15 {
27396 break
27397 }
27398 y := v_0_1.Args[0]
27399 v_1 := v.Args[1]
27400 if v_1.Op != OpAMD64ANDL {
27401 break
27402 }
27403 _ = v_1.Args[1]
27404 v_1_0 := v_1.Args[0]
27405 if v_1_0.Op != OpAMD64SBBLcarrymask {
27406 break
27407 }
27408 v_1_0_0 := v_1_0.Args[0]
27409 if v_1_0_0.Op != OpAMD64CMPQconst {
27410 break
27411 }
27412 if v_1_0_0.AuxInt != 16 {
27413 break
27414 }
27415 v_1_0_0_0 := v_1_0_0.Args[0]
27416 if v_1_0_0_0.Op != OpAMD64NEGQ {
27417 break
27418 }
27419 v_1_0_0_0_0 := v_1_0_0_0.Args[0]
27420 if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
27421 break
27422 }
27423 if v_1_0_0_0_0.AuxInt != -16 {
27424 break
27425 }
27426 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
27427 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
27428 break
27429 }
27430 if v_1_0_0_0_0_0.AuxInt != 15 {
27431 break
27432 }
27433 if y != v_1_0_0_0_0_0.Args[0] {
27434 break
27435 }
27436 v_1_1 := v_1.Args[1]
27437 if v_1_1.Op != OpAMD64SHRW {
27438 break
27439 }
27440 _ = v_1_1.Args[1]
27441 if x != v_1_1.Args[0] {
27442 break
27443 }
27444 v_1_1_1 := v_1_1.Args[1]
27445 if v_1_1_1.Op != OpAMD64NEGQ {
27446 break
27447 }
27448 v_1_1_1_0 := v_1_1_1.Args[0]
27449 if v_1_1_1_0.Op != OpAMD64ADDQconst {
27450 break
27451 }
27452 if v_1_1_1_0.AuxInt != -16 {
27453 break
27454 }
27455 v_1_1_1_0_0 := v_1_1_1_0.Args[0]
27456 if v_1_1_1_0_0.Op != OpAMD64ANDQconst {
27457 break
27458 }
27459 if v_1_1_1_0_0.AuxInt != 15 {
27460 break
27461 }
27462 if y != v_1_1_1_0_0.Args[0] {
27463 break
27464 }
27465 if !(v.Type.Size() == 2) {
27466 break
27467 }
27468 v.reset(OpAMD64ROLW)
27469 v.AddArg(x)
27470 v.AddArg(y)
27471 return true
27472 }
27473 return false
27474 }
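// rewriteValueAMD64_OpAMD64ORL_30 handles the remaining 16-bit rotate forms:
// the commuted ROLW matches and the RORW patterns, all guarded by
// v.Type.Size() == 2.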
27475 func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool {
// match: (ORL (ANDL (SHRW x (NEGQ (ADDQconst [-16] (ANDQconst [15] y)))) (SBBLcarrymask (CMPQconst [16] (NEGQ (ADDQconst [-16] (ANDQconst [15] y)))))) (SHLL x (ANDQconst [15] y)))
// cond: v.Type.Size() == 2
// result: (ROLW x y)
27479 for {
27480 _ = v.Args[1]
27481 v_0 := v.Args[0]
27482 if v_0.Op != OpAMD64ANDL {
27483 break
27484 }
27485 _ = v_0.Args[1]
27486 v_0_0 := v_0.Args[0]
27487 if v_0_0.Op != OpAMD64SHRW {
27488 break
27489 }
27490 _ = v_0_0.Args[1]
27491 x := v_0_0.Args[0]
27492 v_0_0_1 := v_0_0.Args[1]
27493 if v_0_0_1.Op != OpAMD64NEGQ {
27494 break
27495 }
27496 v_0_0_1_0 := v_0_0_1.Args[0]
27497 if v_0_0_1_0.Op != OpAMD64ADDQconst {
27498 break
27499 }
27500 if v_0_0_1_0.AuxInt != -16 {
27501 break
27502 }
27503 v_0_0_1_0_0 := v_0_0_1_0.Args[0]
27504 if v_0_0_1_0_0.Op != OpAMD64ANDQconst {
27505 break
27506 }
27507 if v_0_0_1_0_0.AuxInt != 15 {
27508 break
27509 }
27510 y := v_0_0_1_0_0.Args[0]
27511 v_0_1 := v_0.Args[1]
27512 if v_0_1.Op != OpAMD64SBBLcarrymask {
27513 break
27514 }
27515 v_0_1_0 := v_0_1.Args[0]
27516 if v_0_1_0.Op != OpAMD64CMPQconst {
27517 break
27518 }
27519 if v_0_1_0.AuxInt != 16 {
27520 break
27521 }
27522 v_0_1_0_0 := v_0_1_0.Args[0]
27523 if v_0_1_0_0.Op != OpAMD64NEGQ {
27524 break
27525 }
27526 v_0_1_0_0_0 := v_0_1_0_0.Args[0]
27527 if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
27528 break
27529 }
27530 if v_0_1_0_0_0.AuxInt != -16 {
27531 break
27532 }
27533 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
27534 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
27535 break
27536 }
27537 if v_0_1_0_0_0_0.AuxInt != 15 {
27538 break
27539 }
27540 if y != v_0_1_0_0_0_0.Args[0] {
27541 break
27542 }
27543 v_1 := v.Args[1]
27544 if v_1.Op != OpAMD64SHLL {
27545 break
27546 }
27547 _ = v_1.Args[1]
27548 if x != v_1.Args[0] {
27549 break
27550 }
27551 v_1_1 := v_1.Args[1]
27552 if v_1_1.Op != OpAMD64ANDQconst {
27553 break
27554 }
27555 if v_1_1.AuxInt != 15 {
27556 break
27557 }
27558 if y != v_1_1.Args[0] {
27559 break
27560 }
27561 if !(v.Type.Size() == 2) {
27562 break
27563 }
27564 v.reset(OpAMD64ROLW)
27565 v.AddArg(x)
27566 v.AddArg(y)
27567 return true
27568 }
// match: (ORL (ANDL (SBBLcarrymask (CMPQconst [16] (NEGQ (ADDQconst [-16] (ANDQconst [15] y))))) (SHRW x (NEGQ (ADDQconst [-16] (ANDQconst [15] y))))) (SHLL x (ANDQconst [15] y)))
// cond: v.Type.Size() == 2
// result: (ROLW x y)
27572 for {
27573 _ = v.Args[1]
27574 v_0 := v.Args[0]
27575 if v_0.Op != OpAMD64ANDL {
27576 break
27577 }
27578 _ = v_0.Args[1]
27579 v_0_0 := v_0.Args[0]
27580 if v_0_0.Op != OpAMD64SBBLcarrymask {
27581 break
27582 }
27583 v_0_0_0 := v_0_0.Args[0]
27584 if v_0_0_0.Op != OpAMD64CMPQconst {
27585 break
27586 }
27587 if v_0_0_0.AuxInt != 16 {
27588 break
27589 }
27590 v_0_0_0_0 := v_0_0_0.Args[0]
27591 if v_0_0_0_0.Op != OpAMD64NEGQ {
27592 break
27593 }
27594 v_0_0_0_0_0 := v_0_0_0_0.Args[0]
27595 if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
27596 break
27597 }
27598 if v_0_0_0_0_0.AuxInt != -16 {
27599 break
27600 }
27601 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
27602 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
27603 break
27604 }
27605 if v_0_0_0_0_0_0.AuxInt != 15 {
27606 break
27607 }
27608 y := v_0_0_0_0_0_0.Args[0]
27609 v_0_1 := v_0.Args[1]
27610 if v_0_1.Op != OpAMD64SHRW {
27611 break
27612 }
27613 _ = v_0_1.Args[1]
27614 x := v_0_1.Args[0]
27615 v_0_1_1 := v_0_1.Args[1]
27616 if v_0_1_1.Op != OpAMD64NEGQ {
27617 break
27618 }
27619 v_0_1_1_0 := v_0_1_1.Args[0]
27620 if v_0_1_1_0.Op != OpAMD64ADDQconst {
27621 break
27622 }
27623 if v_0_1_1_0.AuxInt != -16 {
27624 break
27625 }
27626 v_0_1_1_0_0 := v_0_1_1_0.Args[0]
27627 if v_0_1_1_0_0.Op != OpAMD64ANDQconst {
27628 break
27629 }
27630 if v_0_1_1_0_0.AuxInt != 15 {
27631 break
27632 }
27633 if y != v_0_1_1_0_0.Args[0] {
27634 break
27635 }
27636 v_1 := v.Args[1]
27637 if v_1.Op != OpAMD64SHLL {
27638 break
27639 }
27640 _ = v_1.Args[1]
27641 if x != v_1.Args[0] {
27642 break
27643 }
27644 v_1_1 := v_1.Args[1]
27645 if v_1_1.Op != OpAMD64ANDQconst {
27646 break
27647 }
27648 if v_1_1.AuxInt != 15 {
27649 break
27650 }
27651 if y != v_1_1.Args[0] {
27652 break
27653 }
27654 if !(v.Type.Size() == 2) {
27655 break
27656 }
27657 v.reset(OpAMD64ROLW)
27658 v.AddArg(x)
27659 v.AddArg(y)
27660 return true
27661 }
// match: (ORL (SHLL x (ANDLconst [15] y)) (ANDL (SHRW x (NEGL (ADDLconst [-16] (ANDLconst [15] y)))) (SBBLcarrymask (CMPLconst [16] (NEGL (ADDLconst [-16] (ANDLconst [15] y)))))))
// cond: v.Type.Size() == 2
// result: (ROLW x y)
27665 for {
27666 _ = v.Args[1]
27667 v_0 := v.Args[0]
27668 if v_0.Op != OpAMD64SHLL {
27669 break
27670 }
27671 _ = v_0.Args[1]
27672 x := v_0.Args[0]
27673 v_0_1 := v_0.Args[1]
27674 if v_0_1.Op != OpAMD64ANDLconst {
27675 break
27676 }
27677 if v_0_1.AuxInt != 15 {
27678 break
27679 }
27680 y := v_0_1.Args[0]
27681 v_1 := v.Args[1]
27682 if v_1.Op != OpAMD64ANDL {
27683 break
27684 }
27685 _ = v_1.Args[1]
27686 v_1_0 := v_1.Args[0]
27687 if v_1_0.Op != OpAMD64SHRW {
27688 break
27689 }
27690 _ = v_1_0.Args[1]
27691 if x != v_1_0.Args[0] {
27692 break
27693 }
27694 v_1_0_1 := v_1_0.Args[1]
27695 if v_1_0_1.Op != OpAMD64NEGL {
27696 break
27697 }
27698 v_1_0_1_0 := v_1_0_1.Args[0]
27699 if v_1_0_1_0.Op != OpAMD64ADDLconst {
27700 break
27701 }
27702 if v_1_0_1_0.AuxInt != -16 {
27703 break
27704 }
27705 v_1_0_1_0_0 := v_1_0_1_0.Args[0]
27706 if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
27707 break
27708 }
27709 if v_1_0_1_0_0.AuxInt != 15 {
27710 break
27711 }
27712 if y != v_1_0_1_0_0.Args[0] {
27713 break
27714 }
27715 v_1_1 := v_1.Args[1]
27716 if v_1_1.Op != OpAMD64SBBLcarrymask {
27717 break
27718 }
27719 v_1_1_0 := v_1_1.Args[0]
27720 if v_1_1_0.Op != OpAMD64CMPLconst {
27721 break
27722 }
27723 if v_1_1_0.AuxInt != 16 {
27724 break
27725 }
27726 v_1_1_0_0 := v_1_1_0.Args[0]
27727 if v_1_1_0_0.Op != OpAMD64NEGL {
27728 break
27729 }
27730 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
27731 if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
27732 break
27733 }
27734 if v_1_1_0_0_0.AuxInt != -16 {
27735 break
27736 }
27737 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
27738 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
27739 break
27740 }
27741 if v_1_1_0_0_0_0.AuxInt != 15 {
27742 break
27743 }
27744 if y != v_1_1_0_0_0_0.Args[0] {
27745 break
27746 }
27747 if !(v.Type.Size() == 2) {
27748 break
27749 }
27750 v.reset(OpAMD64ROLW)
27751 v.AddArg(x)
27752 v.AddArg(y)
27753 return true
27754 }
// match: (ORL (SHLL x (ANDLconst [15] y)) (ANDL (SBBLcarrymask (CMPLconst [16] (NEGL (ADDLconst [-16] (ANDLconst [15] y))))) (SHRW x (NEGL (ADDLconst [-16] (ANDLconst [15] y))))))
// cond: v.Type.Size() == 2
// result: (ROLW x y)
27758 for {
27759 _ = v.Args[1]
27760 v_0 := v.Args[0]
27761 if v_0.Op != OpAMD64SHLL {
27762 break
27763 }
27764 _ = v_0.Args[1]
27765 x := v_0.Args[0]
27766 v_0_1 := v_0.Args[1]
27767 if v_0_1.Op != OpAMD64ANDLconst {
27768 break
27769 }
27770 if v_0_1.AuxInt != 15 {
27771 break
27772 }
27773 y := v_0_1.Args[0]
27774 v_1 := v.Args[1]
27775 if v_1.Op != OpAMD64ANDL {
27776 break
27777 }
27778 _ = v_1.Args[1]
27779 v_1_0 := v_1.Args[0]
27780 if v_1_0.Op != OpAMD64SBBLcarrymask {
27781 break
27782 }
27783 v_1_0_0 := v_1_0.Args[0]
27784 if v_1_0_0.Op != OpAMD64CMPLconst {
27785 break
27786 }
27787 if v_1_0_0.AuxInt != 16 {
27788 break
27789 }
27790 v_1_0_0_0 := v_1_0_0.Args[0]
27791 if v_1_0_0_0.Op != OpAMD64NEGL {
27792 break
27793 }
27794 v_1_0_0_0_0 := v_1_0_0_0.Args[0]
27795 if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
27796 break
27797 }
27798 if v_1_0_0_0_0.AuxInt != -16 {
27799 break
27800 }
27801 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
27802 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
27803 break
27804 }
27805 if v_1_0_0_0_0_0.AuxInt != 15 {
27806 break
27807 }
27808 if y != v_1_0_0_0_0_0.Args[0] {
27809 break
27810 }
27811 v_1_1 := v_1.Args[1]
27812 if v_1_1.Op != OpAMD64SHRW {
27813 break
27814 }
27815 _ = v_1_1.Args[1]
27816 if x != v_1_1.Args[0] {
27817 break
27818 }
27819 v_1_1_1 := v_1_1.Args[1]
27820 if v_1_1_1.Op != OpAMD64NEGL {
27821 break
27822 }
27823 v_1_1_1_0 := v_1_1_1.Args[0]
27824 if v_1_1_1_0.Op != OpAMD64ADDLconst {
27825 break
27826 }
27827 if v_1_1_1_0.AuxInt != -16 {
27828 break
27829 }
27830 v_1_1_1_0_0 := v_1_1_1_0.Args[0]
27831 if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
27832 break
27833 }
27834 if v_1_1_1_0_0.AuxInt != 15 {
27835 break
27836 }
27837 if y != v_1_1_1_0_0.Args[0] {
27838 break
27839 }
27840 if !(v.Type.Size() == 2) {
27841 break
27842 }
27843 v.reset(OpAMD64ROLW)
27844 v.AddArg(x)
27845 v.AddArg(y)
27846 return true
27847 }
// match: (ORL (ANDL (SHRW x (NEGL (ADDLconst [-16] (ANDLconst [15] y)))) (SBBLcarrymask (CMPLconst [16] (NEGL (ADDLconst [-16] (ANDLconst [15] y)))))) (SHLL x (ANDLconst [15] y)))
// cond: v.Type.Size() == 2
// result: (ROLW x y)
27851 for {
27852 _ = v.Args[1]
27853 v_0 := v.Args[0]
27854 if v_0.Op != OpAMD64ANDL {
27855 break
27856 }
27857 _ = v_0.Args[1]
27858 v_0_0 := v_0.Args[0]
27859 if v_0_0.Op != OpAMD64SHRW {
27860 break
27861 }
27862 _ = v_0_0.Args[1]
27863 x := v_0_0.Args[0]
27864 v_0_0_1 := v_0_0.Args[1]
27865 if v_0_0_1.Op != OpAMD64NEGL {
27866 break
27867 }
27868 v_0_0_1_0 := v_0_0_1.Args[0]
27869 if v_0_0_1_0.Op != OpAMD64ADDLconst {
27870 break
27871 }
27872 if v_0_0_1_0.AuxInt != -16 {
27873 break
27874 }
27875 v_0_0_1_0_0 := v_0_0_1_0.Args[0]
27876 if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
27877 break
27878 }
27879 if v_0_0_1_0_0.AuxInt != 15 {
27880 break
27881 }
27882 y := v_0_0_1_0_0.Args[0]
27883 v_0_1 := v_0.Args[1]
27884 if v_0_1.Op != OpAMD64SBBLcarrymask {
27885 break
27886 }
27887 v_0_1_0 := v_0_1.Args[0]
27888 if v_0_1_0.Op != OpAMD64CMPLconst {
27889 break
27890 }
27891 if v_0_1_0.AuxInt != 16 {
27892 break
27893 }
27894 v_0_1_0_0 := v_0_1_0.Args[0]
27895 if v_0_1_0_0.Op != OpAMD64NEGL {
27896 break
27897 }
27898 v_0_1_0_0_0 := v_0_1_0_0.Args[0]
27899 if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
27900 break
27901 }
27902 if v_0_1_0_0_0.AuxInt != -16 {
27903 break
27904 }
27905 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
27906 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
27907 break
27908 }
27909 if v_0_1_0_0_0_0.AuxInt != 15 {
27910 break
27911 }
27912 if y != v_0_1_0_0_0_0.Args[0] {
27913 break
27914 }
27915 v_1 := v.Args[1]
27916 if v_1.Op != OpAMD64SHLL {
27917 break
27918 }
27919 _ = v_1.Args[1]
27920 if x != v_1.Args[0] {
27921 break
27922 }
27923 v_1_1 := v_1.Args[1]
27924 if v_1_1.Op != OpAMD64ANDLconst {
27925 break
27926 }
27927 if v_1_1.AuxInt != 15 {
27928 break
27929 }
27930 if y != v_1_1.Args[0] {
27931 break
27932 }
27933 if !(v.Type.Size() == 2) {
27934 break
27935 }
27936 v.reset(OpAMD64ROLW)
27937 v.AddArg(x)
27938 v.AddArg(y)
27939 return true
27940 }
// match: (ORL (ANDL (SBBLcarrymask (CMPLconst [16] (NEGL (ADDLconst [-16] (ANDLconst [15] y))))) (SHRW x (NEGL (ADDLconst [-16] (ANDLconst [15] y))))) (SHLL x (ANDLconst [15] y)))
// cond: v.Type.Size() == 2
// result: (ROLW x y)
27944 for {
27945 _ = v.Args[1]
27946 v_0 := v.Args[0]
27947 if v_0.Op != OpAMD64ANDL {
27948 break
27949 }
27950 _ = v_0.Args[1]
27951 v_0_0 := v_0.Args[0]
27952 if v_0_0.Op != OpAMD64SBBLcarrymask {
27953 break
27954 }
27955 v_0_0_0 := v_0_0.Args[0]
27956 if v_0_0_0.Op != OpAMD64CMPLconst {
27957 break
27958 }
27959 if v_0_0_0.AuxInt != 16 {
27960 break
27961 }
27962 v_0_0_0_0 := v_0_0_0.Args[0]
27963 if v_0_0_0_0.Op != OpAMD64NEGL {
27964 break
27965 }
27966 v_0_0_0_0_0 := v_0_0_0_0.Args[0]
27967 if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
27968 break
27969 }
27970 if v_0_0_0_0_0.AuxInt != -16 {
27971 break
27972 }
27973 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
27974 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
27975 break
27976 }
27977 if v_0_0_0_0_0_0.AuxInt != 15 {
27978 break
27979 }
27980 y := v_0_0_0_0_0_0.Args[0]
27981 v_0_1 := v_0.Args[1]
27982 if v_0_1.Op != OpAMD64SHRW {
27983 break
27984 }
27985 _ = v_0_1.Args[1]
27986 x := v_0_1.Args[0]
27987 v_0_1_1 := v_0_1.Args[1]
27988 if v_0_1_1.Op != OpAMD64NEGL {
27989 break
27990 }
27991 v_0_1_1_0 := v_0_1_1.Args[0]
27992 if v_0_1_1_0.Op != OpAMD64ADDLconst {
27993 break
27994 }
27995 if v_0_1_1_0.AuxInt != -16 {
27996 break
27997 }
27998 v_0_1_1_0_0 := v_0_1_1_0.Args[0]
27999 if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
28000 break
28001 }
28002 if v_0_1_1_0_0.AuxInt != 15 {
28003 break
28004 }
28005 if y != v_0_1_1_0_0.Args[0] {
28006 break
28007 }
28008 v_1 := v.Args[1]
28009 if v_1.Op != OpAMD64SHLL {
28010 break
28011 }
28012 _ = v_1.Args[1]
28013 if x != v_1.Args[0] {
28014 break
28015 }
28016 v_1_1 := v_1.Args[1]
28017 if v_1_1.Op != OpAMD64ANDLconst {
28018 break
28019 }
28020 if v_1_1.AuxInt != 15 {
28021 break
28022 }
28023 if y != v_1_1.Args[0] {
28024 break
28025 }
28026 if !(v.Type.Size() == 2) {
28027 break
28028 }
28029 v.reset(OpAMD64ROLW)
28030 v.AddArg(x)
28031 v.AddArg(y)
28032 return true
28033 }
// match: (ORL (SHRW x (ANDQconst [15] y)) (SHLL x (NEGQ (ADDQconst [-16] (ANDQconst [15] y)))))
// cond: v.Type.Size() == 2
// result: (RORW x y)
28037 for {
28038 _ = v.Args[1]
28039 v_0 := v.Args[0]
28040 if v_0.Op != OpAMD64SHRW {
28041 break
28042 }
28043 _ = v_0.Args[1]
28044 x := v_0.Args[0]
28045 v_0_1 := v_0.Args[1]
28046 if v_0_1.Op != OpAMD64ANDQconst {
28047 break
28048 }
28049 if v_0_1.AuxInt != 15 {
28050 break
28051 }
28052 y := v_0_1.Args[0]
28053 v_1 := v.Args[1]
28054 if v_1.Op != OpAMD64SHLL {
28055 break
28056 }
28057 _ = v_1.Args[1]
28058 if x != v_1.Args[0] {
28059 break
28060 }
28061 v_1_1 := v_1.Args[1]
28062 if v_1_1.Op != OpAMD64NEGQ {
28063 break
28064 }
28065 v_1_1_0 := v_1_1.Args[0]
28066 if v_1_1_0.Op != OpAMD64ADDQconst {
28067 break
28068 }
28069 if v_1_1_0.AuxInt != -16 {
28070 break
28071 }
28072 v_1_1_0_0 := v_1_1_0.Args[0]
28073 if v_1_1_0_0.Op != OpAMD64ANDQconst {
28074 break
28075 }
28076 if v_1_1_0_0.AuxInt != 15 {
28077 break
28078 }
28079 if y != v_1_1_0_0.Args[0] {
28080 break
28081 }
28082 if !(v.Type.Size() == 2) {
28083 break
28084 }
28085 v.reset(OpAMD64RORW)
28086 v.AddArg(x)
28087 v.AddArg(y)
28088 return true
28089 }
// match: (ORL (SHLL x (NEGQ (ADDQconst [-16] (ANDQconst [15] y)))) (SHRW x (ANDQconst [15] y)))
// cond: v.Type.Size() == 2
// result: (RORW x y)
28093 for {
28094 _ = v.Args[1]
28095 v_0 := v.Args[0]
28096 if v_0.Op != OpAMD64SHLL {
28097 break
28098 }
28099 _ = v_0.Args[1]
28100 x := v_0.Args[0]
28101 v_0_1 := v_0.Args[1]
28102 if v_0_1.Op != OpAMD64NEGQ {
28103 break
28104 }
28105 v_0_1_0 := v_0_1.Args[0]
28106 if v_0_1_0.Op != OpAMD64ADDQconst {
28107 break
28108 }
28109 if v_0_1_0.AuxInt != -16 {
28110 break
28111 }
28112 v_0_1_0_0 := v_0_1_0.Args[0]
28113 if v_0_1_0_0.Op != OpAMD64ANDQconst {
28114 break
28115 }
28116 if v_0_1_0_0.AuxInt != 15 {
28117 break
28118 }
28119 y := v_0_1_0_0.Args[0]
28120 v_1 := v.Args[1]
28121 if v_1.Op != OpAMD64SHRW {
28122 break
28123 }
28124 _ = v_1.Args[1]
28125 if x != v_1.Args[0] {
28126 break
28127 }
28128 v_1_1 := v_1.Args[1]
28129 if v_1_1.Op != OpAMD64ANDQconst {
28130 break
28131 }
28132 if v_1_1.AuxInt != 15 {
28133 break
28134 }
28135 if y != v_1_1.Args[0] {
28136 break
28137 }
28138 if !(v.Type.Size() == 2) {
28139 break
28140 }
28141 v.reset(OpAMD64RORW)
28142 v.AddArg(x)
28143 v.AddArg(y)
28144 return true
28145 }
// match: (ORL (SHRW x (ANDLconst [15] y)) (SHLL x (NEGL (ADDLconst [-16] (ANDLconst [15] y)))))
// cond: v.Type.Size() == 2
// result: (RORW x y)
28149 for {
28150 _ = v.Args[1]
28151 v_0 := v.Args[0]
28152 if v_0.Op != OpAMD64SHRW {
28153 break
28154 }
28155 _ = v_0.Args[1]
28156 x := v_0.Args[0]
28157 v_0_1 := v_0.Args[1]
28158 if v_0_1.Op != OpAMD64ANDLconst {
28159 break
28160 }
28161 if v_0_1.AuxInt != 15 {
28162 break
28163 }
28164 y := v_0_1.Args[0]
28165 v_1 := v.Args[1]
28166 if v_1.Op != OpAMD64SHLL {
28167 break
28168 }
28169 _ = v_1.Args[1]
28170 if x != v_1.Args[0] {
28171 break
28172 }
28173 v_1_1 := v_1.Args[1]
28174 if v_1_1.Op != OpAMD64NEGL {
28175 break
28176 }
28177 v_1_1_0 := v_1_1.Args[0]
28178 if v_1_1_0.Op != OpAMD64ADDLconst {
28179 break
28180 }
28181 if v_1_1_0.AuxInt != -16 {
28182 break
28183 }
28184 v_1_1_0_0 := v_1_1_0.Args[0]
28185 if v_1_1_0_0.Op != OpAMD64ANDLconst {
28186 break
28187 }
28188 if v_1_1_0_0.AuxInt != 15 {
28189 break
28190 }
28191 if y != v_1_1_0_0.Args[0] {
28192 break
28193 }
28194 if !(v.Type.Size() == 2) {
28195 break
28196 }
28197 v.reset(OpAMD64RORW)
28198 v.AddArg(x)
28199 v.AddArg(y)
28200 return true
28201 }
// match: (ORL (SHLL x (NEGL (ADDLconst [-16] (ANDLconst [15] y)))) (SHRW x (ANDLconst [15] y)))
// cond: v.Type.Size() == 2
// result: (RORW x y)
28205 for {
28206 _ = v.Args[1]
28207 v_0 := v.Args[0]
28208 if v_0.Op != OpAMD64SHLL {
28209 break
28210 }
28211 _ = v_0.Args[1]
28212 x := v_0.Args[0]
28213 v_0_1 := v_0.Args[1]
28214 if v_0_1.Op != OpAMD64NEGL {
28215 break
28216 }
28217 v_0_1_0 := v_0_1.Args[0]
28218 if v_0_1_0.Op != OpAMD64ADDLconst {
28219 break
28220 }
28221 if v_0_1_0.AuxInt != -16 {
28222 break
28223 }
28224 v_0_1_0_0 := v_0_1_0.Args[0]
28225 if v_0_1_0_0.Op != OpAMD64ANDLconst {
28226 break
28227 }
28228 if v_0_1_0_0.AuxInt != 15 {
28229 break
28230 }
28231 y := v_0_1_0_0.Args[0]
28232 v_1 := v.Args[1]
28233 if v_1.Op != OpAMD64SHRW {
28234 break
28235 }
28236 _ = v_1.Args[1]
28237 if x != v_1.Args[0] {
28238 break
28239 }
28240 v_1_1 := v_1.Args[1]
28241 if v_1_1.Op != OpAMD64ANDLconst {
28242 break
28243 }
28244 if v_1_1.AuxInt != 15 {
28245 break
28246 }
28247 if y != v_1_1.Args[0] {
28248 break
28249 }
28250 if !(v.Type.Size() == 2) {
28251 break
28252 }
28253 v.reset(OpAMD64RORW)
28254 v.AddArg(x)
28255 v.AddArg(y)
28256 return true
28257 }
28258 return false
28259 }
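// rewriteValueAMD64_OpAMD64ORL_40 covers the 8-bit rotates: every commuted
// ROLB form and the first RORB forms, guarded by v.Type.Size() == 1.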
28260 func rewriteValueAMD64_OpAMD64ORL_40(v *Value) bool {
// match: (ORL (SHLL x (ANDQconst [7] y)) (ANDL (SHRB x (NEGQ (ADDQconst [-8] (ANDQconst [7] y)))) (SBBLcarrymask (CMPQconst [8] (NEGQ (ADDQconst [-8] (ANDQconst [7] y)))))))
// cond: v.Type.Size() == 1
// result: (ROLB x y)
28264 for {
28265 _ = v.Args[1]
28266 v_0 := v.Args[0]
28267 if v_0.Op != OpAMD64SHLL {
28268 break
28269 }
28270 _ = v_0.Args[1]
28271 x := v_0.Args[0]
28272 v_0_1 := v_0.Args[1]
28273 if v_0_1.Op != OpAMD64ANDQconst {
28274 break
28275 }
28276 if v_0_1.AuxInt != 7 {
28277 break
28278 }
28279 y := v_0_1.Args[0]
28280 v_1 := v.Args[1]
28281 if v_1.Op != OpAMD64ANDL {
28282 break
28283 }
28284 _ = v_1.Args[1]
28285 v_1_0 := v_1.Args[0]
28286 if v_1_0.Op != OpAMD64SHRB {
28287 break
28288 }
28289 _ = v_1_0.Args[1]
28290 if x != v_1_0.Args[0] {
28291 break
28292 }
28293 v_1_0_1 := v_1_0.Args[1]
28294 if v_1_0_1.Op != OpAMD64NEGQ {
28295 break
28296 }
28297 v_1_0_1_0 := v_1_0_1.Args[0]
28298 if v_1_0_1_0.Op != OpAMD64ADDQconst {
28299 break
28300 }
28301 if v_1_0_1_0.AuxInt != -8 {
28302 break
28303 }
28304 v_1_0_1_0_0 := v_1_0_1_0.Args[0]
28305 if v_1_0_1_0_0.Op != OpAMD64ANDQconst {
28306 break
28307 }
28308 if v_1_0_1_0_0.AuxInt != 7 {
28309 break
28310 }
28311 if y != v_1_0_1_0_0.Args[0] {
28312 break
28313 }
28314 v_1_1 := v_1.Args[1]
28315 if v_1_1.Op != OpAMD64SBBLcarrymask {
28316 break
28317 }
28318 v_1_1_0 := v_1_1.Args[0]
28319 if v_1_1_0.Op != OpAMD64CMPQconst {
28320 break
28321 }
28322 if v_1_1_0.AuxInt != 8 {
28323 break
28324 }
28325 v_1_1_0_0 := v_1_1_0.Args[0]
28326 if v_1_1_0_0.Op != OpAMD64NEGQ {
28327 break
28328 }
28329 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
28330 if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
28331 break
28332 }
28333 if v_1_1_0_0_0.AuxInt != -8 {
28334 break
28335 }
28336 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
28337 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
28338 break
28339 }
28340 if v_1_1_0_0_0_0.AuxInt != 7 {
28341 break
28342 }
28343 if y != v_1_1_0_0_0_0.Args[0] {
28344 break
28345 }
28346 if !(v.Type.Size() == 1) {
28347 break
28348 }
28349 v.reset(OpAMD64ROLB)
28350 v.AddArg(x)
28351 v.AddArg(y)
28352 return true
28353 }
// match: (ORL (SHLL x (ANDQconst [7] y)) (ANDL (SBBLcarrymask (CMPQconst [8] (NEGQ (ADDQconst [-8] (ANDQconst [7] y))))) (SHRB x (NEGQ (ADDQconst [-8] (ANDQconst [7] y))))))
// cond: v.Type.Size() == 1
// result: (ROLB x y)
28357 for {
28358 _ = v.Args[1]
28359 v_0 := v.Args[0]
28360 if v_0.Op != OpAMD64SHLL {
28361 break
28362 }
28363 _ = v_0.Args[1]
28364 x := v_0.Args[0]
28365 v_0_1 := v_0.Args[1]
28366 if v_0_1.Op != OpAMD64ANDQconst {
28367 break
28368 }
28369 if v_0_1.AuxInt != 7 {
28370 break
28371 }
28372 y := v_0_1.Args[0]
28373 v_1 := v.Args[1]
28374 if v_1.Op != OpAMD64ANDL {
28375 break
28376 }
28377 _ = v_1.Args[1]
28378 v_1_0 := v_1.Args[0]
28379 if v_1_0.Op != OpAMD64SBBLcarrymask {
28380 break
28381 }
28382 v_1_0_0 := v_1_0.Args[0]
28383 if v_1_0_0.Op != OpAMD64CMPQconst {
28384 break
28385 }
28386 if v_1_0_0.AuxInt != 8 {
28387 break
28388 }
28389 v_1_0_0_0 := v_1_0_0.Args[0]
28390 if v_1_0_0_0.Op != OpAMD64NEGQ {
28391 break
28392 }
28393 v_1_0_0_0_0 := v_1_0_0_0.Args[0]
28394 if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
28395 break
28396 }
28397 if v_1_0_0_0_0.AuxInt != -8 {
28398 break
28399 }
28400 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
28401 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
28402 break
28403 }
28404 if v_1_0_0_0_0_0.AuxInt != 7 {
28405 break
28406 }
28407 if y != v_1_0_0_0_0_0.Args[0] {
28408 break
28409 }
28410 v_1_1 := v_1.Args[1]
28411 if v_1_1.Op != OpAMD64SHRB {
28412 break
28413 }
28414 _ = v_1_1.Args[1]
28415 if x != v_1_1.Args[0] {
28416 break
28417 }
28418 v_1_1_1 := v_1_1.Args[1]
28419 if v_1_1_1.Op != OpAMD64NEGQ {
28420 break
28421 }
28422 v_1_1_1_0 := v_1_1_1.Args[0]
28423 if v_1_1_1_0.Op != OpAMD64ADDQconst {
28424 break
28425 }
28426 if v_1_1_1_0.AuxInt != -8 {
28427 break
28428 }
28429 v_1_1_1_0_0 := v_1_1_1_0.Args[0]
28430 if v_1_1_1_0_0.Op != OpAMD64ANDQconst {
28431 break
28432 }
28433 if v_1_1_1_0_0.AuxInt != 7 {
28434 break
28435 }
28436 if y != v_1_1_1_0_0.Args[0] {
28437 break
28438 }
28439 if !(v.Type.Size() == 1) {
28440 break
28441 }
28442 v.reset(OpAMD64ROLB)
28443 v.AddArg(x)
28444 v.AddArg(y)
28445 return true
28446 }
// match: (ORL (ANDL (SHRB x (NEGQ (ADDQconst [-8] (ANDQconst [7] y)))) (SBBLcarrymask (CMPQconst [8] (NEGQ (ADDQconst [-8] (ANDQconst [7] y)))))) (SHLL x (ANDQconst [7] y)))
// cond: v.Type.Size() == 1
// result: (ROLB x y)
28450 for {
28451 _ = v.Args[1]
28452 v_0 := v.Args[0]
28453 if v_0.Op != OpAMD64ANDL {
28454 break
28455 }
28456 _ = v_0.Args[1]
28457 v_0_0 := v_0.Args[0]
28458 if v_0_0.Op != OpAMD64SHRB {
28459 break
28460 }
28461 _ = v_0_0.Args[1]
28462 x := v_0_0.Args[0]
28463 v_0_0_1 := v_0_0.Args[1]
28464 if v_0_0_1.Op != OpAMD64NEGQ {
28465 break
28466 }
28467 v_0_0_1_0 := v_0_0_1.Args[0]
28468 if v_0_0_1_0.Op != OpAMD64ADDQconst {
28469 break
28470 }
28471 if v_0_0_1_0.AuxInt != -8 {
28472 break
28473 }
28474 v_0_0_1_0_0 := v_0_0_1_0.Args[0]
28475 if v_0_0_1_0_0.Op != OpAMD64ANDQconst {
28476 break
28477 }
28478 if v_0_0_1_0_0.AuxInt != 7 {
28479 break
28480 }
28481 y := v_0_0_1_0_0.Args[0]
28482 v_0_1 := v_0.Args[1]
28483 if v_0_1.Op != OpAMD64SBBLcarrymask {
28484 break
28485 }
28486 v_0_1_0 := v_0_1.Args[0]
28487 if v_0_1_0.Op != OpAMD64CMPQconst {
28488 break
28489 }
28490 if v_0_1_0.AuxInt != 8 {
28491 break
28492 }
28493 v_0_1_0_0 := v_0_1_0.Args[0]
28494 if v_0_1_0_0.Op != OpAMD64NEGQ {
28495 break
28496 }
28497 v_0_1_0_0_0 := v_0_1_0_0.Args[0]
28498 if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
28499 break
28500 }
28501 if v_0_1_0_0_0.AuxInt != -8 {
28502 break
28503 }
28504 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
28505 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
28506 break
28507 }
28508 if v_0_1_0_0_0_0.AuxInt != 7 {
28509 break
28510 }
28511 if y != v_0_1_0_0_0_0.Args[0] {
28512 break
28513 }
28514 v_1 := v.Args[1]
28515 if v_1.Op != OpAMD64SHLL {
28516 break
28517 }
28518 _ = v_1.Args[1]
28519 if x != v_1.Args[0] {
28520 break
28521 }
28522 v_1_1 := v_1.Args[1]
28523 if v_1_1.Op != OpAMD64ANDQconst {
28524 break
28525 }
28526 if v_1_1.AuxInt != 7 {
28527 break
28528 }
28529 if y != v_1_1.Args[0] {
28530 break
28531 }
28532 if !(v.Type.Size() == 1) {
28533 break
28534 }
28535 v.reset(OpAMD64ROLB)
28536 v.AddArg(x)
28537 v.AddArg(y)
28538 return true
28539 }
// match: (ORL (ANDL (SBBLcarrymask (CMPQconst [8] (NEGQ (ADDQconst [-8] (ANDQconst [7] y))))) (SHRB x (NEGQ (ADDQconst [-8] (ANDQconst [7] y))))) (SHLL x (ANDQconst [7] y)))
// cond: v.Type.Size() == 1
// result: (ROLB x y)
28543 for {
28544 _ = v.Args[1]
28545 v_0 := v.Args[0]
28546 if v_0.Op != OpAMD64ANDL {
28547 break
28548 }
28549 _ = v_0.Args[1]
28550 v_0_0 := v_0.Args[0]
28551 if v_0_0.Op != OpAMD64SBBLcarrymask {
28552 break
28553 }
28554 v_0_0_0 := v_0_0.Args[0]
28555 if v_0_0_0.Op != OpAMD64CMPQconst {
28556 break
28557 }
28558 if v_0_0_0.AuxInt != 8 {
28559 break
28560 }
28561 v_0_0_0_0 := v_0_0_0.Args[0]
28562 if v_0_0_0_0.Op != OpAMD64NEGQ {
28563 break
28564 }
28565 v_0_0_0_0_0 := v_0_0_0_0.Args[0]
28566 if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
28567 break
28568 }
28569 if v_0_0_0_0_0.AuxInt != -8 {
28570 break
28571 }
28572 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
28573 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
28574 break
28575 }
28576 if v_0_0_0_0_0_0.AuxInt != 7 {
28577 break
28578 }
28579 y := v_0_0_0_0_0_0.Args[0]
28580 v_0_1 := v_0.Args[1]
28581 if v_0_1.Op != OpAMD64SHRB {
28582 break
28583 }
28584 _ = v_0_1.Args[1]
28585 x := v_0_1.Args[0]
28586 v_0_1_1 := v_0_1.Args[1]
28587 if v_0_1_1.Op != OpAMD64NEGQ {
28588 break
28589 }
28590 v_0_1_1_0 := v_0_1_1.Args[0]
28591 if v_0_1_1_0.Op != OpAMD64ADDQconst {
28592 break
28593 }
28594 if v_0_1_1_0.AuxInt != -8 {
28595 break
28596 }
28597 v_0_1_1_0_0 := v_0_1_1_0.Args[0]
28598 if v_0_1_1_0_0.Op != OpAMD64ANDQconst {
28599 break
28600 }
28601 if v_0_1_1_0_0.AuxInt != 7 {
28602 break
28603 }
28604 if y != v_0_1_1_0_0.Args[0] {
28605 break
28606 }
28607 v_1 := v.Args[1]
28608 if v_1.Op != OpAMD64SHLL {
28609 break
28610 }
28611 _ = v_1.Args[1]
28612 if x != v_1.Args[0] {
28613 break
28614 }
28615 v_1_1 := v_1.Args[1]
28616 if v_1_1.Op != OpAMD64ANDQconst {
28617 break
28618 }
28619 if v_1_1.AuxInt != 7 {
28620 break
28621 }
28622 if y != v_1_1.Args[0] {
28623 break
28624 }
28625 if !(v.Type.Size() == 1) {
28626 break
28627 }
28628 v.reset(OpAMD64ROLB)
28629 v.AddArg(x)
28630 v.AddArg(y)
28631 return true
28632 }
// match: (ORL (SHLL x (ANDLconst [7] y)) (ANDL (SHRB x (NEGL (ADDLconst [-8] (ANDLconst [7] y)))) (SBBLcarrymask (CMPLconst [8] (NEGL (ADDLconst [-8] (ANDLconst [7] y)))))))
// cond: v.Type.Size() == 1
// result: (ROLB x y)
28636 for {
28637 _ = v.Args[1]
28638 v_0 := v.Args[0]
28639 if v_0.Op != OpAMD64SHLL {
28640 break
28641 }
28642 _ = v_0.Args[1]
28643 x := v_0.Args[0]
28644 v_0_1 := v_0.Args[1]
28645 if v_0_1.Op != OpAMD64ANDLconst {
28646 break
28647 }
28648 if v_0_1.AuxInt != 7 {
28649 break
28650 }
28651 y := v_0_1.Args[0]
28652 v_1 := v.Args[1]
28653 if v_1.Op != OpAMD64ANDL {
28654 break
28655 }
28656 _ = v_1.Args[1]
28657 v_1_0 := v_1.Args[0]
28658 if v_1_0.Op != OpAMD64SHRB {
28659 break
28660 }
28661 _ = v_1_0.Args[1]
28662 if x != v_1_0.Args[0] {
28663 break
28664 }
28665 v_1_0_1 := v_1_0.Args[1]
28666 if v_1_0_1.Op != OpAMD64NEGL {
28667 break
28668 }
28669 v_1_0_1_0 := v_1_0_1.Args[0]
28670 if v_1_0_1_0.Op != OpAMD64ADDLconst {
28671 break
28672 }
28673 if v_1_0_1_0.AuxInt != -8 {
28674 break
28675 }
28676 v_1_0_1_0_0 := v_1_0_1_0.Args[0]
28677 if v_1_0_1_0_0.Op != OpAMD64ANDLconst {
28678 break
28679 }
28680 if v_1_0_1_0_0.AuxInt != 7 {
28681 break
28682 }
28683 if y != v_1_0_1_0_0.Args[0] {
28684 break
28685 }
28686 v_1_1 := v_1.Args[1]
28687 if v_1_1.Op != OpAMD64SBBLcarrymask {
28688 break
28689 }
28690 v_1_1_0 := v_1_1.Args[0]
28691 if v_1_1_0.Op != OpAMD64CMPLconst {
28692 break
28693 }
28694 if v_1_1_0.AuxInt != 8 {
28695 break
28696 }
28697 v_1_1_0_0 := v_1_1_0.Args[0]
28698 if v_1_1_0_0.Op != OpAMD64NEGL {
28699 break
28700 }
28701 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
28702 if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
28703 break
28704 }
28705 if v_1_1_0_0_0.AuxInt != -8 {
28706 break
28707 }
28708 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
28709 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
28710 break
28711 }
28712 if v_1_1_0_0_0_0.AuxInt != 7 {
28713 break
28714 }
28715 if y != v_1_1_0_0_0_0.Args[0] {
28716 break
28717 }
28718 if !(v.Type.Size() == 1) {
28719 break
28720 }
28721 v.reset(OpAMD64ROLB)
28722 v.AddArg(x)
28723 v.AddArg(y)
28724 return true
28725 }
// match: (ORL (SHLL x (ANDLconst [7] y)) (ANDL (SBBLcarrymask (CMPLconst [8] (NEGL (ADDLconst [-8] (ANDLconst [7] y))))) (SHRB x (NEGL (ADDLconst [-8] (ANDLconst [7] y))))))
// cond: v.Type.Size() == 1
// result: (ROLB x y)
28729 for {
28730 _ = v.Args[1]
28731 v_0 := v.Args[0]
28732 if v_0.Op != OpAMD64SHLL {
28733 break
28734 }
28735 _ = v_0.Args[1]
28736 x := v_0.Args[0]
28737 v_0_1 := v_0.Args[1]
28738 if v_0_1.Op != OpAMD64ANDLconst {
28739 break
28740 }
28741 if v_0_1.AuxInt != 7 {
28742 break
28743 }
28744 y := v_0_1.Args[0]
28745 v_1 := v.Args[1]
28746 if v_1.Op != OpAMD64ANDL {
28747 break
28748 }
28749 _ = v_1.Args[1]
28750 v_1_0 := v_1.Args[0]
28751 if v_1_0.Op != OpAMD64SBBLcarrymask {
28752 break
28753 }
28754 v_1_0_0 := v_1_0.Args[0]
28755 if v_1_0_0.Op != OpAMD64CMPLconst {
28756 break
28757 }
28758 if v_1_0_0.AuxInt != 8 {
28759 break
28760 }
28761 v_1_0_0_0 := v_1_0_0.Args[0]
28762 if v_1_0_0_0.Op != OpAMD64NEGL {
28763 break
28764 }
28765 v_1_0_0_0_0 := v_1_0_0_0.Args[0]
28766 if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
28767 break
28768 }
28769 if v_1_0_0_0_0.AuxInt != -8 {
28770 break
28771 }
28772 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
28773 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
28774 break
28775 }
28776 if v_1_0_0_0_0_0.AuxInt != 7 {
28777 break
28778 }
28779 if y != v_1_0_0_0_0_0.Args[0] {
28780 break
28781 }
28782 v_1_1 := v_1.Args[1]
28783 if v_1_1.Op != OpAMD64SHRB {
28784 break
28785 }
28786 _ = v_1_1.Args[1]
28787 if x != v_1_1.Args[0] {
28788 break
28789 }
28790 v_1_1_1 := v_1_1.Args[1]
28791 if v_1_1_1.Op != OpAMD64NEGL {
28792 break
28793 }
28794 v_1_1_1_0 := v_1_1_1.Args[0]
28795 if v_1_1_1_0.Op != OpAMD64ADDLconst {
28796 break
28797 }
28798 if v_1_1_1_0.AuxInt != -8 {
28799 break
28800 }
28801 v_1_1_1_0_0 := v_1_1_1_0.Args[0]
28802 if v_1_1_1_0_0.Op != OpAMD64ANDLconst {
28803 break
28804 }
28805 if v_1_1_1_0_0.AuxInt != 7 {
28806 break
28807 }
28808 if y != v_1_1_1_0_0.Args[0] {
28809 break
28810 }
28811 if !(v.Type.Size() == 1) {
28812 break
28813 }
28814 v.reset(OpAMD64ROLB)
28815 v.AddArg(x)
28816 v.AddArg(y)
28817 return true
28818 }
// match: (ORL (ANDL (SHRB x (NEGL (ADDLconst [-8] (ANDLconst [7] y)))) (SBBLcarrymask (CMPLconst [8] (NEGL (ADDLconst [-8] (ANDLconst [7] y)))))) (SHLL x (ANDLconst [7] y)))
// cond: v.Type.Size() == 1
// result: (ROLB x y)
28822 for {
28823 _ = v.Args[1]
28824 v_0 := v.Args[0]
28825 if v_0.Op != OpAMD64ANDL {
28826 break
28827 }
28828 _ = v_0.Args[1]
28829 v_0_0 := v_0.Args[0]
28830 if v_0_0.Op != OpAMD64SHRB {
28831 break
28832 }
28833 _ = v_0_0.Args[1]
28834 x := v_0_0.Args[0]
28835 v_0_0_1 := v_0_0.Args[1]
28836 if v_0_0_1.Op != OpAMD64NEGL {
28837 break
28838 }
28839 v_0_0_1_0 := v_0_0_1.Args[0]
28840 if v_0_0_1_0.Op != OpAMD64ADDLconst {
28841 break
28842 }
28843 if v_0_0_1_0.AuxInt != -8 {
28844 break
28845 }
28846 v_0_0_1_0_0 := v_0_0_1_0.Args[0]
28847 if v_0_0_1_0_0.Op != OpAMD64ANDLconst {
28848 break
28849 }
28850 if v_0_0_1_0_0.AuxInt != 7 {
28851 break
28852 }
28853 y := v_0_0_1_0_0.Args[0]
28854 v_0_1 := v_0.Args[1]
28855 if v_0_1.Op != OpAMD64SBBLcarrymask {
28856 break
28857 }
28858 v_0_1_0 := v_0_1.Args[0]
28859 if v_0_1_0.Op != OpAMD64CMPLconst {
28860 break
28861 }
28862 if v_0_1_0.AuxInt != 8 {
28863 break
28864 }
28865 v_0_1_0_0 := v_0_1_0.Args[0]
28866 if v_0_1_0_0.Op != OpAMD64NEGL {
28867 break
28868 }
28869 v_0_1_0_0_0 := v_0_1_0_0.Args[0]
28870 if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
28871 break
28872 }
28873 if v_0_1_0_0_0.AuxInt != -8 {
28874 break
28875 }
28876 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
28877 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
28878 break
28879 }
28880 if v_0_1_0_0_0_0.AuxInt != 7 {
28881 break
28882 }
28883 if y != v_0_1_0_0_0_0.Args[0] {
28884 break
28885 }
28886 v_1 := v.Args[1]
28887 if v_1.Op != OpAMD64SHLL {
28888 break
28889 }
28890 _ = v_1.Args[1]
28891 if x != v_1.Args[0] {
28892 break
28893 }
28894 v_1_1 := v_1.Args[1]
28895 if v_1_1.Op != OpAMD64ANDLconst {
28896 break
28897 }
28898 if v_1_1.AuxInt != 7 {
28899 break
28900 }
28901 if y != v_1_1.Args[0] {
28902 break
28903 }
28904 if !(v.Type.Size() == 1) {
28905 break
28906 }
28907 v.reset(OpAMD64ROLB)
28908 v.AddArg(x)
28909 v.AddArg(y)
28910 return true
28911 }
// match: (ORL (ANDL (SBBLcarrymask (CMPLconst [8] (NEGL (ADDLconst [-8] (ANDLconst [7] y))))) (SHRB x (NEGL (ADDLconst [-8] (ANDLconst [7] y))))) (SHLL x (ANDLconst [7] y)))
// cond: v.Type.Size() == 1
// result: (ROLB x y)
28915 for {
28916 _ = v.Args[1]
28917 v_0 := v.Args[0]
28918 if v_0.Op != OpAMD64ANDL {
28919 break
28920 }
28921 _ = v_0.Args[1]
28922 v_0_0 := v_0.Args[0]
28923 if v_0_0.Op != OpAMD64SBBLcarrymask {
28924 break
28925 }
28926 v_0_0_0 := v_0_0.Args[0]
28927 if v_0_0_0.Op != OpAMD64CMPLconst {
28928 break
28929 }
28930 if v_0_0_0.AuxInt != 8 {
28931 break
28932 }
28933 v_0_0_0_0 := v_0_0_0.Args[0]
28934 if v_0_0_0_0.Op != OpAMD64NEGL {
28935 break
28936 }
28937 v_0_0_0_0_0 := v_0_0_0_0.Args[0]
28938 if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
28939 break
28940 }
28941 if v_0_0_0_0_0.AuxInt != -8 {
28942 break
28943 }
28944 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
28945 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
28946 break
28947 }
28948 if v_0_0_0_0_0_0.AuxInt != 7 {
28949 break
28950 }
28951 y := v_0_0_0_0_0_0.Args[0]
28952 v_0_1 := v_0.Args[1]
28953 if v_0_1.Op != OpAMD64SHRB {
28954 break
28955 }
28956 _ = v_0_1.Args[1]
28957 x := v_0_1.Args[0]
28958 v_0_1_1 := v_0_1.Args[1]
28959 if v_0_1_1.Op != OpAMD64NEGL {
28960 break
28961 }
28962 v_0_1_1_0 := v_0_1_1.Args[0]
28963 if v_0_1_1_0.Op != OpAMD64ADDLconst {
28964 break
28965 }
28966 if v_0_1_1_0.AuxInt != -8 {
28967 break
28968 }
28969 v_0_1_1_0_0 := v_0_1_1_0.Args[0]
28970 if v_0_1_1_0_0.Op != OpAMD64ANDLconst {
28971 break
28972 }
28973 if v_0_1_1_0_0.AuxInt != 7 {
28974 break
28975 }
28976 if y != v_0_1_1_0_0.Args[0] {
28977 break
28978 }
28979 v_1 := v.Args[1]
28980 if v_1.Op != OpAMD64SHLL {
28981 break
28982 }
28983 _ = v_1.Args[1]
28984 if x != v_1.Args[0] {
28985 break
28986 }
28987 v_1_1 := v_1.Args[1]
28988 if v_1_1.Op != OpAMD64ANDLconst {
28989 break
28990 }
28991 if v_1_1.AuxInt != 7 {
28992 break
28993 }
28994 if y != v_1_1.Args[0] {
28995 break
28996 }
28997 if !(v.Type.Size() == 1) {
28998 break
28999 }
29000 v.reset(OpAMD64ROLB)
29001 v.AddArg(x)
29002 v.AddArg(y)
29003 return true
29004 }
// match: (ORL (SHRB x (ANDQconst [7] y)) (SHLL x (NEGQ (ADDQconst [-8] (ANDQconst [7] y)))))
// cond: v.Type.Size() == 1
// result: (RORB x y)
29008 for {
29009 _ = v.Args[1]
29010 v_0 := v.Args[0]
29011 if v_0.Op != OpAMD64SHRB {
29012 break
29013 }
29014 _ = v_0.Args[1]
29015 x := v_0.Args[0]
29016 v_0_1 := v_0.Args[1]
29017 if v_0_1.Op != OpAMD64ANDQconst {
29018 break
29019 }
29020 if v_0_1.AuxInt != 7 {
29021 break
29022 }
29023 y := v_0_1.Args[0]
29024 v_1 := v.Args[1]
29025 if v_1.Op != OpAMD64SHLL {
29026 break
29027 }
29028 _ = v_1.Args[1]
29029 if x != v_1.Args[0] {
29030 break
29031 }
29032 v_1_1 := v_1.Args[1]
29033 if v_1_1.Op != OpAMD64NEGQ {
29034 break
29035 }
29036 v_1_1_0 := v_1_1.Args[0]
29037 if v_1_1_0.Op != OpAMD64ADDQconst {
29038 break
29039 }
29040 if v_1_1_0.AuxInt != -8 {
29041 break
29042 }
29043 v_1_1_0_0 := v_1_1_0.Args[0]
29044 if v_1_1_0_0.Op != OpAMD64ANDQconst {
29045 break
29046 }
29047 if v_1_1_0_0.AuxInt != 7 {
29048 break
29049 }
29050 if y != v_1_1_0_0.Args[0] {
29051 break
29052 }
29053 if !(v.Type.Size() == 1) {
29054 break
29055 }
29056 v.reset(OpAMD64RORB)
29057 v.AddArg(x)
29058 v.AddArg(y)
29059 return true
29060 }
// match: (ORL (SHLL x (NEGQ (ADDQconst [-8] (ANDQconst [7] y)))) (SHRB x (ANDQconst [7] y)))
// cond: v.Type.Size() == 1
// result: (RORB x y)
29064 for {
29065 _ = v.Args[1]
29066 v_0 := v.Args[0]
29067 if v_0.Op != OpAMD64SHLL {
29068 break
29069 }
29070 _ = v_0.Args[1]
29071 x := v_0.Args[0]
29072 v_0_1 := v_0.Args[1]
29073 if v_0_1.Op != OpAMD64NEGQ {
29074 break
29075 }
29076 v_0_1_0 := v_0_1.Args[0]
29077 if v_0_1_0.Op != OpAMD64ADDQconst {
29078 break
29079 }
29080 if v_0_1_0.AuxInt != -8 {
29081 break
29082 }
29083 v_0_1_0_0 := v_0_1_0.Args[0]
29084 if v_0_1_0_0.Op != OpAMD64ANDQconst {
29085 break
29086 }
29087 if v_0_1_0_0.AuxInt != 7 {
29088 break
29089 }
29090 y := v_0_1_0_0.Args[0]
29091 v_1 := v.Args[1]
29092 if v_1.Op != OpAMD64SHRB {
29093 break
29094 }
29095 _ = v_1.Args[1]
29096 if x != v_1.Args[0] {
29097 break
29098 }
29099 v_1_1 := v_1.Args[1]
29100 if v_1_1.Op != OpAMD64ANDQconst {
29101 break
29102 }
29103 if v_1_1.AuxInt != 7 {
29104 break
29105 }
29106 if y != v_1_1.Args[0] {
29107 break
29108 }
29109 if !(v.Type.Size() == 1) {
29110 break
29111 }
29112 v.reset(OpAMD64RORB)
29113 v.AddArg(x)
29114 v.AddArg(y)
29115 return true
29116 }
29117 return false
29118 }
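// rewriteValueAMD64_OpAMD64ORL_50 finishes the RORB forms, simplifies
// (ORL x x) to x, and starts the little-endian load-combining rules: two
// adjacent narrow loads joined by a shifted OR become one wider load,
// provided each intermediate value has a single use (the Uses == 1 checks),
// the loads share a block where the combined load can be placed
// (mergePoint(b, ...) != nil), and the replaced values can be clobbered.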
29119 func rewriteValueAMD64_OpAMD64ORL_50(v *Value) bool {
29120 b := v.Block
29121 typ := &b.Func.Config.Types
// match: (ORL (SHRB x (ANDLconst [7] y)) (SHLL x (NEGL (ADDLconst [-8] (ANDLconst [7] y)))))
// cond: v.Type.Size() == 1
// result: (RORB x y)
29125 for {
29126 _ = v.Args[1]
29127 v_0 := v.Args[0]
29128 if v_0.Op != OpAMD64SHRB {
29129 break
29130 }
29131 _ = v_0.Args[1]
29132 x := v_0.Args[0]
29133 v_0_1 := v_0.Args[1]
29134 if v_0_1.Op != OpAMD64ANDLconst {
29135 break
29136 }
29137 if v_0_1.AuxInt != 7 {
29138 break
29139 }
29140 y := v_0_1.Args[0]
29141 v_1 := v.Args[1]
29142 if v_1.Op != OpAMD64SHLL {
29143 break
29144 }
29145 _ = v_1.Args[1]
29146 if x != v_1.Args[0] {
29147 break
29148 }
29149 v_1_1 := v_1.Args[1]
29150 if v_1_1.Op != OpAMD64NEGL {
29151 break
29152 }
29153 v_1_1_0 := v_1_1.Args[0]
29154 if v_1_1_0.Op != OpAMD64ADDLconst {
29155 break
29156 }
29157 if v_1_1_0.AuxInt != -8 {
29158 break
29159 }
29160 v_1_1_0_0 := v_1_1_0.Args[0]
29161 if v_1_1_0_0.Op != OpAMD64ANDLconst {
29162 break
29163 }
29164 if v_1_1_0_0.AuxInt != 7 {
29165 break
29166 }
29167 if y != v_1_1_0_0.Args[0] {
29168 break
29169 }
29170 if !(v.Type.Size() == 1) {
29171 break
29172 }
29173 v.reset(OpAMD64RORB)
29174 v.AddArg(x)
29175 v.AddArg(y)
29176 return true
29177 }
// match: (ORL (SHLL x (NEGL (ADDLconst [-8] (ANDLconst [7] y)))) (SHRB x (ANDLconst [7] y)))
// cond: v.Type.Size() == 1
// result: (RORB x y)
29181 for {
29182 _ = v.Args[1]
29183 v_0 := v.Args[0]
29184 if v_0.Op != OpAMD64SHLL {
29185 break
29186 }
29187 _ = v_0.Args[1]
29188 x := v_0.Args[0]
29189 v_0_1 := v_0.Args[1]
29190 if v_0_1.Op != OpAMD64NEGL {
29191 break
29192 }
29193 v_0_1_0 := v_0_1.Args[0]
29194 if v_0_1_0.Op != OpAMD64ADDLconst {
29195 break
29196 }
29197 if v_0_1_0.AuxInt != -8 {
29198 break
29199 }
29200 v_0_1_0_0 := v_0_1_0.Args[0]
29201 if v_0_1_0_0.Op != OpAMD64ANDLconst {
29202 break
29203 }
29204 if v_0_1_0_0.AuxInt != 7 {
29205 break
29206 }
29207 y := v_0_1_0_0.Args[0]
29208 v_1 := v.Args[1]
29209 if v_1.Op != OpAMD64SHRB {
29210 break
29211 }
29212 _ = v_1.Args[1]
29213 if x != v_1.Args[0] {
29214 break
29215 }
29216 v_1_1 := v_1.Args[1]
29217 if v_1_1.Op != OpAMD64ANDLconst {
29218 break
29219 }
29220 if v_1_1.AuxInt != 7 {
29221 break
29222 }
29223 if y != v_1_1.Args[0] {
29224 break
29225 }
29226 if !(v.Type.Size() == 1) {
29227 break
29228 }
29229 v.reset(OpAMD64RORB)
29230 v.AddArg(x)
29231 v.AddArg(y)
29232 return true
29233 }
// match: (ORL x x)
// cond:
// result: x
29237 for {
29238 x := v.Args[1]
29239 if x != v.Args[0] {
29240 break
29241 }
29242 v.reset(OpCopy)
29243 v.Type = x.Type
29244 v.AddArg(x)
29245 return true
29246 }
// match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
29250 for {
29251 _ = v.Args[1]
29252 x0 := v.Args[0]
29253 if x0.Op != OpAMD64MOVBload {
29254 break
29255 }
29256 i0 := x0.AuxInt
29257 s := x0.Aux
29258 mem := x0.Args[1]
29259 p := x0.Args[0]
29260 sh := v.Args[1]
29261 if sh.Op != OpAMD64SHLLconst {
29262 break
29263 }
29264 if sh.AuxInt != 8 {
29265 break
29266 }
29267 x1 := sh.Args[0]
29268 if x1.Op != OpAMD64MOVBload {
29269 break
29270 }
29271 i1 := x1.AuxInt
29272 if x1.Aux != s {
29273 break
29274 }
29275 _ = x1.Args[1]
29276 if p != x1.Args[0] {
29277 break
29278 }
29279 if mem != x1.Args[1] {
29280 break
29281 }
29282 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
29283 break
29284 }
29285 b = mergePoint(b, x0, x1)
29286 v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
29287 v.reset(OpCopy)
29288 v.AddArg(v0)
29289 v0.AuxInt = i0
29290 v0.Aux = s
29291 v0.AddArg(p)
29292 v0.AddArg(mem)
29293 return true
29294 }
// match: (ORL sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
29298 for {
29299 _ = v.Args[1]
29300 sh := v.Args[0]
29301 if sh.Op != OpAMD64SHLLconst {
29302 break
29303 }
29304 if sh.AuxInt != 8 {
29305 break
29306 }
29307 x1 := sh.Args[0]
29308 if x1.Op != OpAMD64MOVBload {
29309 break
29310 }
29311 i1 := x1.AuxInt
29312 s := x1.Aux
29313 mem := x1.Args[1]
29314 p := x1.Args[0]
29315 x0 := v.Args[1]
29316 if x0.Op != OpAMD64MOVBload {
29317 break
29318 }
29319 i0 := x0.AuxInt
29320 if x0.Aux != s {
29321 break
29322 }
29323 _ = x0.Args[1]
29324 if p != x0.Args[0] {
29325 break
29326 }
29327 if mem != x0.Args[1] {
29328 break
29329 }
29330 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
29331 break
29332 }
29333 b = mergePoint(b, x0, x1)
29334 v0 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
29335 v.reset(OpCopy)
29336 v.AddArg(v0)
29337 v0.AuxInt = i0
29338 v0.Aux = s
29339 v0.AddArg(p)
29340 v0.AddArg(mem)
29341 return true
29342 }
// match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
29346 for {
29347 _ = v.Args[1]
29348 x0 := v.Args[0]
29349 if x0.Op != OpAMD64MOVWload {
29350 break
29351 }
29352 i0 := x0.AuxInt
29353 s := x0.Aux
29354 mem := x0.Args[1]
29355 p := x0.Args[0]
29356 sh := v.Args[1]
29357 if sh.Op != OpAMD64SHLLconst {
29358 break
29359 }
29360 if sh.AuxInt != 16 {
29361 break
29362 }
29363 x1 := sh.Args[0]
29364 if x1.Op != OpAMD64MOVWload {
29365 break
29366 }
29367 i1 := x1.AuxInt
29368 if x1.Aux != s {
29369 break
29370 }
29371 _ = x1.Args[1]
29372 if p != x1.Args[0] {
29373 break
29374 }
29375 if mem != x1.Args[1] {
29376 break
29377 }
29378 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
29379 break
29380 }
29381 b = mergePoint(b, x0, x1)
29382 v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
29383 v.reset(OpCopy)
29384 v.AddArg(v0)
29385 v0.AuxInt = i0
29386 v0.Aux = s
29387 v0.AddArg(p)
29388 v0.AddArg(mem)
29389 return true
29390 }
// match: (ORL sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
// result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
29394 for {
29395 _ = v.Args[1]
29396 sh := v.Args[0]
29397 if sh.Op != OpAMD64SHLLconst {
29398 break
29399 }
29400 if sh.AuxInt != 16 {
29401 break
29402 }
29403 x1 := sh.Args[0]
29404 if x1.Op != OpAMD64MOVWload {
29405 break
29406 }
29407 i1 := x1.AuxInt
29408 s := x1.Aux
29409 mem := x1.Args[1]
29410 p := x1.Args[0]
29411 x0 := v.Args[1]
29412 if x0.Op != OpAMD64MOVWload {
29413 break
29414 }
29415 i0 := x0.AuxInt
29416 if x0.Aux != s {
29417 break
29418 }
29419 _ = x0.Args[1]
29420 if p != x0.Args[0] {
29421 break
29422 }
29423 if mem != x0.Args[1] {
29424 break
29425 }
29426 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
29427 break
29428 }
29429 b = mergePoint(b, x0, x1)
29430 v0 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
29431 v.reset(OpCopy)
29432 v.AddArg(v0)
29433 v0.AuxInt = i0
29434 v0.Aux = s
29435 v0.AddArg(p)
29436 v0.AddArg(mem)
29437 return true
29438 }
// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
29442 for {
29443 _ = v.Args[1]
29444 s1 := v.Args[0]
29445 if s1.Op != OpAMD64SHLLconst {
29446 break
29447 }
29448 j1 := s1.AuxInt
29449 x1 := s1.Args[0]
29450 if x1.Op != OpAMD64MOVBload {
29451 break
29452 }
29453 i1 := x1.AuxInt
29454 s := x1.Aux
29455 mem := x1.Args[1]
29456 p := x1.Args[0]
29457 or := v.Args[1]
29458 if or.Op != OpAMD64ORL {
29459 break
29460 }
29461 y := or.Args[1]
29462 s0 := or.Args[0]
29463 if s0.Op != OpAMD64SHLLconst {
29464 break
29465 }
29466 j0 := s0.AuxInt
29467 x0 := s0.Args[0]
29468 if x0.Op != OpAMD64MOVBload {
29469 break
29470 }
29471 i0 := x0.AuxInt
29472 if x0.Aux != s {
29473 break
29474 }
29475 _ = x0.Args[1]
29476 if p != x0.Args[0] {
29477 break
29478 }
29479 if mem != x0.Args[1] {
29480 break
29481 }
29482 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
29483 break
29484 }
29485 b = mergePoint(b, x0, x1, y)
29486 v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
29487 v.reset(OpCopy)
29488 v.AddArg(v0)
29489 v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
29490 v1.AuxInt = j0
29491 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
29492 v2.AuxInt = i0
29493 v2.Aux = s
29494 v2.AddArg(p)
29495 v2.AddArg(mem)
29496 v1.AddArg(v2)
29497 v0.AddArg(v1)
29498 v0.AddArg(y)
29499 return true
29500 }
// match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))))
// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
29504 for {
29505 _ = v.Args[1]
29506 s1 := v.Args[0]
29507 if s1.Op != OpAMD64SHLLconst {
29508 break
29509 }
29510 j1 := s1.AuxInt
29511 x1 := s1.Args[0]
29512 if x1.Op != OpAMD64MOVBload {
29513 break
29514 }
29515 i1 := x1.AuxInt
29516 s := x1.Aux
29517 mem := x1.Args[1]
29518 p := x1.Args[0]
29519 or := v.Args[1]
29520 if or.Op != OpAMD64ORL {
29521 break
29522 }
29523 _ = or.Args[1]
29524 y := or.Args[0]
29525 s0 := or.Args[1]
29526 if s0.Op != OpAMD64SHLLconst {
29527 break
29528 }
29529 j0 := s0.AuxInt
29530 x0 := s0.Args[0]
29531 if x0.Op != OpAMD64MOVBload {
29532 break
29533 }
29534 i0 := x0.AuxInt
29535 if x0.Aux != s {
29536 break
29537 }
29538 _ = x0.Args[1]
29539 if p != x0.Args[0] {
29540 break
29541 }
29542 if mem != x0.Args[1] {
29543 break
29544 }
29545 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
29546 break
29547 }
29548 b = mergePoint(b, x0, x1, y)
29549 v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
29550 v.reset(OpCopy)
29551 v.AddArg(v0)
29552 v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
29553 v1.AuxInt = j0
29554 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
29555 v2.AuxInt = i0
29556 v2.Aux = s
29557 v2.AddArg(p)
29558 v2.AddArg(mem)
29559 v1.AddArg(v2)
29560 v0.AddArg(v1)
29561 v0.AddArg(y)
29562 return true
29563 }
// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
// cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_60(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
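	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload <typ.UInt16> [i0] {s} p mem)) y)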
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
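	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)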
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
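	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)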
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
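	// match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)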
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
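	// match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)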
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
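	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)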
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
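	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)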
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
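	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)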
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
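	// match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)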
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
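	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)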
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_70(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
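	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)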
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
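	// match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)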
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
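	// match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)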
	for {
		_ = v.Args[1]
		x0 := v.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
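	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)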
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
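	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)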
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
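	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)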
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
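	// match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)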
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		x1 := sh.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		x0 := v.Args[1]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = i0
		v0.Aux = s
		v0.AddArg(p)
		v0.AddArg(idx)
		v0.AddArg(mem)
		return true
	}
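	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)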
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
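	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)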
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
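	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)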
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_80(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
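	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)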
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
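	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)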
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
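	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)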
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
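	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)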
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
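	// match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)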
	for {
		_ = v.Args[1]
		s1 := v.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
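	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)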
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
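	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)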
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
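	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)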
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
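	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)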
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
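	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)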
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_90(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
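	// match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)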
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
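	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)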
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
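	// match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)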
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
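	// match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (ROLWconst <v.Type> [8] (MOVWload <typ.UInt16> [i0] {s} p mem))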
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
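	// match: (ORL sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (ROLWconst <v.Type> [8] (MOVWload <typ.UInt16> [i0] {s} p mem))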
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
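	// match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (BSWAPL <v.Type> (MOVLload <typ.UInt32> [i0] {s} p mem))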
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
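	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (BSWAPL <v.Type> (MOVLload <typ.UInt32> [i0] {s} p mem))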
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
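	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload <typ.UInt16> [i0] {s} p mem))) y)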
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
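	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload <typ.UInt16> [i0] {s} p mem))) y)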
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
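	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload <typ.UInt16> [i0] {s} p mem))) y)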
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL_100(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
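	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload <typ.UInt16> [i0] {s} p mem))) y)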
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
32137 for {
32138 _ = v.Args[1]
32139 x1 := v.Args[0]
32140 if x1.Op != OpAMD64MOVBloadidx1 {
32141 break
32142 }
32143 i1 := x1.AuxInt
32144 s := x1.Aux
32145 mem := x1.Args[2]
32146 p := x1.Args[0]
32147 idx := x1.Args[1]
32148 sh := v.Args[1]
32149 if sh.Op != OpAMD64SHLLconst {
32150 break
32151 }
32152 if sh.AuxInt != 8 {
32153 break
32154 }
32155 x0 := sh.Args[0]
32156 if x0.Op != OpAMD64MOVBloadidx1 {
32157 break
32158 }
32159 i0 := x0.AuxInt
32160 if x0.Aux != s {
32161 break
32162 }
32163 _ = x0.Args[2]
32164 if p != x0.Args[0] {
32165 break
32166 }
32167 if idx != x0.Args[1] {
32168 break
32169 }
32170 if mem != x0.Args[2] {
32171 break
32172 }
32173 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
32174 break
32175 }
32176 b = mergePoint(b, x0, x1)
32177 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
32178 v.reset(OpCopy)
32179 v.AddArg(v0)
32180 v0.AuxInt = 8
32181 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
32182 v1.AuxInt = i0
32183 v1.Aux = s
32184 v1.AddArg(p)
32185 v1.AddArg(idx)
32186 v1.AddArg(mem)
32187 v0.AddArg(v1)
32188 return true
32189 }
32190
32191
32192
32193 for {
32194 _ = v.Args[1]
32195 x1 := v.Args[0]
32196 if x1.Op != OpAMD64MOVBloadidx1 {
32197 break
32198 }
32199 i1 := x1.AuxInt
32200 s := x1.Aux
32201 mem := x1.Args[2]
32202 idx := x1.Args[0]
32203 p := x1.Args[1]
32204 sh := v.Args[1]
32205 if sh.Op != OpAMD64SHLLconst {
32206 break
32207 }
32208 if sh.AuxInt != 8 {
32209 break
32210 }
32211 x0 := sh.Args[0]
32212 if x0.Op != OpAMD64MOVBloadidx1 {
32213 break
32214 }
32215 i0 := x0.AuxInt
32216 if x0.Aux != s {
32217 break
32218 }
32219 _ = x0.Args[2]
32220 if p != x0.Args[0] {
32221 break
32222 }
32223 if idx != x0.Args[1] {
32224 break
32225 }
32226 if mem != x0.Args[2] {
32227 break
32228 }
32229 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
32230 break
32231 }
32232 b = mergePoint(b, x0, x1)
32233 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
32234 v.reset(OpCopy)
32235 v.AddArg(v0)
32236 v0.AuxInt = 8
32237 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
32238 v1.AuxInt = i0
32239 v1.Aux = s
32240 v1.AddArg(p)
32241 v1.AddArg(idx)
32242 v1.AddArg(mem)
32243 v0.AddArg(v1)
32244 return true
32245 }
32246
32247
32248
32249 for {
32250 _ = v.Args[1]
32251 x1 := v.Args[0]
32252 if x1.Op != OpAMD64MOVBloadidx1 {
32253 break
32254 }
32255 i1 := x1.AuxInt
32256 s := x1.Aux
32257 mem := x1.Args[2]
32258 p := x1.Args[0]
32259 idx := x1.Args[1]
32260 sh := v.Args[1]
32261 if sh.Op != OpAMD64SHLLconst {
32262 break
32263 }
32264 if sh.AuxInt != 8 {
32265 break
32266 }
32267 x0 := sh.Args[0]
32268 if x0.Op != OpAMD64MOVBloadidx1 {
32269 break
32270 }
32271 i0 := x0.AuxInt
32272 if x0.Aux != s {
32273 break
32274 }
32275 _ = x0.Args[2]
32276 if idx != x0.Args[0] {
32277 break
32278 }
32279 if p != x0.Args[1] {
32280 break
32281 }
32282 if mem != x0.Args[2] {
32283 break
32284 }
32285 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
32286 break
32287 }
32288 b = mergePoint(b, x0, x1)
32289 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
32290 v.reset(OpCopy)
32291 v.AddArg(v0)
32292 v0.AuxInt = 8
32293 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
32294 v1.AuxInt = i0
32295 v1.Aux = s
32296 v1.AddArg(p)
32297 v1.AddArg(idx)
32298 v1.AddArg(mem)
32299 v0.AddArg(v1)
32300 return true
32301 }
32302
32303
32304
32305 for {
32306 _ = v.Args[1]
32307 x1 := v.Args[0]
32308 if x1.Op != OpAMD64MOVBloadidx1 {
32309 break
32310 }
32311 i1 := x1.AuxInt
32312 s := x1.Aux
32313 mem := x1.Args[2]
32314 idx := x1.Args[0]
32315 p := x1.Args[1]
32316 sh := v.Args[1]
32317 if sh.Op != OpAMD64SHLLconst {
32318 break
32319 }
32320 if sh.AuxInt != 8 {
32321 break
32322 }
32323 x0 := sh.Args[0]
32324 if x0.Op != OpAMD64MOVBloadidx1 {
32325 break
32326 }
32327 i0 := x0.AuxInt
32328 if x0.Aux != s {
32329 break
32330 }
32331 _ = x0.Args[2]
32332 if idx != x0.Args[0] {
32333 break
32334 }
32335 if p != x0.Args[1] {
32336 break
32337 }
32338 if mem != x0.Args[2] {
32339 break
32340 }
32341 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
32342 break
32343 }
32344 b = mergePoint(b, x0, x1)
32345 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
32346 v.reset(OpCopy)
32347 v.AddArg(v0)
32348 v0.AuxInt = 8
32349 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
32350 v1.AuxInt = i0
32351 v1.Aux = s
32352 v1.AddArg(p)
32353 v1.AddArg(idx)
32354 v1.AddArg(mem)
32355 v0.AddArg(v1)
32356 return true
32357 }
32358
32359
32360
32361 for {
32362 _ = v.Args[1]
32363 sh := v.Args[0]
32364 if sh.Op != OpAMD64SHLLconst {
32365 break
32366 }
32367 if sh.AuxInt != 8 {
32368 break
32369 }
32370 x0 := sh.Args[0]
32371 if x0.Op != OpAMD64MOVBloadidx1 {
32372 break
32373 }
32374 i0 := x0.AuxInt
32375 s := x0.Aux
32376 mem := x0.Args[2]
32377 p := x0.Args[0]
32378 idx := x0.Args[1]
32379 x1 := v.Args[1]
32380 if x1.Op != OpAMD64MOVBloadidx1 {
32381 break
32382 }
32383 i1 := x1.AuxInt
32384 if x1.Aux != s {
32385 break
32386 }
32387 _ = x1.Args[2]
32388 if p != x1.Args[0] {
32389 break
32390 }
32391 if idx != x1.Args[1] {
32392 break
32393 }
32394 if mem != x1.Args[2] {
32395 break
32396 }
32397 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
32398 break
32399 }
32400 b = mergePoint(b, x0, x1)
32401 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
32402 v.reset(OpCopy)
32403 v.AddArg(v0)
32404 v0.AuxInt = 8
32405 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
32406 v1.AuxInt = i0
32407 v1.Aux = s
32408 v1.AddArg(p)
32409 v1.AddArg(idx)
32410 v1.AddArg(mem)
32411 v0.AddArg(v1)
32412 return true
32413 }
32414
32415
32416
32417 for {
32418 _ = v.Args[1]
32419 sh := v.Args[0]
32420 if sh.Op != OpAMD64SHLLconst {
32421 break
32422 }
32423 if sh.AuxInt != 8 {
32424 break
32425 }
32426 x0 := sh.Args[0]
32427 if x0.Op != OpAMD64MOVBloadidx1 {
32428 break
32429 }
32430 i0 := x0.AuxInt
32431 s := x0.Aux
32432 mem := x0.Args[2]
32433 idx := x0.Args[0]
32434 p := x0.Args[1]
32435 x1 := v.Args[1]
32436 if x1.Op != OpAMD64MOVBloadidx1 {
32437 break
32438 }
32439 i1 := x1.AuxInt
32440 if x1.Aux != s {
32441 break
32442 }
32443 _ = x1.Args[2]
32444 if p != x1.Args[0] {
32445 break
32446 }
32447 if idx != x1.Args[1] {
32448 break
32449 }
32450 if mem != x1.Args[2] {
32451 break
32452 }
32453 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
32454 break
32455 }
32456 b = mergePoint(b, x0, x1)
32457 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
32458 v.reset(OpCopy)
32459 v.AddArg(v0)
32460 v0.AuxInt = 8
32461 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
32462 v1.AuxInt = i0
32463 v1.Aux = s
32464 v1.AddArg(p)
32465 v1.AddArg(idx)
32466 v1.AddArg(mem)
32467 v0.AddArg(v1)
32468 return true
32469 }
32470
32471
32472
32473 for {
32474 _ = v.Args[1]
32475 sh := v.Args[0]
32476 if sh.Op != OpAMD64SHLLconst {
32477 break
32478 }
32479 if sh.AuxInt != 8 {
32480 break
32481 }
32482 x0 := sh.Args[0]
32483 if x0.Op != OpAMD64MOVBloadidx1 {
32484 break
32485 }
32486 i0 := x0.AuxInt
32487 s := x0.Aux
32488 mem := x0.Args[2]
32489 p := x0.Args[0]
32490 idx := x0.Args[1]
32491 x1 := v.Args[1]
32492 if x1.Op != OpAMD64MOVBloadidx1 {
32493 break
32494 }
32495 i1 := x1.AuxInt
32496 if x1.Aux != s {
32497 break
32498 }
32499 _ = x1.Args[2]
32500 if idx != x1.Args[0] {
32501 break
32502 }
32503 if p != x1.Args[1] {
32504 break
32505 }
32506 if mem != x1.Args[2] {
32507 break
32508 }
32509 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
32510 break
32511 }
32512 b = mergePoint(b, x0, x1)
32513 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
32514 v.reset(OpCopy)
32515 v.AddArg(v0)
32516 v0.AuxInt = 8
32517 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
32518 v1.AuxInt = i0
32519 v1.Aux = s
32520 v1.AddArg(p)
32521 v1.AddArg(idx)
32522 v1.AddArg(mem)
32523 v0.AddArg(v1)
32524 return true
32525 }
32526
32527
32528
32529 for {
32530 _ = v.Args[1]
32531 sh := v.Args[0]
32532 if sh.Op != OpAMD64SHLLconst {
32533 break
32534 }
32535 if sh.AuxInt != 8 {
32536 break
32537 }
32538 x0 := sh.Args[0]
32539 if x0.Op != OpAMD64MOVBloadidx1 {
32540 break
32541 }
32542 i0 := x0.AuxInt
32543 s := x0.Aux
32544 mem := x0.Args[2]
32545 idx := x0.Args[0]
32546 p := x0.Args[1]
32547 x1 := v.Args[1]
32548 if x1.Op != OpAMD64MOVBloadidx1 {
32549 break
32550 }
32551 i1 := x1.AuxInt
32552 if x1.Aux != s {
32553 break
32554 }
32555 _ = x1.Args[2]
32556 if idx != x1.Args[0] {
32557 break
32558 }
32559 if p != x1.Args[1] {
32560 break
32561 }
32562 if mem != x1.Args[2] {
32563 break
32564 }
32565 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
32566 break
32567 }
32568 b = mergePoint(b, x0, x1)
32569 v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
32570 v.reset(OpCopy)
32571 v.AddArg(v0)
32572 v0.AuxInt = 8
32573 v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
32574 v1.AuxInt = i0
32575 v1.Aux = s
32576 v1.AddArg(p)
32577 v1.AddArg(idx)
32578 v1.AddArg(mem)
32579 v0.AddArg(v1)
32580 return true
32581 }
32582
32583
32584
32585 for {
32586 _ = v.Args[1]
32587 r1 := v.Args[0]
32588 if r1.Op != OpAMD64ROLWconst {
32589 break
32590 }
32591 if r1.AuxInt != 8 {
32592 break
32593 }
32594 x1 := r1.Args[0]
32595 if x1.Op != OpAMD64MOVWloadidx1 {
32596 break
32597 }
32598 i1 := x1.AuxInt
32599 s := x1.Aux
32600 mem := x1.Args[2]
32601 p := x1.Args[0]
32602 idx := x1.Args[1]
32603 sh := v.Args[1]
32604 if sh.Op != OpAMD64SHLLconst {
32605 break
32606 }
32607 if sh.AuxInt != 16 {
32608 break
32609 }
32610 r0 := sh.Args[0]
32611 if r0.Op != OpAMD64ROLWconst {
32612 break
32613 }
32614 if r0.AuxInt != 8 {
32615 break
32616 }
32617 x0 := r0.Args[0]
32618 if x0.Op != OpAMD64MOVWloadidx1 {
32619 break
32620 }
32621 i0 := x0.AuxInt
32622 if x0.Aux != s {
32623 break
32624 }
32625 _ = x0.Args[2]
32626 if p != x0.Args[0] {
32627 break
32628 }
32629 if idx != x0.Args[1] {
32630 break
32631 }
32632 if mem != x0.Args[2] {
32633 break
32634 }
32635 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
32636 break
32637 }
32638 b = mergePoint(b, x0, x1)
32639 v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
32640 v.reset(OpCopy)
32641 v.AddArg(v0)
32642 v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
32643 v1.AuxInt = i0
32644 v1.Aux = s
32645 v1.AddArg(p)
32646 v1.AddArg(idx)
32647 v1.AddArg(mem)
32648 v0.AddArg(v1)
32649 return true
32650 }
32651 return false
32652 }
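// rewriteValueAMD64_OpAMD64ORL_110 covers further p/idx and operand
// orderings of the same patterns: pairs of ROLWconst(MOVWloadidx1) values
// merged into a BSWAPL of a 32-bit indexed load, and shifted MOVBloadidx1
// pairs inside a larger ORL chain merged into a shifted, rotated 16-bit
// indexed load.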
func rewriteValueAMD64_OpAMD64ORL_110(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLLconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
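// rewriteValueAMD64_OpAMD64ORL_120 continues with the remaining commutative
// orderings of the shifted MOVBloadidx1 pairs inside an ORL chain; every
// loop produces the same shifted ROLWconst(MOVWloadidx1) replacement at the
// merge point of the two loads.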
func rewriteValueAMD64_OpAMD64ORL_120(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
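// rewriteValueAMD64_OpAMD64ORL_130 finishes the indexed load-merging
// orderings and then folds a memory operand into the instruction itself,
// rewriting (ORL x (MOVLload ...)) in either operand order into ORLload
// when the load is single-use and mergeable.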
func rewriteValueAMD64_OpAMD64ORL_130(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 [i0] {s} p idx mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORL {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLLconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLLconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ORLload x [off] {sym} ptr mem)
	for {
		_ = v.Args[1]
		x := v.Args[0]
		l := v.Args[1]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (ORL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ORLload x [off] {sym} ptr mem)
	for {
		x := v.Args[1]
		l := v.Args[0]
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := l.AuxInt
		sym := l.Aux
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(x)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
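// rewriteValueAMD64_OpAMD64ORLconst_0 strength-reduces and folds ORLconst:
// a single power-of-two bit at or above 128 becomes BTSLconst, nested
// constants are combined, or-with-0 disappears, and or-with--1 becomes
// MOVLconst [-1].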
func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool {
	b := v.Block
	config := b.Func.Config
	// match: (ORLconst [c] x)
	// cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
	// result: (BTSLconst [log2uint32(c)] x)
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
			break
		}
		v.reset(OpAMD64BTSLconst)
		v.AuxInt = log2uint32(c)
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] (ORLconst [d] x))
	// cond:
	// result: (ORLconst [c | d] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ORLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c | d
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] (BTSLconst [d] x))
	// cond:
	// result: (ORLconst [c | 1<<uint32(d)] x)
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64BTSLconst {
			break
		}
		d := v_0.AuxInt
		x := v_0.Args[0]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = c | 1<<uint32(d)
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] x)
	// cond: int32(c)==0
	// result: x
	for {
		c := v.AuxInt
		x := v.Args[0]
		if !(int32(c) == 0) {
			break
		}
		v.reset(OpCopy)
		v.Type = x.Type
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] _)
	// cond: int32(c)==-1
	// result: (MOVLconst [-1])
	for {
		c := v.AuxInt
		if !(int32(c) == -1) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = -1
		return true
	}
	// match: (ORLconst [c] (MOVLconst [d]))
	// cond:
	// result: (MOVLconst [c|d])
	for {
		c := v.AuxInt
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := v_0.AuxInt
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = c | d
		return true
	}
	return false
}
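// rewriteValueAMD64_OpAMD64ORLconstmodify_0 folds ADDQconst offsets and
// LEAQ offset/symbol pairs into the ORLconstmodify addressing mode when the
// combined value-and-offset still fits.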
func rewriteValueAMD64_OpAMD64ORLconstmodify_0(v *Value) bool {
	// match: (ORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2)
	// result: (ORLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
	for {
		valoff1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
	// result: (ORLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[1]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = ValAndOff(valoff1).add(off2)
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	return false
}
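// rewriteValueAMD64_OpAMD64ORLload_0 folds ADDQconst and LEAQ bases into the
// load's addressing mode, and forwards a just-stored MOVSS value through
// MOVLf2i instead of reloading the same location from memory.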
func rewriteValueAMD64_OpAMD64ORLload_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(off1+off2)
	// result: (ORLload [off1+off2] {sym} val base mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_1.AuxInt
		base := v_1.Args[0]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		val := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_1.AuxInt
		sym2 := v_1.Aux
		base := v_1.Args[0]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(val)
		v.AddArg(base)
		v.AddArg(mem)
		return true
	}
	// match: (ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// cond:
	// result: (ORL x (MOVLf2i y))
	for {
		off := v.AuxInt
		sym := v.Aux
		_ = v.Args[2]
		x := v.Args[0]
		ptr := v.Args[1]
		v_2 := v.Args[2]
		if v_2.Op != OpAMD64MOVSSstore {
			break
		}
		if v_2.AuxInt != off {
			break
		}
		if v_2.Aux != sym {
			break
		}
		_ = v_2.Args[2]
		if ptr != v_2.Args[0] {
			break
		}
		y := v_2.Args[1]
		v.reset(OpAMD64ORL)
		v.AddArg(x)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	return false
}
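// rewriteValueAMD64_OpAMD64ORLmodify_0 folds ADDQconst offsets and LEAQ
// offset/symbol pairs into the ORLmodify addressing mode.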
func rewriteValueAMD64_OpAMD64ORLmodify_0(v *Value) bool {
	// match: (ORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (ORLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	return false
}
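// rewriteValueAMD64_OpAMD64ORQ_0 recognizes single-bit sets (BTSQ and
// BTSQconst), folds 32-bit-representable constants into ORQconst, and turns
// shift/or combinations into ROLQconst and ROLQ rotates.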
34610 func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool {
34611 b := v.Block
34612 config := b.Func.Config
34613
34614
34615
34616 for {
34617 x := v.Args[1]
34618 v_0 := v.Args[0]
34619 if v_0.Op != OpAMD64SHLQ {
34620 break
34621 }
34622 y := v_0.Args[1]
34623 v_0_0 := v_0.Args[0]
34624 if v_0_0.Op != OpAMD64MOVQconst {
34625 break
34626 }
34627 if v_0_0.AuxInt != 1 {
34628 break
34629 }
34630 if !(!config.nacl) {
34631 break
34632 }
34633 v.reset(OpAMD64BTSQ)
34634 v.AddArg(x)
34635 v.AddArg(y)
34636 return true
34637 }
34638
34639
34640
34641 for {
34642 _ = v.Args[1]
34643 x := v.Args[0]
34644 v_1 := v.Args[1]
34645 if v_1.Op != OpAMD64SHLQ {
34646 break
34647 }
34648 y := v_1.Args[1]
34649 v_1_0 := v_1.Args[0]
34650 if v_1_0.Op != OpAMD64MOVQconst {
34651 break
34652 }
34653 if v_1_0.AuxInt != 1 {
34654 break
34655 }
34656 if !(!config.nacl) {
34657 break
34658 }
34659 v.reset(OpAMD64BTSQ)
34660 v.AddArg(x)
34661 v.AddArg(y)
34662 return true
34663 }
34664
34665
34666
34667 for {
34668 x := v.Args[1]
34669 v_0 := v.Args[0]
34670 if v_0.Op != OpAMD64MOVQconst {
34671 break
34672 }
34673 c := v_0.AuxInt
34674 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
34675 break
34676 }
34677 v.reset(OpAMD64BTSQconst)
34678 v.AuxInt = log2(c)
34679 v.AddArg(x)
34680 return true
34681 }
34682
34683
34684
34685 for {
34686 _ = v.Args[1]
34687 x := v.Args[0]
34688 v_1 := v.Args[1]
34689 if v_1.Op != OpAMD64MOVQconst {
34690 break
34691 }
34692 c := v_1.AuxInt
34693 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
34694 break
34695 }
34696 v.reset(OpAMD64BTSQconst)
34697 v.AuxInt = log2(c)
34698 v.AddArg(x)
34699 return true
34700 }
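// For an OR with a large power-of-two constant, BTSQconst is preferred:
// ORQconst only accepts a sign-extended 32-bit immediate, and the bit-test
// encoding is shorter for bits 7 and up. Illustrative:
//
//	x |= 1 << 40 // (BTSQconst [40] x) rather than a constant load plus ORQ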
34701 // match: (ORQ x (MOVQconst [c]))
34702 // cond: is32Bit(c)
34703 // result: (ORQconst [c] x)
34704 for {
34705 _ = v.Args[1]
34706 x := v.Args[0]
34707 v_1 := v.Args[1]
34708 if v_1.Op != OpAMD64MOVQconst {
34709 break
34710 }
34711 c := v_1.AuxInt
34712 if !(is32Bit(c)) {
34713 break
34714 }
34715 v.reset(OpAMD64ORQconst)
34716 v.AuxInt = c
34717 v.AddArg(x)
34718 return true
34719 }
34720 // match: (ORQ (MOVQconst [c]) x)
34721 // cond: is32Bit(c)
34722 // result: (ORQconst [c] x)
34723 for {
34724 x := v.Args[1]
34725 v_0 := v.Args[0]
34726 if v_0.Op != OpAMD64MOVQconst {
34727 break
34728 }
34729 c := v_0.AuxInt
34730 if !(is32Bit(c)) {
34731 break
34732 }
34733 v.reset(OpAMD64ORQconst)
34734 v.AuxInt = c
34735 v.AddArg(x)
34736 return true
34737 }
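// The ORQconst rules fold any constant that fits in a sign-extended 32-bit
// immediate into the instruction itself. Illustrative:
//
//	y := x | 0x7fff      // folds to (ORQconst [0x7fff] x)
//	z := x | 0x123456789 // keeps MOVQconst: not a 32-bit immediate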
34738 // match: (ORQ (SHLQconst x [c]) (SHRQconst x [d]))
34739 // cond: d==64-c
34740 // result: (ROLQconst x [c])
34741 for {
34742 _ = v.Args[1]
34743 v_0 := v.Args[0]
34744 if v_0.Op != OpAMD64SHLQconst {
34745 break
34746 }
34747 c := v_0.AuxInt
34748 x := v_0.Args[0]
34749 v_1 := v.Args[1]
34750 if v_1.Op != OpAMD64SHRQconst {
34751 break
34752 }
34753 d := v_1.AuxInt
34754 if x != v_1.Args[0] {
34755 break
34756 }
34757 if !(d == 64-c) {
34758 break
34759 }
34760 v.reset(OpAMD64ROLQconst)
34761 v.AuxInt = c
34762 v.AddArg(x)
34763 return true
34764 }
34765 // match: (ORQ (SHRQconst x [d]) (SHLQconst x [c]))
34766 // cond: d==64-c
34767 // result: (ROLQconst x [c])
34768 for {
34769 _ = v.Args[1]
34770 v_0 := v.Args[0]
34771 if v_0.Op != OpAMD64SHRQconst {
34772 break
34773 }
34774 d := v_0.AuxInt
34775 x := v_0.Args[0]
34776 v_1 := v.Args[1]
34777 if v_1.Op != OpAMD64SHLQconst {
34778 break
34779 }
34780 c := v_1.AuxInt
34781 if x != v_1.Args[0] {
34782 break
34783 }
34784 if !(d == 64-c) {
34785 break
34786 }
34787 v.reset(OpAMD64ROLQconst)
34788 v.AuxInt = c
34789 v.AddArg(x)
34790 return true
34791 }
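// Constant-rotate sketch (illustrative): both operand orders of the shift
// pair are matched, so the usual spelling becomes a single ROLQconst:
//
//	func rol7(x uint64) uint64 {
//		return x<<7 | x>>57 // (ROLQconst [7] x)
//	}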
34792 // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
34793 // cond:
34794 // result: (ROLQ x y)
34795 for {
34796 _ = v.Args[1]
34797 v_0 := v.Args[0]
34798 if v_0.Op != OpAMD64SHLQ {
34799 break
34800 }
34801 y := v_0.Args[1]
34802 x := v_0.Args[0]
34803 v_1 := v.Args[1]
34804 if v_1.Op != OpAMD64ANDQ {
34805 break
34806 }
34807 _ = v_1.Args[1]
34808 v_1_0 := v_1.Args[0]
34809 if v_1_0.Op != OpAMD64SHRQ {
34810 break
34811 }
34812 _ = v_1_0.Args[1]
34813 if x != v_1_0.Args[0] {
34814 break
34815 }
34816 v_1_0_1 := v_1_0.Args[1]
34817 if v_1_0_1.Op != OpAMD64NEGQ {
34818 break
34819 }
34820 if y != v_1_0_1.Args[0] {
34821 break
34822 }
34823 v_1_1 := v_1.Args[1]
34824 if v_1_1.Op != OpAMD64SBBQcarrymask {
34825 break
34826 }
34827 v_1_1_0 := v_1_1.Args[0]
34828 if v_1_1_0.Op != OpAMD64CMPQconst {
34829 break
34830 }
34831 if v_1_1_0.AuxInt != 64 {
34832 break
34833 }
34834 v_1_1_0_0 := v_1_1_0.Args[0]
34835 if v_1_1_0_0.Op != OpAMD64NEGQ {
34836 break
34837 }
34838 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
34839 if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
34840 break
34841 }
34842 if v_1_1_0_0_0.AuxInt != -64 {
34843 break
34844 }
34845 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
34846 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
34847 break
34848 }
34849 if v_1_1_0_0_0_0.AuxInt != 63 {
34850 break
34851 }
34852 if y != v_1_1_0_0_0_0.Args[0] {
34853 break
34854 }
34855 v.reset(OpAMD64ROLQ)
34856 v.AddArg(x)
34857 v.AddArg(y)
34858 return true
34859 }
34860 // match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))))
34861 // cond:
34862 // result: (ROLQ x y)
34863 for {
34864 _ = v.Args[1]
34865 v_0 := v.Args[0]
34866 if v_0.Op != OpAMD64SHLQ {
34867 break
34868 }
34869 y := v_0.Args[1]
34870 x := v_0.Args[0]
34871 v_1 := v.Args[1]
34872 if v_1.Op != OpAMD64ANDQ {
34873 break
34874 }
34875 _ = v_1.Args[1]
34876 v_1_0 := v_1.Args[0]
34877 if v_1_0.Op != OpAMD64SBBQcarrymask {
34878 break
34879 }
34880 v_1_0_0 := v_1_0.Args[0]
34881 if v_1_0_0.Op != OpAMD64CMPQconst {
34882 break
34883 }
34884 if v_1_0_0.AuxInt != 64 {
34885 break
34886 }
34887 v_1_0_0_0 := v_1_0_0.Args[0]
34888 if v_1_0_0_0.Op != OpAMD64NEGQ {
34889 break
34890 }
34891 v_1_0_0_0_0 := v_1_0_0_0.Args[0]
34892 if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
34893 break
34894 }
34895 if v_1_0_0_0_0.AuxInt != -64 {
34896 break
34897 }
34898 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
34899 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
34900 break
34901 }
34902 if v_1_0_0_0_0_0.AuxInt != 63 {
34903 break
34904 }
34905 if y != v_1_0_0_0_0_0.Args[0] {
34906 break
34907 }
34908 v_1_1 := v_1.Args[1]
34909 if v_1_1.Op != OpAMD64SHRQ {
34910 break
34911 }
34912 _ = v_1_1.Args[1]
34913 if x != v_1_1.Args[0] {
34914 break
34915 }
34916 v_1_1_1 := v_1_1.Args[1]
34917 if v_1_1_1.Op != OpAMD64NEGQ {
34918 break
34919 }
34920 if y != v_1_1_1.Args[0] {
34921 break
34922 }
34923 v.reset(OpAMD64ROLQ)
34924 v.AddArg(x)
34925 v.AddArg(y)
34926 return true
34927 }
34928 return false
34929 }
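// The ANDQ/SBBQcarrymask patterns in this function and the next match the
// expansion of a variable rotate, where the mask zeroes the wide-shift half.
// Roughly this source shape (illustrative):
//
//	func rol(x uint64, k uint) uint64 {
//		return x<<(k&63) | x>>((64-k)&63) // collapses to ROLQ x k
//	}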
34930 func rewriteValueAMD64_OpAMD64ORQ_10(v *Value) bool {
34931 // match: (ORQ (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHLQ x y))
34932 // cond:
34933 // result: (ROLQ x y)
34934 for {
34935 _ = v.Args[1]
34936 v_0 := v.Args[0]
34937 if v_0.Op != OpAMD64ANDQ {
34938 break
34939 }
34940 _ = v_0.Args[1]
34941 v_0_0 := v_0.Args[0]
34942 if v_0_0.Op != OpAMD64SHRQ {
34943 break
34944 }
34945 _ = v_0_0.Args[1]
34946 x := v_0_0.Args[0]
34947 v_0_0_1 := v_0_0.Args[1]
34948 if v_0_0_1.Op != OpAMD64NEGQ {
34949 break
34950 }
34951 y := v_0_0_1.Args[0]
34952 v_0_1 := v_0.Args[1]
34953 if v_0_1.Op != OpAMD64SBBQcarrymask {
34954 break
34955 }
34956 v_0_1_0 := v_0_1.Args[0]
34957 if v_0_1_0.Op != OpAMD64CMPQconst {
34958 break
34959 }
34960 if v_0_1_0.AuxInt != 64 {
34961 break
34962 }
34963 v_0_1_0_0 := v_0_1_0.Args[0]
34964 if v_0_1_0_0.Op != OpAMD64NEGQ {
34965 break
34966 }
34967 v_0_1_0_0_0 := v_0_1_0_0.Args[0]
34968 if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
34969 break
34970 }
34971 if v_0_1_0_0_0.AuxInt != -64 {
34972 break
34973 }
34974 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
34975 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
34976 break
34977 }
34978 if v_0_1_0_0_0_0.AuxInt != 63 {
34979 break
34980 }
34981 if y != v_0_1_0_0_0_0.Args[0] {
34982 break
34983 }
34984 v_1 := v.Args[1]
34985 if v_1.Op != OpAMD64SHLQ {
34986 break
34987 }
34988 _ = v_1.Args[1]
34989 if x != v_1.Args[0] {
34990 break
34991 }
34992 if y != v_1.Args[1] {
34993 break
34994 }
34995 v.reset(OpAMD64ROLQ)
34996 v.AddArg(x)
34997 v.AddArg(y)
34998 return true
34999 }
35000 // match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHRQ x (NEGQ y))) (SHLQ x y))
35001 // cond:
35002 // result: (ROLQ x y)
35003 for {
35004 _ = v.Args[1]
35005 v_0 := v.Args[0]
35006 if v_0.Op != OpAMD64ANDQ {
35007 break
35008 }
35009 _ = v_0.Args[1]
35010 v_0_0 := v_0.Args[0]
35011 if v_0_0.Op != OpAMD64SBBQcarrymask {
35012 break
35013 }
35014 v_0_0_0 := v_0_0.Args[0]
35015 if v_0_0_0.Op != OpAMD64CMPQconst {
35016 break
35017 }
35018 if v_0_0_0.AuxInt != 64 {
35019 break
35020 }
35021 v_0_0_0_0 := v_0_0_0.Args[0]
35022 if v_0_0_0_0.Op != OpAMD64NEGQ {
35023 break
35024 }
35025 v_0_0_0_0_0 := v_0_0_0_0.Args[0]
35026 if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
35027 break
35028 }
35029 if v_0_0_0_0_0.AuxInt != -64 {
35030 break
35031 }
35032 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
35033 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
35034 break
35035 }
35036 if v_0_0_0_0_0_0.AuxInt != 63 {
35037 break
35038 }
35039 y := v_0_0_0_0_0_0.Args[0]
35040 v_0_1 := v_0.Args[1]
35041 if v_0_1.Op != OpAMD64SHRQ {
35042 break
35043 }
35044 _ = v_0_1.Args[1]
35045 x := v_0_1.Args[0]
35046 v_0_1_1 := v_0_1.Args[1]
35047 if v_0_1_1.Op != OpAMD64NEGQ {
35048 break
35049 }
35050 if y != v_0_1_1.Args[0] {
35051 break
35052 }
35053 v_1 := v.Args[1]
35054 if v_1.Op != OpAMD64SHLQ {
35055 break
35056 }
35057 _ = v_1.Args[1]
35058 if x != v_1.Args[0] {
35059 break
35060 }
35061 if y != v_1.Args[1] {
35062 break
35063 }
35064 v.reset(OpAMD64ROLQ)
35065 v.AddArg(x)
35066 v.AddArg(y)
35067 return true
35068 }
35069 // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
35070 // cond:
35071 // result: (ROLQ x y)
35072 for {
35073 _ = v.Args[1]
35074 v_0 := v.Args[0]
35075 if v_0.Op != OpAMD64SHLQ {
35076 break
35077 }
35078 y := v_0.Args[1]
35079 x := v_0.Args[0]
35080 v_1 := v.Args[1]
35081 if v_1.Op != OpAMD64ANDQ {
35082 break
35083 }
35084 _ = v_1.Args[1]
35085 v_1_0 := v_1.Args[0]
35086 if v_1_0.Op != OpAMD64SHRQ {
35087 break
35088 }
35089 _ = v_1_0.Args[1]
35090 if x != v_1_0.Args[0] {
35091 break
35092 }
35093 v_1_0_1 := v_1_0.Args[1]
35094 if v_1_0_1.Op != OpAMD64NEGL {
35095 break
35096 }
35097 if y != v_1_0_1.Args[0] {
35098 break
35099 }
35100 v_1_1 := v_1.Args[1]
35101 if v_1_1.Op != OpAMD64SBBQcarrymask {
35102 break
35103 }
35104 v_1_1_0 := v_1_1.Args[0]
35105 if v_1_1_0.Op != OpAMD64CMPLconst {
35106 break
35107 }
35108 if v_1_1_0.AuxInt != 64 {
35109 break
35110 }
35111 v_1_1_0_0 := v_1_1_0.Args[0]
35112 if v_1_1_0_0.Op != OpAMD64NEGL {
35113 break
35114 }
35115 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
35116 if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
35117 break
35118 }
35119 if v_1_1_0_0_0.AuxInt != -64 {
35120 break
35121 }
35122 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
35123 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
35124 break
35125 }
35126 if v_1_1_0_0_0_0.AuxInt != 63 {
35127 break
35128 }
35129 if y != v_1_1_0_0_0_0.Args[0] {
35130 break
35131 }
35132 v.reset(OpAMD64ROLQ)
35133 v.AddArg(x)
35134 v.AddArg(y)
35135 return true
35136 }
35137 // match: (ORQ (SHLQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))))
35138 // cond:
35139 // result: (ROLQ x y)
35140 for {
35141 _ = v.Args[1]
35142 v_0 := v.Args[0]
35143 if v_0.Op != OpAMD64SHLQ {
35144 break
35145 }
35146 y := v_0.Args[1]
35147 x := v_0.Args[0]
35148 v_1 := v.Args[1]
35149 if v_1.Op != OpAMD64ANDQ {
35150 break
35151 }
35152 _ = v_1.Args[1]
35153 v_1_0 := v_1.Args[0]
35154 if v_1_0.Op != OpAMD64SBBQcarrymask {
35155 break
35156 }
35157 v_1_0_0 := v_1_0.Args[0]
35158 if v_1_0_0.Op != OpAMD64CMPLconst {
35159 break
35160 }
35161 if v_1_0_0.AuxInt != 64 {
35162 break
35163 }
35164 v_1_0_0_0 := v_1_0_0.Args[0]
35165 if v_1_0_0_0.Op != OpAMD64NEGL {
35166 break
35167 }
35168 v_1_0_0_0_0 := v_1_0_0_0.Args[0]
35169 if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
35170 break
35171 }
35172 if v_1_0_0_0_0.AuxInt != -64 {
35173 break
35174 }
35175 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
35176 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
35177 break
35178 }
35179 if v_1_0_0_0_0_0.AuxInt != 63 {
35180 break
35181 }
35182 if y != v_1_0_0_0_0_0.Args[0] {
35183 break
35184 }
35185 v_1_1 := v_1.Args[1]
35186 if v_1_1.Op != OpAMD64SHRQ {
35187 break
35188 }
35189 _ = v_1_1.Args[1]
35190 if x != v_1_1.Args[0] {
35191 break
35192 }
35193 v_1_1_1 := v_1_1.Args[1]
35194 if v_1_1_1.Op != OpAMD64NEGL {
35195 break
35196 }
35197 if y != v_1_1_1.Args[0] {
35198 break
35199 }
35200 v.reset(OpAMD64ROLQ)
35201 v.AddArg(x)
35202 v.AddArg(y)
35203 return true
35204 }
35205 // match: (ORQ (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHLQ x y))
35206 // cond:
35207 // result: (ROLQ x y)
35208 for {
35209 _ = v.Args[1]
35210 v_0 := v.Args[0]
35211 if v_0.Op != OpAMD64ANDQ {
35212 break
35213 }
35214 _ = v_0.Args[1]
35215 v_0_0 := v_0.Args[0]
35216 if v_0_0.Op != OpAMD64SHRQ {
35217 break
35218 }
35219 _ = v_0_0.Args[1]
35220 x := v_0_0.Args[0]
35221 v_0_0_1 := v_0_0.Args[1]
35222 if v_0_0_1.Op != OpAMD64NEGL {
35223 break
35224 }
35225 y := v_0_0_1.Args[0]
35226 v_0_1 := v_0.Args[1]
35227 if v_0_1.Op != OpAMD64SBBQcarrymask {
35228 break
35229 }
35230 v_0_1_0 := v_0_1.Args[0]
35231 if v_0_1_0.Op != OpAMD64CMPLconst {
35232 break
35233 }
35234 if v_0_1_0.AuxInt != 64 {
35235 break
35236 }
35237 v_0_1_0_0 := v_0_1_0.Args[0]
35238 if v_0_1_0_0.Op != OpAMD64NEGL {
35239 break
35240 }
35241 v_0_1_0_0_0 := v_0_1_0_0.Args[0]
35242 if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
35243 break
35244 }
35245 if v_0_1_0_0_0.AuxInt != -64 {
35246 break
35247 }
35248 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
35249 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
35250 break
35251 }
35252 if v_0_1_0_0_0_0.AuxInt != 63 {
35253 break
35254 }
35255 if y != v_0_1_0_0_0_0.Args[0] {
35256 break
35257 }
35258 v_1 := v.Args[1]
35259 if v_1.Op != OpAMD64SHLQ {
35260 break
35261 }
35262 _ = v_1.Args[1]
35263 if x != v_1.Args[0] {
35264 break
35265 }
35266 if y != v_1.Args[1] {
35267 break
35268 }
35269 v.reset(OpAMD64ROLQ)
35270 v.AddArg(x)
35271 v.AddArg(y)
35272 return true
35273 }
35274 // match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHRQ x (NEGL y))) (SHLQ x y))
35275 // cond:
35276 // result: (ROLQ x y)
35277 for {
35278 _ = v.Args[1]
35279 v_0 := v.Args[0]
35280 if v_0.Op != OpAMD64ANDQ {
35281 break
35282 }
35283 _ = v_0.Args[1]
35284 v_0_0 := v_0.Args[0]
35285 if v_0_0.Op != OpAMD64SBBQcarrymask {
35286 break
35287 }
35288 v_0_0_0 := v_0_0.Args[0]
35289 if v_0_0_0.Op != OpAMD64CMPLconst {
35290 break
35291 }
35292 if v_0_0_0.AuxInt != 64 {
35293 break
35294 }
35295 v_0_0_0_0 := v_0_0_0.Args[0]
35296 if v_0_0_0_0.Op != OpAMD64NEGL {
35297 break
35298 }
35299 v_0_0_0_0_0 := v_0_0_0_0.Args[0]
35300 if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
35301 break
35302 }
35303 if v_0_0_0_0_0.AuxInt != -64 {
35304 break
35305 }
35306 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
35307 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
35308 break
35309 }
35310 if v_0_0_0_0_0_0.AuxInt != 63 {
35311 break
35312 }
35313 y := v_0_0_0_0_0_0.Args[0]
35314 v_0_1 := v_0.Args[1]
35315 if v_0_1.Op != OpAMD64SHRQ {
35316 break
35317 }
35318 _ = v_0_1.Args[1]
35319 x := v_0_1.Args[0]
35320 v_0_1_1 := v_0_1.Args[1]
35321 if v_0_1_1.Op != OpAMD64NEGL {
35322 break
35323 }
35324 if y != v_0_1_1.Args[0] {
35325 break
35326 }
35327 v_1 := v.Args[1]
35328 if v_1.Op != OpAMD64SHLQ {
35329 break
35330 }
35331 _ = v_1.Args[1]
35332 if x != v_1.Args[0] {
35333 break
35334 }
35335 if y != v_1.Args[1] {
35336 break
35337 }
35338 v.reset(OpAMD64ROLQ)
35339 v.AddArg(x)
35340 v.AddArg(y)
35341 return true
35342 }
35343 // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
35344 // cond:
35345 // result: (RORQ x y)
35346 for {
35347 _ = v.Args[1]
35348 v_0 := v.Args[0]
35349 if v_0.Op != OpAMD64SHRQ {
35350 break
35351 }
35352 y := v_0.Args[1]
35353 x := v_0.Args[0]
35354 v_1 := v.Args[1]
35355 if v_1.Op != OpAMD64ANDQ {
35356 break
35357 }
35358 _ = v_1.Args[1]
35359 v_1_0 := v_1.Args[0]
35360 if v_1_0.Op != OpAMD64SHLQ {
35361 break
35362 }
35363 _ = v_1_0.Args[1]
35364 if x != v_1_0.Args[0] {
35365 break
35366 }
35367 v_1_0_1 := v_1_0.Args[1]
35368 if v_1_0_1.Op != OpAMD64NEGQ {
35369 break
35370 }
35371 if y != v_1_0_1.Args[0] {
35372 break
35373 }
35374 v_1_1 := v_1.Args[1]
35375 if v_1_1.Op != OpAMD64SBBQcarrymask {
35376 break
35377 }
35378 v_1_1_0 := v_1_1.Args[0]
35379 if v_1_1_0.Op != OpAMD64CMPQconst {
35380 break
35381 }
35382 if v_1_1_0.AuxInt != 64 {
35383 break
35384 }
35385 v_1_1_0_0 := v_1_1_0.Args[0]
35386 if v_1_1_0_0.Op != OpAMD64NEGQ {
35387 break
35388 }
35389 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
35390 if v_1_1_0_0_0.Op != OpAMD64ADDQconst {
35391 break
35392 }
35393 if v_1_1_0_0_0.AuxInt != -64 {
35394 break
35395 }
35396 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
35397 if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst {
35398 break
35399 }
35400 if v_1_1_0_0_0_0.AuxInt != 63 {
35401 break
35402 }
35403 if y != v_1_1_0_0_0_0.Args[0] {
35404 break
35405 }
35406 v.reset(OpAMD64RORQ)
35407 v.AddArg(x)
35408 v.AddArg(y)
35409 return true
35410 }
35411 // match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))))
35412 // cond:
35413 // result: (RORQ x y)
35414 for {
35415 _ = v.Args[1]
35416 v_0 := v.Args[0]
35417 if v_0.Op != OpAMD64SHRQ {
35418 break
35419 }
35420 y := v_0.Args[1]
35421 x := v_0.Args[0]
35422 v_1 := v.Args[1]
35423 if v_1.Op != OpAMD64ANDQ {
35424 break
35425 }
35426 _ = v_1.Args[1]
35427 v_1_0 := v_1.Args[0]
35428 if v_1_0.Op != OpAMD64SBBQcarrymask {
35429 break
35430 }
35431 v_1_0_0 := v_1_0.Args[0]
35432 if v_1_0_0.Op != OpAMD64CMPQconst {
35433 break
35434 }
35435 if v_1_0_0.AuxInt != 64 {
35436 break
35437 }
35438 v_1_0_0_0 := v_1_0_0.Args[0]
35439 if v_1_0_0_0.Op != OpAMD64NEGQ {
35440 break
35441 }
35442 v_1_0_0_0_0 := v_1_0_0_0.Args[0]
35443 if v_1_0_0_0_0.Op != OpAMD64ADDQconst {
35444 break
35445 }
35446 if v_1_0_0_0_0.AuxInt != -64 {
35447 break
35448 }
35449 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
35450 if v_1_0_0_0_0_0.Op != OpAMD64ANDQconst {
35451 break
35452 }
35453 if v_1_0_0_0_0_0.AuxInt != 63 {
35454 break
35455 }
35456 if y != v_1_0_0_0_0_0.Args[0] {
35457 break
35458 }
35459 v_1_1 := v_1.Args[1]
35460 if v_1_1.Op != OpAMD64SHLQ {
35461 break
35462 }
35463 _ = v_1_1.Args[1]
35464 if x != v_1_1.Args[0] {
35465 break
35466 }
35467 v_1_1_1 := v_1_1.Args[1]
35468 if v_1_1_1.Op != OpAMD64NEGQ {
35469 break
35470 }
35471 if y != v_1_1_1.Args[0] {
35472 break
35473 }
35474 v.reset(OpAMD64RORQ)
35475 v.AddArg(x)
35476 v.AddArg(y)
35477 return true
35478 }
35479 // match: (ORQ (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))) (SHRQ x y))
35480 // cond:
35481 // result: (RORQ x y)
35482 for {
35483 _ = v.Args[1]
35484 v_0 := v.Args[0]
35485 if v_0.Op != OpAMD64ANDQ {
35486 break
35487 }
35488 _ = v_0.Args[1]
35489 v_0_0 := v_0.Args[0]
35490 if v_0_0.Op != OpAMD64SHLQ {
35491 break
35492 }
35493 _ = v_0_0.Args[1]
35494 x := v_0_0.Args[0]
35495 v_0_0_1 := v_0_0.Args[1]
35496 if v_0_0_1.Op != OpAMD64NEGQ {
35497 break
35498 }
35499 y := v_0_0_1.Args[0]
35500 v_0_1 := v_0.Args[1]
35501 if v_0_1.Op != OpAMD64SBBQcarrymask {
35502 break
35503 }
35504 v_0_1_0 := v_0_1.Args[0]
35505 if v_0_1_0.Op != OpAMD64CMPQconst {
35506 break
35507 }
35508 if v_0_1_0.AuxInt != 64 {
35509 break
35510 }
35511 v_0_1_0_0 := v_0_1_0.Args[0]
35512 if v_0_1_0_0.Op != OpAMD64NEGQ {
35513 break
35514 }
35515 v_0_1_0_0_0 := v_0_1_0_0.Args[0]
35516 if v_0_1_0_0_0.Op != OpAMD64ADDQconst {
35517 break
35518 }
35519 if v_0_1_0_0_0.AuxInt != -64 {
35520 break
35521 }
35522 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
35523 if v_0_1_0_0_0_0.Op != OpAMD64ANDQconst {
35524 break
35525 }
35526 if v_0_1_0_0_0_0.AuxInt != 63 {
35527 break
35528 }
35529 if y != v_0_1_0_0_0_0.Args[0] {
35530 break
35531 }
35532 v_1 := v.Args[1]
35533 if v_1.Op != OpAMD64SHRQ {
35534 break
35535 }
35536 _ = v_1.Args[1]
35537 if x != v_1.Args[0] {
35538 break
35539 }
35540 if y != v_1.Args[1] {
35541 break
35542 }
35543 v.reset(OpAMD64RORQ)
35544 v.AddArg(x)
35545 v.AddArg(y)
35546 return true
35547 }
35548 // match: (ORQ (ANDQ (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])) (SHLQ x (NEGQ y))) (SHRQ x y))
35549 // cond:
35550 // result: (RORQ x y)
35551 for {
35552 _ = v.Args[1]
35553 v_0 := v.Args[0]
35554 if v_0.Op != OpAMD64ANDQ {
35555 break
35556 }
35557 _ = v_0.Args[1]
35558 v_0_0 := v_0.Args[0]
35559 if v_0_0.Op != OpAMD64SBBQcarrymask {
35560 break
35561 }
35562 v_0_0_0 := v_0_0.Args[0]
35563 if v_0_0_0.Op != OpAMD64CMPQconst {
35564 break
35565 }
35566 if v_0_0_0.AuxInt != 64 {
35567 break
35568 }
35569 v_0_0_0_0 := v_0_0_0.Args[0]
35570 if v_0_0_0_0.Op != OpAMD64NEGQ {
35571 break
35572 }
35573 v_0_0_0_0_0 := v_0_0_0_0.Args[0]
35574 if v_0_0_0_0_0.Op != OpAMD64ADDQconst {
35575 break
35576 }
35577 if v_0_0_0_0_0.AuxInt != -64 {
35578 break
35579 }
35580 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
35581 if v_0_0_0_0_0_0.Op != OpAMD64ANDQconst {
35582 break
35583 }
35584 if v_0_0_0_0_0_0.AuxInt != 63 {
35585 break
35586 }
35587 y := v_0_0_0_0_0_0.Args[0]
35588 v_0_1 := v_0.Args[1]
35589 if v_0_1.Op != OpAMD64SHLQ {
35590 break
35591 }
35592 _ = v_0_1.Args[1]
35593 x := v_0_1.Args[0]
35594 v_0_1_1 := v_0_1.Args[1]
35595 if v_0_1_1.Op != OpAMD64NEGQ {
35596 break
35597 }
35598 if y != v_0_1_1.Args[0] {
35599 break
35600 }
35601 v_1 := v.Args[1]
35602 if v_1.Op != OpAMD64SHRQ {
35603 break
35604 }
35605 _ = v_1.Args[1]
35606 if x != v_1.Args[0] {
35607 break
35608 }
35609 if y != v_1.Args[1] {
35610 break
35611 }
35612 v.reset(OpAMD64RORQ)
35613 v.AddArg(x)
35614 v.AddArg(y)
35615 return true
35616 }
35617 return false
35618 }
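// Right-rotate counterpart of the patterns above (illustrative):
//
//	func ror(x uint64, k uint) uint64 {
//		return x>>(k&63) | x<<((64-k)&63) // collapses to RORQ x k
//	}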
35619 func rewriteValueAMD64_OpAMD64ORQ_20(v *Value) bool {
35620 b := v.Block
35621 typ := &b.Func.Config.Types
35622 // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
35623 // cond:
35624 // result: (RORQ x y)
35625 for {
35626 _ = v.Args[1]
35627 v_0 := v.Args[0]
35628 if v_0.Op != OpAMD64SHRQ {
35629 break
35630 }
35631 y := v_0.Args[1]
35632 x := v_0.Args[0]
35633 v_1 := v.Args[1]
35634 if v_1.Op != OpAMD64ANDQ {
35635 break
35636 }
35637 _ = v_1.Args[1]
35638 v_1_0 := v_1.Args[0]
35639 if v_1_0.Op != OpAMD64SHLQ {
35640 break
35641 }
35642 _ = v_1_0.Args[1]
35643 if x != v_1_0.Args[0] {
35644 break
35645 }
35646 v_1_0_1 := v_1_0.Args[1]
35647 if v_1_0_1.Op != OpAMD64NEGL {
35648 break
35649 }
35650 if y != v_1_0_1.Args[0] {
35651 break
35652 }
35653 v_1_1 := v_1.Args[1]
35654 if v_1_1.Op != OpAMD64SBBQcarrymask {
35655 break
35656 }
35657 v_1_1_0 := v_1_1.Args[0]
35658 if v_1_1_0.Op != OpAMD64CMPLconst {
35659 break
35660 }
35661 if v_1_1_0.AuxInt != 64 {
35662 break
35663 }
35664 v_1_1_0_0 := v_1_1_0.Args[0]
35665 if v_1_1_0_0.Op != OpAMD64NEGL {
35666 break
35667 }
35668 v_1_1_0_0_0 := v_1_1_0_0.Args[0]
35669 if v_1_1_0_0_0.Op != OpAMD64ADDLconst {
35670 break
35671 }
35672 if v_1_1_0_0_0.AuxInt != -64 {
35673 break
35674 }
35675 v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
35676 if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst {
35677 break
35678 }
35679 if v_1_1_0_0_0_0.AuxInt != 63 {
35680 break
35681 }
35682 if y != v_1_1_0_0_0_0.Args[0] {
35683 break
35684 }
35685 v.reset(OpAMD64RORQ)
35686 v.AddArg(x)
35687 v.AddArg(y)
35688 return true
35689 }
35690 // match: (ORQ (SHRQ x y) (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))))
35691 // cond:
35692 // result: (RORQ x y)
35693 for {
35694 _ = v.Args[1]
35695 v_0 := v.Args[0]
35696 if v_0.Op != OpAMD64SHRQ {
35697 break
35698 }
35699 y := v_0.Args[1]
35700 x := v_0.Args[0]
35701 v_1 := v.Args[1]
35702 if v_1.Op != OpAMD64ANDQ {
35703 break
35704 }
35705 _ = v_1.Args[1]
35706 v_1_0 := v_1.Args[0]
35707 if v_1_0.Op != OpAMD64SBBQcarrymask {
35708 break
35709 }
35710 v_1_0_0 := v_1_0.Args[0]
35711 if v_1_0_0.Op != OpAMD64CMPLconst {
35712 break
35713 }
35714 if v_1_0_0.AuxInt != 64 {
35715 break
35716 }
35717 v_1_0_0_0 := v_1_0_0.Args[0]
35718 if v_1_0_0_0.Op != OpAMD64NEGL {
35719 break
35720 }
35721 v_1_0_0_0_0 := v_1_0_0_0.Args[0]
35722 if v_1_0_0_0_0.Op != OpAMD64ADDLconst {
35723 break
35724 }
35725 if v_1_0_0_0_0.AuxInt != -64 {
35726 break
35727 }
35728 v_1_0_0_0_0_0 := v_1_0_0_0_0.Args[0]
35729 if v_1_0_0_0_0_0.Op != OpAMD64ANDLconst {
35730 break
35731 }
35732 if v_1_0_0_0_0_0.AuxInt != 63 {
35733 break
35734 }
35735 if y != v_1_0_0_0_0_0.Args[0] {
35736 break
35737 }
35738 v_1_1 := v_1.Args[1]
35739 if v_1_1.Op != OpAMD64SHLQ {
35740 break
35741 }
35742 _ = v_1_1.Args[1]
35743 if x != v_1_1.Args[0] {
35744 break
35745 }
35746 v_1_1_1 := v_1_1.Args[1]
35747 if v_1_1_1.Op != OpAMD64NEGL {
35748 break
35749 }
35750 if y != v_1_1_1.Args[0] {
35751 break
35752 }
35753 v.reset(OpAMD64RORQ)
35754 v.AddArg(x)
35755 v.AddArg(y)
35756 return true
35757 }
35758 // match: (ORQ (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))) (SHRQ x y))
35759 // cond:
35760 // result: (RORQ x y)
35761 for {
35762 _ = v.Args[1]
35763 v_0 := v.Args[0]
35764 if v_0.Op != OpAMD64ANDQ {
35765 break
35766 }
35767 _ = v_0.Args[1]
35768 v_0_0 := v_0.Args[0]
35769 if v_0_0.Op != OpAMD64SHLQ {
35770 break
35771 }
35772 _ = v_0_0.Args[1]
35773 x := v_0_0.Args[0]
35774 v_0_0_1 := v_0_0.Args[1]
35775 if v_0_0_1.Op != OpAMD64NEGL {
35776 break
35777 }
35778 y := v_0_0_1.Args[0]
35779 v_0_1 := v_0.Args[1]
35780 if v_0_1.Op != OpAMD64SBBQcarrymask {
35781 break
35782 }
35783 v_0_1_0 := v_0_1.Args[0]
35784 if v_0_1_0.Op != OpAMD64CMPLconst {
35785 break
35786 }
35787 if v_0_1_0.AuxInt != 64 {
35788 break
35789 }
35790 v_0_1_0_0 := v_0_1_0.Args[0]
35791 if v_0_1_0_0.Op != OpAMD64NEGL {
35792 break
35793 }
35794 v_0_1_0_0_0 := v_0_1_0_0.Args[0]
35795 if v_0_1_0_0_0.Op != OpAMD64ADDLconst {
35796 break
35797 }
35798 if v_0_1_0_0_0.AuxInt != -64 {
35799 break
35800 }
35801 v_0_1_0_0_0_0 := v_0_1_0_0_0.Args[0]
35802 if v_0_1_0_0_0_0.Op != OpAMD64ANDLconst {
35803 break
35804 }
35805 if v_0_1_0_0_0_0.AuxInt != 63 {
35806 break
35807 }
35808 if y != v_0_1_0_0_0_0.Args[0] {
35809 break
35810 }
35811 v_1 := v.Args[1]
35812 if v_1.Op != OpAMD64SHRQ {
35813 break
35814 }
35815 _ = v_1.Args[1]
35816 if x != v_1.Args[0] {
35817 break
35818 }
35819 if y != v_1.Args[1] {
35820 break
35821 }
35822 v.reset(OpAMD64RORQ)
35823 v.AddArg(x)
35824 v.AddArg(y)
35825 return true
35826 }
35827 // match: (ORQ (ANDQ (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])) (SHLQ x (NEGL y))) (SHRQ x y))
35828 // cond:
35829 // result: (RORQ x y)
35830 for {
35831 _ = v.Args[1]
35832 v_0 := v.Args[0]
35833 if v_0.Op != OpAMD64ANDQ {
35834 break
35835 }
35836 _ = v_0.Args[1]
35837 v_0_0 := v_0.Args[0]
35838 if v_0_0.Op != OpAMD64SBBQcarrymask {
35839 break
35840 }
35841 v_0_0_0 := v_0_0.Args[0]
35842 if v_0_0_0.Op != OpAMD64CMPLconst {
35843 break
35844 }
35845 if v_0_0_0.AuxInt != 64 {
35846 break
35847 }
35848 v_0_0_0_0 := v_0_0_0.Args[0]
35849 if v_0_0_0_0.Op != OpAMD64NEGL {
35850 break
35851 }
35852 v_0_0_0_0_0 := v_0_0_0_0.Args[0]
35853 if v_0_0_0_0_0.Op != OpAMD64ADDLconst {
35854 break
35855 }
35856 if v_0_0_0_0_0.AuxInt != -64 {
35857 break
35858 }
35859 v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
35860 if v_0_0_0_0_0_0.Op != OpAMD64ANDLconst {
35861 break
35862 }
35863 if v_0_0_0_0_0_0.AuxInt != 63 {
35864 break
35865 }
35866 y := v_0_0_0_0_0_0.Args[0]
35867 v_0_1 := v_0.Args[1]
35868 if v_0_1.Op != OpAMD64SHLQ {
35869 break
35870 }
35871 _ = v_0_1.Args[1]
35872 x := v_0_1.Args[0]
35873 v_0_1_1 := v_0_1.Args[1]
35874 if v_0_1_1.Op != OpAMD64NEGL {
35875 break
35876 }
35877 if y != v_0_1_1.Args[0] {
35878 break
35879 }
35880 v_1 := v.Args[1]
35881 if v_1.Op != OpAMD64SHRQ {
35882 break
35883 }
35884 _ = v_1.Args[1]
35885 if x != v_1.Args[0] {
35886 break
35887 }
35888 if y != v_1.Args[1] {
35889 break
35890 }
35891 v.reset(OpAMD64RORQ)
35892 v.AddArg(x)
35893 v.AddArg(y)
35894 return true
35895 }
35896 // match: (ORQ x x)
35897 // cond:
35898 // result: x
35899 for {
35900 x := v.Args[1]
35901 if x != v.Args[0] {
35902 break
35903 }
35904 v.reset(OpCopy)
35905 v.Type = x.Type
35906 v.AddArg(x)
35907 return true
35908 }
35909 // match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)))
35910 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
35911 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
35912 for {
35913 _ = v.Args[1]
35914 x0 := v.Args[0]
35915 if x0.Op != OpAMD64MOVBload {
35916 break
35917 }
35918 i0 := x0.AuxInt
35919 s := x0.Aux
35920 mem := x0.Args[1]
35921 p := x0.Args[0]
35922 sh := v.Args[1]
35923 if sh.Op != OpAMD64SHLQconst {
35924 break
35925 }
35926 if sh.AuxInt != 8 {
35927 break
35928 }
35929 x1 := sh.Args[0]
35930 if x1.Op != OpAMD64MOVBload {
35931 break
35932 }
35933 i1 := x1.AuxInt
35934 if x1.Aux != s {
35935 break
35936 }
35937 _ = x1.Args[1]
35938 if p != x1.Args[0] {
35939 break
35940 }
35941 if mem != x1.Args[1] {
35942 break
35943 }
35944 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
35945 break
35946 }
35947 b = mergePoint(b, x0, x1)
35948 v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
35949 v.reset(OpCopy)
35950 v.AddArg(v0)
35951 v0.AuxInt = i0
35952 v0.Aux = s
35953 v0.AddArg(p)
35954 v0.AddArg(mem)
35955 return true
35956 }
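// Load-merge sketch (illustrative): two adjacent byte loads OR-ed in
// little-endian order become one 16-bit load once this rule and its
// commuted twin below fire:
//
//	func lo16(b []byte) uint64 {
//		return uint64(b[0]) | uint64(b[1])<<8 // one MOVWload
//	}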
35957 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem))
35958 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
35959 // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
35960 for {
35961 _ = v.Args[1]
35962 sh := v.Args[0]
35963 if sh.Op != OpAMD64SHLQconst {
35964 break
35965 }
35966 if sh.AuxInt != 8 {
35967 break
35968 }
35969 x1 := sh.Args[0]
35970 if x1.Op != OpAMD64MOVBload {
35971 break
35972 }
35973 i1 := x1.AuxInt
35974 s := x1.Aux
35975 mem := x1.Args[1]
35976 p := x1.Args[0]
35977 x0 := v.Args[1]
35978 if x0.Op != OpAMD64MOVBload {
35979 break
35980 }
35981 i0 := x0.AuxInt
35982 if x0.Aux != s {
35983 break
35984 }
35985 _ = x0.Args[1]
35986 if p != x0.Args[0] {
35987 break
35988 }
35989 if mem != x0.Args[1] {
35990 break
35991 }
35992 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
35993 break
35994 }
35995 b = mergePoint(b, x0, x1)
35996 v0 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
35997 v.reset(OpCopy)
35998 v.AddArg(v0)
35999 v0.AuxInt = i0
36000 v0.Aux = s
36001 v0.AddArg(p)
36002 v0.AddArg(mem)
36003 return true
36004 }
36005 // match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)))
36006 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
36007 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
36008 for {
36009 _ = v.Args[1]
36010 x0 := v.Args[0]
36011 if x0.Op != OpAMD64MOVWload {
36012 break
36013 }
36014 i0 := x0.AuxInt
36015 s := x0.Aux
36016 mem := x0.Args[1]
36017 p := x0.Args[0]
36018 sh := v.Args[1]
36019 if sh.Op != OpAMD64SHLQconst {
36020 break
36021 }
36022 if sh.AuxInt != 16 {
36023 break
36024 }
36025 x1 := sh.Args[0]
36026 if x1.Op != OpAMD64MOVWload {
36027 break
36028 }
36029 i1 := x1.AuxInt
36030 if x1.Aux != s {
36031 break
36032 }
36033 _ = x1.Args[1]
36034 if p != x1.Args[0] {
36035 break
36036 }
36037 if mem != x1.Args[1] {
36038 break
36039 }
36040 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
36041 break
36042 }
36043 b = mergePoint(b, x0, x1)
36044 v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
36045 v.reset(OpCopy)
36046 v.AddArg(v0)
36047 v0.AuxInt = i0
36048 v0.Aux = s
36049 v0.AddArg(p)
36050 v0.AddArg(mem)
36051 return true
36052 }
36053 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem))
36054 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
36055 // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
36056 for {
36057 _ = v.Args[1]
36058 sh := v.Args[0]
36059 if sh.Op != OpAMD64SHLQconst {
36060 break
36061 }
36062 if sh.AuxInt != 16 {
36063 break
36064 }
36065 x1 := sh.Args[0]
36066 if x1.Op != OpAMD64MOVWload {
36067 break
36068 }
36069 i1 := x1.AuxInt
36070 s := x1.Aux
36071 mem := x1.Args[1]
36072 p := x1.Args[0]
36073 x0 := v.Args[1]
36074 if x0.Op != OpAMD64MOVWload {
36075 break
36076 }
36077 i0 := x0.AuxInt
36078 if x0.Aux != s {
36079 break
36080 }
36081 _ = x0.Args[1]
36082 if p != x0.Args[0] {
36083 break
36084 }
36085 if mem != x0.Args[1] {
36086 break
36087 }
36088 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
36089 break
36090 }
36091 b = mergePoint(b, x0, x1)
36092 v0 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
36093 v.reset(OpCopy)
36094 v.AddArg(v0)
36095 v0.AuxInt = i0
36096 v0.Aux = s
36097 v0.AddArg(p)
36098 v0.AddArg(mem)
36099 return true
36100 }
36101 // match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
36102 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
36103 // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
36104 for {
36105 _ = v.Args[1]
36106 x0 := v.Args[0]
36107 if x0.Op != OpAMD64MOVLload {
36108 break
36109 }
36110 i0 := x0.AuxInt
36111 s := x0.Aux
36112 mem := x0.Args[1]
36113 p := x0.Args[0]
36114 sh := v.Args[1]
36115 if sh.Op != OpAMD64SHLQconst {
36116 break
36117 }
36118 if sh.AuxInt != 32 {
36119 break
36120 }
36121 x1 := sh.Args[0]
36122 if x1.Op != OpAMD64MOVLload {
36123 break
36124 }
36125 i1 := x1.AuxInt
36126 if x1.Aux != s {
36127 break
36128 }
36129 _ = x1.Args[1]
36130 if p != x1.Args[0] {
36131 break
36132 }
36133 if mem != x1.Args[1] {
36134 break
36135 }
36136 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
36137 break
36138 }
36139 b = mergePoint(b, x0, x1)
36140 v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64)
36141 v.reset(OpCopy)
36142 v.AddArg(v0)
36143 v0.AuxInt = i0
36144 v0.Aux = s
36145 v0.AddArg(p)
36146 v0.AddArg(mem)
36147 return true
36148 }
36149 return false
36150 }
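// Applied bottom-up, the byte/word/long merges in this function turn the
// canonical little-endian decode into a single 8-byte load, roughly what
// binary.LittleEndian.Uint64 compiles to (illustrative):
//
//	func load64(b []byte) uint64 {
//		return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 |
//			uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 |
//			uint64(b[6])<<48 | uint64(b[7])<<56 // one MOVQload
//	}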
36151 func rewriteValueAMD64_OpAMD64ORQ_30(v *Value) bool {
36152 b := v.Block
36153 typ := &b.Func.Config.Types
36154 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)) x0:(MOVLload [i0] {s} p mem))
36155 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
36156 // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
36157 for {
36158 _ = v.Args[1]
36159 sh := v.Args[0]
36160 if sh.Op != OpAMD64SHLQconst {
36161 break
36162 }
36163 if sh.AuxInt != 32 {
36164 break
36165 }
36166 x1 := sh.Args[0]
36167 if x1.Op != OpAMD64MOVLload {
36168 break
36169 }
36170 i1 := x1.AuxInt
36171 s := x1.Aux
36172 mem := x1.Args[1]
36173 p := x1.Args[0]
36174 x0 := v.Args[1]
36175 if x0.Op != OpAMD64MOVLload {
36176 break
36177 }
36178 i0 := x0.AuxInt
36179 if x0.Aux != s {
36180 break
36181 }
36182 _ = x0.Args[1]
36183 if p != x0.Args[0] {
36184 break
36185 }
36186 if mem != x0.Args[1] {
36187 break
36188 }
36189 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
36190 break
36191 }
36192 b = mergePoint(b, x0, x1)
36193 v0 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64)
36194 v.reset(OpCopy)
36195 v.AddArg(v0)
36196 v0.AuxInt = i0
36197 v0.Aux = s
36198 v0.AddArg(p)
36199 v0.AddArg(mem)
36200 return true
36201 }
36202 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
36203 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
36204 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
36205 for {
36206 _ = v.Args[1]
36207 s1 := v.Args[0]
36208 if s1.Op != OpAMD64SHLQconst {
36209 break
36210 }
36211 j1 := s1.AuxInt
36212 x1 := s1.Args[0]
36213 if x1.Op != OpAMD64MOVBload {
36214 break
36215 }
36216 i1 := x1.AuxInt
36217 s := x1.Aux
36218 mem := x1.Args[1]
36219 p := x1.Args[0]
36220 or := v.Args[1]
36221 if or.Op != OpAMD64ORQ {
36222 break
36223 }
36224 y := or.Args[1]
36225 s0 := or.Args[0]
36226 if s0.Op != OpAMD64SHLQconst {
36227 break
36228 }
36229 j0 := s0.AuxInt
36230 x0 := s0.Args[0]
36231 if x0.Op != OpAMD64MOVBload {
36232 break
36233 }
36234 i0 := x0.AuxInt
36235 if x0.Aux != s {
36236 break
36237 }
36238 _ = x0.Args[1]
36239 if p != x0.Args[0] {
36240 break
36241 }
36242 if mem != x0.Args[1] {
36243 break
36244 }
36245 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
36246 break
36247 }
36248 b = mergePoint(b, x0, x1, y)
36249 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
36250 v.reset(OpCopy)
36251 v.AddArg(v0)
36252 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
36253 v1.AuxInt = j0
36254 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
36255 v2.AuxInt = i0
36256 v2.Aux = s
36257 v2.AddArg(p)
36258 v2.AddArg(mem)
36259 v1.AddArg(v2)
36260 v0.AddArg(v1)
36261 v0.AddArg(y)
36262 return true
36263 }
36264 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))))
36265 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
36266 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
36267 for {
36268 _ = v.Args[1]
36269 s1 := v.Args[0]
36270 if s1.Op != OpAMD64SHLQconst {
36271 break
36272 }
36273 j1 := s1.AuxInt
36274 x1 := s1.Args[0]
36275 if x1.Op != OpAMD64MOVBload {
36276 break
36277 }
36278 i1 := x1.AuxInt
36279 s := x1.Aux
36280 mem := x1.Args[1]
36281 p := x1.Args[0]
36282 or := v.Args[1]
36283 if or.Op != OpAMD64ORQ {
36284 break
36285 }
36286 _ = or.Args[1]
36287 y := or.Args[0]
36288 s0 := or.Args[1]
36289 if s0.Op != OpAMD64SHLQconst {
36290 break
36291 }
36292 j0 := s0.AuxInt
36293 x0 := s0.Args[0]
36294 if x0.Op != OpAMD64MOVBload {
36295 break
36296 }
36297 i0 := x0.AuxInt
36298 if x0.Aux != s {
36299 break
36300 }
36301 _ = x0.Args[1]
36302 if p != x0.Args[0] {
36303 break
36304 }
36305 if mem != x0.Args[1] {
36306 break
36307 }
36308 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
36309 break
36310 }
36311 b = mergePoint(b, x0, x1, y)
36312 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
36313 v.reset(OpCopy)
36314 v.AddArg(v0)
36315 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
36316 v1.AuxInt = j0
36317 v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
36318 v2.AuxInt = i0
36319 v2.Aux = s
36320 v2.AddArg(p)
36321 v2.AddArg(mem)
36322 v1.AddArg(v2)
36323 v0.AddArg(v1)
36324 v0.AddArg(y)
36325 return true
36326 }
36327 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
36328 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
36329 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
36330 for {
36331 _ = v.Args[1]
36332 or := v.Args[0]
36333 if or.Op != OpAMD64ORQ {
36334 break
36335 }
36336 y := or.Args[1]
36337 s0 := or.Args[0]
36338 if s0.Op != OpAMD64SHLQconst {
36339 break
36340 }
36341 j0 := s0.AuxInt
36342 x0 := s0.Args[0]
36343 if x0.Op != OpAMD64MOVBload {
36344 break
36345 }
36346 i0 := x0.AuxInt
36347 s := x0.Aux
36348 mem := x0.Args[1]
36349 p := x0.Args[0]
36350 s1 := v.Args[1]
36351 if s1.Op != OpAMD64SHLQconst {
36352 break
36353 }
36354 j1 := s1.AuxInt
36355 x1 := s1.Args[0]
36356 if x1.Op != OpAMD64MOVBload {
36357 break
36358 }
36359 i1 := x1.AuxInt
36360 if x1.Aux != s {
36361 break
36362 }
36363 _ = x1.Args[1]
36364 if p != x1.Args[0] {
36365 break
36366 }
36367 if mem != x1.Args[1] {
36368 break
36369 }
36370 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
36371 break
36372 }
36373 b = mergePoint(b, x0, x1, y)
36374 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
36375 v.reset(OpCopy)
36376 v.AddArg(v0)
36377 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
36378 v1.AuxInt = j0
36379 v2 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
36380 v2.AuxInt = i0
36381 v2.Aux = s
36382 v2.AddArg(p)
36383 v2.AddArg(mem)
36384 v1.AddArg(v2)
36385 v0.AddArg(v1)
36386 v0.AddArg(y)
36387 return true
36388 }
36389 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))
36390 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
36391 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
36392 for {
36393 _ = v.Args[1]
36394 or := v.Args[0]
36395 if or.Op != OpAMD64ORQ {
36396 break
36397 }
36398 _ = or.Args[1]
36399 y := or.Args[0]
36400 s0 := or.Args[1]
36401 if s0.Op != OpAMD64SHLQconst {
36402 break
36403 }
36404 j0 := s0.AuxInt
36405 x0 := s0.Args[0]
36406 if x0.Op != OpAMD64MOVBload {
36407 break
36408 }
36409 i0 := x0.AuxInt
36410 s := x0.Aux
36411 mem := x0.Args[1]
36412 p := x0.Args[0]
36413 s1 := v.Args[1]
36414 if s1.Op != OpAMD64SHLQconst {
36415 break
36416 }
36417 j1 := s1.AuxInt
36418 x1 := s1.Args[0]
36419 if x1.Op != OpAMD64MOVBload {
36420 break
36421 }
36422 i1 := x1.AuxInt
36423 if x1.Aux != s {
36424 break
36425 }
36426 _ = x1.Args[1]
36427 if p != x1.Args[0] {
36428 break
36429 }
36430 if mem != x1.Args[1] {
36431 break
36432 }
36433 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
36434 break
36435 }
36436 b = mergePoint(b, x0, x1, y)
36437 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
36438 v.reset(OpCopy)
36439 v.AddArg(v0)
36440 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
36441 v1.AuxInt = j0
36442 v2 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
36443 v2.AuxInt = i0
36444 v2.Aux = s
36445 v2.AddArg(p)
36446 v2.AddArg(mem)
36447 v1.AddArg(v2)
36448 v0.AddArg(v1)
36449 v0.AddArg(y)
36450 return true
36451 }
36452 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y))
36453 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
36454 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
36455 for {
36456 _ = v.Args[1]
36457 s1 := v.Args[0]
36458 if s1.Op != OpAMD64SHLQconst {
36459 break
36460 }
36461 j1 := s1.AuxInt
36462 x1 := s1.Args[0]
36463 if x1.Op != OpAMD64MOVWload {
36464 break
36465 }
36466 i1 := x1.AuxInt
36467 s := x1.Aux
36468 mem := x1.Args[1]
36469 p := x1.Args[0]
36470 or := v.Args[1]
36471 if or.Op != OpAMD64ORQ {
36472 break
36473 }
36474 y := or.Args[1]
36475 s0 := or.Args[0]
36476 if s0.Op != OpAMD64SHLQconst {
36477 break
36478 }
36479 j0 := s0.AuxInt
36480 x0 := s0.Args[0]
36481 if x0.Op != OpAMD64MOVWload {
36482 break
36483 }
36484 i0 := x0.AuxInt
36485 if x0.Aux != s {
36486 break
36487 }
36488 _ = x0.Args[1]
36489 if p != x0.Args[0] {
36490 break
36491 }
36492 if mem != x0.Args[1] {
36493 break
36494 }
36495 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
36496 break
36497 }
36498 b = mergePoint(b, x0, x1, y)
36499 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
36500 v.reset(OpCopy)
36501 v.AddArg(v0)
36502 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
36503 v1.AuxInt = j0
36504 v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
36505 v2.AuxInt = i0
36506 v2.Aux = s
36507 v2.AddArg(p)
36508 v2.AddArg(mem)
36509 v1.AddArg(v2)
36510 v0.AddArg(v1)
36511 v0.AddArg(y)
36512 return true
36513 }
36514 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))))
36515 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
36516 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
36517 for {
36518 _ = v.Args[1]
36519 s1 := v.Args[0]
36520 if s1.Op != OpAMD64SHLQconst {
36521 break
36522 }
36523 j1 := s1.AuxInt
36524 x1 := s1.Args[0]
36525 if x1.Op != OpAMD64MOVWload {
36526 break
36527 }
36528 i1 := x1.AuxInt
36529 s := x1.Aux
36530 mem := x1.Args[1]
36531 p := x1.Args[0]
36532 or := v.Args[1]
36533 if or.Op != OpAMD64ORQ {
36534 break
36535 }
36536 _ = or.Args[1]
36537 y := or.Args[0]
36538 s0 := or.Args[1]
36539 if s0.Op != OpAMD64SHLQconst {
36540 break
36541 }
36542 j0 := s0.AuxInt
36543 x0 := s0.Args[0]
36544 if x0.Op != OpAMD64MOVWload {
36545 break
36546 }
36547 i0 := x0.AuxInt
36548 if x0.Aux != s {
36549 break
36550 }
36551 _ = x0.Args[1]
36552 if p != x0.Args[0] {
36553 break
36554 }
36555 if mem != x0.Args[1] {
36556 break
36557 }
36558 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
36559 break
36560 }
36561 b = mergePoint(b, x0, x1, y)
36562 v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
36563 v.reset(OpCopy)
36564 v.AddArg(v0)
36565 v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
36566 v1.AuxInt = j0
36567 v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
36568 v2.AuxInt = i0
36569 v2.Aux = s
36570 v2.AddArg(p)
36571 v2.AddArg(mem)
36572 v1.AddArg(v2)
36573 v0.AddArg(v1)
36574 v0.AddArg(y)
36575 return true
36576 }
36577 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
36578 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
36579 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
36580 for {
36581 _ = v.Args[1]
36582 or := v.Args[0]
36583 if or.Op != OpAMD64ORQ {
36584 break
36585 }
36586 y := or.Args[1]
36587 s0 := or.Args[0]
36588 if s0.Op != OpAMD64SHLQconst {
36589 break
36590 }
36591 j0 := s0.AuxInt
36592 x0 := s0.Args[0]
36593 if x0.Op != OpAMD64MOVWload {
36594 break
36595 }
36596 i0 := x0.AuxInt
36597 s := x0.Aux
36598 mem := x0.Args[1]
36599 p := x0.Args[0]
36600 s1 := v.Args[1]
36601 if s1.Op != OpAMD64SHLQconst {
36602 break
36603 }
36604 j1 := s1.AuxInt
36605 x1 := s1.Args[0]
36606 if x1.Op != OpAMD64MOVWload {
36607 break
36608 }
36609 i1 := x1.AuxInt
36610 if x1.Aux != s {
36611 break
36612 }
36613 _ = x1.Args[1]
36614 if p != x1.Args[0] {
36615 break
36616 }
36617 if mem != x1.Args[1] {
36618 break
36619 }
36620 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
36621 break
36622 }
36623 b = mergePoint(b, x0, x1, y)
36624 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
36625 v.reset(OpCopy)
36626 v.AddArg(v0)
36627 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
36628 v1.AuxInt = j0
36629 v2 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
36630 v2.AuxInt = i0
36631 v2.Aux = s
36632 v2.AddArg(p)
36633 v2.AddArg(mem)
36634 v1.AddArg(v2)
36635 v0.AddArg(v1)
36636 v0.AddArg(y)
36637 return true
36638 }
36639 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)))
36640 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
36641 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
36642 for {
36643 _ = v.Args[1]
36644 or := v.Args[0]
36645 if or.Op != OpAMD64ORQ {
36646 break
36647 }
36648 _ = or.Args[1]
36649 y := or.Args[0]
36650 s0 := or.Args[1]
36651 if s0.Op != OpAMD64SHLQconst {
36652 break
36653 }
36654 j0 := s0.AuxInt
36655 x0 := s0.Args[0]
36656 if x0.Op != OpAMD64MOVWload {
36657 break
36658 }
36659 i0 := x0.AuxInt
36660 s := x0.Aux
36661 mem := x0.Args[1]
36662 p := x0.Args[0]
36663 s1 := v.Args[1]
36664 if s1.Op != OpAMD64SHLQconst {
36665 break
36666 }
36667 j1 := s1.AuxInt
36668 x1 := s1.Args[0]
36669 if x1.Op != OpAMD64MOVWload {
36670 break
36671 }
36672 i1 := x1.AuxInt
36673 if x1.Aux != s {
36674 break
36675 }
36676 _ = x1.Args[1]
36677 if p != x1.Args[0] {
36678 break
36679 }
36680 if mem != x1.Args[1] {
36681 break
36682 }
36683 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
36684 break
36685 }
36686 b = mergePoint(b, x0, x1, y)
36687 v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
36688 v.reset(OpCopy)
36689 v.AddArg(v0)
36690 v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
36691 v1.AuxInt = j0
36692 v2 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
36693 v2.AuxInt = i0
36694 v2.Aux = s
36695 v2.AddArg(p)
36696 v2.AddArg(mem)
36697 v1.AddArg(v2)
36698 v0.AddArg(v1)
36699 v0.AddArg(y)
36700 return true
36701 }
36702 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
36703 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
36704 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
36705 for {
36706 _ = v.Args[1]
36707 x0 := v.Args[0]
36708 if x0.Op != OpAMD64MOVBloadidx1 {
36709 break
36710 }
36711 i0 := x0.AuxInt
36712 s := x0.Aux
36713 mem := x0.Args[2]
36714 p := x0.Args[0]
36715 idx := x0.Args[1]
36716 sh := v.Args[1]
36717 if sh.Op != OpAMD64SHLQconst {
36718 break
36719 }
36720 if sh.AuxInt != 8 {
36721 break
36722 }
36723 x1 := sh.Args[0]
36724 if x1.Op != OpAMD64MOVBloadidx1 {
36725 break
36726 }
36727 i1 := x1.AuxInt
36728 if x1.Aux != s {
36729 break
36730 }
36731 _ = x1.Args[2]
36732 if p != x1.Args[0] {
36733 break
36734 }
36735 if idx != x1.Args[1] {
36736 break
36737 }
36738 if mem != x1.Args[2] {
36739 break
36740 }
36741 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
36742 break
36743 }
36744 b = mergePoint(b, x0, x1)
36745 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
36746 v.reset(OpCopy)
36747 v.AddArg(v0)
36748 v0.AuxInt = i0
36749 v0.Aux = s
36750 v0.AddArg(p)
36751 v0.AddArg(idx)
36752 v0.AddArg(mem)
36753 return true
36754 }
36755 return false
36756 }
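// The SHLQconst-pair rules above handle merges in the middle of an OR
// chain: two shifted loads at adjacent offsets combine into one shifted
// wider load while the remainder of the chain (y) is preserved, so repeated
// rewrites can collapse the whole chain. Illustrative intermediate step:
//
//	y | uint64(b[2])<<16 | uint64(b[3])<<24 // -> y | (16-bit load)<<16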
36757 func rewriteValueAMD64_OpAMD64ORQ_40(v *Value) bool {
36758 b := v.Block
36759 typ := &b.Func.Config.Types
36760 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
36761 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
36762 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
36763 for {
36764 _ = v.Args[1]
36765 x0 := v.Args[0]
36766 if x0.Op != OpAMD64MOVBloadidx1 {
36767 break
36768 }
36769 i0 := x0.AuxInt
36770 s := x0.Aux
36771 mem := x0.Args[2]
36772 idx := x0.Args[0]
36773 p := x0.Args[1]
36774 sh := v.Args[1]
36775 if sh.Op != OpAMD64SHLQconst {
36776 break
36777 }
36778 if sh.AuxInt != 8 {
36779 break
36780 }
36781 x1 := sh.Args[0]
36782 if x1.Op != OpAMD64MOVBloadidx1 {
36783 break
36784 }
36785 i1 := x1.AuxInt
36786 if x1.Aux != s {
36787 break
36788 }
36789 _ = x1.Args[2]
36790 if p != x1.Args[0] {
36791 break
36792 }
36793 if idx != x1.Args[1] {
36794 break
36795 }
36796 if mem != x1.Args[2] {
36797 break
36798 }
36799 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
36800 break
36801 }
36802 b = mergePoint(b, x0, x1)
36803 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
36804 v.reset(OpCopy)
36805 v.AddArg(v0)
36806 v0.AuxInt = i0
36807 v0.Aux = s
36808 v0.AddArg(p)
36809 v0.AddArg(idx)
36810 v0.AddArg(mem)
36811 return true
36812 }
36813 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
36814 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
36815 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
36816 for {
36817 _ = v.Args[1]
36818 x0 := v.Args[0]
36819 if x0.Op != OpAMD64MOVBloadidx1 {
36820 break
36821 }
36822 i0 := x0.AuxInt
36823 s := x0.Aux
36824 mem := x0.Args[2]
36825 p := x0.Args[0]
36826 idx := x0.Args[1]
36827 sh := v.Args[1]
36828 if sh.Op != OpAMD64SHLQconst {
36829 break
36830 }
36831 if sh.AuxInt != 8 {
36832 break
36833 }
36834 x1 := sh.Args[0]
36835 if x1.Op != OpAMD64MOVBloadidx1 {
36836 break
36837 }
36838 i1 := x1.AuxInt
36839 if x1.Aux != s {
36840 break
36841 }
36842 _ = x1.Args[2]
36843 if idx != x1.Args[0] {
36844 break
36845 }
36846 if p != x1.Args[1] {
36847 break
36848 }
36849 if mem != x1.Args[2] {
36850 break
36851 }
36852 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
36853 break
36854 }
36855 b = mergePoint(b, x0, x1)
36856 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
36857 v.reset(OpCopy)
36858 v.AddArg(v0)
36859 v0.AuxInt = i0
36860 v0.Aux = s
36861 v0.AddArg(p)
36862 v0.AddArg(idx)
36863 v0.AddArg(mem)
36864 return true
36865 }
36866 // match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
36867 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
36868 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
36869 for {
36870 _ = v.Args[1]
36871 x0 := v.Args[0]
36872 if x0.Op != OpAMD64MOVBloadidx1 {
36873 break
36874 }
36875 i0 := x0.AuxInt
36876 s := x0.Aux
36877 mem := x0.Args[2]
36878 idx := x0.Args[0]
36879 p := x0.Args[1]
36880 sh := v.Args[1]
36881 if sh.Op != OpAMD64SHLQconst {
36882 break
36883 }
36884 if sh.AuxInt != 8 {
36885 break
36886 }
36887 x1 := sh.Args[0]
36888 if x1.Op != OpAMD64MOVBloadidx1 {
36889 break
36890 }
36891 i1 := x1.AuxInt
36892 if x1.Aux != s {
36893 break
36894 }
36895 _ = x1.Args[2]
36896 if idx != x1.Args[0] {
36897 break
36898 }
36899 if p != x1.Args[1] {
36900 break
36901 }
36902 if mem != x1.Args[2] {
36903 break
36904 }
36905 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
36906 break
36907 }
36908 b = mergePoint(b, x0, x1)
36909 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
36910 v.reset(OpCopy)
36911 v.AddArg(v0)
36912 v0.AuxInt = i0
36913 v0.Aux = s
36914 v0.AddArg(p)
36915 v0.AddArg(idx)
36916 v0.AddArg(mem)
36917 return true
36918 }
36919 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
36920 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
36921 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
36922 for {
36923 _ = v.Args[1]
36924 sh := v.Args[0]
36925 if sh.Op != OpAMD64SHLQconst {
36926 break
36927 }
36928 if sh.AuxInt != 8 {
36929 break
36930 }
36931 x1 := sh.Args[0]
36932 if x1.Op != OpAMD64MOVBloadidx1 {
36933 break
36934 }
36935 i1 := x1.AuxInt
36936 s := x1.Aux
36937 mem := x1.Args[2]
36938 p := x1.Args[0]
36939 idx := x1.Args[1]
36940 x0 := v.Args[1]
36941 if x0.Op != OpAMD64MOVBloadidx1 {
36942 break
36943 }
36944 i0 := x0.AuxInt
36945 if x0.Aux != s {
36946 break
36947 }
36948 _ = x0.Args[2]
36949 if p != x0.Args[0] {
36950 break
36951 }
36952 if idx != x0.Args[1] {
36953 break
36954 }
36955 if mem != x0.Args[2] {
36956 break
36957 }
36958 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
36959 break
36960 }
36961 b = mergePoint(b, x0, x1)
36962 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
36963 v.reset(OpCopy)
36964 v.AddArg(v0)
36965 v0.AuxInt = i0
36966 v0.Aux = s
36967 v0.AddArg(p)
36968 v0.AddArg(idx)
36969 v0.AddArg(mem)
36970 return true
36971 }
36972 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem))
36973 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
36974 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
36975 for {
36976 _ = v.Args[1]
36977 sh := v.Args[0]
36978 if sh.Op != OpAMD64SHLQconst {
36979 break
36980 }
36981 if sh.AuxInt != 8 {
36982 break
36983 }
36984 x1 := sh.Args[0]
36985 if x1.Op != OpAMD64MOVBloadidx1 {
36986 break
36987 }
36988 i1 := x1.AuxInt
36989 s := x1.Aux
36990 mem := x1.Args[2]
36991 idx := x1.Args[0]
36992 p := x1.Args[1]
36993 x0 := v.Args[1]
36994 if x0.Op != OpAMD64MOVBloadidx1 {
36995 break
36996 }
36997 i0 := x0.AuxInt
36998 if x0.Aux != s {
36999 break
37000 }
37001 _ = x0.Args[2]
37002 if p != x0.Args[0] {
37003 break
37004 }
37005 if idx != x0.Args[1] {
37006 break
37007 }
37008 if mem != x0.Args[2] {
37009 break
37010 }
37011 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
37012 break
37013 }
37014 b = mergePoint(b, x0, x1)
37015 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
37016 v.reset(OpCopy)
37017 v.AddArg(v0)
37018 v0.AuxInt = i0
37019 v0.Aux = s
37020 v0.AddArg(p)
37021 v0.AddArg(idx)
37022 v0.AddArg(mem)
37023 return true
37024 }
37025 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
37026 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
37027 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
37028 for {
37029 _ = v.Args[1]
37030 sh := v.Args[0]
37031 if sh.Op != OpAMD64SHLQconst {
37032 break
37033 }
37034 if sh.AuxInt != 8 {
37035 break
37036 }
37037 x1 := sh.Args[0]
37038 if x1.Op != OpAMD64MOVBloadidx1 {
37039 break
37040 }
37041 i1 := x1.AuxInt
37042 s := x1.Aux
37043 mem := x1.Args[2]
37044 p := x1.Args[0]
37045 idx := x1.Args[1]
37046 x0 := v.Args[1]
37047 if x0.Op != OpAMD64MOVBloadidx1 {
37048 break
37049 }
37050 i0 := x0.AuxInt
37051 if x0.Aux != s {
37052 break
37053 }
37054 _ = x0.Args[2]
37055 if idx != x0.Args[0] {
37056 break
37057 }
37058 if p != x0.Args[1] {
37059 break
37060 }
37061 if mem != x0.Args[2] {
37062 break
37063 }
37064 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
37065 break
37066 }
37067 b = mergePoint(b, x0, x1)
37068 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
37069 v.reset(OpCopy)
37070 v.AddArg(v0)
37071 v0.AuxInt = i0
37072 v0.Aux = s
37073 v0.AddArg(p)
37074 v0.AddArg(idx)
37075 v0.AddArg(mem)
37076 return true
37077 }
37078 // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem))
37079 // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
37080 // result: @mergePoint(b,x0,x1) (MOVWloadidx1 <v.Type> [i0] {s} p idx mem)
37081 for {
37082 _ = v.Args[1]
37083 sh := v.Args[0]
37084 if sh.Op != OpAMD64SHLQconst {
37085 break
37086 }
37087 if sh.AuxInt != 8 {
37088 break
37089 }
37090 x1 := sh.Args[0]
37091 if x1.Op != OpAMD64MOVBloadidx1 {
37092 break
37093 }
37094 i1 := x1.AuxInt
37095 s := x1.Aux
37096 mem := x1.Args[2]
37097 idx := x1.Args[0]
37098 p := x1.Args[1]
37099 x0 := v.Args[1]
37100 if x0.Op != OpAMD64MOVBloadidx1 {
37101 break
37102 }
37103 i0 := x0.AuxInt
37104 if x0.Aux != s {
37105 break
37106 }
37107 _ = x0.Args[2]
37108 if idx != x0.Args[0] {
37109 break
37110 }
37111 if p != x0.Args[1] {
37112 break
37113 }
37114 if mem != x0.Args[2] {
37115 break
37116 }
37117 if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
37118 break
37119 }
37120 b = mergePoint(b, x0, x1)
37121 v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type)
37122 v.reset(OpCopy)
37123 v.AddArg(v0)
37124 v0.AuxInt = i0
37125 v0.Aux = s
37126 v0.AddArg(p)
37127 v0.AddArg(idx)
37128 v0.AddArg(mem)
37129 return true
37130 }
37131 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
37132 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
37133 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)
37134 for {
37135 _ = v.Args[1]
37136 x0 := v.Args[0]
37137 if x0.Op != OpAMD64MOVWloadidx1 {
37138 break
37139 }
37140 i0 := x0.AuxInt
37141 s := x0.Aux
37142 mem := x0.Args[2]
37143 p := x0.Args[0]
37144 idx := x0.Args[1]
37145 sh := v.Args[1]
37146 if sh.Op != OpAMD64SHLQconst {
37147 break
37148 }
37149 if sh.AuxInt != 16 {
37150 break
37151 }
37152 x1 := sh.Args[0]
37153 if x1.Op != OpAMD64MOVWloadidx1 {
37154 break
37155 }
37156 i1 := x1.AuxInt
37157 if x1.Aux != s {
37158 break
37159 }
37160 _ = x1.Args[2]
37161 if p != x1.Args[0] {
37162 break
37163 }
37164 if idx != x1.Args[1] {
37165 break
37166 }
37167 if mem != x1.Args[2] {
37168 break
37169 }
37170 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
37171 break
37172 }
37173 b = mergePoint(b, x0, x1)
37174 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
37175 v.reset(OpCopy)
37176 v.AddArg(v0)
37177 v0.AuxInt = i0
37178 v0.Aux = s
37179 v0.AddArg(p)
37180 v0.AddArg(idx)
37181 v0.AddArg(mem)
37182 return true
37183 }
37184 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
37185 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
37186 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)
37187 for {
37188 _ = v.Args[1]
37189 x0 := v.Args[0]
37190 if x0.Op != OpAMD64MOVWloadidx1 {
37191 break
37192 }
37193 i0 := x0.AuxInt
37194 s := x0.Aux
37195 mem := x0.Args[2]
37196 idx := x0.Args[0]
37197 p := x0.Args[1]
37198 sh := v.Args[1]
37199 if sh.Op != OpAMD64SHLQconst {
37200 break
37201 }
37202 if sh.AuxInt != 16 {
37203 break
37204 }
37205 x1 := sh.Args[0]
37206 if x1.Op != OpAMD64MOVWloadidx1 {
37207 break
37208 }
37209 i1 := x1.AuxInt
37210 if x1.Aux != s {
37211 break
37212 }
37213 _ = x1.Args[2]
37214 if p != x1.Args[0] {
37215 break
37216 }
37217 if idx != x1.Args[1] {
37218 break
37219 }
37220 if mem != x1.Args[2] {
37221 break
37222 }
37223 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
37224 break
37225 }
37226 b = mergePoint(b, x0, x1)
37227 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
37228 v.reset(OpCopy)
37229 v.AddArg(v0)
37230 v0.AuxInt = i0
37231 v0.Aux = s
37232 v0.AddArg(p)
37233 v0.AddArg(idx)
37234 v0.AddArg(mem)
37235 return true
37236 }
37237 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
37238 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
37239 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)
37240 for {
37241 _ = v.Args[1]
37242 x0 := v.Args[0]
37243 if x0.Op != OpAMD64MOVWloadidx1 {
37244 break
37245 }
37246 i0 := x0.AuxInt
37247 s := x0.Aux
37248 mem := x0.Args[2]
37249 p := x0.Args[0]
37250 idx := x0.Args[1]
37251 sh := v.Args[1]
37252 if sh.Op != OpAMD64SHLQconst {
37253 break
37254 }
37255 if sh.AuxInt != 16 {
37256 break
37257 }
37258 x1 := sh.Args[0]
37259 if x1.Op != OpAMD64MOVWloadidx1 {
37260 break
37261 }
37262 i1 := x1.AuxInt
37263 if x1.Aux != s {
37264 break
37265 }
37266 _ = x1.Args[2]
37267 if idx != x1.Args[0] {
37268 break
37269 }
37270 if p != x1.Args[1] {
37271 break
37272 }
37273 if mem != x1.Args[2] {
37274 break
37275 }
37276 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
37277 break
37278 }
37279 b = mergePoint(b, x0, x1)
37280 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
37281 v.reset(OpCopy)
37282 v.AddArg(v0)
37283 v0.AuxInt = i0
37284 v0.Aux = s
37285 v0.AddArg(p)
37286 v0.AddArg(idx)
37287 v0.AddArg(mem)
37288 return true
37289 }
37290 return false
37291 }
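// The loadidx1 variants repeat the same merges for indexed addressing,
// enumerating every p/idx operand order. Illustrative (assuming the bounds
// checks have already been eliminated):
//
//	func lo16at(b []byte, i int) uint64 {
//		return uint64(b[i]) | uint64(b[i+1])<<8 // one MOVWloadidx1
//	}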
37292 func rewriteValueAMD64_OpAMD64ORQ_50(v *Value) bool {
37293 b := v.Block
37294 typ := &b.Func.Config.Types
37295 // match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
37296 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
37297 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)
37298 for {
37299 _ = v.Args[1]
37300 x0 := v.Args[0]
37301 if x0.Op != OpAMD64MOVWloadidx1 {
37302 break
37303 }
37304 i0 := x0.AuxInt
37305 s := x0.Aux
37306 mem := x0.Args[2]
37307 idx := x0.Args[0]
37308 p := x0.Args[1]
37309 sh := v.Args[1]
37310 if sh.Op != OpAMD64SHLQconst {
37311 break
37312 }
37313 if sh.AuxInt != 16 {
37314 break
37315 }
37316 x1 := sh.Args[0]
37317 if x1.Op != OpAMD64MOVWloadidx1 {
37318 break
37319 }
37320 i1 := x1.AuxInt
37321 if x1.Aux != s {
37322 break
37323 }
37324 _ = x1.Args[2]
37325 if idx != x1.Args[0] {
37326 break
37327 }
37328 if p != x1.Args[1] {
37329 break
37330 }
37331 if mem != x1.Args[2] {
37332 break
37333 }
37334 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
37335 break
37336 }
37337 b = mergePoint(b, x0, x1)
37338 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
37339 v.reset(OpCopy)
37340 v.AddArg(v0)
37341 v0.AuxInt = i0
37342 v0.Aux = s
37343 v0.AddArg(p)
37344 v0.AddArg(idx)
37345 v0.AddArg(mem)
37346 return true
37347 }
37348 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
37349 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
37350 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)
37351 for {
37352 _ = v.Args[1]
37353 sh := v.Args[0]
37354 if sh.Op != OpAMD64SHLQconst {
37355 break
37356 }
37357 if sh.AuxInt != 16 {
37358 break
37359 }
37360 x1 := sh.Args[0]
37361 if x1.Op != OpAMD64MOVWloadidx1 {
37362 break
37363 }
37364 i1 := x1.AuxInt
37365 s := x1.Aux
37366 mem := x1.Args[2]
37367 p := x1.Args[0]
37368 idx := x1.Args[1]
37369 x0 := v.Args[1]
37370 if x0.Op != OpAMD64MOVWloadidx1 {
37371 break
37372 }
37373 i0 := x0.AuxInt
37374 if x0.Aux != s {
37375 break
37376 }
37377 _ = x0.Args[2]
37378 if p != x0.Args[0] {
37379 break
37380 }
37381 if idx != x0.Args[1] {
37382 break
37383 }
37384 if mem != x0.Args[2] {
37385 break
37386 }
37387 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
37388 break
37389 }
37390 b = mergePoint(b, x0, x1)
37391 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
37392 v.reset(OpCopy)
37393 v.AddArg(v0)
37394 v0.AuxInt = i0
37395 v0.Aux = s
37396 v0.AddArg(p)
37397 v0.AddArg(idx)
37398 v0.AddArg(mem)
37399 return true
37400 }
37401 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))
37402 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
37403 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)
37404 for {
37405 _ = v.Args[1]
37406 sh := v.Args[0]
37407 if sh.Op != OpAMD64SHLQconst {
37408 break
37409 }
37410 if sh.AuxInt != 16 {
37411 break
37412 }
37413 x1 := sh.Args[0]
37414 if x1.Op != OpAMD64MOVWloadidx1 {
37415 break
37416 }
37417 i1 := x1.AuxInt
37418 s := x1.Aux
37419 mem := x1.Args[2]
37420 idx := x1.Args[0]
37421 p := x1.Args[1]
37422 x0 := v.Args[1]
37423 if x0.Op != OpAMD64MOVWloadidx1 {
37424 break
37425 }
37426 i0 := x0.AuxInt
37427 if x0.Aux != s {
37428 break
37429 }
37430 _ = x0.Args[2]
37431 if p != x0.Args[0] {
37432 break
37433 }
37434 if idx != x0.Args[1] {
37435 break
37436 }
37437 if mem != x0.Args[2] {
37438 break
37439 }
37440 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
37441 break
37442 }
37443 b = mergePoint(b, x0, x1)
37444 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
37445 v.reset(OpCopy)
37446 v.AddArg(v0)
37447 v0.AuxInt = i0
37448 v0.Aux = s
37449 v0.AddArg(p)
37450 v0.AddArg(idx)
37451 v0.AddArg(mem)
37452 return true
37453 }
37454 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
37455 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
37456 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)
37457 for {
37458 _ = v.Args[1]
37459 sh := v.Args[0]
37460 if sh.Op != OpAMD64SHLQconst {
37461 break
37462 }
37463 if sh.AuxInt != 16 {
37464 break
37465 }
37466 x1 := sh.Args[0]
37467 if x1.Op != OpAMD64MOVWloadidx1 {
37468 break
37469 }
37470 i1 := x1.AuxInt
37471 s := x1.Aux
37472 mem := x1.Args[2]
37473 p := x1.Args[0]
37474 idx := x1.Args[1]
37475 x0 := v.Args[1]
37476 if x0.Op != OpAMD64MOVWloadidx1 {
37477 break
37478 }
37479 i0 := x0.AuxInt
37480 if x0.Aux != s {
37481 break
37482 }
37483 _ = x0.Args[2]
37484 if idx != x0.Args[0] {
37485 break
37486 }
37487 if p != x0.Args[1] {
37488 break
37489 }
37490 if mem != x0.Args[2] {
37491 break
37492 }
37493 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
37494 break
37495 }
37496 b = mergePoint(b, x0, x1)
37497 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
37498 v.reset(OpCopy)
37499 v.AddArg(v0)
37500 v0.AuxInt = i0
37501 v0.Aux = s
37502 v0.AddArg(p)
37503 v0.AddArg(idx)
37504 v0.AddArg(mem)
37505 return true
37506 }
37507 // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))
37508 // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
37509 // result: @mergePoint(b,x0,x1) (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)
37510 for {
37511 _ = v.Args[1]
37512 sh := v.Args[0]
37513 if sh.Op != OpAMD64SHLQconst {
37514 break
37515 }
37516 if sh.AuxInt != 16 {
37517 break
37518 }
37519 x1 := sh.Args[0]
37520 if x1.Op != OpAMD64MOVWloadidx1 {
37521 break
37522 }
37523 i1 := x1.AuxInt
37524 s := x1.Aux
37525 mem := x1.Args[2]
37526 idx := x1.Args[0]
37527 p := x1.Args[1]
37528 x0 := v.Args[1]
37529 if x0.Op != OpAMD64MOVWloadidx1 {
37530 break
37531 }
37532 i0 := x0.AuxInt
37533 if x0.Aux != s {
37534 break
37535 }
37536 _ = x0.Args[2]
37537 if idx != x0.Args[0] {
37538 break
37539 }
37540 if p != x0.Args[1] {
37541 break
37542 }
37543 if mem != x0.Args[2] {
37544 break
37545 }
37546 if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
37547 break
37548 }
37549 b = mergePoint(b, x0, x1)
37550 v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
37551 v.reset(OpCopy)
37552 v.AddArg(v0)
37553 v0.AuxInt = i0
37554 v0.Aux = s
37555 v0.AddArg(p)
37556 v0.AddArg(idx)
37557 v0.AddArg(mem)
37558 return true
37559 }
37560 // match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
37561 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
37562 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 <typ.UInt64> [i0] {s} p idx mem)
37563 for {
37564 _ = v.Args[1]
37565 x0 := v.Args[0]
37566 if x0.Op != OpAMD64MOVLloadidx1 {
37567 break
37568 }
37569 i0 := x0.AuxInt
37570 s := x0.Aux
37571 mem := x0.Args[2]
37572 p := x0.Args[0]
37573 idx := x0.Args[1]
37574 sh := v.Args[1]
37575 if sh.Op != OpAMD64SHLQconst {
37576 break
37577 }
37578 if sh.AuxInt != 32 {
37579 break
37580 }
37581 x1 := sh.Args[0]
37582 if x1.Op != OpAMD64MOVLloadidx1 {
37583 break
37584 }
37585 i1 := x1.AuxInt
37586 if x1.Aux != s {
37587 break
37588 }
37589 _ = x1.Args[2]
37590 if p != x1.Args[0] {
37591 break
37592 }
37593 if idx != x1.Args[1] {
37594 break
37595 }
37596 if mem != x1.Args[2] {
37597 break
37598 }
37599 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
37600 break
37601 }
37602 b = mergePoint(b, x0, x1)
37603 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
37604 v.reset(OpCopy)
37605 v.AddArg(v0)
37606 v0.AuxInt = i0
37607 v0.Aux = s
37608 v0.AddArg(p)
37609 v0.AddArg(idx)
37610 v0.AddArg(mem)
37611 return true
37612 }
37613 // match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)))
37614 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
37615 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 <typ.UInt64> [i0] {s} p idx mem)
37616 for {
37617 _ = v.Args[1]
37618 x0 := v.Args[0]
37619 if x0.Op != OpAMD64MOVLloadidx1 {
37620 break
37621 }
37622 i0 := x0.AuxInt
37623 s := x0.Aux
37624 mem := x0.Args[2]
37625 idx := x0.Args[0]
37626 p := x0.Args[1]
37627 sh := v.Args[1]
37628 if sh.Op != OpAMD64SHLQconst {
37629 break
37630 }
37631 if sh.AuxInt != 32 {
37632 break
37633 }
37634 x1 := sh.Args[0]
37635 if x1.Op != OpAMD64MOVLloadidx1 {
37636 break
37637 }
37638 i1 := x1.AuxInt
37639 if x1.Aux != s {
37640 break
37641 }
37642 _ = x1.Args[2]
37643 if p != x1.Args[0] {
37644 break
37645 }
37646 if idx != x1.Args[1] {
37647 break
37648 }
37649 if mem != x1.Args[2] {
37650 break
37651 }
37652 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
37653 break
37654 }
37655 b = mergePoint(b, x0, x1)
37656 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
37657 v.reset(OpCopy)
37658 v.AddArg(v0)
37659 v0.AuxInt = i0
37660 v0.Aux = s
37661 v0.AddArg(p)
37662 v0.AddArg(idx)
37663 v0.AddArg(mem)
37664 return true
37665 }
37666 // match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
37667 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
37668 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 <typ.UInt64> [i0] {s} p idx mem)
37669 for {
37670 _ = v.Args[1]
37671 x0 := v.Args[0]
37672 if x0.Op != OpAMD64MOVLloadidx1 {
37673 break
37674 }
37675 i0 := x0.AuxInt
37676 s := x0.Aux
37677 mem := x0.Args[2]
37678 p := x0.Args[0]
37679 idx := x0.Args[1]
37680 sh := v.Args[1]
37681 if sh.Op != OpAMD64SHLQconst {
37682 break
37683 }
37684 if sh.AuxInt != 32 {
37685 break
37686 }
37687 x1 := sh.Args[0]
37688 if x1.Op != OpAMD64MOVLloadidx1 {
37689 break
37690 }
37691 i1 := x1.AuxInt
37692 if x1.Aux != s {
37693 break
37694 }
37695 _ = x1.Args[2]
37696 if idx != x1.Args[0] {
37697 break
37698 }
37699 if p != x1.Args[1] {
37700 break
37701 }
37702 if mem != x1.Args[2] {
37703 break
37704 }
37705 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
37706 break
37707 }
37708 b = mergePoint(b, x0, x1)
37709 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
37710 v.reset(OpCopy)
37711 v.AddArg(v0)
37712 v0.AuxInt = i0
37713 v0.Aux = s
37714 v0.AddArg(p)
37715 v0.AddArg(idx)
37716 v0.AddArg(mem)
37717 return true
37718 }
37719 // match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)))
37720 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
37721 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 <typ.UInt64> [i0] {s} p idx mem)
37722 for {
37723 _ = v.Args[1]
37724 x0 := v.Args[0]
37725 if x0.Op != OpAMD64MOVLloadidx1 {
37726 break
37727 }
37728 i0 := x0.AuxInt
37729 s := x0.Aux
37730 mem := x0.Args[2]
37731 idx := x0.Args[0]
37732 p := x0.Args[1]
37733 sh := v.Args[1]
37734 if sh.Op != OpAMD64SHLQconst {
37735 break
37736 }
37737 if sh.AuxInt != 32 {
37738 break
37739 }
37740 x1 := sh.Args[0]
37741 if x1.Op != OpAMD64MOVLloadidx1 {
37742 break
37743 }
37744 i1 := x1.AuxInt
37745 if x1.Aux != s {
37746 break
37747 }
37748 _ = x1.Args[2]
37749 if idx != x1.Args[0] {
37750 break
37751 }
37752 if p != x1.Args[1] {
37753 break
37754 }
37755 if mem != x1.Args[2] {
37756 break
37757 }
37758 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
37759 break
37760 }
37761 b = mergePoint(b, x0, x1)
37762 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
37763 v.reset(OpCopy)
37764 v.AddArg(v0)
37765 v0.AuxInt = i0
37766 v0.Aux = s
37767 v0.AddArg(p)
37768 v0.AddArg(idx)
37769 v0.AddArg(mem)
37770 return true
37771 }
37772 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
37773 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
37774 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 <typ.UInt64> [i0] {s} p idx mem)
37775 for {
37776 _ = v.Args[1]
37777 sh := v.Args[0]
37778 if sh.Op != OpAMD64SHLQconst {
37779 break
37780 }
37781 if sh.AuxInt != 32 {
37782 break
37783 }
37784 x1 := sh.Args[0]
37785 if x1.Op != OpAMD64MOVLloadidx1 {
37786 break
37787 }
37788 i1 := x1.AuxInt
37789 s := x1.Aux
37790 mem := x1.Args[2]
37791 p := x1.Args[0]
37792 idx := x1.Args[1]
37793 x0 := v.Args[1]
37794 if x0.Op != OpAMD64MOVLloadidx1 {
37795 break
37796 }
37797 i0 := x0.AuxInt
37798 if x0.Aux != s {
37799 break
37800 }
37801 _ = x0.Args[2]
37802 if p != x0.Args[0] {
37803 break
37804 }
37805 if idx != x0.Args[1] {
37806 break
37807 }
37808 if mem != x0.Args[2] {
37809 break
37810 }
37811 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
37812 break
37813 }
37814 b = mergePoint(b, x0, x1)
37815 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
37816 v.reset(OpCopy)
37817 v.AddArg(v0)
37818 v0.AuxInt = i0
37819 v0.Aux = s
37820 v0.AddArg(p)
37821 v0.AddArg(idx)
37822 v0.AddArg(mem)
37823 return true
37824 }
37825 return false
37826 }
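// rewriteValueAMD64_OpAMD64ORQ_60 finishes the MOVLloadidx1 pair merges and
// starts on the shifted byte-load patterns, folding two SHLQconst-wrapped
// MOVBloadidx1 terms of an ORQ chain into a single shifted MOVWloadidx1.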
37827 func rewriteValueAMD64_OpAMD64ORQ_60(v *Value) bool {
37828 b := v.Block
37829 typ := &b.Func.Config.Types
37830 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem))
37831 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
37832 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 <typ.UInt64> [i0] {s} p idx mem)
37833 for {
37834 _ = v.Args[1]
37835 sh := v.Args[0]
37836 if sh.Op != OpAMD64SHLQconst {
37837 break
37838 }
37839 if sh.AuxInt != 32 {
37840 break
37841 }
37842 x1 := sh.Args[0]
37843 if x1.Op != OpAMD64MOVLloadidx1 {
37844 break
37845 }
37846 i1 := x1.AuxInt
37847 s := x1.Aux
37848 mem := x1.Args[2]
37849 idx := x1.Args[0]
37850 p := x1.Args[1]
37851 x0 := v.Args[1]
37852 if x0.Op != OpAMD64MOVLloadidx1 {
37853 break
37854 }
37855 i0 := x0.AuxInt
37856 if x0.Aux != s {
37857 break
37858 }
37859 _ = x0.Args[2]
37860 if p != x0.Args[0] {
37861 break
37862 }
37863 if idx != x0.Args[1] {
37864 break
37865 }
37866 if mem != x0.Args[2] {
37867 break
37868 }
37869 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
37870 break
37871 }
37872 b = mergePoint(b, x0, x1)
37873 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
37874 v.reset(OpCopy)
37875 v.AddArg(v0)
37876 v0.AuxInt = i0
37877 v0.Aux = s
37878 v0.AddArg(p)
37879 v0.AddArg(idx)
37880 v0.AddArg(mem)
37881 return true
37882 }
37883 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem))
37884 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
37885 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 <typ.UInt64> [i0] {s} p idx mem)
37886 for {
37887 _ = v.Args[1]
37888 sh := v.Args[0]
37889 if sh.Op != OpAMD64SHLQconst {
37890 break
37891 }
37892 if sh.AuxInt != 32 {
37893 break
37894 }
37895 x1 := sh.Args[0]
37896 if x1.Op != OpAMD64MOVLloadidx1 {
37897 break
37898 }
37899 i1 := x1.AuxInt
37900 s := x1.Aux
37901 mem := x1.Args[2]
37902 p := x1.Args[0]
37903 idx := x1.Args[1]
37904 x0 := v.Args[1]
37905 if x0.Op != OpAMD64MOVLloadidx1 {
37906 break
37907 }
37908 i0 := x0.AuxInt
37909 if x0.Aux != s {
37910 break
37911 }
37912 _ = x0.Args[2]
37913 if idx != x0.Args[0] {
37914 break
37915 }
37916 if p != x0.Args[1] {
37917 break
37918 }
37919 if mem != x0.Args[2] {
37920 break
37921 }
37922 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
37923 break
37924 }
37925 b = mergePoint(b, x0, x1)
37926 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
37927 v.reset(OpCopy)
37928 v.AddArg(v0)
37929 v0.AuxInt = i0
37930 v0.Aux = s
37931 v0.AddArg(p)
37932 v0.AddArg(idx)
37933 v0.AddArg(mem)
37934 return true
37935 }
37936 // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem))
37937 // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
37938 // result: @mergePoint(b,x0,x1) (MOVQloadidx1 <typ.UInt64> [i0] {s} p idx mem)
37939 for {
37940 _ = v.Args[1]
37941 sh := v.Args[0]
37942 if sh.Op != OpAMD64SHLQconst {
37943 break
37944 }
37945 if sh.AuxInt != 32 {
37946 break
37947 }
37948 x1 := sh.Args[0]
37949 if x1.Op != OpAMD64MOVLloadidx1 {
37950 break
37951 }
37952 i1 := x1.AuxInt
37953 s := x1.Aux
37954 mem := x1.Args[2]
37955 idx := x1.Args[0]
37956 p := x1.Args[1]
37957 x0 := v.Args[1]
37958 if x0.Op != OpAMD64MOVLloadidx1 {
37959 break
37960 }
37961 i0 := x0.AuxInt
37962 if x0.Aux != s {
37963 break
37964 }
37965 _ = x0.Args[2]
37966 if idx != x0.Args[0] {
37967 break
37968 }
37969 if p != x0.Args[1] {
37970 break
37971 }
37972 if mem != x0.Args[2] {
37973 break
37974 }
37975 if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
37976 break
37977 }
37978 b = mergePoint(b, x0, x1)
37979 v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
37980 v.reset(OpCopy)
37981 v.AddArg(v0)
37982 v0.AuxInt = i0
37983 v0.Aux = s
37984 v0.AddArg(p)
37985 v0.AddArg(idx)
37986 v0.AddArg(mem)
37987 return true
37988 }
37989 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
37990 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
37991 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)
37992 for {
37993 _ = v.Args[1]
37994 s1 := v.Args[0]
37995 if s1.Op != OpAMD64SHLQconst {
37996 break
37997 }
37998 j1 := s1.AuxInt
37999 x1 := s1.Args[0]
38000 if x1.Op != OpAMD64MOVBloadidx1 {
38001 break
38002 }
38003 i1 := x1.AuxInt
38004 s := x1.Aux
38005 mem := x1.Args[2]
38006 p := x1.Args[0]
38007 idx := x1.Args[1]
38008 or := v.Args[1]
38009 if or.Op != OpAMD64ORQ {
38010 break
38011 }
38012 y := or.Args[1]
38013 s0 := or.Args[0]
38014 if s0.Op != OpAMD64SHLQconst {
38015 break
38016 }
38017 j0 := s0.AuxInt
38018 x0 := s0.Args[0]
38019 if x0.Op != OpAMD64MOVBloadidx1 {
38020 break
38021 }
38022 i0 := x0.AuxInt
38023 if x0.Aux != s {
38024 break
38025 }
38026 _ = x0.Args[2]
38027 if p != x0.Args[0] {
38028 break
38029 }
38030 if idx != x0.Args[1] {
38031 break
38032 }
38033 if mem != x0.Args[2] {
38034 break
38035 }
38036 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
38037 break
38038 }
38039 b = mergePoint(b, x0, x1, y)
38040 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
38041 v.reset(OpCopy)
38042 v.AddArg(v0)
38043 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
38044 v1.AuxInt = j0
38045 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
38046 v2.AuxInt = i0
38047 v2.Aux = s
38048 v2.AddArg(p)
38049 v2.AddArg(idx)
38050 v2.AddArg(mem)
38051 v1.AddArg(v2)
38052 v0.AddArg(v1)
38053 v0.AddArg(y)
38054 return true
38055 }
38056 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y))
38057 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
38058 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)
38059 for {
38060 _ = v.Args[1]
38061 s1 := v.Args[0]
38062 if s1.Op != OpAMD64SHLQconst {
38063 break
38064 }
38065 j1 := s1.AuxInt
38066 x1 := s1.Args[0]
38067 if x1.Op != OpAMD64MOVBloadidx1 {
38068 break
38069 }
38070 i1 := x1.AuxInt
38071 s := x1.Aux
38072 mem := x1.Args[2]
38073 idx := x1.Args[0]
38074 p := x1.Args[1]
38075 or := v.Args[1]
38076 if or.Op != OpAMD64ORQ {
38077 break
38078 }
38079 y := or.Args[1]
38080 s0 := or.Args[0]
38081 if s0.Op != OpAMD64SHLQconst {
38082 break
38083 }
38084 j0 := s0.AuxInt
38085 x0 := s0.Args[0]
38086 if x0.Op != OpAMD64MOVBloadidx1 {
38087 break
38088 }
38089 i0 := x0.AuxInt
38090 if x0.Aux != s {
38091 break
38092 }
38093 _ = x0.Args[2]
38094 if p != x0.Args[0] {
38095 break
38096 }
38097 if idx != x0.Args[1] {
38098 break
38099 }
38100 if mem != x0.Args[2] {
38101 break
38102 }
38103 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
38104 break
38105 }
38106 b = mergePoint(b, x0, x1, y)
38107 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
38108 v.reset(OpCopy)
38109 v.AddArg(v0)
38110 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
38111 v1.AuxInt = j0
38112 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
38113 v2.AuxInt = i0
38114 v2.Aux = s
38115 v2.AddArg(p)
38116 v2.AddArg(idx)
38117 v2.AddArg(mem)
38118 v1.AddArg(v2)
38119 v0.AddArg(v1)
38120 v0.AddArg(y)
38121 return true
38122 }
38123 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
38124 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
38125 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)
38126 for {
38127 _ = v.Args[1]
38128 s1 := v.Args[0]
38129 if s1.Op != OpAMD64SHLQconst {
38130 break
38131 }
38132 j1 := s1.AuxInt
38133 x1 := s1.Args[0]
38134 if x1.Op != OpAMD64MOVBloadidx1 {
38135 break
38136 }
38137 i1 := x1.AuxInt
38138 s := x1.Aux
38139 mem := x1.Args[2]
38140 p := x1.Args[0]
38141 idx := x1.Args[1]
38142 or := v.Args[1]
38143 if or.Op != OpAMD64ORQ {
38144 break
38145 }
38146 y := or.Args[1]
38147 s0 := or.Args[0]
38148 if s0.Op != OpAMD64SHLQconst {
38149 break
38150 }
38151 j0 := s0.AuxInt
38152 x0 := s0.Args[0]
38153 if x0.Op != OpAMD64MOVBloadidx1 {
38154 break
38155 }
38156 i0 := x0.AuxInt
38157 if x0.Aux != s {
38158 break
38159 }
38160 _ = x0.Args[2]
38161 if idx != x0.Args[0] {
38162 break
38163 }
38164 if p != x0.Args[1] {
38165 break
38166 }
38167 if mem != x0.Args[2] {
38168 break
38169 }
38170 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
38171 break
38172 }
38173 b = mergePoint(b, x0, x1, y)
38174 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
38175 v.reset(OpCopy)
38176 v.AddArg(v0)
38177 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
38178 v1.AuxInt = j0
38179 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
38180 v2.AuxInt = i0
38181 v2.Aux = s
38182 v2.AddArg(p)
38183 v2.AddArg(idx)
38184 v2.AddArg(mem)
38185 v1.AddArg(v2)
38186 v0.AddArg(v1)
38187 v0.AddArg(y)
38188 return true
38189 }
38190 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y))
38191 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
38192 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)
38193 for {
38194 _ = v.Args[1]
38195 s1 := v.Args[0]
38196 if s1.Op != OpAMD64SHLQconst {
38197 break
38198 }
38199 j1 := s1.AuxInt
38200 x1 := s1.Args[0]
38201 if x1.Op != OpAMD64MOVBloadidx1 {
38202 break
38203 }
38204 i1 := x1.AuxInt
38205 s := x1.Aux
38206 mem := x1.Args[2]
38207 idx := x1.Args[0]
38208 p := x1.Args[1]
38209 or := v.Args[1]
38210 if or.Op != OpAMD64ORQ {
38211 break
38212 }
38213 y := or.Args[1]
38214 s0 := or.Args[0]
38215 if s0.Op != OpAMD64SHLQconst {
38216 break
38217 }
38218 j0 := s0.AuxInt
38219 x0 := s0.Args[0]
38220 if x0.Op != OpAMD64MOVBloadidx1 {
38221 break
38222 }
38223 i0 := x0.AuxInt
38224 if x0.Aux != s {
38225 break
38226 }
38227 _ = x0.Args[2]
38228 if idx != x0.Args[0] {
38229 break
38230 }
38231 if p != x0.Args[1] {
38232 break
38233 }
38234 if mem != x0.Args[2] {
38235 break
38236 }
38237 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
38238 break
38239 }
38240 b = mergePoint(b, x0, x1, y)
38241 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
38242 v.reset(OpCopy)
38243 v.AddArg(v0)
38244 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
38245 v1.AuxInt = j0
38246 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
38247 v2.AuxInt = i0
38248 v2.Aux = s
38249 v2.AddArg(p)
38250 v2.AddArg(idx)
38251 v2.AddArg(mem)
38252 v1.AddArg(v2)
38253 v0.AddArg(v1)
38254 v0.AddArg(y)
38255 return true
38256 }
38257 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
38258 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
38259 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)
38260 for {
38261 _ = v.Args[1]
38262 s1 := v.Args[0]
38263 if s1.Op != OpAMD64SHLQconst {
38264 break
38265 }
38266 j1 := s1.AuxInt
38267 x1 := s1.Args[0]
38268 if x1.Op != OpAMD64MOVBloadidx1 {
38269 break
38270 }
38271 i1 := x1.AuxInt
38272 s := x1.Aux
38273 mem := x1.Args[2]
38274 p := x1.Args[0]
38275 idx := x1.Args[1]
38276 or := v.Args[1]
38277 if or.Op != OpAMD64ORQ {
38278 break
38279 }
38280 _ = or.Args[1]
38281 y := or.Args[0]
38282 s0 := or.Args[1]
38283 if s0.Op != OpAMD64SHLQconst {
38284 break
38285 }
38286 j0 := s0.AuxInt
38287 x0 := s0.Args[0]
38288 if x0.Op != OpAMD64MOVBloadidx1 {
38289 break
38290 }
38291 i0 := x0.AuxInt
38292 if x0.Aux != s {
38293 break
38294 }
38295 _ = x0.Args[2]
38296 if p != x0.Args[0] {
38297 break
38298 }
38299 if idx != x0.Args[1] {
38300 break
38301 }
38302 if mem != x0.Args[2] {
38303 break
38304 }
38305 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
38306 break
38307 }
38308 b = mergePoint(b, x0, x1, y)
38309 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
38310 v.reset(OpCopy)
38311 v.AddArg(v0)
38312 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
38313 v1.AuxInt = j0
38314 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
38315 v2.AuxInt = i0
38316 v2.Aux = s
38317 v2.AddArg(p)
38318 v2.AddArg(idx)
38319 v2.AddArg(mem)
38320 v1.AddArg(v2)
38321 v0.AddArg(v1)
38322 v0.AddArg(y)
38323 return true
38324 }
38325 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))))
38326 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
38327 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)
38328 for {
38329 _ = v.Args[1]
38330 s1 := v.Args[0]
38331 if s1.Op != OpAMD64SHLQconst {
38332 break
38333 }
38334 j1 := s1.AuxInt
38335 x1 := s1.Args[0]
38336 if x1.Op != OpAMD64MOVBloadidx1 {
38337 break
38338 }
38339 i1 := x1.AuxInt
38340 s := x1.Aux
38341 mem := x1.Args[2]
38342 idx := x1.Args[0]
38343 p := x1.Args[1]
38344 or := v.Args[1]
38345 if or.Op != OpAMD64ORQ {
38346 break
38347 }
38348 _ = or.Args[1]
38349 y := or.Args[0]
38350 s0 := or.Args[1]
38351 if s0.Op != OpAMD64SHLQconst {
38352 break
38353 }
38354 j0 := s0.AuxInt
38355 x0 := s0.Args[0]
38356 if x0.Op != OpAMD64MOVBloadidx1 {
38357 break
38358 }
38359 i0 := x0.AuxInt
38360 if x0.Aux != s {
38361 break
38362 }
38363 _ = x0.Args[2]
38364 if p != x0.Args[0] {
38365 break
38366 }
38367 if idx != x0.Args[1] {
38368 break
38369 }
38370 if mem != x0.Args[2] {
38371 break
38372 }
38373 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
38374 break
38375 }
38376 b = mergePoint(b, x0, x1, y)
38377 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
38378 v.reset(OpCopy)
38379 v.AddArg(v0)
38380 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
38381 v1.AuxInt = j0
38382 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
38383 v2.AuxInt = i0
38384 v2.Aux = s
38385 v2.AddArg(p)
38386 v2.AddArg(idx)
38387 v2.AddArg(mem)
38388 v1.AddArg(v2)
38389 v0.AddArg(v1)
38390 v0.AddArg(y)
38391 return true
38392 }
38393 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
38394 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
38395 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)
38396 for {
38397 _ = v.Args[1]
38398 s1 := v.Args[0]
38399 if s1.Op != OpAMD64SHLQconst {
38400 break
38401 }
38402 j1 := s1.AuxInt
38403 x1 := s1.Args[0]
38404 if x1.Op != OpAMD64MOVBloadidx1 {
38405 break
38406 }
38407 i1 := x1.AuxInt
38408 s := x1.Aux
38409 mem := x1.Args[2]
38410 p := x1.Args[0]
38411 idx := x1.Args[1]
38412 or := v.Args[1]
38413 if or.Op != OpAMD64ORQ {
38414 break
38415 }
38416 _ = or.Args[1]
38417 y := or.Args[0]
38418 s0 := or.Args[1]
38419 if s0.Op != OpAMD64SHLQconst {
38420 break
38421 }
38422 j0 := s0.AuxInt
38423 x0 := s0.Args[0]
38424 if x0.Op != OpAMD64MOVBloadidx1 {
38425 break
38426 }
38427 i0 := x0.AuxInt
38428 if x0.Aux != s {
38429 break
38430 }
38431 _ = x0.Args[2]
38432 if idx != x0.Args[0] {
38433 break
38434 }
38435 if p != x0.Args[1] {
38436 break
38437 }
38438 if mem != x0.Args[2] {
38439 break
38440 }
38441 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
38442 break
38443 }
38444 b = mergePoint(b, x0, x1, y)
38445 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
38446 v.reset(OpCopy)
38447 v.AddArg(v0)
38448 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
38449 v1.AuxInt = j0
38450 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
38451 v2.AuxInt = i0
38452 v2.Aux = s
38453 v2.AddArg(p)
38454 v2.AddArg(idx)
38455 v2.AddArg(mem)
38456 v1.AddArg(v2)
38457 v0.AddArg(v1)
38458 v0.AddArg(y)
38459 return true
38460 }
38461 return false
38462 }
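// rewriteValueAMD64_OpAMD64ORQ_70 continues the shifted byte-load merges
// (SHLQconst/MOVBloadidx1 pairs into MOVWloadidx1) and, in its last rule,
// begins the analogous word-load merges into MOVLloadidx1.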
38463 func rewriteValueAMD64_OpAMD64ORQ_70(v *Value) bool {
38464 b := v.Block
38465 typ := &b.Func.Config.Types
38466 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))))
38467 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
38468 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)
38469 for {
38470 _ = v.Args[1]
38471 s1 := v.Args[0]
38472 if s1.Op != OpAMD64SHLQconst {
38473 break
38474 }
38475 j1 := s1.AuxInt
38476 x1 := s1.Args[0]
38477 if x1.Op != OpAMD64MOVBloadidx1 {
38478 break
38479 }
38480 i1 := x1.AuxInt
38481 s := x1.Aux
38482 mem := x1.Args[2]
38483 idx := x1.Args[0]
38484 p := x1.Args[1]
38485 or := v.Args[1]
38486 if or.Op != OpAMD64ORQ {
38487 break
38488 }
38489 _ = or.Args[1]
38490 y := or.Args[0]
38491 s0 := or.Args[1]
38492 if s0.Op != OpAMD64SHLQconst {
38493 break
38494 }
38495 j0 := s0.AuxInt
38496 x0 := s0.Args[0]
38497 if x0.Op != OpAMD64MOVBloadidx1 {
38498 break
38499 }
38500 i0 := x0.AuxInt
38501 if x0.Aux != s {
38502 break
38503 }
38504 _ = x0.Args[2]
38505 if idx != x0.Args[0] {
38506 break
38507 }
38508 if p != x0.Args[1] {
38509 break
38510 }
38511 if mem != x0.Args[2] {
38512 break
38513 }
38514 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
38515 break
38516 }
38517 b = mergePoint(b, x0, x1, y)
38518 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
38519 v.reset(OpCopy)
38520 v.AddArg(v0)
38521 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
38522 v1.AuxInt = j0
38523 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
38524 v2.AuxInt = i0
38525 v2.Aux = s
38526 v2.AddArg(p)
38527 v2.AddArg(idx)
38528 v2.AddArg(mem)
38529 v1.AddArg(v2)
38530 v0.AddArg(v1)
38531 v0.AddArg(y)
38532 return true
38533 }
38534 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
38535 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
38536 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)
38537 for {
38538 _ = v.Args[1]
38539 or := v.Args[0]
38540 if or.Op != OpAMD64ORQ {
38541 break
38542 }
38543 y := or.Args[1]
38544 s0 := or.Args[0]
38545 if s0.Op != OpAMD64SHLQconst {
38546 break
38547 }
38548 j0 := s0.AuxInt
38549 x0 := s0.Args[0]
38550 if x0.Op != OpAMD64MOVBloadidx1 {
38551 break
38552 }
38553 i0 := x0.AuxInt
38554 s := x0.Aux
38555 mem := x0.Args[2]
38556 p := x0.Args[0]
38557 idx := x0.Args[1]
38558 s1 := v.Args[1]
38559 if s1.Op != OpAMD64SHLQconst {
38560 break
38561 }
38562 j1 := s1.AuxInt
38563 x1 := s1.Args[0]
38564 if x1.Op != OpAMD64MOVBloadidx1 {
38565 break
38566 }
38567 i1 := x1.AuxInt
38568 if x1.Aux != s {
38569 break
38570 }
38571 _ = x1.Args[2]
38572 if p != x1.Args[0] {
38573 break
38574 }
38575 if idx != x1.Args[1] {
38576 break
38577 }
38578 if mem != x1.Args[2] {
38579 break
38580 }
38581 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
38582 break
38583 }
38584 b = mergePoint(b, x0, x1, y)
38585 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
38586 v.reset(OpCopy)
38587 v.AddArg(v0)
38588 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
38589 v1.AuxInt = j0
38590 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
38591 v2.AuxInt = i0
38592 v2.Aux = s
38593 v2.AddArg(p)
38594 v2.AddArg(idx)
38595 v2.AddArg(mem)
38596 v1.AddArg(v2)
38597 v0.AddArg(v1)
38598 v0.AddArg(y)
38599 return true
38600 }
38601 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
38602 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
38603 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)
38604 for {
38605 _ = v.Args[1]
38606 or := v.Args[0]
38607 if or.Op != OpAMD64ORQ {
38608 break
38609 }
38610 y := or.Args[1]
38611 s0 := or.Args[0]
38612 if s0.Op != OpAMD64SHLQconst {
38613 break
38614 }
38615 j0 := s0.AuxInt
38616 x0 := s0.Args[0]
38617 if x0.Op != OpAMD64MOVBloadidx1 {
38618 break
38619 }
38620 i0 := x0.AuxInt
38621 s := x0.Aux
38622 mem := x0.Args[2]
38623 idx := x0.Args[0]
38624 p := x0.Args[1]
38625 s1 := v.Args[1]
38626 if s1.Op != OpAMD64SHLQconst {
38627 break
38628 }
38629 j1 := s1.AuxInt
38630 x1 := s1.Args[0]
38631 if x1.Op != OpAMD64MOVBloadidx1 {
38632 break
38633 }
38634 i1 := x1.AuxInt
38635 if x1.Aux != s {
38636 break
38637 }
38638 _ = x1.Args[2]
38639 if p != x1.Args[0] {
38640 break
38641 }
38642 if idx != x1.Args[1] {
38643 break
38644 }
38645 if mem != x1.Args[2] {
38646 break
38647 }
38648 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
38649 break
38650 }
38651 b = mergePoint(b, x0, x1, y)
38652 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
38653 v.reset(OpCopy)
38654 v.AddArg(v0)
38655 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
38656 v1.AuxInt = j0
38657 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
38658 v2.AuxInt = i0
38659 v2.Aux = s
38660 v2.AddArg(p)
38661 v2.AddArg(idx)
38662 v2.AddArg(mem)
38663 v1.AddArg(v2)
38664 v0.AddArg(v1)
38665 v0.AddArg(y)
38666 return true
38667 }
38668 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
38669 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
38670 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)
38671 for {
38672 _ = v.Args[1]
38673 or := v.Args[0]
38674 if or.Op != OpAMD64ORQ {
38675 break
38676 }
38677 _ = or.Args[1]
38678 y := or.Args[0]
38679 s0 := or.Args[1]
38680 if s0.Op != OpAMD64SHLQconst {
38681 break
38682 }
38683 j0 := s0.AuxInt
38684 x0 := s0.Args[0]
38685 if x0.Op != OpAMD64MOVBloadidx1 {
38686 break
38687 }
38688 i0 := x0.AuxInt
38689 s := x0.Aux
38690 mem := x0.Args[2]
38691 p := x0.Args[0]
38692 idx := x0.Args[1]
38693 s1 := v.Args[1]
38694 if s1.Op != OpAMD64SHLQconst {
38695 break
38696 }
38697 j1 := s1.AuxInt
38698 x1 := s1.Args[0]
38699 if x1.Op != OpAMD64MOVBloadidx1 {
38700 break
38701 }
38702 i1 := x1.AuxInt
38703 if x1.Aux != s {
38704 break
38705 }
38706 _ = x1.Args[2]
38707 if p != x1.Args[0] {
38708 break
38709 }
38710 if idx != x1.Args[1] {
38711 break
38712 }
38713 if mem != x1.Args[2] {
38714 break
38715 }
38716 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
38717 break
38718 }
38719 b = mergePoint(b, x0, x1, y)
38720 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
38721 v.reset(OpCopy)
38722 v.AddArg(v0)
38723 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
38724 v1.AuxInt = j0
38725 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
38726 v2.AuxInt = i0
38727 v2.Aux = s
38728 v2.AddArg(p)
38729 v2.AddArg(idx)
38730 v2.AddArg(mem)
38731 v1.AddArg(v2)
38732 v0.AddArg(v1)
38733 v0.AddArg(y)
38734 return true
38735 }
38736 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))
38737 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
38738 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)
38739 for {
38740 _ = v.Args[1]
38741 or := v.Args[0]
38742 if or.Op != OpAMD64ORQ {
38743 break
38744 }
38745 _ = or.Args[1]
38746 y := or.Args[0]
38747 s0 := or.Args[1]
38748 if s0.Op != OpAMD64SHLQconst {
38749 break
38750 }
38751 j0 := s0.AuxInt
38752 x0 := s0.Args[0]
38753 if x0.Op != OpAMD64MOVBloadidx1 {
38754 break
38755 }
38756 i0 := x0.AuxInt
38757 s := x0.Aux
38758 mem := x0.Args[2]
38759 idx := x0.Args[0]
38760 p := x0.Args[1]
38761 s1 := v.Args[1]
38762 if s1.Op != OpAMD64SHLQconst {
38763 break
38764 }
38765 j1 := s1.AuxInt
38766 x1 := s1.Args[0]
38767 if x1.Op != OpAMD64MOVBloadidx1 {
38768 break
38769 }
38770 i1 := x1.AuxInt
38771 if x1.Aux != s {
38772 break
38773 }
38774 _ = x1.Args[2]
38775 if p != x1.Args[0] {
38776 break
38777 }
38778 if idx != x1.Args[1] {
38779 break
38780 }
38781 if mem != x1.Args[2] {
38782 break
38783 }
38784 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
38785 break
38786 }
38787 b = mergePoint(b, x0, x1, y)
38788 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
38789 v.reset(OpCopy)
38790 v.AddArg(v0)
38791 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
38792 v1.AuxInt = j0
38793 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
38794 v2.AuxInt = i0
38795 v2.Aux = s
38796 v2.AddArg(p)
38797 v2.AddArg(idx)
38798 v2.AddArg(mem)
38799 v1.AddArg(v2)
38800 v0.AddArg(v1)
38801 v0.AddArg(y)
38802 return true
38803 }
38804 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
38805 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
38806 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)
38807 for {
38808 _ = v.Args[1]
38809 or := v.Args[0]
38810 if or.Op != OpAMD64ORQ {
38811 break
38812 }
38813 y := or.Args[1]
38814 s0 := or.Args[0]
38815 if s0.Op != OpAMD64SHLQconst {
38816 break
38817 }
38818 j0 := s0.AuxInt
38819 x0 := s0.Args[0]
38820 if x0.Op != OpAMD64MOVBloadidx1 {
38821 break
38822 }
38823 i0 := x0.AuxInt
38824 s := x0.Aux
38825 mem := x0.Args[2]
38826 p := x0.Args[0]
38827 idx := x0.Args[1]
38828 s1 := v.Args[1]
38829 if s1.Op != OpAMD64SHLQconst {
38830 break
38831 }
38832 j1 := s1.AuxInt
38833 x1 := s1.Args[0]
38834 if x1.Op != OpAMD64MOVBloadidx1 {
38835 break
38836 }
38837 i1 := x1.AuxInt
38838 if x1.Aux != s {
38839 break
38840 }
38841 _ = x1.Args[2]
38842 if idx != x1.Args[0] {
38843 break
38844 }
38845 if p != x1.Args[1] {
38846 break
38847 }
38848 if mem != x1.Args[2] {
38849 break
38850 }
38851 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
38852 break
38853 }
38854 b = mergePoint(b, x0, x1, y)
38855 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
38856 v.reset(OpCopy)
38857 v.AddArg(v0)
38858 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
38859 v1.AuxInt = j0
38860 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
38861 v2.AuxInt = i0
38862 v2.Aux = s
38863 v2.AddArg(p)
38864 v2.AddArg(idx)
38865 v2.AddArg(mem)
38866 v1.AddArg(v2)
38867 v0.AddArg(v1)
38868 v0.AddArg(y)
38869 return true
38870 }
38871 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
38872 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
38873 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)
38874 for {
38875 _ = v.Args[1]
38876 or := v.Args[0]
38877 if or.Op != OpAMD64ORQ {
38878 break
38879 }
38880 y := or.Args[1]
38881 s0 := or.Args[0]
38882 if s0.Op != OpAMD64SHLQconst {
38883 break
38884 }
38885 j0 := s0.AuxInt
38886 x0 := s0.Args[0]
38887 if x0.Op != OpAMD64MOVBloadidx1 {
38888 break
38889 }
38890 i0 := x0.AuxInt
38891 s := x0.Aux
38892 mem := x0.Args[2]
38893 idx := x0.Args[0]
38894 p := x0.Args[1]
38895 s1 := v.Args[1]
38896 if s1.Op != OpAMD64SHLQconst {
38897 break
38898 }
38899 j1 := s1.AuxInt
38900 x1 := s1.Args[0]
38901 if x1.Op != OpAMD64MOVBloadidx1 {
38902 break
38903 }
38904 i1 := x1.AuxInt
38905 if x1.Aux != s {
38906 break
38907 }
38908 _ = x1.Args[2]
38909 if idx != x1.Args[0] {
38910 break
38911 }
38912 if p != x1.Args[1] {
38913 break
38914 }
38915 if mem != x1.Args[2] {
38916 break
38917 }
38918 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
38919 break
38920 }
38921 b = mergePoint(b, x0, x1, y)
38922 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
38923 v.reset(OpCopy)
38924 v.AddArg(v0)
38925 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
38926 v1.AuxInt = j0
38927 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
38928 v2.AuxInt = i0
38929 v2.Aux = s
38930 v2.AddArg(p)
38931 v2.AddArg(idx)
38932 v2.AddArg(mem)
38933 v1.AddArg(v2)
38934 v0.AddArg(v1)
38935 v0.AddArg(y)
38936 return true
38937 }
38938 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
38939 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
38940 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)
38941 for {
38942 _ = v.Args[1]
38943 or := v.Args[0]
38944 if or.Op != OpAMD64ORQ {
38945 break
38946 }
38947 _ = or.Args[1]
38948 y := or.Args[0]
38949 s0 := or.Args[1]
38950 if s0.Op != OpAMD64SHLQconst {
38951 break
38952 }
38953 j0 := s0.AuxInt
38954 x0 := s0.Args[0]
38955 if x0.Op != OpAMD64MOVBloadidx1 {
38956 break
38957 }
38958 i0 := x0.AuxInt
38959 s := x0.Aux
38960 mem := x0.Args[2]
38961 p := x0.Args[0]
38962 idx := x0.Args[1]
38963 s1 := v.Args[1]
38964 if s1.Op != OpAMD64SHLQconst {
38965 break
38966 }
38967 j1 := s1.AuxInt
38968 x1 := s1.Args[0]
38969 if x1.Op != OpAMD64MOVBloadidx1 {
38970 break
38971 }
38972 i1 := x1.AuxInt
38973 if x1.Aux != s {
38974 break
38975 }
38976 _ = x1.Args[2]
38977 if idx != x1.Args[0] {
38978 break
38979 }
38980 if p != x1.Args[1] {
38981 break
38982 }
38983 if mem != x1.Args[2] {
38984 break
38985 }
38986 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
38987 break
38988 }
38989 b = mergePoint(b, x0, x1, y)
38990 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
38991 v.reset(OpCopy)
38992 v.AddArg(v0)
38993 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
38994 v1.AuxInt = j0
38995 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
38996 v2.AuxInt = i0
38997 v2.Aux = s
38998 v2.AddArg(p)
38999 v2.AddArg(idx)
39000 v2.AddArg(mem)
39001 v1.AddArg(v2)
39002 v0.AddArg(v1)
39003 v0.AddArg(y)
39004 return true
39005 }
39006 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))
39007 // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
39008 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem)) y)
39009 for {
39010 _ = v.Args[1]
39011 or := v.Args[0]
39012 if or.Op != OpAMD64ORQ {
39013 break
39014 }
39015 _ = or.Args[1]
39016 y := or.Args[0]
39017 s0 := or.Args[1]
39018 if s0.Op != OpAMD64SHLQconst {
39019 break
39020 }
39021 j0 := s0.AuxInt
39022 x0 := s0.Args[0]
39023 if x0.Op != OpAMD64MOVBloadidx1 {
39024 break
39025 }
39026 i0 := x0.AuxInt
39027 s := x0.Aux
39028 mem := x0.Args[2]
39029 idx := x0.Args[0]
39030 p := x0.Args[1]
39031 s1 := v.Args[1]
39032 if s1.Op != OpAMD64SHLQconst {
39033 break
39034 }
39035 j1 := s1.AuxInt
39036 x1 := s1.Args[0]
39037 if x1.Op != OpAMD64MOVBloadidx1 {
39038 break
39039 }
39040 i1 := x1.AuxInt
39041 if x1.Aux != s {
39042 break
39043 }
39044 _ = x1.Args[2]
39045 if idx != x1.Args[0] {
39046 break
39047 }
39048 if p != x1.Args[1] {
39049 break
39050 }
39051 if mem != x1.Args[2] {
39052 break
39053 }
39054 if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
39055 break
39056 }
39057 b = mergePoint(b, x0, x1, y)
39058 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
39059 v.reset(OpCopy)
39060 v.AddArg(v0)
39061 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
39062 v1.AuxInt = j0
39063 v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
39064 v2.AuxInt = i0
39065 v2.Aux = s
39066 v2.AddArg(p)
39067 v2.AddArg(idx)
39068 v2.AddArg(mem)
39069 v1.AddArg(v2)
39070 v0.AddArg(v1)
39071 v0.AddArg(y)
39072 return true
39073 }
39074 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y))
39075 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
39076 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)) y)
39077 for {
39078 _ = v.Args[1]
39079 s1 := v.Args[0]
39080 if s1.Op != OpAMD64SHLQconst {
39081 break
39082 }
39083 j1 := s1.AuxInt
39084 x1 := s1.Args[0]
39085 if x1.Op != OpAMD64MOVWloadidx1 {
39086 break
39087 }
39088 i1 := x1.AuxInt
39089 s := x1.Aux
39090 mem := x1.Args[2]
39091 p := x1.Args[0]
39092 idx := x1.Args[1]
39093 or := v.Args[1]
39094 if or.Op != OpAMD64ORQ {
39095 break
39096 }
39097 y := or.Args[1]
39098 s0 := or.Args[0]
39099 if s0.Op != OpAMD64SHLQconst {
39100 break
39101 }
39102 j0 := s0.AuxInt
39103 x0 := s0.Args[0]
39104 if x0.Op != OpAMD64MOVWloadidx1 {
39105 break
39106 }
39107 i0 := x0.AuxInt
39108 if x0.Aux != s {
39109 break
39110 }
39111 _ = x0.Args[2]
39112 if p != x0.Args[0] {
39113 break
39114 }
39115 if idx != x0.Args[1] {
39116 break
39117 }
39118 if mem != x0.Args[2] {
39119 break
39120 }
39121 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
39122 break
39123 }
39124 b = mergePoint(b, x0, x1, y)
39125 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
39126 v.reset(OpCopy)
39127 v.AddArg(v0)
39128 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
39129 v1.AuxInt = j0
39130 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
39131 v2.AuxInt = i0
39132 v2.Aux = s
39133 v2.AddArg(p)
39134 v2.AddArg(idx)
39135 v2.AddArg(mem)
39136 v1.AddArg(v2)
39137 v0.AddArg(v1)
39138 v0.AddArg(y)
39139 return true
39140 }
39141 return false
39142 }
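// rewriteValueAMD64_OpAMD64ORQ_80 covers further commutative orderings of the
// shifted word-load merge, folding SHLQconst/MOVWloadidx1 pairs of an ORQ
// chain into a single shifted MOVLloadidx1.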
39143 func rewriteValueAMD64_OpAMD64ORQ_80(v *Value) bool {
39144 b := v.Block
39145 typ := &b.Func.Config.Types
39146 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y))
39147 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
39148 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)) y)
39149 for {
39150 _ = v.Args[1]
39151 s1 := v.Args[0]
39152 if s1.Op != OpAMD64SHLQconst {
39153 break
39154 }
39155 j1 := s1.AuxInt
39156 x1 := s1.Args[0]
39157 if x1.Op != OpAMD64MOVWloadidx1 {
39158 break
39159 }
39160 i1 := x1.AuxInt
39161 s := x1.Aux
39162 mem := x1.Args[2]
39163 idx := x1.Args[0]
39164 p := x1.Args[1]
39165 or := v.Args[1]
39166 if or.Op != OpAMD64ORQ {
39167 break
39168 }
39169 y := or.Args[1]
39170 s0 := or.Args[0]
39171 if s0.Op != OpAMD64SHLQconst {
39172 break
39173 }
39174 j0 := s0.AuxInt
39175 x0 := s0.Args[0]
39176 if x0.Op != OpAMD64MOVWloadidx1 {
39177 break
39178 }
39179 i0 := x0.AuxInt
39180 if x0.Aux != s {
39181 break
39182 }
39183 _ = x0.Args[2]
39184 if p != x0.Args[0] {
39185 break
39186 }
39187 if idx != x0.Args[1] {
39188 break
39189 }
39190 if mem != x0.Args[2] {
39191 break
39192 }
39193 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
39194 break
39195 }
39196 b = mergePoint(b, x0, x1, y)
39197 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
39198 v.reset(OpCopy)
39199 v.AddArg(v0)
39200 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
39201 v1.AuxInt = j0
39202 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
39203 v2.AuxInt = i0
39204 v2.Aux = s
39205 v2.AddArg(p)
39206 v2.AddArg(idx)
39207 v2.AddArg(mem)
39208 v1.AddArg(v2)
39209 v0.AddArg(v1)
39210 v0.AddArg(y)
39211 return true
39212 }
39213 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y))
39214 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
39215 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)) y)
39216 for {
39217 _ = v.Args[1]
39218 s1 := v.Args[0]
39219 if s1.Op != OpAMD64SHLQconst {
39220 break
39221 }
39222 j1 := s1.AuxInt
39223 x1 := s1.Args[0]
39224 if x1.Op != OpAMD64MOVWloadidx1 {
39225 break
39226 }
39227 i1 := x1.AuxInt
39228 s := x1.Aux
39229 mem := x1.Args[2]
39230 p := x1.Args[0]
39231 idx := x1.Args[1]
39232 or := v.Args[1]
39233 if or.Op != OpAMD64ORQ {
39234 break
39235 }
39236 y := or.Args[1]
39237 s0 := or.Args[0]
39238 if s0.Op != OpAMD64SHLQconst {
39239 break
39240 }
39241 j0 := s0.AuxInt
39242 x0 := s0.Args[0]
39243 if x0.Op != OpAMD64MOVWloadidx1 {
39244 break
39245 }
39246 i0 := x0.AuxInt
39247 if x0.Aux != s {
39248 break
39249 }
39250 _ = x0.Args[2]
39251 if idx != x0.Args[0] {
39252 break
39253 }
39254 if p != x0.Args[1] {
39255 break
39256 }
39257 if mem != x0.Args[2] {
39258 break
39259 }
39260 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
39261 break
39262 }
39263 b = mergePoint(b, x0, x1, y)
39264 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
39265 v.reset(OpCopy)
39266 v.AddArg(v0)
39267 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
39268 v1.AuxInt = j0
39269 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
39270 v2.AuxInt = i0
39271 v2.Aux = s
39272 v2.AddArg(p)
39273 v2.AddArg(idx)
39274 v2.AddArg(mem)
39275 v1.AddArg(v2)
39276 v0.AddArg(v1)
39277 v0.AddArg(y)
39278 return true
39279 }
39280 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y))
39281 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
39282 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)) y)
39283 for {
39284 _ = v.Args[1]
39285 s1 := v.Args[0]
39286 if s1.Op != OpAMD64SHLQconst {
39287 break
39288 }
39289 j1 := s1.AuxInt
39290 x1 := s1.Args[0]
39291 if x1.Op != OpAMD64MOVWloadidx1 {
39292 break
39293 }
39294 i1 := x1.AuxInt
39295 s := x1.Aux
39296 mem := x1.Args[2]
39297 idx := x1.Args[0]
39298 p := x1.Args[1]
39299 or := v.Args[1]
39300 if or.Op != OpAMD64ORQ {
39301 break
39302 }
39303 y := or.Args[1]
39304 s0 := or.Args[0]
39305 if s0.Op != OpAMD64SHLQconst {
39306 break
39307 }
39308 j0 := s0.AuxInt
39309 x0 := s0.Args[0]
39310 if x0.Op != OpAMD64MOVWloadidx1 {
39311 break
39312 }
39313 i0 := x0.AuxInt
39314 if x0.Aux != s {
39315 break
39316 }
39317 _ = x0.Args[2]
39318 if idx != x0.Args[0] {
39319 break
39320 }
39321 if p != x0.Args[1] {
39322 break
39323 }
39324 if mem != x0.Args[2] {
39325 break
39326 }
39327 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
39328 break
39329 }
39330 b = mergePoint(b, x0, x1, y)
39331 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
39332 v.reset(OpCopy)
39333 v.AddArg(v0)
39334 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
39335 v1.AuxInt = j0
39336 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
39337 v2.AuxInt = i0
39338 v2.Aux = s
39339 v2.AddArg(p)
39340 v2.AddArg(idx)
39341 v2.AddArg(mem)
39342 v1.AddArg(v2)
39343 v0.AddArg(v1)
39344 v0.AddArg(y)
39345 return true
39346 }
39347 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
39348 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
39349 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)) y)
39350 for {
39351 _ = v.Args[1]
39352 s1 := v.Args[0]
39353 if s1.Op != OpAMD64SHLQconst {
39354 break
39355 }
39356 j1 := s1.AuxInt
39357 x1 := s1.Args[0]
39358 if x1.Op != OpAMD64MOVWloadidx1 {
39359 break
39360 }
39361 i1 := x1.AuxInt
39362 s := x1.Aux
39363 mem := x1.Args[2]
39364 p := x1.Args[0]
39365 idx := x1.Args[1]
39366 or := v.Args[1]
39367 if or.Op != OpAMD64ORQ {
39368 break
39369 }
39370 _ = or.Args[1]
39371 y := or.Args[0]
39372 s0 := or.Args[1]
39373 if s0.Op != OpAMD64SHLQconst {
39374 break
39375 }
39376 j0 := s0.AuxInt
39377 x0 := s0.Args[0]
39378 if x0.Op != OpAMD64MOVWloadidx1 {
39379 break
39380 }
39381 i0 := x0.AuxInt
39382 if x0.Aux != s {
39383 break
39384 }
39385 _ = x0.Args[2]
39386 if p != x0.Args[0] {
39387 break
39388 }
39389 if idx != x0.Args[1] {
39390 break
39391 }
39392 if mem != x0.Args[2] {
39393 break
39394 }
39395 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
39396 break
39397 }
39398 b = mergePoint(b, x0, x1, y)
39399 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
39400 v.reset(OpCopy)
39401 v.AddArg(v0)
39402 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
39403 v1.AuxInt = j0
39404 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
39405 v2.AuxInt = i0
39406 v2.Aux = s
39407 v2.AddArg(p)
39408 v2.AddArg(idx)
39409 v2.AddArg(mem)
39410 v1.AddArg(v2)
39411 v0.AddArg(v1)
39412 v0.AddArg(y)
39413 return true
39414 }
39415 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
39416 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
39417 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)) y)
39418 for {
39419 _ = v.Args[1]
39420 s1 := v.Args[0]
39421 if s1.Op != OpAMD64SHLQconst {
39422 break
39423 }
39424 j1 := s1.AuxInt
39425 x1 := s1.Args[0]
39426 if x1.Op != OpAMD64MOVWloadidx1 {
39427 break
39428 }
39429 i1 := x1.AuxInt
39430 s := x1.Aux
39431 mem := x1.Args[2]
39432 idx := x1.Args[0]
39433 p := x1.Args[1]
39434 or := v.Args[1]
39435 if or.Op != OpAMD64ORQ {
39436 break
39437 }
39438 _ = or.Args[1]
39439 y := or.Args[0]
39440 s0 := or.Args[1]
39441 if s0.Op != OpAMD64SHLQconst {
39442 break
39443 }
39444 j0 := s0.AuxInt
39445 x0 := s0.Args[0]
39446 if x0.Op != OpAMD64MOVWloadidx1 {
39447 break
39448 }
39449 i0 := x0.AuxInt
39450 if x0.Aux != s {
39451 break
39452 }
39453 _ = x0.Args[2]
39454 if p != x0.Args[0] {
39455 break
39456 }
39457 if idx != x0.Args[1] {
39458 break
39459 }
39460 if mem != x0.Args[2] {
39461 break
39462 }
39463 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
39464 break
39465 }
39466 b = mergePoint(b, x0, x1, y)
39467 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
39468 v.reset(OpCopy)
39469 v.AddArg(v0)
39470 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
39471 v1.AuxInt = j0
39472 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
39473 v2.AuxInt = i0
39474 v2.Aux = s
39475 v2.AddArg(p)
39476 v2.AddArg(idx)
39477 v2.AddArg(mem)
39478 v1.AddArg(v2)
39479 v0.AddArg(v1)
39480 v0.AddArg(y)
39481 return true
39482 }
39483 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
39484 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
39485 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)) y)
39486 for {
39487 _ = v.Args[1]
39488 s1 := v.Args[0]
39489 if s1.Op != OpAMD64SHLQconst {
39490 break
39491 }
39492 j1 := s1.AuxInt
39493 x1 := s1.Args[0]
39494 if x1.Op != OpAMD64MOVWloadidx1 {
39495 break
39496 }
39497 i1 := x1.AuxInt
39498 s := x1.Aux
39499 mem := x1.Args[2]
39500 p := x1.Args[0]
39501 idx := x1.Args[1]
39502 or := v.Args[1]
39503 if or.Op != OpAMD64ORQ {
39504 break
39505 }
39506 _ = or.Args[1]
39507 y := or.Args[0]
39508 s0 := or.Args[1]
39509 if s0.Op != OpAMD64SHLQconst {
39510 break
39511 }
39512 j0 := s0.AuxInt
39513 x0 := s0.Args[0]
39514 if x0.Op != OpAMD64MOVWloadidx1 {
39515 break
39516 }
39517 i0 := x0.AuxInt
39518 if x0.Aux != s {
39519 break
39520 }
39521 _ = x0.Args[2]
39522 if idx != x0.Args[0] {
39523 break
39524 }
39525 if p != x0.Args[1] {
39526 break
39527 }
39528 if mem != x0.Args[2] {
39529 break
39530 }
39531 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
39532 break
39533 }
39534 b = mergePoint(b, x0, x1, y)
39535 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
39536 v.reset(OpCopy)
39537 v.AddArg(v0)
39538 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
39539 v1.AuxInt = j0
39540 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
39541 v2.AuxInt = i0
39542 v2.Aux = s
39543 v2.AddArg(p)
39544 v2.AddArg(idx)
39545 v2.AddArg(mem)
39546 v1.AddArg(v2)
39547 v0.AddArg(v1)
39548 v0.AddArg(y)
39549 return true
39550 }
39551 // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
39552 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
39553 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)) y)
39554 for {
39555 _ = v.Args[1]
39556 s1 := v.Args[0]
39557 if s1.Op != OpAMD64SHLQconst {
39558 break
39559 }
39560 j1 := s1.AuxInt
39561 x1 := s1.Args[0]
39562 if x1.Op != OpAMD64MOVWloadidx1 {
39563 break
39564 }
39565 i1 := x1.AuxInt
39566 s := x1.Aux
39567 mem := x1.Args[2]
39568 idx := x1.Args[0]
39569 p := x1.Args[1]
39570 or := v.Args[1]
39571 if or.Op != OpAMD64ORQ {
39572 break
39573 }
39574 _ = or.Args[1]
39575 y := or.Args[0]
39576 s0 := or.Args[1]
39577 if s0.Op != OpAMD64SHLQconst {
39578 break
39579 }
39580 j0 := s0.AuxInt
39581 x0 := s0.Args[0]
39582 if x0.Op != OpAMD64MOVWloadidx1 {
39583 break
39584 }
39585 i0 := x0.AuxInt
39586 if x0.Aux != s {
39587 break
39588 }
39589 _ = x0.Args[2]
39590 if idx != x0.Args[0] {
39591 break
39592 }
39593 if p != x0.Args[1] {
39594 break
39595 }
39596 if mem != x0.Args[2] {
39597 break
39598 }
39599 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
39600 break
39601 }
39602 b = mergePoint(b, x0, x1, y)
39603 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
39604 v.reset(OpCopy)
39605 v.AddArg(v0)
39606 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
39607 v1.AuxInt = j0
39608 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
39609 v2.AuxInt = i0
39610 v2.Aux = s
39611 v2.AddArg(p)
39612 v2.AddArg(idx)
39613 v2.AddArg(mem)
39614 v1.AddArg(v2)
39615 v0.AddArg(v1)
39616 v0.AddArg(y)
39617 return true
39618 }
39619 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
39620 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
39621 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)) y)
39622 for {
39623 _ = v.Args[1]
39624 or := v.Args[0]
39625 if or.Op != OpAMD64ORQ {
39626 break
39627 }
39628 y := or.Args[1]
39629 s0 := or.Args[0]
39630 if s0.Op != OpAMD64SHLQconst {
39631 break
39632 }
39633 j0 := s0.AuxInt
39634 x0 := s0.Args[0]
39635 if x0.Op != OpAMD64MOVWloadidx1 {
39636 break
39637 }
39638 i0 := x0.AuxInt
39639 s := x0.Aux
39640 mem := x0.Args[2]
39641 p := x0.Args[0]
39642 idx := x0.Args[1]
39643 s1 := v.Args[1]
39644 if s1.Op != OpAMD64SHLQconst {
39645 break
39646 }
39647 j1 := s1.AuxInt
39648 x1 := s1.Args[0]
39649 if x1.Op != OpAMD64MOVWloadidx1 {
39650 break
39651 }
39652 i1 := x1.AuxInt
39653 if x1.Aux != s {
39654 break
39655 }
39656 _ = x1.Args[2]
39657 if p != x1.Args[0] {
39658 break
39659 }
39660 if idx != x1.Args[1] {
39661 break
39662 }
39663 if mem != x1.Args[2] {
39664 break
39665 }
39666 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
39667 break
39668 }
39669 b = mergePoint(b, x0, x1, y)
39670 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
39671 v.reset(OpCopy)
39672 v.AddArg(v0)
39673 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
39674 v1.AuxInt = j0
39675 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
39676 v2.AuxInt = i0
39677 v2.Aux = s
39678 v2.AddArg(p)
39679 v2.AddArg(idx)
39680 v2.AddArg(mem)
39681 v1.AddArg(v2)
39682 v0.AddArg(v1)
39683 v0.AddArg(y)
39684 return true
39685 }
39686 // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
39687 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
39688 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)) y)
39689 for {
39690 _ = v.Args[1]
39691 or := v.Args[0]
39692 if or.Op != OpAMD64ORQ {
39693 break
39694 }
39695 y := or.Args[1]
39696 s0 := or.Args[0]
39697 if s0.Op != OpAMD64SHLQconst {
39698 break
39699 }
39700 j0 := s0.AuxInt
39701 x0 := s0.Args[0]
39702 if x0.Op != OpAMD64MOVWloadidx1 {
39703 break
39704 }
39705 i0 := x0.AuxInt
39706 s := x0.Aux
39707 mem := x0.Args[2]
39708 idx := x0.Args[0]
39709 p := x0.Args[1]
39710 s1 := v.Args[1]
39711 if s1.Op != OpAMD64SHLQconst {
39712 break
39713 }
39714 j1 := s1.AuxInt
39715 x1 := s1.Args[0]
39716 if x1.Op != OpAMD64MOVWloadidx1 {
39717 break
39718 }
39719 i1 := x1.AuxInt
39720 if x1.Aux != s {
39721 break
39722 }
39723 _ = x1.Args[2]
39724 if p != x1.Args[0] {
39725 break
39726 }
39727 if idx != x1.Args[1] {
39728 break
39729 }
39730 if mem != x1.Args[2] {
39731 break
39732 }
39733 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
39734 break
39735 }
39736 b = mergePoint(b, x0, x1, y)
39737 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
39738 v.reset(OpCopy)
39739 v.AddArg(v0)
39740 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
39741 v1.AuxInt = j0
39742 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
39743 v2.AuxInt = i0
39744 v2.Aux = s
39745 v2.AddArg(p)
39746 v2.AddArg(idx)
39747 v2.AddArg(mem)
39748 v1.AddArg(v2)
39749 v0.AddArg(v1)
39750 v0.AddArg(y)
39751 return true
39752 }
39753 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
39754 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
39755 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)) y)
39756 for {
39757 _ = v.Args[1]
39758 or := v.Args[0]
39759 if or.Op != OpAMD64ORQ {
39760 break
39761 }
39762 _ = or.Args[1]
39763 y := or.Args[0]
39764 s0 := or.Args[1]
39765 if s0.Op != OpAMD64SHLQconst {
39766 break
39767 }
39768 j0 := s0.AuxInt
39769 x0 := s0.Args[0]
39770 if x0.Op != OpAMD64MOVWloadidx1 {
39771 break
39772 }
39773 i0 := x0.AuxInt
39774 s := x0.Aux
39775 mem := x0.Args[2]
39776 p := x0.Args[0]
39777 idx := x0.Args[1]
39778 s1 := v.Args[1]
39779 if s1.Op != OpAMD64SHLQconst {
39780 break
39781 }
39782 j1 := s1.AuxInt
39783 x1 := s1.Args[0]
39784 if x1.Op != OpAMD64MOVWloadidx1 {
39785 break
39786 }
39787 i1 := x1.AuxInt
39788 if x1.Aux != s {
39789 break
39790 }
39791 _ = x1.Args[2]
39792 if p != x1.Args[0] {
39793 break
39794 }
39795 if idx != x1.Args[1] {
39796 break
39797 }
39798 if mem != x1.Args[2] {
39799 break
39800 }
39801 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
39802 break
39803 }
39804 b = mergePoint(b, x0, x1, y)
39805 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
39806 v.reset(OpCopy)
39807 v.AddArg(v0)
39808 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
39809 v1.AuxInt = j0
39810 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
39811 v2.AuxInt = i0
39812 v2.Aux = s
39813 v2.AddArg(p)
39814 v2.AddArg(idx)
39815 v2.AddArg(mem)
39816 v1.AddArg(v2)
39817 v0.AddArg(v1)
39818 v0.AddArg(y)
39819 return true
39820 }
39821 return false
39822 }
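// rewriteValueAMD64_OpAMD64ORQ_90 continues the shifted word-load merge
// orderings (SHLQconst/MOVWloadidx1 pairs into MOVLloadidx1).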
39823 func rewriteValueAMD64_OpAMD64ORQ_90(v *Value) bool {
39824 b := v.Block
39825 typ := &b.Func.Config.Types
39826 // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
39827 // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
39828 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)) y)
39829 for {
39830 _ = v.Args[1]
39831 or := v.Args[0]
39832 if or.Op != OpAMD64ORQ {
39833 break
39834 }
39835 _ = or.Args[1]
39836 y := or.Args[0]
39837 s0 := or.Args[1]
39838 if s0.Op != OpAMD64SHLQconst {
39839 break
39840 }
39841 j0 := s0.AuxInt
39842 x0 := s0.Args[0]
39843 if x0.Op != OpAMD64MOVWloadidx1 {
39844 break
39845 }
39846 i0 := x0.AuxInt
39847 s := x0.Aux
39848 mem := x0.Args[2]
39849 idx := x0.Args[0]
39850 p := x0.Args[1]
39851 s1 := v.Args[1]
39852 if s1.Op != OpAMD64SHLQconst {
39853 break
39854 }
39855 j1 := s1.AuxInt
39856 x1 := s1.Args[0]
39857 if x1.Op != OpAMD64MOVWloadidx1 {
39858 break
39859 }
39860 i1 := x1.AuxInt
39861 if x1.Aux != s {
39862 break
39863 }
39864 _ = x1.Args[2]
39865 if p != x1.Args[0] {
39866 break
39867 }
39868 if idx != x1.Args[1] {
39869 break
39870 }
39871 if mem != x1.Args[2] {
39872 break
39873 }
39874 if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
39875 break
39876 }
39877 b = mergePoint(b, x0, x1, y)
39878 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
39879 v.reset(OpCopy)
39880 v.AddArg(v0)
39881 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
39882 v1.AuxInt = j0
39883 v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
39884 v2.AuxInt = i0
39885 v2.Aux = s
39886 v2.AddArg(p)
39887 v2.AddArg(idx)
39888 v2.AddArg(mem)
39889 v1.AddArg(v2)
39890 v0.AddArg(v1)
39891 v0.AddArg(y)
39892 return true
39893 }
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s0 := or.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem)) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s0 := or.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		s1 := v.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j0
		v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v2.AuxInt = i0
		v2.Aux = s
		v2.AddArg(p)
		v2.AddArg(idx)
		v2.AddArg(mem)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload <typ.UInt16> [i0] {s} p mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload <typ.UInt16> [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload <typ.UInt32> [i0] {s} p mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload <typ.UInt32> [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload <typ.UInt64> [i0] {s} p mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
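// rewriteValueAMD64_OpAMD64ORQ_100 handles the byte-reversed (big-endian)
// combines: pairs of shifted byte loads become a ROLWconst of a 16-bit load,
// pairs of byte-swapped 16-bit loads become a BSWAPL of a 32-bit load, and
// pairs of BSWAPL'd 32-bit loads become a BSWAPQ of a 64-bit load. The
// i1 == i0+n together with j1 == j0-m conditions check that the loads are
// adjacent in memory but land in reversed bit order in the result, which is
// what a big-endian decode looks like. Illustrative (hypothetical) source:
//
//	binary.BigEndian.Uint32(b[i:]) // one MOVL plus BSWAPL after these rules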
func rewriteValueAMD64_OpAMD64ORQ_100(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))) r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload <typ.UInt64> [i0] {s} p mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(x1.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload <typ.UInt16> [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload <typ.UInt16> [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload <typ.UInt16> [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload <typ.UInt16> [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload <typ.UInt32> [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload <typ.UInt32> [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[1]
		p := x0.Args[0]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[1]
		if p != x1.Args[0] {
			break
		}
		if mem != x1.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload <typ.UInt32> [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload <typ.UInt32> [i0] {s} p mem))) y)
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWload {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[1]
		p := x1.Args[0]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWload {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[1]
		if p != x0.Args[0] {
			break
		}
		if mem != x0.Args[1] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
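// rewriteValueAMD64_OpAMD64ORQ_110 repeats the byte-reversed combines of the
// previous function for the indexed-load forms (MOVBloadidx1, MOVWloadidx1).
// Because (p+idx) addressing is symmetric, each rule appears once per
// ordering of p and idx in each matched load, so the blocks below differ
// only in which of Args[0]/Args[1] is treated as the pointer.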
func rewriteValueAMD64_OpAMD64ORQ_110(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		x1 := v.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem))
	// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 8 {
			break
		}
		x0 := sh.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		x1 := v.Args[1]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v0.AuxInt = 8
		v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	return false
}
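// rewriteValueAMD64_OpAMD64ORQ_120 finishes the indexed byte-reversed word
// combines (two ROLWconst [8] 16-bit indexed loads into a BSWAPL of one
// MOVLloadidx1) and begins the 64-bit case: two BSWAPL'd 32-bit indexed
// loads at i0 and i0+4 merge into a BSWAPQ of a single MOVQloadidx1.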
func rewriteValueAMD64_OpAMD64ORQ_120(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 16 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 <typ.UInt64> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 <typ.UInt64> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 <typ.UInt64> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 <typ.UInt64> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		r1 := v.Args[0]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		sh := v.Args[1]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQloadidx1 <typ.UInt64> [i0] {s} p idx mem))
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
42416 return false
42417 }
42418 func rewriteValueAMD64_OpAMD64ORQ_130(v *Value) bool {
42419 b := v.Block
42420 typ := &b.Func.Config.Types
42421
42422
42423
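	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (BSWAPQ <v.Type> (MOVQloadidx1 <typ.UInt64> [i0] {s} p idx mem))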
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
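	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (BSWAPQ <v.Type> (MOVQloadidx1 <typ.UInt64> [i0] {s} p idx mem))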
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
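	// match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)))
	// cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)
	// result: @mergePoint(b, x0, x1) (BSWAPQ <v.Type> (MOVQloadidx1 <typ.UInt64> [i0] {s} p idx mem))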
	for {
		_ = v.Args[1]
		sh := v.Args[0]
		if sh.Op != OpAMD64SHLQconst {
			break
		}
		if sh.AuxInt != 32 {
			break
		}
		r0 := sh.Args[0]
		if r0.Op != OpAMD64BSWAPL {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		r1 := v.Args[1]
		if r1.Op != OpAMD64BSWAPL {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVLloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) {
			break
		}
		b = mergePoint(b, x0, x1)
		v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
		v1.AuxInt = i0
		v1.Aux = s
		v1.AddArg(p)
		v1.AddArg(idx)
		v1.AddArg(mem)
		v0.AddArg(v1)
		return true
	}
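	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))) y)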
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
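	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))) y)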
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
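	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))) y)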
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
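	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))) y)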
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
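	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))) y)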
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
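	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))) y)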
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
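	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))) y)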
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_140(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
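	// match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))) y)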
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
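	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))) y)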
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
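	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))) y)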
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
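	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))) y)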
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
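	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))) y)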
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
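	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))) y)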
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
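	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))) y)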
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
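	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))) y)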
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
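	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))
	// cond: i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWloadidx1 <typ.UInt16> [i0] {s} p idx mem))) y)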
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		x1 := s1.Args[0]
		if x1.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		x0 := s0.Args[0]
		if x0.Op != OpAMD64MOVBloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if idx != x0.Args[0] {
			break
		}
		if p != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, typ.UInt16)
		v2.AuxInt = 8
		v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
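	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))) y)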
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_150(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
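	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))) y)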
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
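	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))) y)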
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
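	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y))
	// cond: i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))) y)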
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
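	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))) y)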
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
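	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))) y)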
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if p != x1.Args[0] {
			break
		}
		if idx != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
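	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))) y)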
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		p := x0.Args[0]
		idx := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
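	// match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))) y)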
	for {
		_ = v.Args[1]
		s0 := v.Args[0]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		s := x0.Aux
		mem := x0.Args[2]
		idx := x0.Args[0]
		p := x0.Args[1]
		or := v.Args[1]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		if x1.Aux != s {
			break
		}
		_ = x1.Args[2]
		if idx != x1.Args[0] {
			break
		}
		if p != x1.Args[1] {
			break
		}
		if mem != x1.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
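	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))) y)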
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
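	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))) y)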
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		y := or.Args[1]
		s1 := or.Args[0]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
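	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))) y)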
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		p := x1.Args[0]
		idx := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQ_160(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
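	// match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))) y)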
	for {
		_ = v.Args[1]
		or := v.Args[0]
		if or.Op != OpAMD64ORQ {
			break
		}
		_ = or.Args[1]
		y := or.Args[0]
		s1 := or.Args[1]
		if s1.Op != OpAMD64SHLQconst {
			break
		}
		j1 := s1.AuxInt
		r1 := s1.Args[0]
		if r1.Op != OpAMD64ROLWconst {
			break
		}
		if r1.AuxInt != 8 {
			break
		}
		x1 := r1.Args[0]
		if x1.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i1 := x1.AuxInt
		s := x1.Aux
		mem := x1.Args[2]
		idx := x1.Args[0]
		p := x1.Args[1]
		s0 := v.Args[1]
		if s0.Op != OpAMD64SHLQconst {
			break
		}
		j0 := s0.AuxInt
		r0 := s0.Args[0]
		if r0.Op != OpAMD64ROLWconst {
			break
		}
		if r0.AuxInt != 8 {
			break
		}
		x0 := r0.Args[0]
		if x0.Op != OpAMD64MOVWloadidx1 {
			break
		}
		i0 := x0.AuxInt
		if x0.Aux != s {
			break
		}
		_ = x0.Args[2]
		if p != x0.Args[0] {
			break
		}
		if idx != x0.Args[1] {
			break
		}
		if mem != x0.Args[2] {
			break
		}
		if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
			break
		}
		b = mergePoint(b, x0, x1, y)
		v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
		v.reset(OpCopy)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
		v1.AuxInt = j1
		v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
		v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
		v3.AuxInt = i0
		v3.Aux = s
		v3.AddArg(p)
		v3.AddArg(idx)
		v3.AddArg(mem)
		v2.AddArg(v3)
		v1.AddArg(v2)
		v0.AddArg(v1)
		v0.AddArg(y)
		return true
	}
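	// match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
	// cond: i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
	// result: @mergePoint(b, x0, x1, y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 <typ.UInt32> [i0] {s} p idx mem))) y)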
44758 for {
44759 _ = v.Args[1]
44760 or := v.Args[0]
44761 if or.Op != OpAMD64ORQ {
44762 break
44763 }
44764 y := or.Args[1]
44765 s1 := or.Args[0]
44766 if s1.Op != OpAMD64SHLQconst {
44767 break
44768 }
44769 j1 := s1.AuxInt
44770 r1 := s1.Args[0]
44771 if r1.Op != OpAMD64ROLWconst {
44772 break
44773 }
44774 if r1.AuxInt != 8 {
44775 break
44776 }
44777 x1 := r1.Args[0]
44778 if x1.Op != OpAMD64MOVWloadidx1 {
44779 break
44780 }
44781 i1 := x1.AuxInt
44782 s := x1.Aux
44783 mem := x1.Args[2]
44784 p := x1.Args[0]
44785 idx := x1.Args[1]
44786 s0 := v.Args[1]
44787 if s0.Op != OpAMD64SHLQconst {
44788 break
44789 }
44790 j0 := s0.AuxInt
44791 r0 := s0.Args[0]
44792 if r0.Op != OpAMD64ROLWconst {
44793 break
44794 }
44795 if r0.AuxInt != 8 {
44796 break
44797 }
44798 x0 := r0.Args[0]
44799 if x0.Op != OpAMD64MOVWloadidx1 {
44800 break
44801 }
44802 i0 := x0.AuxInt
44803 if x0.Aux != s {
44804 break
44805 }
44806 _ = x0.Args[2]
44807 if idx != x0.Args[0] {
44808 break
44809 }
44810 if p != x0.Args[1] {
44811 break
44812 }
44813 if mem != x0.Args[2] {
44814 break
44815 }
44816 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
44817 break
44818 }
44819 b = mergePoint(b, x0, x1, y)
44820 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
44821 v.reset(OpCopy)
44822 v.AddArg(v0)
44823 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
44824 v1.AuxInt = j1
44825 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
44826 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
44827 v3.AuxInt = i0
44828 v3.Aux = s
44829 v3.AddArg(p)
44830 v3.AddArg(idx)
44831 v3.AddArg(mem)
44832 v2.AddArg(v3)
44833 v1.AddArg(v2)
44834 v0.AddArg(v1)
44835 v0.AddArg(y)
44836 return true
44837 }
44838 // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
44839 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
44840 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
44841 for {
44842 _ = v.Args[1]
44843 or := v.Args[0]
44844 if or.Op != OpAMD64ORQ {
44845 break
44846 }
44847 y := or.Args[1]
44848 s1 := or.Args[0]
44849 if s1.Op != OpAMD64SHLQconst {
44850 break
44851 }
44852 j1 := s1.AuxInt
44853 r1 := s1.Args[0]
44854 if r1.Op != OpAMD64ROLWconst {
44855 break
44856 }
44857 if r1.AuxInt != 8 {
44858 break
44859 }
44860 x1 := r1.Args[0]
44861 if x1.Op != OpAMD64MOVWloadidx1 {
44862 break
44863 }
44864 i1 := x1.AuxInt
44865 s := x1.Aux
44866 mem := x1.Args[2]
44867 idx := x1.Args[0]
44868 p := x1.Args[1]
44869 s0 := v.Args[1]
44870 if s0.Op != OpAMD64SHLQconst {
44871 break
44872 }
44873 j0 := s0.AuxInt
44874 r0 := s0.Args[0]
44875 if r0.Op != OpAMD64ROLWconst {
44876 break
44877 }
44878 if r0.AuxInt != 8 {
44879 break
44880 }
44881 x0 := r0.Args[0]
44882 if x0.Op != OpAMD64MOVWloadidx1 {
44883 break
44884 }
44885 i0 := x0.AuxInt
44886 if x0.Aux != s {
44887 break
44888 }
44889 _ = x0.Args[2]
44890 if idx != x0.Args[0] {
44891 break
44892 }
44893 if p != x0.Args[1] {
44894 break
44895 }
44896 if mem != x0.Args[2] {
44897 break
44898 }
44899 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
44900 break
44901 }
44902 b = mergePoint(b, x0, x1, y)
44903 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
44904 v.reset(OpCopy)
44905 v.AddArg(v0)
44906 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
44907 v1.AuxInt = j1
44908 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
44909 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
44910 v3.AuxInt = i0
44911 v3.Aux = s
44912 v3.AddArg(p)
44913 v3.AddArg(idx)
44914 v3.AddArg(mem)
44915 v2.AddArg(v3)
44916 v1.AddArg(v2)
44917 v0.AddArg(v1)
44918 v0.AddArg(y)
44919 return true
44920 }
44921 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
44922 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
44923 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
44924 for {
44925 _ = v.Args[1]
44926 or := v.Args[0]
44927 if or.Op != OpAMD64ORQ {
44928 break
44929 }
44930 _ = or.Args[1]
44931 y := or.Args[0]
44932 s1 := or.Args[1]
44933 if s1.Op != OpAMD64SHLQconst {
44934 break
44935 }
44936 j1 := s1.AuxInt
44937 r1 := s1.Args[0]
44938 if r1.Op != OpAMD64ROLWconst {
44939 break
44940 }
44941 if r1.AuxInt != 8 {
44942 break
44943 }
44944 x1 := r1.Args[0]
44945 if x1.Op != OpAMD64MOVWloadidx1 {
44946 break
44947 }
44948 i1 := x1.AuxInt
44949 s := x1.Aux
44950 mem := x1.Args[2]
44951 p := x1.Args[0]
44952 idx := x1.Args[1]
44953 s0 := v.Args[1]
44954 if s0.Op != OpAMD64SHLQconst {
44955 break
44956 }
44957 j0 := s0.AuxInt
44958 r0 := s0.Args[0]
44959 if r0.Op != OpAMD64ROLWconst {
44960 break
44961 }
44962 if r0.AuxInt != 8 {
44963 break
44964 }
44965 x0 := r0.Args[0]
44966 if x0.Op != OpAMD64MOVWloadidx1 {
44967 break
44968 }
44969 i0 := x0.AuxInt
44970 if x0.Aux != s {
44971 break
44972 }
44973 _ = x0.Args[2]
44974 if idx != x0.Args[0] {
44975 break
44976 }
44977 if p != x0.Args[1] {
44978 break
44979 }
44980 if mem != x0.Args[2] {
44981 break
44982 }
44983 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
44984 break
44985 }
44986 b = mergePoint(b, x0, x1, y)
44987 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
44988 v.reset(OpCopy)
44989 v.AddArg(v0)
44990 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
44991 v1.AuxInt = j1
44992 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
44993 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
44994 v3.AuxInt = i0
44995 v3.Aux = s
44996 v3.AddArg(p)
44997 v3.AddArg(idx)
44998 v3.AddArg(mem)
44999 v2.AddArg(v3)
45000 v1.AddArg(v2)
45001 v0.AddArg(v1)
45002 v0.AddArg(y)
45003 return true
45004 }
45005 // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))))
45006 // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)
45007 // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLloadidx1 [i0] {s} p idx mem))) y)
45008 for {
45009 _ = v.Args[1]
45010 or := v.Args[0]
45011 if or.Op != OpAMD64ORQ {
45012 break
45013 }
45014 _ = or.Args[1]
45015 y := or.Args[0]
45016 s1 := or.Args[1]
45017 if s1.Op != OpAMD64SHLQconst {
45018 break
45019 }
45020 j1 := s1.AuxInt
45021 r1 := s1.Args[0]
45022 if r1.Op != OpAMD64ROLWconst {
45023 break
45024 }
45025 if r1.AuxInt != 8 {
45026 break
45027 }
45028 x1 := r1.Args[0]
45029 if x1.Op != OpAMD64MOVWloadidx1 {
45030 break
45031 }
45032 i1 := x1.AuxInt
45033 s := x1.Aux
45034 mem := x1.Args[2]
45035 idx := x1.Args[0]
45036 p := x1.Args[1]
45037 s0 := v.Args[1]
45038 if s0.Op != OpAMD64SHLQconst {
45039 break
45040 }
45041 j0 := s0.AuxInt
45042 r0 := s0.Args[0]
45043 if r0.Op != OpAMD64ROLWconst {
45044 break
45045 }
45046 if r0.AuxInt != 8 {
45047 break
45048 }
45049 x0 := r0.Args[0]
45050 if x0.Op != OpAMD64MOVWloadidx1 {
45051 break
45052 }
45053 i0 := x0.AuxInt
45054 if x0.Aux != s {
45055 break
45056 }
45057 _ = x0.Args[2]
45058 if idx != x0.Args[0] {
45059 break
45060 }
45061 if p != x0.Args[1] {
45062 break
45063 }
45064 if mem != x0.Args[2] {
45065 break
45066 }
45067 if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) {
45068 break
45069 }
45070 b = mergePoint(b, x0, x1, y)
45071 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type)
45072 v.reset(OpCopy)
45073 v.AddArg(v0)
45074 v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
45075 v1.AuxInt = j1
45076 v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, typ.UInt32)
45077 v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
45078 v3.AuxInt = i0
45079 v3.Aux = s
45080 v3.AddArg(p)
45081 v3.AddArg(idx)
45082 v3.AddArg(mem)
45083 v2.AddArg(v3)
45084 v1.AddArg(v2)
45085 v0.AddArg(v1)
45086 v0.AddArg(y)
45087 return true
45088 }
45089 // match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
45090 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
45091 // result: (ORQload x [off] {sym} ptr mem)
45092 for {
45093 _ = v.Args[1]
45094 x := v.Args[0]
45095 l := v.Args[1]
45096 if l.Op != OpAMD64MOVQload {
45097 break
45098 }
45099 off := l.AuxInt
45100 sym := l.Aux
45101 mem := l.Args[1]
45102 ptr := l.Args[0]
45103 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
45104 break
45105 }
45106 v.reset(OpAMD64ORQload)
45107 v.AuxInt = off
45108 v.Aux = sym
45109 v.AddArg(x)
45110 v.AddArg(ptr)
45111 v.AddArg(mem)
45112 return true
45113 }
45114 // match: (ORQ l:(MOVQload [off] {sym} ptr mem) x)
45115 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
45116 // result: (ORQload x [off] {sym} ptr mem)
45117 for {
45118 x := v.Args[1]
45119 l := v.Args[0]
45120 if l.Op != OpAMD64MOVQload {
45121 break
45122 }
45123 off := l.AuxInt
45124 sym := l.Aux
45125 mem := l.Args[1]
45126 ptr := l.Args[0]
45127 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
45128 break
45129 }
45130 v.reset(OpAMD64ORQload)
45131 v.AuxInt = off
45132 v.Aux = sym
45133 v.AddArg(x)
45134 v.AddArg(ptr)
45135 v.AddArg(mem)
45136 return true
45137 }
45138 return false
45139 }
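// ORQconst folds constant ORs: a power-of-two constant >= 128 becomes a
// BTSQconst bit-set (except on NaCl), nested constants merge, OR with 0
// is a no-op, and OR with -1 or another constant folds to a MOVQconst.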
45140 func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool {
45141 b := v.Block
45142 config := b.Func.Config
45143 // match: (ORQconst [c] x)
45144 // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
45145 // result: (BTSQconst [log2(c)] x)
45146 for {
45147 c := v.AuxInt
45148 x := v.Args[0]
45149 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
45150 break
45151 }
45152 v.reset(OpAMD64BTSQconst)
45153 v.AuxInt = log2(c)
45154 v.AddArg(x)
45155 return true
45156 }
45157 // match: (ORQconst [c] (ORQconst [d] x))
45158 // cond:
45159 // result: (ORQconst [c | d] x)
45160 for {
45161 c := v.AuxInt
45162 v_0 := v.Args[0]
45163 if v_0.Op != OpAMD64ORQconst {
45164 break
45165 }
45166 d := v_0.AuxInt
45167 x := v_0.Args[0]
45168 v.reset(OpAMD64ORQconst)
45169 v.AuxInt = c | d
45170 v.AddArg(x)
45171 return true
45172 }
45173 // match: (ORQconst [c] (BTSQconst [d] x))
45174 // cond:
45175 // result: (ORQconst [c | 1<<uint32(d)] x)
45176 for {
45177 c := v.AuxInt
45178 v_0 := v.Args[0]
45179 if v_0.Op != OpAMD64BTSQconst {
45180 break
45181 }
45182 d := v_0.AuxInt
45183 x := v_0.Args[0]
45184 v.reset(OpAMD64ORQconst)
45185 v.AuxInt = c | 1<<uint32(d)
45186 v.AddArg(x)
45187 return true
45188 }
45189 // match: (ORQconst [0] x)
45190 // cond:
45191 // result: x
45192 for {
45193 if v.AuxInt != 0 {
45194 break
45195 }
45196 x := v.Args[0]
45197 v.reset(OpCopy)
45198 v.Type = x.Type
45199 v.AddArg(x)
45200 return true
45201 }
45202 // match: (ORQconst [-1] _)
45203 // cond:
45204 // result: (MOVQconst [-1])
45205 for {
45206 if v.AuxInt != -1 {
45207 break
45208 }
45209 v.reset(OpAMD64MOVQconst)
45210 v.AuxInt = -1
45211 return true
45212 }
45213 // match: (ORQconst [c] (MOVQconst [d]))
45214 // cond:
45215 // result: (MOVQconst [c|d])
45216 for {
45217 c := v.AuxInt
45218 v_0 := v.Args[0]
45219 if v_0.Op != OpAMD64MOVQconst {
45220 break
45221 }
45222 d := v_0.AuxInt
45223 v.reset(OpAMD64MOVQconst)
45224 v.AuxInt = c | d
45225 return true
45226 }
45227 return false
45228 }
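// ORQconstmodify folds address arithmetic (ADDQconst, LEAQ) on the base
// pointer into the instruction's own value-and-offset, provided the
// combined offset still fits and any symbols can be merged.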
45229 func rewriteValueAMD64_OpAMD64ORQconstmodify_0(v *Value) bool {
45230 // match: (ORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
45231 // cond: ValAndOff(valoff1).canAdd(off2)
45232 // result: (ORQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
45233 for {
45234 valoff1 := v.AuxInt
45235 sym := v.Aux
45236 mem := v.Args[1]
45237 v_0 := v.Args[0]
45238 if v_0.Op != OpAMD64ADDQconst {
45239 break
45240 }
45241 off2 := v_0.AuxInt
45242 base := v_0.Args[0]
45243 if !(ValAndOff(valoff1).canAdd(off2)) {
45244 break
45245 }
45246 v.reset(OpAMD64ORQconstmodify)
45247 v.AuxInt = ValAndOff(valoff1).add(off2)
45248 v.Aux = sym
45249 v.AddArg(base)
45250 v.AddArg(mem)
45251 return true
45252 }
45253 // match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
45254 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
45255 // result: (ORQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
45256 for {
45257 valoff1 := v.AuxInt
45258 sym1 := v.Aux
45259 mem := v.Args[1]
45260 v_0 := v.Args[0]
45261 if v_0.Op != OpAMD64LEAQ {
45262 break
45263 }
45264 off2 := v_0.AuxInt
45265 sym2 := v_0.Aux
45266 base := v_0.Args[0]
45267 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
45268 break
45269 }
45270 v.reset(OpAMD64ORQconstmodify)
45271 v.AuxInt = ValAndOff(valoff1).add(off2)
45272 v.Aux = mergeSym(sym1, sym2)
45273 v.AddArg(base)
45274 v.AddArg(mem)
45275 return true
45276 }
45277 return false
45278 }
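// ORQload folds ADDQconst/LEAQ address arithmetic into the load offset,
// and, when OR'ing with a float value that was just stored to the same
// address, reuses the stored value via MOVQf2i instead of reloading it.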
45279 func rewriteValueAMD64_OpAMD64ORQload_0(v *Value) bool {
45280 b := v.Block
45281 typ := &b.Func.Config.Types
45282 // match: (ORQload [off1] {sym} val (ADDQconst [off2] base) mem)
45283 // cond: is32Bit(off1+off2)
45284 // result: (ORQload [off1+off2] {sym} val base mem)
45285 for {
45286 off1 := v.AuxInt
45287 sym := v.Aux
45288 mem := v.Args[2]
45289 val := v.Args[0]
45290 v_1 := v.Args[1]
45291 if v_1.Op != OpAMD64ADDQconst {
45292 break
45293 }
45294 off2 := v_1.AuxInt
45295 base := v_1.Args[0]
45296 if !(is32Bit(off1 + off2)) {
45297 break
45298 }
45299 v.reset(OpAMD64ORQload)
45300 v.AuxInt = off1 + off2
45301 v.Aux = sym
45302 v.AddArg(val)
45303 v.AddArg(base)
45304 v.AddArg(mem)
45305 return true
45306 }
45307 // match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
45308 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
45309 // result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
45310 for {
45311 off1 := v.AuxInt
45312 sym1 := v.Aux
45313 mem := v.Args[2]
45314 val := v.Args[0]
45315 v_1 := v.Args[1]
45316 if v_1.Op != OpAMD64LEAQ {
45317 break
45318 }
45319 off2 := v_1.AuxInt
45320 sym2 := v_1.Aux
45321 base := v_1.Args[0]
45322 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
45323 break
45324 }
45325 v.reset(OpAMD64ORQload)
45326 v.AuxInt = off1 + off2
45327 v.Aux = mergeSym(sym1, sym2)
45328 v.AddArg(val)
45329 v.AddArg(base)
45330 v.AddArg(mem)
45331 return true
45332 }
45333 // match: (ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
45334 // cond:
45335 // result: (ORQ x (MOVQf2i y))
45336 for {
45337 off := v.AuxInt
45338 sym := v.Aux
45339 _ = v.Args[2]
45340 x := v.Args[0]
45341 ptr := v.Args[1]
45342 v_2 := v.Args[2]
45343 if v_2.Op != OpAMD64MOVSDstore {
45344 break
45345 }
45346 if v_2.AuxInt != off {
45347 break
45348 }
45349 if v_2.Aux != sym {
45350 break
45351 }
45352 _ = v_2.Args[2]
45353 if ptr != v_2.Args[0] {
45354 break
45355 }
45356 y := v_2.Args[1]
45357 v.reset(OpAMD64ORQ)
45358 v.AddArg(x)
45359 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
45360 v0.AddArg(y)
45361 v.AddArg(v0)
45362 return true
45363 }
45364 return false
45365 }
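// ORQmodify likewise folds ADDQconst/LEAQ base-address arithmetic into
// the read-modify-write instruction's offset.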
45366 func rewriteValueAMD64_OpAMD64ORQmodify_0(v *Value) bool {
45367 // match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
45368 // cond: is32Bit(off1+off2)
45369 // result: (ORQmodify [off1+off2] {sym} base val mem)
45370 for {
45371 off1 := v.AuxInt
45372 sym := v.Aux
45373 mem := v.Args[2]
45374 v_0 := v.Args[0]
45375 if v_0.Op != OpAMD64ADDQconst {
45376 break
45377 }
45378 off2 := v_0.AuxInt
45379 base := v_0.Args[0]
45380 val := v.Args[1]
45381 if !(is32Bit(off1 + off2)) {
45382 break
45383 }
45384 v.reset(OpAMD64ORQmodify)
45385 v.AuxInt = off1 + off2
45386 v.Aux = sym
45387 v.AddArg(base)
45388 v.AddArg(val)
45389 v.AddArg(mem)
45390 return true
45391 }
45392 // match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
45393 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
45394 // result: (ORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
45395 for {
45396 off1 := v.AuxInt
45397 sym1 := v.Aux
45398 mem := v.Args[2]
45399 v_0 := v.Args[0]
45400 if v_0.Op != OpAMD64LEAQ {
45401 break
45402 }
45403 off2 := v_0.AuxInt
45404 sym2 := v_0.Aux
45405 base := v_0.Args[0]
45406 val := v.Args[1]
45407 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
45408 break
45409 }
45410 v.reset(OpAMD64ORQmodify)
45411 v.AuxInt = off1 + off2
45412 v.Aux = mergeSym(sym1, sym2)
45413 v.AddArg(base)
45414 v.AddArg(val)
45415 v.AddArg(mem)
45416 return true
45417 }
45418 return false
45419 }
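// The ROLx rules canonicalize rotates: rotating left by a negated count
// is a right rotate, and a constant count is masked to the operand width
// (7, 15, 31, or 63) and turned into the ROLxconst form.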
45420 func rewriteValueAMD64_OpAMD64ROLB_0(v *Value) bool {
45421 // match: (ROLB x (NEGQ y))
45422 // cond:
45423 // result: (RORB x y)
45424 for {
45425 _ = v.Args[1]
45426 x := v.Args[0]
45427 v_1 := v.Args[1]
45428 if v_1.Op != OpAMD64NEGQ {
45429 break
45430 }
45431 y := v_1.Args[0]
45432 v.reset(OpAMD64RORB)
45433 v.AddArg(x)
45434 v.AddArg(y)
45435 return true
45436 }
45437 // match: (ROLB x (NEGL y))
45438 // cond:
45439 // result: (RORB x y)
45440 for {
45441 _ = v.Args[1]
45442 x := v.Args[0]
45443 v_1 := v.Args[1]
45444 if v_1.Op != OpAMD64NEGL {
45445 break
45446 }
45447 y := v_1.Args[0]
45448 v.reset(OpAMD64RORB)
45449 v.AddArg(x)
45450 v.AddArg(y)
45451 return true
45452 }
45453 // match: (ROLB x (MOVQconst [c]))
45454 // cond:
45455 // result: (ROLBconst [c&7] x)
45456 for {
45457 _ = v.Args[1]
45458 x := v.Args[0]
45459 v_1 := v.Args[1]
45460 if v_1.Op != OpAMD64MOVQconst {
45461 break
45462 }
45463 c := v_1.AuxInt
45464 v.reset(OpAMD64ROLBconst)
45465 v.AuxInt = c & 7
45466 v.AddArg(x)
45467 return true
45468 }
45469 // match: (ROLB x (MOVLconst [c]))
45470 // cond:
45471 // result: (ROLBconst [c&7] x)
45472 for {
45473 _ = v.Args[1]
45474 x := v.Args[0]
45475 v_1 := v.Args[1]
45476 if v_1.Op != OpAMD64MOVLconst {
45477 break
45478 }
45479 c := v_1.AuxInt
45480 v.reset(OpAMD64ROLBconst)
45481 v.AuxInt = c & 7
45482 v.AddArg(x)
45483 return true
45484 }
45485 return false
45486 }
45487 func rewriteValueAMD64_OpAMD64ROLBconst_0(v *Value) bool {
45488 // match: (ROLBconst [c] (ROLBconst [d] x))
45489 // cond:
45490 // result: (ROLBconst [(c+d)&7] x)
45491 for {
45492 c := v.AuxInt
45493 v_0 := v.Args[0]
45494 if v_0.Op != OpAMD64ROLBconst {
45495 break
45496 }
45497 d := v_0.AuxInt
45498 x := v_0.Args[0]
45499 v.reset(OpAMD64ROLBconst)
45500 v.AuxInt = (c + d) & 7
45501 v.AddArg(x)
45502 return true
45503 }
45504 // match: (ROLBconst x [0])
45505 // cond:
45506 // result: x
45507 for {
45508 if v.AuxInt != 0 {
45509 break
45510 }
45511 x := v.Args[0]
45512 v.reset(OpCopy)
45513 v.Type = x.Type
45514 v.AddArg(x)
45515 return true
45516 }
45517 return false
45518 }
45519 func rewriteValueAMD64_OpAMD64ROLL_0(v *Value) bool {
45520 // match: (ROLL x (NEGQ y))
45521 // cond:
45522 // result: (RORL x y)
45523 for {
45524 _ = v.Args[1]
45525 x := v.Args[0]
45526 v_1 := v.Args[1]
45527 if v_1.Op != OpAMD64NEGQ {
45528 break
45529 }
45530 y := v_1.Args[0]
45531 v.reset(OpAMD64RORL)
45532 v.AddArg(x)
45533 v.AddArg(y)
45534 return true
45535 }
45536 // match: (ROLL x (NEGL y))
45537 // cond:
45538 // result: (RORL x y)
45539 for {
45540 _ = v.Args[1]
45541 x := v.Args[0]
45542 v_1 := v.Args[1]
45543 if v_1.Op != OpAMD64NEGL {
45544 break
45545 }
45546 y := v_1.Args[0]
45547 v.reset(OpAMD64RORL)
45548 v.AddArg(x)
45549 v.AddArg(y)
45550 return true
45551 }
45552 // match: (ROLL x (MOVQconst [c]))
45553 // cond:
45554 // result: (ROLLconst [c&31] x)
45555 for {
45556 _ = v.Args[1]
45557 x := v.Args[0]
45558 v_1 := v.Args[1]
45559 if v_1.Op != OpAMD64MOVQconst {
45560 break
45561 }
45562 c := v_1.AuxInt
45563 v.reset(OpAMD64ROLLconst)
45564 v.AuxInt = c & 31
45565 v.AddArg(x)
45566 return true
45567 }
45568 // match: (ROLL x (MOVLconst [c]))
45569 // cond:
45570 // result: (ROLLconst [c&31] x)
45571 for {
45572 _ = v.Args[1]
45573 x := v.Args[0]
45574 v_1 := v.Args[1]
45575 if v_1.Op != OpAMD64MOVLconst {
45576 break
45577 }
45578 c := v_1.AuxInt
45579 v.reset(OpAMD64ROLLconst)
45580 v.AuxInt = c & 31
45581 v.AddArg(x)
45582 return true
45583 }
45584 return false
45585 }
45586 func rewriteValueAMD64_OpAMD64ROLLconst_0(v *Value) bool {
45587 // match: (ROLLconst [c] (ROLLconst [d] x))
45588 // cond:
45589 // result: (ROLLconst [(c+d)&31] x)
45590 for {
45591 c := v.AuxInt
45592 v_0 := v.Args[0]
45593 if v_0.Op != OpAMD64ROLLconst {
45594 break
45595 }
45596 d := v_0.AuxInt
45597 x := v_0.Args[0]
45598 v.reset(OpAMD64ROLLconst)
45599 v.AuxInt = (c + d) & 31
45600 v.AddArg(x)
45601 return true
45602 }
45603 // match: (ROLLconst x [0])
45604 // cond:
45605 // result: x
45606 for {
45607 if v.AuxInt != 0 {
45608 break
45609 }
45610 x := v.Args[0]
45611 v.reset(OpCopy)
45612 v.Type = x.Type
45613 v.AddArg(x)
45614 return true
45615 }
45616 return false
45617 }
45618 func rewriteValueAMD64_OpAMD64ROLQ_0(v *Value) bool {
45619 // match: (ROLQ x (NEGQ y))
45620 // cond:
45621 // result: (RORQ x y)
45622 for {
45623 _ = v.Args[1]
45624 x := v.Args[0]
45625 v_1 := v.Args[1]
45626 if v_1.Op != OpAMD64NEGQ {
45627 break
45628 }
45629 y := v_1.Args[0]
45630 v.reset(OpAMD64RORQ)
45631 v.AddArg(x)
45632 v.AddArg(y)
45633 return true
45634 }
45635 // match: (ROLQ x (NEGL y))
45636 // cond:
45637 // result: (RORQ x y)
45638 for {
45639 _ = v.Args[1]
45640 x := v.Args[0]
45641 v_1 := v.Args[1]
45642 if v_1.Op != OpAMD64NEGL {
45643 break
45644 }
45645 y := v_1.Args[0]
45646 v.reset(OpAMD64RORQ)
45647 v.AddArg(x)
45648 v.AddArg(y)
45649 return true
45650 }
45651 // match: (ROLQ x (MOVQconst [c]))
45652 // cond:
45653 // result: (ROLQconst [c&63] x)
45654 for {
45655 _ = v.Args[1]
45656 x := v.Args[0]
45657 v_1 := v.Args[1]
45658 if v_1.Op != OpAMD64MOVQconst {
45659 break
45660 }
45661 c := v_1.AuxInt
45662 v.reset(OpAMD64ROLQconst)
45663 v.AuxInt = c & 63
45664 v.AddArg(x)
45665 return true
45666 }
45667 // match: (ROLQ x (MOVLconst [c]))
45668 // cond:
45669 // result: (ROLQconst [c&63] x)
45670 for {
45671 _ = v.Args[1]
45672 x := v.Args[0]
45673 v_1 := v.Args[1]
45674 if v_1.Op != OpAMD64MOVLconst {
45675 break
45676 }
45677 c := v_1.AuxInt
45678 v.reset(OpAMD64ROLQconst)
45679 v.AuxInt = c & 63
45680 v.AddArg(x)
45681 return true
45682 }
45683 return false
45684 }
45685 func rewriteValueAMD64_OpAMD64ROLQconst_0(v *Value) bool {
45686 // match: (ROLQconst [c] (ROLQconst [d] x))
45687 // cond:
45688 // result: (ROLQconst [(c+d)&63] x)
45689 for {
45690 c := v.AuxInt
45691 v_0 := v.Args[0]
45692 if v_0.Op != OpAMD64ROLQconst {
45693 break
45694 }
45695 d := v_0.AuxInt
45696 x := v_0.Args[0]
45697 v.reset(OpAMD64ROLQconst)
45698 v.AuxInt = (c + d) & 63
45699 v.AddArg(x)
45700 return true
45701 }
45702 // match: (ROLQconst x [0])
45703 // cond:
45704 // result: x
45705 for {
45706 if v.AuxInt != 0 {
45707 break
45708 }
45709 x := v.Args[0]
45710 v.reset(OpCopy)
45711 v.Type = x.Type
45712 v.AddArg(x)
45713 return true
45714 }
45715 return false
45716 }
45717 func rewriteValueAMD64_OpAMD64ROLW_0(v *Value) bool {
45718 // match: (ROLW x (NEGQ y))
45719 // cond:
45720 // result: (RORW x y)
45721 for {
45722 _ = v.Args[1]
45723 x := v.Args[0]
45724 v_1 := v.Args[1]
45725 if v_1.Op != OpAMD64NEGQ {
45726 break
45727 }
45728 y := v_1.Args[0]
45729 v.reset(OpAMD64RORW)
45730 v.AddArg(x)
45731 v.AddArg(y)
45732 return true
45733 }
45734 // match: (ROLW x (NEGL y))
45735 // cond:
45736 // result: (RORW x y)
45737 for {
45738 _ = v.Args[1]
45739 x := v.Args[0]
45740 v_1 := v.Args[1]
45741 if v_1.Op != OpAMD64NEGL {
45742 break
45743 }
45744 y := v_1.Args[0]
45745 v.reset(OpAMD64RORW)
45746 v.AddArg(x)
45747 v.AddArg(y)
45748 return true
45749 }
45750 // match: (ROLW x (MOVQconst [c]))
45751 // cond:
45752 // result: (ROLWconst [c&15] x)
45753 for {
45754 _ = v.Args[1]
45755 x := v.Args[0]
45756 v_1 := v.Args[1]
45757 if v_1.Op != OpAMD64MOVQconst {
45758 break
45759 }
45760 c := v_1.AuxInt
45761 v.reset(OpAMD64ROLWconst)
45762 v.AuxInt = c & 15
45763 v.AddArg(x)
45764 return true
45765 }
45766 // match: (ROLW x (MOVLconst [c]))
45767 // cond:
45768 // result: (ROLWconst [c&15] x)
45769 for {
45770 _ = v.Args[1]
45771 x := v.Args[0]
45772 v_1 := v.Args[1]
45773 if v_1.Op != OpAMD64MOVLconst {
45774 break
45775 }
45776 c := v_1.AuxInt
45777 v.reset(OpAMD64ROLWconst)
45778 v.AuxInt = c & 15
45779 v.AddArg(x)
45780 return true
45781 }
45782 return false
45783 }
45784 func rewriteValueAMD64_OpAMD64ROLWconst_0(v *Value) bool {
45785 // match: (ROLWconst [c] (ROLWconst [d] x))
45786 // cond:
45787 // result: (ROLWconst [(c+d)&15] x)
45788 for {
45789 c := v.AuxInt
45790 v_0 := v.Args[0]
45791 if v_0.Op != OpAMD64ROLWconst {
45792 break
45793 }
45794 d := v_0.AuxInt
45795 x := v_0.Args[0]
45796 v.reset(OpAMD64ROLWconst)
45797 v.AuxInt = (c + d) & 15
45798 v.AddArg(x)
45799 return true
45800 }
45801 // match: (ROLWconst x [0])
45802 // cond:
45803 // result: x
45804 for {
45805 if v.AuxInt != 0 {
45806 break
45807 }
45808 x := v.Args[0]
45809 v.reset(OpCopy)
45810 v.Type = x.Type
45811 v.AddArg(x)
45812 return true
45813 }
45814 return false
45815 }
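// The RORx rules mirror the ROLx ones: a right rotate by a negated count
// becomes a left rotate, and a constant count c becomes a left rotate by
// (-c) masked to the operand width.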
45816 func rewriteValueAMD64_OpAMD64RORB_0(v *Value) bool {
45817 // match: (RORB x (NEGQ y))
45818 // cond:
45819 // result: (ROLB x y)
45820 for {
45821 _ = v.Args[1]
45822 x := v.Args[0]
45823 v_1 := v.Args[1]
45824 if v_1.Op != OpAMD64NEGQ {
45825 break
45826 }
45827 y := v_1.Args[0]
45828 v.reset(OpAMD64ROLB)
45829 v.AddArg(x)
45830 v.AddArg(y)
45831 return true
45832 }
45833 // match: (RORB x (NEGL y))
45834 // cond:
45835 // result: (ROLB x y)
45836 for {
45837 _ = v.Args[1]
45838 x := v.Args[0]
45839 v_1 := v.Args[1]
45840 if v_1.Op != OpAMD64NEGL {
45841 break
45842 }
45843 y := v_1.Args[0]
45844 v.reset(OpAMD64ROLB)
45845 v.AddArg(x)
45846 v.AddArg(y)
45847 return true
45848 }
45849 // match: (RORB x (MOVQconst [c]))
45850 // cond:
45851 // result: (ROLBconst [(-c)&7] x)
45852 for {
45853 _ = v.Args[1]
45854 x := v.Args[0]
45855 v_1 := v.Args[1]
45856 if v_1.Op != OpAMD64MOVQconst {
45857 break
45858 }
45859 c := v_1.AuxInt
45860 v.reset(OpAMD64ROLBconst)
45861 v.AuxInt = (-c) & 7
45862 v.AddArg(x)
45863 return true
45864 }
45865 // match: (RORB x (MOVLconst [c]))
45866 // cond:
45867 // result: (ROLBconst [(-c)&7] x)
45868 for {
45869 _ = v.Args[1]
45870 x := v.Args[0]
45871 v_1 := v.Args[1]
45872 if v_1.Op != OpAMD64MOVLconst {
45873 break
45874 }
45875 c := v_1.AuxInt
45876 v.reset(OpAMD64ROLBconst)
45877 v.AuxInt = (-c) & 7
45878 v.AddArg(x)
45879 return true
45880 }
45881 return false
45882 }
45883 func rewriteValueAMD64_OpAMD64RORL_0(v *Value) bool {
45884 // match: (RORL x (NEGQ y))
45885 // cond:
45886 // result: (ROLL x y)
45887 for {
45888 _ = v.Args[1]
45889 x := v.Args[0]
45890 v_1 := v.Args[1]
45891 if v_1.Op != OpAMD64NEGQ {
45892 break
45893 }
45894 y := v_1.Args[0]
45895 v.reset(OpAMD64ROLL)
45896 v.AddArg(x)
45897 v.AddArg(y)
45898 return true
45899 }
45900 // match: (RORL x (NEGL y))
45901 // cond:
45902 // result: (ROLL x y)
45903 for {
45904 _ = v.Args[1]
45905 x := v.Args[0]
45906 v_1 := v.Args[1]
45907 if v_1.Op != OpAMD64NEGL {
45908 break
45909 }
45910 y := v_1.Args[0]
45911 v.reset(OpAMD64ROLL)
45912 v.AddArg(x)
45913 v.AddArg(y)
45914 return true
45915 }
45916 // match: (RORL x (MOVQconst [c]))
45917 // cond:
45918 // result: (ROLLconst [(-c)&31] x)
45919 for {
45920 _ = v.Args[1]
45921 x := v.Args[0]
45922 v_1 := v.Args[1]
45923 if v_1.Op != OpAMD64MOVQconst {
45924 break
45925 }
45926 c := v_1.AuxInt
45927 v.reset(OpAMD64ROLLconst)
45928 v.AuxInt = (-c) & 31
45929 v.AddArg(x)
45930 return true
45931 }
45932 // match: (RORL x (MOVLconst [c]))
45933 // cond:
45934 // result: (ROLLconst [(-c)&31] x)
45935 for {
45936 _ = v.Args[1]
45937 x := v.Args[0]
45938 v_1 := v.Args[1]
45939 if v_1.Op != OpAMD64MOVLconst {
45940 break
45941 }
45942 c := v_1.AuxInt
45943 v.reset(OpAMD64ROLLconst)
45944 v.AuxInt = (-c) & 31
45945 v.AddArg(x)
45946 return true
45947 }
45948 return false
45949 }
45950 func rewriteValueAMD64_OpAMD64RORQ_0(v *Value) bool {
45951 // match: (RORQ x (NEGQ y))
45952 // cond:
45953 // result: (ROLQ x y)
45954 for {
45955 _ = v.Args[1]
45956 x := v.Args[0]
45957 v_1 := v.Args[1]
45958 if v_1.Op != OpAMD64NEGQ {
45959 break
45960 }
45961 y := v_1.Args[0]
45962 v.reset(OpAMD64ROLQ)
45963 v.AddArg(x)
45964 v.AddArg(y)
45965 return true
45966 }
45967 // match: (RORQ x (NEGL y))
45968 // cond:
45969 // result: (ROLQ x y)
45970 for {
45971 _ = v.Args[1]
45972 x := v.Args[0]
45973 v_1 := v.Args[1]
45974 if v_1.Op != OpAMD64NEGL {
45975 break
45976 }
45977 y := v_1.Args[0]
45978 v.reset(OpAMD64ROLQ)
45979 v.AddArg(x)
45980 v.AddArg(y)
45981 return true
45982 }
45983 // match: (RORQ x (MOVQconst [c]))
45984 // cond:
45985 // result: (ROLQconst [(-c)&63] x)
45986 for {
45987 _ = v.Args[1]
45988 x := v.Args[0]
45989 v_1 := v.Args[1]
45990 if v_1.Op != OpAMD64MOVQconst {
45991 break
45992 }
45993 c := v_1.AuxInt
45994 v.reset(OpAMD64ROLQconst)
45995 v.AuxInt = (-c) & 63
45996 v.AddArg(x)
45997 return true
45998 }
45999 // match: (RORQ x (MOVLconst [c]))
46000 // cond:
46001 // result: (ROLQconst [(-c)&63] x)
46002 for {
46003 _ = v.Args[1]
46004 x := v.Args[0]
46005 v_1 := v.Args[1]
46006 if v_1.Op != OpAMD64MOVLconst {
46007 break
46008 }
46009 c := v_1.AuxInt
46010 v.reset(OpAMD64ROLQconst)
46011 v.AuxInt = (-c) & 63
46012 v.AddArg(x)
46013 return true
46014 }
46015 return false
46016 }
46017 func rewriteValueAMD64_OpAMD64RORW_0(v *Value) bool {
46018 // match: (RORW x (NEGQ y))
46019 // cond:
46020 // result: (ROLW x y)
46021 for {
46022 _ = v.Args[1]
46023 x := v.Args[0]
46024 v_1 := v.Args[1]
46025 if v_1.Op != OpAMD64NEGQ {
46026 break
46027 }
46028 y := v_1.Args[0]
46029 v.reset(OpAMD64ROLW)
46030 v.AddArg(x)
46031 v.AddArg(y)
46032 return true
46033 }
46034 // match: (RORW x (NEGL y))
46035 // cond:
46036 // result: (ROLW x y)
46037 for {
46038 _ = v.Args[1]
46039 x := v.Args[0]
46040 v_1 := v.Args[1]
46041 if v_1.Op != OpAMD64NEGL {
46042 break
46043 }
46044 y := v_1.Args[0]
46045 v.reset(OpAMD64ROLW)
46046 v.AddArg(x)
46047 v.AddArg(y)
46048 return true
46049 }
46050 // match: (RORW x (MOVQconst [c]))
46051 // cond:
46052 // result: (ROLWconst [(-c)&15] x)
46053 for {
46054 _ = v.Args[1]
46055 x := v.Args[0]
46056 v_1 := v.Args[1]
46057 if v_1.Op != OpAMD64MOVQconst {
46058 break
46059 }
46060 c := v_1.AuxInt
46061 v.reset(OpAMD64ROLWconst)
46062 v.AuxInt = (-c) & 15
46063 v.AddArg(x)
46064 return true
46065 }
46066 // match: (RORW x (MOVLconst [c]))
46067 // cond:
46068 // result: (ROLWconst [(-c)&15] x)
46069 for {
46070 _ = v.Args[1]
46071 x := v.Args[0]
46072 v_1 := v.Args[1]
46073 if v_1.Op != OpAMD64MOVLconst {
46074 break
46075 }
46076 c := v_1.AuxInt
46077 v.reset(OpAMD64ROLWconst)
46078 v.AuxInt = (-c) & 15
46079 v.AddArg(x)
46080 return true
46081 }
46082 return false
46083 }
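// For arithmetic right shifts of 8- and 16-bit values the count is
// clamped with min(c&31, width-1): once every result bit is sign fill,
// larger counts give the same answer, so the clamp preserves semantics.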
46084 func rewriteValueAMD64_OpAMD64SARB_0(v *Value) bool {
46085 // match: (SARB x (MOVQconst [c]))
46086 // cond:
46087 // result: (SARBconst [min(c&31,7)] x)
46088 for {
46089 _ = v.Args[1]
46090 x := v.Args[0]
46091 v_1 := v.Args[1]
46092 if v_1.Op != OpAMD64MOVQconst {
46093 break
46094 }
46095 c := v_1.AuxInt
46096 v.reset(OpAMD64SARBconst)
46097 v.AuxInt = min(c&31, 7)
46098 v.AddArg(x)
46099 return true
46100 }
46101 // match: (SARB x (MOVLconst [c]))
46102 // cond:
46103 // result: (SARBconst [min(c&31,7)] x)
46104 for {
46105 _ = v.Args[1]
46106 x := v.Args[0]
46107 v_1 := v.Args[1]
46108 if v_1.Op != OpAMD64MOVLconst {
46109 break
46110 }
46111 c := v_1.AuxInt
46112 v.reset(OpAMD64SARBconst)
46113 v.AuxInt = min(c&31, 7)
46114 v.AddArg(x)
46115 return true
46116 }
46117 return false
46118 }
46119 func rewriteValueAMD64_OpAMD64SARBconst_0(v *Value) bool {
46120 // match: (SARBconst x [0])
46121 // cond:
46122 // result: x
46123 for {
46124 if v.AuxInt != 0 {
46125 break
46126 }
46127 x := v.Args[0]
46128 v.reset(OpCopy)
46129 v.Type = x.Type
46130 v.AddArg(x)
46131 return true
46132 }
46133 // match: (SARBconst [c] (MOVQconst [d]))
46134 // cond:
46135 // result: (MOVQconst [int64(int8(d))>>uint64(c)])
46136 for {
46137 c := v.AuxInt
46138 v_0 := v.Args[0]
46139 if v_0.Op != OpAMD64MOVQconst {
46140 break
46141 }
46142 d := v_0.AuxInt
46143 v.reset(OpAMD64MOVQconst)
46144 v.AuxInt = int64(int8(d)) >> uint64(c)
46145 return true
46146 }
46147 return false
46148 }
46149 func rewriteValueAMD64_OpAMD64SARL_0(v *Value) bool {
46150 b := v.Block
46151 // match: (SARL x (MOVQconst [c]))
46152 // cond:
46153 // result: (SARLconst [c&31] x)
46154 for {
46155 _ = v.Args[1]
46156 x := v.Args[0]
46157 v_1 := v.Args[1]
46158 if v_1.Op != OpAMD64MOVQconst {
46159 break
46160 }
46161 c := v_1.AuxInt
46162 v.reset(OpAMD64SARLconst)
46163 v.AuxInt = c & 31
46164 v.AddArg(x)
46165 return true
46166 }
46167 // match: (SARL x (MOVLconst [c]))
46168 // cond:
46169 // result: (SARLconst [c&31] x)
46170 for {
46171 _ = v.Args[1]
46172 x := v.Args[0]
46173 v_1 := v.Args[1]
46174 if v_1.Op != OpAMD64MOVLconst {
46175 break
46176 }
46177 c := v_1.AuxInt
46178 v.reset(OpAMD64SARLconst)
46179 v.AuxInt = c & 31
46180 v.AddArg(x)
46181 return true
46182 }
46183 // match: (SARL x (ADDQconst [c] y))
46184 // cond: c & 31 == 0
46185 // result: (SARL x y)
46186 for {
46187 _ = v.Args[1]
46188 x := v.Args[0]
46189 v_1 := v.Args[1]
46190 if v_1.Op != OpAMD64ADDQconst {
46191 break
46192 }
46193 c := v_1.AuxInt
46194 y := v_1.Args[0]
46195 if !(c&31 == 0) {
46196 break
46197 }
46198 v.reset(OpAMD64SARL)
46199 v.AddArg(x)
46200 v.AddArg(y)
46201 return true
46202 }
46203 // match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
46204 // cond: c & 31 == 0
46205 // result: (SARL x (NEGQ <t> y))
46206 for {
46207 _ = v.Args[1]
46208 x := v.Args[0]
46209 v_1 := v.Args[1]
46210 if v_1.Op != OpAMD64NEGQ {
46211 break
46212 }
46213 t := v_1.Type
46214 v_1_0 := v_1.Args[0]
46215 if v_1_0.Op != OpAMD64ADDQconst {
46216 break
46217 }
46218 c := v_1_0.AuxInt
46219 y := v_1_0.Args[0]
46220 if !(c&31 == 0) {
46221 break
46222 }
46223 v.reset(OpAMD64SARL)
46224 v.AddArg(x)
46225 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
46226 v0.AddArg(y)
46227 v.AddArg(v0)
46228 return true
46229 }
46230 // match: (SARL x (ANDQconst [c] y))
46231 // cond: c & 31 == 31
46232 // result: (SARL x y)
46233 for {
46234 _ = v.Args[1]
46235 x := v.Args[0]
46236 v_1 := v.Args[1]
46237 if v_1.Op != OpAMD64ANDQconst {
46238 break
46239 }
46240 c := v_1.AuxInt
46241 y := v_1.Args[0]
46242 if !(c&31 == 31) {
46243 break
46244 }
46245 v.reset(OpAMD64SARL)
46246 v.AddArg(x)
46247 v.AddArg(y)
46248 return true
46249 }
46250 // match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
46251 // cond: c & 31 == 31
46252 // result: (SARL x (NEGQ <t> y))
46253 for {
46254 _ = v.Args[1]
46255 x := v.Args[0]
46256 v_1 := v.Args[1]
46257 if v_1.Op != OpAMD64NEGQ {
46258 break
46259 }
46260 t := v_1.Type
46261 v_1_0 := v_1.Args[0]
46262 if v_1_0.Op != OpAMD64ANDQconst {
46263 break
46264 }
46265 c := v_1_0.AuxInt
46266 y := v_1_0.Args[0]
46267 if !(c&31 == 31) {
46268 break
46269 }
46270 v.reset(OpAMD64SARL)
46271 v.AddArg(x)
46272 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
46273 v0.AddArg(y)
46274 v.AddArg(v0)
46275 return true
46276 }
46277 // match: (SARL x (ADDLconst [c] y))
46278 // cond: c & 31 == 0
46279 // result: (SARL x y)
46280 for {
46281 _ = v.Args[1]
46282 x := v.Args[0]
46283 v_1 := v.Args[1]
46284 if v_1.Op != OpAMD64ADDLconst {
46285 break
46286 }
46287 c := v_1.AuxInt
46288 y := v_1.Args[0]
46289 if !(c&31 == 0) {
46290 break
46291 }
46292 v.reset(OpAMD64SARL)
46293 v.AddArg(x)
46294 v.AddArg(y)
46295 return true
46296 }
46297 // match: (SARL x (NEGL <t> (ADDLconst [c] y)))
46298 // cond: c & 31 == 0
46299 // result: (SARL x (NEGL <t> y))
46300 for {
46301 _ = v.Args[1]
46302 x := v.Args[0]
46303 v_1 := v.Args[1]
46304 if v_1.Op != OpAMD64NEGL {
46305 break
46306 }
46307 t := v_1.Type
46308 v_1_0 := v_1.Args[0]
46309 if v_1_0.Op != OpAMD64ADDLconst {
46310 break
46311 }
46312 c := v_1_0.AuxInt
46313 y := v_1_0.Args[0]
46314 if !(c&31 == 0) {
46315 break
46316 }
46317 v.reset(OpAMD64SARL)
46318 v.AddArg(x)
46319 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
46320 v0.AddArg(y)
46321 v.AddArg(v0)
46322 return true
46323 }
46324 // match: (SARL x (ANDLconst [c] y))
46325 // cond: c & 31 == 31
46326 // result: (SARL x y)
46327 for {
46328 _ = v.Args[1]
46329 x := v.Args[0]
46330 v_1 := v.Args[1]
46331 if v_1.Op != OpAMD64ANDLconst {
46332 break
46333 }
46334 c := v_1.AuxInt
46335 y := v_1.Args[0]
46336 if !(c&31 == 31) {
46337 break
46338 }
46339 v.reset(OpAMD64SARL)
46340 v.AddArg(x)
46341 v.AddArg(y)
46342 return true
46343 }
46344 // match: (SARL x (NEGL <t> (ANDLconst [c] y)))
46345 // cond: c & 31 == 31
46346 // result: (SARL x (NEGL <t> y))
46347 for {
46348 _ = v.Args[1]
46349 x := v.Args[0]
46350 v_1 := v.Args[1]
46351 if v_1.Op != OpAMD64NEGL {
46352 break
46353 }
46354 t := v_1.Type
46355 v_1_0 := v_1.Args[0]
46356 if v_1_0.Op != OpAMD64ANDLconst {
46357 break
46358 }
46359 c := v_1_0.AuxInt
46360 y := v_1_0.Args[0]
46361 if !(c&31 == 31) {
46362 break
46363 }
46364 v.reset(OpAMD64SARL)
46365 v.AddArg(x)
46366 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
46367 v0.AddArg(y)
46368 v.AddArg(v0)
46369 return true
46370 }
46371 return false
46372 }
46373 func rewriteValueAMD64_OpAMD64SARLconst_0(v *Value) bool {
46374 // match: (SARLconst x [0])
46375 // cond:
46376 // result: x
46377 for {
46378 if v.AuxInt != 0 {
46379 break
46380 }
46381 x := v.Args[0]
46382 v.reset(OpCopy)
46383 v.Type = x.Type
46384 v.AddArg(x)
46385 return true
46386 }
46387 // match: (SARLconst [c] (MOVQconst [d]))
46388 // cond:
46389 // result: (MOVQconst [int64(int32(d))>>uint64(c)])
46390 for {
46391 c := v.AuxInt
46392 v_0 := v.Args[0]
46393 if v_0.Op != OpAMD64MOVQconst {
46394 break
46395 }
46396 d := v_0.AuxInt
46397 v.reset(OpAMD64MOVQconst)
46398 v.AuxInt = int64(int32(d)) >> uint64(c)
46399 return true
46400 }
46401 return false
46402 }
46403 func rewriteValueAMD64_OpAMD64SARQ_0(v *Value) bool {
46404 b := v.Block
46405 // match: (SARQ x (MOVQconst [c]))
46406 // cond:
46407 // result: (SARQconst [c&63] x)
46408 for {
46409 _ = v.Args[1]
46410 x := v.Args[0]
46411 v_1 := v.Args[1]
46412 if v_1.Op != OpAMD64MOVQconst {
46413 break
46414 }
46415 c := v_1.AuxInt
46416 v.reset(OpAMD64SARQconst)
46417 v.AuxInt = c & 63
46418 v.AddArg(x)
46419 return true
46420 }
46421 // match: (SARQ x (MOVLconst [c]))
46422 // cond:
46423 // result: (SARQconst [c&63] x)
46424 for {
46425 _ = v.Args[1]
46426 x := v.Args[0]
46427 v_1 := v.Args[1]
46428 if v_1.Op != OpAMD64MOVLconst {
46429 break
46430 }
46431 c := v_1.AuxInt
46432 v.reset(OpAMD64SARQconst)
46433 v.AuxInt = c & 63
46434 v.AddArg(x)
46435 return true
46436 }
46437 // match: (SARQ x (ADDQconst [c] y))
46438 // cond: c & 63 == 0
46439 // result: (SARQ x y)
46440 for {
46441 _ = v.Args[1]
46442 x := v.Args[0]
46443 v_1 := v.Args[1]
46444 if v_1.Op != OpAMD64ADDQconst {
46445 break
46446 }
46447 c := v_1.AuxInt
46448 y := v_1.Args[0]
46449 if !(c&63 == 0) {
46450 break
46451 }
46452 v.reset(OpAMD64SARQ)
46453 v.AddArg(x)
46454 v.AddArg(y)
46455 return true
46456 }
46457 // match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
46458 // cond: c & 63 == 0
46459 // result: (SARQ x (NEGQ <t> y))
46460 for {
46461 _ = v.Args[1]
46462 x := v.Args[0]
46463 v_1 := v.Args[1]
46464 if v_1.Op != OpAMD64NEGQ {
46465 break
46466 }
46467 t := v_1.Type
46468 v_1_0 := v_1.Args[0]
46469 if v_1_0.Op != OpAMD64ADDQconst {
46470 break
46471 }
46472 c := v_1_0.AuxInt
46473 y := v_1_0.Args[0]
46474 if !(c&63 == 0) {
46475 break
46476 }
46477 v.reset(OpAMD64SARQ)
46478 v.AddArg(x)
46479 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
46480 v0.AddArg(y)
46481 v.AddArg(v0)
46482 return true
46483 }
46484 // match: (SARQ x (ANDQconst [c] y))
46485 // cond: c & 63 == 63
46486 // result: (SARQ x y)
46487 for {
46488 _ = v.Args[1]
46489 x := v.Args[0]
46490 v_1 := v.Args[1]
46491 if v_1.Op != OpAMD64ANDQconst {
46492 break
46493 }
46494 c := v_1.AuxInt
46495 y := v_1.Args[0]
46496 if !(c&63 == 63) {
46497 break
46498 }
46499 v.reset(OpAMD64SARQ)
46500 v.AddArg(x)
46501 v.AddArg(y)
46502 return true
46503 }
46504 // match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
46505 // cond: c & 63 == 63
46506 // result: (SARQ x (NEGQ <t> y))
46507 for {
46508 _ = v.Args[1]
46509 x := v.Args[0]
46510 v_1 := v.Args[1]
46511 if v_1.Op != OpAMD64NEGQ {
46512 break
46513 }
46514 t := v_1.Type
46515 v_1_0 := v_1.Args[0]
46516 if v_1_0.Op != OpAMD64ANDQconst {
46517 break
46518 }
46519 c := v_1_0.AuxInt
46520 y := v_1_0.Args[0]
46521 if !(c&63 == 63) {
46522 break
46523 }
46524 v.reset(OpAMD64SARQ)
46525 v.AddArg(x)
46526 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
46527 v0.AddArg(y)
46528 v.AddArg(v0)
46529 return true
46530 }
46531 // match: (SARQ x (ADDLconst [c] y))
46532 // cond: c & 63 == 0
46533 // result: (SARQ x y)
46534 for {
46535 _ = v.Args[1]
46536 x := v.Args[0]
46537 v_1 := v.Args[1]
46538 if v_1.Op != OpAMD64ADDLconst {
46539 break
46540 }
46541 c := v_1.AuxInt
46542 y := v_1.Args[0]
46543 if !(c&63 == 0) {
46544 break
46545 }
46546 v.reset(OpAMD64SARQ)
46547 v.AddArg(x)
46548 v.AddArg(y)
46549 return true
46550 }
46551 // match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
46552 // cond: c & 63 == 0
46553 // result: (SARQ x (NEGL <t> y))
46554 for {
46555 _ = v.Args[1]
46556 x := v.Args[0]
46557 v_1 := v.Args[1]
46558 if v_1.Op != OpAMD64NEGL {
46559 break
46560 }
46561 t := v_1.Type
46562 v_1_0 := v_1.Args[0]
46563 if v_1_0.Op != OpAMD64ADDLconst {
46564 break
46565 }
46566 c := v_1_0.AuxInt
46567 y := v_1_0.Args[0]
46568 if !(c&63 == 0) {
46569 break
46570 }
46571 v.reset(OpAMD64SARQ)
46572 v.AddArg(x)
46573 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
46574 v0.AddArg(y)
46575 v.AddArg(v0)
46576 return true
46577 }
46578 // match: (SARQ x (ANDLconst [c] y))
46579 // cond: c & 63 == 63
46580 // result: (SARQ x y)
46581 for {
46582 _ = v.Args[1]
46583 x := v.Args[0]
46584 v_1 := v.Args[1]
46585 if v_1.Op != OpAMD64ANDLconst {
46586 break
46587 }
46588 c := v_1.AuxInt
46589 y := v_1.Args[0]
46590 if !(c&63 == 63) {
46591 break
46592 }
46593 v.reset(OpAMD64SARQ)
46594 v.AddArg(x)
46595 v.AddArg(y)
46596 return true
46597 }
46598 // match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
46599 // cond: c & 63 == 63
46600 // result: (SARQ x (NEGL <t> y))
46601 for {
46602 _ = v.Args[1]
46603 x := v.Args[0]
46604 v_1 := v.Args[1]
46605 if v_1.Op != OpAMD64NEGL {
46606 break
46607 }
46608 t := v_1.Type
46609 v_1_0 := v_1.Args[0]
46610 if v_1_0.Op != OpAMD64ANDLconst {
46611 break
46612 }
46613 c := v_1_0.AuxInt
46614 y := v_1_0.Args[0]
46615 if !(c&63 == 63) {
46616 break
46617 }
46618 v.reset(OpAMD64SARQ)
46619 v.AddArg(x)
46620 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
46621 v0.AddArg(y)
46622 v.AddArg(v0)
46623 return true
46624 }
46625 return false
46626 }
46627 func rewriteValueAMD64_OpAMD64SARQconst_0(v *Value) bool {
46628 // match: (SARQconst x [0])
46629 // cond:
46630 // result: x
46631 for {
46632 if v.AuxInt != 0 {
46633 break
46634 }
46635 x := v.Args[0]
46636 v.reset(OpCopy)
46637 v.Type = x.Type
46638 v.AddArg(x)
46639 return true
46640 }
46641 // match: (SARQconst [c] (MOVQconst [d]))
46642 // cond:
46643 // result: (MOVQconst [d>>uint64(c)])
46644 for {
46645 c := v.AuxInt
46646 v_0 := v.Args[0]
46647 if v_0.Op != OpAMD64MOVQconst {
46648 break
46649 }
46650 d := v_0.AuxInt
46651 v.reset(OpAMD64MOVQconst)
46652 v.AuxInt = d >> uint64(c)
46653 return true
46654 }
46655 return false
46656 }
46657 func rewriteValueAMD64_OpAMD64SARW_0(v *Value) bool {
46658 // match: (SARW x (MOVQconst [c]))
46659 // cond:
46660 // result: (SARWconst [min(c&31,15)] x)
46661 for {
46662 _ = v.Args[1]
46663 x := v.Args[0]
46664 v_1 := v.Args[1]
46665 if v_1.Op != OpAMD64MOVQconst {
46666 break
46667 }
46668 c := v_1.AuxInt
46669 v.reset(OpAMD64SARWconst)
46670 v.AuxInt = min(c&31, 15)
46671 v.AddArg(x)
46672 return true
46673 }
46674 // match: (SARW x (MOVLconst [c]))
46675 // cond:
46676 // result: (SARWconst [min(c&31,15)] x)
46677 for {
46678 _ = v.Args[1]
46679 x := v.Args[0]
46680 v_1 := v.Args[1]
46681 if v_1.Op != OpAMD64MOVLconst {
46682 break
46683 }
46684 c := v_1.AuxInt
46685 v.reset(OpAMD64SARWconst)
46686 v.AuxInt = min(c&31, 15)
46687 v.AddArg(x)
46688 return true
46689 }
46690 return false
46691 }
46692 func rewriteValueAMD64_OpAMD64SARWconst_0(v *Value) bool {
46693 // match: (SARWconst x [0])
46694 // cond:
46695 // result: x
46696 for {
46697 if v.AuxInt != 0 {
46698 break
46699 }
46700 x := v.Args[0]
46701 v.reset(OpCopy)
46702 v.Type = x.Type
46703 v.AddArg(x)
46704 return true
46705 }
46706 // match: (SARWconst [c] (MOVQconst [d]))
46707 // cond:
46708 // result: (MOVQconst [int64(int16(d))>>uint64(c)])
46709 for {
46710 c := v.AuxInt
46711 v_0 := v.Args[0]
46712 if v_0.Op != OpAMD64MOVQconst {
46713 break
46714 }
46715 d := v_0.AuxInt
46716 v.reset(OpAMD64MOVQconst)
46717 v.AuxInt = int64(int16(d)) >> uint64(c)
46718 return true
46719 }
46720 return false
46721 }
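// SBBxcarrymask materializes 0 or -1 from the carry flag. When the flag
// value is statically known (FlagEQ, FlagLT_ULT, ...), the mask folds to
// a constant: -1 when the unsigned comparison was "less than", else 0.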
46722 func rewriteValueAMD64_OpAMD64SBBLcarrymask_0(v *Value) bool {
46723 // match: (SBBLcarrymask (FlagEQ))
46724 // cond:
46725 // result: (MOVLconst [0])
46726 for {
46727 v_0 := v.Args[0]
46728 if v_0.Op != OpAMD64FlagEQ {
46729 break
46730 }
46731 v.reset(OpAMD64MOVLconst)
46732 v.AuxInt = 0
46733 return true
46734 }
46735 // match: (SBBLcarrymask (FlagLT_ULT))
46736 // cond:
46737 // result: (MOVLconst [-1])
46738 for {
46739 v_0 := v.Args[0]
46740 if v_0.Op != OpAMD64FlagLT_ULT {
46741 break
46742 }
46743 v.reset(OpAMD64MOVLconst)
46744 v.AuxInt = -1
46745 return true
46746 }
46747 // match: (SBBLcarrymask (FlagLT_UGT))
46748 // cond:
46749 // result: (MOVLconst [0])
46750 for {
46751 v_0 := v.Args[0]
46752 if v_0.Op != OpAMD64FlagLT_UGT {
46753 break
46754 }
46755 v.reset(OpAMD64MOVLconst)
46756 v.AuxInt = 0
46757 return true
46758 }
46759 // match: (SBBLcarrymask (FlagGT_ULT))
46760 // cond:
46761 // result: (MOVLconst [-1])
46762 for {
46763 v_0 := v.Args[0]
46764 if v_0.Op != OpAMD64FlagGT_ULT {
46765 break
46766 }
46767 v.reset(OpAMD64MOVLconst)
46768 v.AuxInt = -1
46769 return true
46770 }
46771 // match: (SBBLcarrymask (FlagGT_UGT))
46772 // cond:
46773 // result: (MOVLconst [0])
46774 for {
46775 v_0 := v.Args[0]
46776 if v_0.Op != OpAMD64FlagGT_UGT {
46777 break
46778 }
46779 v.reset(OpAMD64MOVLconst)
46780 v.AuxInt = 0
46781 return true
46782 }
46783 return false
46784 }
46785 func rewriteValueAMD64_OpAMD64SBBQ_0(v *Value) bool {
46786 // match: (SBBQ x (MOVQconst [c]) borrow)
46787 // cond: is32Bit(c)
46788 // result: (SBBQconst x [c] borrow)
46789 for {
46790 borrow := v.Args[2]
46791 x := v.Args[0]
46792 v_1 := v.Args[1]
46793 if v_1.Op != OpAMD64MOVQconst {
46794 break
46795 }
46796 c := v_1.AuxInt
46797 if !(is32Bit(c)) {
46798 break
46799 }
46800 v.reset(OpAMD64SBBQconst)
46801 v.AuxInt = c
46802 v.AddArg(x)
46803 v.AddArg(borrow)
46804 return true
46805 }
46806 // match: (SBBQ x y (FlagEQ))
46807 // cond:
46808 // result: (SUBQborrow x y)
46809 for {
46810 _ = v.Args[2]
46811 x := v.Args[0]
46812 y := v.Args[1]
46813 v_2 := v.Args[2]
46814 if v_2.Op != OpAMD64FlagEQ {
46815 break
46816 }
46817 v.reset(OpAMD64SUBQborrow)
46818 v.AddArg(x)
46819 v.AddArg(y)
46820 return true
46821 }
46822 return false
46823 }
46824 func rewriteValueAMD64_OpAMD64SBBQcarrymask_0(v *Value) bool {
46825 // match: (SBBQcarrymask (FlagEQ))
46826 // cond:
46827 // result: (MOVQconst [0])
46828 for {
46829 v_0 := v.Args[0]
46830 if v_0.Op != OpAMD64FlagEQ {
46831 break
46832 }
46833 v.reset(OpAMD64MOVQconst)
46834 v.AuxInt = 0
46835 return true
46836 }
46837 // match: (SBBQcarrymask (FlagLT_ULT))
46838 // cond:
46839 // result: (MOVQconst [-1])
46840 for {
46841 v_0 := v.Args[0]
46842 if v_0.Op != OpAMD64FlagLT_ULT {
46843 break
46844 }
46845 v.reset(OpAMD64MOVQconst)
46846 v.AuxInt = -1
46847 return true
46848 }
46849 // match: (SBBQcarrymask (FlagLT_UGT))
46850 // cond:
46851 // result: (MOVQconst [0])
46852 for {
46853 v_0 := v.Args[0]
46854 if v_0.Op != OpAMD64FlagLT_UGT {
46855 break
46856 }
46857 v.reset(OpAMD64MOVQconst)
46858 v.AuxInt = 0
46859 return true
46860 }
46861 // match: (SBBQcarrymask (FlagGT_ULT))
46862 // cond:
46863 // result: (MOVQconst [-1])
46864 for {
46865 v_0 := v.Args[0]
46866 if v_0.Op != OpAMD64FlagGT_ULT {
46867 break
46868 }
46869 v.reset(OpAMD64MOVQconst)
46870 v.AuxInt = -1
46871 return true
46872 }
46873 // match: (SBBQcarrymask (FlagGT_UGT))
46874 // cond:
46875 // result: (MOVQconst [0])
46876 for {
46877 v_0 := v.Args[0]
46878 if v_0.Op != OpAMD64FlagGT_UGT {
46879 break
46880 }
46881 v.reset(OpAMD64MOVQconst)
46882 v.AuxInt = 0
46883 return true
46884 }
46885 return false
46886 }
46887 func rewriteValueAMD64_OpAMD64SBBQconst_0(v *Value) bool {
46888 // match: (SBBQconst x [c] (FlagEQ))
46889 // cond:
46890 // result: (SUBQconstborrow x [c])
46891 for {
46892 c := v.AuxInt
46893 _ = v.Args[1]
46894 x := v.Args[0]
46895 v_1 := v.Args[1]
46896 if v_1.Op != OpAMD64FlagEQ {
46897 break
46898 }
46899 v.reset(OpAMD64SUBQconstborrow)
46900 v.AuxInt = c
46901 v.AddArg(x)
46902 return true
46903 }
46904 return false
46905 }
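// The SETcc rules: a SETcc of InvertFlags becomes the mirrored condition
// (SETA <-> SETB, SETAE <-> SETBE), and a SETcc of a statically known
// flag value folds to the constant 0 or 1.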
46906 func rewriteValueAMD64_OpAMD64SETA_0(v *Value) bool {
46907 // match: (SETA (InvertFlags x))
46908 // cond:
46909 // result: (SETB x)
46910 for {
46911 v_0 := v.Args[0]
46912 if v_0.Op != OpAMD64InvertFlags {
46913 break
46914 }
46915 x := v_0.Args[0]
46916 v.reset(OpAMD64SETB)
46917 v.AddArg(x)
46918 return true
46919 }
46920 // match: (SETA (FlagEQ))
46921 // cond:
46922 // result: (MOVLconst [0])
46923 for {
46924 v_0 := v.Args[0]
46925 if v_0.Op != OpAMD64FlagEQ {
46926 break
46927 }
46928 v.reset(OpAMD64MOVLconst)
46929 v.AuxInt = 0
46930 return true
46931 }
46932 // match: (SETA (FlagLT_ULT))
46933 // cond:
46934 // result: (MOVLconst [0])
46935 for {
46936 v_0 := v.Args[0]
46937 if v_0.Op != OpAMD64FlagLT_ULT {
46938 break
46939 }
46940 v.reset(OpAMD64MOVLconst)
46941 v.AuxInt = 0
46942 return true
46943 }
46944 // match: (SETA (FlagLT_UGT))
46945 // cond:
46946 // result: (MOVLconst [1])
46947 for {
46948 v_0 := v.Args[0]
46949 if v_0.Op != OpAMD64FlagLT_UGT {
46950 break
46951 }
46952 v.reset(OpAMD64MOVLconst)
46953 v.AuxInt = 1
46954 return true
46955 }
46956 // match: (SETA (FlagGT_ULT))
46957 // cond:
46958 // result: (MOVLconst [0])
46959 for {
46960 v_0 := v.Args[0]
46961 if v_0.Op != OpAMD64FlagGT_ULT {
46962 break
46963 }
46964 v.reset(OpAMD64MOVLconst)
46965 v.AuxInt = 0
46966 return true
46967 }
46968 // match: (SETA (FlagGT_UGT))
46969 // cond:
46970 // result: (MOVLconst [1])
46971 for {
46972 v_0 := v.Args[0]
46973 if v_0.Op != OpAMD64FlagGT_UGT {
46974 break
46975 }
46976 v.reset(OpAMD64MOVLconst)
46977 v.AuxInt = 1
46978 return true
46979 }
46980 return false
46981 }
46982 func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool {
46983 // match: (SETAE (InvertFlags x))
46984 // cond:
46985 // result: (SETBE x)
46986 for {
46987 v_0 := v.Args[0]
46988 if v_0.Op != OpAMD64InvertFlags {
46989 break
46990 }
46991 x := v_0.Args[0]
46992 v.reset(OpAMD64SETBE)
46993 v.AddArg(x)
46994 return true
46995 }
46996 // match: (SETAE (FlagEQ))
46997 // cond:
46998 // result: (MOVLconst [1])
46999 for {
47000 v_0 := v.Args[0]
47001 if v_0.Op != OpAMD64FlagEQ {
47002 break
47003 }
47004 v.reset(OpAMD64MOVLconst)
47005 v.AuxInt = 1
47006 return true
47007 }
47008 // match: (SETAE (FlagLT_ULT))
47009 // cond:
47010 // result: (MOVLconst [0])
47011 for {
47012 v_0 := v.Args[0]
47013 if v_0.Op != OpAMD64FlagLT_ULT {
47014 break
47015 }
47016 v.reset(OpAMD64MOVLconst)
47017 v.AuxInt = 0
47018 return true
47019 }
47020 // match: (SETAE (FlagLT_UGT))
47021 // cond:
47022 // result: (MOVLconst [1])
47023 for {
47024 v_0 := v.Args[0]
47025 if v_0.Op != OpAMD64FlagLT_UGT {
47026 break
47027 }
47028 v.reset(OpAMD64MOVLconst)
47029 v.AuxInt = 1
47030 return true
47031 }
47032 // match: (SETAE (FlagGT_ULT))
47033 // cond:
47034 // result: (MOVLconst [0])
47035 for {
47036 v_0 := v.Args[0]
47037 if v_0.Op != OpAMD64FlagGT_ULT {
47038 break
47039 }
47040 v.reset(OpAMD64MOVLconst)
47041 v.AuxInt = 0
47042 return true
47043 }
47044 // match: (SETAE (FlagGT_UGT))
47045 // cond:
47046 // result: (MOVLconst [1])
47047 for {
47048 v_0 := v.Args[0]
47049 if v_0.Op != OpAMD64FlagGT_UGT {
47050 break
47051 }
47052 v.reset(OpAMD64MOVLconst)
47053 v.AuxInt = 1
47054 return true
47055 }
47056 return false
47057 }
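// The SETccstore variants apply the same folds as SETcc, but through a
// store: InvertFlags flips the condition, ADDQconst/LEAQ fold into the
// store offset, and known flags become a MOVBstore of constant 0 or 1.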
47058 func rewriteValueAMD64_OpAMD64SETAEstore_0(v *Value) bool {
47059 b := v.Block
47060 typ := &b.Func.Config.Types
47061 // match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem)
47062 // cond:
47063 // result: (SETBEstore [off] {sym} ptr x mem)
47064 for {
47065 off := v.AuxInt
47066 sym := v.Aux
47067 mem := v.Args[2]
47068 ptr := v.Args[0]
47069 v_1 := v.Args[1]
47070 if v_1.Op != OpAMD64InvertFlags {
47071 break
47072 }
47073 x := v_1.Args[0]
47074 v.reset(OpAMD64SETBEstore)
47075 v.AuxInt = off
47076 v.Aux = sym
47077 v.AddArg(ptr)
47078 v.AddArg(x)
47079 v.AddArg(mem)
47080 return true
47081 }
47082 // match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem)
47083 // cond: is32Bit(off1+off2)
47084 // result: (SETAEstore [off1+off2] {sym} base val mem)
47085 for {
47086 off1 := v.AuxInt
47087 sym := v.Aux
47088 mem := v.Args[2]
47089 v_0 := v.Args[0]
47090 if v_0.Op != OpAMD64ADDQconst {
47091 break
47092 }
47093 off2 := v_0.AuxInt
47094 base := v_0.Args[0]
47095 val := v.Args[1]
47096 if !(is32Bit(off1 + off2)) {
47097 break
47098 }
47099 v.reset(OpAMD64SETAEstore)
47100 v.AuxInt = off1 + off2
47101 v.Aux = sym
47102 v.AddArg(base)
47103 v.AddArg(val)
47104 v.AddArg(mem)
47105 return true
47106 }
47107 // match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
47108 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
47109 // result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
47110 for {
47111 off1 := v.AuxInt
47112 sym1 := v.Aux
47113 mem := v.Args[2]
47114 v_0 := v.Args[0]
47115 if v_0.Op != OpAMD64LEAQ {
47116 break
47117 }
47118 off2 := v_0.AuxInt
47119 sym2 := v_0.Aux
47120 base := v_0.Args[0]
47121 val := v.Args[1]
47122 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
47123 break
47124 }
47125 v.reset(OpAMD64SETAEstore)
47126 v.AuxInt = off1 + off2
47127 v.Aux = mergeSym(sym1, sym2)
47128 v.AddArg(base)
47129 v.AddArg(val)
47130 v.AddArg(mem)
47131 return true
47132 }
47133 // match: (SETAEstore [off] {sym} ptr (FlagEQ) mem)
47134 // cond:
47135 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
47136 for {
47137 off := v.AuxInt
47138 sym := v.Aux
47139 mem := v.Args[2]
47140 ptr := v.Args[0]
47141 v_1 := v.Args[1]
47142 if v_1.Op != OpAMD64FlagEQ {
47143 break
47144 }
47145 v.reset(OpAMD64MOVBstore)
47146 v.AuxInt = off
47147 v.Aux = sym
47148 v.AddArg(ptr)
47149 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47150 v0.AuxInt = 1
47151 v.AddArg(v0)
47152 v.AddArg(mem)
47153 return true
47154 }
47155 // match: (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem)
47156 // cond:
47157 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
47158 for {
47159 off := v.AuxInt
47160 sym := v.Aux
47161 mem := v.Args[2]
47162 ptr := v.Args[0]
47163 v_1 := v.Args[1]
47164 if v_1.Op != OpAMD64FlagLT_ULT {
47165 break
47166 }
47167 v.reset(OpAMD64MOVBstore)
47168 v.AuxInt = off
47169 v.Aux = sym
47170 v.AddArg(ptr)
47171 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47172 v0.AuxInt = 0
47173 v.AddArg(v0)
47174 v.AddArg(mem)
47175 return true
47176 }
47177 // match: (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem)
47178 // cond:
47179 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
47180 for {
47181 off := v.AuxInt
47182 sym := v.Aux
47183 mem := v.Args[2]
47184 ptr := v.Args[0]
47185 v_1 := v.Args[1]
47186 if v_1.Op != OpAMD64FlagLT_UGT {
47187 break
47188 }
47189 v.reset(OpAMD64MOVBstore)
47190 v.AuxInt = off
47191 v.Aux = sym
47192 v.AddArg(ptr)
47193 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47194 v0.AuxInt = 1
47195 v.AddArg(v0)
47196 v.AddArg(mem)
47197 return true
47198 }
47199 // match: (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem)
47200 // cond:
47201 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
47202 for {
47203 off := v.AuxInt
47204 sym := v.Aux
47205 mem := v.Args[2]
47206 ptr := v.Args[0]
47207 v_1 := v.Args[1]
47208 if v_1.Op != OpAMD64FlagGT_ULT {
47209 break
47210 }
47211 v.reset(OpAMD64MOVBstore)
47212 v.AuxInt = off
47213 v.Aux = sym
47214 v.AddArg(ptr)
47215 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47216 v0.AuxInt = 0
47217 v.AddArg(v0)
47218 v.AddArg(mem)
47219 return true
47220 }
47221 // match: (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem)
47222 // cond:
47223 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
47224 for {
47225 off := v.AuxInt
47226 sym := v.Aux
47227 mem := v.Args[2]
47228 ptr := v.Args[0]
47229 v_1 := v.Args[1]
47230 if v_1.Op != OpAMD64FlagGT_UGT {
47231 break
47232 }
47233 v.reset(OpAMD64MOVBstore)
47234 v.AuxInt = off
47235 v.Aux = sym
47236 v.AddArg(ptr)
47237 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47238 v0.AuxInt = 1
47239 v.AddArg(v0)
47240 v.AddArg(mem)
47241 return true
47242 }
47243 return false
47244 }
47245 func rewriteValueAMD64_OpAMD64SETAstore_0(v *Value) bool {
47246 b := v.Block
47247 typ := &b.Func.Config.Types
47248 // match: (SETAstore [off] {sym} ptr (InvertFlags x) mem)
47249 // cond:
47250 // result: (SETBstore [off] {sym} ptr x mem)
47251 for {
47252 off := v.AuxInt
47253 sym := v.Aux
47254 mem := v.Args[2]
47255 ptr := v.Args[0]
47256 v_1 := v.Args[1]
47257 if v_1.Op != OpAMD64InvertFlags {
47258 break
47259 }
47260 x := v_1.Args[0]
47261 v.reset(OpAMD64SETBstore)
47262 v.AuxInt = off
47263 v.Aux = sym
47264 v.AddArg(ptr)
47265 v.AddArg(x)
47266 v.AddArg(mem)
47267 return true
47268 }
47269 // match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem)
47270 // cond: is32Bit(off1+off2)
47271 // result: (SETAstore [off1+off2] {sym} base val mem)
47272 for {
47273 off1 := v.AuxInt
47274 sym := v.Aux
47275 mem := v.Args[2]
47276 v_0 := v.Args[0]
47277 if v_0.Op != OpAMD64ADDQconst {
47278 break
47279 }
47280 off2 := v_0.AuxInt
47281 base := v_0.Args[0]
47282 val := v.Args[1]
47283 if !(is32Bit(off1 + off2)) {
47284 break
47285 }
47286 v.reset(OpAMD64SETAstore)
47287 v.AuxInt = off1 + off2
47288 v.Aux = sym
47289 v.AddArg(base)
47290 v.AddArg(val)
47291 v.AddArg(mem)
47292 return true
47293 }
47294 // match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
47295 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
47296 // result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
47297 for {
47298 off1 := v.AuxInt
47299 sym1 := v.Aux
47300 mem := v.Args[2]
47301 v_0 := v.Args[0]
47302 if v_0.Op != OpAMD64LEAQ {
47303 break
47304 }
47305 off2 := v_0.AuxInt
47306 sym2 := v_0.Aux
47307 base := v_0.Args[0]
47308 val := v.Args[1]
47309 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
47310 break
47311 }
47312 v.reset(OpAMD64SETAstore)
47313 v.AuxInt = off1 + off2
47314 v.Aux = mergeSym(sym1, sym2)
47315 v.AddArg(base)
47316 v.AddArg(val)
47317 v.AddArg(mem)
47318 return true
47319 }
47320 // match: (SETAstore [off] {sym} ptr (FlagEQ) mem)
47321 // cond:
47322 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
47323 for {
47324 off := v.AuxInt
47325 sym := v.Aux
47326 mem := v.Args[2]
47327 ptr := v.Args[0]
47328 v_1 := v.Args[1]
47329 if v_1.Op != OpAMD64FlagEQ {
47330 break
47331 }
47332 v.reset(OpAMD64MOVBstore)
47333 v.AuxInt = off
47334 v.Aux = sym
47335 v.AddArg(ptr)
47336 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47337 v0.AuxInt = 0
47338 v.AddArg(v0)
47339 v.AddArg(mem)
47340 return true
47341 }
47342 // match: (SETAstore [off] {sym} ptr (FlagLT_ULT) mem)
47343 // cond:
47344 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
47345 for {
47346 off := v.AuxInt
47347 sym := v.Aux
47348 mem := v.Args[2]
47349 ptr := v.Args[0]
47350 v_1 := v.Args[1]
47351 if v_1.Op != OpAMD64FlagLT_ULT {
47352 break
47353 }
47354 v.reset(OpAMD64MOVBstore)
47355 v.AuxInt = off
47356 v.Aux = sym
47357 v.AddArg(ptr)
47358 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47359 v0.AuxInt = 0
47360 v.AddArg(v0)
47361 v.AddArg(mem)
47362 return true
47363 }
47364 // match: (SETAstore [off] {sym} ptr (FlagLT_UGT) mem)
47365 // cond:
47366 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
47367 for {
47368 off := v.AuxInt
47369 sym := v.Aux
47370 mem := v.Args[2]
47371 ptr := v.Args[0]
47372 v_1 := v.Args[1]
47373 if v_1.Op != OpAMD64FlagLT_UGT {
47374 break
47375 }
47376 v.reset(OpAMD64MOVBstore)
47377 v.AuxInt = off
47378 v.Aux = sym
47379 v.AddArg(ptr)
47380 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47381 v0.AuxInt = 1
47382 v.AddArg(v0)
47383 v.AddArg(mem)
47384 return true
47385 }
47386 // match: (SETAstore [off] {sym} ptr (FlagGT_ULT) mem)
47387 // cond:
47388 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
47389 for {
47390 off := v.AuxInt
47391 sym := v.Aux
47392 mem := v.Args[2]
47393 ptr := v.Args[0]
47394 v_1 := v.Args[1]
47395 if v_1.Op != OpAMD64FlagGT_ULT {
47396 break
47397 }
47398 v.reset(OpAMD64MOVBstore)
47399 v.AuxInt = off
47400 v.Aux = sym
47401 v.AddArg(ptr)
47402 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47403 v0.AuxInt = 0
47404 v.AddArg(v0)
47405 v.AddArg(mem)
47406 return true
47407 }
47408 // match: (SETAstore [off] {sym} ptr (FlagGT_UGT) mem)
47409 // cond:
47410 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
47411 for {
47412 off := v.AuxInt
47413 sym := v.Aux
47414 mem := v.Args[2]
47415 ptr := v.Args[0]
47416 v_1 := v.Args[1]
47417 if v_1.Op != OpAMD64FlagGT_UGT {
47418 break
47419 }
47420 v.reset(OpAMD64MOVBstore)
47421 v.AuxInt = off
47422 v.Aux = sym
47423 v.AddArg(ptr)
47424 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47425 v0.AuxInt = 1
47426 v.AddArg(v0)
47427 v.AddArg(mem)
47428 return true
47429 }
47430 return false
47431 }
47432 func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool {
47433 // match: (SETB (InvertFlags x))
47434 // cond:
47435 // result: (SETA x)
47436 for {
47437 v_0 := v.Args[0]
47438 if v_0.Op != OpAMD64InvertFlags {
47439 break
47440 }
47441 x := v_0.Args[0]
47442 v.reset(OpAMD64SETA)
47443 v.AddArg(x)
47444 return true
47445 }
47446 // match: (SETB (FlagEQ))
47447 // cond:
47448 // result: (MOVLconst [0])
47449 for {
47450 v_0 := v.Args[0]
47451 if v_0.Op != OpAMD64FlagEQ {
47452 break
47453 }
47454 v.reset(OpAMD64MOVLconst)
47455 v.AuxInt = 0
47456 return true
47457 }
47458 // match: (SETB (FlagLT_ULT))
47459 // cond:
47460 // result: (MOVLconst [1])
47461 for {
47462 v_0 := v.Args[0]
47463 if v_0.Op != OpAMD64FlagLT_ULT {
47464 break
47465 }
47466 v.reset(OpAMD64MOVLconst)
47467 v.AuxInt = 1
47468 return true
47469 }
47470 // match: (SETB (FlagLT_UGT))
47471 // cond:
47472 // result: (MOVLconst [0])
47473 for {
47474 v_0 := v.Args[0]
47475 if v_0.Op != OpAMD64FlagLT_UGT {
47476 break
47477 }
47478 v.reset(OpAMD64MOVLconst)
47479 v.AuxInt = 0
47480 return true
47481 }
47482 // match: (SETB (FlagGT_ULT))
47483 // cond:
47484 // result: (MOVLconst [1])
47485 for {
47486 v_0 := v.Args[0]
47487 if v_0.Op != OpAMD64FlagGT_ULT {
47488 break
47489 }
47490 v.reset(OpAMD64MOVLconst)
47491 v.AuxInt = 1
47492 return true
47493 }
47494 // match: (SETB (FlagGT_UGT))
47495 // cond:
47496 // result: (MOVLconst [0])
47497 for {
47498 v_0 := v.Args[0]
47499 if v_0.Op != OpAMD64FlagGT_UGT {
47500 break
47501 }
47502 v.reset(OpAMD64MOVLconst)
47503 v.AuxInt = 0
47504 return true
47505 }
47506 return false
47507 }
47508 func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool {
47509 // match: (SETBE (InvertFlags x))
47510 // cond:
47511 // result: (SETAE x)
47512 for {
47513 v_0 := v.Args[0]
47514 if v_0.Op != OpAMD64InvertFlags {
47515 break
47516 }
47517 x := v_0.Args[0]
47518 v.reset(OpAMD64SETAE)
47519 v.AddArg(x)
47520 return true
47521 }
47522 // match: (SETBE (FlagEQ))
47523 // cond:
47524 // result: (MOVLconst [1])
47525 for {
47526 v_0 := v.Args[0]
47527 if v_0.Op != OpAMD64FlagEQ {
47528 break
47529 }
47530 v.reset(OpAMD64MOVLconst)
47531 v.AuxInt = 1
47532 return true
47533 }
47534 // match: (SETBE (FlagLT_ULT))
47535 // cond:
47536 // result: (MOVLconst [1])
47537 for {
47538 v_0 := v.Args[0]
47539 if v_0.Op != OpAMD64FlagLT_ULT {
47540 break
47541 }
47542 v.reset(OpAMD64MOVLconst)
47543 v.AuxInt = 1
47544 return true
47545 }
47546 // match: (SETBE (FlagLT_UGT))
47547 // cond:
47548 // result: (MOVLconst [0])
47549 for {
47550 v_0 := v.Args[0]
47551 if v_0.Op != OpAMD64FlagLT_UGT {
47552 break
47553 }
47554 v.reset(OpAMD64MOVLconst)
47555 v.AuxInt = 0
47556 return true
47557 }
47558 // match: (SETBE (FlagGT_ULT))
47559 // cond:
47560 // result: (MOVLconst [1])
47561 for {
47562 v_0 := v.Args[0]
47563 if v_0.Op != OpAMD64FlagGT_ULT {
47564 break
47565 }
47566 v.reset(OpAMD64MOVLconst)
47567 v.AuxInt = 1
47568 return true
47569 }
47570 // match: (SETBE (FlagGT_UGT))
47571 // cond:
47572 // result: (MOVLconst [0])
47573 for {
47574 v_0 := v.Args[0]
47575 if v_0.Op != OpAMD64FlagGT_UGT {
47576 break
47577 }
47578 v.reset(OpAMD64MOVLconst)
47579 v.AuxInt = 0
47580 return true
47581 }
47582 return false
47583 }
47584 func rewriteValueAMD64_OpAMD64SETBEstore_0(v *Value) bool {
47585 b := v.Block
47586 typ := &b.Func.Config.Types
47587 // match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem)
47588 // cond:
47589 // result: (SETAEstore [off] {sym} ptr x mem)
47590 for {
47591 off := v.AuxInt
47592 sym := v.Aux
47593 mem := v.Args[2]
47594 ptr := v.Args[0]
47595 v_1 := v.Args[1]
47596 if v_1.Op != OpAMD64InvertFlags {
47597 break
47598 }
47599 x := v_1.Args[0]
47600 v.reset(OpAMD64SETAEstore)
47601 v.AuxInt = off
47602 v.Aux = sym
47603 v.AddArg(ptr)
47604 v.AddArg(x)
47605 v.AddArg(mem)
47606 return true
47607 }
47608 // match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem)
47609 // cond: is32Bit(off1+off2)
47610 // result: (SETBEstore [off1+off2] {sym} base val mem)
47611 for {
47612 off1 := v.AuxInt
47613 sym := v.Aux
47614 mem := v.Args[2]
47615 v_0 := v.Args[0]
47616 if v_0.Op != OpAMD64ADDQconst {
47617 break
47618 }
47619 off2 := v_0.AuxInt
47620 base := v_0.Args[0]
47621 val := v.Args[1]
47622 if !(is32Bit(off1 + off2)) {
47623 break
47624 }
47625 v.reset(OpAMD64SETBEstore)
47626 v.AuxInt = off1 + off2
47627 v.Aux = sym
47628 v.AddArg(base)
47629 v.AddArg(val)
47630 v.AddArg(mem)
47631 return true
47632 }
47633 // match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
47634 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
47635 // result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
47636 for {
47637 off1 := v.AuxInt
47638 sym1 := v.Aux
47639 mem := v.Args[2]
47640 v_0 := v.Args[0]
47641 if v_0.Op != OpAMD64LEAQ {
47642 break
47643 }
47644 off2 := v_0.AuxInt
47645 sym2 := v_0.Aux
47646 base := v_0.Args[0]
47647 val := v.Args[1]
47648 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
47649 break
47650 }
47651 v.reset(OpAMD64SETBEstore)
47652 v.AuxInt = off1 + off2
47653 v.Aux = mergeSym(sym1, sym2)
47654 v.AddArg(base)
47655 v.AddArg(val)
47656 v.AddArg(mem)
47657 return true
47658 }
47659 // match: (SETBEstore [off] {sym} ptr (FlagEQ) mem)
47660 // cond:
47661 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
47662 for {
47663 off := v.AuxInt
47664 sym := v.Aux
47665 mem := v.Args[2]
47666 ptr := v.Args[0]
47667 v_1 := v.Args[1]
47668 if v_1.Op != OpAMD64FlagEQ {
47669 break
47670 }
47671 v.reset(OpAMD64MOVBstore)
47672 v.AuxInt = off
47673 v.Aux = sym
47674 v.AddArg(ptr)
47675 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47676 v0.AuxInt = 1
47677 v.AddArg(v0)
47678 v.AddArg(mem)
47679 return true
47680 }
47681 // match: (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem)
47682 // cond:
47683 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
47684 for {
47685 off := v.AuxInt
47686 sym := v.Aux
47687 mem := v.Args[2]
47688 ptr := v.Args[0]
47689 v_1 := v.Args[1]
47690 if v_1.Op != OpAMD64FlagLT_ULT {
47691 break
47692 }
47693 v.reset(OpAMD64MOVBstore)
47694 v.AuxInt = off
47695 v.Aux = sym
47696 v.AddArg(ptr)
47697 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47698 v0.AuxInt = 1
47699 v.AddArg(v0)
47700 v.AddArg(mem)
47701 return true
47702 }
47703 // match: (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem)
47704 // cond:
47705 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
47706 for {
47707 off := v.AuxInt
47708 sym := v.Aux
47709 mem := v.Args[2]
47710 ptr := v.Args[0]
47711 v_1 := v.Args[1]
47712 if v_1.Op != OpAMD64FlagLT_UGT {
47713 break
47714 }
47715 v.reset(OpAMD64MOVBstore)
47716 v.AuxInt = off
47717 v.Aux = sym
47718 v.AddArg(ptr)
47719 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47720 v0.AuxInt = 0
47721 v.AddArg(v0)
47722 v.AddArg(mem)
47723 return true
47724 }
47725 // match: (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem)
47726 // cond:
47727 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
47728 for {
47729 off := v.AuxInt
47730 sym := v.Aux
47731 mem := v.Args[2]
47732 ptr := v.Args[0]
47733 v_1 := v.Args[1]
47734 if v_1.Op != OpAMD64FlagGT_ULT {
47735 break
47736 }
47737 v.reset(OpAMD64MOVBstore)
47738 v.AuxInt = off
47739 v.Aux = sym
47740 v.AddArg(ptr)
47741 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47742 v0.AuxInt = 1
47743 v.AddArg(v0)
47744 v.AddArg(mem)
47745 return true
47746 }
47747 // match: (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem)
47748 // cond:
47749 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
47750 for {
47751 off := v.AuxInt
47752 sym := v.Aux
47753 mem := v.Args[2]
47754 ptr := v.Args[0]
47755 v_1 := v.Args[1]
47756 if v_1.Op != OpAMD64FlagGT_UGT {
47757 break
47758 }
47759 v.reset(OpAMD64MOVBstore)
47760 v.AuxInt = off
47761 v.Aux = sym
47762 v.AddArg(ptr)
47763 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47764 v0.AuxInt = 0
47765 v.AddArg(v0)
47766 v.AddArg(mem)
47767 return true
47768 }
47769 return false
47770 }
47771 func rewriteValueAMD64_OpAMD64SETBstore_0(v *Value) bool {
47772 b := v.Block
47773 typ := &b.Func.Config.Types
47774 // match: (SETBstore [off] {sym} ptr (InvertFlags x) mem)
47775 // cond:
47776 // result: (SETAstore [off] {sym} ptr x mem)
47777 for {
47778 off := v.AuxInt
47779 sym := v.Aux
47780 mem := v.Args[2]
47781 ptr := v.Args[0]
47782 v_1 := v.Args[1]
47783 if v_1.Op != OpAMD64InvertFlags {
47784 break
47785 }
47786 x := v_1.Args[0]
47787 v.reset(OpAMD64SETAstore)
47788 v.AuxInt = off
47789 v.Aux = sym
47790 v.AddArg(ptr)
47791 v.AddArg(x)
47792 v.AddArg(mem)
47793 return true
47794 }
47795 // match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem)
47796 // cond: is32Bit(off1+off2)
47797 // result: (SETBstore [off1+off2] {sym} base val mem)
47798 for {
47799 off1 := v.AuxInt
47800 sym := v.Aux
47801 mem := v.Args[2]
47802 v_0 := v.Args[0]
47803 if v_0.Op != OpAMD64ADDQconst {
47804 break
47805 }
47806 off2 := v_0.AuxInt
47807 base := v_0.Args[0]
47808 val := v.Args[1]
47809 if !(is32Bit(off1 + off2)) {
47810 break
47811 }
47812 v.reset(OpAMD64SETBstore)
47813 v.AuxInt = off1 + off2
47814 v.Aux = sym
47815 v.AddArg(base)
47816 v.AddArg(val)
47817 v.AddArg(mem)
47818 return true
47819 }
47820 // match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
47821 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
47822 // result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
47823 for {
47824 off1 := v.AuxInt
47825 sym1 := v.Aux
47826 mem := v.Args[2]
47827 v_0 := v.Args[0]
47828 if v_0.Op != OpAMD64LEAQ {
47829 break
47830 }
47831 off2 := v_0.AuxInt
47832 sym2 := v_0.Aux
47833 base := v_0.Args[0]
47834 val := v.Args[1]
47835 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
47836 break
47837 }
47838 v.reset(OpAMD64SETBstore)
47839 v.AuxInt = off1 + off2
47840 v.Aux = mergeSym(sym1, sym2)
47841 v.AddArg(base)
47842 v.AddArg(val)
47843 v.AddArg(mem)
47844 return true
47845 }
47846 // match: (SETBstore [off] {sym} ptr (FlagEQ) mem)
47847 // cond:
47848 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
47849 for {
47850 off := v.AuxInt
47851 sym := v.Aux
47852 mem := v.Args[2]
47853 ptr := v.Args[0]
47854 v_1 := v.Args[1]
47855 if v_1.Op != OpAMD64FlagEQ {
47856 break
47857 }
47858 v.reset(OpAMD64MOVBstore)
47859 v.AuxInt = off
47860 v.Aux = sym
47861 v.AddArg(ptr)
47862 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47863 v0.AuxInt = 0
47864 v.AddArg(v0)
47865 v.AddArg(mem)
47866 return true
47867 }
47868 // match: (SETBstore [off] {sym} ptr (FlagLT_ULT) mem)
47869 // cond:
47870 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
47871 for {
47872 off := v.AuxInt
47873 sym := v.Aux
47874 mem := v.Args[2]
47875 ptr := v.Args[0]
47876 v_1 := v.Args[1]
47877 if v_1.Op != OpAMD64FlagLT_ULT {
47878 break
47879 }
47880 v.reset(OpAMD64MOVBstore)
47881 v.AuxInt = off
47882 v.Aux = sym
47883 v.AddArg(ptr)
47884 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47885 v0.AuxInt = 1
47886 v.AddArg(v0)
47887 v.AddArg(mem)
47888 return true
47889 }
47890
47891
47892
47893 for {
47894 off := v.AuxInt
47895 sym := v.Aux
47896 mem := v.Args[2]
47897 ptr := v.Args[0]
47898 v_1 := v.Args[1]
47899 if v_1.Op != OpAMD64FlagLT_UGT {
47900 break
47901 }
47902 v.reset(OpAMD64MOVBstore)
47903 v.AuxInt = off
47904 v.Aux = sym
47905 v.AddArg(ptr)
47906 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47907 v0.AuxInt = 0
47908 v.AddArg(v0)
47909 v.AddArg(mem)
47910 return true
47911 }
47912
47913
47914
47915 for {
47916 off := v.AuxInt
47917 sym := v.Aux
47918 mem := v.Args[2]
47919 ptr := v.Args[0]
47920 v_1 := v.Args[1]
47921 if v_1.Op != OpAMD64FlagGT_ULT {
47922 break
47923 }
47924 v.reset(OpAMD64MOVBstore)
47925 v.AuxInt = off
47926 v.Aux = sym
47927 v.AddArg(ptr)
47928 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47929 v0.AuxInt = 1
47930 v.AddArg(v0)
47931 v.AddArg(mem)
47932 return true
47933 }
47934
47935
47936
47937 for {
47938 off := v.AuxInt
47939 sym := v.Aux
47940 mem := v.Args[2]
47941 ptr := v.Args[0]
47942 v_1 := v.Args[1]
47943 if v_1.Op != OpAMD64FlagGT_UGT {
47944 break
47945 }
47946 v.reset(OpAMD64MOVBstore)
47947 v.AuxInt = off
47948 v.Aux = sym
47949 v.AddArg(ptr)
47950 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
47951 v0.AuxInt = 0
47952 v.AddArg(v0)
47953 v.AddArg(mem)
47954 return true
47955 }
47956 return false
47957 }
func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool {
	b := v.Block
	config := b.Func.Config
	// match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETAE (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		y := v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLL {
			break
		}
		x := v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL y (SHLL (MOVLconst [1]) x)))
	// cond: !config.nacl
	// result: (SETAE (BTL x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLL {
			break
		}
		x := v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// cond: !config.nacl
	// result: (SETAE (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		y := v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64SHLQ {
			break
		}
		x := v_0_0.Args[1]
		v_0_0_0 := v_0_0.Args[0]
		if v_0_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_0_0.AuxInt != 1 {
			break
		}
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
	// cond: !config.nacl
	// result: (SETAE (BTQ x y))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		y := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64SHLQ {
			break
		}
		x := v_0_1.Args[1]
		v_0_1_0 := v_0_1.Args[0]
		if v_0_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_0_1_0.AuxInt != 1 {
			break
		}
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTLconst [c] x))
	// cond: isUint32PowerOfTwo(c) && !config.nacl
	// result: (SETAE (BTLconst [log2uint32(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isUint32PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2uint32(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQconst [c] x))
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := v_0.AuxInt
		x := v_0.Args[0]
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (MOVQconst [c]) x))
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		x := v_0.Args[1]
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_0.AuxInt
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ x (MOVQconst [c])))
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETAE (BTQconst [log2(c)] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		x := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		if v_0_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_0_1.AuxInt
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (CMPLconst [1] s:(ANDLconst [1] _)))
	// cond:
	// result: (SETNE (CMPLconst [0] s))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64CMPLconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDLconst {
			break
		}
		if s.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (CMPQconst [1] s:(ANDQconst [1] _)))
	// cond:
	// result: (SETNE (CMPQconst [0] s))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64CMPQconst {
			break
		}
		if v_0.AuxInt != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDQconst {
			break
		}
		if s.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool {
	b := v.Block
	config := b.Func.Config
	// match: (SETEQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTQconst [63] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		z2 := v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHLQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTQconst [63] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHLQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTQconst [31] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		z2 := v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHLLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x))))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTQconst [31] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHLLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTQconst [0] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		z2 := v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTQconst [0] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTLconst [0] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		z2 := v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTLconst [0] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] x) z2))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTQconst [63] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		z2 := v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		x := z1.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z2 z1:(SHRQconst [63] x)))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTQconst [63] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		x := z1.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQ_20(v *Value) bool {
	b := v.Block
	config := b.Func.Config
	// match: (SETEQ (TESTL z1:(SHRLconst [31] x) z2))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTLconst [31] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		z2 := v_0.Args[1]
		z1 := v_0.Args[0]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		x := z1.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTL z2 z1:(SHRLconst [31] x)))
	// cond: z1==z2 && !config.nacl
	// result: (SETAE (BTLconst [31] x))
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		z2 := v_0.Args[0]
		z1 := v_0.Args[1]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		x := z1.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (InvertFlags x))
	// cond:
	// result: (SETEQ x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (SETEQ (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETEQ (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETEQ (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQstore_0(v *Value) bool {
	b := v.Block
	config := b.Func.Config
	// match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLL {
			break
		}
		x := v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLL {
			break
		}
		x := v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVLconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// cond: !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		y := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64SHLQ {
			break
		}
		x := v_1_0.Args[1]
		v_1_0_0 := v_1_0.Args[0]
		if v_1_0_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_0_0.AuxInt != 1 {
			break
		}
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
	// cond: !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		y := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64SHLQ {
			break
		}
		x := v_1_1.Args[1]
		v_1_1_0 := v_1_1.Args[0]
		if v_1_1_0.Op != OpAMD64MOVQconst {
			break
		}
		if v_1_1_0.AuxInt != 1 {
			break
		}
		if !(!config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isUint32PowerOfTwo(c) && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		if !(isUint32PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = log2uint32(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := v_1.AuxInt
		x := v_1.Args[0]
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		x := v_1.Args[1]
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_0.AuxInt
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
	// cond: isUint64PowerOfTwo(c) && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		x := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		if v_1_1.Op != OpAMD64MOVQconst {
			break
		}
		c := v_1_1.AuxInt
		if !(isUint64PowerOfTwo(c) && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = log2(c)
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
	// cond:
	// result: (SETNEstore [off] {sym} ptr (CMPLconst [0] s) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64CMPLconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDLconst {
			break
		}
		if s.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(s)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
	// cond:
	// result: (SETNEstore [off] {sym} ptr (CMPQconst [0] s) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64CMPQconst {
			break
		}
		if v_1.AuxInt != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDQconst {
			break
		}
		if s.AuxInt != 1 {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(s)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQstore_10(v *Value) bool {
	b := v.Block
	config := b.Func.Config
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		z2 := v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHLQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHLQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		z2 := v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHLLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z2 z1:(SHLLconst [31] (SHRLconst [31] x))) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHLLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHRLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		z2 := v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLQconst {
			break
		}
		if z1_0.AuxInt != 63 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		z2 := v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		z1_0 := z1.Args[0]
		if z1_0.Op != OpAMD64SHLLconst {
			break
		}
		if z1_0.AuxInt != 31 {
			break
		}
		x := z1_0.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 0
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		z2 := v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		x := z1.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] x)) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRQconst {
			break
		}
		if z1.AuxInt != 63 {
			break
		}
		x := z1.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = 63
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETEQstore_20(v *Value) bool {
	b := v.Block
	config := b.Func.Config
	typ := &b.Func.Config.Types
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		z2 := v_1.Args[1]
		z1 := v_1.Args[0]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		x := z1.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] x)) mem)
	// cond: z1==z2 && !config.nacl
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		z2 := v_1.Args[0]
		z1 := v_1.Args[1]
		if z1.Op != OpAMD64SHRLconst {
			break
		}
		if z1.AuxInt != 31 {
			break
		}
		x := z1.Args[0]
		if !(z1 == z2 && !config.nacl) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = 31
		v0.AddArg(x)
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETEQstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETEQstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool {
	// match: (SETG (InvertFlags x))
	// cond:
	// result: (SETL x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (SETG (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETG (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETG (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool {
	// match: (SETGE (InvertFlags x))
	// cond:
	// result: (SETLE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (SETGE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETGE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETGE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETGEstore_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETGEstore [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETLEstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETGEstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETGEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETGstore_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETGstore [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETLstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		v.reset(OpAMD64SETLstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETGstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETGstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETGstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETGstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool {
	// match: (SETL (InvertFlags x))
	// cond:
	// result: (SETG x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}
	// match: (SETL (FlagEQ))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETL (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETL (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETL (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETL (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool {
	// match: (SETLE (InvertFlags x))
	// cond:
	// result: (SETGE x)
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}
	// match: (SETLE (FlagEQ))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagLT_ULT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagLT_UGT))
	// cond:
	// result: (MOVLconst [1])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 1
		return true
	}
	// match: (SETLE (FlagGT_ULT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	// match: (SETLE (FlagGT_UGT))
	// cond:
	// result: (MOVLconst [0])
	for {
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = 0
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETLEstore_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETLEstore [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETGEstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETLEstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETLEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr (FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SETLstore_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETLstore [off] {sym} ptr (InvertFlags x) mem)
	// cond:
	// result: (SETGstore [off] {sym} ptr x mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		v.reset(OpAMD64SETGstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v.AddArg(x)
		v.AddArg(mem)
		return true
	}
	// match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(off1+off2)
	// result: (SETLstore [off1+off2] {sym} base val mem)
	for {
		off1 := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := v_0.AuxInt
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1 + off2)) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = off1 + off2
		v.Aux = sym
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
	// result: (SETLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := v.AuxInt
		sym1 := v.Aux
		mem := v.Args[2]
		v_0 := v.Args[0]
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := v_0.AuxInt
		sym2 := v_0.Aux
		base := v_0.Args[0]
		val := v.Args[1]
		if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = off1 + off2
		v.Aux = mergeSym(sym1, sym2)
		v.AddArg(base)
		v.AddArg(val)
		v.AddArg(mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagEQ) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagLT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagLT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 1
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagGT_ULT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	// match: (SETLstore [off] {sym} ptr (FlagGT_UGT) mem)
	// cond:
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := v.AuxInt
		sym := v.Aux
		mem := v.Args[2]
		ptr := v.Args[0]
		v_1 := v.Args[1]
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = off
		v.Aux = sym
		v.AddArg(ptr)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = 0
		v.AddArg(v0)
		v.AddArg(mem)
		return true
	}
	return false
}
50774 func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool {
50775 b := v.Block
50776 config := b.Func.Config
50777
50778
50779
50780 for {
50781 v_0 := v.Args[0]
50782 if v_0.Op != OpAMD64TESTL {
50783 break
50784 }
50785 y := v_0.Args[1]
50786 v_0_0 := v_0.Args[0]
50787 if v_0_0.Op != OpAMD64SHLL {
50788 break
50789 }
50790 x := v_0_0.Args[1]
50791 v_0_0_0 := v_0_0.Args[0]
50792 if v_0_0_0.Op != OpAMD64MOVLconst {
50793 break
50794 }
50795 if v_0_0_0.AuxInt != 1 {
50796 break
50797 }
50798 if !(!config.nacl) {
50799 break
50800 }
50801 v.reset(OpAMD64SETB)
50802 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
50803 v0.AddArg(x)
50804 v0.AddArg(y)
50805 v.AddArg(v0)
50806 return true
50807 }
50808
50809
50810
50811 for {
50812 v_0 := v.Args[0]
50813 if v_0.Op != OpAMD64TESTL {
50814 break
50815 }
50816 _ = v_0.Args[1]
50817 y := v_0.Args[0]
50818 v_0_1 := v_0.Args[1]
50819 if v_0_1.Op != OpAMD64SHLL {
50820 break
50821 }
50822 x := v_0_1.Args[1]
50823 v_0_1_0 := v_0_1.Args[0]
50824 if v_0_1_0.Op != OpAMD64MOVLconst {
50825 break
50826 }
50827 if v_0_1_0.AuxInt != 1 {
50828 break
50829 }
50830 if !(!config.nacl) {
50831 break
50832 }
50833 v.reset(OpAMD64SETB)
50834 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
50835 v0.AddArg(x)
50836 v0.AddArg(y)
50837 v.AddArg(v0)
50838 return true
50839 }
50840 // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y))
50841 // cond: !config.nacl
50842 // result: (SETB (BTQ x y))
50843 for {
50844 v_0 := v.Args[0]
50845 if v_0.Op != OpAMD64TESTQ {
50846 break
50847 }
50848 y := v_0.Args[1]
50849 v_0_0 := v_0.Args[0]
50850 if v_0_0.Op != OpAMD64SHLQ {
50851 break
50852 }
50853 x := v_0_0.Args[1]
50854 v_0_0_0 := v_0_0.Args[0]
50855 if v_0_0_0.Op != OpAMD64MOVQconst {
50856 break
50857 }
50858 if v_0_0_0.AuxInt != 1 {
50859 break
50860 }
50861 if !(!config.nacl) {
50862 break
50863 }
50864 v.reset(OpAMD64SETB)
50865 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
50866 v0.AddArg(x)
50867 v0.AddArg(y)
50868 v.AddArg(v0)
50869 return true
50870 }
50871 // match: (SETNE (TESTQ y (SHLQ (MOVQconst [1]) x)))
50872 // cond: !config.nacl
50873 // result: (SETB (BTQ x y))
50874 for {
50875 v_0 := v.Args[0]
50876 if v_0.Op != OpAMD64TESTQ {
50877 break
50878 }
50879 _ = v_0.Args[1]
50880 y := v_0.Args[0]
50881 v_0_1 := v_0.Args[1]
50882 if v_0_1.Op != OpAMD64SHLQ {
50883 break
50884 }
50885 x := v_0_1.Args[1]
50886 v_0_1_0 := v_0_1.Args[0]
50887 if v_0_1_0.Op != OpAMD64MOVQconst {
50888 break
50889 }
50890 if v_0_1_0.AuxInt != 1 {
50891 break
50892 }
50893 if !(!config.nacl) {
50894 break
50895 }
50896 v.reset(OpAMD64SETB)
50897 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
50898 v0.AddArg(x)
50899 v0.AddArg(y)
50900 v.AddArg(v0)
50901 return true
50902 }
50903 // match: (SETNE (TESTLconst [c] x))
50904 // cond: isUint32PowerOfTwo(c) && !config.nacl
50905 // result: (SETB (BTLconst [log2uint32(c)] x))
50906 for {
50907 v_0 := v.Args[0]
50908 if v_0.Op != OpAMD64TESTLconst {
50909 break
50910 }
50911 c := v_0.AuxInt
50912 x := v_0.Args[0]
50913 if !(isUint32PowerOfTwo(c) && !config.nacl) {
50914 break
50915 }
50916 v.reset(OpAMD64SETB)
50917 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
50918 v0.AuxInt = log2uint32(c)
50919 v0.AddArg(x)
50920 v.AddArg(v0)
50921 return true
50922 }
50923 // match: (SETNE (TESTQconst [c] x))
50924 // cond: isUint64PowerOfTwo(c) && !config.nacl
50925 // result: (SETB (BTQconst [log2(c)] x))
50926 for {
50927 v_0 := v.Args[0]
50928 if v_0.Op != OpAMD64TESTQconst {
50929 break
50930 }
50931 c := v_0.AuxInt
50932 x := v_0.Args[0]
50933 if !(isUint64PowerOfTwo(c) && !config.nacl) {
50934 break
50935 }
50936 v.reset(OpAMD64SETB)
50937 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
50938 v0.AuxInt = log2(c)
50939 v0.AddArg(x)
50940 v.AddArg(v0)
50941 return true
50942 }
50943 // match: (SETNE (TESTQ (MOVQconst [c]) x))
50944 // cond: isUint64PowerOfTwo(c) && !config.nacl
50945 // result: (SETB (BTQconst [log2(c)] x))
50946 for {
50947 v_0 := v.Args[0]
50948 if v_0.Op != OpAMD64TESTQ {
50949 break
50950 }
50951 x := v_0.Args[1]
50952 v_0_0 := v_0.Args[0]
50953 if v_0_0.Op != OpAMD64MOVQconst {
50954 break
50955 }
50956 c := v_0_0.AuxInt
50957 if !(isUint64PowerOfTwo(c) && !config.nacl) {
50958 break
50959 }
50960 v.reset(OpAMD64SETB)
50961 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
50962 v0.AuxInt = log2(c)
50963 v0.AddArg(x)
50964 v.AddArg(v0)
50965 return true
50966 }
50967 // match: (SETNE (TESTQ x (MOVQconst [c])))
50968 // cond: isUint64PowerOfTwo(c) && !config.nacl
50969 // result: (SETB (BTQconst [log2(c)] x))
50970 for {
50971 v_0 := v.Args[0]
50972 if v_0.Op != OpAMD64TESTQ {
50973 break
50974 }
50975 _ = v_0.Args[1]
50976 x := v_0.Args[0]
50977 v_0_1 := v_0.Args[1]
50978 if v_0_1.Op != OpAMD64MOVQconst {
50979 break
50980 }
50981 c := v_0_1.AuxInt
50982 if !(isUint64PowerOfTwo(c) && !config.nacl) {
50983 break
50984 }
50985 v.reset(OpAMD64SETB)
50986 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
50987 v0.AuxInt = log2(c)
50988 v0.AddArg(x)
50989 v.AddArg(v0)
50990 return true
50991 }
50992 // match: (SETNE (CMPLconst [1] s:(ANDLconst [1] _)))
50993 // cond:
50994 // result: (SETEQ (CMPLconst [0] s))
50995 for {
50996 v_0 := v.Args[0]
50997 if v_0.Op != OpAMD64CMPLconst {
50998 break
50999 }
51000 if v_0.AuxInt != 1 {
51001 break
51002 }
51003 s := v_0.Args[0]
51004 if s.Op != OpAMD64ANDLconst {
51005 break
51006 }
51007 if s.AuxInt != 1 {
51008 break
51009 }
51010 v.reset(OpAMD64SETEQ)
51011 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
51012 v0.AuxInt = 0
51013 v0.AddArg(s)
51014 v.AddArg(v0)
51015 return true
51016 }
51017 // match: (SETNE (CMPQconst [1] s:(ANDQconst [1] _)))
51018 // cond:
51019 // result: (SETEQ (CMPQconst [0] s))
51020 for {
51021 v_0 := v.Args[0]
51022 if v_0.Op != OpAMD64CMPQconst {
51023 break
51024 }
51025 if v_0.AuxInt != 1 {
51026 break
51027 }
51028 s := v_0.Args[0]
51029 if s.Op != OpAMD64ANDQconst {
51030 break
51031 }
51032 if s.AuxInt != 1 {
51033 break
51034 }
51035 v.reset(OpAMD64SETEQ)
51036 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
51037 v0.AuxInt = 0
51038 v0.AddArg(s)
51039 v.AddArg(v0)
51040 return true
51041 }
51042 return false
51043 }
51044 func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool {
51045 b := v.Block
51046 config := b.Func.Config
51047 // match: (SETNE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
51048 // cond: z1==z2 && !config.nacl
51049 // result: (SETB (BTQconst [63] x))
51050 for {
51051 v_0 := v.Args[0]
51052 if v_0.Op != OpAMD64TESTQ {
51053 break
51054 }
51055 z2 := v_0.Args[1]
51056 z1 := v_0.Args[0]
51057 if z1.Op != OpAMD64SHLQconst {
51058 break
51059 }
51060 if z1.AuxInt != 63 {
51061 break
51062 }
51063 z1_0 := z1.Args[0]
51064 if z1_0.Op != OpAMD64SHRQconst {
51065 break
51066 }
51067 if z1_0.AuxInt != 63 {
51068 break
51069 }
51070 x := z1_0.Args[0]
51071 if !(z1 == z2 && !config.nacl) {
51072 break
51073 }
51074 v.reset(OpAMD64SETB)
51075 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
51076 v0.AuxInt = 63
51077 v0.AddArg(x)
51078 v.AddArg(v0)
51079 return true
51080 }
51081 // match: (SETNE (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))))
51082 // cond: z1==z2 && !config.nacl
51083 // result: (SETB (BTQconst [63] x))
51084 for {
51085 v_0 := v.Args[0]
51086 if v_0.Op != OpAMD64TESTQ {
51087 break
51088 }
51089 _ = v_0.Args[1]
51090 z2 := v_0.Args[0]
51091 z1 := v_0.Args[1]
51092 if z1.Op != OpAMD64SHLQconst {
51093 break
51094 }
51095 if z1.AuxInt != 63 {
51096 break
51097 }
51098 z1_0 := z1.Args[0]
51099 if z1_0.Op != OpAMD64SHRQconst {
51100 break
51101 }
51102 if z1_0.AuxInt != 63 {
51103 break
51104 }
51105 x := z1_0.Args[0]
51106 if !(z1 == z2 && !config.nacl) {
51107 break
51108 }
51109 v.reset(OpAMD64SETB)
51110 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
51111 v0.AuxInt = 63
51112 v0.AddArg(x)
51113 v.AddArg(v0)
51114 return true
51115 }
51116 // match: (SETNE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
51117 // cond: z1==z2 && !config.nacl
51118 // result: (SETB (BTQconst [31] x))
51119 for {
51120 v_0 := v.Args[0]
51121 if v_0.Op != OpAMD64TESTL {
51122 break
51123 }
51124 z2 := v_0.Args[1]
51125 z1 := v_0.Args[0]
51126 if z1.Op != OpAMD64SHLLconst {
51127 break
51128 }
51129 if z1.AuxInt != 31 {
51130 break
51131 }
51132 z1_0 := z1.Args[0]
51133 if z1_0.Op != OpAMD64SHRQconst {
51134 break
51135 }
51136 if z1_0.AuxInt != 31 {
51137 break
51138 }
51139 x := z1_0.Args[0]
51140 if !(z1 == z2 && !config.nacl) {
51141 break
51142 }
51143 v.reset(OpAMD64SETB)
51144 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
51145 v0.AuxInt = 31
51146 v0.AddArg(x)
51147 v.AddArg(v0)
51148 return true
51149 }
51150 // match: (SETNE (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x))))
51151 // cond: z1==z2 && !config.nacl
51152 // result: (SETB (BTQconst [31] x))
51153 for {
51154 v_0 := v.Args[0]
51155 if v_0.Op != OpAMD64TESTL {
51156 break
51157 }
51158 _ = v_0.Args[1]
51159 z2 := v_0.Args[0]
51160 z1 := v_0.Args[1]
51161 if z1.Op != OpAMD64SHLLconst {
51162 break
51163 }
51164 if z1.AuxInt != 31 {
51165 break
51166 }
51167 z1_0 := z1.Args[0]
51168 if z1_0.Op != OpAMD64SHRQconst {
51169 break
51170 }
51171 if z1_0.AuxInt != 31 {
51172 break
51173 }
51174 x := z1_0.Args[0]
51175 if !(z1 == z2 && !config.nacl) {
51176 break
51177 }
51178 v.reset(OpAMD64SETB)
51179 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
51180 v0.AuxInt = 31
51181 v0.AddArg(x)
51182 v.AddArg(v0)
51183 return true
51184 }
51185 // match: (SETNE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
51186 // cond: z1==z2 && !config.nacl
51187 // result: (SETB (BTQconst [0] x))
51188 for {
51189 v_0 := v.Args[0]
51190 if v_0.Op != OpAMD64TESTQ {
51191 break
51192 }
51193 z2 := v_0.Args[1]
51194 z1 := v_0.Args[0]
51195 if z1.Op != OpAMD64SHRQconst {
51196 break
51197 }
51198 if z1.AuxInt != 63 {
51199 break
51200 }
51201 z1_0 := z1.Args[0]
51202 if z1_0.Op != OpAMD64SHLQconst {
51203 break
51204 }
51205 if z1_0.AuxInt != 63 {
51206 break
51207 }
51208 x := z1_0.Args[0]
51209 if !(z1 == z2 && !config.nacl) {
51210 break
51211 }
51212 v.reset(OpAMD64SETB)
51213 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
51214 v0.AuxInt = 0
51215 v0.AddArg(x)
51216 v.AddArg(v0)
51217 return true
51218 }
51219 // match: (SETNE (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))))
51220 // cond: z1==z2 && !config.nacl
51221 // result: (SETB (BTQconst [0] x))
51222 for {
51223 v_0 := v.Args[0]
51224 if v_0.Op != OpAMD64TESTQ {
51225 break
51226 }
51227 _ = v_0.Args[1]
51228 z2 := v_0.Args[0]
51229 z1 := v_0.Args[1]
51230 if z1.Op != OpAMD64SHRQconst {
51231 break
51232 }
51233 if z1.AuxInt != 63 {
51234 break
51235 }
51236 z1_0 := z1.Args[0]
51237 if z1_0.Op != OpAMD64SHLQconst {
51238 break
51239 }
51240 if z1_0.AuxInt != 63 {
51241 break
51242 }
51243 x := z1_0.Args[0]
51244 if !(z1 == z2 && !config.nacl) {
51245 break
51246 }
51247 v.reset(OpAMD64SETB)
51248 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
51249 v0.AuxInt = 0
51250 v0.AddArg(x)
51251 v.AddArg(v0)
51252 return true
51253 }
51254 // match: (SETNE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
51255 // cond: z1==z2 && !config.nacl
51256 // result: (SETB (BTLconst [0] x))
51257 for {
51258 v_0 := v.Args[0]
51259 if v_0.Op != OpAMD64TESTL {
51260 break
51261 }
51262 z2 := v_0.Args[1]
51263 z1 := v_0.Args[0]
51264 if z1.Op != OpAMD64SHRLconst {
51265 break
51266 }
51267 if z1.AuxInt != 31 {
51268 break
51269 }
51270 z1_0 := z1.Args[0]
51271 if z1_0.Op != OpAMD64SHLLconst {
51272 break
51273 }
51274 if z1_0.AuxInt != 31 {
51275 break
51276 }
51277 x := z1_0.Args[0]
51278 if !(z1 == z2 && !config.nacl) {
51279 break
51280 }
51281 v.reset(OpAMD64SETB)
51282 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
51283 v0.AuxInt = 0
51284 v0.AddArg(x)
51285 v.AddArg(v0)
51286 return true
51287 }
51288 // match: (SETNE (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))))
51289 // cond: z1==z2 && !config.nacl
51290 // result: (SETB (BTLconst [0] x))
51291 for {
51292 v_0 := v.Args[0]
51293 if v_0.Op != OpAMD64TESTL {
51294 break
51295 }
51296 _ = v_0.Args[1]
51297 z2 := v_0.Args[0]
51298 z1 := v_0.Args[1]
51299 if z1.Op != OpAMD64SHRLconst {
51300 break
51301 }
51302 if z1.AuxInt != 31 {
51303 break
51304 }
51305 z1_0 := z1.Args[0]
51306 if z1_0.Op != OpAMD64SHLLconst {
51307 break
51308 }
51309 if z1_0.AuxInt != 31 {
51310 break
51311 }
51312 x := z1_0.Args[0]
51313 if !(z1 == z2 && !config.nacl) {
51314 break
51315 }
51316 v.reset(OpAMD64SETB)
51317 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
51318 v0.AuxInt = 0
51319 v0.AddArg(x)
51320 v.AddArg(v0)
51321 return true
51322 }
51323 // match: (SETNE (TESTQ z1:(SHRQconst [63] x) z2))
51324 // cond: z1==z2 && !config.nacl
51325 // result: (SETB (BTQconst [63] x))
51326 for {
51327 v_0 := v.Args[0]
51328 if v_0.Op != OpAMD64TESTQ {
51329 break
51330 }
51331 z2 := v_0.Args[1]
51332 z1 := v_0.Args[0]
51333 if z1.Op != OpAMD64SHRQconst {
51334 break
51335 }
51336 if z1.AuxInt != 63 {
51337 break
51338 }
51339 x := z1.Args[0]
51340 if !(z1 == z2 && !config.nacl) {
51341 break
51342 }
51343 v.reset(OpAMD64SETB)
51344 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
51345 v0.AuxInt = 63
51346 v0.AddArg(x)
51347 v.AddArg(v0)
51348 return true
51349 }
51350 // match: (SETNE (TESTQ z2 z1:(SHRQconst [63] x)))
51351 // cond: z1==z2 && !config.nacl
51352 // result: (SETB (BTQconst [63] x))
51353 for {
51354 v_0 := v.Args[0]
51355 if v_0.Op != OpAMD64TESTQ {
51356 break
51357 }
51358 _ = v_0.Args[1]
51359 z2 := v_0.Args[0]
51360 z1 := v_0.Args[1]
51361 if z1.Op != OpAMD64SHRQconst {
51362 break
51363 }
51364 if z1.AuxInt != 63 {
51365 break
51366 }
51367 x := z1.Args[0]
51368 if !(z1 == z2 && !config.nacl) {
51369 break
51370 }
51371 v.reset(OpAMD64SETB)
51372 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
51373 v0.AuxInt = 63
51374 v0.AddArg(x)
51375 v.AddArg(v0)
51376 return true
51377 }
51378 return false
51379 }
51380 func rewriteValueAMD64_OpAMD64SETNE_20(v *Value) bool {
51381 b := v.Block
51382 config := b.Func.Config
51383 // match: (SETNE (TESTL z1:(SHRLconst [31] x) z2))
51384 // cond: z1==z2 && !config.nacl
51385 // result: (SETB (BTLconst [31] x))
51386 for {
51387 v_0 := v.Args[0]
51388 if v_0.Op != OpAMD64TESTL {
51389 break
51390 }
51391 z2 := v_0.Args[1]
51392 z1 := v_0.Args[0]
51393 if z1.Op != OpAMD64SHRLconst {
51394 break
51395 }
51396 if z1.AuxInt != 31 {
51397 break
51398 }
51399 x := z1.Args[0]
51400 if !(z1 == z2 && !config.nacl) {
51401 break
51402 }
51403 v.reset(OpAMD64SETB)
51404 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
51405 v0.AuxInt = 31
51406 v0.AddArg(x)
51407 v.AddArg(v0)
51408 return true
51409 }
51410 // match: (SETNE (TESTL z2 z1:(SHRLconst [31] x)))
51411 // cond: z1==z2 && !config.nacl
51412 // result: (SETB (BTLconst [31] x))
51413 for {
51414 v_0 := v.Args[0]
51415 if v_0.Op != OpAMD64TESTL {
51416 break
51417 }
51418 _ = v_0.Args[1]
51419 z2 := v_0.Args[0]
51420 z1 := v_0.Args[1]
51421 if z1.Op != OpAMD64SHRLconst {
51422 break
51423 }
51424 if z1.AuxInt != 31 {
51425 break
51426 }
51427 x := z1.Args[0]
51428 if !(z1 == z2 && !config.nacl) {
51429 break
51430 }
51431 v.reset(OpAMD64SETB)
51432 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
51433 v0.AuxInt = 31
51434 v0.AddArg(x)
51435 v.AddArg(v0)
51436 return true
51437 }
51438 // match: (SETNE (InvertFlags x))
51439 // cond:
51440 // result: (SETNE x)
51441 for {
51442 v_0 := v.Args[0]
51443 if v_0.Op != OpAMD64InvertFlags {
51444 break
51445 }
51446 x := v_0.Args[0]
51447 v.reset(OpAMD64SETNE)
51448 v.AddArg(x)
51449 return true
51450 }
51451 // match: (SETNE (FlagEQ))
51452 // cond:
51453 // result: (MOVLconst [0])
51454 for {
51455 v_0 := v.Args[0]
51456 if v_0.Op != OpAMD64FlagEQ {
51457 break
51458 }
51459 v.reset(OpAMD64MOVLconst)
51460 v.AuxInt = 0
51461 return true
51462 }
51463 // match: (SETNE (FlagLT_ULT))
51464 // cond:
51465 // result: (MOVLconst [1])
51466 for {
51467 v_0 := v.Args[0]
51468 if v_0.Op != OpAMD64FlagLT_ULT {
51469 break
51470 }
51471 v.reset(OpAMD64MOVLconst)
51472 v.AuxInt = 1
51473 return true
51474 }
51475 // match: (SETNE (FlagLT_UGT))
51476 // cond:
51477 // result: (MOVLconst [1])
51478 for {
51479 v_0 := v.Args[0]
51480 if v_0.Op != OpAMD64FlagLT_UGT {
51481 break
51482 }
51483 v.reset(OpAMD64MOVLconst)
51484 v.AuxInt = 1
51485 return true
51486 }
51487 // match: (SETNE (FlagGT_ULT))
51488 // cond:
51489 // result: (MOVLconst [1])
51490 for {
51491 v_0 := v.Args[0]
51492 if v_0.Op != OpAMD64FlagGT_ULT {
51493 break
51494 }
51495 v.reset(OpAMD64MOVLconst)
51496 v.AuxInt = 1
51497 return true
51498 }
51499 // match: (SETNE (FlagGT_UGT))
51500 // cond:
51501 // result: (MOVLconst [1])
51502 for {
51503 v_0 := v.Args[0]
51504 if v_0.Op != OpAMD64FlagGT_UGT {
51505 break
51506 }
51507 v.reset(OpAMD64MOVLconst)
51508 v.AuxInt = 1
51509 return true
51510 }
51511 return false
51512 }
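// Illustrative sketch (not part of the generated code): the SETNE rules
// above recognize the shift-and-test idiom and lower it to a bit test on
// non-nacl targets. Assuming amd64, a function such as
//
//	func hasBit(x uint64, k uint) bool {
//		return x&(1<<k) != 0
//	}
//
// is intended to be matched as (SETNE (TESTQ (SHLQ (MOVQconst [1]) k) x)),
// modulo bounds handling of the shift count, and rewritten to
// (SETB (BTQ k x)): one BTQ/SETB pair instead of SHLQ, TESTQ, and SETNE.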
51513 func rewriteValueAMD64_OpAMD64SETNEstore_0(v *Value) bool {
51514 b := v.Block
51515 config := b.Func.Config
51516 // match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
51517 // cond: !config.nacl
51518 // result: (SETBstore [off] {sym} ptr (BTL x y) mem)
51519 for {
51520 off := v.AuxInt
51521 sym := v.Aux
51522 mem := v.Args[2]
51523 ptr := v.Args[0]
51524 v_1 := v.Args[1]
51525 if v_1.Op != OpAMD64TESTL {
51526 break
51527 }
51528 y := v_1.Args[1]
51529 v_1_0 := v_1.Args[0]
51530 if v_1_0.Op != OpAMD64SHLL {
51531 break
51532 }
51533 x := v_1_0.Args[1]
51534 v_1_0_0 := v_1_0.Args[0]
51535 if v_1_0_0.Op != OpAMD64MOVLconst {
51536 break
51537 }
51538 if v_1_0_0.AuxInt != 1 {
51539 break
51540 }
51541 if !(!config.nacl) {
51542 break
51543 }
51544 v.reset(OpAMD64SETBstore)
51545 v.AuxInt = off
51546 v.Aux = sym
51547 v.AddArg(ptr)
51548 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
51549 v0.AddArg(x)
51550 v0.AddArg(y)
51551 v.AddArg(v0)
51552 v.AddArg(mem)
51553 return true
51554 }
51555 // match: (SETNEstore [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem)
51556 // cond: !config.nacl
51557 // result: (SETBstore [off] {sym} ptr (BTL x y) mem)
51558 for {
51559 off := v.AuxInt
51560 sym := v.Aux
51561 mem := v.Args[2]
51562 ptr := v.Args[0]
51563 v_1 := v.Args[1]
51564 if v_1.Op != OpAMD64TESTL {
51565 break
51566 }
51567 _ = v_1.Args[1]
51568 y := v_1.Args[0]
51569 v_1_1 := v_1.Args[1]
51570 if v_1_1.Op != OpAMD64SHLL {
51571 break
51572 }
51573 x := v_1_1.Args[1]
51574 v_1_1_0 := v_1_1.Args[0]
51575 if v_1_1_0.Op != OpAMD64MOVLconst {
51576 break
51577 }
51578 if v_1_1_0.AuxInt != 1 {
51579 break
51580 }
51581 if !(!config.nacl) {
51582 break
51583 }
51584 v.reset(OpAMD64SETBstore)
51585 v.AuxInt = off
51586 v.Aux = sym
51587 v.AddArg(ptr)
51588 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
51589 v0.AddArg(x)
51590 v0.AddArg(y)
51591 v.AddArg(v0)
51592 v.AddArg(mem)
51593 return true
51594 }
51595 // match: (SETNEstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
51596 // cond: !config.nacl
51597 // result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
51598 for {
51599 off := v.AuxInt
51600 sym := v.Aux
51601 mem := v.Args[2]
51602 ptr := v.Args[0]
51603 v_1 := v.Args[1]
51604 if v_1.Op != OpAMD64TESTQ {
51605 break
51606 }
51607 y := v_1.Args[1]
51608 v_1_0 := v_1.Args[0]
51609 if v_1_0.Op != OpAMD64SHLQ {
51610 break
51611 }
51612 x := v_1_0.Args[1]
51613 v_1_0_0 := v_1_0.Args[0]
51614 if v_1_0_0.Op != OpAMD64MOVQconst {
51615 break
51616 }
51617 if v_1_0_0.AuxInt != 1 {
51618 break
51619 }
51620 if !(!config.nacl) {
51621 break
51622 }
51623 v.reset(OpAMD64SETBstore)
51624 v.AuxInt = off
51625 v.Aux = sym
51626 v.AddArg(ptr)
51627 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
51628 v0.AddArg(x)
51629 v0.AddArg(y)
51630 v.AddArg(v0)
51631 v.AddArg(mem)
51632 return true
51633 }
51634 // match: (SETNEstore [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem)
51635 // cond: !config.nacl
51636 // result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
51637 for {
51638 off := v.AuxInt
51639 sym := v.Aux
51640 mem := v.Args[2]
51641 ptr := v.Args[0]
51642 v_1 := v.Args[1]
51643 if v_1.Op != OpAMD64TESTQ {
51644 break
51645 }
51646 _ = v_1.Args[1]
51647 y := v_1.Args[0]
51648 v_1_1 := v_1.Args[1]
51649 if v_1_1.Op != OpAMD64SHLQ {
51650 break
51651 }
51652 x := v_1_1.Args[1]
51653 v_1_1_0 := v_1_1.Args[0]
51654 if v_1_1_0.Op != OpAMD64MOVQconst {
51655 break
51656 }
51657 if v_1_1_0.AuxInt != 1 {
51658 break
51659 }
51660 if !(!config.nacl) {
51661 break
51662 }
51663 v.reset(OpAMD64SETBstore)
51664 v.AuxInt = off
51665 v.Aux = sym
51666 v.AddArg(ptr)
51667 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
51668 v0.AddArg(x)
51669 v0.AddArg(y)
51670 v.AddArg(v0)
51671 v.AddArg(mem)
51672 return true
51673 }
51674 // match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem)
51675 // cond: isUint32PowerOfTwo(c) && !config.nacl
51676 // result: (SETBstore [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
51677 for {
51678 off := v.AuxInt
51679 sym := v.Aux
51680 mem := v.Args[2]
51681 ptr := v.Args[0]
51682 v_1 := v.Args[1]
51683 if v_1.Op != OpAMD64TESTLconst {
51684 break
51685 }
51686 c := v_1.AuxInt
51687 x := v_1.Args[0]
51688 if !(isUint32PowerOfTwo(c) && !config.nacl) {
51689 break
51690 }
51691 v.reset(OpAMD64SETBstore)
51692 v.AuxInt = off
51693 v.Aux = sym
51694 v.AddArg(ptr)
51695 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
51696 v0.AuxInt = log2uint32(c)
51697 v0.AddArg(x)
51698 v.AddArg(v0)
51699 v.AddArg(mem)
51700 return true
51701 }
51702 // match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem)
51703 // cond: isUint64PowerOfTwo(c) && !config.nacl
51704 // result: (SETBstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
51705 for {
51706 off := v.AuxInt
51707 sym := v.Aux
51708 mem := v.Args[2]
51709 ptr := v.Args[0]
51710 v_1 := v.Args[1]
51711 if v_1.Op != OpAMD64TESTQconst {
51712 break
51713 }
51714 c := v_1.AuxInt
51715 x := v_1.Args[0]
51716 if !(isUint64PowerOfTwo(c) && !config.nacl) {
51717 break
51718 }
51719 v.reset(OpAMD64SETBstore)
51720 v.AuxInt = off
51721 v.Aux = sym
51722 v.AddArg(ptr)
51723 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
51724 v0.AuxInt = log2(c)
51725 v0.AddArg(x)
51726 v.AddArg(v0)
51727 v.AddArg(mem)
51728 return true
51729 }
51730 // match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
51731 // cond: isUint64PowerOfTwo(c) && !config.nacl
51732 // result: (SETBstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
51733 for {
51734 off := v.AuxInt
51735 sym := v.Aux
51736 mem := v.Args[2]
51737 ptr := v.Args[0]
51738 v_1 := v.Args[1]
51739 if v_1.Op != OpAMD64TESTQ {
51740 break
51741 }
51742 x := v_1.Args[1]
51743 v_1_0 := v_1.Args[0]
51744 if v_1_0.Op != OpAMD64MOVQconst {
51745 break
51746 }
51747 c := v_1_0.AuxInt
51748 if !(isUint64PowerOfTwo(c) && !config.nacl) {
51749 break
51750 }
51751 v.reset(OpAMD64SETBstore)
51752 v.AuxInt = off
51753 v.Aux = sym
51754 v.AddArg(ptr)
51755 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
51756 v0.AuxInt = log2(c)
51757 v0.AddArg(x)
51758 v.AddArg(v0)
51759 v.AddArg(mem)
51760 return true
51761 }
51762 // match: (SETNEstore [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem)
51763 // cond: isUint64PowerOfTwo(c) && !config.nacl
51764 // result: (SETBstore [off] {sym} ptr (BTQconst [log2(c)] x) mem)
51765 for {
51766 off := v.AuxInt
51767 sym := v.Aux
51768 mem := v.Args[2]
51769 ptr := v.Args[0]
51770 v_1 := v.Args[1]
51771 if v_1.Op != OpAMD64TESTQ {
51772 break
51773 }
51774 _ = v_1.Args[1]
51775 x := v_1.Args[0]
51776 v_1_1 := v_1.Args[1]
51777 if v_1_1.Op != OpAMD64MOVQconst {
51778 break
51779 }
51780 c := v_1_1.AuxInt
51781 if !(isUint64PowerOfTwo(c) && !config.nacl) {
51782 break
51783 }
51784 v.reset(OpAMD64SETBstore)
51785 v.AuxInt = off
51786 v.Aux = sym
51787 v.AddArg(ptr)
51788 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
51789 v0.AuxInt = log2(c)
51790 v0.AddArg(x)
51791 v.AddArg(v0)
51792 v.AddArg(mem)
51793 return true
51794 }
51795 // match: (SETNEstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
51796 // cond:
51797 // result: (SETEQstore [off] {sym} ptr (CMPLconst [0] s) mem)
51798 for {
51799 off := v.AuxInt
51800 sym := v.Aux
51801 mem := v.Args[2]
51802 ptr := v.Args[0]
51803 v_1 := v.Args[1]
51804 if v_1.Op != OpAMD64CMPLconst {
51805 break
51806 }
51807 if v_1.AuxInt != 1 {
51808 break
51809 }
51810 s := v_1.Args[0]
51811 if s.Op != OpAMD64ANDLconst {
51812 break
51813 }
51814 if s.AuxInt != 1 {
51815 break
51816 }
51817 v.reset(OpAMD64SETEQstore)
51818 v.AuxInt = off
51819 v.Aux = sym
51820 v.AddArg(ptr)
51821 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
51822 v0.AuxInt = 0
51823 v0.AddArg(s)
51824 v.AddArg(v0)
51825 v.AddArg(mem)
51826 return true
51827 }
51828 // match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
51829 // cond:
51830 // result: (SETEQstore [off] {sym} ptr (CMPQconst [0] s) mem)
51831 for {
51832 off := v.AuxInt
51833 sym := v.Aux
51834 mem := v.Args[2]
51835 ptr := v.Args[0]
51836 v_1 := v.Args[1]
51837 if v_1.Op != OpAMD64CMPQconst {
51838 break
51839 }
51840 if v_1.AuxInt != 1 {
51841 break
51842 }
51843 s := v_1.Args[0]
51844 if s.Op != OpAMD64ANDQconst {
51845 break
51846 }
51847 if s.AuxInt != 1 {
51848 break
51849 }
51850 v.reset(OpAMD64SETEQstore)
51851 v.AuxInt = off
51852 v.Aux = sym
51853 v.AddArg(ptr)
51854 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
51855 v0.AuxInt = 0
51856 v0.AddArg(s)
51857 v.AddArg(v0)
51858 v.AddArg(mem)
51859 return true
51860 }
51861 return false
51862 }
51863 func rewriteValueAMD64_OpAMD64SETNEstore_10(v *Value) bool {
51864 b := v.Block
51865 config := b.Func.Config
51866 // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
51867 // cond: z1==z2 && !config.nacl
51868 // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
51869 for {
51870 off := v.AuxInt
51871 sym := v.Aux
51872 mem := v.Args[2]
51873 ptr := v.Args[0]
51874 v_1 := v.Args[1]
51875 if v_1.Op != OpAMD64TESTQ {
51876 break
51877 }
51878 z2 := v_1.Args[1]
51879 z1 := v_1.Args[0]
51880 if z1.Op != OpAMD64SHLQconst {
51881 break
51882 }
51883 if z1.AuxInt != 63 {
51884 break
51885 }
51886 z1_0 := z1.Args[0]
51887 if z1_0.Op != OpAMD64SHRQconst {
51888 break
51889 }
51890 if z1_0.AuxInt != 63 {
51891 break
51892 }
51893 x := z1_0.Args[0]
51894 if !(z1 == z2 && !config.nacl) {
51895 break
51896 }
51897 v.reset(OpAMD64SETBstore)
51898 v.AuxInt = off
51899 v.Aux = sym
51900 v.AddArg(ptr)
51901 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
51902 v0.AuxInt = 63
51903 v0.AddArg(x)
51904 v.AddArg(v0)
51905 v.AddArg(mem)
51906 return true
51907 }
51908 // match: (SETNEstore [off] {sym} ptr (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))) mem)
51909 // cond: z1==z2 && !config.nacl
51910 // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
51911 for {
51912 off := v.AuxInt
51913 sym := v.Aux
51914 mem := v.Args[2]
51915 ptr := v.Args[0]
51916 v_1 := v.Args[1]
51917 if v_1.Op != OpAMD64TESTQ {
51918 break
51919 }
51920 _ = v_1.Args[1]
51921 z2 := v_1.Args[0]
51922 z1 := v_1.Args[1]
51923 if z1.Op != OpAMD64SHLQconst {
51924 break
51925 }
51926 if z1.AuxInt != 63 {
51927 break
51928 }
51929 z1_0 := z1.Args[0]
51930 if z1_0.Op != OpAMD64SHRQconst {
51931 break
51932 }
51933 if z1_0.AuxInt != 63 {
51934 break
51935 }
51936 x := z1_0.Args[0]
51937 if !(z1 == z2 && !config.nacl) {
51938 break
51939 }
51940 v.reset(OpAMD64SETBstore)
51941 v.AuxInt = off
51942 v.Aux = sym
51943 v.AddArg(ptr)
51944 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
51945 v0.AuxInt = 63
51946 v0.AddArg(x)
51947 v.AddArg(v0)
51948 v.AddArg(mem)
51949 return true
51950 }
51951 // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
51952 // cond: z1==z2 && !config.nacl
51953 // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
51954 for {
51955 off := v.AuxInt
51956 sym := v.Aux
51957 mem := v.Args[2]
51958 ptr := v.Args[0]
51959 v_1 := v.Args[1]
51960 if v_1.Op != OpAMD64TESTL {
51961 break
51962 }
51963 z2 := v_1.Args[1]
51964 z1 := v_1.Args[0]
51965 if z1.Op != OpAMD64SHLLconst {
51966 break
51967 }
51968 if z1.AuxInt != 31 {
51969 break
51970 }
51971 z1_0 := z1.Args[0]
51972 if z1_0.Op != OpAMD64SHRLconst {
51973 break
51974 }
51975 if z1_0.AuxInt != 31 {
51976 break
51977 }
51978 x := z1_0.Args[0]
51979 if !(z1 == z2 && !config.nacl) {
51980 break
51981 }
51982 v.reset(OpAMD64SETBstore)
51983 v.AuxInt = off
51984 v.Aux = sym
51985 v.AddArg(ptr)
51986 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
51987 v0.AuxInt = 31
51988 v0.AddArg(x)
51989 v.AddArg(v0)
51990 v.AddArg(mem)
51991 return true
51992 }
51993 // match: (SETNEstore [off] {sym} ptr (TESTL z2 z1:(SHLLconst [31] (SHRLconst [31] x))) mem)
51994 // cond: z1==z2 && !config.nacl
51995 // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
51996 for {
51997 off := v.AuxInt
51998 sym := v.Aux
51999 mem := v.Args[2]
52000 ptr := v.Args[0]
52001 v_1 := v.Args[1]
52002 if v_1.Op != OpAMD64TESTL {
52003 break
52004 }
52005 _ = v_1.Args[1]
52006 z2 := v_1.Args[0]
52007 z1 := v_1.Args[1]
52008 if z1.Op != OpAMD64SHLLconst {
52009 break
52010 }
52011 if z1.AuxInt != 31 {
52012 break
52013 }
52014 z1_0 := z1.Args[0]
52015 if z1_0.Op != OpAMD64SHRLconst {
52016 break
52017 }
52018 if z1_0.AuxInt != 31 {
52019 break
52020 }
52021 x := z1_0.Args[0]
52022 if !(z1 == z2 && !config.nacl) {
52023 break
52024 }
52025 v.reset(OpAMD64SETBstore)
52026 v.AuxInt = off
52027 v.Aux = sym
52028 v.AddArg(ptr)
52029 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
52030 v0.AuxInt = 31
52031 v0.AddArg(x)
52032 v.AddArg(v0)
52033 v.AddArg(mem)
52034 return true
52035 }
52036 // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
52037 // cond: z1==z2 && !config.nacl
52038 // result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem)
52039 for {
52040 off := v.AuxInt
52041 sym := v.Aux
52042 mem := v.Args[2]
52043 ptr := v.Args[0]
52044 v_1 := v.Args[1]
52045 if v_1.Op != OpAMD64TESTQ {
52046 break
52047 }
52048 z2 := v_1.Args[1]
52049 z1 := v_1.Args[0]
52050 if z1.Op != OpAMD64SHRQconst {
52051 break
52052 }
52053 if z1.AuxInt != 63 {
52054 break
52055 }
52056 z1_0 := z1.Args[0]
52057 if z1_0.Op != OpAMD64SHLQconst {
52058 break
52059 }
52060 if z1_0.AuxInt != 63 {
52061 break
52062 }
52063 x := z1_0.Args[0]
52064 if !(z1 == z2 && !config.nacl) {
52065 break
52066 }
52067 v.reset(OpAMD64SETBstore)
52068 v.AuxInt = off
52069 v.Aux = sym
52070 v.AddArg(ptr)
52071 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
52072 v0.AuxInt = 0
52073 v0.AddArg(x)
52074 v.AddArg(v0)
52075 v.AddArg(mem)
52076 return true
52077 }
52078 // match: (SETNEstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))) mem)
52079 // cond: z1==z2 && !config.nacl
52080 // result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem)
52081 for {
52082 off := v.AuxInt
52083 sym := v.Aux
52084 mem := v.Args[2]
52085 ptr := v.Args[0]
52086 v_1 := v.Args[1]
52087 if v_1.Op != OpAMD64TESTQ {
52088 break
52089 }
52090 _ = v_1.Args[1]
52091 z2 := v_1.Args[0]
52092 z1 := v_1.Args[1]
52093 if z1.Op != OpAMD64SHRQconst {
52094 break
52095 }
52096 if z1.AuxInt != 63 {
52097 break
52098 }
52099 z1_0 := z1.Args[0]
52100 if z1_0.Op != OpAMD64SHLQconst {
52101 break
52102 }
52103 if z1_0.AuxInt != 63 {
52104 break
52105 }
52106 x := z1_0.Args[0]
52107 if !(z1 == z2 && !config.nacl) {
52108 break
52109 }
52110 v.reset(OpAMD64SETBstore)
52111 v.AuxInt = off
52112 v.Aux = sym
52113 v.AddArg(ptr)
52114 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
52115 v0.AuxInt = 0
52116 v0.AddArg(x)
52117 v.AddArg(v0)
52118 v.AddArg(mem)
52119 return true
52120 }
52121 // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
52122 // cond: z1==z2 && !config.nacl
52123 // result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem)
52124 for {
52125 off := v.AuxInt
52126 sym := v.Aux
52127 mem := v.Args[2]
52128 ptr := v.Args[0]
52129 v_1 := v.Args[1]
52130 if v_1.Op != OpAMD64TESTL {
52131 break
52132 }
52133 z2 := v_1.Args[1]
52134 z1 := v_1.Args[0]
52135 if z1.Op != OpAMD64SHRLconst {
52136 break
52137 }
52138 if z1.AuxInt != 31 {
52139 break
52140 }
52141 z1_0 := z1.Args[0]
52142 if z1_0.Op != OpAMD64SHLLconst {
52143 break
52144 }
52145 if z1_0.AuxInt != 31 {
52146 break
52147 }
52148 x := z1_0.Args[0]
52149 if !(z1 == z2 && !config.nacl) {
52150 break
52151 }
52152 v.reset(OpAMD64SETBstore)
52153 v.AuxInt = off
52154 v.Aux = sym
52155 v.AddArg(ptr)
52156 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
52157 v0.AuxInt = 0
52158 v0.AddArg(x)
52159 v.AddArg(v0)
52160 v.AddArg(mem)
52161 return true
52162 }
52163 // match: (SETNEstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))) mem)
52164 // cond: z1==z2 && !config.nacl
52165 // result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem)
52166 for {
52167 off := v.AuxInt
52168 sym := v.Aux
52169 mem := v.Args[2]
52170 ptr := v.Args[0]
52171 v_1 := v.Args[1]
52172 if v_1.Op != OpAMD64TESTL {
52173 break
52174 }
52175 _ = v_1.Args[1]
52176 z2 := v_1.Args[0]
52177 z1 := v_1.Args[1]
52178 if z1.Op != OpAMD64SHRLconst {
52179 break
52180 }
52181 if z1.AuxInt != 31 {
52182 break
52183 }
52184 z1_0 := z1.Args[0]
52185 if z1_0.Op != OpAMD64SHLLconst {
52186 break
52187 }
52188 if z1_0.AuxInt != 31 {
52189 break
52190 }
52191 x := z1_0.Args[0]
52192 if !(z1 == z2 && !config.nacl) {
52193 break
52194 }
52195 v.reset(OpAMD64SETBstore)
52196 v.AuxInt = off
52197 v.Aux = sym
52198 v.AddArg(ptr)
52199 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
52200 v0.AuxInt = 0
52201 v0.AddArg(x)
52202 v.AddArg(v0)
52203 v.AddArg(mem)
52204 return true
52205 }
52206 // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
52207 // cond: z1==z2 && !config.nacl
52208 // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
52209 for {
52210 off := v.AuxInt
52211 sym := v.Aux
52212 mem := v.Args[2]
52213 ptr := v.Args[0]
52214 v_1 := v.Args[1]
52215 if v_1.Op != OpAMD64TESTQ {
52216 break
52217 }
52218 z2 := v_1.Args[1]
52219 z1 := v_1.Args[0]
52220 if z1.Op != OpAMD64SHRQconst {
52221 break
52222 }
52223 if z1.AuxInt != 63 {
52224 break
52225 }
52226 x := z1.Args[0]
52227 if !(z1 == z2 && !config.nacl) {
52228 break
52229 }
52230 v.reset(OpAMD64SETBstore)
52231 v.AuxInt = off
52232 v.Aux = sym
52233 v.AddArg(ptr)
52234 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
52235 v0.AuxInt = 63
52236 v0.AddArg(x)
52237 v.AddArg(v0)
52238 v.AddArg(mem)
52239 return true
52240 }
52241 // match: (SETNEstore [off] {sym} ptr (TESTQ z2 z1:(SHRQconst [63] x)) mem)
52242 // cond: z1==z2 && !config.nacl
52243 // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
52244 for {
52245 off := v.AuxInt
52246 sym := v.Aux
52247 mem := v.Args[2]
52248 ptr := v.Args[0]
52249 v_1 := v.Args[1]
52250 if v_1.Op != OpAMD64TESTQ {
52251 break
52252 }
52253 _ = v_1.Args[1]
52254 z2 := v_1.Args[0]
52255 z1 := v_1.Args[1]
52256 if z1.Op != OpAMD64SHRQconst {
52257 break
52258 }
52259 if z1.AuxInt != 63 {
52260 break
52261 }
52262 x := z1.Args[0]
52263 if !(z1 == z2 && !config.nacl) {
52264 break
52265 }
52266 v.reset(OpAMD64SETBstore)
52267 v.AuxInt = off
52268 v.Aux = sym
52269 v.AddArg(ptr)
52270 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
52271 v0.AuxInt = 63
52272 v0.AddArg(x)
52273 v.AddArg(v0)
52274 v.AddArg(mem)
52275 return true
52276 }
52277 return false
52278 }
52279 func rewriteValueAMD64_OpAMD64SETNEstore_20(v *Value) bool {
52280 b := v.Block
52281 config := b.Func.Config
52282 typ := &b.Func.Config.Types
52283 // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
52284 // cond: z1==z2 && !config.nacl
52285 // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
52286 for {
52287 off := v.AuxInt
52288 sym := v.Aux
52289 mem := v.Args[2]
52290 ptr := v.Args[0]
52291 v_1 := v.Args[1]
52292 if v_1.Op != OpAMD64TESTL {
52293 break
52294 }
52295 z2 := v_1.Args[1]
52296 z1 := v_1.Args[0]
52297 if z1.Op != OpAMD64SHRLconst {
52298 break
52299 }
52300 if z1.AuxInt != 31 {
52301 break
52302 }
52303 x := z1.Args[0]
52304 if !(z1 == z2 && !config.nacl) {
52305 break
52306 }
52307 v.reset(OpAMD64SETBstore)
52308 v.AuxInt = off
52309 v.Aux = sym
52310 v.AddArg(ptr)
52311 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
52312 v0.AuxInt = 31
52313 v0.AddArg(x)
52314 v.AddArg(v0)
52315 v.AddArg(mem)
52316 return true
52317 }
52318 // match: (SETNEstore [off] {sym} ptr (TESTL z2 z1:(SHRLconst [31] x)) mem)
52319 // cond: z1==z2 && !config.nacl
52320 // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
52321 for {
52322 off := v.AuxInt
52323 sym := v.Aux
52324 mem := v.Args[2]
52325 ptr := v.Args[0]
52326 v_1 := v.Args[1]
52327 if v_1.Op != OpAMD64TESTL {
52328 break
52329 }
52330 _ = v_1.Args[1]
52331 z2 := v_1.Args[0]
52332 z1 := v_1.Args[1]
52333 if z1.Op != OpAMD64SHRLconst {
52334 break
52335 }
52336 if z1.AuxInt != 31 {
52337 break
52338 }
52339 x := z1.Args[0]
52340 if !(z1 == z2 && !config.nacl) {
52341 break
52342 }
52343 v.reset(OpAMD64SETBstore)
52344 v.AuxInt = off
52345 v.Aux = sym
52346 v.AddArg(ptr)
52347 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
52348 v0.AuxInt = 31
52349 v0.AddArg(x)
52350 v.AddArg(v0)
52351 v.AddArg(mem)
52352 return true
52353 }
52354 // match: (SETNEstore [off] {sym} ptr (InvertFlags x) mem)
52355 // cond:
52356 // result: (SETNEstore [off] {sym} ptr x mem)
52357 for {
52358 off := v.AuxInt
52359 sym := v.Aux
52360 mem := v.Args[2]
52361 ptr := v.Args[0]
52362 v_1 := v.Args[1]
52363 if v_1.Op != OpAMD64InvertFlags {
52364 break
52365 }
52366 x := v_1.Args[0]
52367 v.reset(OpAMD64SETNEstore)
52368 v.AuxInt = off
52369 v.Aux = sym
52370 v.AddArg(ptr)
52371 v.AddArg(x)
52372 v.AddArg(mem)
52373 return true
52374 }
52375 // match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem)
52376 // cond: is32Bit(off1+off2)
52377 // result: (SETNEstore [off1+off2] {sym} base val mem)
52378 for {
52379 off1 := v.AuxInt
52380 sym := v.Aux
52381 mem := v.Args[2]
52382 v_0 := v.Args[0]
52383 if v_0.Op != OpAMD64ADDQconst {
52384 break
52385 }
52386 off2 := v_0.AuxInt
52387 base := v_0.Args[0]
52388 val := v.Args[1]
52389 if !(is32Bit(off1 + off2)) {
52390 break
52391 }
52392 v.reset(OpAMD64SETNEstore)
52393 v.AuxInt = off1 + off2
52394 v.Aux = sym
52395 v.AddArg(base)
52396 v.AddArg(val)
52397 v.AddArg(mem)
52398 return true
52399 }
52400 // match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
52401 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
52402 // result: (SETNEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
52403 for {
52404 off1 := v.AuxInt
52405 sym1 := v.Aux
52406 mem := v.Args[2]
52407 v_0 := v.Args[0]
52408 if v_0.Op != OpAMD64LEAQ {
52409 break
52410 }
52411 off2 := v_0.AuxInt
52412 sym2 := v_0.Aux
52413 base := v_0.Args[0]
52414 val := v.Args[1]
52415 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
52416 break
52417 }
52418 v.reset(OpAMD64SETNEstore)
52419 v.AuxInt = off1 + off2
52420 v.Aux = mergeSym(sym1, sym2)
52421 v.AddArg(base)
52422 v.AddArg(val)
52423 v.AddArg(mem)
52424 return true
52425 }
52426 // match: (SETNEstore [off] {sym} ptr (FlagEQ) mem)
52427 // cond:
52428 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
52429 for {
52430 off := v.AuxInt
52431 sym := v.Aux
52432 mem := v.Args[2]
52433 ptr := v.Args[0]
52434 v_1 := v.Args[1]
52435 if v_1.Op != OpAMD64FlagEQ {
52436 break
52437 }
52438 v.reset(OpAMD64MOVBstore)
52439 v.AuxInt = off
52440 v.Aux = sym
52441 v.AddArg(ptr)
52442 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
52443 v0.AuxInt = 0
52444 v.AddArg(v0)
52445 v.AddArg(mem)
52446 return true
52447 }
52448 // match: (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem)
52449 // cond:
52450 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
52451 for {
52452 off := v.AuxInt
52453 sym := v.Aux
52454 mem := v.Args[2]
52455 ptr := v.Args[0]
52456 v_1 := v.Args[1]
52457 if v_1.Op != OpAMD64FlagLT_ULT {
52458 break
52459 }
52460 v.reset(OpAMD64MOVBstore)
52461 v.AuxInt = off
52462 v.Aux = sym
52463 v.AddArg(ptr)
52464 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
52465 v0.AuxInt = 1
52466 v.AddArg(v0)
52467 v.AddArg(mem)
52468 return true
52469 }
52470 // match: (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem)
52471 // cond:
52472 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
52473 for {
52474 off := v.AuxInt
52475 sym := v.Aux
52476 mem := v.Args[2]
52477 ptr := v.Args[0]
52478 v_1 := v.Args[1]
52479 if v_1.Op != OpAMD64FlagLT_UGT {
52480 break
52481 }
52482 v.reset(OpAMD64MOVBstore)
52483 v.AuxInt = off
52484 v.Aux = sym
52485 v.AddArg(ptr)
52486 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
52487 v0.AuxInt = 1
52488 v.AddArg(v0)
52489 v.AddArg(mem)
52490 return true
52491 }
52492 // match: (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem)
52493 // cond:
52494 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
52495 for {
52496 off := v.AuxInt
52497 sym := v.Aux
52498 mem := v.Args[2]
52499 ptr := v.Args[0]
52500 v_1 := v.Args[1]
52501 if v_1.Op != OpAMD64FlagGT_ULT {
52502 break
52503 }
52504 v.reset(OpAMD64MOVBstore)
52505 v.AuxInt = off
52506 v.Aux = sym
52507 v.AddArg(ptr)
52508 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
52509 v0.AuxInt = 1
52510 v.AddArg(v0)
52511 v.AddArg(mem)
52512 return true
52513 }
52514 // match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem)
52515 // cond:
52516 // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
52517 for {
52518 off := v.AuxInt
52519 sym := v.Aux
52520 mem := v.Args[2]
52521 ptr := v.Args[0]
52522 v_1 := v.Args[1]
52523 if v_1.Op != OpAMD64FlagGT_UGT {
52524 break
52525 }
52526 v.reset(OpAMD64MOVBstore)
52527 v.AuxInt = off
52528 v.Aux = sym
52529 v.AddArg(ptr)
52530 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
52531 v0.AuxInt = 1
52532 v.AddArg(v0)
52533 v.AddArg(mem)
52534 return true
52535 }
52536 return false
52537 }
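// Note (illustrative): the SETNEstore rules above are the store-fused
// counterparts of the SETNE rules; the same bit-test, flag-folding, and
// offset-merging rewrites apply, except that the boolean result is written
// directly to memory via SETBstore or MOVBstore rather than to a register.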
52538 func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool {
52539 b := v.Block
52540 // match: (SHLL x (MOVQconst [c]))
52541 // cond:
52542 // result: (SHLLconst [c&31] x)
52543 for {
52544 _ = v.Args[1]
52545 x := v.Args[0]
52546 v_1 := v.Args[1]
52547 if v_1.Op != OpAMD64MOVQconst {
52548 break
52549 }
52550 c := v_1.AuxInt
52551 v.reset(OpAMD64SHLLconst)
52552 v.AuxInt = c & 31
52553 v.AddArg(x)
52554 return true
52555 }
52556 // match: (SHLL x (MOVLconst [c]))
52557 // cond:
52558 // result: (SHLLconst [c&31] x)
52559 for {
52560 _ = v.Args[1]
52561 x := v.Args[0]
52562 v_1 := v.Args[1]
52563 if v_1.Op != OpAMD64MOVLconst {
52564 break
52565 }
52566 c := v_1.AuxInt
52567 v.reset(OpAMD64SHLLconst)
52568 v.AuxInt = c & 31
52569 v.AddArg(x)
52570 return true
52571 }
52572 // match: (SHLL x (ADDQconst [c] y))
52573 // cond: c & 31 == 0
52574 // result: (SHLL x y)
52575 for {
52576 _ = v.Args[1]
52577 x := v.Args[0]
52578 v_1 := v.Args[1]
52579 if v_1.Op != OpAMD64ADDQconst {
52580 break
52581 }
52582 c := v_1.AuxInt
52583 y := v_1.Args[0]
52584 if !(c&31 == 0) {
52585 break
52586 }
52587 v.reset(OpAMD64SHLL)
52588 v.AddArg(x)
52589 v.AddArg(y)
52590 return true
52591 }
52592 // match: (SHLL x (NEGQ <t> (ADDQconst [c] y)))
52593 // cond: c & 31 == 0
52594 // result: (SHLL x (NEGQ <t> y))
52595 for {
52596 _ = v.Args[1]
52597 x := v.Args[0]
52598 v_1 := v.Args[1]
52599 if v_1.Op != OpAMD64NEGQ {
52600 break
52601 }
52602 t := v_1.Type
52603 v_1_0 := v_1.Args[0]
52604 if v_1_0.Op != OpAMD64ADDQconst {
52605 break
52606 }
52607 c := v_1_0.AuxInt
52608 y := v_1_0.Args[0]
52609 if !(c&31 == 0) {
52610 break
52611 }
52612 v.reset(OpAMD64SHLL)
52613 v.AddArg(x)
52614 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
52615 v0.AddArg(y)
52616 v.AddArg(v0)
52617 return true
52618 }
52619 // match: (SHLL x (ANDQconst [c] y))
52620 // cond: c & 31 == 31
52621 // result: (SHLL x y)
52622 for {
52623 _ = v.Args[1]
52624 x := v.Args[0]
52625 v_1 := v.Args[1]
52626 if v_1.Op != OpAMD64ANDQconst {
52627 break
52628 }
52629 c := v_1.AuxInt
52630 y := v_1.Args[0]
52631 if !(c&31 == 31) {
52632 break
52633 }
52634 v.reset(OpAMD64SHLL)
52635 v.AddArg(x)
52636 v.AddArg(y)
52637 return true
52638 }
52639 // match: (SHLL x (NEGQ <t> (ANDQconst [c] y)))
52640 // cond: c & 31 == 31
52641 // result: (SHLL x (NEGQ <t> y))
52642 for {
52643 _ = v.Args[1]
52644 x := v.Args[0]
52645 v_1 := v.Args[1]
52646 if v_1.Op != OpAMD64NEGQ {
52647 break
52648 }
52649 t := v_1.Type
52650 v_1_0 := v_1.Args[0]
52651 if v_1_0.Op != OpAMD64ANDQconst {
52652 break
52653 }
52654 c := v_1_0.AuxInt
52655 y := v_1_0.Args[0]
52656 if !(c&31 == 31) {
52657 break
52658 }
52659 v.reset(OpAMD64SHLL)
52660 v.AddArg(x)
52661 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
52662 v0.AddArg(y)
52663 v.AddArg(v0)
52664 return true
52665 }
52666 // match: (SHLL x (ADDLconst [c] y))
52667 // cond: c & 31 == 0
52668 // result: (SHLL x y)
52669 for {
52670 _ = v.Args[1]
52671 x := v.Args[0]
52672 v_1 := v.Args[1]
52673 if v_1.Op != OpAMD64ADDLconst {
52674 break
52675 }
52676 c := v_1.AuxInt
52677 y := v_1.Args[0]
52678 if !(c&31 == 0) {
52679 break
52680 }
52681 v.reset(OpAMD64SHLL)
52682 v.AddArg(x)
52683 v.AddArg(y)
52684 return true
52685 }
52686 // match: (SHLL x (NEGL <t> (ADDLconst [c] y)))
52687 // cond: c & 31 == 0
52688 // result: (SHLL x (NEGL <t> y))
52689 for {
52690 _ = v.Args[1]
52691 x := v.Args[0]
52692 v_1 := v.Args[1]
52693 if v_1.Op != OpAMD64NEGL {
52694 break
52695 }
52696 t := v_1.Type
52697 v_1_0 := v_1.Args[0]
52698 if v_1_0.Op != OpAMD64ADDLconst {
52699 break
52700 }
52701 c := v_1_0.AuxInt
52702 y := v_1_0.Args[0]
52703 if !(c&31 == 0) {
52704 break
52705 }
52706 v.reset(OpAMD64SHLL)
52707 v.AddArg(x)
52708 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
52709 v0.AddArg(y)
52710 v.AddArg(v0)
52711 return true
52712 }
52713 // match: (SHLL x (ANDLconst [c] y))
52714 // cond: c & 31 == 31
52715 // result: (SHLL x y)
52716 for {
52717 _ = v.Args[1]
52718 x := v.Args[0]
52719 v_1 := v.Args[1]
52720 if v_1.Op != OpAMD64ANDLconst {
52721 break
52722 }
52723 c := v_1.AuxInt
52724 y := v_1.Args[0]
52725 if !(c&31 == 31) {
52726 break
52727 }
52728 v.reset(OpAMD64SHLL)
52729 v.AddArg(x)
52730 v.AddArg(y)
52731 return true
52732 }
52733 // match: (SHLL x (NEGL <t> (ANDLconst [c] y)))
52734 // cond: c & 31 == 31
52735 // result: (SHLL x (NEGL <t> y))
52736 for {
52737 _ = v.Args[1]
52738 x := v.Args[0]
52739 v_1 := v.Args[1]
52740 if v_1.Op != OpAMD64NEGL {
52741 break
52742 }
52743 t := v_1.Type
52744 v_1_0 := v_1.Args[0]
52745 if v_1_0.Op != OpAMD64ANDLconst {
52746 break
52747 }
52748 c := v_1_0.AuxInt
52749 y := v_1_0.Args[0]
52750 if !(c&31 == 31) {
52751 break
52752 }
52753 v.reset(OpAMD64SHLL)
52754 v.AddArg(x)
52755 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
52756 v0.AddArg(y)
52757 v.AddArg(v0)
52758 return true
52759 }
52760 return false
52761 }
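// Illustrative sketch (not part of the generated code): amd64 32-bit shifts
// use only the low 5 bits of CL, so masking the count in source code, as in
//
//	func shl(x uint32, s uint) uint32 {
//		return x << (s & 31)
//	}
//
// needs no real AND: the ANDQconst/ANDLconst [31] feeding the shift is
// dropped by the c & 31 == 31 rules above, leaving a bare SHLL.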
52762 func rewriteValueAMD64_OpAMD64SHLLconst_0(v *Value) bool {
52763 b := v.Block
52764 config := b.Func.Config
52765 // match: (SHLLconst [1] (SHRLconst [1] x))
52766 // cond: !config.nacl
52767 // result: (BTRLconst [0] x)
52768 for {
52769 if v.AuxInt != 1 {
52770 break
52771 }
52772 v_0 := v.Args[0]
52773 if v_0.Op != OpAMD64SHRLconst {
52774 break
52775 }
52776 if v_0.AuxInt != 1 {
52777 break
52778 }
52779 x := v_0.Args[0]
52780 if !(!config.nacl) {
52781 break
52782 }
52783 v.reset(OpAMD64BTRLconst)
52784 v.AuxInt = 0
52785 v.AddArg(x)
52786 return true
52787 }
52788 // match: (SHLLconst x [0])
52789 // cond:
52790 // result: x
52791 for {
52792 if v.AuxInt != 0 {
52793 break
52794 }
52795 x := v.Args[0]
52796 v.reset(OpCopy)
52797 v.Type = x.Type
52798 v.AddArg(x)
52799 return true
52800 }
52801 return false
52802 }
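// Illustrative sketch (not part of the generated code): clearing the low
// bit by shifting down and back up, as in
//
//	func clearBit0(x uint32) uint32 {
//		return (x >> 1) << 1
//	}
//
// matches the (SHLLconst [1] (SHRLconst [1] x)) rule above and becomes a
// single BTRL $0 (bit test and reset) on non-nacl targets.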
52803 func rewriteValueAMD64_OpAMD64SHLQ_0(v *Value) bool {
52804 b := v.Block
52805 // match: (SHLQ x (MOVQconst [c]))
52806 // cond:
52807 // result: (SHLQconst [c&63] x)
52808 for {
52809 _ = v.Args[1]
52810 x := v.Args[0]
52811 v_1 := v.Args[1]
52812 if v_1.Op != OpAMD64MOVQconst {
52813 break
52814 }
52815 c := v_1.AuxInt
52816 v.reset(OpAMD64SHLQconst)
52817 v.AuxInt = c & 63
52818 v.AddArg(x)
52819 return true
52820 }
52821 // match: (SHLQ x (MOVLconst [c]))
52822 // cond:
52823 // result: (SHLQconst [c&63] x)
52824 for {
52825 _ = v.Args[1]
52826 x := v.Args[0]
52827 v_1 := v.Args[1]
52828 if v_1.Op != OpAMD64MOVLconst {
52829 break
52830 }
52831 c := v_1.AuxInt
52832 v.reset(OpAMD64SHLQconst)
52833 v.AuxInt = c & 63
52834 v.AddArg(x)
52835 return true
52836 }
52837 // match: (SHLQ x (ADDQconst [c] y))
52838 // cond: c & 63 == 0
52839 // result: (SHLQ x y)
52840 for {
52841 _ = v.Args[1]
52842 x := v.Args[0]
52843 v_1 := v.Args[1]
52844 if v_1.Op != OpAMD64ADDQconst {
52845 break
52846 }
52847 c := v_1.AuxInt
52848 y := v_1.Args[0]
52849 if !(c&63 == 0) {
52850 break
52851 }
52852 v.reset(OpAMD64SHLQ)
52853 v.AddArg(x)
52854 v.AddArg(y)
52855 return true
52856 }
52857 // match: (SHLQ x (NEGQ <t> (ADDQconst [c] y)))
52858 // cond: c & 63 == 0
52859 // result: (SHLQ x (NEGQ <t> y))
52860 for {
52861 _ = v.Args[1]
52862 x := v.Args[0]
52863 v_1 := v.Args[1]
52864 if v_1.Op != OpAMD64NEGQ {
52865 break
52866 }
52867 t := v_1.Type
52868 v_1_0 := v_1.Args[0]
52869 if v_1_0.Op != OpAMD64ADDQconst {
52870 break
52871 }
52872 c := v_1_0.AuxInt
52873 y := v_1_0.Args[0]
52874 if !(c&63 == 0) {
52875 break
52876 }
52877 v.reset(OpAMD64SHLQ)
52878 v.AddArg(x)
52879 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
52880 v0.AddArg(y)
52881 v.AddArg(v0)
52882 return true
52883 }
52884 // match: (SHLQ x (ANDQconst [c] y))
52885 // cond: c & 63 == 63
52886 // result: (SHLQ x y)
52887 for {
52888 _ = v.Args[1]
52889 x := v.Args[0]
52890 v_1 := v.Args[1]
52891 if v_1.Op != OpAMD64ANDQconst {
52892 break
52893 }
52894 c := v_1.AuxInt
52895 y := v_1.Args[0]
52896 if !(c&63 == 63) {
52897 break
52898 }
52899 v.reset(OpAMD64SHLQ)
52900 v.AddArg(x)
52901 v.AddArg(y)
52902 return true
52903 }
52904 // match: (SHLQ x (NEGQ <t> (ANDQconst [c] y)))
52905 // cond: c & 63 == 63
52906 // result: (SHLQ x (NEGQ <t> y))
52907 for {
52908 _ = v.Args[1]
52909 x := v.Args[0]
52910 v_1 := v.Args[1]
52911 if v_1.Op != OpAMD64NEGQ {
52912 break
52913 }
52914 t := v_1.Type
52915 v_1_0 := v_1.Args[0]
52916 if v_1_0.Op != OpAMD64ANDQconst {
52917 break
52918 }
52919 c := v_1_0.AuxInt
52920 y := v_1_0.Args[0]
52921 if !(c&63 == 63) {
52922 break
52923 }
52924 v.reset(OpAMD64SHLQ)
52925 v.AddArg(x)
52926 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
52927 v0.AddArg(y)
52928 v.AddArg(v0)
52929 return true
52930 }
52931 // match: (SHLQ x (ADDLconst [c] y))
52932 // cond: c & 63 == 0
52933 // result: (SHLQ x y)
52934 for {
52935 _ = v.Args[1]
52936 x := v.Args[0]
52937 v_1 := v.Args[1]
52938 if v_1.Op != OpAMD64ADDLconst {
52939 break
52940 }
52941 c := v_1.AuxInt
52942 y := v_1.Args[0]
52943 if !(c&63 == 0) {
52944 break
52945 }
52946 v.reset(OpAMD64SHLQ)
52947 v.AddArg(x)
52948 v.AddArg(y)
52949 return true
52950 }
52951 // match: (SHLQ x (NEGL <t> (ADDLconst [c] y)))
52952 // cond: c & 63 == 0
52953 // result: (SHLQ x (NEGL <t> y))
52954 for {
52955 _ = v.Args[1]
52956 x := v.Args[0]
52957 v_1 := v.Args[1]
52958 if v_1.Op != OpAMD64NEGL {
52959 break
52960 }
52961 t := v_1.Type
52962 v_1_0 := v_1.Args[0]
52963 if v_1_0.Op != OpAMD64ADDLconst {
52964 break
52965 }
52966 c := v_1_0.AuxInt
52967 y := v_1_0.Args[0]
52968 if !(c&63 == 0) {
52969 break
52970 }
52971 v.reset(OpAMD64SHLQ)
52972 v.AddArg(x)
52973 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
52974 v0.AddArg(y)
52975 v.AddArg(v0)
52976 return true
52977 }
52978 // match: (SHLQ x (ANDLconst [c] y))
52979 // cond: c & 63 == 63
52980 // result: (SHLQ x y)
52981 for {
52982 _ = v.Args[1]
52983 x := v.Args[0]
52984 v_1 := v.Args[1]
52985 if v_1.Op != OpAMD64ANDLconst {
52986 break
52987 }
52988 c := v_1.AuxInt
52989 y := v_1.Args[0]
52990 if !(c&63 == 63) {
52991 break
52992 }
52993 v.reset(OpAMD64SHLQ)
52994 v.AddArg(x)
52995 v.AddArg(y)
52996 return true
52997 }
52998 // match: (SHLQ x (NEGL <t> (ANDLconst [c] y)))
52999 // cond: c & 63 == 63
53000 // result: (SHLQ x (NEGL <t> y))
53001 for {
53002 _ = v.Args[1]
53003 x := v.Args[0]
53004 v_1 := v.Args[1]
53005 if v_1.Op != OpAMD64NEGL {
53006 break
53007 }
53008 t := v_1.Type
53009 v_1_0 := v_1.Args[0]
53010 if v_1_0.Op != OpAMD64ANDLconst {
53011 break
53012 }
53013 c := v_1_0.AuxInt
53014 y := v_1_0.Args[0]
53015 if !(c&63 == 63) {
53016 break
53017 }
53018 v.reset(OpAMD64SHLQ)
53019 v.AddArg(x)
53020 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
53021 v0.AddArg(y)
53022 v.AddArg(v0)
53023 return true
53024 }
53025 return false
53026 }
53027 func rewriteValueAMD64_OpAMD64SHLQconst_0(v *Value) bool {
53028 b := v.Block
53029 config := b.Func.Config
53030 // match: (SHLQconst [1] (SHRQconst [1] x))
53031 // cond: !config.nacl
53032 // result: (BTRQconst [0] x)
53033 for {
53034 if v.AuxInt != 1 {
53035 break
53036 }
53037 v_0 := v.Args[0]
53038 if v_0.Op != OpAMD64SHRQconst {
53039 break
53040 }
53041 if v_0.AuxInt != 1 {
53042 break
53043 }
53044 x := v_0.Args[0]
53045 if !(!config.nacl) {
53046 break
53047 }
53048 v.reset(OpAMD64BTRQconst)
53049 v.AuxInt = 0
53050 v.AddArg(x)
53051 return true
53052 }
53053 // match: (SHLQconst x [0])
53054 // cond:
53055 // result: x
53056 for {
53057 if v.AuxInt != 0 {
53058 break
53059 }
53060 x := v.Args[0]
53061 v.reset(OpCopy)
53062 v.Type = x.Type
53063 v.AddArg(x)
53064 return true
53065 }
53066 return false
53067 }
53068 func rewriteValueAMD64_OpAMD64SHRB_0(v *Value) bool {
53069 // match: (SHRB x (MOVQconst [c]))
53070 // cond: c&31 < 8
53071 // result: (SHRBconst [c&31] x)
53072 for {
53073 _ = v.Args[1]
53074 x := v.Args[0]
53075 v_1 := v.Args[1]
53076 if v_1.Op != OpAMD64MOVQconst {
53077 break
53078 }
53079 c := v_1.AuxInt
53080 if !(c&31 < 8) {
53081 break
53082 }
53083 v.reset(OpAMD64SHRBconst)
53084 v.AuxInt = c & 31
53085 v.AddArg(x)
53086 return true
53087 }
53088 // match: (SHRB x (MOVLconst [c]))
53089 // cond: c&31 < 8
53090 // result: (SHRBconst [c&31] x)
53091 for {
53092 _ = v.Args[1]
53093 x := v.Args[0]
53094 v_1 := v.Args[1]
53095 if v_1.Op != OpAMD64MOVLconst {
53096 break
53097 }
53098 c := v_1.AuxInt
53099 if !(c&31 < 8) {
53100 break
53101 }
53102 v.reset(OpAMD64SHRBconst)
53103 v.AuxInt = c & 31
53104 v.AddArg(x)
53105 return true
53106 }
53107 // match: (SHRB _ (MOVQconst [c]))
53108 // cond: c&31 >= 8
53109 // result: (MOVLconst [0])
53110 for {
53111 _ = v.Args[1]
53112 v_1 := v.Args[1]
53113 if v_1.Op != OpAMD64MOVQconst {
53114 break
53115 }
53116 c := v_1.AuxInt
53117 if !(c&31 >= 8) {
53118 break
53119 }
53120 v.reset(OpAMD64MOVLconst)
53121 v.AuxInt = 0
53122 return true
53123 }
53124 // match: (SHRB _ (MOVLconst [c]))
53125 // cond: c&31 >= 8
53126 // result: (MOVLconst [0])
53127 for {
53128 _ = v.Args[1]
53129 v_1 := v.Args[1]
53130 if v_1.Op != OpAMD64MOVLconst {
53131 break
53132 }
53133 c := v_1.AuxInt
53134 if !(c&31 >= 8) {
53135 break
53136 }
53137 v.reset(OpAMD64MOVLconst)
53138 v.AuxInt = 0
53139 return true
53140 }
53141 return false
53142 }
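// Note (illustrative): SHRB operates on an 8-bit value, so the rules above
// keep a constant count only when the masked count is still in range
// (c&31 < 8) and otherwise fold the whole shift to the constant 0, since
// shifting a byte right by 8 or more positions leaves no bits set.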
53143 func rewriteValueAMD64_OpAMD64SHRBconst_0(v *Value) bool {
53144 // match: (SHRBconst x [0])
53145 // cond:
53146 // result: x
53147 for {
53148 if v.AuxInt != 0 {
53149 break
53150 }
53151 x := v.Args[0]
53152 v.reset(OpCopy)
53153 v.Type = x.Type
53154 v.AddArg(x)
53155 return true
53156 }
53157 return false
53158 }
53159 func rewriteValueAMD64_OpAMD64SHRL_0(v *Value) bool {
53160 b := v.Block
53161 // match: (SHRL x (MOVQconst [c]))
53162 // cond:
53163 // result: (SHRLconst [c&31] x)
53164 for {
53165 _ = v.Args[1]
53166 x := v.Args[0]
53167 v_1 := v.Args[1]
53168 if v_1.Op != OpAMD64MOVQconst {
53169 break
53170 }
53171 c := v_1.AuxInt
53172 v.reset(OpAMD64SHRLconst)
53173 v.AuxInt = c & 31
53174 v.AddArg(x)
53175 return true
53176 }
53177 // match: (SHRL x (MOVLconst [c]))
53178 // cond:
53179 // result: (SHRLconst [c&31] x)
53180 for {
53181 _ = v.Args[1]
53182 x := v.Args[0]
53183 v_1 := v.Args[1]
53184 if v_1.Op != OpAMD64MOVLconst {
53185 break
53186 }
53187 c := v_1.AuxInt
53188 v.reset(OpAMD64SHRLconst)
53189 v.AuxInt = c & 31
53190 v.AddArg(x)
53191 return true
53192 }
53193 // match: (SHRL x (ADDQconst [c] y))
53194 // cond: c & 31 == 0
53195 // result: (SHRL x y)
53196 for {
53197 _ = v.Args[1]
53198 x := v.Args[0]
53199 v_1 := v.Args[1]
53200 if v_1.Op != OpAMD64ADDQconst {
53201 break
53202 }
53203 c := v_1.AuxInt
53204 y := v_1.Args[0]
53205 if !(c&31 == 0) {
53206 break
53207 }
53208 v.reset(OpAMD64SHRL)
53209 v.AddArg(x)
53210 v.AddArg(y)
53211 return true
53212 }
53213 // match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
53214 // cond: c & 31 == 0
53215 // result: (SHRL x (NEGQ <t> y))
53216 for {
53217 _ = v.Args[1]
53218 x := v.Args[0]
53219 v_1 := v.Args[1]
53220 if v_1.Op != OpAMD64NEGQ {
53221 break
53222 }
53223 t := v_1.Type
53224 v_1_0 := v_1.Args[0]
53225 if v_1_0.Op != OpAMD64ADDQconst {
53226 break
53227 }
53228 c := v_1_0.AuxInt
53229 y := v_1_0.Args[0]
53230 if !(c&31 == 0) {
53231 break
53232 }
53233 v.reset(OpAMD64SHRL)
53234 v.AddArg(x)
53235 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
53236 v0.AddArg(y)
53237 v.AddArg(v0)
53238 return true
53239 }
53240 // match: (SHRL x (ANDQconst [c] y))
53241 // cond: c & 31 == 31
53242 // result: (SHRL x y)
53243 for {
53244 _ = v.Args[1]
53245 x := v.Args[0]
53246 v_1 := v.Args[1]
53247 if v_1.Op != OpAMD64ANDQconst {
53248 break
53249 }
53250 c := v_1.AuxInt
53251 y := v_1.Args[0]
53252 if !(c&31 == 31) {
53253 break
53254 }
53255 v.reset(OpAMD64SHRL)
53256 v.AddArg(x)
53257 v.AddArg(y)
53258 return true
53259 }
53260 // match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
53261 // cond: c & 31 == 31
53262 // result: (SHRL x (NEGQ <t> y))
53263 for {
53264 _ = v.Args[1]
53265 x := v.Args[0]
53266 v_1 := v.Args[1]
53267 if v_1.Op != OpAMD64NEGQ {
53268 break
53269 }
53270 t := v_1.Type
53271 v_1_0 := v_1.Args[0]
53272 if v_1_0.Op != OpAMD64ANDQconst {
53273 break
53274 }
53275 c := v_1_0.AuxInt
53276 y := v_1_0.Args[0]
53277 if !(c&31 == 31) {
53278 break
53279 }
53280 v.reset(OpAMD64SHRL)
53281 v.AddArg(x)
53282 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
53283 v0.AddArg(y)
53284 v.AddArg(v0)
53285 return true
53286 }
53287 // match: (SHRL x (ADDLconst [c] y))
53288 // cond: c & 31 == 0
53289 // result: (SHRL x y)
53290 for {
53291 _ = v.Args[1]
53292 x := v.Args[0]
53293 v_1 := v.Args[1]
53294 if v_1.Op != OpAMD64ADDLconst {
53295 break
53296 }
53297 c := v_1.AuxInt
53298 y := v_1.Args[0]
53299 if !(c&31 == 0) {
53300 break
53301 }
53302 v.reset(OpAMD64SHRL)
53303 v.AddArg(x)
53304 v.AddArg(y)
53305 return true
53306 }
53307 // match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
53308 // cond: c & 31 == 0
53309 // result: (SHRL x (NEGL <t> y))
53310 for {
53311 _ = v.Args[1]
53312 x := v.Args[0]
53313 v_1 := v.Args[1]
53314 if v_1.Op != OpAMD64NEGL {
53315 break
53316 }
53317 t := v_1.Type
53318 v_1_0 := v_1.Args[0]
53319 if v_1_0.Op != OpAMD64ADDLconst {
53320 break
53321 }
53322 c := v_1_0.AuxInt
53323 y := v_1_0.Args[0]
53324 if !(c&31 == 0) {
53325 break
53326 }
53327 v.reset(OpAMD64SHRL)
53328 v.AddArg(x)
53329 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
53330 v0.AddArg(y)
53331 v.AddArg(v0)
53332 return true
53333 }
53334 // match: (SHRL x (ANDLconst [c] y))
53335 // cond: c & 31 == 31
53336 // result: (SHRL x y)
53337 for {
53338 _ = v.Args[1]
53339 x := v.Args[0]
53340 v_1 := v.Args[1]
53341 if v_1.Op != OpAMD64ANDLconst {
53342 break
53343 }
53344 c := v_1.AuxInt
53345 y := v_1.Args[0]
53346 if !(c&31 == 31) {
53347 break
53348 }
53349 v.reset(OpAMD64SHRL)
53350 v.AddArg(x)
53351 v.AddArg(y)
53352 return true
53353 }
53354 // match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
53355 // cond: c & 31 == 31
53356 // result: (SHRL x (NEGL <t> y))
53357 for {
53358 _ = v.Args[1]
53359 x := v.Args[0]
53360 v_1 := v.Args[1]
53361 if v_1.Op != OpAMD64NEGL {
53362 break
53363 }
53364 t := v_1.Type
53365 v_1_0 := v_1.Args[0]
53366 if v_1_0.Op != OpAMD64ANDLconst {
53367 break
53368 }
53369 c := v_1_0.AuxInt
53370 y := v_1_0.Args[0]
53371 if !(c&31 == 31) {
53372 break
53373 }
53374 v.reset(OpAMD64SHRL)
53375 v.AddArg(x)
53376 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
53377 v0.AddArg(y)
53378 v.AddArg(v0)
53379 return true
53380 }
53381 return false
53382 }
53383 func rewriteValueAMD64_OpAMD64SHRLconst_0(v *Value) bool {
53384 b := v.Block
53385 config := b.Func.Config
53386 // match: (SHRLconst [1] (SHLLconst [1] x))
53387 // cond: !config.nacl
53388 // result: (BTRLconst [31] x)
53389 for {
53390 if v.AuxInt != 1 {
53391 break
53392 }
53393 v_0 := v.Args[0]
53394 if v_0.Op != OpAMD64SHLLconst {
53395 break
53396 }
53397 if v_0.AuxInt != 1 {
53398 break
53399 }
53400 x := v_0.Args[0]
53401 if !(!config.nacl) {
53402 break
53403 }
53404 v.reset(OpAMD64BTRLconst)
53405 v.AuxInt = 31
53406 v.AddArg(x)
53407 return true
53408 }
53409 // match: (SHRLconst x [0])
53410 // cond:
53411 // result: x
53412 for {
53413 if v.AuxInt != 0 {
53414 break
53415 }
53416 x := v.Args[0]
53417 v.reset(OpCopy)
53418 v.Type = x.Type
53419 v.AddArg(x)
53420 return true
53421 }
53422 return false
53423 }
53424 func rewriteValueAMD64_OpAMD64SHRQ_0(v *Value) bool {
53425 b := v.Block
53426 // match: (SHRQ x (MOVQconst [c]))
53427 // cond:
53428 // result: (SHRQconst [c&63] x)
53429 for {
53430 _ = v.Args[1]
53431 x := v.Args[0]
53432 v_1 := v.Args[1]
53433 if v_1.Op != OpAMD64MOVQconst {
53434 break
53435 }
53436 c := v_1.AuxInt
53437 v.reset(OpAMD64SHRQconst)
53438 v.AuxInt = c & 63
53439 v.AddArg(x)
53440 return true
53441 }
53442 // match: (SHRQ x (MOVLconst [c]))
53443 // cond:
53444 // result: (SHRQconst [c&63] x)
53445 for {
53446 _ = v.Args[1]
53447 x := v.Args[0]
53448 v_1 := v.Args[1]
53449 if v_1.Op != OpAMD64MOVLconst {
53450 break
53451 }
53452 c := v_1.AuxInt
53453 v.reset(OpAMD64SHRQconst)
53454 v.AuxInt = c & 63
53455 v.AddArg(x)
53456 return true
53457 }
53458 // match: (SHRQ x (ADDQconst [c] y))
53459 // cond: c & 63 == 0
53460 // result: (SHRQ x y)
53461 for {
53462 _ = v.Args[1]
53463 x := v.Args[0]
53464 v_1 := v.Args[1]
53465 if v_1.Op != OpAMD64ADDQconst {
53466 break
53467 }
53468 c := v_1.AuxInt
53469 y := v_1.Args[0]
53470 if !(c&63 == 0) {
53471 break
53472 }
53473 v.reset(OpAMD64SHRQ)
53474 v.AddArg(x)
53475 v.AddArg(y)
53476 return true
53477 }
53478 // match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
53479 // cond: c & 63 == 0
53480 // result: (SHRQ x (NEGQ <t> y))
53481 for {
53482 _ = v.Args[1]
53483 x := v.Args[0]
53484 v_1 := v.Args[1]
53485 if v_1.Op != OpAMD64NEGQ {
53486 break
53487 }
53488 t := v_1.Type
53489 v_1_0 := v_1.Args[0]
53490 if v_1_0.Op != OpAMD64ADDQconst {
53491 break
53492 }
53493 c := v_1_0.AuxInt
53494 y := v_1_0.Args[0]
53495 if !(c&63 == 0) {
53496 break
53497 }
53498 v.reset(OpAMD64SHRQ)
53499 v.AddArg(x)
53500 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
53501 v0.AddArg(y)
53502 v.AddArg(v0)
53503 return true
53504 }
53505 // match: (SHRQ x (ANDQconst [c] y))
53506 // cond: c & 63 == 63
53507 // result: (SHRQ x y)
53508 for {
53509 _ = v.Args[1]
53510 x := v.Args[0]
53511 v_1 := v.Args[1]
53512 if v_1.Op != OpAMD64ANDQconst {
53513 break
53514 }
53515 c := v_1.AuxInt
53516 y := v_1.Args[0]
53517 if !(c&63 == 63) {
53518 break
53519 }
53520 v.reset(OpAMD64SHRQ)
53521 v.AddArg(x)
53522 v.AddArg(y)
53523 return true
53524 }
53525 // match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
53526 // cond: c & 63 == 63
53527 // result: (SHRQ x (NEGQ <t> y))
53528 for {
53529 _ = v.Args[1]
53530 x := v.Args[0]
53531 v_1 := v.Args[1]
53532 if v_1.Op != OpAMD64NEGQ {
53533 break
53534 }
53535 t := v_1.Type
53536 v_1_0 := v_1.Args[0]
53537 if v_1_0.Op != OpAMD64ANDQconst {
53538 break
53539 }
53540 c := v_1_0.AuxInt
53541 y := v_1_0.Args[0]
53542 if !(c&63 == 63) {
53543 break
53544 }
53545 v.reset(OpAMD64SHRQ)
53546 v.AddArg(x)
53547 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
53548 v0.AddArg(y)
53549 v.AddArg(v0)
53550 return true
53551 }
53552 // match: (SHRQ x (ADDLconst [c] y))
53553 // cond: c & 63 == 0
53554 // result: (SHRQ x y)
53555 for {
53556 _ = v.Args[1]
53557 x := v.Args[0]
53558 v_1 := v.Args[1]
53559 if v_1.Op != OpAMD64ADDLconst {
53560 break
53561 }
53562 c := v_1.AuxInt
53563 y := v_1.Args[0]
53564 if !(c&63 == 0) {
53565 break
53566 }
53567 v.reset(OpAMD64SHRQ)
53568 v.AddArg(x)
53569 v.AddArg(y)
53570 return true
53571 }
53572 // match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
53573 // cond: c & 63 == 0
53574 // result: (SHRQ x (NEGL <t> y))
53575 for {
53576 _ = v.Args[1]
53577 x := v.Args[0]
53578 v_1 := v.Args[1]
53579 if v_1.Op != OpAMD64NEGL {
53580 break
53581 }
53582 t := v_1.Type
53583 v_1_0 := v_1.Args[0]
53584 if v_1_0.Op != OpAMD64ADDLconst {
53585 break
53586 }
53587 c := v_1_0.AuxInt
53588 y := v_1_0.Args[0]
53589 if !(c&63 == 0) {
53590 break
53591 }
53592 v.reset(OpAMD64SHRQ)
53593 v.AddArg(x)
53594 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
53595 v0.AddArg(y)
53596 v.AddArg(v0)
53597 return true
53598 }
53599 // match: (SHRQ x (ANDLconst [c] y))
53600 // cond: c & 63 == 63
53601 // result: (SHRQ x y)
53602 for {
53603 _ = v.Args[1]
53604 x := v.Args[0]
53605 v_1 := v.Args[1]
53606 if v_1.Op != OpAMD64ANDLconst {
53607 break
53608 }
53609 c := v_1.AuxInt
53610 y := v_1.Args[0]
53611 if !(c&63 == 63) {
53612 break
53613 }
53614 v.reset(OpAMD64SHRQ)
53615 v.AddArg(x)
53616 v.AddArg(y)
53617 return true
53618 }
53619 // match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
53620 // cond: c & 63 == 63
53621 // result: (SHRQ x (NEGL <t> y))
53622 for {
53623 _ = v.Args[1]
53624 x := v.Args[0]
53625 v_1 := v.Args[1]
53626 if v_1.Op != OpAMD64NEGL {
53627 break
53628 }
53629 t := v_1.Type
53630 v_1_0 := v_1.Args[0]
53631 if v_1_0.Op != OpAMD64ANDLconst {
53632 break
53633 }
53634 c := v_1_0.AuxInt
53635 y := v_1_0.Args[0]
53636 if !(c&63 == 63) {
53637 break
53638 }
53639 v.reset(OpAMD64SHRQ)
53640 v.AddArg(x)
53641 v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
53642 v0.AddArg(y)
53643 v.AddArg(v0)
53644 return true
53645 }
53646 return false
53647 }
53648 func rewriteValueAMD64_OpAMD64SHRQconst_0(v *Value) bool {
53649 b := v.Block
53650 config := b.Func.Config
53651 // match: (SHRQconst [1] (SHLQconst [1] x))
53652 // cond: !config.nacl
53653 // result: (BTRQconst [63] x)
53654 for {
53655 if v.AuxInt != 1 {
53656 break
53657 }
53658 v_0 := v.Args[0]
53659 if v_0.Op != OpAMD64SHLQconst {
53660 break
53661 }
53662 if v_0.AuxInt != 1 {
53663 break
53664 }
53665 x := v_0.Args[0]
53666 if !(!config.nacl) {
53667 break
53668 }
53669 v.reset(OpAMD64BTRQconst)
53670 v.AuxInt = 63
53671 v.AddArg(x)
53672 return true
53673 }
53674 // match: (SHRQconst x [0])
53675 // cond:
53676 // result: x
53677 for {
53678 if v.AuxInt != 0 {
53679 break
53680 }
53681 x := v.Args[0]
53682 v.reset(OpCopy)
53683 v.Type = x.Type
53684 v.AddArg(x)
53685 return true
53686 }
53687 return false
53688 }
53689 func rewriteValueAMD64_OpAMD64SHRW_0(v *Value) bool {
53690 // match: (SHRW x (MOVQconst [c]))
53691 // cond: c&31 < 16
53692 // result: (SHRWconst [c&31] x)
53693 for {
53694 _ = v.Args[1]
53695 x := v.Args[0]
53696 v_1 := v.Args[1]
53697 if v_1.Op != OpAMD64MOVQconst {
53698 break
53699 }
53700 c := v_1.AuxInt
53701 if !(c&31 < 16) {
53702 break
53703 }
53704 v.reset(OpAMD64SHRWconst)
53705 v.AuxInt = c & 31
53706 v.AddArg(x)
53707 return true
53708 }
53709 // match: (SHRW x (MOVLconst [c]))
53710 // cond: c&31 < 16
53711 // result: (SHRWconst [c&31] x)
53712 for {
53713 _ = v.Args[1]
53714 x := v.Args[0]
53715 v_1 := v.Args[1]
53716 if v_1.Op != OpAMD64MOVLconst {
53717 break
53718 }
53719 c := v_1.AuxInt
53720 if !(c&31 < 16) {
53721 break
53722 }
53723 v.reset(OpAMD64SHRWconst)
53724 v.AuxInt = c & 31
53725 v.AddArg(x)
53726 return true
53727 }
53728 // match: (SHRW _ (MOVQconst [c]))
53729 // cond: c&31 >= 16
53730 // result: (MOVLconst [0])
53731 for {
53732 _ = v.Args[1]
53733 v_1 := v.Args[1]
53734 if v_1.Op != OpAMD64MOVQconst {
53735 break
53736 }
53737 c := v_1.AuxInt
53738 if !(c&31 >= 16) {
53739 break
53740 }
53741 v.reset(OpAMD64MOVLconst)
53742 v.AuxInt = 0
53743 return true
53744 }
53745 // match: (SHRW _ (MOVLconst [c]))
53746 // cond: c&31 >= 16
53747 // result: (MOVLconst [0])
53748 for {
53749 _ = v.Args[1]
53750 v_1 := v.Args[1]
53751 if v_1.Op != OpAMD64MOVLconst {
53752 break
53753 }
53754 c := v_1.AuxInt
53755 if !(c&31 >= 16) {
53756 break
53757 }
53758 v.reset(OpAMD64MOVLconst)
53759 v.AuxInt = 0
53760 return true
53761 }
53762 return false
53763 }
53764 func rewriteValueAMD64_OpAMD64SHRWconst_0(v *Value) bool {
53765 // match: (SHRWconst x [0])
53766 // cond:
53767 // result: x
53768 for {
53769 if v.AuxInt != 0 {
53770 break
53771 }
53772 x := v.Args[0]
53773 v.reset(OpCopy)
53774 v.Type = x.Type
53775 v.AddArg(x)
53776 return true
53777 }
53778 return false
53779 }
53780 func rewriteValueAMD64_OpAMD64SUBL_0(v *Value) bool {
53781 b := v.Block
53782 // match: (SUBL x (MOVLconst [c]))
53783 // cond:
53784 // result: (SUBLconst x [c])
53785 for {
53786 _ = v.Args[1]
53787 x := v.Args[0]
53788 v_1 := v.Args[1]
53789 if v_1.Op != OpAMD64MOVLconst {
53790 break
53791 }
53792 c := v_1.AuxInt
53793 v.reset(OpAMD64SUBLconst)
53794 v.AuxInt = c
53795 v.AddArg(x)
53796 return true
53797 }
53798 // match: (SUBL (MOVLconst [c]) x)
53799 // cond:
53800 // result: (NEGL (SUBLconst <v.Type> x [c]))
53801 for {
53802 x := v.Args[1]
53803 v_0 := v.Args[0]
53804 if v_0.Op != OpAMD64MOVLconst {
53805 break
53806 }
53807 c := v_0.AuxInt
53808 v.reset(OpAMD64NEGL)
53809 v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
53810 v0.AuxInt = c
53811 v0.AddArg(x)
53812 v.AddArg(v0)
53813 return true
53814 }
53815 // match: (SUBL x x)
53816 // cond:
53817 // result: (MOVLconst [0])
53818 for {
53819 x := v.Args[1]
53820 if x != v.Args[0] {
53821 break
53822 }
53823 v.reset(OpAMD64MOVLconst)
53824 v.AuxInt = 0
53825 return true
53826 }
53827 // match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
53828 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
53829 // result: (SUBLload x [off] {sym} ptr mem)
53830 for {
53831 _ = v.Args[1]
53832 x := v.Args[0]
53833 l := v.Args[1]
53834 if l.Op != OpAMD64MOVLload {
53835 break
53836 }
53837 off := l.AuxInt
53838 sym := l.Aux
53839 mem := l.Args[1]
53840 ptr := l.Args[0]
53841 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
53842 break
53843 }
53844 v.reset(OpAMD64SUBLload)
53845 v.AuxInt = off
53846 v.Aux = sym
53847 v.AddArg(x)
53848 v.AddArg(ptr)
53849 v.AddArg(mem)
53850 return true
53851 }
53852 return false
53853 }
53854 func rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool {
53855 // match: (SUBLconst [c] x)
53856 // cond: int32(c) == 0
53857 // result: x
53858 for {
53859 c := v.AuxInt
53860 x := v.Args[0]
53861 if !(int32(c) == 0) {
53862 break
53863 }
53864 v.reset(OpCopy)
53865 v.Type = x.Type
53866 v.AddArg(x)
53867 return true
53868 }
53869 // match: (SUBLconst [c] x)
53870 // cond:
53871 // result: (ADDLconst [int64(int32(-c))] x)
53872 for {
53873 c := v.AuxInt
53874 x := v.Args[0]
53875 v.reset(OpAMD64ADDLconst)
53876 v.AuxInt = int64(int32(-c))
53877 v.AddArg(x)
53878 return true
53879 }
53880 }
53881 func rewriteValueAMD64_OpAMD64SUBLload_0(v *Value) bool {
53882 b := v.Block
53883 typ := &b.Func.Config.Types
53884 // match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem)
53885 // cond: is32Bit(off1+off2)
53886 // result: (SUBLload [off1+off2] {sym} val base mem)
53887 for {
53888 off1 := v.AuxInt
53889 sym := v.Aux
53890 mem := v.Args[2]
53891 val := v.Args[0]
53892 v_1 := v.Args[1]
53893 if v_1.Op != OpAMD64ADDQconst {
53894 break
53895 }
53896 off2 := v_1.AuxInt
53897 base := v_1.Args[0]
53898 if !(is32Bit(off1 + off2)) {
53899 break
53900 }
53901 v.reset(OpAMD64SUBLload)
53902 v.AuxInt = off1 + off2
53903 v.Aux = sym
53904 v.AddArg(val)
53905 v.AddArg(base)
53906 v.AddArg(mem)
53907 return true
53908 }
53909 // match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
53910 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
53911 // result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
53912 for {
53913 off1 := v.AuxInt
53914 sym1 := v.Aux
53915 mem := v.Args[2]
53916 val := v.Args[0]
53917 v_1 := v.Args[1]
53918 if v_1.Op != OpAMD64LEAQ {
53919 break
53920 }
53921 off2 := v_1.AuxInt
53922 sym2 := v_1.Aux
53923 base := v_1.Args[0]
53924 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
53925 break
53926 }
53927 v.reset(OpAMD64SUBLload)
53928 v.AuxInt = off1 + off2
53929 v.Aux = mergeSym(sym1, sym2)
53930 v.AddArg(val)
53931 v.AddArg(base)
53932 v.AddArg(mem)
53933 return true
53934 }
53935 // match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
53936 // cond:
53937 // result: (SUBL x (MOVLf2i y))
53938 for {
53939 off := v.AuxInt
53940 sym := v.Aux
53941 _ = v.Args[2]
53942 x := v.Args[0]
53943 ptr := v.Args[1]
53944 v_2 := v.Args[2]
53945 if v_2.Op != OpAMD64MOVSSstore {
53946 break
53947 }
53948 if v_2.AuxInt != off {
53949 break
53950 }
53951 if v_2.Aux != sym {
53952 break
53953 }
53954 _ = v_2.Args[2]
53955 if ptr != v_2.Args[0] {
53956 break
53957 }
53958 y := v_2.Args[1]
53959 v.reset(OpAMD64SUBL)
53960 v.AddArg(x)
53961 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
53962 v0.AddArg(y)
53963 v.AddArg(v0)
53964 return true
53965 }
53966 return false
53967 }
53968 func rewriteValueAMD64_OpAMD64SUBLmodify_0(v *Value) bool {
53969 // match: (SUBLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
53970 // cond: is32Bit(off1+off2)
53971 // result: (SUBLmodify [off1+off2] {sym} base val mem)
53972 for {
53973 off1 := v.AuxInt
53974 sym := v.Aux
53975 mem := v.Args[2]
53976 v_0 := v.Args[0]
53977 if v_0.Op != OpAMD64ADDQconst {
53978 break
53979 }
53980 off2 := v_0.AuxInt
53981 base := v_0.Args[0]
53982 val := v.Args[1]
53983 if !(is32Bit(off1 + off2)) {
53984 break
53985 }
53986 v.reset(OpAMD64SUBLmodify)
53987 v.AuxInt = off1 + off2
53988 v.Aux = sym
53989 v.AddArg(base)
53990 v.AddArg(val)
53991 v.AddArg(mem)
53992 return true
53993 }
53994 // match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
53995 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
53996 // result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
53997 for {
53998 off1 := v.AuxInt
53999 sym1 := v.Aux
54000 mem := v.Args[2]
54001 v_0 := v.Args[0]
54002 if v_0.Op != OpAMD64LEAQ {
54003 break
54004 }
54005 off2 := v_0.AuxInt
54006 sym2 := v_0.Aux
54007 base := v_0.Args[0]
54008 val := v.Args[1]
54009 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
54010 break
54011 }
54012 v.reset(OpAMD64SUBLmodify)
54013 v.AuxInt = off1 + off2
54014 v.Aux = mergeSym(sym1, sym2)
54015 v.AddArg(base)
54016 v.AddArg(val)
54017 v.AddArg(mem)
54018 return true
54019 }
54020 return false
54021 }
54022 func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool {
54023 b := v.Block
54024 // match: (SUBQ x (MOVQconst [c]))
54025 // cond: is32Bit(c)
54026 // result: (SUBQconst x [c])
54027 for {
54028 _ = v.Args[1]
54029 x := v.Args[0]
54030 v_1 := v.Args[1]
54031 if v_1.Op != OpAMD64MOVQconst {
54032 break
54033 }
54034 c := v_1.AuxInt
54035 if !(is32Bit(c)) {
54036 break
54037 }
54038 v.reset(OpAMD64SUBQconst)
54039 v.AuxInt = c
54040 v.AddArg(x)
54041 return true
54042 }
54043 // match: (SUBQ (MOVQconst [c]) x)
54044 // cond: is32Bit(c)
54045 // result: (NEGQ (SUBQconst <v.Type> x [c]))
54046 for {
54047 x := v.Args[1]
54048 v_0 := v.Args[0]
54049 if v_0.Op != OpAMD64MOVQconst {
54050 break
54051 }
54052 c := v_0.AuxInt
54053 if !(is32Bit(c)) {
54054 break
54055 }
54056 v.reset(OpAMD64NEGQ)
54057 v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
54058 v0.AuxInt = c
54059 v0.AddArg(x)
54060 v.AddArg(v0)
54061 return true
54062 }
54063 // match: (SUBQ x x)
54064 // cond:
54065 // result: (MOVQconst [0])
54066 for {
54067 x := v.Args[1]
54068 if x != v.Args[0] {
54069 break
54070 }
54071 v.reset(OpAMD64MOVQconst)
54072 v.AuxInt = 0
54073 return true
54074 }
54075 // match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
54076 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
54077 // result: (SUBQload x [off] {sym} ptr mem)
54078 for {
54079 _ = v.Args[1]
54080 x := v.Args[0]
54081 l := v.Args[1]
54082 if l.Op != OpAMD64MOVQload {
54083 break
54084 }
54085 off := l.AuxInt
54086 sym := l.Aux
54087 mem := l.Args[1]
54088 ptr := l.Args[0]
54089 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
54090 break
54091 }
54092 v.reset(OpAMD64SUBQload)
54093 v.AuxInt = off
54094 v.Aux = sym
54095 v.AddArg(x)
54096 v.AddArg(ptr)
54097 v.AddArg(mem)
54098 return true
54099 }
54100 return false
54101 }
54102 func rewriteValueAMD64_OpAMD64SUBQborrow_0(v *Value) bool {
54103 // match: (SUBQborrow x (MOVQconst [c]))
54104 // cond: is32Bit(c)
54105 // result: (SUBQconstborrow x [c])
54106 for {
54107 _ = v.Args[1]
54108 x := v.Args[0]
54109 v_1 := v.Args[1]
54110 if v_1.Op != OpAMD64MOVQconst {
54111 break
54112 }
54113 c := v_1.AuxInt
54114 if !(is32Bit(c)) {
54115 break
54116 }
54117 v.reset(OpAMD64SUBQconstborrow)
54118 v.AuxInt = c
54119 v.AddArg(x)
54120 return true
54121 }
54122 return false
54123 }
54124 func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool {
54125 // match: (SUBQconst [0] x)
54126 // cond:
54127 // result: x
54128 for {
54129 if v.AuxInt != 0 {
54130 break
54131 }
54132 x := v.Args[0]
54133 v.reset(OpCopy)
54134 v.Type = x.Type
54135 v.AddArg(x)
54136 return true
54137 }
54138 // match: (SUBQconst [c] x)
54139 // cond: c != -(1<<31)
54140 // result: (ADDQconst [-c] x)
54141 for {
54142 c := v.AuxInt
54143 x := v.Args[0]
54144 if !(c != -(1 << 31)) {
54145 break
54146 }
54147 v.reset(OpAMD64ADDQconst)
54148 v.AuxInt = -c
54149 v.AddArg(x)
54150 return true
54151 }
54152 // match: (SUBQconst (MOVQconst [d]) [c])
54153 // cond:
54154 // result: (MOVQconst [d-c])
54155 for {
54156 c := v.AuxInt
54157 v_0 := v.Args[0]
54158 if v_0.Op != OpAMD64MOVQconst {
54159 break
54160 }
54161 d := v_0.AuxInt
54162 v.reset(OpAMD64MOVQconst)
54163 v.AuxInt = d - c
54164 return true
54165 }
54166 // match: (SUBQconst (SUBQconst x [d]) [c])
54167 // cond: is32Bit(-c-d)
54168 // result: (ADDQconst [-c-d] x)
54169 for {
54170 c := v.AuxInt
54171 v_0 := v.Args[0]
54172 if v_0.Op != OpAMD64SUBQconst {
54173 break
54174 }
54175 d := v_0.AuxInt
54176 x := v_0.Args[0]
54177 if !(is32Bit(-c - d)) {
54178 break
54179 }
54180 v.reset(OpAMD64ADDQconst)
54181 v.AuxInt = -c - d
54182 v.AddArg(x)
54183 return true
54184 }
54185 return false
54186 }
54187 func rewriteValueAMD64_OpAMD64SUBQload_0(v *Value) bool {
54188 b := v.Block
54189 typ := &b.Func.Config.Types
54190 // match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem)
54191 // cond: is32Bit(off1+off2)
54192 // result: (SUBQload [off1+off2] {sym} val base mem)
54193 for {
54194 off1 := v.AuxInt
54195 sym := v.Aux
54196 mem := v.Args[2]
54197 val := v.Args[0]
54198 v_1 := v.Args[1]
54199 if v_1.Op != OpAMD64ADDQconst {
54200 break
54201 }
54202 off2 := v_1.AuxInt
54203 base := v_1.Args[0]
54204 if !(is32Bit(off1 + off2)) {
54205 break
54206 }
54207 v.reset(OpAMD64SUBQload)
54208 v.AuxInt = off1 + off2
54209 v.Aux = sym
54210 v.AddArg(val)
54211 v.AddArg(base)
54212 v.AddArg(mem)
54213 return true
54214 }
54215 // match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
54216 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
54217 // result: (SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
54218 for {
54219 off1 := v.AuxInt
54220 sym1 := v.Aux
54221 mem := v.Args[2]
54222 val := v.Args[0]
54223 v_1 := v.Args[1]
54224 if v_1.Op != OpAMD64LEAQ {
54225 break
54226 }
54227 off2 := v_1.AuxInt
54228 sym2 := v_1.Aux
54229 base := v_1.Args[0]
54230 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
54231 break
54232 }
54233 v.reset(OpAMD64SUBQload)
54234 v.AuxInt = off1 + off2
54235 v.Aux = mergeSym(sym1, sym2)
54236 v.AddArg(val)
54237 v.AddArg(base)
54238 v.AddArg(mem)
54239 return true
54240 }
54241 // match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
54242 // cond:
54243 // result: (SUBQ x (MOVQf2i y))
54244 for {
54245 off := v.AuxInt
54246 sym := v.Aux
54247 _ = v.Args[2]
54248 x := v.Args[0]
54249 ptr := v.Args[1]
54250 v_2 := v.Args[2]
54251 if v_2.Op != OpAMD64MOVSDstore {
54252 break
54253 }
54254 if v_2.AuxInt != off {
54255 break
54256 }
54257 if v_2.Aux != sym {
54258 break
54259 }
54260 _ = v_2.Args[2]
54261 if ptr != v_2.Args[0] {
54262 break
54263 }
54264 y := v_2.Args[1]
54265 v.reset(OpAMD64SUBQ)
54266 v.AddArg(x)
54267 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
54268 v0.AddArg(y)
54269 v.AddArg(v0)
54270 return true
54271 }
54272 return false
54273 }
54274 func rewriteValueAMD64_OpAMD64SUBQmodify_0(v *Value) bool {
54275 // match: (SUBQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
54276 // cond: is32Bit(off1+off2)
54277 // result: (SUBQmodify [off1+off2] {sym} base val mem)
54278 for {
54279 off1 := v.AuxInt
54280 sym := v.Aux
54281 mem := v.Args[2]
54282 v_0 := v.Args[0]
54283 if v_0.Op != OpAMD64ADDQconst {
54284 break
54285 }
54286 off2 := v_0.AuxInt
54287 base := v_0.Args[0]
54288 val := v.Args[1]
54289 if !(is32Bit(off1 + off2)) {
54290 break
54291 }
54292 v.reset(OpAMD64SUBQmodify)
54293 v.AuxInt = off1 + off2
54294 v.Aux = sym
54295 v.AddArg(base)
54296 v.AddArg(val)
54297 v.AddArg(mem)
54298 return true
54299 }
54300 // match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
54301 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
54302 // result: (SUBQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
54303 for {
54304 off1 := v.AuxInt
54305 sym1 := v.Aux
54306 mem := v.Args[2]
54307 v_0 := v.Args[0]
54308 if v_0.Op != OpAMD64LEAQ {
54309 break
54310 }
54311 off2 := v_0.AuxInt
54312 sym2 := v_0.Aux
54313 base := v_0.Args[0]
54314 val := v.Args[1]
54315 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
54316 break
54317 }
54318 v.reset(OpAMD64SUBQmodify)
54319 v.AuxInt = off1 + off2
54320 v.Aux = mergeSym(sym1, sym2)
54321 v.AddArg(base)
54322 v.AddArg(val)
54323 v.AddArg(mem)
54324 return true
54325 }
54326 return false
54327 }
54328 func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool {
54329 // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
54330 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
54331 // result: (SUBSDload x [off] {sym} ptr mem)
54332 for {
54333 _ = v.Args[1]
54334 x := v.Args[0]
54335 l := v.Args[1]
54336 if l.Op != OpAMD64MOVSDload {
54337 break
54338 }
54339 off := l.AuxInt
54340 sym := l.Aux
54341 mem := l.Args[1]
54342 ptr := l.Args[0]
54343 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
54344 break
54345 }
54346 v.reset(OpAMD64SUBSDload)
54347 v.AuxInt = off
54348 v.Aux = sym
54349 v.AddArg(x)
54350 v.AddArg(ptr)
54351 v.AddArg(mem)
54352 return true
54353 }
54354 return false
54355 }
54356 func rewriteValueAMD64_OpAMD64SUBSDload_0(v *Value) bool {
54357 b := v.Block
54358 typ := &b.Func.Config.Types
54359 // match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem)
54360 // cond: is32Bit(off1+off2)
54361 // result: (SUBSDload [off1+off2] {sym} val base mem)
54362 for {
54363 off1 := v.AuxInt
54364 sym := v.Aux
54365 mem := v.Args[2]
54366 val := v.Args[0]
54367 v_1 := v.Args[1]
54368 if v_1.Op != OpAMD64ADDQconst {
54369 break
54370 }
54371 off2 := v_1.AuxInt
54372 base := v_1.Args[0]
54373 if !(is32Bit(off1 + off2)) {
54374 break
54375 }
54376 v.reset(OpAMD64SUBSDload)
54377 v.AuxInt = off1 + off2
54378 v.Aux = sym
54379 v.AddArg(val)
54380 v.AddArg(base)
54381 v.AddArg(mem)
54382 return true
54383 }
54384 // match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
54385 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
54386 // result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
54387 for {
54388 off1 := v.AuxInt
54389 sym1 := v.Aux
54390 mem := v.Args[2]
54391 val := v.Args[0]
54392 v_1 := v.Args[1]
54393 if v_1.Op != OpAMD64LEAQ {
54394 break
54395 }
54396 off2 := v_1.AuxInt
54397 sym2 := v_1.Aux
54398 base := v_1.Args[0]
54399 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
54400 break
54401 }
54402 v.reset(OpAMD64SUBSDload)
54403 v.AuxInt = off1 + off2
54404 v.Aux = mergeSym(sym1, sym2)
54405 v.AddArg(val)
54406 v.AddArg(base)
54407 v.AddArg(mem)
54408 return true
54409 }
54410 // match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
54411 // cond:
54412 // result: (SUBSD x (MOVQi2f y))
54413 for {
54414 off := v.AuxInt
54415 sym := v.Aux
54416 _ = v.Args[2]
54417 x := v.Args[0]
54418 ptr := v.Args[1]
54419 v_2 := v.Args[2]
54420 if v_2.Op != OpAMD64MOVQstore {
54421 break
54422 }
54423 if v_2.AuxInt != off {
54424 break
54425 }
54426 if v_2.Aux != sym {
54427 break
54428 }
54429 _ = v_2.Args[2]
54430 if ptr != v_2.Args[0] {
54431 break
54432 }
54433 y := v_2.Args[1]
54434 v.reset(OpAMD64SUBSD)
54435 v.AddArg(x)
54436 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
54437 v0.AddArg(y)
54438 v.AddArg(v0)
54439 return true
54440 }
54441 return false
54442 }
54443 func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool {
54444 // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
54445 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
54446 // result: (SUBSSload x [off] {sym} ptr mem)
54447 for {
54448 _ = v.Args[1]
54449 x := v.Args[0]
54450 l := v.Args[1]
54451 if l.Op != OpAMD64MOVSSload {
54452 break
54453 }
54454 off := l.AuxInt
54455 sym := l.Aux
54456 mem := l.Args[1]
54457 ptr := l.Args[0]
54458 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
54459 break
54460 }
54461 v.reset(OpAMD64SUBSSload)
54462 v.AuxInt = off
54463 v.Aux = sym
54464 v.AddArg(x)
54465 v.AddArg(ptr)
54466 v.AddArg(mem)
54467 return true
54468 }
54469 return false
54470 }
54471 func rewriteValueAMD64_OpAMD64SUBSSload_0(v *Value) bool {
54472 b := v.Block
54473 typ := &b.Func.Config.Types
54474 // match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem)
54475 // cond: is32Bit(off1+off2)
54476 // result: (SUBSSload [off1+off2] {sym} val base mem)
54477 for {
54478 off1 := v.AuxInt
54479 sym := v.Aux
54480 mem := v.Args[2]
54481 val := v.Args[0]
54482 v_1 := v.Args[1]
54483 if v_1.Op != OpAMD64ADDQconst {
54484 break
54485 }
54486 off2 := v_1.AuxInt
54487 base := v_1.Args[0]
54488 if !(is32Bit(off1 + off2)) {
54489 break
54490 }
54491 v.reset(OpAMD64SUBSSload)
54492 v.AuxInt = off1 + off2
54493 v.Aux = sym
54494 v.AddArg(val)
54495 v.AddArg(base)
54496 v.AddArg(mem)
54497 return true
54498 }
54499 // match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
54500 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
54501 // result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
54502 for {
54503 off1 := v.AuxInt
54504 sym1 := v.Aux
54505 mem := v.Args[2]
54506 val := v.Args[0]
54507 v_1 := v.Args[1]
54508 if v_1.Op != OpAMD64LEAQ {
54509 break
54510 }
54511 off2 := v_1.AuxInt
54512 sym2 := v_1.Aux
54513 base := v_1.Args[0]
54514 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
54515 break
54516 }
54517 v.reset(OpAMD64SUBSSload)
54518 v.AuxInt = off1 + off2
54519 v.Aux = mergeSym(sym1, sym2)
54520 v.AddArg(val)
54521 v.AddArg(base)
54522 v.AddArg(mem)
54523 return true
54524 }
54525 // match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
54526 // cond:
54527 // result: (SUBSS x (MOVLi2f y))
54528 for {
54529 off := v.AuxInt
54530 sym := v.Aux
54531 _ = v.Args[2]
54532 x := v.Args[0]
54533 ptr := v.Args[1]
54534 v_2 := v.Args[2]
54535 if v_2.Op != OpAMD64MOVLstore {
54536 break
54537 }
54538 if v_2.AuxInt != off {
54539 break
54540 }
54541 if v_2.Aux != sym {
54542 break
54543 }
54544 _ = v_2.Args[2]
54545 if ptr != v_2.Args[0] {
54546 break
54547 }
54548 y := v_2.Args[1]
54549 v.reset(OpAMD64SUBSS)
54550 v.AddArg(x)
54551 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
54552 v0.AddArg(y)
54553 v.AddArg(v0)
54554 return true
54555 }
54556 return false
54557 }
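// rewriteValueAMD64_OpAMD64TESTB_0 canonicalizes byte tests: a constant
// operand becomes TESTBconst, and TESTB of a load against itself (the
// load's only two uses) is replaced by CMPBconstload, comparing the byte
// in memory against zero. TESTL, TESTQ, and TESTW below follow the same
// pattern at their widths.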
54558 func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool {
54559 b := v.Block
54560 // match: (TESTB (MOVLconst [c]) x)
54561 // cond:
54562 // result: (TESTBconst [c] x)
54563 for {
54564 x := v.Args[1]
54565 v_0 := v.Args[0]
54566 if v_0.Op != OpAMD64MOVLconst {
54567 break
54568 }
54569 c := v_0.AuxInt
54570 v.reset(OpAMD64TESTBconst)
54571 v.AuxInt = c
54572 v.AddArg(x)
54573 return true
54574 }
54575 // match: (TESTB x (MOVLconst [c]))
54576 // cond:
54577 // result: (TESTBconst [c] x)
54578 for {
54579 _ = v.Args[1]
54580 x := v.Args[0]
54581 v_1 := v.Args[1]
54582 if v_1.Op != OpAMD64MOVLconst {
54583 break
54584 }
54585 c := v_1.AuxInt
54586 v.reset(OpAMD64TESTBconst)
54587 v.AuxInt = c
54588 v.AddArg(x)
54589 return true
54590 }
54591 // match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2)
54592 // cond: l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)
54593 // result: @l.Block (CMPBconstload {sym} [makeValAndOff(0, off)] ptr mem)
54594 for {
54595 l2 := v.Args[1]
54596 l := v.Args[0]
54597 if l.Op != OpAMD64MOVBload {
54598 break
54599 }
54600 off := l.AuxInt
54601 sym := l.Aux
54602 mem := l.Args[1]
54603 ptr := l.Args[0]
54604 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
54605 break
54606 }
54607 b = l.Block
54608 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
54609 v.reset(OpCopy)
54610 v.AddArg(v0)
54611 v0.AuxInt = makeValAndOff(0, off)
54612 v0.Aux = sym
54613 v0.AddArg(ptr)
54614 v0.AddArg(mem)
54615 return true
54616 }
54617 // match: (TESTB l2 l:(MOVBload {sym} [off] ptr mem))
54618 // cond: l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)
54619 // result: @l.Block (CMPBconstload {sym} [makeValAndOff(0, off)] ptr mem)
54620 for {
54621 _ = v.Args[1]
54622 l2 := v.Args[0]
54623 l := v.Args[1]
54624 if l.Op != OpAMD64MOVBload {
54625 break
54626 }
54627 off := l.AuxInt
54628 sym := l.Aux
54629 mem := l.Args[1]
54630 ptr := l.Args[0]
54631 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
54632 break
54633 }
54634 b = l.Block
54635 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
54636 v.reset(OpCopy)
54637 v.AddArg(v0)
54638 v0.AuxInt = makeValAndOff(0, off)
54639 v0.Aux = sym
54640 v0.AddArg(ptr)
54641 v0.AddArg(mem)
54642 return true
54643 }
54644 return false
54645 }
54646 func rewriteValueAMD64_OpAMD64TESTBconst_0(v *Value) bool {
54647 // match: (TESTBconst [-1] x)
54648 // cond: x.Op != OpAMD64MOVLconst
54649 // result: (TESTB x x)
54650 for {
54651 if v.AuxInt != -1 {
54652 break
54653 }
54654 x := v.Args[0]
54655 if !(x.Op != OpAMD64MOVLconst) {
54656 break
54657 }
54658 v.reset(OpAMD64TESTB)
54659 v.AddArg(x)
54660 v.AddArg(x)
54661 return true
54662 }
54663 return false
54664 }
54665 func rewriteValueAMD64_OpAMD64TESTL_0(v *Value) bool {
54666 b := v.Block
54667 // match: (TESTL (MOVLconst [c]) x)
54668 // cond:
54669 // result: (TESTLconst [c] x)
54670 for {
54671 x := v.Args[1]
54672 v_0 := v.Args[0]
54673 if v_0.Op != OpAMD64MOVLconst {
54674 break
54675 }
54676 c := v_0.AuxInt
54677 v.reset(OpAMD64TESTLconst)
54678 v.AuxInt = c
54679 v.AddArg(x)
54680 return true
54681 }
54682 // match: (TESTL x (MOVLconst [c]))
54683 // cond:
54684 // result: (TESTLconst [c] x)
54685 for {
54686 _ = v.Args[1]
54687 x := v.Args[0]
54688 v_1 := v.Args[1]
54689 if v_1.Op != OpAMD64MOVLconst {
54690 break
54691 }
54692 c := v_1.AuxInt
54693 v.reset(OpAMD64TESTLconst)
54694 v.AuxInt = c
54695 v.AddArg(x)
54696 return true
54697 }
54698 // match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2)
54699 // cond: l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)
54700 // result: @l.Block (CMPLconstload {sym} [makeValAndOff(0, off)] ptr mem)
54701 for {
54702 l2 := v.Args[1]
54703 l := v.Args[0]
54704 if l.Op != OpAMD64MOVLload {
54705 break
54706 }
54707 off := l.AuxInt
54708 sym := l.Aux
54709 mem := l.Args[1]
54710 ptr := l.Args[0]
54711 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
54712 break
54713 }
54714 b = l.Block
54715 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
54716 v.reset(OpCopy)
54717 v.AddArg(v0)
54718 v0.AuxInt = makeValAndOff(0, off)
54719 v0.Aux = sym
54720 v0.AddArg(ptr)
54721 v0.AddArg(mem)
54722 return true
54723 }
54724 // match: (TESTL l2 l:(MOVLload {sym} [off] ptr mem))
54725 // cond: l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)
54726 // result: @l.Block (CMPLconstload {sym} [makeValAndOff(0, off)] ptr mem)
54727 for {
54728 _ = v.Args[1]
54729 l2 := v.Args[0]
54730 l := v.Args[1]
54731 if l.Op != OpAMD64MOVLload {
54732 break
54733 }
54734 off := l.AuxInt
54735 sym := l.Aux
54736 mem := l.Args[1]
54737 ptr := l.Args[0]
54738 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
54739 break
54740 }
54741 b = l.Block
54742 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
54743 v.reset(OpCopy)
54744 v.AddArg(v0)
54745 v0.AuxInt = makeValAndOff(0, off)
54746 v0.Aux = sym
54747 v0.AddArg(ptr)
54748 v0.AddArg(mem)
54749 return true
54750 }
54751 return false
54752 }
54753 func rewriteValueAMD64_OpAMD64TESTLconst_0(v *Value) bool {
54754 // match: (TESTLconst [-1] x)
54755 // cond: x.Op != OpAMD64MOVLconst
54756 // result: (TESTL x x)
54757 for {
54758 if v.AuxInt != -1 {
54759 break
54760 }
54761 x := v.Args[0]
54762 if !(x.Op != OpAMD64MOVLconst) {
54763 break
54764 }
54765 v.reset(OpAMD64TESTL)
54766 v.AddArg(x)
54767 v.AddArg(x)
54768 return true
54769 }
54770 return false
54771 }
54772 func rewriteValueAMD64_OpAMD64TESTQ_0(v *Value) bool {
54773 b := v.Block
54774 // match: (TESTQ (MOVQconst [c]) x)
54775 // cond: is32Bit(c)
54776 // result: (TESTQconst [c] x)
54777 for {
54778 x := v.Args[1]
54779 v_0 := v.Args[0]
54780 if v_0.Op != OpAMD64MOVQconst {
54781 break
54782 }
54783 c := v_0.AuxInt
54784 if !(is32Bit(c)) {
54785 break
54786 }
54787 v.reset(OpAMD64TESTQconst)
54788 v.AuxInt = c
54789 v.AddArg(x)
54790 return true
54791 }
54792 // match: (TESTQ x (MOVQconst [c]))
54793 // cond: is32Bit(c)
54794 // result: (TESTQconst [c] x)
54795 for {
54796 _ = v.Args[1]
54797 x := v.Args[0]
54798 v_1 := v.Args[1]
54799 if v_1.Op != OpAMD64MOVQconst {
54800 break
54801 }
54802 c := v_1.AuxInt
54803 if !(is32Bit(c)) {
54804 break
54805 }
54806 v.reset(OpAMD64TESTQconst)
54807 v.AuxInt = c
54808 v.AddArg(x)
54809 return true
54810 }
54811 // match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2)
54812 // cond: l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)
54813 // result: @l.Block (CMPQconstload {sym} [makeValAndOff(0, off)] ptr mem)
54814 for {
54815 l2 := v.Args[1]
54816 l := v.Args[0]
54817 if l.Op != OpAMD64MOVQload {
54818 break
54819 }
54820 off := l.AuxInt
54821 sym := l.Aux
54822 mem := l.Args[1]
54823 ptr := l.Args[0]
54824 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
54825 break
54826 }
54827 b = l.Block
54828 v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
54829 v.reset(OpCopy)
54830 v.AddArg(v0)
54831 v0.AuxInt = makeValAndOff(0, off)
54832 v0.Aux = sym
54833 v0.AddArg(ptr)
54834 v0.AddArg(mem)
54835 return true
54836 }
54837 // match: (TESTQ l2 l:(MOVQload {sym} [off] ptr mem))
54838 // cond: l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)
54839 // result: @l.Block (CMPQconstload {sym} [makeValAndOff(0, off)] ptr mem)
54840 for {
54841 _ = v.Args[1]
54842 l2 := v.Args[0]
54843 l := v.Args[1]
54844 if l.Op != OpAMD64MOVQload {
54845 break
54846 }
54847 off := l.AuxInt
54848 sym := l.Aux
54849 mem := l.Args[1]
54850 ptr := l.Args[0]
54851 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
54852 break
54853 }
54854 b = l.Block
54855 v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
54856 v.reset(OpCopy)
54857 v.AddArg(v0)
54858 v0.AuxInt = makeValAndOff(0, off)
54859 v0.Aux = sym
54860 v0.AddArg(ptr)
54861 v0.AddArg(mem)
54862 return true
54863 }
54864 return false
54865 }
54866 func rewriteValueAMD64_OpAMD64TESTQconst_0(v *Value) bool {
54867 // match: (TESTQconst [-1] x)
54868 // cond: x.Op != OpAMD64MOVQconst
54869 // result: (TESTQ x x)
54870 for {
54871 if v.AuxInt != -1 {
54872 break
54873 }
54874 x := v.Args[0]
54875 if !(x.Op != OpAMD64MOVQconst) {
54876 break
54877 }
54878 v.reset(OpAMD64TESTQ)
54879 v.AddArg(x)
54880 v.AddArg(x)
54881 return true
54882 }
54883 return false
54884 }
54885 func rewriteValueAMD64_OpAMD64TESTW_0(v *Value) bool {
54886 b := v.Block
54887 // match: (TESTW (MOVLconst [c]) x)
54888 // cond:
54889 // result: (TESTWconst [c] x)
54890 for {
54891 x := v.Args[1]
54892 v_0 := v.Args[0]
54893 if v_0.Op != OpAMD64MOVLconst {
54894 break
54895 }
54896 c := v_0.AuxInt
54897 v.reset(OpAMD64TESTWconst)
54898 v.AuxInt = c
54899 v.AddArg(x)
54900 return true
54901 }
54902 // match: (TESTW x (MOVLconst [c]))
54903 // cond:
54904 // result: (TESTWconst [c] x)
54905 for {
54906 _ = v.Args[1]
54907 x := v.Args[0]
54908 v_1 := v.Args[1]
54909 if v_1.Op != OpAMD64MOVLconst {
54910 break
54911 }
54912 c := v_1.AuxInt
54913 v.reset(OpAMD64TESTWconst)
54914 v.AuxInt = c
54915 v.AddArg(x)
54916 return true
54917 }
54918 // match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2)
54919 // cond: l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)
54920 // result: @l.Block (CMPWconstload {sym} [makeValAndOff(0, off)] ptr mem)
54921 for {
54922 l2 := v.Args[1]
54923 l := v.Args[0]
54924 if l.Op != OpAMD64MOVWload {
54925 break
54926 }
54927 off := l.AuxInt
54928 sym := l.Aux
54929 mem := l.Args[1]
54930 ptr := l.Args[0]
54931 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
54932 break
54933 }
54934 b = l.Block
54935 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
54936 v.reset(OpCopy)
54937 v.AddArg(v0)
54938 v0.AuxInt = makeValAndOff(0, off)
54939 v0.Aux = sym
54940 v0.AddArg(ptr)
54941 v0.AddArg(mem)
54942 return true
54943 }
54944 // match: (TESTW l2 l:(MOVWload {sym} [off] ptr mem))
54945 // cond: l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)
54946 // result: @l.Block (CMPWconstload {sym} [makeValAndOff(0, off)] ptr mem)
54947 for {
54948 _ = v.Args[1]
54949 l2 := v.Args[0]
54950 l := v.Args[1]
54951 if l.Op != OpAMD64MOVWload {
54952 break
54953 }
54954 off := l.AuxInt
54955 sym := l.Aux
54956 mem := l.Args[1]
54957 ptr := l.Args[0]
54958 if !(l == l2 && l.Uses == 2 && validValAndOff(0, off) && clobber(l)) {
54959 break
54960 }
54961 b = l.Block
54962 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
54963 v.reset(OpCopy)
54964 v.AddArg(v0)
54965 v0.AuxInt = makeValAndOff(0, off)
54966 v0.Aux = sym
54967 v0.AddArg(ptr)
54968 v0.AddArg(mem)
54969 return true
54970 }
54971 return false
54972 }
54973 func rewriteValueAMD64_OpAMD64TESTWconst_0(v *Value) bool {
54974 // match: (TESTWconst [-1] x)
54975 // cond: x.Op != OpAMD64MOVLconst
54976 // result: (TESTW x x)
54977 for {
54978 if v.AuxInt != -1 {
54979 break
54980 }
54981 x := v.Args[0]
54982 if !(x.Op != OpAMD64MOVLconst) {
54983 break
54984 }
54985 v.reset(OpAMD64TESTW)
54986 v.AddArg(x)
54987 v.AddArg(x)
54988 return true
54989 }
54990 return false
54991 }
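// The locked read-modify-write ops below (XADDLlock, XADDQlock, XCHGL,
// XCHGQ) only fold address arithmetic: an ADDQconst or LEAQ feeding the
// pointer is absorbed into the instruction's offset when it fits in 32 bits.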
54992 func rewriteValueAMD64_OpAMD64XADDLlock_0(v *Value) bool {
54993 // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
54994 // cond: is32Bit(off1+off2)
54995 // result: (XADDLlock [off1+off2] {sym} val ptr mem)
54996 for {
54997 off1 := v.AuxInt
54998 sym := v.Aux
54999 mem := v.Args[2]
55000 val := v.Args[0]
55001 v_1 := v.Args[1]
55002 if v_1.Op != OpAMD64ADDQconst {
55003 break
55004 }
55005 off2 := v_1.AuxInt
55006 ptr := v_1.Args[0]
55007 if !(is32Bit(off1 + off2)) {
55008 break
55009 }
55010 v.reset(OpAMD64XADDLlock)
55011 v.AuxInt = off1 + off2
55012 v.Aux = sym
55013 v.AddArg(val)
55014 v.AddArg(ptr)
55015 v.AddArg(mem)
55016 return true
55017 }
55018 return false
55019 }
55020 func rewriteValueAMD64_OpAMD64XADDQlock_0(v *Value) bool {
55021 // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
55022 // cond: is32Bit(off1+off2)
55023 // result: (XADDQlock [off1+off2] {sym} val ptr mem)
55024 for {
55025 off1 := v.AuxInt
55026 sym := v.Aux
55027 mem := v.Args[2]
55028 val := v.Args[0]
55029 v_1 := v.Args[1]
55030 if v_1.Op != OpAMD64ADDQconst {
55031 break
55032 }
55033 off2 := v_1.AuxInt
55034 ptr := v_1.Args[0]
55035 if !(is32Bit(off1 + off2)) {
55036 break
55037 }
55038 v.reset(OpAMD64XADDQlock)
55039 v.AuxInt = off1 + off2
55040 v.Aux = sym
55041 v.AddArg(val)
55042 v.AddArg(ptr)
55043 v.AddArg(mem)
55044 return true
55045 }
55046 return false
55047 }
55048 func rewriteValueAMD64_OpAMD64XCHGL_0(v *Value) bool {
55049 // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
55050 // cond: is32Bit(off1+off2)
55051 // result: (XCHGL [off1+off2] {sym} val ptr mem)
55052 for {
55053 off1 := v.AuxInt
55054 sym := v.Aux
55055 mem := v.Args[2]
55056 val := v.Args[0]
55057 v_1 := v.Args[1]
55058 if v_1.Op != OpAMD64ADDQconst {
55059 break
55060 }
55061 off2 := v_1.AuxInt
55062 ptr := v_1.Args[0]
55063 if !(is32Bit(off1 + off2)) {
55064 break
55065 }
55066 v.reset(OpAMD64XCHGL)
55067 v.AuxInt = off1 + off2
55068 v.Aux = sym
55069 v.AddArg(val)
55070 v.AddArg(ptr)
55071 v.AddArg(mem)
55072 return true
55073 }
55074 // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
55075 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
55076 // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
55077 for {
55078 off1 := v.AuxInt
55079 sym1 := v.Aux
55080 mem := v.Args[2]
55081 val := v.Args[0]
55082 v_1 := v.Args[1]
55083 if v_1.Op != OpAMD64LEAQ {
55084 break
55085 }
55086 off2 := v_1.AuxInt
55087 sym2 := v_1.Aux
55088 ptr := v_1.Args[0]
55089 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
55090 break
55091 }
55092 v.reset(OpAMD64XCHGL)
55093 v.AuxInt = off1 + off2
55094 v.Aux = mergeSym(sym1, sym2)
55095 v.AddArg(val)
55096 v.AddArg(ptr)
55097 v.AddArg(mem)
55098 return true
55099 }
55100 return false
55101 }
55102 func rewriteValueAMD64_OpAMD64XCHGQ_0(v *Value) bool {
55103 // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
55104 // cond: is32Bit(off1+off2)
55105 // result: (XCHGQ [off1+off2] {sym} val ptr mem)
55106 for {
55107 off1 := v.AuxInt
55108 sym := v.Aux
55109 mem := v.Args[2]
55110 val := v.Args[0]
55111 v_1 := v.Args[1]
55112 if v_1.Op != OpAMD64ADDQconst {
55113 break
55114 }
55115 off2 := v_1.AuxInt
55116 ptr := v_1.Args[0]
55117 if !(is32Bit(off1 + off2)) {
55118 break
55119 }
55120 v.reset(OpAMD64XCHGQ)
55121 v.AuxInt = off1 + off2
55122 v.Aux = sym
55123 v.AddArg(val)
55124 v.AddArg(ptr)
55125 v.AddArg(mem)
55126 return true
55127 }
55128 // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
55129 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
55130 // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
55131 for {
55132 off1 := v.AuxInt
55133 sym1 := v.Aux
55134 mem := v.Args[2]
55135 val := v.Args[0]
55136 v_1 := v.Args[1]
55137 if v_1.Op != OpAMD64LEAQ {
55138 break
55139 }
55140 off2 := v_1.AuxInt
55141 sym2 := v_1.Aux
55142 ptr := v_1.Args[0]
55143 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
55144 break
55145 }
55146 v.reset(OpAMD64XCHGQ)
55147 v.AuxInt = off1 + off2
55148 v.Aux = mergeSym(sym1, sym2)
55149 v.AddArg(val)
55150 v.AddArg(ptr)
55151 v.AddArg(mem)
55152 return true
55153 }
55154 return false
55155 }
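// rewriteValueAMD64_OpAMD64XORL_0 recognizes bit-manipulation idioms in
// 32-bit XOR: x ^ (1 << y) becomes BTCL, XOR with a large power of two
// becomes BTCLconst, other constants fold into XORLconst, and paired
// opposite shifts of the same value form ROLLconst/ROLWconst rotates.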
55156 func rewriteValueAMD64_OpAMD64XORL_0(v *Value) bool {
55157 b := v.Block
55158 config := b.Func.Config
55159 // match: (XORL (SHLL (MOVLconst [1]) y) x)
55160 // cond: !config.nacl
55161 // result: (BTCL x y)
55162 for {
55163 x := v.Args[1]
55164 v_0 := v.Args[0]
55165 if v_0.Op != OpAMD64SHLL {
55166 break
55167 }
55168 y := v_0.Args[1]
55169 v_0_0 := v_0.Args[0]
55170 if v_0_0.Op != OpAMD64MOVLconst {
55171 break
55172 }
55173 if v_0_0.AuxInt != 1 {
55174 break
55175 }
55176 if !(!config.nacl) {
55177 break
55178 }
55179 v.reset(OpAMD64BTCL)
55180 v.AddArg(x)
55181 v.AddArg(y)
55182 return true
55183 }
55184 // match: (XORL x (SHLL (MOVLconst [1]) y))
55185 // cond: !config.nacl
55186 // result: (BTCL x y)
55187 for {
55188 _ = v.Args[1]
55189 x := v.Args[0]
55190 v_1 := v.Args[1]
55191 if v_1.Op != OpAMD64SHLL {
55192 break
55193 }
55194 y := v_1.Args[1]
55195 v_1_0 := v_1.Args[0]
55196 if v_1_0.Op != OpAMD64MOVLconst {
55197 break
55198 }
55199 if v_1_0.AuxInt != 1 {
55200 break
55201 }
55202 if !(!config.nacl) {
55203 break
55204 }
55205 v.reset(OpAMD64BTCL)
55206 v.AddArg(x)
55207 v.AddArg(y)
55208 return true
55209 }
55210 // match: (XORL (MOVLconst [c]) x)
55211 // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
55212 // result: (BTCLconst [log2uint32(c)] x)
55213 for {
55214 x := v.Args[1]
55215 v_0 := v.Args[0]
55216 if v_0.Op != OpAMD64MOVLconst {
55217 break
55218 }
55219 c := v_0.AuxInt
55220 if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
55221 break
55222 }
55223 v.reset(OpAMD64BTCLconst)
55224 v.AuxInt = log2uint32(c)
55225 v.AddArg(x)
55226 return true
55227 }
55228 // match: (XORL x (MOVLconst [c]))
55229 // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
55230 // result: (BTCLconst [log2uint32(c)] x)
55231 for {
55232 _ = v.Args[1]
55233 x := v.Args[0]
55234 v_1 := v.Args[1]
55235 if v_1.Op != OpAMD64MOVLconst {
55236 break
55237 }
55238 c := v_1.AuxInt
55239 if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
55240 break
55241 }
55242 v.reset(OpAMD64BTCLconst)
55243 v.AuxInt = log2uint32(c)
55244 v.AddArg(x)
55245 return true
55246 }
55247 // match: (XORL x (MOVLconst [c]))
55248 // cond:
55249 // result: (XORLconst [c] x)
55250 for {
55251 _ = v.Args[1]
55252 x := v.Args[0]
55253 v_1 := v.Args[1]
55254 if v_1.Op != OpAMD64MOVLconst {
55255 break
55256 }
55257 c := v_1.AuxInt
55258 v.reset(OpAMD64XORLconst)
55259 v.AuxInt = c
55260 v.AddArg(x)
55261 return true
55262 }
55263 // match: (XORL (MOVLconst [c]) x)
55264 // cond:
55265 // result: (XORLconst [c] x)
55266 for {
55267 x := v.Args[1]
55268 v_0 := v.Args[0]
55269 if v_0.Op != OpAMD64MOVLconst {
55270 break
55271 }
55272 c := v_0.AuxInt
55273 v.reset(OpAMD64XORLconst)
55274 v.AuxInt = c
55275 v.AddArg(x)
55276 return true
55277 }
55278 // match: (XORL (SHLLconst x [c]) (SHRLconst x [d]))
55279 // cond: d == 32-c
55280 // result: (ROLLconst x [c])
55281 for {
55282 _ = v.Args[1]
55283 v_0 := v.Args[0]
55284 if v_0.Op != OpAMD64SHLLconst {
55285 break
55286 }
55287 c := v_0.AuxInt
55288 x := v_0.Args[0]
55289 v_1 := v.Args[1]
55290 if v_1.Op != OpAMD64SHRLconst {
55291 break
55292 }
55293 d := v_1.AuxInt
55294 if x != v_1.Args[0] {
55295 break
55296 }
55297 if !(d == 32-c) {
55298 break
55299 }
55300 v.reset(OpAMD64ROLLconst)
55301 v.AuxInt = c
55302 v.AddArg(x)
55303 return true
55304 }
55305 // match: (XORL (SHRLconst x [d]) (SHLLconst x [c]))
55306 // cond: d == 32-c
55307 // result: (ROLLconst x [c])
55308 for {
55309 _ = v.Args[1]
55310 v_0 := v.Args[0]
55311 if v_0.Op != OpAMD64SHRLconst {
55312 break
55313 }
55314 d := v_0.AuxInt
55315 x := v_0.Args[0]
55316 v_1 := v.Args[1]
55317 if v_1.Op != OpAMD64SHLLconst {
55318 break
55319 }
55320 c := v_1.AuxInt
55321 if x != v_1.Args[0] {
55322 break
55323 }
55324 if !(d == 32-c) {
55325 break
55326 }
55327 v.reset(OpAMD64ROLLconst)
55328 v.AuxInt = c
55329 v.AddArg(x)
55330 return true
55331 }
55332 // match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
55333 // cond: d == 16-c && c < 16 && t.Size() == 2
55334 // result: (ROLWconst x [c])
55335 for {
55336 t := v.Type
55337 _ = v.Args[1]
55338 v_0 := v.Args[0]
55339 if v_0.Op != OpAMD64SHLLconst {
55340 break
55341 }
55342 c := v_0.AuxInt
55343 x := v_0.Args[0]
55344 v_1 := v.Args[1]
55345 if v_1.Op != OpAMD64SHRWconst {
55346 break
55347 }
55348 d := v_1.AuxInt
55349 if x != v_1.Args[0] {
55350 break
55351 }
55352 if !(d == 16-c && c < 16 && t.Size() == 2) {
55353 break
55354 }
55355 v.reset(OpAMD64ROLWconst)
55356 v.AuxInt = c
55357 v.AddArg(x)
55358 return true
55359 }
55360 // match: (XORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
55361 // cond: d == 16-c && c < 16 && t.Size() == 2
55362 // result: (ROLWconst x [c])
55363 for {
55364 t := v.Type
55365 _ = v.Args[1]
55366 v_0 := v.Args[0]
55367 if v_0.Op != OpAMD64SHRWconst {
55368 break
55369 }
55370 d := v_0.AuxInt
55371 x := v_0.Args[0]
55372 v_1 := v.Args[1]
55373 if v_1.Op != OpAMD64SHLLconst {
55374 break
55375 }
55376 c := v_1.AuxInt
55377 if x != v_1.Args[0] {
55378 break
55379 }
55380 if !(d == 16-c && c < 16 && t.Size() == 2) {
55381 break
55382 }
55383 v.reset(OpAMD64ROLWconst)
55384 v.AuxInt = c
55385 v.AddArg(x)
55386 return true
55387 }
55388 return false
55389 }
55390 func rewriteValueAMD64_OpAMD64XORL_10(v *Value) bool {
55391 // match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
55392 // cond: d == 8-c && c < 8 && t.Size() == 1
55393 // result: (ROLBconst x [c])
55394 for {
55395 t := v.Type
55396 _ = v.Args[1]
55397 v_0 := v.Args[0]
55398 if v_0.Op != OpAMD64SHLLconst {
55399 break
55400 }
55401 c := v_0.AuxInt
55402 x := v_0.Args[0]
55403 v_1 := v.Args[1]
55404 if v_1.Op != OpAMD64SHRBconst {
55405 break
55406 }
55407 d := v_1.AuxInt
55408 if x != v_1.Args[0] {
55409 break
55410 }
55411 if !(d == 8-c && c < 8 && t.Size() == 1) {
55412 break
55413 }
55414 v.reset(OpAMD64ROLBconst)
55415 v.AuxInt = c
55416 v.AddArg(x)
55417 return true
55418 }
55419 // match: (XORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
55420 // cond: d == 8-c && c < 8 && t.Size() == 1
55421 // result: (ROLBconst x [c])
55422 for {
55423 t := v.Type
55424 _ = v.Args[1]
55425 v_0 := v.Args[0]
55426 if v_0.Op != OpAMD64SHRBconst {
55427 break
55428 }
55429 d := v_0.AuxInt
55430 x := v_0.Args[0]
55431 v_1 := v.Args[1]
55432 if v_1.Op != OpAMD64SHLLconst {
55433 break
55434 }
55435 c := v_1.AuxInt
55436 if x != v_1.Args[0] {
55437 break
55438 }
55439 if !(d == 8-c && c < 8 && t.Size() == 1) {
55440 break
55441 }
55442 v.reset(OpAMD64ROLBconst)
55443 v.AuxInt = c
55444 v.AddArg(x)
55445 return true
55446 }
55447 // match: (XORL x x)
55448 // cond:
55449 // result: (MOVLconst [0])
55450 for {
55451 x := v.Args[1]
55452 if x != v.Args[0] {
55453 break
55454 }
55455 v.reset(OpAMD64MOVLconst)
55456 v.AuxInt = 0
55457 return true
55458 }
55459 // match: (XORL x l:(MOVLload [off] {sym} ptr mem))
55460 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
55461 // result: (XORLload x [off] {sym} ptr mem)
55462 for {
55463 _ = v.Args[1]
55464 x := v.Args[0]
55465 l := v.Args[1]
55466 if l.Op != OpAMD64MOVLload {
55467 break
55468 }
55469 off := l.AuxInt
55470 sym := l.Aux
55471 mem := l.Args[1]
55472 ptr := l.Args[0]
55473 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
55474 break
55475 }
55476 v.reset(OpAMD64XORLload)
55477 v.AuxInt = off
55478 v.Aux = sym
55479 v.AddArg(x)
55480 v.AddArg(ptr)
55481 v.AddArg(mem)
55482 return true
55483 }
55484 // match: (XORL l:(MOVLload [off] {sym} ptr mem) x)
55485 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
55486 // result: (XORLload x [off] {sym} ptr mem)
55487 for {
55488 x := v.Args[1]
55489 l := v.Args[0]
55490 if l.Op != OpAMD64MOVLload {
55491 break
55492 }
55493 off := l.AuxInt
55494 sym := l.Aux
55495 mem := l.Args[1]
55496 ptr := l.Args[0]
55497 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
55498 break
55499 }
55500 v.reset(OpAMD64XORLload)
55501 v.AuxInt = off
55502 v.Aux = sym
55503 v.AddArg(x)
55504 v.AddArg(ptr)
55505 v.AddArg(mem)
55506 return true
55507 }
55508 return false
55509 }
55510 func rewriteValueAMD64_OpAMD64XORLconst_0(v *Value) bool {
55511 b := v.Block
55512 config := b.Func.Config
55513 // match: (XORLconst [c] x)
55514 // cond: isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
55515 // result: (BTCLconst [log2uint32(c)] x)
55516 for {
55517 c := v.AuxInt
55518 x := v.Args[0]
55519 if !(isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
55520 break
55521 }
55522 v.reset(OpAMD64BTCLconst)
55523 v.AuxInt = log2uint32(c)
55524 v.AddArg(x)
55525 return true
55526 }
55527 // match: (XORLconst [1] (SETNE x))
55528 // cond:
55529 // result: (SETEQ x)
55530 for {
55531 if v.AuxInt != 1 {
55532 break
55533 }
55534 v_0 := v.Args[0]
55535 if v_0.Op != OpAMD64SETNE {
55536 break
55537 }
55538 x := v_0.Args[0]
55539 v.reset(OpAMD64SETEQ)
55540 v.AddArg(x)
55541 return true
55542 }
55543 // match: (XORLconst [1] (SETEQ x))
55544 // cond:
55545 // result: (SETNE x)
55546 for {
55547 if v.AuxInt != 1 {
55548 break
55549 }
55550 v_0 := v.Args[0]
55551 if v_0.Op != OpAMD64SETEQ {
55552 break
55553 }
55554 x := v_0.Args[0]
55555 v.reset(OpAMD64SETNE)
55556 v.AddArg(x)
55557 return true
55558 }
55559 // match: (XORLconst [1] (SETL x))
55560 // cond:
55561 // result: (SETGE x)
55562 for {
55563 if v.AuxInt != 1 {
55564 break
55565 }
55566 v_0 := v.Args[0]
55567 if v_0.Op != OpAMD64SETL {
55568 break
55569 }
55570 x := v_0.Args[0]
55571 v.reset(OpAMD64SETGE)
55572 v.AddArg(x)
55573 return true
55574 }
55575 // match: (XORLconst [1] (SETGE x))
55576 // cond:
55577 // result: (SETL x)
55578 for {
55579 if v.AuxInt != 1 {
55580 break
55581 }
55582 v_0 := v.Args[0]
55583 if v_0.Op != OpAMD64SETGE {
55584 break
55585 }
55586 x := v_0.Args[0]
55587 v.reset(OpAMD64SETL)
55588 v.AddArg(x)
55589 return true
55590 }
55591 // match: (XORLconst [1] (SETLE x))
55592 // cond:
55593 // result: (SETG x)
55594 for {
55595 if v.AuxInt != 1 {
55596 break
55597 }
55598 v_0 := v.Args[0]
55599 if v_0.Op != OpAMD64SETLE {
55600 break
55601 }
55602 x := v_0.Args[0]
55603 v.reset(OpAMD64SETG)
55604 v.AddArg(x)
55605 return true
55606 }
55607 // match: (XORLconst [1] (SETG x))
55608 // cond:
55609 // result: (SETLE x)
55610 for {
55611 if v.AuxInt != 1 {
55612 break
55613 }
55614 v_0 := v.Args[0]
55615 if v_0.Op != OpAMD64SETG {
55616 break
55617 }
55618 x := v_0.Args[0]
55619 v.reset(OpAMD64SETLE)
55620 v.AddArg(x)
55621 return true
55622 }
55623 // match: (XORLconst [1] (SETB x))
55624 // cond:
55625 // result: (SETAE x)
55626 for {
55627 if v.AuxInt != 1 {
55628 break
55629 }
55630 v_0 := v.Args[0]
55631 if v_0.Op != OpAMD64SETB {
55632 break
55633 }
55634 x := v_0.Args[0]
55635 v.reset(OpAMD64SETAE)
55636 v.AddArg(x)
55637 return true
55638 }
55639 // match: (XORLconst [1] (SETAE x))
55640 // cond:
55641 // result: (SETB x)
55642 for {
55643 if v.AuxInt != 1 {
55644 break
55645 }
55646 v_0 := v.Args[0]
55647 if v_0.Op != OpAMD64SETAE {
55648 break
55649 }
55650 x := v_0.Args[0]
55651 v.reset(OpAMD64SETB)
55652 v.AddArg(x)
55653 return true
55654 }
55655 // match: (XORLconst [1] (SETBE x))
55656 // cond:
55657 // result: (SETA x)
55658 for {
55659 if v.AuxInt != 1 {
55660 break
55661 }
55662 v_0 := v.Args[0]
55663 if v_0.Op != OpAMD64SETBE {
55664 break
55665 }
55666 x := v_0.Args[0]
55667 v.reset(OpAMD64SETA)
55668 v.AddArg(x)
55669 return true
55670 }
55671 return false
55672 }
55673 func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool {
55674 // match: (XORLconst [1] (SETA x))
55675 // cond:
55676 // result: (SETBE x)
55677 for {
55678 if v.AuxInt != 1 {
55679 break
55680 }
55681 v_0 := v.Args[0]
55682 if v_0.Op != OpAMD64SETA {
55683 break
55684 }
55685 x := v_0.Args[0]
55686 v.reset(OpAMD64SETBE)
55687 v.AddArg(x)
55688 return true
55689 }
55690 // match: (XORLconst [c] (XORLconst [d] x))
55691 // cond:
55692 // result: (XORLconst [c ^ d] x)
55693 for {
55694 c := v.AuxInt
55695 v_0 := v.Args[0]
55696 if v_0.Op != OpAMD64XORLconst {
55697 break
55698 }
55699 d := v_0.AuxInt
55700 x := v_0.Args[0]
55701 v.reset(OpAMD64XORLconst)
55702 v.AuxInt = c ^ d
55703 v.AddArg(x)
55704 return true
55705 }
55706 // match: (XORLconst [c] (BTCLconst [d] x))
55707 // cond:
55708 // result: (XORLconst [c ^ 1<<uint32(d)] x)
55709 for {
55710 c := v.AuxInt
55711 v_0 := v.Args[0]
55712 if v_0.Op != OpAMD64BTCLconst {
55713 break
55714 }
55715 d := v_0.AuxInt
55716 x := v_0.Args[0]
55717 v.reset(OpAMD64XORLconst)
55718 v.AuxInt = c ^ 1<<uint32(d)
55719 v.AddArg(x)
55720 return true
55721 }
55722 // match: (XORLconst [c] x)
55723 // cond: int32(c) == 0
55724 // result: x
55725 for {
55726 c := v.AuxInt
55727 x := v.Args[0]
55728 if !(int32(c) == 0) {
55729 break
55730 }
55731 v.reset(OpCopy)
55732 v.Type = x.Type
55733 v.AddArg(x)
55734 return true
55735 }
55736 // match: (XORLconst [c] (MOVLconst [d]))
55737 // cond:
55738 // result: (MOVLconst [c^d])
55739 for {
55740 c := v.AuxInt
55741 v_0 := v.Args[0]
55742 if v_0.Op != OpAMD64MOVLconst {
55743 break
55744 }
55745 d := v_0.AuxInt
55746 v.reset(OpAMD64MOVLconst)
55747 v.AuxInt = c ^ d
55748 return true
55749 }
55750 return false
55751 }
55752 func rewriteValueAMD64_OpAMD64XORLconstmodify_0(v *Value) bool {
55753 // match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
55754 // cond: ValAndOff(valoff1).canAdd(off2)
55755 // result: (XORLconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
55756 for {
55757 valoff1 := v.AuxInt
55758 sym := v.Aux
55759 mem := v.Args[1]
55760 v_0 := v.Args[0]
55761 if v_0.Op != OpAMD64ADDQconst {
55762 break
55763 }
55764 off2 := v_0.AuxInt
55765 base := v_0.Args[0]
55766 if !(ValAndOff(valoff1).canAdd(off2)) {
55767 break
55768 }
55769 v.reset(OpAMD64XORLconstmodify)
55770 v.AuxInt = ValAndOff(valoff1).add(off2)
55771 v.Aux = sym
55772 v.AddArg(base)
55773 v.AddArg(mem)
55774 return true
55775 }
55776 // match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
55777 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
55778 // result: (XORLconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
55779 for {
55780 valoff1 := v.AuxInt
55781 sym1 := v.Aux
55782 mem := v.Args[1]
55783 v_0 := v.Args[0]
55784 if v_0.Op != OpAMD64LEAQ {
55785 break
55786 }
55787 off2 := v_0.AuxInt
55788 sym2 := v_0.Aux
55789 base := v_0.Args[0]
55790 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
55791 break
55792 }
55793 v.reset(OpAMD64XORLconstmodify)
55794 v.AuxInt = ValAndOff(valoff1).add(off2)
55795 v.Aux = mergeSym(sym1, sym2)
55796 v.AddArg(base)
55797 v.AddArg(mem)
55798 return true
55799 }
55800 return false
55801 }
55802 func rewriteValueAMD64_OpAMD64XORLload_0(v *Value) bool {
55803 b := v.Block
55804 typ := &b.Func.Config.Types
55805 // match: (XORLload [off1] {sym} val (ADDQconst [off2] base) mem)
55806 // cond: is32Bit(off1+off2)
55807 // result: (XORLload [off1+off2] {sym} val base mem)
55808 for {
55809 off1 := v.AuxInt
55810 sym := v.Aux
55811 mem := v.Args[2]
55812 val := v.Args[0]
55813 v_1 := v.Args[1]
55814 if v_1.Op != OpAMD64ADDQconst {
55815 break
55816 }
55817 off2 := v_1.AuxInt
55818 base := v_1.Args[0]
55819 if !(is32Bit(off1 + off2)) {
55820 break
55821 }
55822 v.reset(OpAMD64XORLload)
55823 v.AuxInt = off1 + off2
55824 v.Aux = sym
55825 v.AddArg(val)
55826 v.AddArg(base)
55827 v.AddArg(mem)
55828 return true
55829 }
55830 // match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
55831 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
55832 // result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
55833 for {
55834 off1 := v.AuxInt
55835 sym1 := v.Aux
55836 mem := v.Args[2]
55837 val := v.Args[0]
55838 v_1 := v.Args[1]
55839 if v_1.Op != OpAMD64LEAQ {
55840 break
55841 }
55842 off2 := v_1.AuxInt
55843 sym2 := v_1.Aux
55844 base := v_1.Args[0]
55845 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
55846 break
55847 }
55848 v.reset(OpAMD64XORLload)
55849 v.AuxInt = off1 + off2
55850 v.Aux = mergeSym(sym1, sym2)
55851 v.AddArg(val)
55852 v.AddArg(base)
55853 v.AddArg(mem)
55854 return true
55855 }
55856 // match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
55857 // cond:
55858 // result: (XORL x (MOVLf2i y))
55859 for {
55860 off := v.AuxInt
55861 sym := v.Aux
55862 _ = v.Args[2]
55863 x := v.Args[0]
55864 ptr := v.Args[1]
55865 v_2 := v.Args[2]
55866 if v_2.Op != OpAMD64MOVSSstore {
55867 break
55868 }
55869 if v_2.AuxInt != off {
55870 break
55871 }
55872 if v_2.Aux != sym {
55873 break
55874 }
55875 _ = v_2.Args[2]
55876 if ptr != v_2.Args[0] {
55877 break
55878 }
55879 y := v_2.Args[1]
55880 v.reset(OpAMD64XORL)
55881 v.AddArg(x)
55882 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
55883 v0.AddArg(y)
55884 v.AddArg(v0)
55885 return true
55886 }
55887 return false
55888 }
55889 func rewriteValueAMD64_OpAMD64XORLmodify_0(v *Value) bool {
55890 // match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
55891 // cond: is32Bit(off1+off2)
55892 // result: (XORLmodify [off1+off2] {sym} base val mem)
55893 for {
55894 off1 := v.AuxInt
55895 sym := v.Aux
55896 mem := v.Args[2]
55897 v_0 := v.Args[0]
55898 if v_0.Op != OpAMD64ADDQconst {
55899 break
55900 }
55901 off2 := v_0.AuxInt
55902 base := v_0.Args[0]
55903 val := v.Args[1]
55904 if !(is32Bit(off1 + off2)) {
55905 break
55906 }
55907 v.reset(OpAMD64XORLmodify)
55908 v.AuxInt = off1 + off2
55909 v.Aux = sym
55910 v.AddArg(base)
55911 v.AddArg(val)
55912 v.AddArg(mem)
55913 return true
55914 }
55915 // match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
55916 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
55917 // result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
55918 for {
55919 off1 := v.AuxInt
55920 sym1 := v.Aux
55921 mem := v.Args[2]
55922 v_0 := v.Args[0]
55923 if v_0.Op != OpAMD64LEAQ {
55924 break
55925 }
55926 off2 := v_0.AuxInt
55927 sym2 := v_0.Aux
55928 base := v_0.Args[0]
55929 val := v.Args[1]
55930 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
55931 break
55932 }
55933 v.reset(OpAMD64XORLmodify)
55934 v.AuxInt = off1 + off2
55935 v.Aux = mergeSym(sym1, sym2)
55936 v.AddArg(base)
55937 v.AddArg(val)
55938 v.AddArg(mem)
55939 return true
55940 }
55941 return false
55942 }
55943 func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool {
55944 b := v.Block
55945 config := b.Func.Config
55946 // match: (XORQ (SHLQ (MOVQconst [1]) y) x)
55947 // cond: !config.nacl
55948 // result: (BTCQ x y)
55949 for {
55950 x := v.Args[1]
55951 v_0 := v.Args[0]
55952 if v_0.Op != OpAMD64SHLQ {
55953 break
55954 }
55955 y := v_0.Args[1]
55956 v_0_0 := v_0.Args[0]
55957 if v_0_0.Op != OpAMD64MOVQconst {
55958 break
55959 }
55960 if v_0_0.AuxInt != 1 {
55961 break
55962 }
55963 if !(!config.nacl) {
55964 break
55965 }
55966 v.reset(OpAMD64BTCQ)
55967 v.AddArg(x)
55968 v.AddArg(y)
55969 return true
55970 }
55971 // match: (XORQ x (SHLQ (MOVQconst [1]) y))
55972 // cond: !config.nacl
55973 // result: (BTCQ x y)
55974 for {
55975 _ = v.Args[1]
55976 x := v.Args[0]
55977 v_1 := v.Args[1]
55978 if v_1.Op != OpAMD64SHLQ {
55979 break
55980 }
55981 y := v_1.Args[1]
55982 v_1_0 := v_1.Args[0]
55983 if v_1_0.Op != OpAMD64MOVQconst {
55984 break
55985 }
55986 if v_1_0.AuxInt != 1 {
55987 break
55988 }
55989 if !(!config.nacl) {
55990 break
55991 }
55992 v.reset(OpAMD64BTCQ)
55993 v.AddArg(x)
55994 v.AddArg(y)
55995 return true
55996 }
55997 // match: (XORQ (MOVQconst [c]) x)
55998 // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
55999 // result: (BTCQconst [log2(c)] x)
56000 for {
56001 x := v.Args[1]
56002 v_0 := v.Args[0]
56003 if v_0.Op != OpAMD64MOVQconst {
56004 break
56005 }
56006 c := v_0.AuxInt
56007 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
56008 break
56009 }
56010 v.reset(OpAMD64BTCQconst)
56011 v.AuxInt = log2(c)
56012 v.AddArg(x)
56013 return true
56014 }
56015 // match: (XORQ x (MOVQconst [c]))
56016 // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
56017 // result: (BTCQconst [log2(c)] x)
56018 for {
56019 _ = v.Args[1]
56020 x := v.Args[0]
56021 v_1 := v.Args[1]
56022 if v_1.Op != OpAMD64MOVQconst {
56023 break
56024 }
56025 c := v_1.AuxInt
56026 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
56027 break
56028 }
56029 v.reset(OpAMD64BTCQconst)
56030 v.AuxInt = log2(c)
56031 v.AddArg(x)
56032 return true
56033 }
56034 // match: (XORQ x (MOVQconst [c]))
56035 // cond: is32Bit(c)
56036 // result: (XORQconst [c] x)
56037 for {
56038 _ = v.Args[1]
56039 x := v.Args[0]
56040 v_1 := v.Args[1]
56041 if v_1.Op != OpAMD64MOVQconst {
56042 break
56043 }
56044 c := v_1.AuxInt
56045 if !(is32Bit(c)) {
56046 break
56047 }
56048 v.reset(OpAMD64XORQconst)
56049 v.AuxInt = c
56050 v.AddArg(x)
56051 return true
56052 }
56053 // match: (XORQ (MOVQconst [c]) x)
56054 // cond: is32Bit(c)
56055 // result: (XORQconst [c] x)
56056 for {
56057 x := v.Args[1]
56058 v_0 := v.Args[0]
56059 if v_0.Op != OpAMD64MOVQconst {
56060 break
56061 }
56062 c := v_0.AuxInt
56063 if !(is32Bit(c)) {
56064 break
56065 }
56066 v.reset(OpAMD64XORQconst)
56067 v.AuxInt = c
56068 v.AddArg(x)
56069 return true
56070 }
56071 // match: (XORQ (SHLQconst x [c]) (SHRQconst x [d]))
56072 // cond: d == 64-c
56073 // result: (ROLQconst x [c])
56074 for {
56075 _ = v.Args[1]
56076 v_0 := v.Args[0]
56077 if v_0.Op != OpAMD64SHLQconst {
56078 break
56079 }
56080 c := v_0.AuxInt
56081 x := v_0.Args[0]
56082 v_1 := v.Args[1]
56083 if v_1.Op != OpAMD64SHRQconst {
56084 break
56085 }
56086 d := v_1.AuxInt
56087 if x != v_1.Args[0] {
56088 break
56089 }
56090 if !(d == 64-c) {
56091 break
56092 }
56093 v.reset(OpAMD64ROLQconst)
56094 v.AuxInt = c
56095 v.AddArg(x)
56096 return true
56097 }
56098 // match: (XORQ (SHRQconst x [d]) (SHLQconst x [c]))
56099 // cond: d == 64-c
56100 // result: (ROLQconst x [c])
56101 for {
56102 _ = v.Args[1]
56103 v_0 := v.Args[0]
56104 if v_0.Op != OpAMD64SHRQconst {
56105 break
56106 }
56107 d := v_0.AuxInt
56108 x := v_0.Args[0]
56109 v_1 := v.Args[1]
56110 if v_1.Op != OpAMD64SHLQconst {
56111 break
56112 }
56113 c := v_1.AuxInt
56114 if x != v_1.Args[0] {
56115 break
56116 }
56117 if !(d == 64-c) {
56118 break
56119 }
56120 v.reset(OpAMD64ROLQconst)
56121 v.AuxInt = c
56122 v.AddArg(x)
56123 return true
56124 }
56125 // match: (XORQ x x)
56126 // cond:
56127 // result: (MOVQconst [0])
56128 for {
56129 x := v.Args[1]
56130 if x != v.Args[0] {
56131 break
56132 }
56133 v.reset(OpAMD64MOVQconst)
56134 v.AuxInt = 0
56135 return true
56136 }
56137 // match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
56138 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
56139 // result: (XORQload x [off] {sym} ptr mem)
56140 for {
56141 _ = v.Args[1]
56142 x := v.Args[0]
56143 l := v.Args[1]
56144 if l.Op != OpAMD64MOVQload {
56145 break
56146 }
56147 off := l.AuxInt
56148 sym := l.Aux
56149 mem := l.Args[1]
56150 ptr := l.Args[0]
56151 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
56152 break
56153 }
56154 v.reset(OpAMD64XORQload)
56155 v.AuxInt = off
56156 v.Aux = sym
56157 v.AddArg(x)
56158 v.AddArg(ptr)
56159 v.AddArg(mem)
56160 return true
56161 }
56162 return false
56163 }
56164 func rewriteValueAMD64_OpAMD64XORQ_10(v *Value) bool {
56165 // match: (XORQ l:(MOVQload [off] {sym} ptr mem) x)
56166 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
56167 // result: (XORQload x [off] {sym} ptr mem)
56168 for {
56169 x := v.Args[1]
56170 l := v.Args[0]
56171 if l.Op != OpAMD64MOVQload {
56172 break
56173 }
56174 off := l.AuxInt
56175 sym := l.Aux
56176 mem := l.Args[1]
56177 ptr := l.Args[0]
56178 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
56179 break
56180 }
56181 v.reset(OpAMD64XORQload)
56182 v.AuxInt = off
56183 v.Aux = sym
56184 v.AddArg(x)
56185 v.AddArg(ptr)
56186 v.AddArg(mem)
56187 return true
56188 }
56189 return false
56190 }
56191 func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool {
56192 b := v.Block
56193 config := b.Func.Config
56194 // match: (XORQconst [c] x)
56195 // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
56196 // result: (BTCQconst [log2(c)] x)
56197 for {
56198 c := v.AuxInt
56199 x := v.Args[0]
56200 if !(isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl) {
56201 break
56202 }
56203 v.reset(OpAMD64BTCQconst)
56204 v.AuxInt = log2(c)
56205 v.AddArg(x)
56206 return true
56207 }
56208 // match: (XORQconst [c] (XORQconst [d] x))
56209 // cond:
56210 // result: (XORQconst [c ^ d] x)
56211 for {
56212 c := v.AuxInt
56213 v_0 := v.Args[0]
56214 if v_0.Op != OpAMD64XORQconst {
56215 break
56216 }
56217 d := v_0.AuxInt
56218 x := v_0.Args[0]
56219 v.reset(OpAMD64XORQconst)
56220 v.AuxInt = c ^ d
56221 v.AddArg(x)
56222 return true
56223 }
56224 // match: (XORQconst [c] (BTCQconst [d] x))
56225 // cond:
56226 // result: (XORQconst [c ^ 1<<uint32(d)] x)
56227 for {
56228 c := v.AuxInt
56229 v_0 := v.Args[0]
56230 if v_0.Op != OpAMD64BTCQconst {
56231 break
56232 }
56233 d := v_0.AuxInt
56234 x := v_0.Args[0]
56235 v.reset(OpAMD64XORQconst)
56236 v.AuxInt = c ^ 1<<uint32(d)
56237 v.AddArg(x)
56238 return true
56239 }
56240 // match: (XORQconst [0] x)
56241 // cond:
56242 // result: x
56243 for {
56244 if v.AuxInt != 0 {
56245 break
56246 }
56247 x := v.Args[0]
56248 v.reset(OpCopy)
56249 v.Type = x.Type
56250 v.AddArg(x)
56251 return true
56252 }
56253 // match: (XORQconst [c] (MOVQconst [d]))
56254 // cond:
56255 // result: (MOVQconst [c^d])
56256 for {
56257 c := v.AuxInt
56258 v_0 := v.Args[0]
56259 if v_0.Op != OpAMD64MOVQconst {
56260 break
56261 }
56262 d := v_0.AuxInt
56263 v.reset(OpAMD64MOVQconst)
56264 v.AuxInt = c ^ d
56265 return true
56266 }
56267 return false
56268 }
56269 func rewriteValueAMD64_OpAMD64XORQconstmodify_0(v *Value) bool {
56270 // match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
56271 // cond: ValAndOff(valoff1).canAdd(off2)
56272 // result: (XORQconstmodify [ValAndOff(valoff1).add(off2)] {sym} base mem)
56273 for {
56274 valoff1 := v.AuxInt
56275 sym := v.Aux
56276 mem := v.Args[1]
56277 v_0 := v.Args[0]
56278 if v_0.Op != OpAMD64ADDQconst {
56279 break
56280 }
56281 off2 := v_0.AuxInt
56282 base := v_0.Args[0]
56283 if !(ValAndOff(valoff1).canAdd(off2)) {
56284 break
56285 }
56286 v.reset(OpAMD64XORQconstmodify)
56287 v.AuxInt = ValAndOff(valoff1).add(off2)
56288 v.Aux = sym
56289 v.AddArg(base)
56290 v.AddArg(mem)
56291 return true
56292 }
56293 // match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
56294 // cond: ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)
56295 // result: (XORQconstmodify [ValAndOff(valoff1).add(off2)] {mergeSym(sym1,sym2)} base mem)
56296 for {
56297 valoff1 := v.AuxInt
56298 sym1 := v.Aux
56299 mem := v.Args[1]
56300 v_0 := v.Args[0]
56301 if v_0.Op != OpAMD64LEAQ {
56302 break
56303 }
56304 off2 := v_0.AuxInt
56305 sym2 := v_0.Aux
56306 base := v_0.Args[0]
56307 if !(ValAndOff(valoff1).canAdd(off2) && canMergeSym(sym1, sym2)) {
56308 break
56309 }
56310 v.reset(OpAMD64XORQconstmodify)
56311 v.AuxInt = ValAndOff(valoff1).add(off2)
56312 v.Aux = mergeSym(sym1, sym2)
56313 v.AddArg(base)
56314 v.AddArg(mem)
56315 return true
56316 }
56317 return false
56318 }
56319 func rewriteValueAMD64_OpAMD64XORQload_0(v *Value) bool {
56320 b := v.Block
56321 typ := &b.Func.Config.Types
56322 // match: (XORQload [off1] {sym} val (ADDQconst [off2] base) mem)
56323 // cond: is32Bit(off1+off2)
56324 // result: (XORQload [off1+off2] {sym} val base mem)
56325 for {
56326 off1 := v.AuxInt
56327 sym := v.Aux
56328 mem := v.Args[2]
56329 val := v.Args[0]
56330 v_1 := v.Args[1]
56331 if v_1.Op != OpAMD64ADDQconst {
56332 break
56333 }
56334 off2 := v_1.AuxInt
56335 base := v_1.Args[0]
56336 if !(is32Bit(off1 + off2)) {
56337 break
56338 }
56339 v.reset(OpAMD64XORQload)
56340 v.AuxInt = off1 + off2
56341 v.Aux = sym
56342 v.AddArg(val)
56343 v.AddArg(base)
56344 v.AddArg(mem)
56345 return true
56346 }
56347 // match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
56348 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
56349 // result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
56350 for {
56351 off1 := v.AuxInt
56352 sym1 := v.Aux
56353 mem := v.Args[2]
56354 val := v.Args[0]
56355 v_1 := v.Args[1]
56356 if v_1.Op != OpAMD64LEAQ {
56357 break
56358 }
56359 off2 := v_1.AuxInt
56360 sym2 := v_1.Aux
56361 base := v_1.Args[0]
56362 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
56363 break
56364 }
56365 v.reset(OpAMD64XORQload)
56366 v.AuxInt = off1 + off2
56367 v.Aux = mergeSym(sym1, sym2)
56368 v.AddArg(val)
56369 v.AddArg(base)
56370 v.AddArg(mem)
56371 return true
56372 }
56373 // match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
56374 // cond:
56375 // result: (XORQ x (MOVQf2i y))
56376 for {
56377 off := v.AuxInt
56378 sym := v.Aux
56379 _ = v.Args[2]
56380 x := v.Args[0]
56381 ptr := v.Args[1]
56382 v_2 := v.Args[2]
56383 if v_2.Op != OpAMD64MOVSDstore {
56384 break
56385 }
56386 if v_2.AuxInt != off {
56387 break
56388 }
56389 if v_2.Aux != sym {
56390 break
56391 }
56392 _ = v_2.Args[2]
56393 if ptr != v_2.Args[0] {
56394 break
56395 }
56396 y := v_2.Args[1]
56397 v.reset(OpAMD64XORQ)
56398 v.AddArg(x)
56399 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
56400 v0.AddArg(y)
56401 v.AddArg(v0)
56402 return true
56403 }
56404 return false
56405 }
56406 func rewriteValueAMD64_OpAMD64XORQmodify_0(v *Value) bool {
56407 // match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
56408 // cond: is32Bit(off1+off2)
56409 // result: (XORQmodify [off1+off2] {sym} base val mem)
56410 for {
56411 off1 := v.AuxInt
56412 sym := v.Aux
56413 mem := v.Args[2]
56414 v_0 := v.Args[0]
56415 if v_0.Op != OpAMD64ADDQconst {
56416 break
56417 }
56418 off2 := v_0.AuxInt
56419 base := v_0.Args[0]
56420 val := v.Args[1]
56421 if !(is32Bit(off1 + off2)) {
56422 break
56423 }
56424 v.reset(OpAMD64XORQmodify)
56425 v.AuxInt = off1 + off2
56426 v.Aux = sym
56427 v.AddArg(base)
56428 v.AddArg(val)
56429 v.AddArg(mem)
56430 return true
56431 }
56432 // match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
56433 // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
56434 // result: (XORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
56435 for {
56436 off1 := v.AuxInt
56437 sym1 := v.Aux
56438 mem := v.Args[2]
56439 v_0 := v.Args[0]
56440 if v_0.Op != OpAMD64LEAQ {
56441 break
56442 }
56443 off2 := v_0.AuxInt
56444 sym2 := v_0.Aux
56445 base := v_0.Args[0]
56446 val := v.Args[1]
56447 if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
56448 break
56449 }
56450 v.reset(OpAMD64XORQmodify)
56451 v.AuxInt = off1 + off2
56452 v.Aux = mergeSym(sym1, sym2)
56453 v.AddArg(base)
56454 v.AddArg(val)
56455 v.AddArg(mem)
56456 return true
56457 }
56458 return false
56459 }
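// From here on, generic SSA ops are lowered to AMD64 instructions: 8-, 16-,
// and 32-bit adds all use ADDL, 64-bit adds use ADDQ, and the float32/float64
// forms use ADDSS/ADDSD; narrow ops can share the 32-bit instruction because
// only the low bits of the result are observed.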
56460 func rewriteValueAMD64_OpAdd16_0(v *Value) bool {
56461 // match: (Add16 x y)
56462 // cond:
56463 // result: (ADDL x y)
56464 for {
56465 y := v.Args[1]
56466 x := v.Args[0]
56467 v.reset(OpAMD64ADDL)
56468 v.AddArg(x)
56469 v.AddArg(y)
56470 return true
56471 }
56472 }
56473 func rewriteValueAMD64_OpAdd32_0(v *Value) bool {
56474 // match: (Add32 x y)
56475 // cond:
56476 // result: (ADDL x y)
56477 for {
56478 y := v.Args[1]
56479 x := v.Args[0]
56480 v.reset(OpAMD64ADDL)
56481 v.AddArg(x)
56482 v.AddArg(y)
56483 return true
56484 }
56485 }
56486 func rewriteValueAMD64_OpAdd32F_0(v *Value) bool {
56487 // match: (Add32F x y)
56488 // cond:
56489 // result: (ADDSS x y)
56490 for {
56491 y := v.Args[1]
56492 x := v.Args[0]
56493 v.reset(OpAMD64ADDSS)
56494 v.AddArg(x)
56495 v.AddArg(y)
56496 return true
56497 }
56498 }
56499 func rewriteValueAMD64_OpAdd64_0(v *Value) bool {
56500 // match: (Add64 x y)
56501 // cond:
56502 // result: (ADDQ x y)
56503 for {
56504 y := v.Args[1]
56505 x := v.Args[0]
56506 v.reset(OpAMD64ADDQ)
56507 v.AddArg(x)
56508 v.AddArg(y)
56509 return true
56510 }
56511 }
56512 func rewriteValueAMD64_OpAdd64F_0(v *Value) bool {
56513 // match: (Add64F x y)
56514 // cond:
56515 // result: (ADDSD x y)
56516 for {
56517 y := v.Args[1]
56518 x := v.Args[0]
56519 v.reset(OpAMD64ADDSD)
56520 v.AddArg(x)
56521 v.AddArg(y)
56522 return true
56523 }
56524 }
56525 func rewriteValueAMD64_OpAdd8_0(v *Value) bool {
56526 // match: (Add8 x y)
56527 // cond:
56528 // result: (ADDL x y)
56529 for {
56530 y := v.Args[1]
56531 x := v.Args[0]
56532 v.reset(OpAMD64ADDL)
56533 v.AddArg(x)
56534 v.AddArg(y)
56535 return true
56536 }
56537 }
56538 func rewriteValueAMD64_OpAddPtr_0(v *Value) bool {
56539 b := v.Block
56540 config := b.Func.Config
56541 // match: (AddPtr x y)
56542 // cond: config.PtrSize == 8
56543 // result: (ADDQ x y)
56544 for {
56545 y := v.Args[1]
56546 x := v.Args[0]
56547 if !(config.PtrSize == 8) {
56548 break
56549 }
56550 v.reset(OpAMD64ADDQ)
56551 v.AddArg(x)
56552 v.AddArg(y)
56553 return true
56554 }
56555 // match: (AddPtr x y)
56556 // cond: config.PtrSize == 4
56557 // result: (ADDL x y)
56558 for {
56559 y := v.Args[1]
56560 x := v.Args[0]
56561 if !(config.PtrSize == 4) {
56562 break
56563 }
56564 v.reset(OpAMD64ADDL)
56565 v.AddArg(x)
56566 v.AddArg(y)
56567 return true
56568 }
56569 return false
56570 }
56571 func rewriteValueAMD64_OpAddr_0(v *Value) bool {
56572 b := v.Block
56573 config := b.Func.Config
56574 // match: (Addr {sym} base)
56575 // cond: config.PtrSize == 8
56576 // result: (LEAQ {sym} base)
56577 for {
56578 sym := v.Aux
56579 base := v.Args[0]
56580 if !(config.PtrSize == 8) {
56581 break
56582 }
56583 v.reset(OpAMD64LEAQ)
56584 v.Aux = sym
56585 v.AddArg(base)
56586 return true
56587 }
56588 // match: (Addr {sym} base)
56589 // cond: config.PtrSize == 4
56590 // result: (LEAL {sym} base)
56591 for {
56592 sym := v.Aux
56593 base := v.Args[0]
56594 if !(config.PtrSize == 4) {
56595 break
56596 }
56597 v.reset(OpAMD64LEAL)
56598 v.Aux = sym
56599 v.AddArg(base)
56600 return true
56601 }
56602 return false
56603 }
56604 func rewriteValueAMD64_OpAnd16_0(v *Value) bool {
56605 // match: (And16 x y)
56606 // cond:
56607 // result: (ANDL x y)
56608 for {
56609 y := v.Args[1]
56610 x := v.Args[0]
56611 v.reset(OpAMD64ANDL)
56612 v.AddArg(x)
56613 v.AddArg(y)
56614 return true
56615 }
56616 }
56617 func rewriteValueAMD64_OpAnd32_0(v *Value) bool {
56618 // match: (And32 x y)
56619 // cond:
56620 // result: (ANDL x y)
56621 for {
56622 y := v.Args[1]
56623 x := v.Args[0]
56624 v.reset(OpAMD64ANDL)
56625 v.AddArg(x)
56626 v.AddArg(y)
56627 return true
56628 }
56629 }
56630 func rewriteValueAMD64_OpAnd64_0(v *Value) bool {
56631 // match: (And64 x y)
56632 // cond:
56633 // result: (ANDQ x y)
56634 for {
56635 y := v.Args[1]
56636 x := v.Args[0]
56637 v.reset(OpAMD64ANDQ)
56638 v.AddArg(x)
56639 v.AddArg(y)
56640 return true
56641 }
56642 }
56643 func rewriteValueAMD64_OpAnd8_0(v *Value) bool {
56644 // match: (And8 x y)
56645 // cond:
56646 // result: (ANDL x y)
56647 for {
56648 y := v.Args[1]
56649 x := v.Args[0]
56650 v.reset(OpAMD64ANDL)
56651 v.AddArg(x)
56652 v.AddArg(y)
56653 return true
56654 }
56655 }
56656 func rewriteValueAMD64_OpAndB_0(v *Value) bool {
56657 // match: (AndB x y)
56658 // cond:
56659 // result: (ANDL x y)
56660 for {
56661 y := v.Args[1]
56662 x := v.Args[0]
56663 v.reset(OpAMD64ANDL)
56664 v.AddArg(x)
56665 v.AddArg(y)
56666 return true
56667 }
56668 }
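// Atomic ops lower to their locked AMD64 forms: XADD for add, CMPXCHG for
// compare-and-swap, and XCHG for exchange and for atomic stores (an XCHG
// with a memory operand is implicitly locked, so no separate fence is
// emitted).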
56669 func rewriteValueAMD64_OpAtomicAdd32_0(v *Value) bool {
56670 b := v.Block
56671 typ := &b.Func.Config.Types
56672 // match: (AtomicAdd32 ptr val mem)
56673 // cond:
56674 // result: (AddTupleFirst32 val (XADDLlock val ptr mem))
56675 for {
56676 mem := v.Args[2]
56677 ptr := v.Args[0]
56678 val := v.Args[1]
56679 v.reset(OpAMD64AddTupleFirst32)
56680 v.AddArg(val)
56681 v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
56682 v0.AddArg(val)
56683 v0.AddArg(ptr)
56684 v0.AddArg(mem)
56685 v.AddArg(v0)
56686 return true
56687 }
56688 }
56689 func rewriteValueAMD64_OpAtomicAdd64_0(v *Value) bool {
56690 b := v.Block
56691 typ := &b.Func.Config.Types
56692 // match: (AtomicAdd64 ptr val mem)
56693 // cond:
56694 // result: (AddTupleFirst64 val (XADDQlock val ptr mem))
56695 for {
56696 mem := v.Args[2]
56697 ptr := v.Args[0]
56698 val := v.Args[1]
56699 v.reset(OpAMD64AddTupleFirst64)
56700 v.AddArg(val)
56701 v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
56702 v0.AddArg(val)
56703 v0.AddArg(ptr)
56704 v0.AddArg(mem)
56705 v.AddArg(v0)
56706 return true
56707 }
56708 }
56709 func rewriteValueAMD64_OpAtomicAnd8_0(v *Value) bool {
56710 // match: (AtomicAnd8 ptr val mem)
56711 // cond:
56712 // result: (ANDBlock ptr val mem)
56713 for {
56714 mem := v.Args[2]
56715 ptr := v.Args[0]
56716 val := v.Args[1]
56717 v.reset(OpAMD64ANDBlock)
56718 v.AddArg(ptr)
56719 v.AddArg(val)
56720 v.AddArg(mem)
56721 return true
56722 }
56723 }
56724 func rewriteValueAMD64_OpAtomicCompareAndSwap32_0(v *Value) bool {
56725 // match: (AtomicCompareAndSwap32 ptr old new_ mem)
56726 // cond:
56727 // result: (CMPXCHGLlock ptr old new_ mem)
56728 for {
56729 mem := v.Args[3]
56730 ptr := v.Args[0]
56731 old := v.Args[1]
56732 new_ := v.Args[2]
56733 v.reset(OpAMD64CMPXCHGLlock)
56734 v.AddArg(ptr)
56735 v.AddArg(old)
56736 v.AddArg(new_)
56737 v.AddArg(mem)
56738 return true
56739 }
56740 }
56741 func rewriteValueAMD64_OpAtomicCompareAndSwap64_0(v *Value) bool {
56742 // match: (AtomicCompareAndSwap64 ptr old new_ mem)
56743 // cond:
56744 // result: (CMPXCHGQlock ptr old new_ mem)
56745 for {
56746 mem := v.Args[3]
56747 ptr := v.Args[0]
56748 old := v.Args[1]
56749 new_ := v.Args[2]
56750 v.reset(OpAMD64CMPXCHGQlock)
56751 v.AddArg(ptr)
56752 v.AddArg(old)
56753 v.AddArg(new_)
56754 v.AddArg(mem)
56755 return true
56756 }
56757 }
56758 func rewriteValueAMD64_OpAtomicExchange32_0(v *Value) bool {
56759 // match: (AtomicExchange32 ptr val mem)
56760 // cond:
56761 // result: (XCHGL val ptr mem)
56762 for {
56763 mem := v.Args[2]
56764 ptr := v.Args[0]
56765 val := v.Args[1]
56766 v.reset(OpAMD64XCHGL)
56767 v.AddArg(val)
56768 v.AddArg(ptr)
56769 v.AddArg(mem)
56770 return true
56771 }
56772 }
56773 func rewriteValueAMD64_OpAtomicExchange64_0(v *Value) bool {
56774 // match: (AtomicExchange64 ptr val mem)
56775 // cond:
56776 // result: (XCHGQ val ptr mem)
56777 for {
56778 mem := v.Args[2]
56779 ptr := v.Args[0]
56780 val := v.Args[1]
56781 v.reset(OpAMD64XCHGQ)
56782 v.AddArg(val)
56783 v.AddArg(ptr)
56784 v.AddArg(mem)
56785 return true
56786 }
56787 }
56788 func rewriteValueAMD64_OpAtomicLoad32_0(v *Value) bool {
56789 // match: (AtomicLoad32 ptr mem)
56790 // cond:
56791 // result: (MOVLatomicload ptr mem)
56792 for {
56793 mem := v.Args[1]
56794 ptr := v.Args[0]
56795 v.reset(OpAMD64MOVLatomicload)
56796 v.AddArg(ptr)
56797 v.AddArg(mem)
56798 return true
56799 }
56800 }
56801 func rewriteValueAMD64_OpAtomicLoad64_0(v *Value) bool {
56802 // match: (AtomicLoad64 ptr mem)
56803 // cond:
56804 // result: (MOVQatomicload ptr mem)
56805 for {
56806 mem := v.Args[1]
56807 ptr := v.Args[0]
56808 v.reset(OpAMD64MOVQatomicload)
56809 v.AddArg(ptr)
56810 v.AddArg(mem)
56811 return true
56812 }
56813 }
56814 func rewriteValueAMD64_OpAtomicLoad8_0(v *Value) bool {
56815 // match: (AtomicLoad8 ptr mem)
56816 // cond:
56817 // result: (MOVBatomicload ptr mem)
56818 for {
56819 mem := v.Args[1]
56820 ptr := v.Args[0]
56821 v.reset(OpAMD64MOVBatomicload)
56822 v.AddArg(ptr)
56823 v.AddArg(mem)
56824 return true
56825 }
56826 }
56827 func rewriteValueAMD64_OpAtomicLoadPtr_0(v *Value) bool {
56828 b := v.Block
56829 config := b.Func.Config
56830 // match: (AtomicLoadPtr ptr mem)
56831 // cond: config.PtrSize == 8
56832 // result: (MOVQatomicload ptr mem)
56833 for {
56834 mem := v.Args[1]
56835 ptr := v.Args[0]
56836 if !(config.PtrSize == 8) {
56837 break
56838 }
56839 v.reset(OpAMD64MOVQatomicload)
56840 v.AddArg(ptr)
56841 v.AddArg(mem)
56842 return true
56843 }
56844 // match: (AtomicLoadPtr ptr mem)
56845 // cond: config.PtrSize == 4
56846 // result: (MOVLatomicload ptr mem)
56847 for {
56848 mem := v.Args[1]
56849 ptr := v.Args[0]
56850 if !(config.PtrSize == 4) {
56851 break
56852 }
56853 v.reset(OpAMD64MOVLatomicload)
56854 v.AddArg(ptr)
56855 v.AddArg(mem)
56856 return true
56857 }
56858 return false
56859 }
56860 func rewriteValueAMD64_OpAtomicOr8_0(v *Value) bool {
// match: (AtomicOr8 ptr val mem)
// cond:
// result: (ORBlock ptr val mem)
56864 for {
56865 mem := v.Args[2]
56866 ptr := v.Args[0]
56867 val := v.Args[1]
56868 v.reset(OpAMD64ORBlock)
56869 v.AddArg(ptr)
56870 v.AddArg(val)
56871 v.AddArg(mem)
56872 return true
56873 }
56874 }
56875 func rewriteValueAMD64_OpAtomicStore32_0(v *Value) bool {
56876 b := v.Block
56877 typ := &b.Func.Config.Types
// match: (AtomicStore32 ptr val mem)
// cond:
// result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
56881 for {
56882 mem := v.Args[2]
56883 ptr := v.Args[0]
56884 val := v.Args[1]
56885 v.reset(OpSelect1)
56886 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
56887 v0.AddArg(val)
56888 v0.AddArg(ptr)
56889 v0.AddArg(mem)
56890 v.AddArg(v0)
56891 return true
56892 }
56893 }
56894 func rewriteValueAMD64_OpAtomicStore64_0(v *Value) bool {
56895 b := v.Block
56896 typ := &b.Func.Config.Types
// match: (AtomicStore64 ptr val mem)
// cond:
// result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
56900 for {
56901 mem := v.Args[2]
56902 ptr := v.Args[0]
56903 val := v.Args[1]
56904 v.reset(OpSelect1)
56905 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
56906 v0.AddArg(val)
56907 v0.AddArg(ptr)
56908 v0.AddArg(mem)
56909 v.AddArg(v0)
56910 return true
56911 }
56912 }
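// Editorial note (not part of the generated rules): atomic stores are lowered
// to XCHG rather than plain MOV because a locked XCHG is a full memory barrier,
// giving the store sequential consistency; Select1 keeps only the memory
// result and discards the old value that XCHG returns.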
56913 func rewriteValueAMD64_OpAtomicStorePtrNoWB_0(v *Value) bool {
56914 b := v.Block
56915 config := b.Func.Config
56916 typ := &b.Func.Config.Types
// match: (AtomicStorePtrNoWB ptr val mem)
// cond: config.PtrSize == 8
// result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
56920 for {
56921 mem := v.Args[2]
56922 ptr := v.Args[0]
56923 val := v.Args[1]
56924 if !(config.PtrSize == 8) {
56925 break
56926 }
56927 v.reset(OpSelect1)
56928 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
56929 v0.AddArg(val)
56930 v0.AddArg(ptr)
56931 v0.AddArg(mem)
56932 v.AddArg(v0)
56933 return true
56934 }
// match: (AtomicStorePtrNoWB ptr val mem)
// cond: config.PtrSize == 4
// result: (Select1 (XCHGL <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
56938 for {
56939 mem := v.Args[2]
56940 ptr := v.Args[0]
56941 val := v.Args[1]
56942 if !(config.PtrSize == 4) {
56943 break
56944 }
56945 v.reset(OpSelect1)
56946 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.BytePtr, types.TypeMem))
56947 v0.AddArg(val)
56948 v0.AddArg(ptr)
56949 v0.AddArg(mem)
56950 v.AddArg(v0)
56951 return true
56952 }
56953 return false
56954 }
56955 func rewriteValueAMD64_OpAvg64u_0(v *Value) bool {
// match: (Avg64u x y)
// cond:
// result: (AVGQU x y)
56959 for {
56960 y := v.Args[1]
56961 x := v.Args[0]
56962 v.reset(OpAMD64AVGQU)
56963 v.AddArg(x)
56964 v.AddArg(y)
56965 return true
56966 }
56967 }
56968 func rewriteValueAMD64_OpBitLen16_0(v *Value) bool {
56969 b := v.Block
56970 typ := &b.Func.Config.Types
// match: (BitLen16 x)
// cond:
// result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
56974 for {
56975 x := v.Args[0]
56976 v.reset(OpAMD64BSRL)
56977 v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
56978 v0.AuxInt = 1
56979 v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
56980 v1.AddArg(x)
56981 v0.AddArg(v1)
56982 v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
56983 v2.AddArg(x)
56984 v0.AddArg(v2)
56985 v.AddArg(v0)
56986 return true
56987 }
56988 }
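// Editorial note (not part of the generated rules): BSR is undefined for a
// zero input, so the LEAL1 forms 2*x+1 from the zero-extended input. That is
// never zero, and floor(log2(2*x+1)) equals the bit length of x, so BitLen16
// needs no branch and BitLen16(0) correctly evaluates to 0.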
56989 func rewriteValueAMD64_OpBitLen32_0(v *Value) bool {
56990 b := v.Block
56991 typ := &b.Func.Config.Types
// match: (BitLen32 x)
// cond:
// result: (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
56995 for {
56996 x := v.Args[0]
56997 v.reset(OpSelect0)
56998 v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
56999 v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64)
57000 v1.AuxInt = 1
57001 v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
57002 v2.AddArg(x)
57003 v1.AddArg(v2)
57004 v3 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
57005 v3.AddArg(x)
57006 v1.AddArg(v3)
57007 v0.AddArg(v1)
57008 v.AddArg(v0)
57009 return true
57010 }
57011 }
57012 func rewriteValueAMD64_OpBitLen64_0(v *Value) bool {
57013 b := v.Block
57014 typ := &b.Func.Config.Types
// match: (BitLen64 <t> x)
// cond:
// result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
57018 for {
57019 t := v.Type
57020 x := v.Args[0]
57021 v.reset(OpAMD64ADDQconst)
57022 v.AuxInt = 1
57023 v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
57024 v1 := b.NewValue0(v.Pos, OpSelect0, t)
57025 v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
57026 v2.AddArg(x)
57027 v1.AddArg(v2)
57028 v0.AddArg(v1)
57029 v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
57030 v3.AuxInt = -1
57031 v0.AddArg(v3)
57032 v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
57033 v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
57034 v5.AddArg(x)
57035 v4.AddArg(v5)
57036 v0.AddArg(v4)
57037 v.AddArg(v0)
57038 return true
57039 }
57040 }
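// Editorial note (not part of the generated rules): for 64-bit inputs there is
// no spare bit to plant, so the zero case is handled with flags: BSRQ sets ZF
// on zero input, CMOVQEQ then substitutes -1 for the undefined BSR result, and
// the outer ADDQconst [1] turns the bit index into a length (0 for input 0).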
57041 func rewriteValueAMD64_OpBitLen8_0(v *Value) bool {
57042 b := v.Block
57043 typ := &b.Func.Config.Types
// match: (BitLen8 x)
// cond:
// result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
57047 for {
57048 x := v.Args[0]
57049 v.reset(OpAMD64BSRL)
57050 v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
57051 v0.AuxInt = 1
57052 v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
57053 v1.AddArg(x)
57054 v0.AddArg(v1)
57055 v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
57056 v2.AddArg(x)
57057 v0.AddArg(v2)
57058 v.AddArg(v0)
57059 return true
57060 }
57061 }
57062 func rewriteValueAMD64_OpBswap32_0(v *Value) bool {
// match: (Bswap32 x)
// cond:
// result: (BSWAPL x)
57066 for {
57067 x := v.Args[0]
57068 v.reset(OpAMD64BSWAPL)
57069 v.AddArg(x)
57070 return true
57071 }
57072 }
57073 func rewriteValueAMD64_OpBswap64_0(v *Value) bool {
// match: (Bswap64 x)
// cond:
// result: (BSWAPQ x)
57077 for {
57078 x := v.Args[0]
57079 v.reset(OpAMD64BSWAPQ)
57080 v.AddArg(x)
57081 return true
57082 }
57083 }
57084 func rewriteValueAMD64_OpCeil_0(v *Value) bool {
// match: (Ceil x)
// cond:
// result: (ROUNDSD [2] x)
57088 for {
57089 x := v.Args[0]
57090 v.reset(OpAMD64ROUNDSD)
57091 v.AuxInt = 2
57092 v.AddArg(x)
57093 return true
57094 }
57095 }
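// Editorial note (not part of the generated rules): the ROUNDSD immediate is
// the SSE4.1 rounding mode (0 nearest, 1 down, 2 up, 3 truncate); Ceil uses
// mode 2, and the Floor lowering later in this file uses mode 1.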
57096 func rewriteValueAMD64_OpClosureCall_0(v *Value) bool {
// match: (ClosureCall [argwid] entry closure mem)
// cond:
// result: (CALLclosure [argwid] entry closure mem)
57100 for {
57101 argwid := v.AuxInt
57102 mem := v.Args[2]
57103 entry := v.Args[0]
57104 closure := v.Args[1]
57105 v.reset(OpAMD64CALLclosure)
57106 v.AuxInt = argwid
57107 v.AddArg(entry)
57108 v.AddArg(closure)
57109 v.AddArg(mem)
57110 return true
57111 }
57112 }
57113 func rewriteValueAMD64_OpCom16_0(v *Value) bool {
// match: (Com16 x)
// cond:
// result: (NOTL x)
57117 for {
57118 x := v.Args[0]
57119 v.reset(OpAMD64NOTL)
57120 v.AddArg(x)
57121 return true
57122 }
57123 }
57124 func rewriteValueAMD64_OpCom32_0(v *Value) bool {
// match: (Com32 x)
// cond:
// result: (NOTL x)
57128 for {
57129 x := v.Args[0]
57130 v.reset(OpAMD64NOTL)
57131 v.AddArg(x)
57132 return true
57133 }
57134 }
57135 func rewriteValueAMD64_OpCom64_0(v *Value) bool {
// match: (Com64 x)
// cond:
// result: (NOTQ x)
57139 for {
57140 x := v.Args[0]
57141 v.reset(OpAMD64NOTQ)
57142 v.AddArg(x)
57143 return true
57144 }
57145 }
57146 func rewriteValueAMD64_OpCom8_0(v *Value) bool {
// match: (Com8 x)
// cond:
// result: (NOTL x)
57150 for {
57151 x := v.Args[0]
57152 v.reset(OpAMD64NOTL)
57153 v.AddArg(x)
57154 return true
57155 }
57156 }
57157 func rewriteValueAMD64_OpCondSelect_0(v *Value) bool {
// match: (CondSelect <t> x y (SETEQ cond))
// cond: (is64BitInt(t) || isPtr(t))
// result: (CMOVQEQ y x cond)
57161 for {
57162 t := v.Type
57163 _ = v.Args[2]
57164 x := v.Args[0]
57165 y := v.Args[1]
57166 v_2 := v.Args[2]
57167 if v_2.Op != OpAMD64SETEQ {
57168 break
57169 }
57170 cond := v_2.Args[0]
57171 if !(is64BitInt(t) || isPtr(t)) {
57172 break
57173 }
57174 v.reset(OpAMD64CMOVQEQ)
57175 v.AddArg(y)
57176 v.AddArg(x)
57177 v.AddArg(cond)
57178 return true
57179 }
// match: (CondSelect <t> x y (SETNE cond))
// cond: (is64BitInt(t) || isPtr(t))
// result: (CMOVQNE y x cond)
57183 for {
57184 t := v.Type
57185 _ = v.Args[2]
57186 x := v.Args[0]
57187 y := v.Args[1]
57188 v_2 := v.Args[2]
57189 if v_2.Op != OpAMD64SETNE {
57190 break
57191 }
57192 cond := v_2.Args[0]
57193 if !(is64BitInt(t) || isPtr(t)) {
57194 break
57195 }
57196 v.reset(OpAMD64CMOVQNE)
57197 v.AddArg(y)
57198 v.AddArg(x)
57199 v.AddArg(cond)
57200 return true
57201 }
// match: (CondSelect <t> x y (SETL cond))
// cond: (is64BitInt(t) || isPtr(t))
// result: (CMOVQLT y x cond)
57205 for {
57206 t := v.Type
57207 _ = v.Args[2]
57208 x := v.Args[0]
57209 y := v.Args[1]
57210 v_2 := v.Args[2]
57211 if v_2.Op != OpAMD64SETL {
57212 break
57213 }
57214 cond := v_2.Args[0]
57215 if !(is64BitInt(t) || isPtr(t)) {
57216 break
57217 }
57218 v.reset(OpAMD64CMOVQLT)
57219 v.AddArg(y)
57220 v.AddArg(x)
57221 v.AddArg(cond)
57222 return true
57223 }
// match: (CondSelect <t> x y (SETG cond))
// cond: (is64BitInt(t) || isPtr(t))
// result: (CMOVQGT y x cond)
57227 for {
57228 t := v.Type
57229 _ = v.Args[2]
57230 x := v.Args[0]
57231 y := v.Args[1]
57232 v_2 := v.Args[2]
57233 if v_2.Op != OpAMD64SETG {
57234 break
57235 }
57236 cond := v_2.Args[0]
57237 if !(is64BitInt(t) || isPtr(t)) {
57238 break
57239 }
57240 v.reset(OpAMD64CMOVQGT)
57241 v.AddArg(y)
57242 v.AddArg(x)
57243 v.AddArg(cond)
57244 return true
57245 }
// match: (CondSelect <t> x y (SETLE cond))
// cond: (is64BitInt(t) || isPtr(t))
// result: (CMOVQLE y x cond)
57249 for {
57250 t := v.Type
57251 _ = v.Args[2]
57252 x := v.Args[0]
57253 y := v.Args[1]
57254 v_2 := v.Args[2]
57255 if v_2.Op != OpAMD64SETLE {
57256 break
57257 }
57258 cond := v_2.Args[0]
57259 if !(is64BitInt(t) || isPtr(t)) {
57260 break
57261 }
57262 v.reset(OpAMD64CMOVQLE)
57263 v.AddArg(y)
57264 v.AddArg(x)
57265 v.AddArg(cond)
57266 return true
57267 }
// match: (CondSelect <t> x y (SETGE cond))
// cond: (is64BitInt(t) || isPtr(t))
// result: (CMOVQGE y x cond)
57271 for {
57272 t := v.Type
57273 _ = v.Args[2]
57274 x := v.Args[0]
57275 y := v.Args[1]
57276 v_2 := v.Args[2]
57277 if v_2.Op != OpAMD64SETGE {
57278 break
57279 }
57280 cond := v_2.Args[0]
57281 if !(is64BitInt(t) || isPtr(t)) {
57282 break
57283 }
57284 v.reset(OpAMD64CMOVQGE)
57285 v.AddArg(y)
57286 v.AddArg(x)
57287 v.AddArg(cond)
57288 return true
57289 }
// match: (CondSelect <t> x y (SETA cond))
// cond: (is64BitInt(t) || isPtr(t))
// result: (CMOVQHI y x cond)
57293 for {
57294 t := v.Type
57295 _ = v.Args[2]
57296 x := v.Args[0]
57297 y := v.Args[1]
57298 v_2 := v.Args[2]
57299 if v_2.Op != OpAMD64SETA {
57300 break
57301 }
57302 cond := v_2.Args[0]
57303 if !(is64BitInt(t) || isPtr(t)) {
57304 break
57305 }
57306 v.reset(OpAMD64CMOVQHI)
57307 v.AddArg(y)
57308 v.AddArg(x)
57309 v.AddArg(cond)
57310 return true
57311 }
// match: (CondSelect <t> x y (SETB cond))
// cond: (is64BitInt(t) || isPtr(t))
// result: (CMOVQCS y x cond)
57315 for {
57316 t := v.Type
57317 _ = v.Args[2]
57318 x := v.Args[0]
57319 y := v.Args[1]
57320 v_2 := v.Args[2]
57321 if v_2.Op != OpAMD64SETB {
57322 break
57323 }
57324 cond := v_2.Args[0]
57325 if !(is64BitInt(t) || isPtr(t)) {
57326 break
57327 }
57328 v.reset(OpAMD64CMOVQCS)
57329 v.AddArg(y)
57330 v.AddArg(x)
57331 v.AddArg(cond)
57332 return true
57333 }
// match: (CondSelect <t> x y (SETAE cond))
// cond: (is64BitInt(t) || isPtr(t))
// result: (CMOVQCC y x cond)
57337 for {
57338 t := v.Type
57339 _ = v.Args[2]
57340 x := v.Args[0]
57341 y := v.Args[1]
57342 v_2 := v.Args[2]
57343 if v_2.Op != OpAMD64SETAE {
57344 break
57345 }
57346 cond := v_2.Args[0]
57347 if !(is64BitInt(t) || isPtr(t)) {
57348 break
57349 }
57350 v.reset(OpAMD64CMOVQCC)
57351 v.AddArg(y)
57352 v.AddArg(x)
57353 v.AddArg(cond)
57354 return true
57355 }
// match: (CondSelect <t> x y (SETBE cond))
// cond: (is64BitInt(t) || isPtr(t))
// result: (CMOVQLS y x cond)
57359 for {
57360 t := v.Type
57361 _ = v.Args[2]
57362 x := v.Args[0]
57363 y := v.Args[1]
57364 v_2 := v.Args[2]
57365 if v_2.Op != OpAMD64SETBE {
57366 break
57367 }
57368 cond := v_2.Args[0]
57369 if !(is64BitInt(t) || isPtr(t)) {
57370 break
57371 }
57372 v.reset(OpAMD64CMOVQLS)
57373 v.AddArg(y)
57374 v.AddArg(x)
57375 v.AddArg(cond)
57376 return true
57377 }
57378 return false
57379 }
57380 func rewriteValueAMD64_OpCondSelect_10(v *Value) bool {
// match: (CondSelect <t> x y (SETEQF cond))
// cond: (is64BitInt(t) || isPtr(t))
// result: (CMOVQEQF y x cond)
57384 for {
57385 t := v.Type
57386 _ = v.Args[2]
57387 x := v.Args[0]
57388 y := v.Args[1]
57389 v_2 := v.Args[2]
57390 if v_2.Op != OpAMD64SETEQF {
57391 break
57392 }
57393 cond := v_2.Args[0]
57394 if !(is64BitInt(t) || isPtr(t)) {
57395 break
57396 }
57397 v.reset(OpAMD64CMOVQEQF)
57398 v.AddArg(y)
57399 v.AddArg(x)
57400 v.AddArg(cond)
57401 return true
57402 }
// match: (CondSelect <t> x y (SETNEF cond))
// cond: (is64BitInt(t) || isPtr(t))
// result: (CMOVQNEF y x cond)
57406 for {
57407 t := v.Type
57408 _ = v.Args[2]
57409 x := v.Args[0]
57410 y := v.Args[1]
57411 v_2 := v.Args[2]
57412 if v_2.Op != OpAMD64SETNEF {
57413 break
57414 }
57415 cond := v_2.Args[0]
57416 if !(is64BitInt(t) || isPtr(t)) {
57417 break
57418 }
57419 v.reset(OpAMD64CMOVQNEF)
57420 v.AddArg(y)
57421 v.AddArg(x)
57422 v.AddArg(cond)
57423 return true
57424 }
// match: (CondSelect <t> x y (SETGF cond))
// cond: (is64BitInt(t) || isPtr(t))
// result: (CMOVQGTF y x cond)
57428 for {
57429 t := v.Type
57430 _ = v.Args[2]
57431 x := v.Args[0]
57432 y := v.Args[1]
57433 v_2 := v.Args[2]
57434 if v_2.Op != OpAMD64SETGF {
57435 break
57436 }
57437 cond := v_2.Args[0]
57438 if !(is64BitInt(t) || isPtr(t)) {
57439 break
57440 }
57441 v.reset(OpAMD64CMOVQGTF)
57442 v.AddArg(y)
57443 v.AddArg(x)
57444 v.AddArg(cond)
57445 return true
57446 }
// match: (CondSelect <t> x y (SETGEF cond))
// cond: (is64BitInt(t) || isPtr(t))
// result: (CMOVQGEF y x cond)
57450 for {
57451 t := v.Type
57452 _ = v.Args[2]
57453 x := v.Args[0]
57454 y := v.Args[1]
57455 v_2 := v.Args[2]
57456 if v_2.Op != OpAMD64SETGEF {
57457 break
57458 }
57459 cond := v_2.Args[0]
57460 if !(is64BitInt(t) || isPtr(t)) {
57461 break
57462 }
57463 v.reset(OpAMD64CMOVQGEF)
57464 v.AddArg(y)
57465 v.AddArg(x)
57466 v.AddArg(cond)
57467 return true
57468 }
// match: (CondSelect <t> x y (SETEQ cond))
// cond: is32BitInt(t)
// result: (CMOVLEQ y x cond)
57472 for {
57473 t := v.Type
57474 _ = v.Args[2]
57475 x := v.Args[0]
57476 y := v.Args[1]
57477 v_2 := v.Args[2]
57478 if v_2.Op != OpAMD64SETEQ {
57479 break
57480 }
57481 cond := v_2.Args[0]
57482 if !(is32BitInt(t)) {
57483 break
57484 }
57485 v.reset(OpAMD64CMOVLEQ)
57486 v.AddArg(y)
57487 v.AddArg(x)
57488 v.AddArg(cond)
57489 return true
57490 }
// match: (CondSelect <t> x y (SETNE cond))
// cond: is32BitInt(t)
// result: (CMOVLNE y x cond)
57494 for {
57495 t := v.Type
57496 _ = v.Args[2]
57497 x := v.Args[0]
57498 y := v.Args[1]
57499 v_2 := v.Args[2]
57500 if v_2.Op != OpAMD64SETNE {
57501 break
57502 }
57503 cond := v_2.Args[0]
57504 if !(is32BitInt(t)) {
57505 break
57506 }
57507 v.reset(OpAMD64CMOVLNE)
57508 v.AddArg(y)
57509 v.AddArg(x)
57510 v.AddArg(cond)
57511 return true
57512 }
// match: (CondSelect <t> x y (SETL cond))
// cond: is32BitInt(t)
// result: (CMOVLLT y x cond)
57516 for {
57517 t := v.Type
57518 _ = v.Args[2]
57519 x := v.Args[0]
57520 y := v.Args[1]
57521 v_2 := v.Args[2]
57522 if v_2.Op != OpAMD64SETL {
57523 break
57524 }
57525 cond := v_2.Args[0]
57526 if !(is32BitInt(t)) {
57527 break
57528 }
57529 v.reset(OpAMD64CMOVLLT)
57530 v.AddArg(y)
57531 v.AddArg(x)
57532 v.AddArg(cond)
57533 return true
57534 }
// match: (CondSelect <t> x y (SETG cond))
// cond: is32BitInt(t)
// result: (CMOVLGT y x cond)
57538 for {
57539 t := v.Type
57540 _ = v.Args[2]
57541 x := v.Args[0]
57542 y := v.Args[1]
57543 v_2 := v.Args[2]
57544 if v_2.Op != OpAMD64SETG {
57545 break
57546 }
57547 cond := v_2.Args[0]
57548 if !(is32BitInt(t)) {
57549 break
57550 }
57551 v.reset(OpAMD64CMOVLGT)
57552 v.AddArg(y)
57553 v.AddArg(x)
57554 v.AddArg(cond)
57555 return true
57556 }
// match: (CondSelect <t> x y (SETLE cond))
// cond: is32BitInt(t)
// result: (CMOVLLE y x cond)
57560 for {
57561 t := v.Type
57562 _ = v.Args[2]
57563 x := v.Args[0]
57564 y := v.Args[1]
57565 v_2 := v.Args[2]
57566 if v_2.Op != OpAMD64SETLE {
57567 break
57568 }
57569 cond := v_2.Args[0]
57570 if !(is32BitInt(t)) {
57571 break
57572 }
57573 v.reset(OpAMD64CMOVLLE)
57574 v.AddArg(y)
57575 v.AddArg(x)
57576 v.AddArg(cond)
57577 return true
57578 }
// match: (CondSelect <t> x y (SETGE cond))
// cond: is32BitInt(t)
// result: (CMOVLGE y x cond)
57582 for {
57583 t := v.Type
57584 _ = v.Args[2]
57585 x := v.Args[0]
57586 y := v.Args[1]
57587 v_2 := v.Args[2]
57588 if v_2.Op != OpAMD64SETGE {
57589 break
57590 }
57591 cond := v_2.Args[0]
57592 if !(is32BitInt(t)) {
57593 break
57594 }
57595 v.reset(OpAMD64CMOVLGE)
57596 v.AddArg(y)
57597 v.AddArg(x)
57598 v.AddArg(cond)
57599 return true
57600 }
57601 return false
57602 }
57603 func rewriteValueAMD64_OpCondSelect_20(v *Value) bool {
// match: (CondSelect <t> x y (SETA cond))
// cond: is32BitInt(t)
// result: (CMOVLHI y x cond)
57607 for {
57608 t := v.Type
57609 _ = v.Args[2]
57610 x := v.Args[0]
57611 y := v.Args[1]
57612 v_2 := v.Args[2]
57613 if v_2.Op != OpAMD64SETA {
57614 break
57615 }
57616 cond := v_2.Args[0]
57617 if !(is32BitInt(t)) {
57618 break
57619 }
57620 v.reset(OpAMD64CMOVLHI)
57621 v.AddArg(y)
57622 v.AddArg(x)
57623 v.AddArg(cond)
57624 return true
57625 }
// match: (CondSelect <t> x y (SETB cond))
// cond: is32BitInt(t)
// result: (CMOVLCS y x cond)
57629 for {
57630 t := v.Type
57631 _ = v.Args[2]
57632 x := v.Args[0]
57633 y := v.Args[1]
57634 v_2 := v.Args[2]
57635 if v_2.Op != OpAMD64SETB {
57636 break
57637 }
57638 cond := v_2.Args[0]
57639 if !(is32BitInt(t)) {
57640 break
57641 }
57642 v.reset(OpAMD64CMOVLCS)
57643 v.AddArg(y)
57644 v.AddArg(x)
57645 v.AddArg(cond)
57646 return true
57647 }
// match: (CondSelect <t> x y (SETAE cond))
// cond: is32BitInt(t)
// result: (CMOVLCC y x cond)
57651 for {
57652 t := v.Type
57653 _ = v.Args[2]
57654 x := v.Args[0]
57655 y := v.Args[1]
57656 v_2 := v.Args[2]
57657 if v_2.Op != OpAMD64SETAE {
57658 break
57659 }
57660 cond := v_2.Args[0]
57661 if !(is32BitInt(t)) {
57662 break
57663 }
57664 v.reset(OpAMD64CMOVLCC)
57665 v.AddArg(y)
57666 v.AddArg(x)
57667 v.AddArg(cond)
57668 return true
57669 }
// match: (CondSelect <t> x y (SETBE cond))
// cond: is32BitInt(t)
// result: (CMOVLLS y x cond)
57673 for {
57674 t := v.Type
57675 _ = v.Args[2]
57676 x := v.Args[0]
57677 y := v.Args[1]
57678 v_2 := v.Args[2]
57679 if v_2.Op != OpAMD64SETBE {
57680 break
57681 }
57682 cond := v_2.Args[0]
57683 if !(is32BitInt(t)) {
57684 break
57685 }
57686 v.reset(OpAMD64CMOVLLS)
57687 v.AddArg(y)
57688 v.AddArg(x)
57689 v.AddArg(cond)
57690 return true
57691 }
// match: (CondSelect <t> x y (SETEQF cond))
// cond: is32BitInt(t)
// result: (CMOVLEQF y x cond)
57695 for {
57696 t := v.Type
57697 _ = v.Args[2]
57698 x := v.Args[0]
57699 y := v.Args[1]
57700 v_2 := v.Args[2]
57701 if v_2.Op != OpAMD64SETEQF {
57702 break
57703 }
57704 cond := v_2.Args[0]
57705 if !(is32BitInt(t)) {
57706 break
57707 }
57708 v.reset(OpAMD64CMOVLEQF)
57709 v.AddArg(y)
57710 v.AddArg(x)
57711 v.AddArg(cond)
57712 return true
57713 }
// match: (CondSelect <t> x y (SETNEF cond))
// cond: is32BitInt(t)
// result: (CMOVLNEF y x cond)
57717 for {
57718 t := v.Type
57719 _ = v.Args[2]
57720 x := v.Args[0]
57721 y := v.Args[1]
57722 v_2 := v.Args[2]
57723 if v_2.Op != OpAMD64SETNEF {
57724 break
57725 }
57726 cond := v_2.Args[0]
57727 if !(is32BitInt(t)) {
57728 break
57729 }
57730 v.reset(OpAMD64CMOVLNEF)
57731 v.AddArg(y)
57732 v.AddArg(x)
57733 v.AddArg(cond)
57734 return true
57735 }
// match: (CondSelect <t> x y (SETGF cond))
// cond: is32BitInt(t)
// result: (CMOVLGTF y x cond)
57739 for {
57740 t := v.Type
57741 _ = v.Args[2]
57742 x := v.Args[0]
57743 y := v.Args[1]
57744 v_2 := v.Args[2]
57745 if v_2.Op != OpAMD64SETGF {
57746 break
57747 }
57748 cond := v_2.Args[0]
57749 if !(is32BitInt(t)) {
57750 break
57751 }
57752 v.reset(OpAMD64CMOVLGTF)
57753 v.AddArg(y)
57754 v.AddArg(x)
57755 v.AddArg(cond)
57756 return true
57757 }
// match: (CondSelect <t> x y (SETGEF cond))
// cond: is32BitInt(t)
// result: (CMOVLGEF y x cond)
57761 for {
57762 t := v.Type
57763 _ = v.Args[2]
57764 x := v.Args[0]
57765 y := v.Args[1]
57766 v_2 := v.Args[2]
57767 if v_2.Op != OpAMD64SETGEF {
57768 break
57769 }
57770 cond := v_2.Args[0]
57771 if !(is32BitInt(t)) {
57772 break
57773 }
57774 v.reset(OpAMD64CMOVLGEF)
57775 v.AddArg(y)
57776 v.AddArg(x)
57777 v.AddArg(cond)
57778 return true
57779 }
// match: (CondSelect <t> x y (SETEQ cond))
// cond: is16BitInt(t)
// result: (CMOVWEQ y x cond)
57783 for {
57784 t := v.Type
57785 _ = v.Args[2]
57786 x := v.Args[0]
57787 y := v.Args[1]
57788 v_2 := v.Args[2]
57789 if v_2.Op != OpAMD64SETEQ {
57790 break
57791 }
57792 cond := v_2.Args[0]
57793 if !(is16BitInt(t)) {
57794 break
57795 }
57796 v.reset(OpAMD64CMOVWEQ)
57797 v.AddArg(y)
57798 v.AddArg(x)
57799 v.AddArg(cond)
57800 return true
57801 }
// match: (CondSelect <t> x y (SETNE cond))
// cond: is16BitInt(t)
// result: (CMOVWNE y x cond)
57805 for {
57806 t := v.Type
57807 _ = v.Args[2]
57808 x := v.Args[0]
57809 y := v.Args[1]
57810 v_2 := v.Args[2]
57811 if v_2.Op != OpAMD64SETNE {
57812 break
57813 }
57814 cond := v_2.Args[0]
57815 if !(is16BitInt(t)) {
57816 break
57817 }
57818 v.reset(OpAMD64CMOVWNE)
57819 v.AddArg(y)
57820 v.AddArg(x)
57821 v.AddArg(cond)
57822 return true
57823 }
57824 return false
57825 }
57826 func rewriteValueAMD64_OpCondSelect_30(v *Value) bool {
// match: (CondSelect <t> x y (SETL cond))
// cond: is16BitInt(t)
// result: (CMOVWLT y x cond)
57830 for {
57831 t := v.Type
57832 _ = v.Args[2]
57833 x := v.Args[0]
57834 y := v.Args[1]
57835 v_2 := v.Args[2]
57836 if v_2.Op != OpAMD64SETL {
57837 break
57838 }
57839 cond := v_2.Args[0]
57840 if !(is16BitInt(t)) {
57841 break
57842 }
57843 v.reset(OpAMD64CMOVWLT)
57844 v.AddArg(y)
57845 v.AddArg(x)
57846 v.AddArg(cond)
57847 return true
57848 }
// match: (CondSelect <t> x y (SETG cond))
// cond: is16BitInt(t)
// result: (CMOVWGT y x cond)
57852 for {
57853 t := v.Type
57854 _ = v.Args[2]
57855 x := v.Args[0]
57856 y := v.Args[1]
57857 v_2 := v.Args[2]
57858 if v_2.Op != OpAMD64SETG {
57859 break
57860 }
57861 cond := v_2.Args[0]
57862 if !(is16BitInt(t)) {
57863 break
57864 }
57865 v.reset(OpAMD64CMOVWGT)
57866 v.AddArg(y)
57867 v.AddArg(x)
57868 v.AddArg(cond)
57869 return true
57870 }
// match: (CondSelect <t> x y (SETLE cond))
// cond: is16BitInt(t)
// result: (CMOVWLE y x cond)
57874 for {
57875 t := v.Type
57876 _ = v.Args[2]
57877 x := v.Args[0]
57878 y := v.Args[1]
57879 v_2 := v.Args[2]
57880 if v_2.Op != OpAMD64SETLE {
57881 break
57882 }
57883 cond := v_2.Args[0]
57884 if !(is16BitInt(t)) {
57885 break
57886 }
57887 v.reset(OpAMD64CMOVWLE)
57888 v.AddArg(y)
57889 v.AddArg(x)
57890 v.AddArg(cond)
57891 return true
57892 }
// match: (CondSelect <t> x y (SETGE cond))
// cond: is16BitInt(t)
// result: (CMOVWGE y x cond)
57896 for {
57897 t := v.Type
57898 _ = v.Args[2]
57899 x := v.Args[0]
57900 y := v.Args[1]
57901 v_2 := v.Args[2]
57902 if v_2.Op != OpAMD64SETGE {
57903 break
57904 }
57905 cond := v_2.Args[0]
57906 if !(is16BitInt(t)) {
57907 break
57908 }
57909 v.reset(OpAMD64CMOVWGE)
57910 v.AddArg(y)
57911 v.AddArg(x)
57912 v.AddArg(cond)
57913 return true
57914 }
// match: (CondSelect <t> x y (SETA cond))
// cond: is16BitInt(t)
// result: (CMOVWHI y x cond)
57918 for {
57919 t := v.Type
57920 _ = v.Args[2]
57921 x := v.Args[0]
57922 y := v.Args[1]
57923 v_2 := v.Args[2]
57924 if v_2.Op != OpAMD64SETA {
57925 break
57926 }
57927 cond := v_2.Args[0]
57928 if !(is16BitInt(t)) {
57929 break
57930 }
57931 v.reset(OpAMD64CMOVWHI)
57932 v.AddArg(y)
57933 v.AddArg(x)
57934 v.AddArg(cond)
57935 return true
57936 }
// match: (CondSelect <t> x y (SETB cond))
// cond: is16BitInt(t)
// result: (CMOVWCS y x cond)
57940 for {
57941 t := v.Type
57942 _ = v.Args[2]
57943 x := v.Args[0]
57944 y := v.Args[1]
57945 v_2 := v.Args[2]
57946 if v_2.Op != OpAMD64SETB {
57947 break
57948 }
57949 cond := v_2.Args[0]
57950 if !(is16BitInt(t)) {
57951 break
57952 }
57953 v.reset(OpAMD64CMOVWCS)
57954 v.AddArg(y)
57955 v.AddArg(x)
57956 v.AddArg(cond)
57957 return true
57958 }
// match: (CondSelect <t> x y (SETAE cond))
// cond: is16BitInt(t)
// result: (CMOVWCC y x cond)
57962 for {
57963 t := v.Type
57964 _ = v.Args[2]
57965 x := v.Args[0]
57966 y := v.Args[1]
57967 v_2 := v.Args[2]
57968 if v_2.Op != OpAMD64SETAE {
57969 break
57970 }
57971 cond := v_2.Args[0]
57972 if !(is16BitInt(t)) {
57973 break
57974 }
57975 v.reset(OpAMD64CMOVWCC)
57976 v.AddArg(y)
57977 v.AddArg(x)
57978 v.AddArg(cond)
57979 return true
57980 }
// match: (CondSelect <t> x y (SETBE cond))
// cond: is16BitInt(t)
// result: (CMOVWLS y x cond)
57984 for {
57985 t := v.Type
57986 _ = v.Args[2]
57987 x := v.Args[0]
57988 y := v.Args[1]
57989 v_2 := v.Args[2]
57990 if v_2.Op != OpAMD64SETBE {
57991 break
57992 }
57993 cond := v_2.Args[0]
57994 if !(is16BitInt(t)) {
57995 break
57996 }
57997 v.reset(OpAMD64CMOVWLS)
57998 v.AddArg(y)
57999 v.AddArg(x)
58000 v.AddArg(cond)
58001 return true
58002 }
// match: (CondSelect <t> x y (SETEQF cond))
// cond: is16BitInt(t)
// result: (CMOVWEQF y x cond)
58006 for {
58007 t := v.Type
58008 _ = v.Args[2]
58009 x := v.Args[0]
58010 y := v.Args[1]
58011 v_2 := v.Args[2]
58012 if v_2.Op != OpAMD64SETEQF {
58013 break
58014 }
58015 cond := v_2.Args[0]
58016 if !(is16BitInt(t)) {
58017 break
58018 }
58019 v.reset(OpAMD64CMOVWEQF)
58020 v.AddArg(y)
58021 v.AddArg(x)
58022 v.AddArg(cond)
58023 return true
58024 }
// match: (CondSelect <t> x y (SETNEF cond))
// cond: is16BitInt(t)
// result: (CMOVWNEF y x cond)
58028 for {
58029 t := v.Type
58030 _ = v.Args[2]
58031 x := v.Args[0]
58032 y := v.Args[1]
58033 v_2 := v.Args[2]
58034 if v_2.Op != OpAMD64SETNEF {
58035 break
58036 }
58037 cond := v_2.Args[0]
58038 if !(is16BitInt(t)) {
58039 break
58040 }
58041 v.reset(OpAMD64CMOVWNEF)
58042 v.AddArg(y)
58043 v.AddArg(x)
58044 v.AddArg(cond)
58045 return true
58046 }
58047 return false
58048 }
58049 func rewriteValueAMD64_OpCondSelect_40(v *Value) bool {
58050 b := v.Block
58051 typ := &b.Func.Config.Types
// match: (CondSelect <t> x y (SETGF cond))
// cond: is16BitInt(t)
// result: (CMOVWGTF y x cond)
58055 for {
58056 t := v.Type
58057 _ = v.Args[2]
58058 x := v.Args[0]
58059 y := v.Args[1]
58060 v_2 := v.Args[2]
58061 if v_2.Op != OpAMD64SETGF {
58062 break
58063 }
58064 cond := v_2.Args[0]
58065 if !(is16BitInt(t)) {
58066 break
58067 }
58068 v.reset(OpAMD64CMOVWGTF)
58069 v.AddArg(y)
58070 v.AddArg(x)
58071 v.AddArg(cond)
58072 return true
58073 }
// match: (CondSelect <t> x y (SETGEF cond))
// cond: is16BitInt(t)
// result: (CMOVWGEF y x cond)
58077 for {
58078 t := v.Type
58079 _ = v.Args[2]
58080 x := v.Args[0]
58081 y := v.Args[1]
58082 v_2 := v.Args[2]
58083 if v_2.Op != OpAMD64SETGEF {
58084 break
58085 }
58086 cond := v_2.Args[0]
58087 if !(is16BitInt(t)) {
58088 break
58089 }
58090 v.reset(OpAMD64CMOVWGEF)
58091 v.AddArg(y)
58092 v.AddArg(x)
58093 v.AddArg(cond)
58094 return true
58095 }
// match: (CondSelect <t> x y check)
// cond: !check.Type.IsFlags() && check.Type.Size() == 1
// result: (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
58099 for {
58100 t := v.Type
58101 check := v.Args[2]
58102 x := v.Args[0]
58103 y := v.Args[1]
58104 if !(!check.Type.IsFlags() && check.Type.Size() == 1) {
58105 break
58106 }
58107 v.reset(OpCondSelect)
58108 v.Type = t
58109 v.AddArg(x)
58110 v.AddArg(y)
58111 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64)
58112 v0.AddArg(check)
58113 v.AddArg(v0)
58114 return true
58115 }
// match: (CondSelect <t> x y check)
// cond: !check.Type.IsFlags() && check.Type.Size() == 2
// result: (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
58119 for {
58120 t := v.Type
58121 check := v.Args[2]
58122 x := v.Args[0]
58123 y := v.Args[1]
58124 if !(!check.Type.IsFlags() && check.Type.Size() == 2) {
58125 break
58126 }
58127 v.reset(OpCondSelect)
58128 v.Type = t
58129 v.AddArg(x)
58130 v.AddArg(y)
58131 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64)
58132 v0.AddArg(check)
58133 v.AddArg(v0)
58134 return true
58135 }
// match: (CondSelect <t> x y check)
// cond: !check.Type.IsFlags() && check.Type.Size() == 4
// result: (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))
58139 for {
58140 t := v.Type
58141 check := v.Args[2]
58142 x := v.Args[0]
58143 y := v.Args[1]
58144 if !(!check.Type.IsFlags() && check.Type.Size() == 4) {
58145 break
58146 }
58147 v.reset(OpCondSelect)
58148 v.Type = t
58149 v.AddArg(x)
58150 v.AddArg(y)
58151 v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
58152 v0.AddArg(check)
58153 v.AddArg(v0)
58154 return true
58155 }
// match: (CondSelect <t> x y check)
// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
// result: (CMOVQNE y x (CMPQconst [0] check))
58159 for {
58160 t := v.Type
58161 check := v.Args[2]
58162 x := v.Args[0]
58163 y := v.Args[1]
58164 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) {
58165 break
58166 }
58167 v.reset(OpAMD64CMOVQNE)
58168 v.AddArg(y)
58169 v.AddArg(x)
58170 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
58171 v0.AuxInt = 0
58172 v0.AddArg(check)
58173 v.AddArg(v0)
58174 return true
58175 }
// match: (CondSelect <t> x y check)
// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
// result: (CMOVLNE y x (CMPQconst [0] check))
58179 for {
58180 t := v.Type
58181 check := v.Args[2]
58182 x := v.Args[0]
58183 y := v.Args[1]
58184 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) {
58185 break
58186 }
58187 v.reset(OpAMD64CMOVLNE)
58188 v.AddArg(y)
58189 v.AddArg(x)
58190 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
58191 v0.AuxInt = 0
58192 v0.AddArg(check)
58193 v.AddArg(v0)
58194 return true
58195 }
// match: (CondSelect <t> x y check)
// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
// result: (CMOVWNE y x (CMPQconst [0] check))
58199 for {
58200 t := v.Type
58201 check := v.Args[2]
58202 x := v.Args[0]
58203 y := v.Args[1]
58204 if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) {
58205 break
58206 }
58207 v.reset(OpAMD64CMOVWNE)
58208 v.AddArg(y)
58209 v.AddArg(x)
58210 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
58211 v0.AuxInt = 0
58212 v0.AddArg(check)
58213 v.AddArg(v0)
58214 return true
58215 }
58216 return false
58217 }
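// Editorial note (not part of the generated rules): the final CondSelect rules
// handle conditions that are ordinary booleans rather than flags: 1-, 2- and
// 4-byte conditions are first zero-extended to 64 bits, and an 8-byte
// condition is re-tested with CMPQconst [0] so a CMOV*NE can consume flags.
// The y/x argument order matches the CMOV convention used throughout: arg1
// overwrites arg0 exactly when the condition holds.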
58218 func rewriteValueAMD64_OpConst16_0(v *Value) bool {
// match: (Const16 [val])
// cond:
// result: (MOVLconst [val])
58222 for {
58223 val := v.AuxInt
58224 v.reset(OpAMD64MOVLconst)
58225 v.AuxInt = val
58226 return true
58227 }
58228 }
58229 func rewriteValueAMD64_OpConst32_0(v *Value) bool {
// match: (Const32 [val])
// cond:
// result: (MOVLconst [val])
58233 for {
58234 val := v.AuxInt
58235 v.reset(OpAMD64MOVLconst)
58236 v.AuxInt = val
58237 return true
58238 }
58239 }
58240 func rewriteValueAMD64_OpConst32F_0(v *Value) bool {
// match: (Const32F [val])
// cond:
// result: (MOVSSconst [val])
58244 for {
58245 val := v.AuxInt
58246 v.reset(OpAMD64MOVSSconst)
58247 v.AuxInt = val
58248 return true
58249 }
58250 }
58251 func rewriteValueAMD64_OpConst64_0(v *Value) bool {
// match: (Const64 [val])
// cond:
// result: (MOVQconst [val])
58255 for {
58256 val := v.AuxInt
58257 v.reset(OpAMD64MOVQconst)
58258 v.AuxInt = val
58259 return true
58260 }
58261 }
58262 func rewriteValueAMD64_OpConst64F_0(v *Value) bool {
// match: (Const64F [val])
// cond:
// result: (MOVSDconst [val])
58266 for {
58267 val := v.AuxInt
58268 v.reset(OpAMD64MOVSDconst)
58269 v.AuxInt = val
58270 return true
58271 }
58272 }
58273 func rewriteValueAMD64_OpConst8_0(v *Value) bool {
// match: (Const8 [val])
// cond:
// result: (MOVLconst [val])
58277 for {
58278 val := v.AuxInt
58279 v.reset(OpAMD64MOVLconst)
58280 v.AuxInt = val
58281 return true
58282 }
58283 }
58284 func rewriteValueAMD64_OpConstBool_0(v *Value) bool {
// match: (ConstBool [b])
// cond:
// result: (MOVLconst [b])
58288 for {
58289 b := v.AuxInt
58290 v.reset(OpAMD64MOVLconst)
58291 v.AuxInt = b
58292 return true
58293 }
58294 }
58295 func rewriteValueAMD64_OpConstNil_0(v *Value) bool {
58296 b := v.Block
58297 config := b.Func.Config
// match: (ConstNil)
// cond: config.PtrSize == 8
// result: (MOVQconst [0])
58301 for {
58302 if !(config.PtrSize == 8) {
58303 break
58304 }
58305 v.reset(OpAMD64MOVQconst)
58306 v.AuxInt = 0
58307 return true
58308 }
// match: (ConstNil)
// cond: config.PtrSize == 4
// result: (MOVLconst [0])
58312 for {
58313 if !(config.PtrSize == 4) {
58314 break
58315 }
58316 v.reset(OpAMD64MOVLconst)
58317 v.AuxInt = 0
58318 return true
58319 }
58320 return false
58321 }
58322 func rewriteValueAMD64_OpCtz16_0(v *Value) bool {
58323 b := v.Block
58324 typ := &b.Func.Config.Types
// match: (Ctz16 x)
// cond:
// result: (BSFL (BTSLconst <typ.UInt32> [16] x))
58328 for {
58329 x := v.Args[0]
58330 v.reset(OpAMD64BSFL)
58331 v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
58332 v0.AuxInt = 16
58333 v0.AddArg(x)
58334 v.AddArg(v0)
58335 return true
58336 }
58337 }
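// Editorial note (not part of the generated rules): BSF is undefined for a
// zero input, so BTSLconst [16] plants bit 16 first; for x == 0 the planted
// bit is the first one set, making Ctz16(0) == 16 as Go defines, while any
// lower set bits of x are unaffected.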
58338 func rewriteValueAMD64_OpCtz16NonZero_0(v *Value) bool {
// match: (Ctz16NonZero x)
// cond:
// result: (BSFL x)
58342 for {
58343 x := v.Args[0]
58344 v.reset(OpAMD64BSFL)
58345 v.AddArg(x)
58346 return true
58347 }
58348 }
58349 func rewriteValueAMD64_OpCtz32_0(v *Value) bool {
58350 b := v.Block
58351 typ := &b.Func.Config.Types
// match: (Ctz32 x)
// cond:
// result: (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
58355 for {
58356 x := v.Args[0]
58357 v.reset(OpSelect0)
58358 v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
58359 v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64)
58360 v1.AuxInt = 32
58361 v1.AddArg(x)
58362 v0.AddArg(v1)
58363 v.AddArg(v0)
58364 return true
58365 }
58366 }
58367 func rewriteValueAMD64_OpCtz32NonZero_0(v *Value) bool {
// match: (Ctz32NonZero x)
// cond:
// result: (BSFL x)
58371 for {
58372 x := v.Args[0]
58373 v.reset(OpAMD64BSFL)
58374 v.AddArg(x)
58375 return true
58376 }
58377 }
58378 func rewriteValueAMD64_OpCtz64_0(v *Value) bool {
58379 b := v.Block
58380 typ := &b.Func.Config.Types
// match: (Ctz64 <t> x)
// cond:
// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
58384 for {
58385 t := v.Type
58386 x := v.Args[0]
58387 v.reset(OpAMD64CMOVQEQ)
58388 v0 := b.NewValue0(v.Pos, OpSelect0, t)
58389 v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
58390 v1.AddArg(x)
58391 v0.AddArg(v1)
58392 v.AddArg(v0)
58393 v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
58394 v2.AuxInt = 64
58395 v.AddArg(v2)
58396 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
58397 v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
58398 v4.AddArg(x)
58399 v3.AddArg(v4)
58400 v.AddArg(v3)
58401 return true
58402 }
58403 }
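// Editorial note (not part of the generated rules): a 64-bit input has no
// spare 65th bit to plant, so the zero case uses flags instead: Select1 of
// BSFQ carries ZF, and CMOVQEQ substitutes the constant 64 when the input
// was zero.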
58404 func rewriteValueAMD64_OpCtz64NonZero_0(v *Value) bool {
58405 b := v.Block
58406 typ := &b.Func.Config.Types
// match: (Ctz64NonZero x)
// cond:
// result: (Select0 (BSFQ x))
58410 for {
58411 x := v.Args[0]
58412 v.reset(OpSelect0)
58413 v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
58414 v0.AddArg(x)
58415 v.AddArg(v0)
58416 return true
58417 }
58418 }
58419 func rewriteValueAMD64_OpCtz8_0(v *Value) bool {
58420 b := v.Block
58421 typ := &b.Func.Config.Types
// match: (Ctz8 x)
// cond:
// result: (BSFL (BTSLconst <typ.UInt32> [8] x))
58425 for {
58426 x := v.Args[0]
58427 v.reset(OpAMD64BSFL)
58428 v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
58429 v0.AuxInt = 8
58430 v0.AddArg(x)
58431 v.AddArg(v0)
58432 return true
58433 }
58434 }
58435 func rewriteValueAMD64_OpCtz8NonZero_0(v *Value) bool {
// match: (Ctz8NonZero x)
// cond:
// result: (BSFL x)
58439 for {
58440 x := v.Args[0]
58441 v.reset(OpAMD64BSFL)
58442 v.AddArg(x)
58443 return true
58444 }
58445 }
58446 func rewriteValueAMD64_OpCvt32Fto32_0(v *Value) bool {
// match: (Cvt32Fto32 x)
// cond:
// result: (CVTTSS2SL x)
58450 for {
58451 x := v.Args[0]
58452 v.reset(OpAMD64CVTTSS2SL)
58453 v.AddArg(x)
58454 return true
58455 }
58456 }
58457 func rewriteValueAMD64_OpCvt32Fto64_0(v *Value) bool {
// match: (Cvt32Fto64 x)
// cond:
// result: (CVTTSS2SQ x)
58461 for {
58462 x := v.Args[0]
58463 v.reset(OpAMD64CVTTSS2SQ)
58464 v.AddArg(x)
58465 return true
58466 }
58467 }
58468 func rewriteValueAMD64_OpCvt32Fto64F_0(v *Value) bool {
// match: (Cvt32Fto64F x)
// cond:
// result: (CVTSS2SD x)
58472 for {
58473 x := v.Args[0]
58474 v.reset(OpAMD64CVTSS2SD)
58475 v.AddArg(x)
58476 return true
58477 }
58478 }
58479 func rewriteValueAMD64_OpCvt32to32F_0(v *Value) bool {
// match: (Cvt32to32F x)
// cond:
// result: (CVTSL2SS x)
58483 for {
58484 x := v.Args[0]
58485 v.reset(OpAMD64CVTSL2SS)
58486 v.AddArg(x)
58487 return true
58488 }
58489 }
58490 func rewriteValueAMD64_OpCvt32to64F_0(v *Value) bool {
// match: (Cvt32to64F x)
// cond:
// result: (CVTSL2SD x)
58494 for {
58495 x := v.Args[0]
58496 v.reset(OpAMD64CVTSL2SD)
58497 v.AddArg(x)
58498 return true
58499 }
58500 }
58501 func rewriteValueAMD64_OpCvt64Fto32_0(v *Value) bool {
// match: (Cvt64Fto32 x)
// cond:
// result: (CVTTSD2SL x)
58505 for {
58506 x := v.Args[0]
58507 v.reset(OpAMD64CVTTSD2SL)
58508 v.AddArg(x)
58509 return true
58510 }
58511 }
58512 func rewriteValueAMD64_OpCvt64Fto32F_0(v *Value) bool {
// match: (Cvt64Fto32F x)
// cond:
// result: (CVTSD2SS x)
58516 for {
58517 x := v.Args[0]
58518 v.reset(OpAMD64CVTSD2SS)
58519 v.AddArg(x)
58520 return true
58521 }
58522 }
58523 func rewriteValueAMD64_OpCvt64Fto64_0(v *Value) bool {
// match: (Cvt64Fto64 x)
// cond:
// result: (CVTTSD2SQ x)
58527 for {
58528 x := v.Args[0]
58529 v.reset(OpAMD64CVTTSD2SQ)
58530 v.AddArg(x)
58531 return true
58532 }
58533 }
58534 func rewriteValueAMD64_OpCvt64to32F_0(v *Value) bool {
// match: (Cvt64to32F x)
// cond:
// result: (CVTSQ2SS x)
58538 for {
58539 x := v.Args[0]
58540 v.reset(OpAMD64CVTSQ2SS)
58541 v.AddArg(x)
58542 return true
58543 }
58544 }
58545 func rewriteValueAMD64_OpCvt64to64F_0(v *Value) bool {
// match: (Cvt64to64F x)
// cond:
// result: (CVTSQ2SD x)
58549 for {
58550 x := v.Args[0]
58551 v.reset(OpAMD64CVTSQ2SD)
58552 v.AddArg(x)
58553 return true
58554 }
58555 }
58556 func rewriteValueAMD64_OpDiv128u_0(v *Value) bool {
// match: (Div128u xhi xlo y)
// cond:
// result: (DIVQU2 xhi xlo y)
58560 for {
58561 y := v.Args[2]
58562 xhi := v.Args[0]
58563 xlo := v.Args[1]
58564 v.reset(OpAMD64DIVQU2)
58565 v.AddArg(xhi)
58566 v.AddArg(xlo)
58567 v.AddArg(y)
58568 return true
58569 }
58570 }
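// Editorial note (not part of the generated rules): DIVQU2 models the
// 128-by-64-bit form of the hardware DIV, with the dividend split across
// xhi:xlo (the RDX:RAX pair) and quotient and remainder returned as a tuple.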
58571 func rewriteValueAMD64_OpDiv16_0(v *Value) bool {
58572 b := v.Block
58573 typ := &b.Func.Config.Types
// match: (Div16 [a] x y)
// cond:
// result: (Select0 (DIVW [a] x y))
58577 for {
58578 a := v.AuxInt
58579 y := v.Args[1]
58580 x := v.Args[0]
58581 v.reset(OpSelect0)
58582 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
58583 v0.AuxInt = a
58584 v0.AddArg(x)
58585 v0.AddArg(y)
58586 v.AddArg(v0)
58587 return true
58588 }
58589 }
58590 func rewriteValueAMD64_OpDiv16u_0(v *Value) bool {
58591 b := v.Block
58592 typ := &b.Func.Config.Types
// match: (Div16u x y)
// cond:
// result: (Select0 (DIVWU x y))
58596 for {
58597 y := v.Args[1]
58598 x := v.Args[0]
58599 v.reset(OpSelect0)
58600 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
58601 v0.AddArg(x)
58602 v0.AddArg(y)
58603 v.AddArg(v0)
58604 return true
58605 }
58606 }
58607 func rewriteValueAMD64_OpDiv32_0(v *Value) bool {
58608 b := v.Block
58609 typ := &b.Func.Config.Types
// match: (Div32 [a] x y)
// cond:
// result: (Select0 (DIVL [a] x y))
58613 for {
58614 a := v.AuxInt
58615 y := v.Args[1]
58616 x := v.Args[0]
58617 v.reset(OpSelect0)
58618 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
58619 v0.AuxInt = a
58620 v0.AddArg(x)
58621 v0.AddArg(y)
58622 v.AddArg(v0)
58623 return true
58624 }
58625 }
58626 func rewriteValueAMD64_OpDiv32F_0(v *Value) bool {
// match: (Div32F x y)
// cond:
// result: (DIVSS x y)
58630 for {
58631 y := v.Args[1]
58632 x := v.Args[0]
58633 v.reset(OpAMD64DIVSS)
58634 v.AddArg(x)
58635 v.AddArg(y)
58636 return true
58637 }
58638 }
58639 func rewriteValueAMD64_OpDiv32u_0(v *Value) bool {
58640 b := v.Block
58641 typ := &b.Func.Config.Types
// match: (Div32u x y)
// cond:
// result: (Select0 (DIVLU x y))
58645 for {
58646 y := v.Args[1]
58647 x := v.Args[0]
58648 v.reset(OpSelect0)
58649 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
58650 v0.AddArg(x)
58651 v0.AddArg(y)
58652 v.AddArg(v0)
58653 return true
58654 }
58655 }
58656 func rewriteValueAMD64_OpDiv64_0(v *Value) bool {
58657 b := v.Block
58658 typ := &b.Func.Config.Types
// match: (Div64 [a] x y)
// cond:
// result: (Select0 (DIVQ [a] x y))
58662 for {
58663 a := v.AuxInt
58664 y := v.Args[1]
58665 x := v.Args[0]
58666 v.reset(OpSelect0)
58667 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
58668 v0.AuxInt = a
58669 v0.AddArg(x)
58670 v0.AddArg(y)
58671 v.AddArg(v0)
58672 return true
58673 }
58674 }
58675 func rewriteValueAMD64_OpDiv64F_0(v *Value) bool {
// match: (Div64F x y)
// cond:
// result: (DIVSD x y)
58679 for {
58680 y := v.Args[1]
58681 x := v.Args[0]
58682 v.reset(OpAMD64DIVSD)
58683 v.AddArg(x)
58684 v.AddArg(y)
58685 return true
58686 }
58687 }
58688 func rewriteValueAMD64_OpDiv64u_0(v *Value) bool {
58689 b := v.Block
58690 typ := &b.Func.Config.Types
// match: (Div64u x y)
// cond:
// result: (Select0 (DIVQU x y))
58694 for {
58695 y := v.Args[1]
58696 x := v.Args[0]
58697 v.reset(OpSelect0)
58698 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
58699 v0.AddArg(x)
58700 v0.AddArg(y)
58701 v.AddArg(v0)
58702 return true
58703 }
58704 }
58705 func rewriteValueAMD64_OpDiv8_0(v *Value) bool {
58706 b := v.Block
58707 typ := &b.Func.Config.Types
// match: (Div8 x y)
// cond:
// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
58711 for {
58712 y := v.Args[1]
58713 x := v.Args[0]
58714 v.reset(OpSelect0)
58715 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
58716 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
58717 v1.AddArg(x)
58718 v0.AddArg(v1)
58719 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
58720 v2.AddArg(y)
58721 v0.AddArg(v2)
58722 v.AddArg(v0)
58723 return true
58724 }
58725 }
58726 func rewriteValueAMD64_OpDiv8u_0(v *Value) bool {
58727 b := v.Block
58728 typ := &b.Func.Config.Types
// match: (Div8u x y)
// cond:
// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
58732 for {
58733 y := v.Args[1]
58734 x := v.Args[0]
58735 v.reset(OpSelect0)
58736 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
58737 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
58738 v1.AddArg(x)
58739 v0.AddArg(v1)
58740 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
58741 v2.AddArg(y)
58742 v0.AddArg(v2)
58743 v.AddArg(v0)
58744 return true
58745 }
58746 }
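// Editorial note (not part of the generated rules): there is no 8-bit divide
// tuple op here; byte operands are sign- or zero-extended and the 16-bit
// divider is used instead. Select0 takes the quotient; the matching Mod
// lowerings take Select1, the remainder.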
58747 func rewriteValueAMD64_OpEq16_0(v *Value) bool {
58748 b := v.Block
// match: (Eq16 x y)
// cond:
// result: (SETEQ (CMPW x y))
58752 for {
58753 y := v.Args[1]
58754 x := v.Args[0]
58755 v.reset(OpAMD64SETEQ)
58756 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
58757 v0.AddArg(x)
58758 v0.AddArg(y)
58759 v.AddArg(v0)
58760 return true
58761 }
58762 }
58763 func rewriteValueAMD64_OpEq32_0(v *Value) bool {
58764 b := v.Block
// match: (Eq32 x y)
// cond:
// result: (SETEQ (CMPL x y))
58768 for {
58769 y := v.Args[1]
58770 x := v.Args[0]
58771 v.reset(OpAMD64SETEQ)
58772 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
58773 v0.AddArg(x)
58774 v0.AddArg(y)
58775 v.AddArg(v0)
58776 return true
58777 }
58778 }
58779 func rewriteValueAMD64_OpEq32F_0(v *Value) bool {
58780 b := v.Block
// match: (Eq32F x y)
// cond:
// result: (SETEQF (UCOMISS x y))
58784 for {
58785 y := v.Args[1]
58786 x := v.Args[0]
58787 v.reset(OpAMD64SETEQF)
58788 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
58789 v0.AddArg(x)
58790 v0.AddArg(y)
58791 v.AddArg(v0)
58792 return true
58793 }
58794 }
58795 func rewriteValueAMD64_OpEq64_0(v *Value) bool {
58796 b := v.Block
// match: (Eq64 x y)
// cond:
// result: (SETEQ (CMPQ x y))
58800 for {
58801 y := v.Args[1]
58802 x := v.Args[0]
58803 v.reset(OpAMD64SETEQ)
58804 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
58805 v0.AddArg(x)
58806 v0.AddArg(y)
58807 v.AddArg(v0)
58808 return true
58809 }
58810 }
58811 func rewriteValueAMD64_OpEq64F_0(v *Value) bool {
58812 b := v.Block
// match: (Eq64F x y)
// cond:
// result: (SETEQF (UCOMISD x y))
58816 for {
58817 y := v.Args[1]
58818 x := v.Args[0]
58819 v.reset(OpAMD64SETEQF)
58820 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
58821 v0.AddArg(x)
58822 v0.AddArg(y)
58823 v.AddArg(v0)
58824 return true
58825 }
58826 }
58827 func rewriteValueAMD64_OpEq8_0(v *Value) bool {
58828 b := v.Block
// match: (Eq8 x y)
// cond:
// result: (SETEQ (CMPB x y))
58832 for {
58833 y := v.Args[1]
58834 x := v.Args[0]
58835 v.reset(OpAMD64SETEQ)
58836 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
58837 v0.AddArg(x)
58838 v0.AddArg(y)
58839 v.AddArg(v0)
58840 return true
58841 }
58842 }
58843 func rewriteValueAMD64_OpEqB_0(v *Value) bool {
58844 b := v.Block
// match: (EqB x y)
// cond:
// result: (SETEQ (CMPB x y))
58848 for {
58849 y := v.Args[1]
58850 x := v.Args[0]
58851 v.reset(OpAMD64SETEQ)
58852 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
58853 v0.AddArg(x)
58854 v0.AddArg(y)
58855 v.AddArg(v0)
58856 return true
58857 }
58858 }
58859 func rewriteValueAMD64_OpEqPtr_0(v *Value) bool {
58860 b := v.Block
58861 config := b.Func.Config
// match: (EqPtr x y)
// cond: config.PtrSize == 8
// result: (SETEQ (CMPQ x y))
58865 for {
58866 y := v.Args[1]
58867 x := v.Args[0]
58868 if !(config.PtrSize == 8) {
58869 break
58870 }
58871 v.reset(OpAMD64SETEQ)
58872 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
58873 v0.AddArg(x)
58874 v0.AddArg(y)
58875 v.AddArg(v0)
58876 return true
58877 }
// match: (EqPtr x y)
// cond: config.PtrSize == 4
// result: (SETEQ (CMPL x y))
58881 for {
58882 y := v.Args[1]
58883 x := v.Args[0]
58884 if !(config.PtrSize == 4) {
58885 break
58886 }
58887 v.reset(OpAMD64SETEQ)
58888 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
58889 v0.AddArg(x)
58890 v0.AddArg(y)
58891 v.AddArg(v0)
58892 return true
58893 }
58894 return false
58895 }
58896 func rewriteValueAMD64_OpFloor_0(v *Value) bool {
// match: (Floor x)
// cond:
// result: (ROUNDSD [1] x)
58900 for {
58901 x := v.Args[0]
58902 v.reset(OpAMD64ROUNDSD)
58903 v.AuxInt = 1
58904 v.AddArg(x)
58905 return true
58906 }
58907 }
58908 func rewriteValueAMD64_OpGeq16_0(v *Value) bool {
58909 b := v.Block
// match: (Geq16 x y)
// cond:
// result: (SETGE (CMPW x y))
58913 for {
58914 y := v.Args[1]
58915 x := v.Args[0]
58916 v.reset(OpAMD64SETGE)
58917 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
58918 v0.AddArg(x)
58919 v0.AddArg(y)
58920 v.AddArg(v0)
58921 return true
58922 }
58923 }
58924 func rewriteValueAMD64_OpGeq16U_0(v *Value) bool {
58925 b := v.Block
// match: (Geq16U x y)
// cond:
// result: (SETAE (CMPW x y))
58929 for {
58930 y := v.Args[1]
58931 x := v.Args[0]
58932 v.reset(OpAMD64SETAE)
58933 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
58934 v0.AddArg(x)
58935 v0.AddArg(y)
58936 v.AddArg(v0)
58937 return true
58938 }
58939 }
58940 func rewriteValueAMD64_OpGeq32_0(v *Value) bool {
58941 b := v.Block
// match: (Geq32 x y)
// cond:
// result: (SETGE (CMPL x y))
58945 for {
58946 y := v.Args[1]
58947 x := v.Args[0]
58948 v.reset(OpAMD64SETGE)
58949 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
58950 v0.AddArg(x)
58951 v0.AddArg(y)
58952 v.AddArg(v0)
58953 return true
58954 }
58955 }
58956 func rewriteValueAMD64_OpGeq32F_0(v *Value) bool {
58957 b := v.Block
// match: (Geq32F x y)
// cond:
// result: (SETGEF (UCOMISS x y))
58961 for {
58962 y := v.Args[1]
58963 x := v.Args[0]
58964 v.reset(OpAMD64SETGEF)
58965 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
58966 v0.AddArg(x)
58967 v0.AddArg(y)
58968 v.AddArg(v0)
58969 return true
58970 }
58971 }
58972 func rewriteValueAMD64_OpGeq32U_0(v *Value) bool {
58973 b := v.Block
// match: (Geq32U x y)
// cond:
// result: (SETAE (CMPL x y))
58977 for {
58978 y := v.Args[1]
58979 x := v.Args[0]
58980 v.reset(OpAMD64SETAE)
58981 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
58982 v0.AddArg(x)
58983 v0.AddArg(y)
58984 v.AddArg(v0)
58985 return true
58986 }
58987 }
58988 func rewriteValueAMD64_OpGeq64_0(v *Value) bool {
58989 b := v.Block
// match: (Geq64 x y)
// cond:
// result: (SETGE (CMPQ x y))
58993 for {
58994 y := v.Args[1]
58995 x := v.Args[0]
58996 v.reset(OpAMD64SETGE)
58997 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
58998 v0.AddArg(x)
58999 v0.AddArg(y)
59000 v.AddArg(v0)
59001 return true
59002 }
59003 }
59004 func rewriteValueAMD64_OpGeq64F_0(v *Value) bool {
59005 b := v.Block
// match: (Geq64F x y)
// cond:
// result: (SETGEF (UCOMISD x y))
59009 for {
59010 y := v.Args[1]
59011 x := v.Args[0]
59012 v.reset(OpAMD64SETGEF)
59013 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
59014 v0.AddArg(x)
59015 v0.AddArg(y)
59016 v.AddArg(v0)
59017 return true
59018 }
59019 }
59020 func rewriteValueAMD64_OpGeq64U_0(v *Value) bool {
59021 b := v.Block
// match: (Geq64U x y)
// cond:
// result: (SETAE (CMPQ x y))
59025 for {
59026 y := v.Args[1]
59027 x := v.Args[0]
59028 v.reset(OpAMD64SETAE)
59029 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
59030 v0.AddArg(x)
59031 v0.AddArg(y)
59032 v.AddArg(v0)
59033 return true
59034 }
59035 }
59036 func rewriteValueAMD64_OpGeq8_0(v *Value) bool {
59037 b := v.Block
// match: (Geq8 x y)
// cond:
// result: (SETGE (CMPB x y))
59041 for {
59042 y := v.Args[1]
59043 x := v.Args[0]
59044 v.reset(OpAMD64SETGE)
59045 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
59046 v0.AddArg(x)
59047 v0.AddArg(y)
59048 v.AddArg(v0)
59049 return true
59050 }
59051 }
59052 func rewriteValueAMD64_OpGeq8U_0(v *Value) bool {
59053 b := v.Block
// match: (Geq8U x y)
// cond:
// result: (SETAE (CMPB x y))
59057 for {
59058 y := v.Args[1]
59059 x := v.Args[0]
59060 v.reset(OpAMD64SETAE)
59061 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
59062 v0.AddArg(x)
59063 v0.AddArg(y)
59064 v.AddArg(v0)
59065 return true
59066 }
59067 }
59068 func rewriteValueAMD64_OpGetCallerPC_0(v *Value) bool {
// match: (GetCallerPC)
// cond:
// result: (LoweredGetCallerPC)
59072 for {
59073 v.reset(OpAMD64LoweredGetCallerPC)
59074 return true
59075 }
59076 }
59077 func rewriteValueAMD64_OpGetCallerSP_0(v *Value) bool {
// match: (GetCallerSP)
// cond:
// result: (LoweredGetCallerSP)
59081 for {
59082 v.reset(OpAMD64LoweredGetCallerSP)
59083 return true
59084 }
59085 }
59086 func rewriteValueAMD64_OpGetClosurePtr_0(v *Value) bool {
// match: (GetClosurePtr)
// cond:
// result: (LoweredGetClosurePtr)
59090 for {
59091 v.reset(OpAMD64LoweredGetClosurePtr)
59092 return true
59093 }
59094 }
59095 func rewriteValueAMD64_OpGetG_0(v *Value) bool {
// match: (GetG mem)
// cond:
// result: (LoweredGetG mem)
59099 for {
59100 mem := v.Args[0]
59101 v.reset(OpAMD64LoweredGetG)
59102 v.AddArg(mem)
59103 return true
59104 }
59105 }
59106 func rewriteValueAMD64_OpGreater16_0(v *Value) bool {
59107 b := v.Block
// match: (Greater16 x y)
// cond:
// result: (SETG (CMPW x y))
59111 for {
59112 y := v.Args[1]
59113 x := v.Args[0]
59114 v.reset(OpAMD64SETG)
59115 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
59116 v0.AddArg(x)
59117 v0.AddArg(y)
59118 v.AddArg(v0)
59119 return true
59120 }
59121 }
59122 func rewriteValueAMD64_OpGreater16U_0(v *Value) bool {
59123 b := v.Block
// match: (Greater16U x y)
// cond:
// result: (SETA (CMPW x y))
59127 for {
59128 y := v.Args[1]
59129 x := v.Args[0]
59130 v.reset(OpAMD64SETA)
59131 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
59132 v0.AddArg(x)
59133 v0.AddArg(y)
59134 v.AddArg(v0)
59135 return true
59136 }
59137 }
59138 func rewriteValueAMD64_OpGreater32_0(v *Value) bool {
59139 b := v.Block
// match: (Greater32 x y)
// cond:
// result: (SETG (CMPL x y))
59143 for {
59144 y := v.Args[1]
59145 x := v.Args[0]
59146 v.reset(OpAMD64SETG)
59147 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
59148 v0.AddArg(x)
59149 v0.AddArg(y)
59150 v.AddArg(v0)
59151 return true
59152 }
59153 }
59154 func rewriteValueAMD64_OpGreater32F_0(v *Value) bool {
59155 b := v.Block
// match: (Greater32F x y)
// cond:
// result: (SETGF (UCOMISS x y))
59159 for {
59160 y := v.Args[1]
59161 x := v.Args[0]
59162 v.reset(OpAMD64SETGF)
59163 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
59164 v0.AddArg(x)
59165 v0.AddArg(y)
59166 v.AddArg(v0)
59167 return true
59168 }
59169 }
59170 func rewriteValueAMD64_OpGreater32U_0(v *Value) bool {
59171 b := v.Block
// match: (Greater32U x y)
// cond:
// result: (SETA (CMPL x y))
59175 for {
59176 y := v.Args[1]
59177 x := v.Args[0]
59178 v.reset(OpAMD64SETA)
59179 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
59180 v0.AddArg(x)
59181 v0.AddArg(y)
59182 v.AddArg(v0)
59183 return true
59184 }
59185 }
59186 func rewriteValueAMD64_OpGreater64_0(v *Value) bool {
59187 b := v.Block
// match: (Greater64 x y)
// cond:
// result: (SETG (CMPQ x y))
59191 for {
59192 y := v.Args[1]
59193 x := v.Args[0]
59194 v.reset(OpAMD64SETG)
59195 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
59196 v0.AddArg(x)
59197 v0.AddArg(y)
59198 v.AddArg(v0)
59199 return true
59200 }
59201 }
59202 func rewriteValueAMD64_OpGreater64F_0(v *Value) bool {
59203 b := v.Block
// match: (Greater64F x y)
// cond:
// result: (SETGF (UCOMISD x y))
59207 for {
59208 y := v.Args[1]
59209 x := v.Args[0]
59210 v.reset(OpAMD64SETGF)
59211 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
59212 v0.AddArg(x)
59213 v0.AddArg(y)
59214 v.AddArg(v0)
59215 return true
59216 }
59217 }
59218 func rewriteValueAMD64_OpGreater64U_0(v *Value) bool {
59219 b := v.Block
// match: (Greater64U x y)
// cond:
// result: (SETA (CMPQ x y))
59223 for {
59224 y := v.Args[1]
59225 x := v.Args[0]
59226 v.reset(OpAMD64SETA)
59227 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
59228 v0.AddArg(x)
59229 v0.AddArg(y)
59230 v.AddArg(v0)
59231 return true
59232 }
59233 }
59234 func rewriteValueAMD64_OpGreater8_0(v *Value) bool {
59235 b := v.Block
// match: (Greater8 x y)
// cond:
// result: (SETG (CMPB x y))
59239 for {
59240 y := v.Args[1]
59241 x := v.Args[0]
59242 v.reset(OpAMD64SETG)
59243 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
59244 v0.AddArg(x)
59245 v0.AddArg(y)
59246 v.AddArg(v0)
59247 return true
59248 }
59249 }
59250 func rewriteValueAMD64_OpGreater8U_0(v *Value) bool {
59251 b := v.Block
// match: (Greater8U x y)
// cond:
// result: (SETA (CMPB x y))
59255 for {
59256 y := v.Args[1]
59257 x := v.Args[0]
59258 v.reset(OpAMD64SETA)
59259 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
59260 v0.AddArg(x)
59261 v0.AddArg(y)
59262 v.AddArg(v0)
59263 return true
59264 }
59265 }
59266 func rewriteValueAMD64_OpHmul32_0(v *Value) bool {
// match: (Hmul32 x y)
// cond:
// result: (HMULL x y)
59270 for {
59271 y := v.Args[1]
59272 x := v.Args[0]
59273 v.reset(OpAMD64HMULL)
59274 v.AddArg(x)
59275 v.AddArg(y)
59276 return true
59277 }
59278 }
59279 func rewriteValueAMD64_OpHmul32u_0(v *Value) bool {
// match: (Hmul32u x y)
// cond:
// result: (HMULLU x y)
59283 for {
59284 y := v.Args[1]
59285 x := v.Args[0]
59286 v.reset(OpAMD64HMULLU)
59287 v.AddArg(x)
59288 v.AddArg(y)
59289 return true
59290 }
59291 }
59292 func rewriteValueAMD64_OpHmul64_0(v *Value) bool {
// match: (Hmul64 x y)
// cond:
// result: (HMULQ x y)
59296 for {
59297 y := v.Args[1]
59298 x := v.Args[0]
59299 v.reset(OpAMD64HMULQ)
59300 v.AddArg(x)
59301 v.AddArg(y)
59302 return true
59303 }
59304 }
59305 func rewriteValueAMD64_OpHmul64u_0(v *Value) bool {
// match: (Hmul64u x y)
// cond:
// result: (HMULQU x y)
59309 for {
59310 y := v.Args[1]
59311 x := v.Args[0]
59312 v.reset(OpAMD64HMULQU)
59313 v.AddArg(x)
59314 v.AddArg(y)
59315 return true
59316 }
59317 }
59318 func rewriteValueAMD64_OpInt64Hi_0(v *Value) bool {
// match: (Int64Hi x)
// cond:
// result: (SHRQconst [32] x)
59322 for {
59323 x := v.Args[0]
59324 v.reset(OpAMD64SHRQconst)
59325 v.AuxInt = 32
59326 v.AddArg(x)
59327 return true
59328 }
59329 }
59330 func rewriteValueAMD64_OpInt64Lo_0(v *Value) bool {
// match: (Int64Lo x)
// cond:
// result: x
59334 for {
59335 x := v.Args[0]
59336 v.reset(OpCopy)
59337 v.Type = x.Type
59338 v.AddArg(x)
59339 return true
59340 }
59341 }
59342 func rewriteValueAMD64_OpInterCall_0(v *Value) bool {
// match: (InterCall [argwid] entry mem)
// cond:
// result: (CALLinter [argwid] entry mem)
59346 for {
59347 argwid := v.AuxInt
59348 mem := v.Args[1]
59349 entry := v.Args[0]
59350 v.reset(OpAMD64CALLinter)
59351 v.AuxInt = argwid
59352 v.AddArg(entry)
59353 v.AddArg(mem)
59354 return true
59355 }
59356 }
59357 func rewriteValueAMD64_OpIsInBounds_0(v *Value) bool {
59358 b := v.Block
59359 config := b.Func.Config
// match: (IsInBounds idx len)
// cond: config.PtrSize == 8
// result: (SETB (CMPQ idx len))
59363 for {
59364 len := v.Args[1]
59365 idx := v.Args[0]
59366 if !(config.PtrSize == 8) {
59367 break
59368 }
59369 v.reset(OpAMD64SETB)
59370 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
59371 v0.AddArg(idx)
59372 v0.AddArg(len)
59373 v.AddArg(v0)
59374 return true
59375 }
// match: (IsInBounds idx len)
// cond: config.PtrSize == 4
// result: (SETB (CMPL idx len))
59379 for {
59380 len := v.Args[1]
59381 idx := v.Args[0]
59382 if !(config.PtrSize == 4) {
59383 break
59384 }
59385 v.reset(OpAMD64SETB)
59386 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
59387 v0.AddArg(idx)
59388 v0.AddArg(len)
59389 v.AddArg(v0)
59390 return true
59391 }
59392 return false
59393 }
59394 func rewriteValueAMD64_OpIsNonNil_0(v *Value) bool {
59395 b := v.Block
59396 config := b.Func.Config
// match: (IsNonNil p)
// cond: config.PtrSize == 8
// result: (SETNE (TESTQ p p))
59400 for {
59401 p := v.Args[0]
59402 if !(config.PtrSize == 8) {
59403 break
59404 }
59405 v.reset(OpAMD64SETNE)
59406 v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
59407 v0.AddArg(p)
59408 v0.AddArg(p)
59409 v.AddArg(v0)
59410 return true
59411 }
// match: (IsNonNil p)
// cond: config.PtrSize == 4
// result: (SETNE (TESTL p p))
59415 for {
59416 p := v.Args[0]
59417 if !(config.PtrSize == 4) {
59418 break
59419 }
59420 v.reset(OpAMD64SETNE)
59421 v0 := b.NewValue0(v.Pos, OpAMD64TESTL, types.TypeFlags)
59422 v0.AddArg(p)
59423 v0.AddArg(p)
59424 v.AddArg(v0)
59425 return true
59426 }
59427 return false
59428 }
59429 func rewriteValueAMD64_OpIsSliceInBounds_0(v *Value) bool {
59430 b := v.Block
59431 config := b.Func.Config
// match: (IsSliceInBounds idx len)
// cond: config.PtrSize == 8
// result: (SETBE (CMPQ idx len))
59435 for {
59436 len := v.Args[1]
59437 idx := v.Args[0]
59438 if !(config.PtrSize == 8) {
59439 break
59440 }
59441 v.reset(OpAMD64SETBE)
59442 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
59443 v0.AddArg(idx)
59444 v0.AddArg(len)
59445 v.AddArg(v0)
59446 return true
59447 }
// match: (IsSliceInBounds idx len)
// cond: config.PtrSize == 4
// result: (SETBE (CMPL idx len))
59451 for {
59452 len := v.Args[1]
59453 idx := v.Args[0]
59454 if !(config.PtrSize == 4) {
59455 break
59456 }
59457 v.reset(OpAMD64SETBE)
59458 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
59459 v0.AddArg(idx)
59460 v0.AddArg(len)
59461 v.AddArg(v0)
59462 return true
59463 }
59464 return false
59465 }
59466 func rewriteValueAMD64_OpLeq16_0(v *Value) bool {
59467 b := v.Block
// match: (Leq16 x y)
// cond:
// result: (SETLE (CMPW x y))
59471 for {
59472 y := v.Args[1]
59473 x := v.Args[0]
59474 v.reset(OpAMD64SETLE)
59475 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
59476 v0.AddArg(x)
59477 v0.AddArg(y)
59478 v.AddArg(v0)
59479 return true
59480 }
59481 }
59482 func rewriteValueAMD64_OpLeq16U_0(v *Value) bool {
59483 b := v.Block
// match: (Leq16U x y)
// cond:
// result: (SETBE (CMPW x y))
59487 for {
59488 y := v.Args[1]
59489 x := v.Args[0]
59490 v.reset(OpAMD64SETBE)
59491 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
59492 v0.AddArg(x)
59493 v0.AddArg(y)
59494 v.AddArg(v0)
59495 return true
59496 }
59497 }
59498 func rewriteValueAMD64_OpLeq32_0(v *Value) bool {
59499 b := v.Block
// match: (Leq32 x y)
// cond:
// result: (SETLE (CMPL x y))
59503 for {
59504 y := v.Args[1]
59505 x := v.Args[0]
59506 v.reset(OpAMD64SETLE)
59507 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
59508 v0.AddArg(x)
59509 v0.AddArg(y)
59510 v.AddArg(v0)
59511 return true
59512 }
59513 }
59514 func rewriteValueAMD64_OpLeq32F_0(v *Value) bool {
59515 b := v.Block
// match: (Leq32F x y)
// cond:
// result: (SETGEF (UCOMISS y x))
59519 for {
59520 y := v.Args[1]
59521 x := v.Args[0]
59522 v.reset(OpAMD64SETGEF)
59523 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
59524 v0.AddArg(y)
59525 v0.AddArg(x)
59526 v.AddArg(v0)
59527 return true
59528 }
59529 }
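// Editorial note (not part of the generated rules): float <= swaps its
// operands and uses the "greater or equal" flavor (x <= y iff y >= x) because
// UCOMISS reports an unordered (NaN) compare with CF and PF set, which the
// GEF/GF conditions read as false, matching Go semantics; a direct
// "less or equal" test would wrongly read unordered as true.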
59530 func rewriteValueAMD64_OpLeq32U_0(v *Value) bool {
59531 b := v.Block
// match: (Leq32U x y)
// cond:
// result: (SETBE (CMPL x y))
59535 for {
59536 y := v.Args[1]
59537 x := v.Args[0]
59538 v.reset(OpAMD64SETBE)
59539 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
59540 v0.AddArg(x)
59541 v0.AddArg(y)
59542 v.AddArg(v0)
59543 return true
59544 }
59545 }
59546 func rewriteValueAMD64_OpLeq64_0(v *Value) bool {
59547 b := v.Block
// match: (Leq64 x y)
// cond:
// result: (SETLE (CMPQ x y))
59551 for {
59552 y := v.Args[1]
59553 x := v.Args[0]
59554 v.reset(OpAMD64SETLE)
59555 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
59556 v0.AddArg(x)
59557 v0.AddArg(y)
59558 v.AddArg(v0)
59559 return true
59560 }
59561 }
59562 func rewriteValueAMD64_OpLeq64F_0(v *Value) bool {
59563 b := v.Block
// match: (Leq64F x y)
// cond:
// result: (SETGEF (UCOMISD y x))
59567 for {
59568 y := v.Args[1]
59569 x := v.Args[0]
59570 v.reset(OpAMD64SETGEF)
59571 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
59572 v0.AddArg(y)
59573 v0.AddArg(x)
59574 v.AddArg(v0)
59575 return true
59576 }
59577 }
59578 func rewriteValueAMD64_OpLeq64U_0(v *Value) bool {
59579 b := v.Block
// match: (Leq64U x y)
// cond:
// result: (SETBE (CMPQ x y))
59583 for {
59584 y := v.Args[1]
59585 x := v.Args[0]
59586 v.reset(OpAMD64SETBE)
59587 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
59588 v0.AddArg(x)
59589 v0.AddArg(y)
59590 v.AddArg(v0)
59591 return true
59592 }
59593 }
59594 func rewriteValueAMD64_OpLeq8_0(v *Value) bool {
59595 b := v.Block
// match: (Leq8 x y)
// cond:
// result: (SETLE (CMPB x y))
59599 for {
59600 y := v.Args[1]
59601 x := v.Args[0]
59602 v.reset(OpAMD64SETLE)
59603 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
59604 v0.AddArg(x)
59605 v0.AddArg(y)
59606 v.AddArg(v0)
59607 return true
59608 }
59609 }
59610 func rewriteValueAMD64_OpLeq8U_0(v *Value) bool {
59611 b := v.Block
// match: (Leq8U x y)
// cond:
// result: (SETBE (CMPB x y))
59615 for {
59616 y := v.Args[1]
59617 x := v.Args[0]
59618 v.reset(OpAMD64SETBE)
59619 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
59620 v0.AddArg(x)
59621 v0.AddArg(y)
59622 v.AddArg(v0)
59623 return true
59624 }
59625 }
59626 func rewriteValueAMD64_OpLess16_0(v *Value) bool {
59627 b := v.Block
// match: (Less16 x y)
// cond:
// result: (SETL (CMPW x y))
59631 for {
59632 y := v.Args[1]
59633 x := v.Args[0]
59634 v.reset(OpAMD64SETL)
59635 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
59636 v0.AddArg(x)
59637 v0.AddArg(y)
59638 v.AddArg(v0)
59639 return true
59640 }
59641 }
59642 func rewriteValueAMD64_OpLess16U_0(v *Value) bool {
59643 b := v.Block
// match: (Less16U x y)
// cond:
// result: (SETB (CMPW x y))
59647 for {
59648 y := v.Args[1]
59649 x := v.Args[0]
59650 v.reset(OpAMD64SETB)
59651 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
59652 v0.AddArg(x)
59653 v0.AddArg(y)
59654 v.AddArg(v0)
59655 return true
59656 }
59657 }
59658 func rewriteValueAMD64_OpLess32_0(v *Value) bool {
59659 b := v.Block
// match: (Less32 x y)
// cond:
// result: (SETL (CMPL x y))
59663 for {
59664 y := v.Args[1]
59665 x := v.Args[0]
59666 v.reset(OpAMD64SETL)
59667 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
59668 v0.AddArg(x)
59669 v0.AddArg(y)
59670 v.AddArg(v0)
59671 return true
59672 }
59673 }
59674 func rewriteValueAMD64_OpLess32F_0(v *Value) bool {
59675 b := v.Block
// match: (Less32F x y)
// cond:
// result: (SETGF (UCOMISS y x))
59679 for {
59680 y := v.Args[1]
59681 x := v.Args[0]
59682 v.reset(OpAMD64SETGF)
59683 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
59684 v0.AddArg(y)
59685 v0.AddArg(x)
59686 v.AddArg(v0)
59687 return true
59688 }
59689 }
59690 func rewriteValueAMD64_OpLess32U_0(v *Value) bool {
59691 b := v.Block
// match: (Less32U x y)
// cond:
// result: (SETB (CMPL x y))
59695 for {
59696 y := v.Args[1]
59697 x := v.Args[0]
59698 v.reset(OpAMD64SETB)
59699 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
59700 v0.AddArg(x)
59701 v0.AddArg(y)
59702 v.AddArg(v0)
59703 return true
59704 }
59705 }
59706 func rewriteValueAMD64_OpLess64_0(v *Value) bool {
59707 b := v.Block
// match: (Less64 x y)
// cond:
// result: (SETL (CMPQ x y))
59711 for {
59712 y := v.Args[1]
59713 x := v.Args[0]
59714 v.reset(OpAMD64SETL)
59715 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
59716 v0.AddArg(x)
59717 v0.AddArg(y)
59718 v.AddArg(v0)
59719 return true
59720 }
59721 }
59722 func rewriteValueAMD64_OpLess64F_0(v *Value) bool {
59723 b := v.Block
// match: (Less64F x y)
// cond:
// result: (SETGF (UCOMISD y x))
59727 for {
59728 y := v.Args[1]
59729 x := v.Args[0]
59730 v.reset(OpAMD64SETGF)
59731 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
59732 v0.AddArg(y)
59733 v0.AddArg(x)
59734 v.AddArg(v0)
59735 return true
59736 }
59737 }
59738 func rewriteValueAMD64_OpLess64U_0(v *Value) bool {
59739 b := v.Block
// match: (Less64U x y)
// cond:
// result: (SETB (CMPQ x y))
59743 for {
59744 y := v.Args[1]
59745 x := v.Args[0]
59746 v.reset(OpAMD64SETB)
59747 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
59748 v0.AddArg(x)
59749 v0.AddArg(y)
59750 v.AddArg(v0)
59751 return true
59752 }
59753 }
59754 func rewriteValueAMD64_OpLess8_0(v *Value) bool {
59755 b := v.Block
// match: (Less8 x y)
// cond:
// result: (SETL (CMPB x y))
59759 for {
59760 y := v.Args[1]
59761 x := v.Args[0]
59762 v.reset(OpAMD64SETL)
59763 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
59764 v0.AddArg(x)
59765 v0.AddArg(y)
59766 v.AddArg(v0)
59767 return true
59768 }
59769 }
59770 func rewriteValueAMD64_OpLess8U_0(v *Value) bool {
59771 b := v.Block
// match: (Less8U x y)
// cond:
// result: (SETB (CMPB x y))
59775 for {
59776 y := v.Args[1]
59777 x := v.Args[0]
59778 v.reset(OpAMD64SETB)
59779 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
59780 v0.AddArg(x)
59781 v0.AddArg(y)
59782 v.AddArg(v0)
59783 return true
59784 }
59785 }
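// Note on the comparison lowerings above: each Less*/Leq* op becomes a
// flag-setting CMP of the matching width followed by a SETcc, with SETL/SETBE
// style signed condition codes for signed inputs and SETB/SETBE for unsigned
// ones. The float variants swap the operands and use SETGF, so (Less32F x y)
// is computed as "y > x" via (SETGF (UCOMISS y x)); that is how the generated
// code above reads, since UCOMISS receives y before x.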
func rewriteValueAMD64_OpLoad_0(v *Value) bool {
	b := v.Block
	config := b.Func.Config
	// match: (Load <t> ptr mem)
	// cond: (is64BitInt(t) || isPtr(t) && config.PtrSize == 8)
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		mem := v.Args[1]
		ptr := v.Args[0]
		if !(is64BitInt(t) || isPtr(t) && config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (is32BitInt(t) || isPtr(t) && config.PtrSize == 4)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		mem := v.Args[1]
		ptr := v.Args[0]
		if !(is32BitInt(t) || isPtr(t) && config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		mem := v.Args[1]
		ptr := v.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (t.IsBoolean() || is8BitInt(t))
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		mem := v.Args[1]
		ptr := v.Args[0]
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		mem := v.Args[1]
		ptr := v.Args[0]
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		mem := v.Args[1]
		ptr := v.Args[0]
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg(ptr)
		v.AddArg(mem)
		return true
	}
	return false
}
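// The Load lowering above dispatches purely on the loaded type: 64-bit ints
// and 8-byte pointers take MOVQload, 32-bit ints and 4-byte pointers take
// MOVLload, then MOVWload/MOVBload cover the narrower ints and booleans, and
// MOVSSload/MOVSDload cover the floats. The conditions are tried in order,
// so a pointer type is claimed by the first case whose PtrSize test matches.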
func rewriteValueAMD64_OpLocalAddr_0(v *Value) bool {
	b := v.Block
	config := b.Func.Config
	// match: (LocalAddr {sym} base _)
	// cond: config.PtrSize == 8
	// result: (LEAQ {sym} base)
	for {
		sym := v.Aux
		_ = v.Args[1]
		base := v.Args[0]
		if !(config.PtrSize == 8) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	// match: (LocalAddr {sym} base _)
	// cond: config.PtrSize == 4
	// result: (LEAL {sym} base)
	for {
		sym := v.Aux
		_ = v.Args[1]
		base := v.Args[0]
		if !(config.PtrSize == 4) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.Aux = sym
		v.AddArg(base)
		return true
	}
	return false
}
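// LocalAddr picks LEAQ or LEAL by pointer size alone; the frame-local symbol
// travels in Aux, and the second (memory) argument is only matched, not used.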
func rewriteValueAMD64_OpLsh16x16_0(v *Value) bool {
	b := v.Block
	// match: (Lsh16x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh16x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
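// A sketch of why the unbounded case above is correct: SHLL uses only the
// low bits of the count, but Go requires x << y == 0 once y reaches the
// width the rule guards for (32 here, since the shift runs in a 32-bit
// register). SBBLcarrymask materializes -1 when CMPWconst sets the carry
// flag (y < 32) and 0 otherwise, so for example y = 40 yields
//
//	(SHLL x 40) & 0 = 0
//
// while y = 3 yields (SHLL x 3) & -1, i.e. the plain shift.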
func rewriteValueAMD64_OpLsh16x32_0(v *Value) bool {
	b := v.Block
	// match: (Lsh16x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh16x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x64_0(v *Value) bool {
	b := v.Block
	// match: (Lsh16x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh16x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x8_0(v *Value) bool {
	b := v.Block
	// match: (Lsh16x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh16x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x16_0(v *Value) bool {
	b := v.Block
	// match: (Lsh32x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh32x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x32_0(v *Value) bool {
	b := v.Block
	// match: (Lsh32x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh32x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x64_0(v *Value) bool {
	b := v.Block
	// match: (Lsh32x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh32x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x8_0(v *Value) bool {
	b := v.Block
	// match: (Lsh32x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh32x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x16_0(v *Value) bool {
	b := v.Block
	// match: (Lsh64x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh64x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x32_0(v *Value) bool {
	b := v.Block
	// match: (Lsh64x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh64x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x64_0(v *Value) bool {
	b := v.Block
	// match: (Lsh64x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh64x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x8_0(v *Value) bool {
	b := v.Block
	// match: (Lsh64x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 64
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh64x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x16_0(v *Value) bool {
	b := v.Block
	// match: (Lsh8x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh8x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x32_0(v *Value) bool {
	b := v.Block
	// match: (Lsh8x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh8x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x64_0(v *Value) bool {
	b := v.Block
	// match: (Lsh8x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh8x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x8_0(v *Value) bool {
	b := v.Block
	// match: (Lsh8x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		y := v.Args[1]
		x := v.Args[0]
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = 32
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg(v1)
		return true
	}
	// match: (Lsh8x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		y := v.Args[1]
		x := v.Args[0]
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg(x)
		v.AddArg(y)
		return true
	}
	return false
}
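// Across the whole Lsh family the pattern is fixed: 8-, 16- and 32-bit
// shifts are performed as SHLL with the count clamped against 32, while the
// 64-bit shifts switch to SHLQ/ANDQ/SBBQcarrymask and clamp against 64.
// Only the width of the count comparison (CMPBconst/CMPWconst/CMPLconst/
// CMPQconst) follows the type of the shift count.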
func rewriteValueAMD64_OpMod16_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod16 [a] x y)
	// cond:
	// result: (Select1 (DIVW [a] x y))
	for {
		a := v.AuxInt
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AuxInt = a
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod16u_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod16u x y)
	// cond:
	// result: (Select1 (DIVWU x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod32 [a] x y)
	// cond:
	// result: (Select1 (DIVL [a] x y))
	for {
		a := v.AuxInt
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AuxInt = a
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod32u_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod32u x y)
	// cond:
	// result: (Select1 (DIVLU x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod64 [a] x y)
	// cond:
	// result: (Select1 (DIVQ [a] x y))
	for {
		a := v.AuxInt
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AuxInt = a
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod64u_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod64u x y)
	// cond:
	// result: (Select1 (DIVQU x y))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg(x)
		v0.AddArg(y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod8 x y)
	// cond:
	// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpMod8u_0(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mod8u x y)
	// cond:
	// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		y := v.Args[1]
		x := v.Args[0]
		v.reset(OpSelect1)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg(v2)
		v.AddArg(v0)
		return true
	}
}
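// There is no 8-bit divide in this lowering: Mod8/Mod8u widen both operands
// to 16 bits (SignExt8to16/ZeroExt8to16) and reuse DIVW/DIVWU, taking the
// remainder via Select1 on the quotient/remainder tuple. In effect, (Mod8 x y)
// executes as int16(x) % int16(y), with the consumer truncating back to 8 bits.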
60722 func rewriteValueAMD64_OpMove_0(v *Value) bool {
60723 b := v.Block
60724 config := b.Func.Config
60725 typ := &b.Func.Config.Types
60726
60727
60728
60729 for {
60730 if v.AuxInt != 0 {
60731 break
60732 }
60733 mem := v.Args[2]
60734 v.reset(OpCopy)
60735 v.Type = mem.Type
60736 v.AddArg(mem)
60737 return true
60738 }
60739
60740
60741
60742 for {
60743 if v.AuxInt != 1 {
60744 break
60745 }
60746 mem := v.Args[2]
60747 dst := v.Args[0]
60748 src := v.Args[1]
60749 v.reset(OpAMD64MOVBstore)
60750 v.AddArg(dst)
60751 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
60752 v0.AddArg(src)
60753 v0.AddArg(mem)
60754 v.AddArg(v0)
60755 v.AddArg(mem)
60756 return true
60757 }
60758
60759
60760
60761 for {
60762 if v.AuxInt != 2 {
60763 break
60764 }
60765 mem := v.Args[2]
60766 dst := v.Args[0]
60767 src := v.Args[1]
60768 v.reset(OpAMD64MOVWstore)
60769 v.AddArg(dst)
60770 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
60771 v0.AddArg(src)
60772 v0.AddArg(mem)
60773 v.AddArg(v0)
60774 v.AddArg(mem)
60775 return true
60776 }
60777
60778
60779
60780 for {
60781 if v.AuxInt != 4 {
60782 break
60783 }
60784 mem := v.Args[2]
60785 dst := v.Args[0]
60786 src := v.Args[1]
60787 v.reset(OpAMD64MOVLstore)
60788 v.AddArg(dst)
60789 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
60790 v0.AddArg(src)
60791 v0.AddArg(mem)
60792 v.AddArg(v0)
60793 v.AddArg(mem)
60794 return true
60795 }
60796
60797
60798
60799 for {
60800 if v.AuxInt != 8 {
60801 break
60802 }
60803 mem := v.Args[2]
60804 dst := v.Args[0]
60805 src := v.Args[1]
60806 v.reset(OpAMD64MOVQstore)
60807 v.AddArg(dst)
60808 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
60809 v0.AddArg(src)
60810 v0.AddArg(mem)
60811 v.AddArg(v0)
60812 v.AddArg(mem)
60813 return true
60814 }
60815
60816
60817
60818 for {
60819 if v.AuxInt != 16 {
60820 break
60821 }
60822 mem := v.Args[2]
60823 dst := v.Args[0]
60824 src := v.Args[1]
60825 if !(config.useSSE) {
60826 break
60827 }
60828 v.reset(OpAMD64MOVOstore)
60829 v.AddArg(dst)
60830 v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
60831 v0.AddArg(src)
60832 v0.AddArg(mem)
60833 v.AddArg(v0)
60834 v.AddArg(mem)
60835 return true
60836 }
60837
60838
60839
60840 for {
60841 if v.AuxInt != 16 {
60842 break
60843 }
60844 mem := v.Args[2]
60845 dst := v.Args[0]
60846 src := v.Args[1]
60847 if !(!config.useSSE) {
60848 break
60849 }
60850 v.reset(OpAMD64MOVQstore)
60851 v.AuxInt = 8
60852 v.AddArg(dst)
60853 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
60854 v0.AuxInt = 8
60855 v0.AddArg(src)
60856 v0.AddArg(mem)
60857 v.AddArg(v0)
60858 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
60859 v1.AddArg(dst)
60860 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
60861 v2.AddArg(src)
60862 v2.AddArg(mem)
60863 v1.AddArg(v2)
60864 v1.AddArg(mem)
60865 v.AddArg(v1)
60866 return true
60867 }
60868
60869
60870
60871 for {
60872 if v.AuxInt != 32 {
60873 break
60874 }
60875 mem := v.Args[2]
60876 dst := v.Args[0]
60877 src := v.Args[1]
60878 v.reset(OpMove)
60879 v.AuxInt = 16
60880 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
60881 v0.AuxInt = 16
60882 v0.AddArg(dst)
60883 v.AddArg(v0)
60884 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
60885 v1.AuxInt = 16
60886 v1.AddArg(src)
60887 v.AddArg(v1)
60888 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
60889 v2.AuxInt = 16
60890 v2.AddArg(dst)
60891 v2.AddArg(src)
60892 v2.AddArg(mem)
60893 v.AddArg(v2)
60894 return true
60895 }
60896
60897
60898
60899 for {
60900 if v.AuxInt != 48 {
60901 break
60902 }
60903 mem := v.Args[2]
60904 dst := v.Args[0]
60905 src := v.Args[1]
60906 if !(config.useSSE) {
60907 break
60908 }
60909 v.reset(OpMove)
60910 v.AuxInt = 32
60911 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
60912 v0.AuxInt = 16
60913 v0.AddArg(dst)
60914 v.AddArg(v0)
60915 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
60916 v1.AuxInt = 16
60917 v1.AddArg(src)
60918 v.AddArg(v1)
60919 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
60920 v2.AuxInt = 16
60921 v2.AddArg(dst)
60922 v2.AddArg(src)
60923 v2.AddArg(mem)
60924 v.AddArg(v2)
60925 return true
60926 }
60927
60928
60929
60930 for {
60931 if v.AuxInt != 64 {
60932 break
60933 }
60934 mem := v.Args[2]
60935 dst := v.Args[0]
60936 src := v.Args[1]
60937 if !(config.useSSE) {
60938 break
60939 }
60940 v.reset(OpMove)
60941 v.AuxInt = 32
60942 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
60943 v0.AuxInt = 32
60944 v0.AddArg(dst)
60945 v.AddArg(v0)
60946 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
60947 v1.AuxInt = 32
60948 v1.AddArg(src)
60949 v.AddArg(v1)
60950 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
60951 v2.AuxInt = 32
60952 v2.AddArg(dst)
60953 v2.AddArg(src)
60954 v2.AddArg(mem)
60955 v.AddArg(v2)
60956 return true
60957 }
60958 return false
60959 }
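// Move sizes 0..8 and 16/32/48/64 are handled above with straight-line
// load/store pairs (or MOVO 16-byte copies when config.useSSE is set); the
// odd and larger sizes fall through to OpMove_10/OpMove_20 below. For
// instance, the size-16 rule with !config.useSSE emits two MOVQ load/store
// pairs, at offset 8 and at offset 0.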
60960 func rewriteValueAMD64_OpMove_10(v *Value) bool {
60961 b := v.Block
60962 config := b.Func.Config
60963 typ := &b.Func.Config.Types
60964
60965
60966
60967 for {
60968 if v.AuxInt != 3 {
60969 break
60970 }
60971 mem := v.Args[2]
60972 dst := v.Args[0]
60973 src := v.Args[1]
60974 v.reset(OpAMD64MOVBstore)
60975 v.AuxInt = 2
60976 v.AddArg(dst)
60977 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
60978 v0.AuxInt = 2
60979 v0.AddArg(src)
60980 v0.AddArg(mem)
60981 v.AddArg(v0)
60982 v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
60983 v1.AddArg(dst)
60984 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
60985 v2.AddArg(src)
60986 v2.AddArg(mem)
60987 v1.AddArg(v2)
60988 v1.AddArg(mem)
60989 v.AddArg(v1)
60990 return true
60991 }
60992
60993
60994
60995 for {
60996 if v.AuxInt != 5 {
60997 break
60998 }
60999 mem := v.Args[2]
61000 dst := v.Args[0]
61001 src := v.Args[1]
61002 v.reset(OpAMD64MOVBstore)
61003 v.AuxInt = 4
61004 v.AddArg(dst)
61005 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
61006 v0.AuxInt = 4
61007 v0.AddArg(src)
61008 v0.AddArg(mem)
61009 v.AddArg(v0)
61010 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
61011 v1.AddArg(dst)
61012 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
61013 v2.AddArg(src)
61014 v2.AddArg(mem)
61015 v1.AddArg(v2)
61016 v1.AddArg(mem)
61017 v.AddArg(v1)
61018 return true
61019 }
61020
61021
61022
61023 for {
61024 if v.AuxInt != 6 {
61025 break
61026 }
61027 mem := v.Args[2]
61028 dst := v.Args[0]
61029 src := v.Args[1]
61030 v.reset(OpAMD64MOVWstore)
61031 v.AuxInt = 4
61032 v.AddArg(dst)
61033 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
61034 v0.AuxInt = 4
61035 v0.AddArg(src)
61036 v0.AddArg(mem)
61037 v.AddArg(v0)
61038 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
61039 v1.AddArg(dst)
61040 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
61041 v2.AddArg(src)
61042 v2.AddArg(mem)
61043 v1.AddArg(v2)
61044 v1.AddArg(mem)
61045 v.AddArg(v1)
61046 return true
61047 }
61048
61049
61050
61051 for {
61052 if v.AuxInt != 7 {
61053 break
61054 }
61055 mem := v.Args[2]
61056 dst := v.Args[0]
61057 src := v.Args[1]
61058 v.reset(OpAMD64MOVLstore)
61059 v.AuxInt = 3
61060 v.AddArg(dst)
61061 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
61062 v0.AuxInt = 3
61063 v0.AddArg(src)
61064 v0.AddArg(mem)
61065 v.AddArg(v0)
61066 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
61067 v1.AddArg(dst)
61068 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
61069 v2.AddArg(src)
61070 v2.AddArg(mem)
61071 v1.AddArg(v2)
61072 v1.AddArg(mem)
61073 v.AddArg(v1)
61074 return true
61075 }
61076
61077
61078
61079 for {
61080 if v.AuxInt != 9 {
61081 break
61082 }
61083 mem := v.Args[2]
61084 dst := v.Args[0]
61085 src := v.Args[1]
61086 v.reset(OpAMD64MOVBstore)
61087 v.AuxInt = 8
61088 v.AddArg(dst)
61089 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
61090 v0.AuxInt = 8
61091 v0.AddArg(src)
61092 v0.AddArg(mem)
61093 v.AddArg(v0)
61094 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
61095 v1.AddArg(dst)
61096 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
61097 v2.AddArg(src)
61098 v2.AddArg(mem)
61099 v1.AddArg(v2)
61100 v1.AddArg(mem)
61101 v.AddArg(v1)
61102 return true
61103 }
61104
61105
61106
61107 for {
61108 if v.AuxInt != 10 {
61109 break
61110 }
61111 mem := v.Args[2]
61112 dst := v.Args[0]
61113 src := v.Args[1]
61114 v.reset(OpAMD64MOVWstore)
61115 v.AuxInt = 8
61116 v.AddArg(dst)
61117 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
61118 v0.AuxInt = 8
61119 v0.AddArg(src)
61120 v0.AddArg(mem)
61121 v.AddArg(v0)
61122 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
61123 v1.AddArg(dst)
61124 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
61125 v2.AddArg(src)
61126 v2.AddArg(mem)
61127 v1.AddArg(v2)
61128 v1.AddArg(mem)
61129 v.AddArg(v1)
61130 return true
61131 }
61132
61133
61134
61135 for {
61136 if v.AuxInt != 12 {
61137 break
61138 }
61139 mem := v.Args[2]
61140 dst := v.Args[0]
61141 src := v.Args[1]
61142 v.reset(OpAMD64MOVLstore)
61143 v.AuxInt = 8
61144 v.AddArg(dst)
61145 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
61146 v0.AuxInt = 8
61147 v0.AddArg(src)
61148 v0.AddArg(mem)
61149 v.AddArg(v0)
61150 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
61151 v1.AddArg(dst)
61152 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
61153 v2.AddArg(src)
61154 v2.AddArg(mem)
61155 v1.AddArg(v2)
61156 v1.AddArg(mem)
61157 v.AddArg(v1)
61158 return true
61159 }
61160
61161
61162
61163 for {
61164 s := v.AuxInt
61165 mem := v.Args[2]
61166 dst := v.Args[0]
61167 src := v.Args[1]
61168 if !(s == 11 || s >= 13 && s <= 15) {
61169 break
61170 }
61171 v.reset(OpAMD64MOVQstore)
61172 v.AuxInt = s - 8
61173 v.AddArg(dst)
61174 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
61175 v0.AuxInt = s - 8
61176 v0.AddArg(src)
61177 v0.AddArg(mem)
61178 v.AddArg(v0)
61179 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
61180 v1.AddArg(dst)
61181 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
61182 v2.AddArg(src)
61183 v2.AddArg(mem)
61184 v1.AddArg(v2)
61185 v1.AddArg(mem)
61186 v.AddArg(v1)
61187 return true
61188 }
61189
61190
61191
61192 for {
61193 s := v.AuxInt
61194 mem := v.Args[2]
61195 dst := v.Args[0]
61196 src := v.Args[1]
61197 if !(s > 16 && s%16 != 0 && s%16 <= 8) {
61198 break
61199 }
61200 v.reset(OpMove)
61201 v.AuxInt = s - s%16
61202 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
61203 v0.AuxInt = s % 16
61204 v0.AddArg(dst)
61205 v.AddArg(v0)
61206 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
61207 v1.AuxInt = s % 16
61208 v1.AddArg(src)
61209 v.AddArg(v1)
61210 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
61211 v2.AddArg(dst)
61212 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
61213 v3.AddArg(src)
61214 v3.AddArg(mem)
61215 v2.AddArg(v3)
61216 v2.AddArg(mem)
61217 v.AddArg(v2)
61218 return true
61219 }
61220
61221
61222
61223 for {
61224 s := v.AuxInt
61225 mem := v.Args[2]
61226 dst := v.Args[0]
61227 src := v.Args[1]
61228 if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) {
61229 break
61230 }
61231 v.reset(OpMove)
61232 v.AuxInt = s - s%16
61233 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
61234 v0.AuxInt = s % 16
61235 v0.AddArg(dst)
61236 v.AddArg(v0)
61237 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
61238 v1.AuxInt = s % 16
61239 v1.AddArg(src)
61240 v.AddArg(v1)
61241 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
61242 v2.AddArg(dst)
61243 v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
61244 v3.AddArg(src)
61245 v3.AddArg(mem)
61246 v2.AddArg(v3)
61247 v2.AddArg(mem)
61248 v.AddArg(v2)
61249 return true
61250 }
61251 return false
61252 }
61253 func rewriteValueAMD64_OpMove_20(v *Value) bool {
61254 b := v.Block
61255 config := b.Func.Config
61256 typ := &b.Func.Config.Types
61257
61258
61259
61260 for {
61261 s := v.AuxInt
61262 mem := v.Args[2]
61263 dst := v.Args[0]
61264 src := v.Args[1]
61265 if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
61266 break
61267 }
61268 v.reset(OpMove)
61269 v.AuxInt = s - s%16
61270 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
61271 v0.AuxInt = s % 16
61272 v0.AddArg(dst)
61273 v.AddArg(v0)
61274 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
61275 v1.AuxInt = s % 16
61276 v1.AddArg(src)
61277 v.AddArg(v1)
61278 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
61279 v2.AuxInt = 8
61280 v2.AddArg(dst)
61281 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
61282 v3.AuxInt = 8
61283 v3.AddArg(src)
61284 v3.AddArg(mem)
61285 v2.AddArg(v3)
61286 v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
61287 v4.AddArg(dst)
61288 v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
61289 v5.AddArg(src)
61290 v5.AddArg(mem)
61291 v4.AddArg(v5)
61292 v4.AddArg(mem)
61293 v2.AddArg(v4)
61294 v.AddArg(v2)
61295 return true
61296 }
61297
61298
61299
61300 for {
61301 s := v.AuxInt
61302 mem := v.Args[2]
61303 dst := v.Args[0]
61304 src := v.Args[1]
61305 if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice) {
61306 break
61307 }
61308 v.reset(OpAMD64DUFFCOPY)
61309 v.AuxInt = 14 * (64 - s/16)
61310 v.AddArg(dst)
61311 v.AddArg(src)
61312 v.AddArg(mem)
61313 return true
61314 }
61315
61316
61317
61318 for {
61319 s := v.AuxInt
61320 mem := v.Args[2]
61321 dst := v.Args[0]
61322 src := v.Args[1]
61323 if !((s > 16*64 || config.noDuffDevice) && s%8 == 0) {
61324 break
61325 }
61326 v.reset(OpAMD64REPMOVSQ)
61327 v.AddArg(dst)
61328 v.AddArg(src)
61329 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
61330 v0.AuxInt = s / 8
61331 v.AddArg(v0)
61332 v.AddArg(mem)
61333 return true
61334 }
61335 return false
61336 }
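// The DUFFCOPY rule above selects Duff's device for 16-byte-aligned copies
// of more than 64 and at most 1024 bytes, encoding the entry point as
// 14*(64-s/16), which suggests about 14 bytes of code per unrolled 16-byte
// step: a 128-byte move (s/16 = 8) enters at offset 14*(64-8) = 784.
// Anything larger, or any multiple of 8 when Duff's device is disabled
// (config.noDuffDevice), becomes REPMOVSQ with an s/8 word count.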
61337 func rewriteValueAMD64_OpMul16_0(v *Value) bool {
61338
61339
61340
61341 for {
61342 y := v.Args[1]
61343 x := v.Args[0]
61344 v.reset(OpAMD64MULL)
61345 v.AddArg(x)
61346 v.AddArg(y)
61347 return true
61348 }
61349 }
61350 func rewriteValueAMD64_OpMul32_0(v *Value) bool {
61351
61352
61353
61354 for {
61355 y := v.Args[1]
61356 x := v.Args[0]
61357 v.reset(OpAMD64MULL)
61358 v.AddArg(x)
61359 v.AddArg(y)
61360 return true
61361 }
61362 }
61363 func rewriteValueAMD64_OpMul32F_0(v *Value) bool {
61364
61365
61366
61367 for {
61368 y := v.Args[1]
61369 x := v.Args[0]
61370 v.reset(OpAMD64MULSS)
61371 v.AddArg(x)
61372 v.AddArg(y)
61373 return true
61374 }
61375 }
61376 func rewriteValueAMD64_OpMul64_0(v *Value) bool {
61377
61378
61379
61380 for {
61381 y := v.Args[1]
61382 x := v.Args[0]
61383 v.reset(OpAMD64MULQ)
61384 v.AddArg(x)
61385 v.AddArg(y)
61386 return true
61387 }
61388 }
61389 func rewriteValueAMD64_OpMul64F_0(v *Value) bool {
61390
61391
61392
61393 for {
61394 y := v.Args[1]
61395 x := v.Args[0]
61396 v.reset(OpAMD64MULSD)
61397 v.AddArg(x)
61398 v.AddArg(y)
61399 return true
61400 }
61401 }
61402 func rewriteValueAMD64_OpMul64uhilo_0(v *Value) bool {
61403
61404
61405
61406 for {
61407 y := v.Args[1]
61408 x := v.Args[0]
61409 v.reset(OpAMD64MULQU2)
61410 v.AddArg(x)
61411 v.AddArg(y)
61412 return true
61413 }
61414 }
61415 func rewriteValueAMD64_OpMul8_0(v *Value) bool {
61416
61417
61418
61419 for {
61420 y := v.Args[1]
61421 x := v.Args[0]
61422 v.reset(OpAMD64MULL)
61423 v.AddArg(x)
61424 v.AddArg(y)
61425 return true
61426 }
61427 }
61428 func rewriteValueAMD64_OpNeg16_0(v *Value) bool {
61429
61430
61431
61432 for {
61433 x := v.Args[0]
61434 v.reset(OpAMD64NEGL)
61435 v.AddArg(x)
61436 return true
61437 }
61438 }
61439 func rewriteValueAMD64_OpNeg32_0(v *Value) bool {
61440
61441
61442
61443 for {
61444 x := v.Args[0]
61445 v.reset(OpAMD64NEGL)
61446 v.AddArg(x)
61447 return true
61448 }
61449 }
61450 func rewriteValueAMD64_OpNeg32F_0(v *Value) bool {
61451 b := v.Block
61452 typ := &b.Func.Config.Types
61453
61454
61455
61456 for {
61457 x := v.Args[0]
61458 v.reset(OpAMD64PXOR)
61459 v.AddArg(x)
61460 v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
61461 v0.AuxInt = auxFrom32F(float32(math.Copysign(0, -1)))
61462 v.AddArg(v0)
61463 return true
61464 }
61465 }
61466 func rewriteValueAMD64_OpNeg64_0(v *Value) bool {
61467
61468
61469
61470 for {
61471 x := v.Args[0]
61472 v.reset(OpAMD64NEGQ)
61473 v.AddArg(x)
61474 return true
61475 }
61476 }
61477 func rewriteValueAMD64_OpNeg64F_0(v *Value) bool {
61478 b := v.Block
61479 typ := &b.Func.Config.Types
61480
61481
61482
61483 for {
61484 x := v.Args[0]
61485 v.reset(OpAMD64PXOR)
61486 v.AddArg(x)
61487 v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
61488 v0.AuxInt = auxFrom64F(math.Copysign(0, -1))
61489 v.AddArg(v0)
61490 return true
61491 }
61492 }
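// Float negation above uses no NEG instruction: Neg32F/Neg64F XOR the value
// (PXOR) with a constant whose only set bit is the sign bit, produced here
// by auxFrom64F(math.Copysign(0, -1)), i.e. -0.0. Flipping the sign bit
// negates every input, including zeros, NaNs and infinities, which matches
// Go's definition of floating-point negation.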
61493 func rewriteValueAMD64_OpNeg8_0(v *Value) bool {
61494
61495
61496
61497 for {
61498 x := v.Args[0]
61499 v.reset(OpAMD64NEGL)
61500 v.AddArg(x)
61501 return true
61502 }
61503 }
61504 func rewriteValueAMD64_OpNeq16_0(v *Value) bool {
61505 b := v.Block
61506
61507
61508
61509 for {
61510 y := v.Args[1]
61511 x := v.Args[0]
61512 v.reset(OpAMD64SETNE)
61513 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
61514 v0.AddArg(x)
61515 v0.AddArg(y)
61516 v.AddArg(v0)
61517 return true
61518 }
61519 }
61520 func rewriteValueAMD64_OpNeq32_0(v *Value) bool {
61521 b := v.Block
61522
61523
61524
61525 for {
61526 y := v.Args[1]
61527 x := v.Args[0]
61528 v.reset(OpAMD64SETNE)
61529 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
61530 v0.AddArg(x)
61531 v0.AddArg(y)
61532 v.AddArg(v0)
61533 return true
61534 }
61535 }
61536 func rewriteValueAMD64_OpNeq32F_0(v *Value) bool {
61537 b := v.Block
61538
61539
61540
61541 for {
61542 y := v.Args[1]
61543 x := v.Args[0]
61544 v.reset(OpAMD64SETNEF)
61545 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
61546 v0.AddArg(x)
61547 v0.AddArg(y)
61548 v.AddArg(v0)
61549 return true
61550 }
61551 }
61552 func rewriteValueAMD64_OpNeq64_0(v *Value) bool {
61553 b := v.Block
61554
61555
61556
61557 for {
61558 y := v.Args[1]
61559 x := v.Args[0]
61560 v.reset(OpAMD64SETNE)
61561 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
61562 v0.AddArg(x)
61563 v0.AddArg(y)
61564 v.AddArg(v0)
61565 return true
61566 }
61567 }
61568 func rewriteValueAMD64_OpNeq64F_0(v *Value) bool {
61569 b := v.Block
61570
61571
61572
61573 for {
61574 y := v.Args[1]
61575 x := v.Args[0]
61576 v.reset(OpAMD64SETNEF)
61577 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
61578 v0.AddArg(x)
61579 v0.AddArg(y)
61580 v.AddArg(v0)
61581 return true
61582 }
61583 }
61584 func rewriteValueAMD64_OpNeq8_0(v *Value) bool {
61585 b := v.Block
61586
61587
61588
61589 for {
61590 y := v.Args[1]
61591 x := v.Args[0]
61592 v.reset(OpAMD64SETNE)
61593 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
61594 v0.AddArg(x)
61595 v0.AddArg(y)
61596 v.AddArg(v0)
61597 return true
61598 }
61599 }
61600 func rewriteValueAMD64_OpNeqB_0(v *Value) bool {
61601 b := v.Block
61602
61603
61604
61605 for {
61606 y := v.Args[1]
61607 x := v.Args[0]
61608 v.reset(OpAMD64SETNE)
61609 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
61610 v0.AddArg(x)
61611 v0.AddArg(y)
61612 v.AddArg(v0)
61613 return true
61614 }
61615 }
61616 func rewriteValueAMD64_OpNeqPtr_0(v *Value) bool {
61617 b := v.Block
61618 config := b.Func.Config
61619
61620
61621
61622 for {
61623 y := v.Args[1]
61624 x := v.Args[0]
61625 if !(config.PtrSize == 8) {
61626 break
61627 }
61628 v.reset(OpAMD64SETNE)
61629 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
61630 v0.AddArg(x)
61631 v0.AddArg(y)
61632 v.AddArg(v0)
61633 return true
61634 }
61635
61636
61637
61638 for {
61639 y := v.Args[1]
61640 x := v.Args[0]
61641 if !(config.PtrSize == 4) {
61642 break
61643 }
61644 v.reset(OpAMD64SETNE)
61645 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
61646 v0.AddArg(x)
61647 v0.AddArg(y)
61648 v.AddArg(v0)
61649 return true
61650 }
61651 return false
61652 }
61653 func rewriteValueAMD64_OpNilCheck_0(v *Value) bool {
61654
61655
61656
61657 for {
61658 mem := v.Args[1]
61659 ptr := v.Args[0]
61660 v.reset(OpAMD64LoweredNilCheck)
61661 v.AddArg(ptr)
61662 v.AddArg(mem)
61663 return true
61664 }
61665 }
61666 func rewriteValueAMD64_OpNot_0(v *Value) bool {
61667
61668
61669
61670 for {
61671 x := v.Args[0]
61672 v.reset(OpAMD64XORLconst)
61673 v.AuxInt = 1
61674 v.AddArg(x)
61675 return true
61676 }
61677 }
61678 func rewriteValueAMD64_OpOffPtr_0(v *Value) bool {
61679 b := v.Block
61680 config := b.Func.Config
61681 typ := &b.Func.Config.Types
61682
61683
61684
61685 for {
61686 off := v.AuxInt
61687 ptr := v.Args[0]
61688 if !(config.PtrSize == 8 && is32Bit(off)) {
61689 break
61690 }
61691 v.reset(OpAMD64ADDQconst)
61692 v.AuxInt = off
61693 v.AddArg(ptr)
61694 return true
61695 }
61696
61697
61698
61699 for {
61700 off := v.AuxInt
61701 ptr := v.Args[0]
61702 if !(config.PtrSize == 8) {
61703 break
61704 }
61705 v.reset(OpAMD64ADDQ)
61706 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
61707 v0.AuxInt = off
61708 v.AddArg(v0)
61709 v.AddArg(ptr)
61710 return true
61711 }
61712
61713
61714
61715 for {
61716 off := v.AuxInt
61717 ptr := v.Args[0]
61718 if !(config.PtrSize == 4) {
61719 break
61720 }
61721 v.reset(OpAMD64ADDLconst)
61722 v.AuxInt = off
61723 v.AddArg(ptr)
61724 return true
61725 }
61726 return false
61727 }
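// Rule order matters in OffPtr above: the is32Bit(off) case claims offsets
// that fit an ADDQconst immediate, the unconditional PtrSize==8 case is only
// reached for offsets that need a separate MOVQconst, and the 32-bit-pointer
// case uses ADDLconst. For example, an offset of 1<<40 fails is32Bit and is
// rebuilt as (ADDQ (MOVQconst [off]) ptr).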
61728 func rewriteValueAMD64_OpOr16_0(v *Value) bool {
61729
61730
61731
61732 for {
61733 y := v.Args[1]
61734 x := v.Args[0]
61735 v.reset(OpAMD64ORL)
61736 v.AddArg(x)
61737 v.AddArg(y)
61738 return true
61739 }
61740 }
61741 func rewriteValueAMD64_OpOr32_0(v *Value) bool {
61742
61743
61744
61745 for {
61746 y := v.Args[1]
61747 x := v.Args[0]
61748 v.reset(OpAMD64ORL)
61749 v.AddArg(x)
61750 v.AddArg(y)
61751 return true
61752 }
61753 }
61754 func rewriteValueAMD64_OpOr64_0(v *Value) bool {
61755
61756
61757
61758 for {
61759 y := v.Args[1]
61760 x := v.Args[0]
61761 v.reset(OpAMD64ORQ)
61762 v.AddArg(x)
61763 v.AddArg(y)
61764 return true
61765 }
61766 }
61767 func rewriteValueAMD64_OpOr8_0(v *Value) bool {
61768
61769
61770
61771 for {
61772 y := v.Args[1]
61773 x := v.Args[0]
61774 v.reset(OpAMD64ORL)
61775 v.AddArg(x)
61776 v.AddArg(y)
61777 return true
61778 }
61779 }
61780 func rewriteValueAMD64_OpOrB_0(v *Value) bool {
61781
61782
61783
61784 for {
61785 y := v.Args[1]
61786 x := v.Args[0]
61787 v.reset(OpAMD64ORL)
61788 v.AddArg(x)
61789 v.AddArg(y)
61790 return true
61791 }
61792 }
61793 func rewriteValueAMD64_OpPanicBounds_0(v *Value) bool {
61794
61795
61796
61797 for {
61798 kind := v.AuxInt
61799 mem := v.Args[2]
61800 x := v.Args[0]
61801 y := v.Args[1]
61802 if !(boundsABI(kind) == 0) {
61803 break
61804 }
61805 v.reset(OpAMD64LoweredPanicBoundsA)
61806 v.AuxInt = kind
61807 v.AddArg(x)
61808 v.AddArg(y)
61809 v.AddArg(mem)
61810 return true
61811 }
61812
61813
61814
61815 for {
61816 kind := v.AuxInt
61817 mem := v.Args[2]
61818 x := v.Args[0]
61819 y := v.Args[1]
61820 if !(boundsABI(kind) == 1) {
61821 break
61822 }
61823 v.reset(OpAMD64LoweredPanicBoundsB)
61824 v.AuxInt = kind
61825 v.AddArg(x)
61826 v.AddArg(y)
61827 v.AddArg(mem)
61828 return true
61829 }
61830
61831
61832
61833 for {
61834 kind := v.AuxInt
61835 mem := v.Args[2]
61836 x := v.Args[0]
61837 y := v.Args[1]
61838 if !(boundsABI(kind) == 2) {
61839 break
61840 }
61841 v.reset(OpAMD64LoweredPanicBoundsC)
61842 v.AuxInt = kind
61843 v.AddArg(x)
61844 v.AddArg(y)
61845 v.AddArg(mem)
61846 return true
61847 }
61848 return false
61849 }
61850 func rewriteValueAMD64_OpPanicExtend_0(v *Value) bool {
61851
61852
61853
61854 for {
61855 kind := v.AuxInt
61856 mem := v.Args[3]
61857 hi := v.Args[0]
61858 lo := v.Args[1]
61859 y := v.Args[2]
61860 if !(boundsABI(kind) == 0) {
61861 break
61862 }
61863 v.reset(OpAMD64LoweredPanicExtendA)
61864 v.AuxInt = kind
61865 v.AddArg(hi)
61866 v.AddArg(lo)
61867 v.AddArg(y)
61868 v.AddArg(mem)
61869 return true
61870 }
61871
61872
61873
61874 for {
61875 kind := v.AuxInt
61876 mem := v.Args[3]
61877 hi := v.Args[0]
61878 lo := v.Args[1]
61879 y := v.Args[2]
61880 if !(boundsABI(kind) == 1) {
61881 break
61882 }
61883 v.reset(OpAMD64LoweredPanicExtendB)
61884 v.AuxInt = kind
61885 v.AddArg(hi)
61886 v.AddArg(lo)
61887 v.AddArg(y)
61888 v.AddArg(mem)
61889 return true
61890 }
61891
61892
61893
61894 for {
61895 kind := v.AuxInt
61896 mem := v.Args[3]
61897 hi := v.Args[0]
61898 lo := v.Args[1]
61899 y := v.Args[2]
61900 if !(boundsABI(kind) == 2) {
61901 break
61902 }
61903 v.reset(OpAMD64LoweredPanicExtendC)
61904 v.AuxInt = kind
61905 v.AddArg(hi)
61906 v.AddArg(lo)
61907 v.AddArg(y)
61908 v.AddArg(mem)
61909 return true
61910 }
61911 return false
61912 }
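// Both panic lowerings above dispatch on boundsABI(kind) to one of three
// lowered variants (the A/B/C suffixes), which, as I read the generated
// matcher, differ only in which registers carry the operands; the bounds
// kind itself rides along unchanged in AuxInt. PanicExtend additionally
// carries an index too wide for a single register, split into hi/lo halves.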
61913 func rewriteValueAMD64_OpPopCount16_0(v *Value) bool {
61914 b := v.Block
61915 typ := &b.Func.Config.Types
61916
61917
61918
61919 for {
61920 x := v.Args[0]
61921 v.reset(OpAMD64POPCNTL)
61922 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
61923 v0.AddArg(x)
61924 v.AddArg(v0)
61925 return true
61926 }
61927 }
61928 func rewriteValueAMD64_OpPopCount32_0(v *Value) bool {
61929
61930
61931
61932 for {
61933 x := v.Args[0]
61934 v.reset(OpAMD64POPCNTL)
61935 v.AddArg(x)
61936 return true
61937 }
61938 }
61939 func rewriteValueAMD64_OpPopCount64_0(v *Value) bool {
61940
61941
61942
61943 for {
61944 x := v.Args[0]
61945 v.reset(OpAMD64POPCNTQ)
61946 v.AddArg(x)
61947 return true
61948 }
61949 }
61950 func rewriteValueAMD64_OpPopCount8_0(v *Value) bool {
61951 b := v.Block
61952 typ := &b.Func.Config.Types
61953
61954
61955
61956 for {
61957 x := v.Args[0]
61958 v.reset(OpAMD64POPCNTL)
61959 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
61960 v0.AddArg(x)
61961 v.AddArg(v0)
61962 return true
61963 }
61964 }
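// PopCount16/PopCount8 zero-extend first (MOVWQZX/MOVBQZX) so that POPCNTL
// counts only the original 16 or 8 bits; the 32- and 64-bit variants feed
// POPCNTL/POPCNTQ directly.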
61965 func rewriteValueAMD64_OpRotateLeft16_0(v *Value) bool {
61966
61967
61968
61969 for {
61970 b := v.Args[1]
61971 a := v.Args[0]
61972 v.reset(OpAMD64ROLW)
61973 v.AddArg(a)
61974 v.AddArg(b)
61975 return true
61976 }
61977 }
61978 func rewriteValueAMD64_OpRotateLeft32_0(v *Value) bool {
61979
61980
61981
61982 for {
61983 b := v.Args[1]
61984 a := v.Args[0]
61985 v.reset(OpAMD64ROLL)
61986 v.AddArg(a)
61987 v.AddArg(b)
61988 return true
61989 }
61990 }
61991 func rewriteValueAMD64_OpRotateLeft64_0(v *Value) bool {
61992
61993
61994
61995 for {
61996 b := v.Args[1]
61997 a := v.Args[0]
61998 v.reset(OpAMD64ROLQ)
61999 v.AddArg(a)
62000 v.AddArg(b)
62001 return true
62002 }
62003 }
62004 func rewriteValueAMD64_OpRotateLeft8_0(v *Value) bool {
62005
62006
62007
62008 for {
62009 b := v.Args[1]
62010 a := v.Args[0]
62011 v.reset(OpAMD64ROLB)
62012 v.AddArg(a)
62013 v.AddArg(b)
62014 return true
62015 }
62016 }
62017 func rewriteValueAMD64_OpRound32F_0(v *Value) bool {
62018
62019
62020
62021 for {
62022 x := v.Args[0]
62023 v.reset(OpCopy)
62024 v.Type = x.Type
62025 v.AddArg(x)
62026 return true
62027 }
62028 }
62029 func rewriteValueAMD64_OpRound64F_0(v *Value) bool {
62030
62031
62032
62033 for {
62034 x := v.Args[0]
62035 v.reset(OpCopy)
62036 v.Type = x.Type
62037 v.AddArg(x)
62038 return true
62039 }
62040 }
62041 func rewriteValueAMD64_OpRoundToEven_0(v *Value) bool {
62042
62043
62044
62045 for {
62046 x := v.Args[0]
62047 v.reset(OpAMD64ROUNDSD)
62048 v.AuxInt = 0
62049 v.AddArg(x)
62050 return true
62051 }
62052 }
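// RoundToEven maps to ROUNDSD with AuxInt 0, the SSE4.1 round-to-nearest-even
// mode. The AuxInt is the instruction's rounding-control immediate, so other
// modes would use the same pattern with a different constant (1 floor, 2 ceil,
// 3 trunc under the usual SSE4.1 encoding).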
62053 func rewriteValueAMD64_OpRsh16Ux16_0(v *Value) bool {
62054 b := v.Block
62055
62056
62057
62058 for {
62059 t := v.Type
62060 y := v.Args[1]
62061 x := v.Args[0]
62062 if !(!shiftIsBounded(v)) {
62063 break
62064 }
62065 v.reset(OpAMD64ANDL)
62066 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
62067 v0.AddArg(x)
62068 v0.AddArg(y)
62069 v.AddArg(v0)
62070 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
62071 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
62072 v2.AuxInt = 16
62073 v2.AddArg(y)
62074 v1.AddArg(v2)
62075 v.AddArg(v1)
62076 return true
62077 }
62078
62079
62080
62081 for {
62082 y := v.Args[1]
62083 x := v.Args[0]
62084 if !(shiftIsBounded(v)) {
62085 break
62086 }
62087 v.reset(OpAMD64SHRW)
62088 v.AddArg(x)
62089 v.AddArg(y)
62090 return true
62091 }
62092 return false
62093 }
62094 func rewriteValueAMD64_OpRsh16Ux32_0(v *Value) bool {
62095 b := v.Block
62096
62097
62098
62099 for {
62100 t := v.Type
62101 y := v.Args[1]
62102 x := v.Args[0]
62103 if !(!shiftIsBounded(v)) {
62104 break
62105 }
62106 v.reset(OpAMD64ANDL)
62107 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
62108 v0.AddArg(x)
62109 v0.AddArg(y)
62110 v.AddArg(v0)
62111 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
62112 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
62113 v2.AuxInt = 16
62114 v2.AddArg(y)
62115 v1.AddArg(v2)
62116 v.AddArg(v1)
62117 return true
62118 }
62119
62120
62121
62122 for {
62123 y := v.Args[1]
62124 x := v.Args[0]
62125 if !(shiftIsBounded(v)) {
62126 break
62127 }
62128 v.reset(OpAMD64SHRW)
62129 v.AddArg(x)
62130 v.AddArg(y)
62131 return true
62132 }
62133 return false
62134 }
62135 func rewriteValueAMD64_OpRsh16Ux64_0(v *Value) bool {
62136 b := v.Block
62137
62138
62139
62140 for {
62141 t := v.Type
62142 y := v.Args[1]
62143 x := v.Args[0]
62144 if !(!shiftIsBounded(v)) {
62145 break
62146 }
62147 v.reset(OpAMD64ANDL)
62148 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
62149 v0.AddArg(x)
62150 v0.AddArg(y)
62151 v.AddArg(v0)
62152 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
62153 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
62154 v2.AuxInt = 16
62155 v2.AddArg(y)
62156 v1.AddArg(v2)
62157 v.AddArg(v1)
62158 return true
62159 }
62160
62161
62162
62163 for {
62164 y := v.Args[1]
62165 x := v.Args[0]
62166 if !(shiftIsBounded(v)) {
62167 break
62168 }
62169 v.reset(OpAMD64SHRW)
62170 v.AddArg(x)
62171 v.AddArg(y)
62172 return true
62173 }
62174 return false
62175 }
62176 func rewriteValueAMD64_OpRsh16Ux8_0(v *Value) bool {
62177 b := v.Block
62178
62179
62180
62181 for {
62182 t := v.Type
62183 y := v.Args[1]
62184 x := v.Args[0]
62185 if !(!shiftIsBounded(v)) {
62186 break
62187 }
62188 v.reset(OpAMD64ANDL)
62189 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
62190 v0.AddArg(x)
62191 v0.AddArg(y)
62192 v.AddArg(v0)
62193 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
62194 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
62195 v2.AuxInt = 16
62196 v2.AddArg(y)
62197 v1.AddArg(v2)
62198 v.AddArg(v1)
62199 return true
62200 }
62201
62202
62203
62204 for {
62205 y := v.Args[1]
62206 x := v.Args[0]
62207 if !(shiftIsBounded(v)) {
62208 break
62209 }
62210 v.reset(OpAMD64SHRW)
62211 v.AddArg(x)
62212 v.AddArg(y)
62213 return true
62214 }
62215 return false
62216 }
62217 func rewriteValueAMD64_OpRsh16x16_0(v *Value) bool {
62218 b := v.Block
62219
62220
62221
62222 for {
62223 t := v.Type
62224 y := v.Args[1]
62225 x := v.Args[0]
62226 if !(!shiftIsBounded(v)) {
62227 break
62228 }
62229 v.reset(OpAMD64SARW)
62230 v.Type = t
62231 v.AddArg(x)
62232 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
62233 v0.AddArg(y)
62234 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
62235 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
62236 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
62237 v3.AuxInt = 16
62238 v3.AddArg(y)
62239 v2.AddArg(v3)
62240 v1.AddArg(v2)
62241 v0.AddArg(v1)
62242 v.AddArg(v0)
62243 return true
62244 }
62245
62246
62247
62248 for {
62249 y := v.Args[1]
62250 x := v.Args[0]
62251 if !(shiftIsBounded(v)) {
62252 break
62253 }
62254 v.reset(OpAMD64SARW)
62255 v.AddArg(x)
62256 v.AddArg(y)
62257 return true
62258 }
62259 return false
62260 }
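// The signed right shifts take a different route from the unsigned ones:
// instead of masking the result, the rule above clamps the shift count. The
// ORL/NOTL/SBBLcarrymask chain yields y itself while y < 16 and all ones
// otherwise, so an oversized count becomes a shift by 31 (SARW masks the
// count register to 5 bits) and the result saturates to pure sign bits, as
// Go requires for signed shifts past the operand width.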
62261 func rewriteValueAMD64_OpRsh16x32_0(v *Value) bool {
62262 b := v.Block
62263
62264
62265
62266 for {
62267 t := v.Type
62268 y := v.Args[1]
62269 x := v.Args[0]
62270 if !(!shiftIsBounded(v)) {
62271 break
62272 }
62273 v.reset(OpAMD64SARW)
62274 v.Type = t
62275 v.AddArg(x)
62276 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
62277 v0.AddArg(y)
62278 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
62279 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
62280 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
62281 v3.AuxInt = 16
62282 v3.AddArg(y)
62283 v2.AddArg(v3)
62284 v1.AddArg(v2)
62285 v0.AddArg(v1)
62286 v.AddArg(v0)
62287 return true
62288 }
62289
62290
62291
62292 for {
62293 y := v.Args[1]
62294 x := v.Args[0]
62295 if !(shiftIsBounded(v)) {
62296 break
62297 }
62298 v.reset(OpAMD64SARW)
62299 v.AddArg(x)
62300 v.AddArg(y)
62301 return true
62302 }
62303 return false
62304 }
62305 func rewriteValueAMD64_OpRsh16x64_0(v *Value) bool {
62306 b := v.Block
62307
62308
62309
62310 for {
62311 t := v.Type
62312 y := v.Args[1]
62313 x := v.Args[0]
62314 if !(!shiftIsBounded(v)) {
62315 break
62316 }
62317 v.reset(OpAMD64SARW)
62318 v.Type = t
62319 v.AddArg(x)
62320 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
62321 v0.AddArg(y)
62322 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
62323 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
62324 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
62325 v3.AuxInt = 16
62326 v3.AddArg(y)
62327 v2.AddArg(v3)
62328 v1.AddArg(v2)
62329 v0.AddArg(v1)
62330 v.AddArg(v0)
62331 return true
62332 }
62333
62334
62335
62336 for {
62337 y := v.Args[1]
62338 x := v.Args[0]
62339 if !(shiftIsBounded(v)) {
62340 break
62341 }
62342 v.reset(OpAMD64SARW)
62343 v.AddArg(x)
62344 v.AddArg(y)
62345 return true
62346 }
62347 return false
62348 }
62349 func rewriteValueAMD64_OpRsh16x8_0(v *Value) bool {
62350 b := v.Block
62351
62352
62353
62354 for {
62355 t := v.Type
62356 y := v.Args[1]
62357 x := v.Args[0]
62358 if !(!shiftIsBounded(v)) {
62359 break
62360 }
62361 v.reset(OpAMD64SARW)
62362 v.Type = t
62363 v.AddArg(x)
62364 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
62365 v0.AddArg(y)
62366 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
62367 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
62368 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
62369 v3.AuxInt = 16
62370 v3.AddArg(y)
62371 v2.AddArg(v3)
62372 v1.AddArg(v2)
62373 v0.AddArg(v1)
62374 v.AddArg(v0)
62375 return true
62376 }
62377
62378
62379
62380 for {
62381 y := v.Args[1]
62382 x := v.Args[0]
62383 if !(shiftIsBounded(v)) {
62384 break
62385 }
62386 v.reset(OpAMD64SARW)
62387 v.AddArg(x)
62388 v.AddArg(y)
62389 return true
62390 }
62391 return false
62392 }
62393 func rewriteValueAMD64_OpRsh32Ux16_0(v *Value) bool {
62394 b := v.Block
62395
62396
62397
62398 for {
62399 t := v.Type
62400 y := v.Args[1]
62401 x := v.Args[0]
62402 if !(!shiftIsBounded(v)) {
62403 break
62404 }
62405 v.reset(OpAMD64ANDL)
62406 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
62407 v0.AddArg(x)
62408 v0.AddArg(y)
62409 v.AddArg(v0)
62410 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
62411 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
62412 v2.AuxInt = 32
62413 v2.AddArg(y)
62414 v1.AddArg(v2)
62415 v.AddArg(v1)
62416 return true
62417 }
62418
62419
62420
62421 for {
62422 y := v.Args[1]
62423 x := v.Args[0]
62424 if !(shiftIsBounded(v)) {
62425 break
62426 }
62427 v.reset(OpAMD64SHRL)
62428 v.AddArg(x)
62429 v.AddArg(y)
62430 return true
62431 }
62432 return false
62433 }
62434 func rewriteValueAMD64_OpRsh32Ux32_0(v *Value) bool {
62435 b := v.Block
62436
62437
62438
62439 for {
62440 t := v.Type
62441 y := v.Args[1]
62442 x := v.Args[0]
62443 if !(!shiftIsBounded(v)) {
62444 break
62445 }
62446 v.reset(OpAMD64ANDL)
62447 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
62448 v0.AddArg(x)
62449 v0.AddArg(y)
62450 v.AddArg(v0)
62451 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
62452 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
62453 v2.AuxInt = 32
62454 v2.AddArg(y)
62455 v1.AddArg(v2)
62456 v.AddArg(v1)
62457 return true
62458 }
62459
62460
62461
62462 for {
62463 y := v.Args[1]
62464 x := v.Args[0]
62465 if !(shiftIsBounded(v)) {
62466 break
62467 }
62468 v.reset(OpAMD64SHRL)
62469 v.AddArg(x)
62470 v.AddArg(y)
62471 return true
62472 }
62473 return false
62474 }
62475 func rewriteValueAMD64_OpRsh32Ux64_0(v *Value) bool {
62476 b := v.Block
62477
62478
62479
62480 for {
62481 t := v.Type
62482 y := v.Args[1]
62483 x := v.Args[0]
62484 if !(!shiftIsBounded(v)) {
62485 break
62486 }
62487 v.reset(OpAMD64ANDL)
62488 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
62489 v0.AddArg(x)
62490 v0.AddArg(y)
62491 v.AddArg(v0)
62492 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
62493 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
62494 v2.AuxInt = 32
62495 v2.AddArg(y)
62496 v1.AddArg(v2)
62497 v.AddArg(v1)
62498 return true
62499 }
62500
62501
62502
62503 for {
62504 y := v.Args[1]
62505 x := v.Args[0]
62506 if !(shiftIsBounded(v)) {
62507 break
62508 }
62509 v.reset(OpAMD64SHRL)
62510 v.AddArg(x)
62511 v.AddArg(y)
62512 return true
62513 }
62514 return false
62515 }
62516 func rewriteValueAMD64_OpRsh32Ux8_0(v *Value) bool {
62517 b := v.Block
62518
62519
62520
62521 for {
62522 t := v.Type
62523 y := v.Args[1]
62524 x := v.Args[0]
62525 if !(!shiftIsBounded(v)) {
62526 break
62527 }
62528 v.reset(OpAMD64ANDL)
62529 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
62530 v0.AddArg(x)
62531 v0.AddArg(y)
62532 v.AddArg(v0)
62533 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
62534 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
62535 v2.AuxInt = 32
62536 v2.AddArg(y)
62537 v1.AddArg(v2)
62538 v.AddArg(v1)
62539 return true
62540 }
62541
62542
62543
62544 for {
62545 y := v.Args[1]
62546 x := v.Args[0]
62547 if !(shiftIsBounded(v)) {
62548 break
62549 }
62550 v.reset(OpAMD64SHRL)
62551 v.AddArg(x)
62552 v.AddArg(y)
62553 return true
62554 }
62555 return false
62556 }
62557 func rewriteValueAMD64_OpRsh32x16_0(v *Value) bool {
62558 b := v.Block
62559
62560
62561
62562 for {
62563 t := v.Type
62564 y := v.Args[1]
62565 x := v.Args[0]
62566 if !(!shiftIsBounded(v)) {
62567 break
62568 }
62569 v.reset(OpAMD64SARL)
62570 v.Type = t
62571 v.AddArg(x)
62572 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
62573 v0.AddArg(y)
62574 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
62575 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
62576 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
62577 v3.AuxInt = 32
62578 v3.AddArg(y)
62579 v2.AddArg(v3)
62580 v1.AddArg(v2)
62581 v0.AddArg(v1)
62582 v.AddArg(v0)
62583 return true
62584 }
62585
62586
62587
62588 for {
62589 y := v.Args[1]
62590 x := v.Args[0]
62591 if !(shiftIsBounded(v)) {
62592 break
62593 }
62594 v.reset(OpAMD64SARL)
62595 v.AddArg(x)
62596 v.AddArg(y)
62597 return true
62598 }
62599 return false
62600 }
62601 func rewriteValueAMD64_OpRsh32x32_0(v *Value) bool {
62602 b := v.Block
// match: (Rsh32x32 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
62606 for {
62607 t := v.Type
62608 y := v.Args[1]
62609 x := v.Args[0]
62610 if !(!shiftIsBounded(v)) {
62611 break
62612 }
62613 v.reset(OpAMD64SARL)
62614 v.Type = t
62615 v.AddArg(x)
62616 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
62617 v0.AddArg(y)
62618 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
62619 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
62620 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
62621 v3.AuxInt = 32
62622 v3.AddArg(y)
62623 v2.AddArg(v3)
62624 v1.AddArg(v2)
62625 v0.AddArg(v1)
62626 v.AddArg(v0)
62627 return true
62628 }
// match: (Rsh32x32 x y)
// cond: shiftIsBounded(v)
// result: (SARL x y)
62632 for {
62633 y := v.Args[1]
62634 x := v.Args[0]
62635 if !(shiftIsBounded(v)) {
62636 break
62637 }
62638 v.reset(OpAMD64SARL)
62639 v.AddArg(x)
62640 v.AddArg(y)
62641 return true
62642 }
62643 return false
62644 }
62645 func rewriteValueAMD64_OpRsh32x64_0(v *Value) bool {
62646 b := v.Block
// match: (Rsh32x64 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
62650 for {
62651 t := v.Type
62652 y := v.Args[1]
62653 x := v.Args[0]
62654 if !(!shiftIsBounded(v)) {
62655 break
62656 }
62657 v.reset(OpAMD64SARL)
62658 v.Type = t
62659 v.AddArg(x)
62660 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
62661 v0.AddArg(y)
62662 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
62663 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
62664 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
62665 v3.AuxInt = 32
62666 v3.AddArg(y)
62667 v2.AddArg(v3)
62668 v1.AddArg(v2)
62669 v0.AddArg(v1)
62670 v.AddArg(v0)
62671 return true
62672 }
// match: (Rsh32x64 x y)
// cond: shiftIsBounded(v)
// result: (SARL x y)
62676 for {
62677 y := v.Args[1]
62678 x := v.Args[0]
62679 if !(shiftIsBounded(v)) {
62680 break
62681 }
62682 v.reset(OpAMD64SARL)
62683 v.AddArg(x)
62684 v.AddArg(y)
62685 return true
62686 }
62687 return false
62688 }
62689 func rewriteValueAMD64_OpRsh32x8_0(v *Value) bool {
62690 b := v.Block
// match: (Rsh32x8 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
62694 for {
62695 t := v.Type
62696 y := v.Args[1]
62697 x := v.Args[0]
62698 if !(!shiftIsBounded(v)) {
62699 break
62700 }
62701 v.reset(OpAMD64SARL)
62702 v.Type = t
62703 v.AddArg(x)
62704 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
62705 v0.AddArg(y)
62706 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
62707 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
62708 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
62709 v3.AuxInt = 32
62710 v3.AddArg(y)
62711 v2.AddArg(v3)
62712 v1.AddArg(v2)
62713 v0.AddArg(v1)
62714 v.AddArg(v0)
62715 return true
62716 }
// match: (Rsh32x8 x y)
// cond: shiftIsBounded(v)
// result: (SARL x y)
62720 for {
62721 y := v.Args[1]
62722 x := v.Args[0]
62723 if !(shiftIsBounded(v)) {
62724 break
62725 }
62726 v.reset(OpAMD64SARL)
62727 v.AddArg(x)
62728 v.AddArg(y)
62729 return true
62730 }
62731 return false
62732 }
62733 func rewriteValueAMD64_OpRsh64Ux16_0(v *Value) bool {
62734 b := v.Block
// match: (Rsh64Ux16 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
62738 for {
62739 t := v.Type
62740 y := v.Args[1]
62741 x := v.Args[0]
62742 if !(!shiftIsBounded(v)) {
62743 break
62744 }
62745 v.reset(OpAMD64ANDQ)
62746 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
62747 v0.AddArg(x)
62748 v0.AddArg(y)
62749 v.AddArg(v0)
62750 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
62751 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
62752 v2.AuxInt = 64
62753 v2.AddArg(y)
62754 v1.AddArg(v2)
62755 v.AddArg(v1)
62756 return true
62757 }
// match: (Rsh64Ux16 x y)
// cond: shiftIsBounded(v)
// result: (SHRQ x y)
62761 for {
62762 y := v.Args[1]
62763 x := v.Args[0]
62764 if !(shiftIsBounded(v)) {
62765 break
62766 }
62767 v.reset(OpAMD64SHRQ)
62768 v.AddArg(x)
62769 v.AddArg(y)
62770 return true
62771 }
62772 return false
62773 }
62774 func rewriteValueAMD64_OpRsh64Ux32_0(v *Value) bool {
62775 b := v.Block
// match: (Rsh64Ux32 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
62779 for {
62780 t := v.Type
62781 y := v.Args[1]
62782 x := v.Args[0]
62783 if !(!shiftIsBounded(v)) {
62784 break
62785 }
62786 v.reset(OpAMD64ANDQ)
62787 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
62788 v0.AddArg(x)
62789 v0.AddArg(y)
62790 v.AddArg(v0)
62791 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
62792 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
62793 v2.AuxInt = 64
62794 v2.AddArg(y)
62795 v1.AddArg(v2)
62796 v.AddArg(v1)
62797 return true
62798 }
// match: (Rsh64Ux32 x y)
// cond: shiftIsBounded(v)
// result: (SHRQ x y)
62802 for {
62803 y := v.Args[1]
62804 x := v.Args[0]
62805 if !(shiftIsBounded(v)) {
62806 break
62807 }
62808 v.reset(OpAMD64SHRQ)
62809 v.AddArg(x)
62810 v.AddArg(y)
62811 return true
62812 }
62813 return false
62814 }
62815 func rewriteValueAMD64_OpRsh64Ux64_0(v *Value) bool {
62816 b := v.Block
// match: (Rsh64Ux64 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
62820 for {
62821 t := v.Type
62822 y := v.Args[1]
62823 x := v.Args[0]
62824 if !(!shiftIsBounded(v)) {
62825 break
62826 }
62827 v.reset(OpAMD64ANDQ)
62828 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
62829 v0.AddArg(x)
62830 v0.AddArg(y)
62831 v.AddArg(v0)
62832 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
62833 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
62834 v2.AuxInt = 64
62835 v2.AddArg(y)
62836 v1.AddArg(v2)
62837 v.AddArg(v1)
62838 return true
62839 }
// match: (Rsh64Ux64 x y)
// cond: shiftIsBounded(v)
// result: (SHRQ x y)
62843 for {
62844 y := v.Args[1]
62845 x := v.Args[0]
62846 if !(shiftIsBounded(v)) {
62847 break
62848 }
62849 v.reset(OpAMD64SHRQ)
62850 v.AddArg(x)
62851 v.AddArg(y)
62852 return true
62853 }
62854 return false
62855 }
62856 func rewriteValueAMD64_OpRsh64Ux8_0(v *Value) bool {
62857 b := v.Block
// match: (Rsh64Ux8 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
62861 for {
62862 t := v.Type
62863 y := v.Args[1]
62864 x := v.Args[0]
62865 if !(!shiftIsBounded(v)) {
62866 break
62867 }
62868 v.reset(OpAMD64ANDQ)
62869 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
62870 v0.AddArg(x)
62871 v0.AddArg(y)
62872 v.AddArg(v0)
62873 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
62874 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
62875 v2.AuxInt = 64
62876 v2.AddArg(y)
62877 v1.AddArg(v2)
62878 v.AddArg(v1)
62879 return true
62880 }
// match: (Rsh64Ux8 x y)
// cond: shiftIsBounded(v)
// result: (SHRQ x y)
62884 for {
62885 y := v.Args[1]
62886 x := v.Args[0]
62887 if !(shiftIsBounded(v)) {
62888 break
62889 }
62890 v.reset(OpAMD64SHRQ)
62891 v.AddArg(x)
62892 v.AddArg(y)
62893 return true
62894 }
62895 return false
62896 }
62897 func rewriteValueAMD64_OpRsh64x16_0(v *Value) bool {
62898 b := v.Block
// match: (Rsh64x16 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
62902 for {
62903 t := v.Type
62904 y := v.Args[1]
62905 x := v.Args[0]
62906 if !(!shiftIsBounded(v)) {
62907 break
62908 }
62909 v.reset(OpAMD64SARQ)
62910 v.Type = t
62911 v.AddArg(x)
62912 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
62913 v0.AddArg(y)
62914 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
62915 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
62916 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
62917 v3.AuxInt = 64
62918 v3.AddArg(y)
62919 v2.AddArg(v3)
62920 v1.AddArg(v2)
62921 v0.AddArg(v1)
62922 v.AddArg(v0)
62923 return true
62924 }
// match: (Rsh64x16 x y)
// cond: shiftIsBounded(v)
// result: (SARQ x y)
62928 for {
62929 y := v.Args[1]
62930 x := v.Args[0]
62931 if !(shiftIsBounded(v)) {
62932 break
62933 }
62934 v.reset(OpAMD64SARQ)
62935 v.AddArg(x)
62936 v.AddArg(y)
62937 return true
62938 }
62939 return false
62940 }
62941 func rewriteValueAMD64_OpRsh64x32_0(v *Value) bool {
62942 b := v.Block
// match: (Rsh64x32 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
62946 for {
62947 t := v.Type
62948 y := v.Args[1]
62949 x := v.Args[0]
62950 if !(!shiftIsBounded(v)) {
62951 break
62952 }
62953 v.reset(OpAMD64SARQ)
62954 v.Type = t
62955 v.AddArg(x)
62956 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
62957 v0.AddArg(y)
62958 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
62959 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
62960 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
62961 v3.AuxInt = 64
62962 v3.AddArg(y)
62963 v2.AddArg(v3)
62964 v1.AddArg(v2)
62965 v0.AddArg(v1)
62966 v.AddArg(v0)
62967 return true
62968 }
// match: (Rsh64x32 x y)
// cond: shiftIsBounded(v)
// result: (SARQ x y)
62972 for {
62973 y := v.Args[1]
62974 x := v.Args[0]
62975 if !(shiftIsBounded(v)) {
62976 break
62977 }
62978 v.reset(OpAMD64SARQ)
62979 v.AddArg(x)
62980 v.AddArg(y)
62981 return true
62982 }
62983 return false
62984 }
62985 func rewriteValueAMD64_OpRsh64x64_0(v *Value) bool {
62986 b := v.Block
// match: (Rsh64x64 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
62990 for {
62991 t := v.Type
62992 y := v.Args[1]
62993 x := v.Args[0]
62994 if !(!shiftIsBounded(v)) {
62995 break
62996 }
62997 v.reset(OpAMD64SARQ)
62998 v.Type = t
62999 v.AddArg(x)
63000 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
63001 v0.AddArg(y)
63002 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
63003 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
63004 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
63005 v3.AuxInt = 64
63006 v3.AddArg(y)
63007 v2.AddArg(v3)
63008 v1.AddArg(v2)
63009 v0.AddArg(v1)
63010 v.AddArg(v0)
63011 return true
63012 }
// match: (Rsh64x64 x y)
// cond: shiftIsBounded(v)
// result: (SARQ x y)
63016 for {
63017 y := v.Args[1]
63018 x := v.Args[0]
63019 if !(shiftIsBounded(v)) {
63020 break
63021 }
63022 v.reset(OpAMD64SARQ)
63023 v.AddArg(x)
63024 v.AddArg(y)
63025 return true
63026 }
63027 return false
63028 }
63029 func rewriteValueAMD64_OpRsh64x8_0(v *Value) bool {
63030 b := v.Block
// match: (Rsh64x8 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
63034 for {
63035 t := v.Type
63036 y := v.Args[1]
63037 x := v.Args[0]
63038 if !(!shiftIsBounded(v)) {
63039 break
63040 }
63041 v.reset(OpAMD64SARQ)
63042 v.Type = t
63043 v.AddArg(x)
63044 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
63045 v0.AddArg(y)
63046 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
63047 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
63048 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
63049 v3.AuxInt = 64
63050 v3.AddArg(y)
63051 v2.AddArg(v3)
63052 v1.AddArg(v2)
63053 v0.AddArg(v1)
63054 v.AddArg(v0)
63055 return true
63056 }
// match: (Rsh64x8 x y)
// cond: shiftIsBounded(v)
// result: (SARQ x y)
63060 for {
63061 y := v.Args[1]
63062 x := v.Args[0]
63063 if !(shiftIsBounded(v)) {
63064 break
63065 }
63066 v.reset(OpAMD64SARQ)
63067 v.AddArg(x)
63068 v.AddArg(y)
63069 return true
63070 }
63071 return false
63072 }
63073 func rewriteValueAMD64_OpRsh8Ux16_0(v *Value) bool {
63074 b := v.Block
// match: (Rsh8Ux16 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
63078 for {
63079 t := v.Type
63080 y := v.Args[1]
63081 x := v.Args[0]
63082 if !(!shiftIsBounded(v)) {
63083 break
63084 }
63085 v.reset(OpAMD64ANDL)
63086 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
63087 v0.AddArg(x)
63088 v0.AddArg(y)
63089 v.AddArg(v0)
63090 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
63091 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
63092 v2.AuxInt = 8
63093 v2.AddArg(y)
63094 v1.AddArg(v2)
63095 v.AddArg(v1)
63096 return true
63097 }
// match: (Rsh8Ux16 x y)
// cond: shiftIsBounded(v)
// result: (SHRB x y)
63101 for {
63102 y := v.Args[1]
63103 x := v.Args[0]
63104 if !(shiftIsBounded(v)) {
63105 break
63106 }
63107 v.reset(OpAMD64SHRB)
63108 v.AddArg(x)
63109 v.AddArg(y)
63110 return true
63111 }
63112 return false
63113 }
63114 func rewriteValueAMD64_OpRsh8Ux32_0(v *Value) bool {
63115 b := v.Block
// match: (Rsh8Ux32 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
63119 for {
63120 t := v.Type
63121 y := v.Args[1]
63122 x := v.Args[0]
63123 if !(!shiftIsBounded(v)) {
63124 break
63125 }
63126 v.reset(OpAMD64ANDL)
63127 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
63128 v0.AddArg(x)
63129 v0.AddArg(y)
63130 v.AddArg(v0)
63131 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
63132 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
63133 v2.AuxInt = 8
63134 v2.AddArg(y)
63135 v1.AddArg(v2)
63136 v.AddArg(v1)
63137 return true
63138 }
// match: (Rsh8Ux32 x y)
// cond: shiftIsBounded(v)
// result: (SHRB x y)
63142 for {
63143 y := v.Args[1]
63144 x := v.Args[0]
63145 if !(shiftIsBounded(v)) {
63146 break
63147 }
63148 v.reset(OpAMD64SHRB)
63149 v.AddArg(x)
63150 v.AddArg(y)
63151 return true
63152 }
63153 return false
63154 }
63155 func rewriteValueAMD64_OpRsh8Ux64_0(v *Value) bool {
63156 b := v.Block
// match: (Rsh8Ux64 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
63160 for {
63161 t := v.Type
63162 y := v.Args[1]
63163 x := v.Args[0]
63164 if !(!shiftIsBounded(v)) {
63165 break
63166 }
63167 v.reset(OpAMD64ANDL)
63168 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
63169 v0.AddArg(x)
63170 v0.AddArg(y)
63171 v.AddArg(v0)
63172 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
63173 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
63174 v2.AuxInt = 8
63175 v2.AddArg(y)
63176 v1.AddArg(v2)
63177 v.AddArg(v1)
63178 return true
63179 }
// match: (Rsh8Ux64 x y)
// cond: shiftIsBounded(v)
// result: (SHRB x y)
63183 for {
63184 y := v.Args[1]
63185 x := v.Args[0]
63186 if !(shiftIsBounded(v)) {
63187 break
63188 }
63189 v.reset(OpAMD64SHRB)
63190 v.AddArg(x)
63191 v.AddArg(y)
63192 return true
63193 }
63194 return false
63195 }
63196 func rewriteValueAMD64_OpRsh8Ux8_0(v *Value) bool {
63197 b := v.Block
// match: (Rsh8Ux8 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
63201 for {
63202 t := v.Type
63203 y := v.Args[1]
63204 x := v.Args[0]
63205 if !(!shiftIsBounded(v)) {
63206 break
63207 }
63208 v.reset(OpAMD64ANDL)
63209 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
63210 v0.AddArg(x)
63211 v0.AddArg(y)
63212 v.AddArg(v0)
63213 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
63214 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
63215 v2.AuxInt = 8
63216 v2.AddArg(y)
63217 v1.AddArg(v2)
63218 v.AddArg(v1)
63219 return true
63220 }
// match: (Rsh8Ux8 x y)
// cond: shiftIsBounded(v)
// result: (SHRB x y)
63224 for {
63225 y := v.Args[1]
63226 x := v.Args[0]
63227 if !(shiftIsBounded(v)) {
63228 break
63229 }
63230 v.reset(OpAMD64SHRB)
63231 v.AddArg(x)
63232 v.AddArg(y)
63233 return true
63234 }
63235 return false
63236 }
63237 func rewriteValueAMD64_OpRsh8x16_0(v *Value) bool {
63238 b := v.Block
// match: (Rsh8x16 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
63242 for {
63243 t := v.Type
63244 y := v.Args[1]
63245 x := v.Args[0]
63246 if !(!shiftIsBounded(v)) {
63247 break
63248 }
63249 v.reset(OpAMD64SARB)
63250 v.Type = t
63251 v.AddArg(x)
63252 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
63253 v0.AddArg(y)
63254 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
63255 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
63256 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
63257 v3.AuxInt = 8
63258 v3.AddArg(y)
63259 v2.AddArg(v3)
63260 v1.AddArg(v2)
63261 v0.AddArg(v1)
63262 v.AddArg(v0)
63263 return true
63264 }
// match: (Rsh8x16 x y)
// cond: shiftIsBounded(v)
// result: (SARB x y)
63268 for {
63269 y := v.Args[1]
63270 x := v.Args[0]
63271 if !(shiftIsBounded(v)) {
63272 break
63273 }
63274 v.reset(OpAMD64SARB)
63275 v.AddArg(x)
63276 v.AddArg(y)
63277 return true
63278 }
63279 return false
63280 }
63281 func rewriteValueAMD64_OpRsh8x32_0(v *Value) bool {
63282 b := v.Block
// match: (Rsh8x32 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
63286 for {
63287 t := v.Type
63288 y := v.Args[1]
63289 x := v.Args[0]
63290 if !(!shiftIsBounded(v)) {
63291 break
63292 }
63293 v.reset(OpAMD64SARB)
63294 v.Type = t
63295 v.AddArg(x)
63296 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
63297 v0.AddArg(y)
63298 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
63299 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
63300 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
63301 v3.AuxInt = 8
63302 v3.AddArg(y)
63303 v2.AddArg(v3)
63304 v1.AddArg(v2)
63305 v0.AddArg(v1)
63306 v.AddArg(v0)
63307 return true
63308 }
// match: (Rsh8x32 x y)
// cond: shiftIsBounded(v)
// result: (SARB x y)
63312 for {
63313 y := v.Args[1]
63314 x := v.Args[0]
63315 if !(shiftIsBounded(v)) {
63316 break
63317 }
63318 v.reset(OpAMD64SARB)
63319 v.AddArg(x)
63320 v.AddArg(y)
63321 return true
63322 }
63323 return false
63324 }
63325 func rewriteValueAMD64_OpRsh8x64_0(v *Value) bool {
63326 b := v.Block
// match: (Rsh8x64 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
63330 for {
63331 t := v.Type
63332 y := v.Args[1]
63333 x := v.Args[0]
63334 if !(!shiftIsBounded(v)) {
63335 break
63336 }
63337 v.reset(OpAMD64SARB)
63338 v.Type = t
63339 v.AddArg(x)
63340 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
63341 v0.AddArg(y)
63342 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
63343 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
63344 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
63345 v3.AuxInt = 8
63346 v3.AddArg(y)
63347 v2.AddArg(v3)
63348 v1.AddArg(v2)
63349 v0.AddArg(v1)
63350 v.AddArg(v0)
63351 return true
63352 }
// match: (Rsh8x64 x y)
// cond: shiftIsBounded(v)
// result: (SARB x y)
63356 for {
63357 y := v.Args[1]
63358 x := v.Args[0]
63359 if !(shiftIsBounded(v)) {
63360 break
63361 }
63362 v.reset(OpAMD64SARB)
63363 v.AddArg(x)
63364 v.AddArg(y)
63365 return true
63366 }
63367 return false
63368 }
63369 func rewriteValueAMD64_OpRsh8x8_0(v *Value) bool {
63370 b := v.Block
// match: (Rsh8x8 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
63374 for {
63375 t := v.Type
63376 y := v.Args[1]
63377 x := v.Args[0]
63378 if !(!shiftIsBounded(v)) {
63379 break
63380 }
63381 v.reset(OpAMD64SARB)
63382 v.Type = t
63383 v.AddArg(x)
63384 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
63385 v0.AddArg(y)
63386 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
63387 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
63388 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
63389 v3.AuxInt = 8
63390 v3.AddArg(y)
63391 v2.AddArg(v3)
63392 v1.AddArg(v2)
63393 v0.AddArg(v1)
63394 v.AddArg(v0)
63395 return true
63396 }
// match: (Rsh8x8 x y)
// cond: shiftIsBounded(v)
// result: (SARB x y)
63400 for {
63401 y := v.Args[1]
63402 x := v.Args[0]
63403 if !(shiftIsBounded(v)) {
63404 break
63405 }
63406 v.reset(OpAMD64SARB)
63407 v.AddArg(x)
63408 v.AddArg(y)
63409 return true
63410 }
63411 return false
63412 }
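// Select0 and Select1 pick the value and flags halves of tuple-producing
// operations. The rules that follow lower the generic overflow and carry
// intrinsics (Mul64uover, Add64carry, Sub64borrow) onto AMD64 tuple ops
// such as MULQU, ADCQ, SBBQ, and NEGLflags.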
63413 func rewriteValueAMD64_OpSelect0_0(v *Value) bool {
63414 b := v.Block
63415 typ := &b.Func.Config.Types
// match: (Select0 (Mul64uover x y))
// cond:
// result: (Select0 <typ.UInt64> (MULQU x y))
63419 for {
63420 v_0 := v.Args[0]
63421 if v_0.Op != OpMul64uover {
63422 break
63423 }
63424 y := v_0.Args[1]
63425 x := v_0.Args[0]
63426 v.reset(OpSelect0)
63427 v.Type = typ.UInt64
63428 v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
63429 v0.AddArg(x)
63430 v0.AddArg(y)
63431 v.AddArg(v0)
63432 return true
63433 }
// match: (Select0 (Mul32uover x y))
// cond:
// result: (Select0 <typ.UInt32> (MULLU x y))
63437 for {
63438 v_0 := v.Args[0]
63439 if v_0.Op != OpMul32uover {
63440 break
63441 }
63442 y := v_0.Args[1]
63443 x := v_0.Args[0]
63444 v.reset(OpSelect0)
63445 v.Type = typ.UInt32
63446 v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
63447 v0.AddArg(x)
63448 v0.AddArg(y)
63449 v.AddArg(v0)
63450 return true
63451 }
// match: (Select0 (Add64carry x y c))
// cond:
// result: (Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
63455 for {
63456 v_0 := v.Args[0]
63457 if v_0.Op != OpAdd64carry {
63458 break
63459 }
63460 c := v_0.Args[2]
63461 x := v_0.Args[0]
63462 y := v_0.Args[1]
63463 v.reset(OpSelect0)
63464 v.Type = typ.UInt64
63465 v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
63466 v0.AddArg(x)
63467 v0.AddArg(y)
63468 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
63469 v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
63470 v2.AddArg(c)
63471 v1.AddArg(v2)
63472 v0.AddArg(v1)
63473 v.AddArg(v0)
63474 return true
63475 }
// match: (Select0 (Sub64borrow x y c))
// cond:
// result: (Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
63479 for {
63480 v_0 := v.Args[0]
63481 if v_0.Op != OpSub64borrow {
63482 break
63483 }
63484 c := v_0.Args[2]
63485 x := v_0.Args[0]
63486 y := v_0.Args[1]
63487 v.reset(OpSelect0)
63488 v.Type = typ.UInt64
63489 v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
63490 v0.AddArg(x)
63491 v0.AddArg(y)
63492 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
63493 v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
63494 v2.AddArg(c)
63495 v1.AddArg(v2)
63496 v0.AddArg(v1)
63497 v.AddArg(v0)
63498 return true
63499 }
// match: (Select0 <t> (AddTupleFirst32 val tuple))
// cond:
// result: (ADDL val (Select0 <t> tuple))
63503 for {
63504 t := v.Type
63505 v_0 := v.Args[0]
63506 if v_0.Op != OpAMD64AddTupleFirst32 {
63507 break
63508 }
63509 tuple := v_0.Args[1]
63510 val := v_0.Args[0]
63511 v.reset(OpAMD64ADDL)
63512 v.AddArg(val)
63513 v0 := b.NewValue0(v.Pos, OpSelect0, t)
63514 v0.AddArg(tuple)
63515 v.AddArg(v0)
63516 return true
63517 }
// match: (Select0 <t> (AddTupleFirst64 val tuple))
// cond:
// result: (ADDQ val (Select0 <t> tuple))
63521 for {
63522 t := v.Type
63523 v_0 := v.Args[0]
63524 if v_0.Op != OpAMD64AddTupleFirst64 {
63525 break
63526 }
63527 tuple := v_0.Args[1]
63528 val := v_0.Args[0]
63529 v.reset(OpAMD64ADDQ)
63530 v.AddArg(val)
63531 v0 := b.NewValue0(v.Pos, OpSelect0, t)
63532 v0.AddArg(tuple)
63533 v.AddArg(v0)
63534 return true
63535 }
63536 return false
63537 }
63538 func rewriteValueAMD64_OpSelect1_0(v *Value) bool {
63539 b := v.Block
63540 typ := &b.Func.Config.Types
// match: (Select1 (Mul64uover x y))
// cond:
// result: (SETO (Select1 <types.TypeFlags> (MULQU x y)))
63544 for {
63545 v_0 := v.Args[0]
63546 if v_0.Op != OpMul64uover {
63547 break
63548 }
63549 y := v_0.Args[1]
63550 x := v_0.Args[0]
63551 v.reset(OpAMD64SETO)
63552 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
63553 v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
63554 v1.AddArg(x)
63555 v1.AddArg(y)
63556 v0.AddArg(v1)
63557 v.AddArg(v0)
63558 return true
63559 }
// match: (Select1 (Mul32uover x y))
// cond:
// result: (SETO (Select1 <types.TypeFlags> (MULLU x y)))
63563 for {
63564 v_0 := v.Args[0]
63565 if v_0.Op != OpMul32uover {
63566 break
63567 }
63568 y := v_0.Args[1]
63569 x := v_0.Args[0]
63570 v.reset(OpAMD64SETO)
63571 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
63572 v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
63573 v1.AddArg(x)
63574 v1.AddArg(y)
63575 v0.AddArg(v1)
63576 v.AddArg(v0)
63577 return true
63578 }
// match: (Select1 (Add64carry x y c))
// cond:
// result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
63582 for {
63583 v_0 := v.Args[0]
63584 if v_0.Op != OpAdd64carry {
63585 break
63586 }
63587 c := v_0.Args[2]
63588 x := v_0.Args[0]
63589 y := v_0.Args[1]
63590 v.reset(OpAMD64NEGQ)
63591 v.Type = typ.UInt64
63592 v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
63593 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
63594 v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
63595 v2.AddArg(x)
63596 v2.AddArg(y)
63597 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
63598 v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
63599 v4.AddArg(c)
63600 v3.AddArg(v4)
63601 v2.AddArg(v3)
63602 v1.AddArg(v2)
63603 v0.AddArg(v1)
63604 v.AddArg(v0)
63605 return true
63606 }
// match: (Select1 (Sub64borrow x y c))
// cond:
// result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
63610 for {
63611 v_0 := v.Args[0]
63612 if v_0.Op != OpSub64borrow {
63613 break
63614 }
63615 c := v_0.Args[2]
63616 x := v_0.Args[0]
63617 y := v_0.Args[1]
63618 v.reset(OpAMD64NEGQ)
63619 v.Type = typ.UInt64
63620 v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
63621 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
63622 v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
63623 v2.AddArg(x)
63624 v2.AddArg(y)
63625 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
63626 v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
63627 v4.AddArg(c)
63628 v3.AddArg(v4)
63629 v2.AddArg(v3)
63630 v1.AddArg(v2)
63631 v0.AddArg(v1)
63632 v.AddArg(v0)
63633 return true
63634 }
// match: (Select1 (NEGLflags (MOVQconst [0])))
// cond:
// result: (FlagEQ)
63638 for {
63639 v_0 := v.Args[0]
63640 if v_0.Op != OpAMD64NEGLflags {
63641 break
63642 }
63643 v_0_0 := v_0.Args[0]
63644 if v_0_0.Op != OpAMD64MOVQconst {
63645 break
63646 }
63647 if v_0_0.AuxInt != 0 {
63648 break
63649 }
63650 v.reset(OpAMD64FlagEQ)
63651 return true
63652 }
// match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x))))
// cond:
// result: x
63656 for {
63657 v_0 := v.Args[0]
63658 if v_0.Op != OpAMD64NEGLflags {
63659 break
63660 }
63661 v_0_0 := v_0.Args[0]
63662 if v_0_0.Op != OpAMD64NEGQ {
63663 break
63664 }
63665 v_0_0_0 := v_0_0.Args[0]
63666 if v_0_0_0.Op != OpAMD64SBBQcarrymask {
63667 break
63668 }
63669 x := v_0_0_0.Args[0]
63670 v.reset(OpCopy)
63671 v.Type = x.Type
63672 v.AddArg(x)
63673 return true
63674 }
// match: (Select1 (AddTupleFirst32 _ tuple))
// cond:
// result: (Select1 tuple)
63678 for {
63679 v_0 := v.Args[0]
63680 if v_0.Op != OpAMD64AddTupleFirst32 {
63681 break
63682 }
63683 tuple := v_0.Args[1]
63684 v.reset(OpSelect1)
63685 v.AddArg(tuple)
63686 return true
63687 }
// match: (Select1 (AddTupleFirst64 _ tuple))
// cond:
// result: (Select1 tuple)
63691 for {
63692 v_0 := v.Args[0]
63693 if v_0.Op != OpAMD64AddTupleFirst64 {
63694 break
63695 }
63696 tuple := v_0.Args[1]
63697 v.reset(OpSelect1)
63698 v.AddArg(tuple)
63699 return true
63700 }
63701 return false
63702 }
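// Sign extensions lower directly to the MOV*QSX instructions; each
// sign-extends its source into a full 64-bit register, which also covers
// the narrower 16- and 32-bit results.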
63703 func rewriteValueAMD64_OpSignExt16to32_0(v *Value) bool {
// match: (SignExt16to32 x)
// cond:
// result: (MOVWQSX x)
63707 for {
63708 x := v.Args[0]
63709 v.reset(OpAMD64MOVWQSX)
63710 v.AddArg(x)
63711 return true
63712 }
63713 }
63714 func rewriteValueAMD64_OpSignExt16to64_0(v *Value) bool {
// match: (SignExt16to64 x)
// cond:
// result: (MOVWQSX x)
63718 for {
63719 x := v.Args[0]
63720 v.reset(OpAMD64MOVWQSX)
63721 v.AddArg(x)
63722 return true
63723 }
63724 }
63725 func rewriteValueAMD64_OpSignExt32to64_0(v *Value) bool {
// match: (SignExt32to64 x)
// cond:
// result: (MOVLQSX x)
63729 for {
63730 x := v.Args[0]
63731 v.reset(OpAMD64MOVLQSX)
63732 v.AddArg(x)
63733 return true
63734 }
63735 }
63736 func rewriteValueAMD64_OpSignExt8to16_0(v *Value) bool {
// match: (SignExt8to16 x)
// cond:
// result: (MOVBQSX x)
63740 for {
63741 x := v.Args[0]
63742 v.reset(OpAMD64MOVBQSX)
63743 v.AddArg(x)
63744 return true
63745 }
63746 }
63747 func rewriteValueAMD64_OpSignExt8to32_0(v *Value) bool {
// match: (SignExt8to32 x)
// cond:
// result: (MOVBQSX x)
63751 for {
63752 x := v.Args[0]
63753 v.reset(OpAMD64MOVBQSX)
63754 v.AddArg(x)
63755 return true
63756 }
63757 }
63758 func rewriteValueAMD64_OpSignExt8to64_0(v *Value) bool {
// match: (SignExt8to64 x)
// cond:
// result: (MOVBQSX x)
63762 for {
63763 x := v.Args[0]
63764 v.reset(OpAMD64MOVBQSX)
63765 v.AddArg(x)
63766 return true
63767 }
63768 }
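// Slicemask must produce 0 for a zero length and all ones otherwise.
// For the nonnegative lengths it is applied to, NEGQ sets the sign bit
// exactly when x != 0, and SARQconst [63] then smears that bit across
// the whole register.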
63769 func rewriteValueAMD64_OpSlicemask_0(v *Value) bool {
63770 b := v.Block
// match: (Slicemask <t> x)
// cond:
// result: (SARQconst (NEGQ <t> x) [63])
63774 for {
63775 t := v.Type
63776 x := v.Args[0]
63777 v.reset(OpAMD64SARQconst)
63778 v.AuxInt = 63
63779 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
63780 v0.AddArg(x)
63781 v.AddArg(v0)
63782 return true
63783 }
63784 }
63785 func rewriteValueAMD64_OpSqrt_0(v *Value) bool {
// match: (Sqrt x)
// cond:
// result: (SQRTSD x)
63789 for {
63790 x := v.Args[0]
63791 v.reset(OpAMD64SQRTSD)
63792 v.AddArg(x)
63793 return true
63794 }
63795 }
63796 func rewriteValueAMD64_OpStaticCall_0(v *Value) bool {
// match: (StaticCall [argwid] {target} mem)
// cond:
// result: (CALLstatic [argwid] {target} mem)
63800 for {
63801 argwid := v.AuxInt
63802 target := v.Aux
63803 mem := v.Args[0]
63804 v.reset(OpAMD64CALLstatic)
63805 v.AuxInt = argwid
63806 v.Aux = target
63807 v.AddArg(mem)
63808 return true
63809 }
63810 }
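// Store dispatches on the stored type: float stores use the SSE
// MOVSSstore/MOVSDstore forms and integer stores pick MOV{B,W,L,Q}store
// by size. The float rules are tried first so that an 8-byte float is
// not caught by the generic 8-byte rule below them.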
63811 func rewriteValueAMD64_OpStore_0(v *Value) bool {
// match: (Store {t} ptr val mem)
// cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
// result: (MOVSDstore ptr val mem)
63815 for {
63816 t := v.Aux
63817 mem := v.Args[2]
63818 ptr := v.Args[0]
63819 val := v.Args[1]
63820 if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
63821 break
63822 }
63823 v.reset(OpAMD64MOVSDstore)
63824 v.AddArg(ptr)
63825 v.AddArg(val)
63826 v.AddArg(mem)
63827 return true
63828 }
// match: (Store {t} ptr val mem)
// cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
// result: (MOVSSstore ptr val mem)
63832 for {
63833 t := v.Aux
63834 mem := v.Args[2]
63835 ptr := v.Args[0]
63836 val := v.Args[1]
63837 if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
63838 break
63839 }
63840 v.reset(OpAMD64MOVSSstore)
63841 v.AddArg(ptr)
63842 v.AddArg(val)
63843 v.AddArg(mem)
63844 return true
63845 }
// match: (Store {t} ptr val mem)
// cond: t.(*types.Type).Size() == 8
// result: (MOVQstore ptr val mem)
63849 for {
63850 t := v.Aux
63851 mem := v.Args[2]
63852 ptr := v.Args[0]
63853 val := v.Args[1]
63854 if !(t.(*types.Type).Size() == 8) {
63855 break
63856 }
63857 v.reset(OpAMD64MOVQstore)
63858 v.AddArg(ptr)
63859 v.AddArg(val)
63860 v.AddArg(mem)
63861 return true
63862 }
// match: (Store {t} ptr val mem)
// cond: t.(*types.Type).Size() == 4
// result: (MOVLstore ptr val mem)
63866 for {
63867 t := v.Aux
63868 mem := v.Args[2]
63869 ptr := v.Args[0]
63870 val := v.Args[1]
63871 if !(t.(*types.Type).Size() == 4) {
63872 break
63873 }
63874 v.reset(OpAMD64MOVLstore)
63875 v.AddArg(ptr)
63876 v.AddArg(val)
63877 v.AddArg(mem)
63878 return true
63879 }
// match: (Store {t} ptr val mem)
// cond: t.(*types.Type).Size() == 2
// result: (MOVWstore ptr val mem)
63883 for {
63884 t := v.Aux
63885 mem := v.Args[2]
63886 ptr := v.Args[0]
63887 val := v.Args[1]
63888 if !(t.(*types.Type).Size() == 2) {
63889 break
63890 }
63891 v.reset(OpAMD64MOVWstore)
63892 v.AddArg(ptr)
63893 v.AddArg(val)
63894 v.AddArg(mem)
63895 return true
63896 }
// match: (Store {t} ptr val mem)
// cond: t.(*types.Type).Size() == 1
// result: (MOVBstore ptr val mem)
63900 for {
63901 t := v.Aux
63902 mem := v.Args[2]
63903 ptr := v.Args[0]
63904 val := v.Args[1]
63905 if !(t.(*types.Type).Size() == 1) {
63906 break
63907 }
63908 v.reset(OpAMD64MOVBstore)
63909 v.AddArg(ptr)
63910 v.AddArg(val)
63911 v.AddArg(mem)
63912 return true
63913 }
63914 return false
63915 }
63916 func rewriteValueAMD64_OpSub16_0(v *Value) bool {
// match: (Sub16 x y)
// cond:
// result: (SUBL x y)
63920 for {
63921 y := v.Args[1]
63922 x := v.Args[0]
63923 v.reset(OpAMD64SUBL)
63924 v.AddArg(x)
63925 v.AddArg(y)
63926 return true
63927 }
63928 }
63929 func rewriteValueAMD64_OpSub32_0(v *Value) bool {
// match: (Sub32 x y)
// cond:
// result: (SUBL x y)
63933 for {
63934 y := v.Args[1]
63935 x := v.Args[0]
63936 v.reset(OpAMD64SUBL)
63937 v.AddArg(x)
63938 v.AddArg(y)
63939 return true
63940 }
63941 }
63942 func rewriteValueAMD64_OpSub32F_0(v *Value) bool {
// match: (Sub32F x y)
// cond:
// result: (SUBSS x y)
63946 for {
63947 y := v.Args[1]
63948 x := v.Args[0]
63949 v.reset(OpAMD64SUBSS)
63950 v.AddArg(x)
63951 v.AddArg(y)
63952 return true
63953 }
63954 }
63955 func rewriteValueAMD64_OpSub64_0(v *Value) bool {
// match: (Sub64 x y)
// cond:
// result: (SUBQ x y)
63959 for {
63960 y := v.Args[1]
63961 x := v.Args[0]
63962 v.reset(OpAMD64SUBQ)
63963 v.AddArg(x)
63964 v.AddArg(y)
63965 return true
63966 }
63967 }
63968 func rewriteValueAMD64_OpSub64F_0(v *Value) bool {
// match: (Sub64F x y)
// cond:
// result: (SUBSD x y)
63972 for {
63973 y := v.Args[1]
63974 x := v.Args[0]
63975 v.reset(OpAMD64SUBSD)
63976 v.AddArg(x)
63977 v.AddArg(y)
63978 return true
63979 }
63980 }
63981 func rewriteValueAMD64_OpSub8_0(v *Value) bool {
// match: (Sub8 x y)
// cond:
// result: (SUBL x y)
63985 for {
63986 y := v.Args[1]
63987 x := v.Args[0]
63988 v.reset(OpAMD64SUBL)
63989 v.AddArg(x)
63990 v.AddArg(y)
63991 return true
63992 }
63993 }
63994 func rewriteValueAMD64_OpSubPtr_0(v *Value) bool {
63995 b := v.Block
63996 config := b.Func.Config
// match: (SubPtr x y)
// cond: config.PtrSize == 8
// result: (SUBQ x y)
64000 for {
64001 y := v.Args[1]
64002 x := v.Args[0]
64003 if !(config.PtrSize == 8) {
64004 break
64005 }
64006 v.reset(OpAMD64SUBQ)
64007 v.AddArg(x)
64008 v.AddArg(y)
64009 return true
64010 }
// match: (SubPtr x y)
// cond: config.PtrSize == 4
// result: (SUBL x y)
64014 for {
64015 y := v.Args[1]
64016 x := v.Args[0]
64017 if !(config.PtrSize == 4) {
64018 break
64019 }
64020 v.reset(OpAMD64SUBL)
64021 v.AddArg(x)
64022 v.AddArg(y)
64023 return true
64024 }
64025 return false
64026 }
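// ROUNDSD's immediate selects the SSE4.1 rounding mode; mode 3 is
// round-toward-zero, which implements Trunc directly.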
64027 func rewriteValueAMD64_OpTrunc_0(v *Value) bool {
// match: (Trunc x)
// cond:
// result: (ROUNDSD [3] x)
64031 for {
64032 x := v.Args[0]
64033 v.reset(OpAMD64ROUNDSD)
64034 v.AuxInt = 3
64035 v.AddArg(x)
64036 return true
64037 }
64038 }
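// Integer truncations are free on AMD64: narrow values already live in
// full-width registers, so each rule below just reuses the operand via
// OpCopy.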
64039 func rewriteValueAMD64_OpTrunc16to8_0(v *Value) bool {
// match: (Trunc16to8 x)
// cond:
// result: x
64043 for {
64044 x := v.Args[0]
64045 v.reset(OpCopy)
64046 v.Type = x.Type
64047 v.AddArg(x)
64048 return true
64049 }
64050 }
64051 func rewriteValueAMD64_OpTrunc32to16_0(v *Value) bool {
// match: (Trunc32to16 x)
// cond:
// result: x
64055 for {
64056 x := v.Args[0]
64057 v.reset(OpCopy)
64058 v.Type = x.Type
64059 v.AddArg(x)
64060 return true
64061 }
64062 }
64063 func rewriteValueAMD64_OpTrunc32to8_0(v *Value) bool {
// match: (Trunc32to8 x)
// cond:
// result: x
64067 for {
64068 x := v.Args[0]
64069 v.reset(OpCopy)
64070 v.Type = x.Type
64071 v.AddArg(x)
64072 return true
64073 }
64074 }
64075 func rewriteValueAMD64_OpTrunc64to16_0(v *Value) bool {
// match: (Trunc64to16 x)
// cond:
// result: x
64079 for {
64080 x := v.Args[0]
64081 v.reset(OpCopy)
64082 v.Type = x.Type
64083 v.AddArg(x)
64084 return true
64085 }
64086 }
64087 func rewriteValueAMD64_OpTrunc64to32_0(v *Value) bool {
// match: (Trunc64to32 x)
// cond:
// result: x
64091 for {
64092 x := v.Args[0]
64093 v.reset(OpCopy)
64094 v.Type = x.Type
64095 v.AddArg(x)
64096 return true
64097 }
64098 }
64099 func rewriteValueAMD64_OpTrunc64to8_0(v *Value) bool {
// match: (Trunc64to8 x)
// cond:
// result: x
64103 for {
64104 x := v.Args[0]
64105 v.reset(OpCopy)
64106 v.Type = x.Type
64107 v.AddArg(x)
64108 return true
64109 }
64110 }
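// WB becomes LoweredWB, which keeps the write-barrier function in Aux;
// later passes emit it as a call to the runtime's write-barrier routine.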
64111 func rewriteValueAMD64_OpWB_0(v *Value) bool {
// match: (WB {fn} destptr srcptr mem)
// cond:
// result: (LoweredWB {fn} destptr srcptr mem)
64115 for {
64116 fn := v.Aux
64117 mem := v.Args[2]
64118 destptr := v.Args[0]
64119 srcptr := v.Args[1]
64120 v.reset(OpAMD64LoweredWB)
64121 v.Aux = fn
64122 v.AddArg(destptr)
64123 v.AddArg(srcptr)
64124 v.AddArg(mem)
64125 return true
64126 }
64127 }
64128 func rewriteValueAMD64_OpXor16_0(v *Value) bool {
// match: (Xor16 x y)
// cond:
// result: (XORL x y)
64132 for {
64133 y := v.Args[1]
64134 x := v.Args[0]
64135 v.reset(OpAMD64XORL)
64136 v.AddArg(x)
64137 v.AddArg(y)
64138 return true
64139 }
64140 }
64141 func rewriteValueAMD64_OpXor32_0(v *Value) bool {
// match: (Xor32 x y)
// cond:
// result: (XORL x y)
64145 for {
64146 y := v.Args[1]
64147 x := v.Args[0]
64148 v.reset(OpAMD64XORL)
64149 v.AddArg(x)
64150 v.AddArg(y)
64151 return true
64152 }
64153 }
64154 func rewriteValueAMD64_OpXor64_0(v *Value) bool {
// match: (Xor64 x y)
// cond:
// result: (XORQ x y)
64158 for {
64159 y := v.Args[1]
64160 x := v.Args[0]
64161 v.reset(OpAMD64XORQ)
64162 v.AddArg(x)
64163 v.AddArg(y)
64164 return true
64165 }
64166 }
64167 func rewriteValueAMD64_OpXor8_0(v *Value) bool {
// match: (Xor8 x y)
// cond:
// result: (XORL x y)
64171 for {
64172 y := v.Args[1]
64173 x := v.Args[0]
64174 v.reset(OpAMD64XORL)
64175 v.AddArg(x)
64176 v.AddArg(y)
64177 return true
64178 }
64179 }
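// Zero clears s bytes at destptr. The rules are ordered from small fixed
// sizes (single MOV*storeconst stores, then short chains of them) up to
// the bulk strategies in OpZero_10 and OpZero_20; odd sizes are first
// trimmed to a multiple of 8 or 16 by peeling off a leading store.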
64180 func rewriteValueAMD64_OpZero_0(v *Value) bool {
64181 b := v.Block
64182 config := b.Func.Config
// match: (Zero [0] _ mem)
// cond:
// result: mem
64186 for {
64187 if v.AuxInt != 0 {
64188 break
64189 }
64190 mem := v.Args[1]
64191 v.reset(OpCopy)
64192 v.Type = mem.Type
64193 v.AddArg(mem)
64194 return true
64195 }
// match: (Zero [1] destptr mem)
// cond:
// result: (MOVBstoreconst [0] destptr mem)
64199 for {
64200 if v.AuxInt != 1 {
64201 break
64202 }
64203 mem := v.Args[1]
64204 destptr := v.Args[0]
64205 v.reset(OpAMD64MOVBstoreconst)
64206 v.AuxInt = 0
64207 v.AddArg(destptr)
64208 v.AddArg(mem)
64209 return true
64210 }
// match: (Zero [2] destptr mem)
// cond:
// result: (MOVWstoreconst [0] destptr mem)
64214 for {
64215 if v.AuxInt != 2 {
64216 break
64217 }
64218 mem := v.Args[1]
64219 destptr := v.Args[0]
64220 v.reset(OpAMD64MOVWstoreconst)
64221 v.AuxInt = 0
64222 v.AddArg(destptr)
64223 v.AddArg(mem)
64224 return true
64225 }
// match: (Zero [4] destptr mem)
// cond:
// result: (MOVLstoreconst [0] destptr mem)
64229 for {
64230 if v.AuxInt != 4 {
64231 break
64232 }
64233 mem := v.Args[1]
64234 destptr := v.Args[0]
64235 v.reset(OpAMD64MOVLstoreconst)
64236 v.AuxInt = 0
64237 v.AddArg(destptr)
64238 v.AddArg(mem)
64239 return true
64240 }
// match: (Zero [8] destptr mem)
// cond:
// result: (MOVQstoreconst [0] destptr mem)
64244 for {
64245 if v.AuxInt != 8 {
64246 break
64247 }
64248 mem := v.Args[1]
64249 destptr := v.Args[0]
64250 v.reset(OpAMD64MOVQstoreconst)
64251 v.AuxInt = 0
64252 v.AddArg(destptr)
64253 v.AddArg(mem)
64254 return true
64255 }
// match: (Zero [3] destptr mem)
// cond:
// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem))
64259 for {
64260 if v.AuxInt != 3 {
64261 break
64262 }
64263 mem := v.Args[1]
64264 destptr := v.Args[0]
64265 v.reset(OpAMD64MOVBstoreconst)
64266 v.AuxInt = makeValAndOff(0, 2)
64267 v.AddArg(destptr)
64268 v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
64269 v0.AuxInt = 0
64270 v0.AddArg(destptr)
64271 v0.AddArg(mem)
64272 v.AddArg(v0)
64273 return true
64274 }
// match: (Zero [5] destptr mem)
// cond:
// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
64278 for {
64279 if v.AuxInt != 5 {
64280 break
64281 }
64282 mem := v.Args[1]
64283 destptr := v.Args[0]
64284 v.reset(OpAMD64MOVBstoreconst)
64285 v.AuxInt = makeValAndOff(0, 4)
64286 v.AddArg(destptr)
64287 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
64288 v0.AuxInt = 0
64289 v0.AddArg(destptr)
64290 v0.AddArg(mem)
64291 v.AddArg(v0)
64292 return true
64293 }
// match: (Zero [6] destptr mem)
// cond:
// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
64297 for {
64298 if v.AuxInt != 6 {
64299 break
64300 }
64301 mem := v.Args[1]
64302 destptr := v.Args[0]
64303 v.reset(OpAMD64MOVWstoreconst)
64304 v.AuxInt = makeValAndOff(0, 4)
64305 v.AddArg(destptr)
64306 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
64307 v0.AuxInt = 0
64308 v0.AddArg(destptr)
64309 v0.AddArg(mem)
64310 v.AddArg(v0)
64311 return true
64312 }
// match: (Zero [7] destptr mem)
// cond:
// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem))
64316 for {
64317 if v.AuxInt != 7 {
64318 break
64319 }
64320 mem := v.Args[1]
64321 destptr := v.Args[0]
64322 v.reset(OpAMD64MOVLstoreconst)
64323 v.AuxInt = makeValAndOff(0, 3)
64324 v.AddArg(destptr)
64325 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
64326 v0.AuxInt = 0
64327 v0.AddArg(destptr)
64328 v0.AddArg(mem)
64329 v.AddArg(v0)
64330 return true
64331 }
// match: (Zero [s] destptr mem)
// cond: s%8 != 0 && s > 8 && !config.useSSE
// result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [0] destptr mem))
64335 for {
64336 s := v.AuxInt
64337 mem := v.Args[1]
64338 destptr := v.Args[0]
64339 if !(s%8 != 0 && s > 8 && !config.useSSE) {
64340 break
64341 }
64342 v.reset(OpZero)
64343 v.AuxInt = s - s%8
64344 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
64345 v0.AuxInt = s % 8
64346 v0.AddArg(destptr)
64347 v.AddArg(v0)
64348 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
64349 v1.AuxInt = 0
64350 v1.AddArg(destptr)
64351 v1.AddArg(mem)
64352 v.AddArg(v1)
64353 return true
64354 }
64355 return false
64356 }
64357 func rewriteValueAMD64_OpZero_10(v *Value) bool {
64358 b := v.Block
64359 config := b.Func.Config
// match: (Zero [16] destptr mem)
// cond: !config.useSSE
// result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))
64363 for {
64364 if v.AuxInt != 16 {
64365 break
64366 }
64367 mem := v.Args[1]
64368 destptr := v.Args[0]
64369 if !(!config.useSSE) {
64370 break
64371 }
64372 v.reset(OpAMD64MOVQstoreconst)
64373 v.AuxInt = makeValAndOff(0, 8)
64374 v.AddArg(destptr)
64375 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
64376 v0.AuxInt = 0
64377 v0.AddArg(destptr)
64378 v0.AddArg(mem)
64379 v.AddArg(v0)
64380 return true
64381 }
// match: (Zero [24] destptr mem)
// cond: !config.useSSE
// result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))
64385 for {
64386 if v.AuxInt != 24 {
64387 break
64388 }
64389 mem := v.Args[1]
64390 destptr := v.Args[0]
64391 if !(!config.useSSE) {
64392 break
64393 }
64394 v.reset(OpAMD64MOVQstoreconst)
64395 v.AuxInt = makeValAndOff(0, 16)
64396 v.AddArg(destptr)
64397 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
64398 v0.AuxInt = makeValAndOff(0, 8)
64399 v0.AddArg(destptr)
64400 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
64401 v1.AuxInt = 0
64402 v1.AddArg(destptr)
64403 v1.AddArg(mem)
64404 v0.AddArg(v1)
64405 v.AddArg(v0)
64406 return true
64407 }
// match: (Zero [32] destptr mem)
// cond: !config.useSSE
// result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))))
64411 for {
64412 if v.AuxInt != 32 {
64413 break
64414 }
64415 mem := v.Args[1]
64416 destptr := v.Args[0]
64417 if !(!config.useSSE) {
64418 break
64419 }
64420 v.reset(OpAMD64MOVQstoreconst)
64421 v.AuxInt = makeValAndOff(0, 24)
64422 v.AddArg(destptr)
64423 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
64424 v0.AuxInt = makeValAndOff(0, 16)
64425 v0.AddArg(destptr)
64426 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
64427 v1.AuxInt = makeValAndOff(0, 8)
64428 v1.AddArg(destptr)
64429 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
64430 v2.AuxInt = 0
64431 v2.AddArg(destptr)
64432 v2.AddArg(mem)
64433 v1.AddArg(v2)
64434 v0.AddArg(v1)
64435 v.AddArg(v0)
64436 return true
64437 }
// match: (Zero [s] destptr mem)
// cond: s > 8 && s < 16 && config.useSSE
// result: (MOVQstoreconst [makeValAndOff(0,s-8)] destptr (MOVQstoreconst [0] destptr mem))
64441 for {
64442 s := v.AuxInt
64443 mem := v.Args[1]
64444 destptr := v.Args[0]
64445 if !(s > 8 && s < 16 && config.useSSE) {
64446 break
64447 }
64448 v.reset(OpAMD64MOVQstoreconst)
64449 v.AuxInt = makeValAndOff(0, s-8)
64450 v.AddArg(destptr)
64451 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
64452 v0.AuxInt = 0
64453 v0.AddArg(destptr)
64454 v0.AddArg(mem)
64455 v.AddArg(v0)
64456 return true
64457 }
// match: (Zero [s] destptr mem)
// cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstore destptr (MOVOconst [0]) mem))
64461 for {
64462 s := v.AuxInt
64463 mem := v.Args[1]
64464 destptr := v.Args[0]
64465 if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
64466 break
64467 }
64468 v.reset(OpZero)
64469 v.AuxInt = s - s%16
64470 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
64471 v0.AuxInt = s % 16
64472 v0.AddArg(destptr)
64473 v.AddArg(v0)
64474 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
64475 v1.AddArg(destptr)
64476 v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
64477 v2.AuxInt = 0
64478 v1.AddArg(v2)
64479 v1.AddArg(mem)
64480 v.AddArg(v1)
64481 return true
64482 }
// match: (Zero [s] destptr mem)
// cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVQstoreconst [0] destptr mem))
64486 for {
64487 s := v.AuxInt
64488 mem := v.Args[1]
64489 destptr := v.Args[0]
64490 if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
64491 break
64492 }
64493 v.reset(OpZero)
64494 v.AuxInt = s - s%16
64495 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
64496 v0.AuxInt = s % 16
64497 v0.AddArg(destptr)
64498 v.AddArg(v0)
64499 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
64500 v1.AuxInt = 0
64501 v1.AddArg(destptr)
64502 v1.AddArg(mem)
64503 v.AddArg(v1)
64504 return true
64505 }
// match: (Zero [16] destptr mem)
// cond: config.useSSE
// result: (MOVOstore destptr (MOVOconst [0]) mem)
64509 for {
64510 if v.AuxInt != 16 {
64511 break
64512 }
64513 mem := v.Args[1]
64514 destptr := v.Args[0]
64515 if !(config.useSSE) {
64516 break
64517 }
64518 v.reset(OpAMD64MOVOstore)
64519 v.AddArg(destptr)
64520 v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
64521 v0.AuxInt = 0
64522 v.AddArg(v0)
64523 v.AddArg(mem)
64524 return true
64525 }
// match: (Zero [32] destptr mem)
// cond: config.useSSE
// result: (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))
64529 for {
64530 if v.AuxInt != 32 {
64531 break
64532 }
64533 mem := v.Args[1]
64534 destptr := v.Args[0]
64535 if !(config.useSSE) {
64536 break
64537 }
64538 v.reset(OpAMD64MOVOstore)
64539 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
64540 v0.AuxInt = 16
64541 v0.AddArg(destptr)
64542 v.AddArg(v0)
64543 v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
64544 v1.AuxInt = 0
64545 v.AddArg(v1)
64546 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
64547 v2.AddArg(destptr)
64548 v3 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
64549 v3.AuxInt = 0
64550 v2.AddArg(v3)
64551 v2.AddArg(mem)
64552 v.AddArg(v2)
64553 return true
64554 }
// match: (Zero [48] destptr mem)
// cond: config.useSSE
// result: (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)))
64558 for {
64559 if v.AuxInt != 48 {
64560 break
64561 }
64562 mem := v.Args[1]
64563 destptr := v.Args[0]
64564 if !(config.useSSE) {
64565 break
64566 }
64567 v.reset(OpAMD64MOVOstore)
64568 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
64569 v0.AuxInt = 32
64570 v0.AddArg(destptr)
64571 v.AddArg(v0)
64572 v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
64573 v1.AuxInt = 0
64574 v.AddArg(v1)
64575 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
64576 v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
64577 v3.AuxInt = 16
64578 v3.AddArg(destptr)
64579 v2.AddArg(v3)
64580 v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
64581 v4.AuxInt = 0
64582 v2.AddArg(v4)
64583 v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
64584 v5.AddArg(destptr)
64585 v6 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
64586 v6.AuxInt = 0
64587 v5.AddArg(v6)
64588 v5.AddArg(mem)
64589 v2.AddArg(v5)
64590 v.AddArg(v2)
64591 return true
64592 }
// match: (Zero [64] destptr mem)
// cond: config.useSSE
// result: (MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))))
64596 for {
64597 if v.AuxInt != 64 {
64598 break
64599 }
64600 mem := v.Args[1]
64601 destptr := v.Args[0]
64602 if !(config.useSSE) {
64603 break
64604 }
64605 v.reset(OpAMD64MOVOstore)
64606 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
64607 v0.AuxInt = 48
64608 v0.AddArg(destptr)
64609 v.AddArg(v0)
64610 v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
64611 v1.AuxInt = 0
64612 v.AddArg(v1)
64613 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
64614 v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
64615 v3.AuxInt = 32
64616 v3.AddArg(destptr)
64617 v2.AddArg(v3)
64618 v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
64619 v4.AuxInt = 0
64620 v2.AddArg(v4)
64621 v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
64622 v6 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
64623 v6.AuxInt = 16
64624 v6.AddArg(destptr)
64625 v5.AddArg(v6)
64626 v7 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
64627 v7.AuxInt = 0
64628 v5.AddArg(v7)
64629 v8 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
64630 v8.AddArg(destptr)
64631 v9 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
64632 v9.AuxInt = 0
64633 v8.AddArg(v9)
64634 v8.AddArg(mem)
64635 v5.AddArg(v8)
64636 v2.AddArg(v5)
64637 v.AddArg(v2)
64638 return true
64639 }
64640 return false
64641 }
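// The largest zeroings: blocks up to 1024 bytes that are 16-byte
// multiples use DUFFZERO (a Duff's-device unrolled loop) when it is
// enabled; anything bigger, or any configuration without DUFFZERO/SSE,
// falls back to REP STOSQ with the count in quadwords.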
64642 func rewriteValueAMD64_OpZero_20(v *Value) bool {
64643 b := v.Block
64644 config := b.Func.Config
64645 typ := &b.Func.Config.Types
// match: (Zero [s] destptr mem)
// cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
// result: (DUFFZERO [s] destptr (MOVOconst [0]) mem)
64649 for {
64650 s := v.AuxInt
64651 mem := v.Args[1]
64652 destptr := v.Args[0]
64653 if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
64654 break
64655 }
64656 v.reset(OpAMD64DUFFZERO)
64657 v.AuxInt = s
64658 v.AddArg(destptr)
64659 v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
64660 v0.AuxInt = 0
64661 v.AddArg(v0)
64662 v.AddArg(mem)
64663 return true
64664 }
// match: (Zero [s] destptr mem)
// cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0
// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
64668 for {
64669 s := v.AuxInt
64670 mem := v.Args[1]
64671 destptr := v.Args[0]
64672 if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
64673 break
64674 }
64675 v.reset(OpAMD64REPSTOSQ)
64676 v.AddArg(destptr)
64677 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
64678 v0.AuxInt = s / 8
64679 v.AddArg(v0)
64680 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
64681 v1.AuxInt = 0
64682 v.AddArg(v1)
64683 v.AddArg(mem)
64684 return true
64685 }
64686 return false
64687 }
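// Zero extensions lower to the MOV*QZX instructions; the 32-to-64 case
// relies on the fact that writing a 32-bit register on AMD64 already
// zeroes the upper 32 bits.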
64688 func rewriteValueAMD64_OpZeroExt16to32_0(v *Value) bool {
// match: (ZeroExt16to32 x)
// cond:
// result: (MOVWQZX x)
64692 for {
64693 x := v.Args[0]
64694 v.reset(OpAMD64MOVWQZX)
64695 v.AddArg(x)
64696 return true
64697 }
64698 }
64699 func rewriteValueAMD64_OpZeroExt16to64_0(v *Value) bool {
// match: (ZeroExt16to64 x)
// cond:
// result: (MOVWQZX x)
64703 for {
64704 x := v.Args[0]
64705 v.reset(OpAMD64MOVWQZX)
64706 v.AddArg(x)
64707 return true
64708 }
64709 }
64710 func rewriteValueAMD64_OpZeroExt32to64_0(v *Value) bool {
// match: (ZeroExt32to64 x)
// cond:
// result: (MOVLQZX x)
64714 for {
64715 x := v.Args[0]
64716 v.reset(OpAMD64MOVLQZX)
64717 v.AddArg(x)
64718 return true
64719 }
64720 }
64721 func rewriteValueAMD64_OpZeroExt8to16_0(v *Value) bool {
// match: (ZeroExt8to16 x)
// cond:
// result: (MOVBQZX x)
64725 for {
64726 x := v.Args[0]
64727 v.reset(OpAMD64MOVBQZX)
64728 v.AddArg(x)
64729 return true
64730 }
64731 }
64732 func rewriteValueAMD64_OpZeroExt8to32_0(v *Value) bool {
// match: (ZeroExt8to32 x)
// cond:
// result: (MOVBQZX x)
64736 for {
64737 x := v.Args[0]
64738 v.reset(OpAMD64MOVBQZX)
64739 v.AddArg(x)
64740 return true
64741 }
64742 }
64743 func rewriteValueAMD64_OpZeroExt8to64_0(v *Value) bool {
// match: (ZeroExt8to64 x)
// cond:
// result: (MOVBQZX x)
64747 for {
64748 x := v.Args[0]
64749 v.reset(OpAMD64MOVBQZX)
64750 v.AddArg(x)
64751 return true
64752 }
64753 }
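// rewriteBlockAMD64 rewrites control-flow blocks rather than values: it
// turns generic If blocks into flag-based AMD64 blocks keyed off the
// SET* op that computed the condition, converts TEST-against-power-of-two
// controls into BT bit tests, canonicalizes InvertFlags by flipping the
// block kind, and resolves blocks whose flags are compile-time constants.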
64754 func rewriteBlockAMD64(b *Block) bool {
64755 config := b.Func.Config
64756 typ := &config.Types
64757 _ = typ
64758 v := b.Control
64759 _ = v
64760 switch b.Kind {
64761 case BlockAMD64EQ:
// match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
// cond: !config.nacl
// result: (UGE (BTL x y))
64765 for v.Op == OpAMD64TESTL {
64766 y := v.Args[1]
64767 v_0 := v.Args[0]
64768 if v_0.Op != OpAMD64SHLL {
64769 break
64770 }
64771 x := v_0.Args[1]
64772 v_0_0 := v_0.Args[0]
64773 if v_0_0.Op != OpAMD64MOVLconst {
64774 break
64775 }
64776 if v_0_0.AuxInt != 1 {
64777 break
64778 }
64779 if !(!config.nacl) {
64780 break
64781 }
64782 b.Kind = BlockAMD64UGE
64783 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
64784 v0.AddArg(x)
64785 v0.AddArg(y)
64786 b.SetControl(v0)
64787 b.Aux = nil
64788 return true
64789 }
// match: (EQ (TESTL y (SHLL (MOVLconst [1]) x)))
// cond: !config.nacl
// result: (UGE (BTL x y))
64793 for v.Op == OpAMD64TESTL {
64794 _ = v.Args[1]
64795 y := v.Args[0]
64796 v_1 := v.Args[1]
64797 if v_1.Op != OpAMD64SHLL {
64798 break
64799 }
64800 x := v_1.Args[1]
64801 v_1_0 := v_1.Args[0]
64802 if v_1_0.Op != OpAMD64MOVLconst {
64803 break
64804 }
64805 if v_1_0.AuxInt != 1 {
64806 break
64807 }
64808 if !(!config.nacl) {
64809 break
64810 }
64811 b.Kind = BlockAMD64UGE
64812 v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
64813 v0.AddArg(x)
64814 v0.AddArg(y)
64815 b.SetControl(v0)
64816 b.Aux = nil
64817 return true
64818 }
// match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
// cond: !config.nacl
// result: (UGE (BTQ x y))
64822 for v.Op == OpAMD64TESTQ {
64823 y := v.Args[1]
64824 v_0 := v.Args[0]
64825 if v_0.Op != OpAMD64SHLQ {
64826 break
64827 }
64828 x := v_0.Args[1]
64829 v_0_0 := v_0.Args[0]
64830 if v_0_0.Op != OpAMD64MOVQconst {
64831 break
64832 }
64833 if v_0_0.AuxInt != 1 {
64834 break
64835 }
64836 if !(!config.nacl) {
64837 break
64838 }
64839 b.Kind = BlockAMD64UGE
64840 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
64841 v0.AddArg(x)
64842 v0.AddArg(y)
64843 b.SetControl(v0)
64844 b.Aux = nil
64845 return true
64846 }
// match: (EQ (TESTQ y (SHLQ (MOVQconst [1]) x)))
// cond: !config.nacl
// result: (UGE (BTQ x y))
64850 for v.Op == OpAMD64TESTQ {
64851 _ = v.Args[1]
64852 y := v.Args[0]
64853 v_1 := v.Args[1]
64854 if v_1.Op != OpAMD64SHLQ {
64855 break
64856 }
64857 x := v_1.Args[1]
64858 v_1_0 := v_1.Args[0]
64859 if v_1_0.Op != OpAMD64MOVQconst {
64860 break
64861 }
64862 if v_1_0.AuxInt != 1 {
64863 break
64864 }
64865 if !(!config.nacl) {
64866 break
64867 }
64868 b.Kind = BlockAMD64UGE
64869 v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
64870 v0.AddArg(x)
64871 v0.AddArg(y)
64872 b.SetControl(v0)
64873 b.Aux = nil
64874 return true
64875 }
// match: (EQ (TESTLconst [c] x))
// cond: isUint32PowerOfTwo(c) && !config.nacl
// result: (UGE (BTLconst [log2uint32(c)] x))
64879 for v.Op == OpAMD64TESTLconst {
64880 c := v.AuxInt
64881 x := v.Args[0]
64882 if !(isUint32PowerOfTwo(c) && !config.nacl) {
64883 break
64884 }
64885 b.Kind = BlockAMD64UGE
64886 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
64887 v0.AuxInt = log2uint32(c)
64888 v0.AddArg(x)
64889 b.SetControl(v0)
64890 b.Aux = nil
64891 return true
64892 }
// match: (EQ (TESTQconst [c] x))
// cond: isUint64PowerOfTwo(c) && !config.nacl
// result: (UGE (BTQconst [log2(c)] x))
64896 for v.Op == OpAMD64TESTQconst {
64897 c := v.AuxInt
64898 x := v.Args[0]
64899 if !(isUint64PowerOfTwo(c) && !config.nacl) {
64900 break
64901 }
64902 b.Kind = BlockAMD64UGE
64903 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
64904 v0.AuxInt = log2(c)
64905 v0.AddArg(x)
64906 b.SetControl(v0)
64907 b.Aux = nil
64908 return true
64909 }
// match: (EQ (TESTQ (MOVQconst [c]) x))
// cond: isUint64PowerOfTwo(c) && !config.nacl
// result: (UGE (BTQconst [log2(c)] x))
64913 for v.Op == OpAMD64TESTQ {
64914 x := v.Args[1]
64915 v_0 := v.Args[0]
64916 if v_0.Op != OpAMD64MOVQconst {
64917 break
64918 }
64919 c := v_0.AuxInt
64920 if !(isUint64PowerOfTwo(c) && !config.nacl) {
64921 break
64922 }
64923 b.Kind = BlockAMD64UGE
64924 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
64925 v0.AuxInt = log2(c)
64926 v0.AddArg(x)
64927 b.SetControl(v0)
64928 b.Aux = nil
64929 return true
64930 }
// match: (EQ (TESTQ x (MOVQconst [c])))
// cond: isUint64PowerOfTwo(c) && !config.nacl
// result: (UGE (BTQconst [log2(c)] x))
64934 for v.Op == OpAMD64TESTQ {
64935 _ = v.Args[1]
64936 x := v.Args[0]
64937 v_1 := v.Args[1]
64938 if v_1.Op != OpAMD64MOVQconst {
64939 break
64940 }
64941 c := v_1.AuxInt
64942 if !(isUint64PowerOfTwo(c) && !config.nacl) {
64943 break
64944 }
64945 b.Kind = BlockAMD64UGE
64946 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
64947 v0.AuxInt = log2(c)
64948 v0.AddArg(x)
64949 b.SetControl(v0)
64950 b.Aux = nil
64951 return true
64952 }
// match: (EQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
// cond: z1==z2 && !config.nacl
// result: (UGE (BTQconst [63] x))
64956 for v.Op == OpAMD64TESTQ {
64957 z2 := v.Args[1]
64958 z1 := v.Args[0]
64959 if z1.Op != OpAMD64SHLQconst {
64960 break
64961 }
64962 if z1.AuxInt != 63 {
64963 break
64964 }
64965 z1_0 := z1.Args[0]
64966 if z1_0.Op != OpAMD64SHRQconst {
64967 break
64968 }
64969 if z1_0.AuxInt != 63 {
64970 break
64971 }
64972 x := z1_0.Args[0]
64973 if !(z1 == z2 && !config.nacl) {
64974 break
64975 }
64976 b.Kind = BlockAMD64UGE
64977 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
64978 v0.AuxInt = 63
64979 v0.AddArg(x)
64980 b.SetControl(v0)
64981 b.Aux = nil
64982 return true
64983 }
// match: (EQ (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x))))
// cond: z1==z2 && !config.nacl
// result: (UGE (BTQconst [63] x))
64987 for v.Op == OpAMD64TESTQ {
64988 _ = v.Args[1]
64989 z2 := v.Args[0]
64990 z1 := v.Args[1]
64991 if z1.Op != OpAMD64SHLQconst {
64992 break
64993 }
64994 if z1.AuxInt != 63 {
64995 break
64996 }
64997 z1_0 := z1.Args[0]
64998 if z1_0.Op != OpAMD64SHRQconst {
64999 break
65000 }
65001 if z1_0.AuxInt != 63 {
65002 break
65003 }
65004 x := z1_0.Args[0]
65005 if !(z1 == z2 && !config.nacl) {
65006 break
65007 }
65008 b.Kind = BlockAMD64UGE
65009 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
65010 v0.AuxInt = 63
65011 v0.AddArg(x)
65012 b.SetControl(v0)
65013 b.Aux = nil
65014 return true
65015 }
// match: (EQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
// cond: z1==z2 && !config.nacl
// result: (UGE (BTQconst [31] x))
65019 for v.Op == OpAMD64TESTL {
65020 z2 := v.Args[1]
65021 z1 := v.Args[0]
65022 if z1.Op != OpAMD64SHLLconst {
65023 break
65024 }
65025 if z1.AuxInt != 31 {
65026 break
65027 }
65028 z1_0 := z1.Args[0]
65029 if z1_0.Op != OpAMD64SHRQconst {
65030 break
65031 }
65032 if z1_0.AuxInt != 31 {
65033 break
65034 }
65035 x := z1_0.Args[0]
65036 if !(z1 == z2 && !config.nacl) {
65037 break
65038 }
65039 b.Kind = BlockAMD64UGE
65040 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
65041 v0.AuxInt = 31
65042 v0.AddArg(x)
65043 b.SetControl(v0)
65044 b.Aux = nil
65045 return true
65046 }
// match: (EQ (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x))))
// cond: z1==z2 && !config.nacl
// result: (UGE (BTQconst [31] x))
65050 for v.Op == OpAMD64TESTL {
65051 _ = v.Args[1]
65052 z2 := v.Args[0]
65053 z1 := v.Args[1]
65054 if z1.Op != OpAMD64SHLLconst {
65055 break
65056 }
65057 if z1.AuxInt != 31 {
65058 break
65059 }
65060 z1_0 := z1.Args[0]
65061 if z1_0.Op != OpAMD64SHRQconst {
65062 break
65063 }
65064 if z1_0.AuxInt != 31 {
65065 break
65066 }
65067 x := z1_0.Args[0]
65068 if !(z1 == z2 && !config.nacl) {
65069 break
65070 }
65071 b.Kind = BlockAMD64UGE
65072 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
65073 v0.AuxInt = 31
65074 v0.AddArg(x)
65075 b.SetControl(v0)
65076 b.Aux = nil
65077 return true
65078 }
// match: (EQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
// cond: z1==z2 && !config.nacl
// result: (UGE (BTQconst [0] x))
65082 for v.Op == OpAMD64TESTQ {
65083 z2 := v.Args[1]
65084 z1 := v.Args[0]
65085 if z1.Op != OpAMD64SHRQconst {
65086 break
65087 }
65088 if z1.AuxInt != 63 {
65089 break
65090 }
65091 z1_0 := z1.Args[0]
65092 if z1_0.Op != OpAMD64SHLQconst {
65093 break
65094 }
65095 if z1_0.AuxInt != 63 {
65096 break
65097 }
65098 x := z1_0.Args[0]
65099 if !(z1 == z2 && !config.nacl) {
65100 break
65101 }
65102 b.Kind = BlockAMD64UGE
65103 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
65104 v0.AuxInt = 0
65105 v0.AddArg(x)
65106 b.SetControl(v0)
65107 b.Aux = nil
65108 return true
65109 }
// match: (EQ (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x))))
// cond: z1==z2 && !config.nacl
// result: (UGE (BTQconst [0] x))
65113 for v.Op == OpAMD64TESTQ {
65114 _ = v.Args[1]
65115 z2 := v.Args[0]
65116 z1 := v.Args[1]
65117 if z1.Op != OpAMD64SHRQconst {
65118 break
65119 }
65120 if z1.AuxInt != 63 {
65121 break
65122 }
65123 z1_0 := z1.Args[0]
65124 if z1_0.Op != OpAMD64SHLQconst {
65125 break
65126 }
65127 if z1_0.AuxInt != 63 {
65128 break
65129 }
65130 x := z1_0.Args[0]
65131 if !(z1 == z2 && !config.nacl) {
65132 break
65133 }
65134 b.Kind = BlockAMD64UGE
65135 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
65136 v0.AuxInt = 0
65137 v0.AddArg(x)
65138 b.SetControl(v0)
65139 b.Aux = nil
65140 return true
65141 }
// match: (EQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
// cond: z1==z2 && !config.nacl
// result: (UGE (BTLconst [0] x))
65145 for v.Op == OpAMD64TESTL {
65146 z2 := v.Args[1]
65147 z1 := v.Args[0]
65148 if z1.Op != OpAMD64SHRLconst {
65149 break
65150 }
65151 if z1.AuxInt != 31 {
65152 break
65153 }
65154 z1_0 := z1.Args[0]
65155 if z1_0.Op != OpAMD64SHLLconst {
65156 break
65157 }
65158 if z1_0.AuxInt != 31 {
65159 break
65160 }
65161 x := z1_0.Args[0]
65162 if !(z1 == z2 && !config.nacl) {
65163 break
65164 }
65165 b.Kind = BlockAMD64UGE
65166 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
65167 v0.AuxInt = 0
65168 v0.AddArg(x)
65169 b.SetControl(v0)
65170 b.Aux = nil
65171 return true
65172 }
// match: (EQ (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x))))
// cond: z1==z2 && !config.nacl
// result: (UGE (BTLconst [0] x))
65176 for v.Op == OpAMD64TESTL {
65177 _ = v.Args[1]
65178 z2 := v.Args[0]
65179 z1 := v.Args[1]
65180 if z1.Op != OpAMD64SHRLconst {
65181 break
65182 }
65183 if z1.AuxInt != 31 {
65184 break
65185 }
65186 z1_0 := z1.Args[0]
65187 if z1_0.Op != OpAMD64SHLLconst {
65188 break
65189 }
65190 if z1_0.AuxInt != 31 {
65191 break
65192 }
65193 x := z1_0.Args[0]
65194 if !(z1 == z2 && !config.nacl) {
65195 break
65196 }
65197 b.Kind = BlockAMD64UGE
65198 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
65199 v0.AuxInt = 0
65200 v0.AddArg(x)
65201 b.SetControl(v0)
65202 b.Aux = nil
65203 return true
65204 }
// match: (EQ (TESTQ z1:(SHRQconst [63] x) z2))
// cond: z1==z2 && !config.nacl
// result: (UGE (BTQconst [63] x))
65208 for v.Op == OpAMD64TESTQ {
65209 z2 := v.Args[1]
65210 z1 := v.Args[0]
65211 if z1.Op != OpAMD64SHRQconst {
65212 break
65213 }
65214 if z1.AuxInt != 63 {
65215 break
65216 }
65217 x := z1.Args[0]
65218 if !(z1 == z2 && !config.nacl) {
65219 break
65220 }
65221 b.Kind = BlockAMD64UGE
65222 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
65223 v0.AuxInt = 63
65224 v0.AddArg(x)
65225 b.SetControl(v0)
65226 b.Aux = nil
65227 return true
65228 }
// match: (EQ (TESTQ z2 z1:(SHRQconst [63] x)))
// cond: z1==z2 && !config.nacl
// result: (UGE (BTQconst [63] x))
65232 for v.Op == OpAMD64TESTQ {
65233 _ = v.Args[1]
65234 z2 := v.Args[0]
65235 z1 := v.Args[1]
65236 if z1.Op != OpAMD64SHRQconst {
65237 break
65238 }
65239 if z1.AuxInt != 63 {
65240 break
65241 }
65242 x := z1.Args[0]
65243 if !(z1 == z2 && !config.nacl) {
65244 break
65245 }
65246 b.Kind = BlockAMD64UGE
65247 v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
65248 v0.AuxInt = 63
65249 v0.AddArg(x)
65250 b.SetControl(v0)
65251 b.Aux = nil
65252 return true
65253 }
// match: (EQ (TESTL z1:(SHRLconst [31] x) z2))
// cond: z1==z2 && !config.nacl
// result: (UGE (BTLconst [31] x))
65257 for v.Op == OpAMD64TESTL {
65258 z2 := v.Args[1]
65259 z1 := v.Args[0]
65260 if z1.Op != OpAMD64SHRLconst {
65261 break
65262 }
65263 if z1.AuxInt != 31 {
65264 break
65265 }
65266 x := z1.Args[0]
65267 if !(z1 == z2 && !config.nacl) {
65268 break
65269 }
65270 b.Kind = BlockAMD64UGE
65271 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
65272 v0.AuxInt = 31
65273 v0.AddArg(x)
65274 b.SetControl(v0)
65275 b.Aux = nil
65276 return true
65277 }
// match: (EQ (TESTL z2 z1:(SHRLconst [31] x)))
// cond: z1==z2 && !config.nacl
// result: (UGE (BTLconst [31] x))
65281 for v.Op == OpAMD64TESTL {
65282 _ = v.Args[1]
65283 z2 := v.Args[0]
65284 z1 := v.Args[1]
65285 if z1.Op != OpAMD64SHRLconst {
65286 break
65287 }
65288 if z1.AuxInt != 31 {
65289 break
65290 }
65291 x := z1.Args[0]
65292 if !(z1 == z2 && !config.nacl) {
65293 break
65294 }
65295 b.Kind = BlockAMD64UGE
65296 v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
65297 v0.AuxInt = 31
65298 v0.AddArg(x)
65299 b.SetControl(v0)
65300 b.Aux = nil
65301 return true
65302 }
// match: (EQ (InvertFlags cmp) yes no)
// cond:
// result: (EQ cmp yes no)
65306 for v.Op == OpAMD64InvertFlags {
65307 cmp := v.Args[0]
65308 b.Kind = BlockAMD64EQ
65309 b.SetControl(cmp)
65310 b.Aux = nil
65311 return true
65312 }
// match: (EQ (FlagEQ) yes no)
// cond:
// result: (First nil yes no)
65316 for v.Op == OpAMD64FlagEQ {
65317 b.Kind = BlockFirst
65318 b.SetControl(nil)
65319 b.Aux = nil
65320 return true
65321 }
// match: (EQ (FlagLT_ULT) yes no)
// cond:
// result: (First nil no yes)
65325 for v.Op == OpAMD64FlagLT_ULT {
65326 b.Kind = BlockFirst
65327 b.SetControl(nil)
65328 b.Aux = nil
65329 b.swapSuccessors()
65330 return true
65331 }
// match: (EQ (FlagLT_UGT) yes no)
// cond:
// result: (First nil no yes)
65335 for v.Op == OpAMD64FlagLT_UGT {
65336 b.Kind = BlockFirst
65337 b.SetControl(nil)
65338 b.Aux = nil
65339 b.swapSuccessors()
65340 return true
65341 }
// match: (EQ (FlagGT_ULT) yes no)
// cond:
// result: (First nil no yes)
65345 for v.Op == OpAMD64FlagGT_ULT {
65346 b.Kind = BlockFirst
65347 b.SetControl(nil)
65348 b.Aux = nil
65349 b.swapSuccessors()
65350 return true
65351 }
// match: (EQ (FlagGT_UGT) yes no)
// cond:
// result: (First nil no yes)
65355 for v.Op == OpAMD64FlagGT_UGT {
65356 b.Kind = BlockFirst
65357 b.SetControl(nil)
65358 b.Aux = nil
65359 b.swapSuccessors()
65360 return true
65361 }
65362 case BlockAMD64GE:
// match: (GE (InvertFlags cmp) yes no)
// cond:
// result: (LE cmp yes no)
65366 for v.Op == OpAMD64InvertFlags {
65367 cmp := v.Args[0]
65368 b.Kind = BlockAMD64LE
65369 b.SetControl(cmp)
65370 b.Aux = nil
65371 return true
65372 }
// match: (GE (FlagEQ) yes no)
// cond:
// result: (First nil yes no)
65376 for v.Op == OpAMD64FlagEQ {
65377 b.Kind = BlockFirst
65378 b.SetControl(nil)
65379 b.Aux = nil
65380 return true
65381 }
65382
65383
65384
65385 for v.Op == OpAMD64FlagLT_ULT {
65386 b.Kind = BlockFirst
65387 b.SetControl(nil)
65388 b.Aux = nil
65389 b.swapSuccessors()
65390 return true
65391 }
65392
65393
65394
65395 for v.Op == OpAMD64FlagLT_UGT {
65396 b.Kind = BlockFirst
65397 b.SetControl(nil)
65398 b.Aux = nil
65399 b.swapSuccessors()
65400 return true
65401 }
65402
65403
65404
65405 for v.Op == OpAMD64FlagGT_ULT {
65406 b.Kind = BlockFirst
65407 b.SetControl(nil)
65408 b.Aux = nil
65409 return true
65410 }
65411
65412
65413
65414 for v.Op == OpAMD64FlagGT_UGT {
65415 b.Kind = BlockFirst
65416 b.SetControl(nil)
65417 b.Aux = nil
65418 return true
65419 }
	case BlockAMD64GT:
		// match: (GT (InvertFlags cmp))
		// result: (LT cmp)
		for v.Op == OpAMD64InvertFlags {
			cmp := v.Args[0]
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (GT (FlagEQ))
		// result: (First) with successors swapped
		for v.Op == OpAMD64FlagEQ {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagLT_ULT))
		// result: (First) with successors swapped
		for v.Op == OpAMD64FlagLT_ULT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagLT_UGT))
		// result: (First) with successors swapped
		for v.Op == OpAMD64FlagLT_UGT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (GT (FlagGT_ULT))
		// result: (First)
		for v.Op == OpAMD64FlagGT_ULT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (GT (FlagGT_UGT))
		// result: (First)
		for v.Op == OpAMD64FlagGT_UGT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockIf:
		// match: (If (SETL cmp))
		// result: (LT cmp)
		for v.Op == OpAMD64SETL {
			cmp := v.Args[0]
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETLE cmp))
		// result: (LE cmp)
		for v.Op == OpAMD64SETLE {
			cmp := v.Args[0]
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETG cmp))
		// result: (GT cmp)
		for v.Op == OpAMD64SETG {
			cmp := v.Args[0]
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETGE cmp))
		// result: (GE cmp)
		for v.Op == OpAMD64SETGE {
			cmp := v.Args[0]
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETEQ cmp))
		// result: (EQ cmp)
		for v.Op == OpAMD64SETEQ {
			cmp := v.Args[0]
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETNE cmp))
		// result: (NE cmp)
		for v.Op == OpAMD64SETNE {
			cmp := v.Args[0]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETB cmp))
		// result: (ULT cmp)
		for v.Op == OpAMD64SETB {
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETBE cmp))
		// result: (ULE cmp)
		for v.Op == OpAMD64SETBE {
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETA cmp))
		// result: (UGT cmp)
		for v.Op == OpAMD64SETA {
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETAE cmp))
		// result: (UGE cmp)
		for v.Op == OpAMD64SETAE {
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETO cmp))
		// result: (OS cmp)
		for v.Op == OpAMD64SETO {
			cmp := v.Args[0]
			b.Kind = BlockAMD64OS
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETGF cmp))
		// result: (UGT cmp)
		for v.Op == OpAMD64SETGF {
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETGEF cmp))
		// result: (UGE cmp)
		for v.Op == OpAMD64SETGEF {
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETEQF cmp))
		// result: (EQF cmp)
		for v.Op == OpAMD64SETEQF {
			cmp := v.Args[0]
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If (SETNEF cmp))
		// result: (NEF cmp)
		for v.Op == OpAMD64SETNEF {
			cmp := v.Args[0]
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (If cond)
		// result: (NE (TESTB cond cond))
		for {
			cond := b.Control
			b.Kind = BlockAMD64NE
			v0 := b.NewValue0(v.Pos, OpAMD64TESTB, types.TypeFlags)
			v0.AddArg(cond)
			v0.AddArg(cond)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
	case BlockAMD64LE:
		// match: (LE (InvertFlags cmp))
		// result: (GE cmp)
		for v.Op == OpAMD64InvertFlags {
			cmp := v.Args[0]
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagEQ))
		// result: (First)
		for v.Op == OpAMD64FlagEQ {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagLT_ULT))
		// result: (First)
		for v.Op == OpAMD64FlagLT_ULT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagLT_UGT))
		// result: (First)
		for v.Op == OpAMD64FlagLT_UGT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LE (FlagGT_ULT))
		// result: (First) with successors swapped
		for v.Op == OpAMD64FlagGT_ULT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LE (FlagGT_UGT))
		// result: (First) with successors swapped
		for v.Op == OpAMD64FlagGT_UGT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64LT:
		// match: (LT (InvertFlags cmp))
		// result: (GT cmp)
		for v.Op == OpAMD64InvertFlags {
			cmp := v.Args[0]
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagEQ))
		// result: (First) with successors swapped
		for v.Op == OpAMD64FlagEQ {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagLT_ULT))
		// result: (First)
		for v.Op == OpAMD64FlagLT_ULT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagLT_UGT))
		// result: (First)
		for v.Op == OpAMD64FlagLT_UGT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (LT (FlagGT_ULT))
		// result: (First) with successors swapped
		for v.Op == OpAMD64FlagGT_ULT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (LT (FlagGT_UGT))
		// result: (First) with successors swapped
		for v.Op == OpAMD64FlagGT_UGT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64NE:
		// match: (NE (TESTB (SETL cmp) (SETL cmp)))
		// result: (LT cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETL {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETL cmp) (SETL cmp))) (commuted)
		// result: (LT cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETL {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETL {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp)))
		// result: (LE cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETLE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETLE cmp) (SETLE cmp))) (commuted)
		// result: (LE cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETLE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETLE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64LE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp)))
		// result: (GT cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETG {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETG cmp) (SETG cmp))) (commuted)
		// result: (GT cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETG {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETG {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp)))
		// result: (GE cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGE cmp) (SETGE cmp))) (commuted)
		// result: (GE cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64GE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)))
		// result: (EQ cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQ {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQ cmp) (SETEQ cmp))) (commuted)
		// result: (EQ cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQ {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQ {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQ
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp)))
		// result: (NE cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNE cmp) (SETNE cmp))) (commuted)
		// result: (NE cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp)))
		// result: (ULT cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETB {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETB cmp) (SETB cmp))) (commuted)
		// result: (ULT cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETB {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETB {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp)))
		// result: (ULE cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETBE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETBE cmp) (SETBE cmp))) (commuted)
		// result: (ULE cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETBE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETBE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp)))
		// result: (UGT cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETA {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETA cmp) (SETA cmp))) (commuted)
		// result: (UGT cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETA {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETA {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp)))
		// result: (UGE cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETAE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETAE cmp) (SETAE cmp))) (commuted)
		// result: (UGE cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETAE {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETAE {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETO cmp) (SETO cmp)))
		// result: (OS cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETO {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETO {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64OS
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETO cmp) (SETO cmp))) (commuted)
		// result: (OS cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETO {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETO {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64OS
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
		// cond: !config.nacl
		// result: (ULT (BTL x y))
		for v.Op == OpAMD64TESTL {
			y := v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLL {
				break
			}
			x := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL y (SHLL (MOVLconst [1]) x))) (commuted)
		// cond: !config.nacl
		// result: (ULT (BTL x y))
		for v.Op == OpAMD64TESTL {
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLL {
				break
			}
			x := v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVLconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y))
		// cond: !config.nacl
		// result: (ULT (BTQ x y))
		for v.Op == OpAMD64TESTQ {
			y := v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SHLQ {
				break
			}
			x := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_0_0.AuxInt != 1 {
				break
			}
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ y (SHLQ (MOVQconst [1]) x))) (commuted)
		// cond: !config.nacl
		// result: (ULT (BTQ x y))
		for v.Op == OpAMD64TESTQ {
			_ = v.Args[1]
			y := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SHLQ {
				break
			}
			x := v_1.Args[1]
			v_1_0 := v_1.Args[0]
			if v_1_0.Op != OpAMD64MOVQconst {
				break
			}
			if v_1_0.AuxInt != 1 {
				break
			}
			if !(!config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg(x)
			v0.AddArg(y)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTLconst [c] x))
		// cond: isUint32PowerOfTwo(c) && !config.nacl
		// result: (ULT (BTLconst [log2uint32(c)] x))
		for v.Op == OpAMD64TESTLconst {
			c := v.AuxInt
			x := v.Args[0]
			if !(isUint32PowerOfTwo(c) && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = log2uint32(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQconst [c] x))
		// cond: isUint64PowerOfTwo(c) && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for v.Op == OpAMD64TESTQconst {
			c := v.AuxInt
			x := v.Args[0]
			if !(isUint64PowerOfTwo(c) && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ (MOVQconst [c]) x))
		// cond: isUint64PowerOfTwo(c) && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for v.Op == OpAMD64TESTQ {
			x := v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64MOVQconst {
				break
			}
			c := v_0.AuxInt
			if !(isUint64PowerOfTwo(c) && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ x (MOVQconst [c]))) (commuted)
		// cond: isUint64PowerOfTwo(c) && !config.nacl
		// result: (ULT (BTQconst [log2(c)] x))
		for v.Op == OpAMD64TESTQ {
			_ = v.Args[1]
			x := v.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64MOVQconst {
				break
			}
			c := v_1.AuxInt
			if !(isUint64PowerOfTwo(c) && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = log2(c)
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [63] x))
		for v.Op == OpAMD64TESTQ {
			z2 := v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHLQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst {
				break
			}
			if z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ z2 z1:(SHLQconst [63] (SHRQconst [63] x)))) (commuted)
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [63] x))
		for v.Op == OpAMD64TESTQ {
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHLQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst {
				break
			}
			if z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [31] x))
		for v.Op == OpAMD64TESTL {
			z2 := v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHLLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst {
				break
			}
			if z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL z2 z1:(SHLLconst [31] (SHRQconst [31] x)))) (commuted)
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [31] x))
		for v.Op == OpAMD64TESTL {
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHLLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst {
				break
			}
			if z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [0] x))
		for v.Op == OpAMD64TESTQ {
			z2 := v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHRQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst {
				break
			}
			if z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ z2 z1:(SHRQconst [63] (SHLQconst [63] x)))) (commuted)
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [0] x))
		for v.Op == OpAMD64TESTQ {
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHRQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst {
				break
			}
			if z1_0.AuxInt != 63 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTLconst [0] x))
		for v.Op == OpAMD64TESTL {
			z2 := v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHRLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst {
				break
			}
			if z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL z2 z1:(SHRLconst [31] (SHLLconst [31] x)))) (commuted)
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTLconst [0] x))
		for v.Op == OpAMD64TESTL {
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHRLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst {
				break
			}
			if z1_0.AuxInt != 31 {
				break
			}
			x := z1_0.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 0
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ z1:(SHRQconst [63] x) z2))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [63] x))
		for v.Op == OpAMD64TESTQ {
			z2 := v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHRQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			x := z1.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTQ z2 z1:(SHRQconst [63] x))) (commuted)
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTQconst [63] x))
		for v.Op == OpAMD64TESTQ {
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHRQconst {
				break
			}
			if z1.AuxInt != 63 {
				break
			}
			x := z1.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = 63
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL z1:(SHRLconst [31] x) z2))
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTLconst [31] x))
		for v.Op == OpAMD64TESTL {
			z2 := v.Args[1]
			z1 := v.Args[0]
			if z1.Op != OpAMD64SHRLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			x := z1.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTL z2 z1:(SHRLconst [31] x))) (commuted)
		// cond: z1==z2 && !config.nacl
		// result: (ULT (BTLconst [31] x))
		for v.Op == OpAMD64TESTL {
			_ = v.Args[1]
			z2 := v.Args[0]
			z1 := v.Args[1]
			if z1.Op != OpAMD64SHRLconst {
				break
			}
			if z1.AuxInt != 31 {
				break
			}
			x := z1.Args[0]
			if !(z1 == z2 && !config.nacl) {
				break
			}
			b.Kind = BlockAMD64ULT
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = 31
			v0.AddArg(x)
			b.SetControl(v0)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp)))
		// result: (UGT cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGF cmp) (SETGF cmp))) (commuted)
		// result: (UGT cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)))
		// result: (UGE cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETGEF cmp) (SETGEF cmp))) (commuted)
		// result: (UGE cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETGEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETGEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)))
		// result: (EQF cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETEQF cmp) (SETEQF cmp))) (commuted)
		// result: (EQF cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETEQF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETEQF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64EQF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)))
		// result: (NEF cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (TESTB (SETNEF cmp) (SETNEF cmp))) (commuted)
		// result: (NEF cmp)
		for v.Op == OpAMD64TESTB {
			_ = v.Args[1]
			v_0 := v.Args[0]
			if v_0.Op != OpAMD64SETNEF {
				break
			}
			cmp := v_0.Args[0]
			v_1 := v.Args[1]
			if v_1.Op != OpAMD64SETNEF {
				break
			}
			if cmp != v_1.Args[0] {
				break
			}
			b.Kind = BlockAMD64NEF
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (InvertFlags cmp))
		// result: (NE cmp)
		for v.Op == OpAMD64InvertFlags {
			cmp := v.Args[0]
			b.Kind = BlockAMD64NE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagEQ))
		// result: (First) with successors swapped
		for v.Op == OpAMD64FlagEQ {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (NE (FlagLT_ULT))
		// result: (First)
		for v.Op == OpAMD64FlagLT_ULT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagLT_UGT))
		// result: (First)
		for v.Op == OpAMD64FlagLT_UGT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagGT_ULT))
		// result: (First)
		for v.Op == OpAMD64FlagGT_ULT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (NE (FlagGT_UGT))
		// result: (First)
		for v.Op == OpAMD64FlagGT_UGT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64UGE:
		// match: (UGE (InvertFlags cmp))
		// result: (ULE cmp)
		for v.Op == OpAMD64InvertFlags {
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagEQ))
		// result: (First)
		for v.Op == OpAMD64FlagEQ {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagLT_ULT))
		// result: (First) with successors swapped
		for v.Op == OpAMD64FlagLT_ULT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagLT_UGT))
		// result: (First)
		for v.Op == OpAMD64FlagLT_UGT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGE (FlagGT_ULT))
		// result: (First) with successors swapped
		for v.Op == OpAMD64FlagGT_ULT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGE (FlagGT_UGT))
		// result: (First)
		for v.Op == OpAMD64FlagGT_UGT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64UGT:
		// match: (UGT (InvertFlags cmp))
		// result: (ULT cmp)
		for v.Op == OpAMD64InvertFlags {
			cmp := v.Args[0]
			b.Kind = BlockAMD64ULT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (UGT (FlagEQ))
		// result: (First) with successors swapped
		for v.Op == OpAMD64FlagEQ {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_ULT))
		// result: (First) with successors swapped
		for v.Op == OpAMD64FlagLT_ULT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagLT_UGT))
		// result: (First)
		for v.Op == OpAMD64FlagLT_UGT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (UGT (FlagGT_ULT))
		// result: (First) with successors swapped
		for v.Op == OpAMD64FlagGT_ULT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (UGT (FlagGT_UGT))
		// result: (First)
		for v.Op == OpAMD64FlagGT_UGT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
	case BlockAMD64ULE:
		// match: (ULE (InvertFlags cmp))
		// result: (UGE cmp)
		for v.Op == OpAMD64InvertFlags {
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGE
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagEQ))
		// result: (First)
		for v.Op == OpAMD64FlagEQ {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagLT_ULT))
		// result: (First)
		for v.Op == OpAMD64FlagLT_ULT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagLT_UGT))
		// result: (First) with successors swapped
		for v.Op == OpAMD64FlagLT_UGT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULE (FlagGT_ULT))
		// result: (First)
		for v.Op == OpAMD64FlagGT_ULT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULE (FlagGT_UGT))
		// result: (First) with successors swapped
		for v.Op == OpAMD64FlagGT_UGT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	case BlockAMD64ULT:
		// match: (ULT (InvertFlags cmp))
		// result: (UGT cmp)
		for v.Op == OpAMD64InvertFlags {
			cmp := v.Args[0]
			b.Kind = BlockAMD64UGT
			b.SetControl(cmp)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagEQ))
		// result: (First) with successors swapped
		for v.Op == OpAMD64FlagEQ {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagLT_ULT))
		// result: (First)
		for v.Op == OpAMD64FlagLT_ULT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagLT_UGT))
		// result: (First) with successors swapped
		for v.Op == OpAMD64FlagLT_UGT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
		// match: (ULT (FlagGT_ULT))
		// result: (First)
		for v.Op == OpAMD64FlagGT_ULT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			return true
		}
		// match: (ULT (FlagGT_UGT))
		// result: (First) with successors swapped
		for v.Op == OpAMD64FlagGT_UGT {
			b.Kind = BlockFirst
			b.SetControl(nil)
			b.Aux = nil
			b.swapSuccessors()
			return true
		}
	}
	return false
}