// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "go_asm.h"
#include "go_tls.h"
#include "funcdata.h"
#include "textflag.h"

TEXT runtime·rt0_go(SB),NOSPLIT,$0
	// copy arguments forward on an even stack
	MOVL	SP, CX
	MOVL	8(CX), AX	// argc
	MOVL	12(CX), BX	// argv
	SUBL	$128, CX	// plenty of scratch
	ANDL	$~15, CX
	MOVL	CX, SP

	MOVL	AX, 16(SP)
	MOVL	BX, 24(SP)

	// create istack out of the given (operating system) stack.
	MOVL	$runtime·g0(SB), DI
	LEAL	(-64*1024+104)(SP), BX
	MOVL	BX, g_stackguard0(DI)
	MOVL	BX, g_stackguard1(DI)
	MOVL	BX, (g_stack+stack_lo)(DI)
	MOVL	SP, (g_stack+stack_hi)(DI)

	// find out information about the processor we're on
	MOVL	$0, AX
	CPUID
	CMPL	AX, $0
	JE	nocpuinfo

	CMPL	BX, $0x756E6547	// "Genu"
	JNE	notintel
	CMPL	DX, $0x49656E69	// "ineI"
	JNE	notintel
	CMPL	CX, $0x6C65746E	// "ntel"
	JNE	notintel
	MOVB	$1, runtime·isIntel(SB)
notintel:

	// Load EAX=1 cpuid flags
	MOVL	$1, AX
	CPUID
	MOVL	AX, runtime·processorVersionInfo(SB)

nocpuinfo:
	LEAL	runtime·m0+m_tls(SB), DI
	CALL	runtime·settls(SB)

	// store through it, to make sure it works
	get_tls(BX)
	MOVQ	$0x123, g(BX)
	MOVQ	runtime·m0+m_tls(SB), AX
	CMPQ	AX, $0x123
	JEQ	2(PC)
	CALL	runtime·abort(SB)
ok:
	// set the per-goroutine and per-mach "registers"
	get_tls(BX)
	LEAL	runtime·g0(SB), CX
	MOVL	CX, g(BX)
	LEAL	runtime·m0(SB), AX

	// save m->g0 = g0
	MOVL	CX, m_g0(AX)
	// save m0 to g0->m
	MOVL	AX, g_m(CX)

	CLD	// convention is D is always left cleared
	CALL	runtime·check(SB)

	MOVL	16(SP), AX	// copy argc
	MOVL	AX, 0(SP)
	MOVL	24(SP), AX	// copy argv
	MOVL	AX, 4(SP)
	CALL	runtime·args(SB)
	CALL	runtime·osinit(SB)
	CALL	runtime·schedinit(SB)

	// create a new goroutine to start program
	MOVL	$runtime·mainPC(SB), AX	// entry
	MOVL	$0, 0(SP)
	MOVL	AX, 4(SP)
	CALL	runtime·newproc(SB)

	// start this M
	CALL	runtime·mstart(SB)

	MOVL	$0xf1, 0xf1	// crash
	RET

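// mainPC is a function value for runtime·main, used above as the entry
// point handed to newproc (which expects a funcval, not a raw PC).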
DATA	runtime·mainPC+0(SB)/4,$runtime·main(SB)
GLOBL	runtime·mainPC(SB),RODATA,$4

TEXT runtime·breakpoint(SB),NOSPLIT,$0-0
	INT	$3
	RET

TEXT runtime·asminit(SB),NOSPLIT,$0-0
	// No per-thread init.
	RET

/*
 * go-routine
 */

// void gosave(Gobuf*)
// save state in Gobuf; setjmp
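// This records the caller's SP and PC and the current g, clears ret, and
// asserts that ctxt is already zero (see func save in proc.go).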
TEXT runtime·gosave(SB), NOSPLIT, $0-4
	MOVL	buf+0(FP), AX	// gobuf
	LEAL	buf+0(FP), BX	// caller's SP
	MOVL	BX, gobuf_sp(AX)
	MOVL	0(SP), BX	// caller's PC
	MOVL	BX, gobuf_pc(AX)
	MOVQ	$0, gobuf_ret(AX)
	// Assert ctxt is zero. See func save.
	MOVL	gobuf_ctxt(AX), BX
	TESTL	BX, BX
	JZ	2(PC)
	CALL	runtime·badctxt(SB)
	get_tls(CX)
	MOVL	g(CX), BX
	MOVL	BX, gobuf_g(AX)
	RET

// void gogo(Gobuf*)
// restore state from Gobuf; longjmp
TEXT runtime·gogo(SB), NOSPLIT, $8-4
	MOVL	buf+0(FP), BX	// gobuf
	MOVL	gobuf_g(BX), DX
	MOVL	0(DX), CX	// make sure g != nil
	get_tls(CX)
	MOVL	DX, g(CX)
	MOVL	gobuf_sp(BX), SP	// restore SP
	MOVL	gobuf_ctxt(BX), DX
	MOVQ	gobuf_ret(BX), AX
	MOVL	$0, gobuf_sp(BX)	// clear to help garbage collector
	MOVQ	$0, gobuf_ret(BX)
	MOVL	$0, gobuf_ctxt(BX)
	MOVL	gobuf_pc(BX), BX
	JMP	BX

// func mcall(fn func(*g))
// Switch to m->g0's stack, call fn(g).
// Fn must never return. It should gogo(&g->sched)
// to keep running g.
TEXT runtime·mcall(SB), NOSPLIT, $0-4
	MOVL	fn+0(FP), DI

	get_tls(CX)
	MOVL	g(CX), AX	// save state in g->sched
	MOVL	0(SP), BX	// caller's PC
	MOVL	BX, (g_sched+gobuf_pc)(AX)
	LEAL	fn+0(FP), BX	// caller's SP
	MOVL	BX, (g_sched+gobuf_sp)(AX)
	MOVL	AX, (g_sched+gobuf_g)(AX)

	// switch to m->g0 & its stack, call fn
	MOVL	g(CX), BX
	MOVL	g_m(BX), BX
	MOVL	m_g0(BX), SI
	CMPL	SI, AX	// if g == m->g0 call badmcall
	JNE	3(PC)
	MOVL	$runtime·badmcall(SB), AX
	JMP	AX
	MOVL	SI, g(CX)	// g = m->g0
	MOVL	(g_sched+gobuf_sp)(SI), SP	// sp = m->g0->sched.sp
	PUSHQ	AX
	MOVL	DI, DX
	MOVL	0(DI), DI
	CALL	DI
	POPQ	AX
	MOVL	$runtime·badmcall2(SB), AX
	JMP	AX
	RET

// systemstack_switch is a dummy routine that systemstack leaves at the bottom
// of the G stack. We need to distinguish the routine that
// lives at the bottom of the G stack from the one that lives
// at the top of the system stack because the one at the top of
// the system stack terminates the stack walk (see topofstack()).
TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
	RET

// func systemstack(fn func())
TEXT runtime·systemstack(SB), NOSPLIT, $0-4
	MOVL	fn+0(FP), DI	// DI = fn
	get_tls(CX)
	MOVL	g(CX), AX	// AX = g
	MOVL	g_m(AX), BX	// BX = m

	CMPL	AX, m_gsignal(BX)
	JEQ	noswitch

	MOVL	m_g0(BX), DX	// DX = g0
	CMPL	AX, DX
	JEQ	noswitch

	CMPL	AX, m_curg(BX)
	JNE	bad

	// switch stacks
	// save our state in g->sched. Pretend to
	// be systemstack_switch if the G stack is scanned.
	MOVL	$runtime·systemstack_switch(SB), SI
	MOVL	SI, (g_sched+gobuf_pc)(AX)
	MOVL	SP, (g_sched+gobuf_sp)(AX)
	MOVL	AX, (g_sched+gobuf_g)(AX)

	// switch to g0
	MOVL	DX, g(CX)
	MOVL	(g_sched+gobuf_sp)(DX), SP

	// call target function
	MOVL	DI, DX
	MOVL	0(DI), DI
	CALL	DI

	// switch back to g
	get_tls(CX)
	MOVL	g(CX), AX
	MOVL	g_m(AX), BX
	MOVL	m_curg(BX), AX
	MOVL	AX, g(CX)
	MOVL	(g_sched+gobuf_sp)(AX), SP
	MOVL	$0, (g_sched+gobuf_sp)(AX)
	RET

noswitch:
	// already on m stack, just call directly
	// Using a tail call here cleans up tracebacks since we won't stop
	// at an intermediate systemstack.
	MOVL	DI, DX
	MOVL	0(DI), DI
	JMP	DI

bad:
	// Not g0, not curg. Must be gsignal, but that's not allowed.
	// Hide call from linker nosplit analysis.
	MOVL	$runtime·badsystemstack(SB), AX
	CALL	AX
	INT	$3

/*
 * support for morestack
 */

// Called during function prolog when more stack is needed.
//
// The traceback routines see morestack on a g0 as being
// the top of a stack (for example, morestack calling newstack
// calling the scheduler calling newm calling gc), so we must
// record an argument size. For that purpose, it has no arguments.
TEXT runtime·morestack(SB),NOSPLIT,$0-0
	get_tls(CX)
	MOVL	g(CX), BX
	MOVL	g_m(BX), BX

	// Cannot grow scheduler stack (m->g0).
	MOVL	m_g0(BX), SI
	CMPL	g(CX), SI
	JNE	3(PC)
	CALL	runtime·badmorestackg0(SB)
	MOVL	0, AX

	// Cannot grow signal stack (m->gsignal).
	MOVL	m_gsignal(BX), SI
	CMPL	g(CX), SI
	JNE	3(PC)
	CALL	runtime·badmorestackgsignal(SB)
	MOVL	0, AX

	// Called from f.
	// Set m->morebuf to f's caller.
	NOP	SP	// tell vet SP changed - stop checking offsets
	MOVL	8(SP), AX	// f's caller's PC
	MOVL	AX, (m_morebuf+gobuf_pc)(BX)
	LEAL	16(SP), AX	// f's caller's SP
	MOVL	AX, (m_morebuf+gobuf_sp)(BX)
	get_tls(CX)
	MOVL	g(CX), SI
	MOVL	SI, (m_morebuf+gobuf_g)(BX)

	// Set g->sched to context in f.
	MOVL	0(SP), AX	// f's PC
	MOVL	AX, (g_sched+gobuf_pc)(SI)
	MOVL	SI, (g_sched+gobuf_g)(SI)
	LEAL	8(SP), AX	// f's SP
	MOVL	AX, (g_sched+gobuf_sp)(SI)
	MOVL	DX, (g_sched+gobuf_ctxt)(SI)

	// Call newstack on m->g0's stack.
	MOVL	m_g0(BX), BX
	MOVL	BX, g(CX)
	MOVL	(g_sched+gobuf_sp)(BX), SP
	CALL	runtime·newstack(SB)
	MOVL	$0, 0x1003	// crash if newstack returns
	RET

// morestack trampolines
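// morestack_noctxt is the prologue trampoline for functions that carry no
// closure context in DX; it clears DX so morestack records a zero ctxt.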
TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0
	MOVL	$0, DX
	JMP	runtime·morestack(SB)

// reflectcall: call a function with the given argument list
// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
// we don't have variable-sized frames, so we use a small number
// of constant-sized-frame functions to encode a few bits of size in the pc.
// Caution: ugly multiline assembly macros in your future!
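// Each DISPATCH below compares the argument size in CX against a fixed
// frame size and tail-calls the first call* routine whose frame is big
// enough; anything larger than 1GB falls through to badreflectcall.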

#define DISPATCH(NAME,MAXSIZE)	\
	CMPL	CX, $MAXSIZE;	\
	JA	3(PC);	\
	MOVL	$NAME(SB), AX;	\
	JMP	AX
// Note: can't just "JMP NAME(SB)" - bad inlining results.

TEXT ·reflectcall(SB), NOSPLIT, $0-20
	MOVLQZX	argsize+12(FP), CX
	DISPATCH(runtime·call16, 16)
	DISPATCH(runtime·call32, 32)
	DISPATCH(runtime·call64, 64)
	DISPATCH(runtime·call128, 128)
	DISPATCH(runtime·call256, 256)
	DISPATCH(runtime·call512, 512)
	DISPATCH(runtime·call1024, 1024)
	DISPATCH(runtime·call2048, 2048)
	DISPATCH(runtime·call4096, 4096)
	DISPATCH(runtime·call8192, 8192)
	DISPATCH(runtime·call16384, 16384)
	DISPATCH(runtime·call32768, 32768)
	DISPATCH(runtime·call65536, 65536)
	DISPATCH(runtime·call131072, 131072)
	DISPATCH(runtime·call262144, 262144)
	DISPATCH(runtime·call524288, 524288)
	DISPATCH(runtime·call1048576, 1048576)
	DISPATCH(runtime·call2097152, 2097152)
	DISPATCH(runtime·call4194304, 4194304)
	DISPATCH(runtime·call8388608, 8388608)
	DISPATCH(runtime·call16777216, 16777216)
	DISPATCH(runtime·call33554432, 33554432)
	DISPATCH(runtime·call67108864, 67108864)
	DISPATCH(runtime·call134217728, 134217728)
	DISPATCH(runtime·call268435456, 268435456)
	DISPATCH(runtime·call536870912, 536870912)
	DISPATCH(runtime·call1073741824, 1073741824)
	MOVL	$runtime·badreflectcall(SB), AX
	JMP	AX

#define CALLFN(NAME,MAXSIZE)	\
TEXT NAME(SB), WRAPPER, $MAXSIZE-20;	\
	NO_LOCAL_POINTERS;	\
	/* copy arguments to stack */	\
	MOVL	argptr+8(FP), SI;	\
	MOVL	argsize+12(FP), CX;	\
	MOVL	SP, DI;	\
	REP;MOVSB;	\
	/* call function */	\
	MOVL	f+4(FP), DX;	\
	MOVL	(DX), AX;	\
	CALL	AX;	\
	/* copy return values back */	\
	MOVL	argtype+0(FP), DX;	\
	MOVL	argptr+8(FP), DI;	\
	MOVL	argsize+12(FP), CX;	\
	MOVL	retoffset+16(FP), BX;	\
	MOVL	SP, SI;	\
	ADDL	BX, DI;	\
	ADDL	BX, SI;	\
	SUBL	BX, CX;	\
	CALL	callRet<>(SB);	\
	RET

// callRet copies return values back at the end of call*. This is a
// separate function so it can allocate stack space for the arguments
// to reflectcallmove. It does not follow the Go ABI; it expects its
// arguments in registers.
TEXT callRet<>(SB), NOSPLIT, $16-0
	MOVL	DX, 0(SP)
	MOVL	DI, 4(SP)
	MOVL	SI, 8(SP)
	MOVL	CX, 12(SP)
	CALL	runtime·reflectcallmove(SB)
	RET

CALLFN(·call16, 16)
CALLFN(·call32, 32)
CALLFN(·call64, 64)
CALLFN(·call128, 128)
CALLFN(·call256, 256)
CALLFN(·call512, 512)
CALLFN(·call1024, 1024)
CALLFN(·call2048, 2048)
CALLFN(·call4096, 4096)
CALLFN(·call8192, 8192)
CALLFN(·call16384, 16384)
CALLFN(·call32768, 32768)
CALLFN(·call65536, 65536)
CALLFN(·call131072, 131072)
CALLFN(·call262144, 262144)
CALLFN(·call524288, 524288)
CALLFN(·call1048576, 1048576)
CALLFN(·call2097152, 2097152)
CALLFN(·call4194304, 4194304)
CALLFN(·call8388608, 8388608)
CALLFN(·call16777216, 16777216)
CALLFN(·call33554432, 33554432)
CALLFN(·call67108864, 67108864)
CALLFN(·call134217728, 134217728)
CALLFN(·call268435456, 268435456)
CALLFN(·call536870912, 536870912)
CALLFN(·call1073741824, 1073741824)

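// procyield spins for the requested count, executing PAUSE on each
// iteration to hint to the CPU that this is a spin-wait loop.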
TEXT runtime·procyield(SB),NOSPLIT,$0-0
	MOVL	cycles+0(FP), AX
again:
	PAUSE
	SUBL	$1, AX
	JNZ	again
	RET

TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
	// Stores are already ordered on x86, so this is just a
	// compile barrier.
	RET

// void jmpdefer(fn, sp);
// called from deferreturn.
// 1. pop the caller
// 2. sub 5 bytes from the caller's return address
// 3. jmp to the argument
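// The 5 bytes subtracted below are the length of the direct CALL
// instruction that invoked deferreturn, so when the deferred function
// returns it re-executes that CALL and runs any remaining defers.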
TEXT runtime·jmpdefer(SB), NOSPLIT, $0-8
	MOVL	fv+0(FP), DX
	MOVL	argp+4(FP), BX
	LEAL	-8(BX), SP	// caller sp after CALL
	SUBL	$5, (SP)	// return to CALL again
	MOVL	0(DX), BX
	JMP	BX	// but first run the deferred function

// func asmcgocall(fn, arg unsafe.Pointer) int32
// Not implemented.
TEXT runtime·asmcgocall(SB),NOSPLIT,$0-12
	MOVL	0, AX	// crash
	MOVL	$0, ret+8(FP)	// for vet
	RET

// cgocallback(void (*fn)(void*), void *frame, uintptr framesize)
// Not implemented.
TEXT runtime·cgocallback(SB),NOSPLIT,$0-16
	MOVL	0, AX
	RET

// cgocallback_gofunc(FuncVal*, void *frame, uintptr framesize)
// Not implemented.
TEXT ·cgocallback_gofunc(SB),NOSPLIT,$0-16
	MOVL	0, AX
	RET

// void setg(G*); set g. for use by needm.
// Not implemented.
TEXT runtime·setg(SB), NOSPLIT, $0-4
	MOVL	0, AX
	RET

TEXT runtime·abort(SB),NOSPLIT,$0-0
	INT	$3
loop:
	JMP	loop

// check that SP is in range [g->stack.lo, g->stack.hi)
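// A failed check crashes immediately by reading from address 0.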
TEXT runtime·stackcheck(SB), NOSPLIT, $0-0
	get_tls(CX)
	MOVL	g(CX), AX
	CMPL	(g_stack+stack_hi)(AX), SP
	JHI	2(PC)
	MOVL	0, AX
	CMPL	SP, (g_stack+stack_lo)(AX)
	JHI	2(PC)
	MOVL	0, AX
	RET

// int64 runtime·cputicks(void)
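// RDTSC leaves the low 32 bits of the counter in AX and the high 32 bits
// in DX; the shift and add below combine them into one 64-bit result.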
TEXT runtime·cputicks(SB),NOSPLIT,$0-0
	RDTSC
	SHLQ	$32, DX
	ADDQ	DX, AX
	MOVQ	AX, ret+0(FP)
	RET

// hash function using AES hardware instructions
// For now, our one amd64p32 system (NaCl) does not
// support the AES instructions, so we have not bothered to
// write the implementations. Copy and adjust the ones
// in asm_amd64.s when the time comes.
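// The stubs below exist only to satisfy references from the generic hash
// code; since AES hashing is never selected on this platform (useAeshash
// presumably stays false), returning the uninitialized AX is harmless.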

TEXT runtime·aeshash(SB),NOSPLIT,$0-20
	MOVL	AX, ret+16(FP)
	RET

TEXT runtime·aeshashstr(SB),NOSPLIT,$0-12
	MOVL	AX, ret+8(FP)
	RET

TEXT runtime·aeshash32(SB),NOSPLIT,$0-12
	MOVL	AX, ret+8(FP)
	RET

TEXT runtime·aeshash64(SB),NOSPLIT,$0-12
	MOVL	AX, ret+8(FP)
	RET

TEXT runtime·return0(SB), NOSPLIT, $0
	MOVL	$0, AX
	RET

// The top-most function running on a goroutine
// returns to goexit+PCQuantum.
TEXT runtime·goexit(SB),NOSPLIT,$0-0
	BYTE	$0x90	// NOP
	CALL	runtime·goexit1(SB)	// does not return
	// traceback from goexit1 must hit code range of goexit
	BYTE	$0x90	// NOP

TEXT ·checkASM(SB),NOSPLIT,$0-1
	MOVB	$1, ret+0(FP)
	RET

// gcWriteBarrier performs a heap pointer write and informs the GC.
//
// gcWriteBarrier does NOT follow the Go ABI. It takes two arguments:
// - DI is the destination of the write
// - AX is the value being written at DI
// It clobbers FLAGS and SI. It does not clobber any other general-purpose registers,
// but may clobber others (e.g., SSE registers).
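// Note that the pointer store itself happens on the ret path below, after
// the new value and the old *slot have been recorded in the write barrier
// buffer (and the buffer flushed if it was full).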
TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$88
	// Save the registers clobbered by the fast path. This is slightly
	// faster than having the caller spill these.
	MOVQ	R14, 72(SP)
	MOVQ	R13, 80(SP)
	// TODO: Consider passing g.m.p in as an argument so they can be shared
	// across a sequence of write barriers.
	get_tls(R13)
	MOVL	g(R13), R13
	MOVL	g_m(R13), R13
	MOVL	m_p(R13), R13
	MOVL	(p_wbBuf+wbBuf_next)(R13), R14
	// Increment wbBuf.next position.
	LEAL	8(R14), R14
	MOVL	R14, (p_wbBuf+wbBuf_next)(R13)
	CMPL	R14, (p_wbBuf+wbBuf_end)(R13)
	// Record the write.
	MOVL	AX, -8(R14)	// Record value
	MOVL	(DI), R13	// TODO: This turns bad writes into bad reads.
	MOVL	R13, -4(R14)	// Record *slot
	// Is the buffer full? (flags set in CMPL above)
	JEQ	flush
ret:
	MOVQ	72(SP), R14
	MOVQ	80(SP), R13
	// Do the write.
	MOVL	AX, (DI)
	RET	// Clobbers SI on NaCl

flush:
	// Save all general purpose registers since these could be
	// clobbered by wbBufFlush and were not saved by the caller.
	// It is possible for wbBufFlush to clobber other registers
	// (e.g., SSE registers), but the compiler takes care of saving
	// those in the caller if necessary. This strikes a balance
	// with registers that are likely to be used.
	//
	// We don't have type information for these, but all code under
	// here is NOSPLIT, so nothing will observe these.
	//
	// TODO: We could strike a different balance; e.g., saving X0
	// and not saving GP registers that are less likely to be used.
	MOVL	DI, 0(SP)	// Also first argument to wbBufFlush
	MOVL	AX, 4(SP)	// Also second argument to wbBufFlush
	MOVQ	BX, 8(SP)
	MOVQ	CX, 16(SP)
	MOVQ	DX, 24(SP)
	// DI already saved
	// SI is always clobbered on nacl
	// BP is reserved on nacl
	MOVQ	R8, 32(SP)
	MOVQ	R9, 40(SP)
	MOVQ	R10, 48(SP)
	MOVQ	R11, 56(SP)
	MOVQ	R12, 64(SP)
	// R13 already saved
	// R14 already saved
	// R15 is reserved on nacl

	// This takes arguments DI and AX
	CALL	runtime·wbBufFlush(SB)

	MOVL	0(SP), DI
	MOVL	4(SP), AX
	MOVQ	8(SP), BX
	MOVQ	16(SP), CX
	MOVQ	24(SP), DX
	MOVQ	32(SP), R8
	MOVQ	40(SP), R9
	MOVQ	48(SP), R10
	MOVQ	56(SP), R11
	MOVQ	64(SP), R12
	JMP	ret

// Note: these functions use a special calling convention to save generated code space.
// Arguments are passed in registers, but the space for those arguments is allocated
// in the caller's stack frame. These stubs write the args into that stack space and
// then tail call to the corresponding runtime handler.
// The tail call makes these stubs disappear in backtraces.
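// For illustration (hypothetical compiler output, not part of this file),
// a failing bounds check on a[i] might materialize as:
//	MOVL	i, AX
//	MOVL	len_a, CX
//	CALL	runtime·panicIndex(SB)
// with the AX/CX values spilled into x+0(FP)/y+4(FP) by the stub below.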
TEXT runtime·panicIndex(SB),NOSPLIT,$0-8
	MOVL	AX, x+0(FP)
	MOVL	CX, y+4(FP)
	JMP	runtime·goPanicIndex(SB)
TEXT runtime·panicIndexU(SB),NOSPLIT,$0-8
	MOVL	AX, x+0(FP)
	MOVL	CX, y+4(FP)
	JMP	runtime·goPanicIndexU(SB)
TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-8
	MOVL	CX, x+0(FP)
	MOVL	DX, y+4(FP)
	JMP	runtime·goPanicSliceAlen(SB)
TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-8
	MOVL	CX, x+0(FP)
	MOVL	DX, y+4(FP)
	JMP	runtime·goPanicSliceAlenU(SB)
TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-8
	MOVL	CX, x+0(FP)
	MOVL	DX, y+4(FP)
	JMP	runtime·goPanicSliceAcap(SB)
TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-8
	MOVL	CX, x+0(FP)
	MOVL	DX, y+4(FP)
	JMP	runtime·goPanicSliceAcapU(SB)
TEXT runtime·panicSliceB(SB),NOSPLIT,$0-8
	MOVL	AX, x+0(FP)
	MOVL	CX, y+4(FP)
	JMP	runtime·goPanicSliceB(SB)
TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-8
	MOVL	AX, x+0(FP)
	MOVL	CX, y+4(FP)
	JMP	runtime·goPanicSliceBU(SB)
TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-8
	MOVL	DX, x+0(FP)
	MOVL	BX, y+4(FP)
	JMP	runtime·goPanicSlice3Alen(SB)
TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-8
	MOVL	DX, x+0(FP)
	MOVL	BX, y+4(FP)
	JMP	runtime·goPanicSlice3AlenU(SB)
TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-8
	MOVL	DX, x+0(FP)
	MOVL	BX, y+4(FP)
	JMP	runtime·goPanicSlice3Acap(SB)
TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-8
	MOVL	DX, x+0(FP)
	MOVL	BX, y+4(FP)
	JMP	runtime·goPanicSlice3AcapU(SB)
TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-8
	MOVL	CX, x+0(FP)
	MOVL	DX, y+4(FP)
	JMP	runtime·goPanicSlice3B(SB)
TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-8
	MOVL	CX, x+0(FP)
	MOVL	DX, y+4(FP)
	JMP	runtime·goPanicSlice3BU(SB)
TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-8
	MOVL	AX, x+0(FP)
	MOVL	CX, y+4(FP)
	JMP	runtime·goPanicSlice3C(SB)
TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-8
	MOVL	AX, x+0(FP)
	MOVL	CX, y+4(FP)
	JMP	runtime·goPanicSlice3CU(SB)

// Extended versions for 64-bit indexes.
TEXT runtime·panicExtendIndex(SB),NOSPLIT,$0-12
	MOVL	SI, hi+0(FP)
	MOVL	AX, lo+4(FP)
	MOVL	CX, y+8(FP)
	JMP	runtime·goPanicExtendIndex(SB)
TEXT runtime·panicExtendIndexU(SB),NOSPLIT,$0-12
	MOVL	SI, hi+0(FP)
	MOVL	AX, lo+4(FP)
	MOVL	CX, y+8(FP)
	JMP	runtime·goPanicExtendIndexU(SB)
TEXT runtime·panicExtendSliceAlen(SB),NOSPLIT,$0-12
	MOVL	SI, hi+0(FP)
	MOVL	CX, lo+4(FP)
	MOVL	DX, y+8(FP)
	JMP	runtime·goPanicExtendSliceAlen(SB)
TEXT runtime·panicExtendSliceAlenU(SB),NOSPLIT,$0-12
	MOVL	SI, hi+0(FP)
	MOVL	CX, lo+4(FP)
	MOVL	DX, y+8(FP)
	JMP	runtime·goPanicExtendSliceAlenU(SB)
TEXT runtime·panicExtendSliceAcap(SB),NOSPLIT,$0-12
	MOVL	SI, hi+0(FP)
	MOVL	CX, lo+4(FP)
	MOVL	DX, y+8(FP)
	JMP	runtime·goPanicExtendSliceAcap(SB)
TEXT runtime·panicExtendSliceAcapU(SB),NOSPLIT,$0-12
	MOVL	SI, hi+0(FP)
	MOVL	CX, lo+4(FP)
	MOVL	DX, y+8(FP)
	JMP	runtime·goPanicExtendSliceAcapU(SB)
TEXT runtime·panicExtendSliceB(SB),NOSPLIT,$0-12
	MOVL	SI, hi+0(FP)
	MOVL	AX, lo+4(FP)
	MOVL	CX, y+8(FP)
	JMP	runtime·goPanicExtendSliceB(SB)
TEXT runtime·panicExtendSliceBU(SB),NOSPLIT,$0-12
	MOVL	SI, hi+0(FP)
	MOVL	AX, lo+4(FP)
	MOVL	CX, y+8(FP)
	JMP	runtime·goPanicExtendSliceBU(SB)
TEXT runtime·panicExtendSlice3Alen(SB),NOSPLIT,$0-12
	MOVL	SI, hi+0(FP)
	MOVL	DX, lo+4(FP)
	MOVL	BX, y+8(FP)
	JMP	runtime·goPanicExtendSlice3Alen(SB)
TEXT runtime·panicExtendSlice3AlenU(SB),NOSPLIT,$0-12
	MOVL	SI, hi+0(FP)
	MOVL	DX, lo+4(FP)
	MOVL	BX, y+8(FP)
	JMP	runtime·goPanicExtendSlice3AlenU(SB)
TEXT runtime·panicExtendSlice3Acap(SB),NOSPLIT,$0-12
	MOVL	SI, hi+0(FP)
	MOVL	DX, lo+4(FP)
	MOVL	BX, y+8(FP)
	JMP	runtime·goPanicExtendSlice3Acap(SB)
TEXT runtime·panicExtendSlice3AcapU(SB),NOSPLIT,$0-12
	MOVL	SI, hi+0(FP)
	MOVL	DX, lo+4(FP)
	MOVL	BX, y+8(FP)
	JMP	runtime·goPanicExtendSlice3AcapU(SB)
TEXT runtime·panicExtendSlice3B(SB),NOSPLIT,$0-12
	MOVL	SI, hi+0(FP)
	MOVL	CX, lo+4(FP)
	MOVL	DX, y+8(FP)
	JMP	runtime·goPanicExtendSlice3B(SB)
TEXT runtime·panicExtendSlice3BU(SB),NOSPLIT,$0-12
	MOVL	SI, hi+0(FP)
	MOVL	CX, lo+4(FP)
	MOVL	DX, y+8(FP)
	JMP	runtime·goPanicExtendSlice3BU(SB)
TEXT runtime·panicExtendSlice3C(SB),NOSPLIT,$0-12
	MOVL	SI, hi+0(FP)
	MOVL	AX, lo+4(FP)
	MOVL	CX, y+8(FP)
	JMP	runtime·goPanicExtendSlice3C(SB)
TEXT runtime·panicExtendSlice3CU(SB),NOSPLIT,$0-12
	MOVL	SI, hi+0(FP)
	MOVL	AX, lo+4(FP)
	MOVL	CX, y+8(FP)
	JMP	runtime·goPanicExtendSlice3CU(SB)