
Text file src/runtime/race_ppc64le.s

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build race

#include "go_asm.h"
#include "go_tls.h"
#include "funcdata.h"
#include "textflag.h"

// The following functions allow calling the clang-compiled race runtime directly
// from Go code without going all the way through cgo.
// First, it's much faster (up to 50% speedup for real Go programs).
// Second, it eliminates race-related special cases from cgocall and the scheduler.
// Third, in the long term it will allow us to remove the cyclic runtime/race dependency on cmd/go.

// A brief recap of the ppc64le calling convention.
// Arguments are passed in R3, R4, R5 ...
// SP must be 16-byte aligned.

// Note that for ppc64x, LLVM follows the standard ABI and
// expects arguments in registers, so these functions move
// the arguments from storage to the registers expected
// by the ABI.

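// For orientation (an informal reminder, not a full statement of either ABI):
// under the ELFv2 convention used by the clang-built race runtime, R1 is the
// stack pointer, R2 the TOC pointer, R3-R10 carry integer arguments, R12 must
// hold the entry address of an indirectly called function, and R13 is the
// thread pointer (TLS). In Go's ppc64 assembly, g lives in R30 and R31 is the
// assembler temporary.
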
// When calling from Go to Clang tsan code:
// R3 is the 1st argument and is usually the ThreadState*.
// R4, R5, ... are the 2nd, 3rd, 4th, etc. arguments.

// When calling racecalladdr:
// R8 is the call target address.

// The race ctx (ThreadState*) goes in R3; racecalladdr
// loads it there.
//
// The sequence used to get the race ctx:
//    MOVD    runtime·tls_g(SB), R10	// offset of g's slot in TLS
//    MOVD    0(R13)(R10*1), g		// R13 = TLS for this thread, g = R30
//    MOVD    g_racectx(g), R3		// racectx == ThreadState

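// For reference, the Go side declares these bodies as assembly routines in
// runtime/race.go; the relevant declarations look roughly like:
//
//	//go:noescape
//	func raceread(addr uintptr)
//	//go:noescape
//	func racewrite(addr uintptr)
//	//go:noescape
//	func racereadrange(addr, size uintptr)
//	//go:noescape
//	func racewriterange(addr, size uintptr)
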
// func runtime·raceread(addr uintptr)
// Called from instrumented Go code.
TEXT	runtime·raceread(SB), NOSPLIT, $0-8
	MOVD	addr+0(FP), R4
	MOVD	LR, R5	// caller's pc; LR was set by the BL that called raceread
	// void __tsan_read(ThreadState *thr, void *addr, void *pc);
	MOVD	$__tsan_read(SB), R8
	BR	racecalladdr<>(SB)

TEXT	runtime·RaceRead(SB), NOSPLIT, $0-8
	BR	runtime·raceread(SB)

// void runtime·racereadpc(void *addr, void *callpc, void *pc)
TEXT	runtime·racereadpc(SB), NOSPLIT, $0-24
	MOVD	addr+0(FP), R4
	MOVD	callpc+8(FP), R5
	MOVD	pc+16(FP), R6
	// void __tsan_read_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
	MOVD	$__tsan_read_pc(SB), R8
	BR	racecalladdr<>(SB)

// func runtime·racewrite(addr uintptr)
// Called from instrumented Go code.
TEXT	runtime·racewrite(SB), NOSPLIT, $0-8
	MOVD	addr+0(FP), R4
	MOVD	LR, R5	// caller's pc; LR was set by the BL that called racewrite
	// void __tsan_write(ThreadState *thr, void *addr, void *pc);
	MOVD	$__tsan_write(SB), R8
	BR	racecalladdr<>(SB)

TEXT	runtime·RaceWrite(SB), NOSPLIT, $0-8
	JMP	runtime·racewrite(SB)

// void runtime·racewritepc(void *addr, void *callpc, void *pc)
TEXT	runtime·racewritepc(SB), NOSPLIT, $0-24
	MOVD	addr+0(FP), R4
	MOVD	callpc+8(FP), R5
	MOVD	pc+16(FP), R6
	// void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
	MOVD	$__tsan_write_pc(SB), R8
	BR	racecalladdr<>(SB)

// func runtime·racereadrange(addr, size uintptr)
// Called from instrumented Go code.
TEXT	runtime·racereadrange(SB), NOSPLIT, $0-16
	MOVD	addr+0(FP), R4
	MOVD	size+8(FP), R5
	MOVD	LR, R6
	// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_read_range(SB), R8
	BR	racecalladdr<>(SB)

// void runtime·racereadrangepc1(void *addr, uintptr sz, void *pc)
TEXT	runtime·racereadrangepc1(SB), NOSPLIT, $0-24
	MOVD	addr+0(FP), R4
	MOVD	size+8(FP), R5
	MOVD	pc+16(FP), R6
	ADD	$4, R6		// tsan wants return addr
	// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_read_range(SB), R8
	BR	racecalladdr<>(SB)

TEXT	runtime·RaceReadRange(SB), NOSPLIT, $0-24
	BR	runtime·racereadrange(SB)

// func runtime·racewriterange(addr, size uintptr)
// Called from instrumented Go code.
TEXT	runtime·racewriterange(SB), NOSPLIT, $0-16
	MOVD	addr+0(FP), R4
	MOVD	size+8(FP), R5
	MOVD	LR, R6
	// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_write_range(SB), R8
	BR	racecalladdr<>(SB)

TEXT	runtime·RaceWriteRange(SB), NOSPLIT, $0-16
	BR	runtime·racewriterange(SB)

// void runtime·racewriterangepc1(void *addr, uintptr sz, void *pc)
// Called from instrumented Go code.
TEXT	runtime·racewriterangepc1(SB), NOSPLIT, $0-24
	MOVD	addr+0(FP), R4
	MOVD	size+8(FP), R5
	MOVD	pc+16(FP), R6
	ADD	$4, R6			// tsan wants a return address, so skip the call instruction
	// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_write_range(SB), R8
	BR	racecalladdr<>(SB)

// Call a __tsan function from Go code.
// R8 = tsan function address
// R3 = *ThreadState, a.k.a. g_racectx from g (loaded here)
// R4 = addr passed to the __tsan function
//
// If addr is not in a race-instrumented region, return without calling tsan.
// Otherwise set up the goroutine context and branch to racecall; the other
// arguments were already set by the caller.
TEXT	racecalladdr<>(SB), NOSPLIT, $0-0
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R13)(R10*1), g
	MOVD	g_racectx(g), R3	// goroutine context
	// Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
	MOVD	runtime·racearenastart(SB), R9
	CMP	R4, R9
	BLT	data
	MOVD	runtime·racearenaend(SB), R9
	CMP	R4, R9
	BLT	call
data:
	MOVD	runtime·racedatastart(SB), R9
	CMP	R4, R9
	BLT	ret
	MOVD	runtime·racedataend(SB), R9
	CMP	R4, R9
	BGT	ret
call:
	// Careful!! racecall will save LR on its
	// stack, which is OK as long as racecalladdr
	// doesn't change in a way that allocates a stack frame.
	// racecall should return to the caller of
	// racecalladdr.
	BR	racecall<>(SB)
ret:
	RET
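
// In outline, the address filter above behaves roughly like (pseudocode, not
// the exact boundary conditions of the branches):
//
//	if (racearenastart <= addr && addr < racearenaend) ||
//		(racedatastart <= addr && addr <= racedataend) {
//		tsanfn(racectx, addr, ...)
//	}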

// func runtime·racefuncenterfp()
// Called from instrumented Go code.
// Like racefuncenter but doesn't pass an arg; uses the caller pc
// from the first slot on the stack.
TEXT	runtime·racefuncenterfp(SB), NOSPLIT, $0-0
	MOVD	0(R1), R8
	BR	racefuncenter<>(SB)

// func runtime·racefuncenter(pc uintptr)
// Called from instrumented Go code.
// Not used now, since gc/racewalk.go doesn't pass the
// correct caller pc and racefuncenterfp can do it instead.
TEXT	runtime·racefuncenter(SB), NOSPLIT, $0-8
	MOVD	callpc+0(FP), R8
	BR	racefuncenter<>(SB)

// Common code for racefuncenter/racefuncenterfp.
// R8 = caller's return address (the pc reported to tsan)
TEXT	racefuncenter<>(SB), NOSPLIT, $0-0
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R13)(R10*1), g
	MOVD	g_racectx(g), R3	// goroutine racectx aka *ThreadState
	MOVD	R8, R4			// caller pc set by caller in R8
	// void __tsan_func_enter(ThreadState *thr, void *pc);
	MOVD	$__tsan_func_enter(SB), R8
	BR	racecall<>(SB)
	RET

// func runtime·racefuncexit()
// Called from instrumented Go code.
TEXT	runtime·racefuncexit(SB), NOSPLIT, $0-0
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R13)(R10*1), g
	MOVD	g_racectx(g), R3	// goroutine racectx aka *ThreadState
	// void __tsan_func_exit(ThreadState *thr);
	MOVD	$__tsan_func_exit(SB), R8
	BR	racecall<>(SB)

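// For context, the compiler's race instrumentation (cmd/compile with -race)
// brackets each instrumented function with these calls; the shape is roughly:
//
//	func f(p *int) {
//		racefuncenterfp()
//		racewrite(uintptr(unsafe.Pointer(p)))
//		*p = 1
//		racefuncexit()
//	}
//
// The exact placement is decided by the compiler; this sketch only shows the pairing.
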
// Atomic operations for the sync/atomic package.
// Under the race detector these replace the normal sync/atomic entry points
// and route through the __tsan_go_atomic* functions.
// R6 = addr of the arguments passed to this function
// R3, R4, R5 are set in racecallatomic

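// The __tsan_go_atomic* entry points take the address of the caller's Go
// argument block (passed in R6 here) and operate on it in place: they read
// their operands from that block and write any results back into it, which is
// why most of the shims below contain no explicit result handling.
// Schematically, for two of the shims (an informal sketch of the layouts,
// not actual C syntax):
//
//	__tsan_go_atomic32_load(thr, cpc, pc, &{addr, ret})
//	__tsan_go_atomic32_compare_exchange(thr, cpc, pc, &{addr, old, new, swapped})
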
// Load atomic in tsan
TEXT	sync∕atomic·LoadInt32(SB), NOSPLIT, $0-0
	// void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic32_load(SB), R8
	ADD	$32, R1, R6	// addr of caller's 1st arg
	BR	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·LoadInt64(SB), NOSPLIT, $0-0
	// void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic64_load(SB), R8
	ADD	$32, R1, R6	// addr of caller's 1st arg
	BR	racecallatomic<>(SB)
	RET

TEXT	sync∕atomic·LoadUint32(SB), NOSPLIT, $0-0
	BR	sync∕atomic·LoadInt32(SB)

TEXT	sync∕atomic·LoadUint64(SB), NOSPLIT, $0-0
	BR	sync∕atomic·LoadInt64(SB)

TEXT	sync∕atomic·LoadUintptr(SB), NOSPLIT, $0-0
	BR	sync∕atomic·LoadInt64(SB)

TEXT	sync∕atomic·LoadPointer(SB), NOSPLIT, $0-0
	BR	sync∕atomic·LoadInt64(SB)

// Store atomic in tsan
TEXT	sync∕atomic·StoreInt32(SB), NOSPLIT, $0-0
	// void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic32_store(SB), R8
	ADD	$32, R1, R6	// addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT	sync∕atomic·StoreInt64(SB), NOSPLIT, $0-0
	// void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic64_store(SB), R8
	ADD	$32, R1, R6	// addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT	sync∕atomic·StoreUint32(SB), NOSPLIT, $0-0
	BR	sync∕atomic·StoreInt32(SB)

TEXT	sync∕atomic·StoreUint64(SB), NOSPLIT, $0-0
	BR	sync∕atomic·StoreInt64(SB)

TEXT	sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-0
	BR	sync∕atomic·StoreInt64(SB)

// Swap in tsan
TEXT	sync∕atomic·SwapInt32(SB), NOSPLIT, $0-0
	// void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic32_exchange(SB), R8
	ADD	$32, R1, R6	// addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT	sync∕atomic·SwapInt64(SB), NOSPLIT, $0-0
	// void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic64_exchange(SB), R8
	ADD	$32, R1, R6	// addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT	sync∕atomic·SwapUint32(SB), NOSPLIT, $0-0
	BR	sync∕atomic·SwapInt32(SB)

TEXT	sync∕atomic·SwapUint64(SB), NOSPLIT, $0-0
	BR	sync∕atomic·SwapInt64(SB)

TEXT	sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-0
	BR	sync∕atomic·SwapInt64(SB)

// Add atomic in tsan
TEXT	sync∕atomic·AddInt32(SB), NOSPLIT, $0-0
	// void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic32_fetch_add(SB), R8
	ADD	$64, R1, R6	// addr of caller's 1st arg
	BL	racecallatomic<>(SB)
	// tsan's fetch_add stores the old value in the result slot, but Go's
	// Add must return the new value, so add 'add' to the result.
	MOVW	add+8(FP), R3
	MOVW	ret+16(FP), R4
	ADD	R3, R4, R3
	MOVW	R3, ret+16(FP)
	RET
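
// In Go terms, the fix-up in AddInt32 (and in AddInt64 below) is roughly:
//
//	old := ret          // value stored by tsan's fetch_add
//	ret = old + add     // Go's Add returns the new value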

TEXT	sync∕atomic·AddInt64(SB), NOSPLIT, $0-0
	// void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic64_fetch_add(SB), R8
	ADD	$64, R1, R6	// addr of caller's 1st arg
	BL	racecallatomic<>(SB)
	// As in AddInt32: tsan's fetch_add stores the old value, but Go's
	// Add must return the new value, so add 'add' to the result.
	MOVD	add+8(FP), R3
	MOVD	ret+16(FP), R4
	ADD	R3, R4, R3
	MOVD	R3, ret+16(FP)
	RET

TEXT	sync∕atomic·AddUint32(SB), NOSPLIT, $0-0
	BR	sync∕atomic·AddInt32(SB)

TEXT	sync∕atomic·AddUint64(SB), NOSPLIT, $0-0
	BR	sync∕atomic·AddInt64(SB)

TEXT	sync∕atomic·AddUintptr(SB), NOSPLIT, $0-0
	BR	sync∕atomic·AddInt64(SB)

// CompareAndSwap in tsan
TEXT	sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-0
	// void __tsan_go_atomic32_compare_exchange(
	//   ThreadState *thr, uptr cpc, uptr pc, u8 *a)
	MOVD	$__tsan_go_atomic32_compare_exchange(SB), R8
	ADD	$32, R1, R6	// addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT	sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-0
	// void __tsan_go_atomic64_compare_exchange(
	//   ThreadState *thr, uptr cpc, uptr pc, u8 *a)
	MOVD	$__tsan_go_atomic64_compare_exchange(SB), R8
	ADD	$32, R1, R6	// addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT	sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0-0
	BR	sync∕atomic·CompareAndSwapInt32(SB)

TEXT	sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0-0
	BR	sync∕atomic·CompareAndSwapInt64(SB)

TEXT	sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-0
	BR	sync∕atomic·CompareAndSwapInt64(SB)

// Common function used to call tsan's atomic functions.
// R3 = *ThreadState (loaded below)
// R4, R5 = the cpc/pc pair passed to tsan (set below from the stack and R8)
// R6 = addr of the incoming arg list (set by the caller)
// R8 = addr of the target __tsan_go_atomic* function (set by the caller)
TEXT	racecallatomic<>(SB), NOSPLIT, $0-0
	// Trigger SIGSEGV early if the address passed to the atomic function is bad.
	MOVD	(R6), R7	// 1st arg is addr
	MOVD	(R7), R9	// segv here if addr is bad
	// Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
	MOVD	runtime·racearenastart(SB), R9
	CMP	R7, R9
	BLT	racecallatomic_data
	MOVD	runtime·racearenaend(SB), R9
	CMP	R7, R9
	BLT	racecallatomic_ok
racecallatomic_data:
	MOVD	runtime·racedatastart(SB), R9
	CMP	R7, R9
	BLT	racecallatomic_ignore
	MOVD	runtime·racedataend(SB), R9
	CMP	R7, R9
	BGE	racecallatomic_ignore
racecallatomic_ok:
	// Addr is within the good range, call the atomic function.
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R13)(R10*1), g
	MOVD	g_racectx(g), R3	// goroutine racectx aka *ThreadState
	MOVD	R8, R5			// pc = address of the tsan function being called
	MOVD	(R1), R4		// caller pc from stack
	BL	racecall<>(SB)		// BL needed to maintain stack consistency
	RET
racecallatomic_ignore:
	// Addr is outside the good range.
	// Call __tsan_go_ignore_sync_begin to ignore synchronization during the atomic op.
	// An attempt to synchronize on the address would cause a crash.
	MOVD	R8, R15	// save the original function
	MOVD	R6, R17	// save the original arg list addr
	MOVD	$__tsan_go_ignore_sync_begin(SB), R8	// func addr to call
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R13)(R10*1), g
	MOVD	g_racectx(g), R3	// goroutine context
	BL	racecall<>(SB)
	MOVD	R15, R8	// restore the original function
	MOVD	R17, R6	// restore arg list addr
	// Call the atomic function.
	// racecall will call LLVM race code which might clobber R30 (g),
	// so reload g from TLS first.
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R13)(R10*1), g

	MOVD	g_racectx(g), R3
	MOVD	R8, R4		// target function address used as the pc argument
	MOVD	(R1), R5	// caller pc from latest LR
	BL	racecall<>(SB)
	// Call __tsan_go_ignore_sync_end.
	MOVD	$__tsan_go_ignore_sync_end(SB), R8
	MOVD	g_racectx(g), R3	// goroutine context; g was reloaded above
	BL	racecall<>(SB)
	RET
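
// In outline, racecallatomic behaves roughly like (pseudocode; the cpc/pc
// arguments are elided):
//
//	if addr is in the race-instrumented ranges:
//		tsanfn(racectx, ..., &args)
//	else:
//		__tsan_go_ignore_sync_begin(racectx)
//		tsanfn(racectx, ..., &args)
//		__tsan_go_ignore_sync_end(racectx)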

// void runtime·racecall(void(*f)(...), ...)
// Calls C function f from race runtime and passes up to 4 arguments to it.
// The arguments are never heap-object-preserving pointers, so we pretend there are no arguments.
TEXT	runtime·racecall(SB), NOSPLIT, $0-0
	MOVD	fn+0(FP), R8
	MOVD	arg0+8(FP), R3
	MOVD	arg1+16(FP), R4
	MOVD	arg2+24(FP), R5
	MOVD	arg3+32(FP), R6
	JMP	racecall<>(SB)

// Switches to the g0 stack if necessary and calls the C function whose address
// is in R8. The arguments were already loaded into registers by the caller.
TEXT	racecall<>(SB), NOSPLIT, $0-0
	// Set the LR slot for the ppc64 ABI
	MOVD	LR, R10
	MOVD	R10, 0(R1)	// Go expectation
	MOVD	R10, 16(R1)	// C ABI
	// Get info from the current goroutine
	MOVD	runtime·tls_g(SB), R10	// g offset in TLS
	MOVD	0(R13)(R10*1), g	// R13 = current TLS
	MOVD	g_m(g), R7		// m for g
	MOVD	R1, R16			// callee-saved, preserved across the C call
	MOVD	m_g0(R7), R10		// g0 for m
	CMP	R10, g			// already on g0?
	BEQ	call			// if so, skip the stack switch
	MOVD	(g_sched+gobuf_sp)(R10), R1	// switch R1 to g0's stack
call:
	MOVD	R8, CTR			// R8 = target function address
	MOVD	R8, R12			// ELFv2 expects the entry address in R12
	BL	(CTR)
	XOR	R0, R0			// clear R0 on return from Clang
	MOVD	R16, R1			// restore R1; R16 is nonvolatile in the C ABI
	MOVD	runtime·tls_g(SB), R10	// find the correct g
	MOVD	0(R13)(R10*1), g
	MOVD	16(R1), R10		// LR was saved away, restore for return
	MOVD	R10, LR
	RET
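
// Roughly, racecall does:
//
//	save LR in the frame (both the Go slot at 0(R1) and the ELFv2 slot at 16(R1))
//	if g != m.g0 { R1 = m.g0.sched.sp }   // run C code on the g0 stack
//	R12 = CTR = target; BL (CTR)          // ELFv2 indirect call
//	restore R1, g, and LR
//
// R12 must hold the entry address because the callee's global entry point
// derives its TOC pointer (R2) from R12 under the ELFv2 ABI.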

// C->Go callback thunk that allows calling runtime·racesymbolize from C code.
// A direct Go->C race call has only switched SP, so finish the g->g0 switch by setting the correct g.
// The overall effect of the Go->C->Go call chain is similar to that of mcall.
// RARG0 contains the command code. RARG1 contains the command-specific context.
// See racecallback for command codes.
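// This thunk is what the race runtime calls back into; its address is handed
// to the C side during initialization (see raceinit in runtime/race.go, which
// passes it to __tsan_init), so tsan can invoke Go code such as the symbolizer.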
TEXT	runtime·racecallbackthunk(SB), NOSPLIT, $-8
	// Handle command raceGetProcCmd (0) here.
	// First, the code below assumes that we are on curg, while raceGetProcCmd
	// can be executed on g0. Second, it is called frequently, so it will
	// benefit from this fast path.
	XOR	R0, R0		// clear R0 since we came from C code
	CMP	R3, $0
	BNE	rest
	// TODO: don't modify g (R30) here; it is nonvolatile in the C ABI.
	// For now it is saved in R9 and restored below.
	MOVD	g, R9
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R13)(R10*1), g
	MOVD	g_m(g), R3
	MOVD	m_p(R3), R3
	MOVD	p_raceprocctx(R3), R3
	MOVD	R3, (R4)
	MOVD	R9, g		// restore g (R30)
	RET

	// This is all similar to what cgo does
	// Save registers according to the ppc64 ABI
rest:
	MOVD	LR, R10	// save link register
	MOVD	R10, 16(R1)
	MOVW	CR, R10
	MOVW	R10, 8(R1)
	MOVDU	R1, -336(R1)	// Allocate frame needed for register save area

	MOVD	R14, 40(R1)
	MOVD	R15, 48(R1)
	MOVD	R16, 56(R1)
	MOVD	R17, 64(R1)
	MOVD	R18, 72(R1)
	MOVD	R19, 80(R1)
	MOVD	R20, 88(R1)
	MOVD	R21, 96(R1)
	MOVD	R22, 104(R1)
	MOVD	R23, 112(R1)
	MOVD	R24, 120(R1)
	MOVD	R25, 128(R1)
	MOVD	R26, 136(R1)
	MOVD	R27, 144(R1)
	MOVD	R28, 152(R1)
	MOVD	R29, 160(R1)
	MOVD	g, 168(R1) // R30
	MOVD	R31, 176(R1)
	FMOVD	F14, 184(R1)
	FMOVD	F15, 192(R1)
	FMOVD	F16, 200(R1)
	FMOVD	F17, 208(R1)
	FMOVD	F18, 216(R1)
	FMOVD	F19, 224(R1)
	FMOVD	F20, 232(R1)
	FMOVD	F21, 240(R1)
	FMOVD	F22, 248(R1)
	FMOVD	F23, 256(R1)
	FMOVD	F24, 264(R1)
	FMOVD	F25, 272(R1)
	FMOVD	F26, 280(R1)
	FMOVD	F27, 288(R1)
	FMOVD	F28, 296(R1)
	FMOVD	F29, 304(R1)
	FMOVD	F30, 312(R1)
	FMOVD	F31, 320(R1)

	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R13)(R10*1), g

	MOVD	g_m(g), R7
	MOVD	m_g0(R7), g	// set g = m->g0
	MOVD	R3, cmd+0(FP)	// can't use R1 here?? these FP slots appear to line up
	MOVD	R4, ctx+8(FP)	// with where racecallback expects its arguments
	BL	runtime·racecallback(SB)
	// All registers are clobbered after Go code, reload.
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R13)(R10*1), g

	MOVD	g_m(g), R7
	MOVD	m_curg(R7), g	// restore g = m->curg
	MOVD	40(R1), R14
	MOVD	48(R1), R15
	MOVD	56(R1), R16
	MOVD	64(R1), R17
	MOVD	72(R1), R18
	MOVD	80(R1), R19
	MOVD	88(R1), R20
	MOVD	96(R1), R21
	MOVD	104(R1), R22
	MOVD	112(R1), R23
	MOVD	120(R1), R24
	MOVD	128(R1), R25
	MOVD	136(R1), R26
	MOVD	144(R1), R27
	MOVD	152(R1), R28
	MOVD	160(R1), R29
	MOVD	168(R1), g // R30
	MOVD	176(R1), R31
	FMOVD	184(R1), F14
	FMOVD	192(R1), F15
	FMOVD	200(R1), F16
	FMOVD	208(R1), F17
	FMOVD	216(R1), F18
	FMOVD	224(R1), F19
	FMOVD	232(R1), F20
	FMOVD	240(R1), F21
	FMOVD	248(R1), F22
	FMOVD	256(R1), F23
	FMOVD	264(R1), F24
	FMOVD	272(R1), F25
	FMOVD	280(R1), F26
	FMOVD	288(R1), F27
	FMOVD	296(R1), F28
	FMOVD	304(R1), F29
	FMOVD	312(R1), F30
	FMOVD	320(R1), F31

	ADD	$336, R1
	MOVD	8(R1), R10
	MOVFL	R10, $0xff	// restore CR
	MOVD	16(R1), R10	// restore the LR saved above (clobbered by the BL)
	MOVD	R10, LR
	RET

// tls_g: the TLS slot that holds g for each thread. Loading runtime·tls_g(SB)
// yields the slot's offset from the thread pointer (R13).
GLOBL runtime·tls_g+0(SB), TLSBSS+DUPOK, $8
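
// The slot is maintained by the runtime's TLS setup for each thread (see the
// save_g/load_g helpers in runtime/tls_ppc64x.s), so the R13-relative loads of
// tls_g in this file always observe the current g.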
