...

Text file src/runtime/internal/atomic/asm_mipsx.s

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build mips mipsle

#include "textflag.h"

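// bool Cas(uint32 *ptr, uint32 old, uint32 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else
//		return 0;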
TEXT ·Cas(SB),NOSPLIT,$0-13
	MOVW	ptr+0(FP), R1
	MOVW	old+4(FP), R2
	MOVW	new+8(FP), R5
	SYNC
try_cas:
	MOVW	R5, R3
	LL	(R1), R4	// R4 = *R1
	BNE	R2, R4, cas_fail
	SC	R3, (R1)	// *R1 = R3
	BEQ	R3, try_cas	// SC failed (R3 == 0), retry
	SYNC
	MOVB	R3, ret+12(FP)	// return true
	RET
cas_fail:
	MOVB	R0, ret+12(FP)	// return false
	RET

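// void Store(uint32 *ptr, uint32 val)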
TEXT ·Store(SB),NOSPLIT,$0-8
	MOVW	ptr+0(FP), R1
	MOVW	val+4(FP), R2
	SYNC
	MOVW	R2, 0(R1)
	SYNC
	RET

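// uint32 Load(uint32 volatile *ptr)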
TEXT ·Load(SB),NOSPLIT,$0-8
	MOVW	ptr+0(FP), R1
	SYNC
	MOVW	0(R1), R1
	SYNC
	MOVW	R1, ret+4(FP)
	RET

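// uint8 Load8(uint8 volatile *ptr)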
TEXT ·Load8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	SYNC
	MOVB	0(R1), R1
	SYNC
	MOVB	R1, ret+4(FP)
	RET

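// uint32 Xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;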
TEXT ·Xadd(SB),NOSPLIT,$0-12
	MOVW	ptr+0(FP), R2
	MOVW	delta+4(FP), R3
	SYNC
try_xadd:
	LL	(R2), R1	// R1 = *R2
	ADDU	R1, R3, R4
	MOVW	R4, R1	// R1 = new value, to be returned
	SC	R4, (R2)	// *R2 = R4
	BEQ	R4, try_xadd	// SC failed (R4 == 0), retry
	SYNC
	MOVW	R1, ret+8(FP)
	RET

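// uint32 Xchg(uint32 volatile *ptr, uint32 new)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;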
TEXT ·Xchg(SB),NOSPLIT,$0-12
	MOVW	ptr+0(FP), R2
	MOVW	new+4(FP), R5
	SYNC
try_xchg:
	MOVW	R5, R3
	LL	(R2), R1	// R1 = *R2 (old value, to be returned)
	SC	R3, (R2)	// *R2 = R3
	BEQ	R3, try_xchg	// SC failed (R3 == 0), retry
	SYNC
	MOVW	R1, ret+8(FP)
	RET

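// The remaining functions simply tail-jump to the implementations above.
// Load64 and Xadd64 are defined outside this file.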
TEXT ·Casuintptr(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·CasRel(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·Loaduintptr(SB),NOSPLIT,$0-8
	JMP	·Load(SB)

TEXT ·Loaduint(SB),NOSPLIT,$0-8
	JMP	·Load(SB)

TEXT ·Loadp(SB),NOSPLIT,$-0-8
	JMP	·Load(SB)

TEXT ·Storeuintptr(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

TEXT ·Xadduintptr(SB),NOSPLIT,$0-12
	JMP	·Xadd(SB)

TEXT ·Loadint64(SB),NOSPLIT,$0-12
	JMP	·Load64(SB)

TEXT ·Xaddint64(SB),NOSPLIT,$0-20
	JMP	·Xadd64(SB)

TEXT ·Casp1(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·Xchguintptr(SB),NOSPLIT,$0-12
	JMP	·Xchg(SB)

TEXT ·StorepNoWB(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

TEXT ·StoreRel(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

// void	Or8(byte volatile*, byte);
TEXT ·Or8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	MOVBU	val+4(FP), R2
	MOVW	$~3, R3	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	AND	R1, R3
#ifdef GOARCH_mips
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R1
#endif
	AND	$3, R1, R4	// R4 = ((ptr & 3) * 8)
	SLL	$3, R4
	SLL	R4, R2, R2	// Shift val for aligned ptr. R2 = val << R4
	SYNC
try_or8:
	LL	(R3), R4	// R4 = *R3
	OR	R2, R4
	SC	R4, (R3)	// *R3 = R4
	BEQ	R4, try_or8	// SC failed (R4 == 0), retry
	SYNC
	RET

// void	And8(byte volatile*, byte);
TEXT ·And8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	MOVBU	val+4(FP), R2
	MOVW	$~3, R3	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	AND	R1, R3
#ifdef GOARCH_mips
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R1
#endif
	AND	$3, R1, R4	// R4 = ((ptr & 3) * 8)
	SLL	$3, R4
	MOVW	$0xFF, R5
	SLL	R4, R2
	SLL	R4, R5
	NOR	R0, R5	// R5 = ^(0xFF << R4)
	OR	R5, R2	// Shift val for aligned ptr. R2 = val << R4 | ^(0xFF << R4)
	SYNC
try_and8:
	LL	(R3), R4	// R4 = *R3
	AND	R2, R4
	SC	R4, (R3)	// *R3 = R4
	BEQ	R4, try_and8	// SC failed (R4 == 0), retry
	SYNC
	RET
