
Text file src/runtime/internal/atomic/asm_ppc64x.s

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ppc64 ppc64le

#include "textflag.h"

// bool cas(uint32 *ptr, uint32 old, uint32 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else
//		return 0;
TEXT runtime∕internal∕atomic·Cas(SB), NOSPLIT, $0-17
	MOVD	ptr+0(FP), R3
	MOVWZ	old+8(FP), R4
	MOVWZ	new+12(FP), R5
	LWSYNC
cas_again:
	LWAR	(R3), R6	// load word and reserve *ptr
	CMPW	R6, R4
	BNE	cas_fail
	STWCCC	R5, (R3)	// store-conditional new
	BNE	cas_again	// reservation lost, retry
	MOVD	$1, R3
	LWSYNC
	MOVB	R3, ret+16(FP)
	RET
cas_fail:
	MOVB	R0, ret+16(FP)
	RET

// bool	runtime∕internal∕atomic·Cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else {
//		return 0;
//	}
TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-25
	MOVD	ptr+0(FP), R3
	MOVD	old+8(FP), R4
	MOVD	new+16(FP), R5
	LWSYNC
cas64_again:
	LDAR	(R3), R6
	CMP	R6, R4
	BNE	cas64_fail
	STDCCC	R5, (R3)
	BNE	cas64_again
	MOVD	$1, R3
	LWSYNC
	MOVB	R3, ret+24(FP)
	RET
cas64_fail:
	MOVB	R0, ret+24(FP)
	RET

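// bool CasRel(uint32 *ptr, uint32 old, uint32 new)
// Like Cas, but with release ordering only: the leading LWSYNC orders
// earlier memory operations before the store, and no barrier follows
// the successful store.
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else
//		return 0;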
TEXT runtime∕internal∕atomic·CasRel(SB), NOSPLIT, $0-17
	MOVD	ptr+0(FP), R3
	MOVWZ	old+8(FP), R4
	MOVWZ	new+12(FP), R5
	LWSYNC
cas_again:
	LWAR	(R3), $0, R6	// 0 = Mutex release hint
	CMPW	R6, R4
	BNE	cas_fail
	STWCCC	R5, (R3)
	BNE	cas_again
	MOVD	$1, R3
	MOVB	R3, ret+16(FP)
	RET
cas_fail:
	MOVB	R0, ret+16(FP)
	RET

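// uintptr and uint are 64 bits wide on ppc64/ppc64le, so the uintptr, uint,
// and int64 variants below simply forward to the 64-bit implementations.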
TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-25
	BR	runtime∕internal∕atomic·Cas64(SB)

TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16
	BR	runtime∕internal∕atomic·Load64(SB)

TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT|NOFRAME, $0-16
	BR	runtime∕internal∕atomic·Load64(SB)

TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-16
	BR	runtime∕internal∕atomic·Store64(SB)

TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-24
	BR	runtime∕internal∕atomic·Xadd64(SB)

TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-16
	BR	runtime∕internal∕atomic·Load64(SB)

TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-24
	BR	runtime∕internal∕atomic·Xadd64(SB)

// bool casp(void **val, void *old, void *new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
TEXT runtime∕internal∕atomic·Casp1(SB), NOSPLIT, $0-25
	BR	runtime∕internal∕atomic·Cas64(SB)

// uint32 xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4
	MOVW	delta+8(FP), R5
	LWSYNC
	LWAR	(R4), R3
	ADD	R5, R3
	STWCCC	R3, (R4)
	BNE	-3(PC)
	MOVW	R3, ret+16(FP)
	RET

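// uint64 xadd64(uint64 volatile *ptr, int64 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;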
TEXT runtime∕internal∕atomic·Xadd64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4
	MOVD	delta+8(FP), R5
	LWSYNC
	LDAR	(R4), R3
	ADD	R5, R3
	STDCCC	R3, (R4)
	BNE	-3(PC)
	MOVD	R3, ret+16(FP)
	RET

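// uint32 xchg(uint32 volatile *ptr, uint32 new)
// Atomically:
//	old = *ptr;
//	*ptr = new;
//	return old;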
TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4
	MOVW	new+8(FP), R5
	LWSYNC
	LWAR	(R4), R3
	STWCCC	R5, (R4)
	BNE	-2(PC)
	ISYNC
	MOVW	R3, ret+16(FP)
	RET

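// uint64 xchg64(uint64 volatile *ptr, uint64 new)
// Atomically:
//	old = *ptr;
//	*ptr = new;
//	return old;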
TEXT runtime∕internal∕atomic·Xchg64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4
	MOVD	new+8(FP), R5
	LWSYNC
	LDAR	(R4), R3
	STDCCC	R5, (R4)
	BNE	-2(PC)
	ISYNC
	MOVD	R3, ret+16(FP)
	RET

TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-24
	BR	runtime∕internal∕atomic·Xchg64(SB)

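// void StorepNoWB(void **ptr, void *val)
// Stores val to *ptr atomically, without a write barrier.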
TEXT runtime∕internal∕atomic·StorepNoWB(SB), NOSPLIT, $0-16
	BR	runtime∕internal∕atomic·Store64(SB)

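// void store(uint32 volatile *ptr, uint32 val)
// Atomically:
//	*ptr = val;
// The leading SYNC provides a full memory barrier before the store.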
TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R3
	MOVW	val+8(FP), R4
	SYNC
	MOVW	R4, 0(R3)
	RET

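// void store64(uint64 volatile *ptr, uint64 val)
// Atomically:
//	*ptr = val;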
TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
	MOVD	ptr+0(FP), R3
	MOVD	val+8(FP), R4
	SYNC
	MOVD	R4, 0(R3)
	RET

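// void storerel(uint32 volatile *ptr, uint32 val)
// Store with release ordering: the LWSYNC orders earlier memory
// operations before the store.
// Atomically:
//	*ptr = val;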
TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R3
	MOVW	val+8(FP), R4
	LWSYNC
	MOVW	R4, 0(R3)
	RET

// void runtime∕internal∕atomic·Or8(byte volatile*, byte);
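// Atomically:
//	*ptr |= val;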
TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R3
	MOVBZ	val+8(FP), R4
	LWSYNC
again:
	LBAR	(R3), R6
	OR	R4, R6
	STBCCC	R6, (R3)
	BNE	again
	RET

// void runtime∕internal∕atomic·And8(byte volatile*, byte);
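// Atomically:
//	*ptr &= val;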
TEXT runtime∕internal∕atomic·And8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R3
	MOVBZ	val+8(FP), R4
	LWSYNC
again:
	LBAR	(R3), R6
	AND	R4, R6
	STBCCC	R6, (R3)
	BNE	again
	RET
