Text file src/runtime/internal/atomic/atomic_arm64.s
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "textflag.h"

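// The routines below implement the runtime∕internal∕atomic operations for
// arm64. Plain loads and stores use the load-acquire/store-release
// instructions (LDAR/STLR); the read-modify-write operations use
// load-exclusive/store-exclusive loops (LDAXR/STLXR), retrying until the
// exclusive store succeeds.
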
// uint32 runtime∕internal∕atomic·Load(uint32 volatile* addr)
TEXT ·Load(SB),NOSPLIT,$0-12
	MOVD	ptr+0(FP), R0
	LDARW	(R0), R0
	MOVW	R0, ret+8(FP)
	RET

// uint8 runtime∕internal∕atomic·Load8(uint8 volatile* addr)
TEXT ·Load8(SB),NOSPLIT,$0-9
	MOVD	ptr+0(FP), R0
	LDARB	(R0), R0
	MOVB	R0, ret+8(FP)
	RET

// uint64 runtime∕internal∕atomic·Load64(uint64 volatile* addr)
TEXT ·Load64(SB),NOSPLIT,$0-16
	MOVD	ptr+0(FP), R0
	LDAR	(R0), R0
	MOVD	R0, ret+8(FP)
	RET

// void *runtime∕internal∕atomic·Loadp(void *volatile *addr)
TEXT ·Loadp(SB),NOSPLIT,$0-16
	MOVD	ptr+0(FP), R0
	LDAR	(R0), R0
	MOVD	R0, ret+8(FP)
	RET

// uint32 runtime∕internal∕atomic·LoadAcq(uint32 volatile* addr)
TEXT ·LoadAcq(SB),NOSPLIT,$0-12
	B	·Load(SB)

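// void runtime∕internal∕atomic·StorepNoWB(void **ptr, void *val)
// Stores val at *ptr without a write barrier; on arm64 this is the same
// release store as Store64.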
TEXT runtime∕internal∕atomic·StorepNoWB(SB), NOSPLIT, $0-16
	B	runtime∕internal∕atomic·Store64(SB)

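// void runtime∕internal∕atomic·StoreRel(uint32 *ptr, uint32 val)
// Store with release semantics; identical to Store on arm64.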
TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-12
	B	runtime∕internal∕atomic·Store(SB)

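// void runtime∕internal∕atomic·Store(uint32 *ptr, uint32 val)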
TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
	STLRW	R1, (R0)
	RET

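// void runtime∕internal∕atomic·Store64(uint64 *ptr, uint64 val)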
TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
	MOVD	ptr+0(FP), R0
	MOVD	val+8(FP), R1
	STLR	R1, (R0)
	RET

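// uint32 runtime∕internal∕atomic·Xchg(uint32 *ptr, uint32 new)
// Atomically:
//	old = *ptr;
//	*ptr = new;
//	return old;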
TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-20
again:
	MOVD	ptr+0(FP), R0
	MOVW	new+8(FP), R1
	LDAXRW	(R0), R2		// load-acquire exclusive of the old value
	STLXRW	R1, (R0), R3		// store-release exclusive; R3 != 0 on failure
	CBNZ	R3, again		// retry if the exclusive store failed
	MOVW	R2, ret+16(FP)
	RET

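// uint64 runtime∕internal∕atomic·Xchg64(uint64 *ptr, uint64 new)
// Atomically:
//	old = *ptr;
//	*ptr = new;
//	return old;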
TEXT runtime∕internal∕atomic·Xchg64(SB), NOSPLIT, $0-24
again:
	MOVD	ptr+0(FP), R0
	MOVD	new+8(FP), R1
	LDAXR	(R0), R2
	STLXR	R1, (R0), R3
	CBNZ	R3, again
	MOVD	R2, ret+16(FP)
	RET

// bool runtime∕internal∕atomic·Cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else {
//		return 0;
//	}
TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-25
	MOVD	ptr+0(FP), R0
	MOVD	old+8(FP), R1
	MOVD	new+16(FP), R2
again:
	LDAXR	(R0), R3
	CMP	R1, R3
	BNE	ok			// values differ: return 0 below
	STLXR	R2, (R0), R3
	CBNZ	R3, again		// retry if the exclusive store failed
ok:
	CSET	EQ, R0			// R0 = 1 if the comparison succeeded, else 0
	MOVB	R0, ret+24(FP)
	RET

// uint32 runtime∕internal∕atomic·Xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-20
again:
	MOVD	ptr+0(FP), R0
	MOVW	delta+8(FP), R1
	LDAXRW	(R0), R2
	ADDW	R2, R1, R2
	STLXRW	R2, (R0), R3
	CBNZ	R3, again
	MOVW	R2, ret+16(FP)
	RET

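// uint64 runtime∕internal∕atomic·Xadd64(uint64 volatile *ptr, int64 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;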
TEXT runtime∕internal∕atomic·Xadd64(SB), NOSPLIT, $0-24
again:
	MOVD	ptr+0(FP), R0
	MOVD	delta+8(FP), R1
	LDAXR	(R0), R2
	ADD	R2, R1, R2
	STLXR	R2, (R0), R3
	CBNZ	R3, again
	MOVD	R2, ret+16(FP)
	RET

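// uintptr runtime∕internal∕atomic·Xchguintptr(uintptr *ptr, uintptr new)
// uintptr is 64 bits on arm64, so this is just Xchg64.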
TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-24
	B	runtime∕internal∕atomic·Xchg64(SB)

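// void runtime∕internal∕atomic·And8(uint8 *ptr, uint8 val)
// Atomically:
//	*ptr &= val;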
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
	LDAXRB	(R0), R2
	AND	R1, R2
	STLXRB	R2, (R0), R3
	CBNZ	R3, -3(PC)		// retry from LDAXRB if the exclusive store failed
	RET

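// void runtime∕internal∕atomic·Or8(uint8 *ptr, uint8 val)
// Atomically:
//	*ptr |= val;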
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
	LDAXRB	(R0), R2
	ORR	R1, R2
	STLXRB	R2, (R0), R3
	CBNZ	R3, -3(PC)		// retry from LDAXRB if the exclusive store failed
	RET
