// File: src/runtime/internal/atomic/asm_s390x.s
1 // Copyright 2016 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 #include "textflag.h"
6
// func Store(ptr *uint32, val uint32)
// Atomically store val to *ptr: a plain 32-bit store followed by
// SYNC, a full memory barrier, so the store is globally visible
// before any subsequent memory access by this goroutine.
TEXT ·Store(SB), NOSPLIT, $0
	MOVD	ptr+0(FP), R2	// R2 = ptr
	MOVWZ	val+8(FP), R3	// R3 = val (zero-extended to 64 bits)
	MOVW	R3, 0(R2)	// *ptr = val
	SYNC			// full barrier: publish the store
	RET
14
// func Store64(ptr *uint64, val uint64)
// Atomically store a 64-bit val to *ptr; the trailing SYNC is a
// full memory barrier that publishes the store before any later
// memory access.
TEXT ·Store64(SB), NOSPLIT, $0
	MOVD	ptr+0(FP), R2	// R2 = ptr
	MOVD	val+8(FP), R3	// R3 = val
	MOVD	R3, 0(R2)	// *ptr = val
	SYNC			// full barrier: publish the store
	RET
22
// func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
// Atomically store pointer val to *ptr WITHOUT a GC write barrier.
// Identical instruction sequence to Store64 (pointers are 64-bit on
// s390x); callers are responsible for write-barrier correctness.
TEXT ·StorepNoWB(SB), NOSPLIT, $0
	MOVD	ptr+0(FP), R2	// R2 = ptr
	MOVD	val+8(FP), R3	// R3 = val
	MOVD	R3, 0(R2)	// *ptr = val
	SYNC			// full barrier: publish the store
	RET
30
// func Cas(ptr *uint32, old, new uint32) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return 1
//	} else {
//		return 0
//	}
// CS (Compare and Swap) is an interlocked-update instruction: it
// compares R4 with the word at 0(R3) and, if equal, stores R5 there,
// setting the condition code to reflect success or failure.
TEXT ·Cas(SB), NOSPLIT, $0-17
	MOVD	ptr+0(FP), R3	// R3 = ptr
	MOVWZ	old+8(FP), R4	// R4 = old (comparand)
	MOVWZ	new+12(FP), R5	// R5 = new (replacement)
	CS	R4, R5, 0(R3)	// if (R4 == 0(R3)) then 0(R3)= R5
	BNE	cas_fail	// condition code != equal => swap did not happen
	MOVB	$1, ret+16(FP)	// success
	RET
cas_fail:
	MOVB	$0, ret+16(FP)	// *ptr held a different value
	RET
50
// func Cas64(ptr *uint64, old, new uint64) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return 1
//	} else {
//		return 0
//	}
// CSG is the 64-bit interlocked compare-and-swap; see Cas above.
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOVD	ptr+0(FP), R3	// R3 = ptr
	MOVD	old+8(FP), R4	// R4 = old (comparand)
	MOVD	new+16(FP), R5	// R5 = new (replacement)
	CSG	R4, R5, 0(R3)	// if (R4 == 0(R3)) then 0(R3)= R5
	BNE	cas64_fail	// condition code != equal => swap did not happen
	MOVB	$1, ret+24(FP)	// success
	RET
cas64_fail:
	MOVB	$0, ret+24(FP)	// *ptr held a different value
	RET
70
// func Casuintptr(ptr *uintptr, old, new uintptr) bool
// uintptr is 64 bits on s390x, so tail-jump to the 64-bit CAS.
// Frame layout ($0-25) matches Cas64 exactly, so the arguments and
// result are read/written at the same FP offsets.
TEXT ·Casuintptr(SB), NOSPLIT, $0-25
	BR	·Cas64(SB)
74
// func CasRel(ptr *uint32, old, new uint32) bool
// Release-ordered CAS. On s390x the interlocked CS used by plain
// Cas is presumably already strong enough to provide release
// semantics, so this simply tail-jumps to Cas (same $0-17 frame).
TEXT ·CasRel(SB), NOSPLIT, $0-17
	BR	·Cas(SB)
78
// func Loaduintptr(ptr *uintptr) uintptr
// uintptr is 64 bits on s390x; tail-jump to the 64-bit load
// (same $0-16 frame layout).
TEXT ·Loaduintptr(SB), NOSPLIT, $0-16
	BR	·Load64(SB)
82
// func Loaduint(ptr *uint) uint
// uint is 64 bits on s390x; tail-jump to the 64-bit load
// (same $0-16 frame layout).
TEXT ·Loaduint(SB), NOSPLIT, $0-16
	BR	·Load64(SB)
86
// func Storeuintptr(ptr *uintptr, new uintptr)
// uintptr is 64 bits on s390x; tail-jump to the 64-bit store
// (same $0-16 frame layout).
TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
	BR	·Store64(SB)
90
// func Loadint64(ptr *int64) int64
// Signedness is irrelevant for a full 64-bit load; tail-jump to
// Load64 (same $0-16 frame layout).
TEXT ·Loadint64(SB), NOSPLIT, $0-16
	BR	·Load64(SB)
94
// func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
// uintptr is 64 bits on s390x; tail-jump to the 64-bit add-and-fetch
// (same $0-24 frame layout).
TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	BR	·Xadd64(SB)
98
// func Xaddint64(ptr *int64, delta int64) int64
// Two's-complement addition is identical for signed and unsigned,
// so tail-jump to Xadd64 (same $0-24 frame layout).
TEXT ·Xaddint64(SB), NOSPLIT, $0-24
	BR	·Xadd64(SB)
102
// func Casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return 1
//	} else {
//		return 0
//	}
// Pointers are 64 bits on s390x; tail-jump to the 64-bit CAS
// (same $0-25 frame layout). No write barrier is performed here.
TEXT ·Casp1(SB), NOSPLIT, $0-25
	BR	·Cas64(SB)
113
// func Xadd(ptr *uint32, delta int32) uint32
// Atomically:
//	*ptr += delta
//	return *ptr
// Classic CS loop: compute the new value from a snapshot and try to
// install it; a failed CS reloads the current value into R3, so the
// retry recomputes from fresh data. Returns the NEW value (R6).
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4	// R4 = ptr
	MOVW	delta+8(FP), R5	// R5 = delta
	MOVW	(R4), R3	// R3 = snapshot of *ptr
repeat:
	ADD	R5, R3, R6	// R6 = snapshot + delta
	CS	R3, R6, (R4)	// if R3==(R4) then (R4)=R6 else R3=(R4)
	BNE	repeat		// lost a race: R3 now holds fresh *ptr, retry
	MOVW	R6, ret+16(FP)	// return the updated value
	RET
128
// func Xadd64(ptr *uint64, delta int64) uint64
// 64-bit add-and-fetch via a CSG loop; see Xadd above for the
// retry pattern. Returns the NEW value.
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4	// R4 = ptr
	MOVD	delta+8(FP), R5	// R5 = delta
	MOVD	(R4), R3	// R3 = snapshot of *ptr
repeat:
	ADD	R5, R3, R6	// R6 = snapshot + delta
	CSG	R3, R6, (R4)	// if R3==(R4) then (R4)=R6 else R3=(R4)
	BNE	repeat		// lost a race: R3 now holds fresh *ptr, retry
	MOVD	R6, ret+16(FP)	// return the updated value
	RET
140
// func Xchg(ptr *uint32, new uint32) uint32
// Atomically swap: store new into *ptr and return the OLD value.
// CS loop: on failure, R6 is reloaded with the current *ptr, so the
// loop exits only once new has replaced the value held in R6.
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4	// R4 = ptr
	MOVW	new+8(FP), R3	// R3 = new
	MOVW	(R4), R6	// R6 = snapshot of *ptr (candidate old value)
repeat:
	CS	R6, R3, (R4)	// if R6==(R4) then (R4)=R3 else R6=(R4)
	BNE	repeat		// lost a race: R6 refreshed, retry
	MOVW	R6, ret+16(FP)	// return the previous value
	RET
151
// func Xchg64(ptr *uint64, new uint64) uint64
// 64-bit atomic swap via a CSG loop; see Xchg above.
// Returns the OLD value.
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4	// R4 = ptr
	MOVD	new+8(FP), R3	// R3 = new
	MOVD	(R4), R6	// R6 = snapshot of *ptr (candidate old value)
repeat:
	CSG	R6, R3, (R4)	// if R6==(R4) then (R4)=R3 else R6=(R4)
	BNE	repeat		// lost a race: R6 refreshed, retry
	MOVD	R6, ret+16(FP)	// return the previous value
	RET
162
// func Xchguintptr(ptr *uintptr, new uintptr) uintptr
// uintptr is 64 bits on s390x; tail-jump to the 64-bit swap
// (same $0-24 frame layout).
TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	BR	·Xchg64(SB)
166
// func Or8(ptr *uint8, val uint8)
// Atomically: *ptr |= val.
// There is no byte-wide interlocked OR, so shift val into its byte
// lane of the aligned 32-bit word containing *ptr and use a CS loop
// on that word. OR-ing zeros into the neighboring byte lanes leaves
// them unchanged.
// (Comment header fixed: code reads ptr+0(FP)/val+8(FP), not addr/v.)
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R3	// R3 = ptr
	MOVBZ	val+8(FP), R4	// R4 = val (zero-extended)
	// Calculate shift.
	MOVD	R3, R5
	AND	$3, R5		// R5 = byte offset within the 4-byte word
	XOR	$3, R5		// big endian - flip direction
	SLD	$3, R5		// MUL $8, R5: byte offset -> bit shift
	SLD	R5, R4		// move val into its byte lane, zeros elsewhere
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	AND	$-4, R3
	MOVWZ	0(R3), R6	// R6 = snapshot of containing word
again:
	OR	R4, R6, R7	// R7 = word with target byte OR-ed
	CS	R6, R7, 0(R3)	// if R6==(R3) then (R3)=R7 else R6=(R3)
	BNE	again		// lost a race: R6 refreshed, retry
	RET
185
// func And8(ptr *uint8, val uint8)
// Atomically: *ptr &= val.
// Dual of Or8: build a 64-bit mask that is val in the target byte
// lane and all-ones elsewhere (OR $-256 then rotate with RLLG), so
// AND-ing leaves the neighboring bytes of the aligned word intact.
// (Comment header fixed: code reads ptr+0(FP)/val+8(FP), not addr/v.)
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R3	// R3 = ptr
	MOVBZ	val+8(FP), R4	// R4 = val (zero-extended)
	// Calculate shift.
	MOVD	R3, R5
	AND	$3, R5		// R5 = byte offset within the 4-byte word
	XOR	$3, R5		// big endian - flip direction
	SLD	$3, R5		// MUL $8, R5: byte offset -> bit shift
	OR	$-256, R4	// create 0xffffffffffffffxx
	RLLG	R5, R4		// rotate mask so val lands in its byte lane
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	AND	$-4, R3
	MOVWZ	0(R3), R6	// R6 = snapshot of containing word
again:
	AND	R4, R6, R7	// R7 = word with target byte AND-ed
	CS	R6, R7, 0(R3)	// if R6==(R3) then (R3)=R7 else R6=(R3)
	BNE	again		// lost a race: R6 refreshed, retry
	RET
// (end of file)