src/runtime/internal/atomic/asm_amd64p32.s
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "textflag.h"

// bool Cas(int32 *val, int32 old, int32 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
TEXT runtime∕internal∕atomic·Cas(SB), NOSPLIT, $0-17
	MOVL	ptr+0(FP), BX
	MOVL	old+4(FP), AX
	MOVL	new+8(FP), CX
	LOCK
	CMPXCHGL	CX, 0(BX)	// if *BX == AX { *BX = CX }; ZF records success
	SETEQ	ret+16(FP)	// boolean result: 1 if the swap happened
	RET

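// A rough Go-level sketch of how Cas is typically used (illustrative
// only; incr is a hypothetical helper, not part of this package):
//
//	func incr(addr *uint32) {
//		for {
//			old := atomic.Load(addr)
//			if atomic.Cas(addr, old, old+1) {
//				return
//			}
//		}
//	}
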
TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-17
	JMP	runtime∕internal∕atomic·Cas(SB)

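// CasRel is Cas with release semantics. On amd64 a LOCK'd CMPXCHG is
// already a full memory barrier, so plain Cas suffices here.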
TEXT runtime∕internal∕atomic·CasRel(SB), NOSPLIT, $0-17
	JMP	runtime∕internal∕atomic·Cas(SB)

TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-12
	JMP	runtime∕internal∕atomic·Load(SB)

TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT, $0-12
	JMP	runtime∕internal∕atomic·Load(SB)

TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-8
	JMP	runtime∕internal∕atomic·Store(SB)

TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-16
	JMP	runtime∕internal∕atomic·Load64(SB)

TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-24
	JMP	runtime∕internal∕atomic·Xadd64(SB)

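// Note: on amd64p32, pointers are 32 bits wide, so pointer arguments
// are loaded with MOVL while 64-bit values use MOVQ.
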
// bool runtime∕internal∕atomic·Cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else {
//		return 0;
//	}
TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-25
	MOVL	ptr+0(FP), BX
	MOVQ	old+8(FP), AX
	MOVQ	new+16(FP), CX
	LOCK
	CMPXCHGQ	CX, 0(BX)
	SETEQ	ret+24(FP)
	RET

// bool Casp1(void **val, void *old, void *new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
TEXT runtime∕internal∕atomic·Casp1(SB), NOSPLIT, $0-17
	MOVL	ptr+0(FP), BX
	MOVL	old+4(FP), AX
	MOVL	new+8(FP), CX
	LOCK
	CMPXCHGL	CX, 0(BX)
	SETEQ	ret+16(FP)
	RET
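
// Note: Casp1 itself performs no write barrier; runtime callers wrap
// it (see the runtime's atomic_pointer.go) where a barrier is required.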

// uint32 Xadd(uint32 volatile *val, int32 delta)
// Atomically:
//	*val += delta;
//	return *val;
TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	delta+4(FP), AX
	MOVL	AX, CX
	LOCK
	XADDL	AX, 0(BX)	// AX = old value; *val = old + delta
	ADDL	CX, AX		// AX = new value (old + delta)
	MOVL	AX, ret+8(FP)
	RET

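// Go-level sketch of Xadd semantics (illustrative only, assuming n is
// a uint32 variable):
//
//	new := atomic.Xadd(&n, 1) // atomically n += 1; new holds the updated value

// uint64 Xadd64(uint64 volatile *val, int64 delta)
// Atomically:
//	*val += delta;
//	return *val;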
TEXT runtime∕internal∕atomic·Xadd64(SB), NOSPLIT, $0-24
	MOVL	ptr+0(FP), BX
	MOVQ	delta+8(FP), AX
	MOVQ	AX, CX
	LOCK
	XADDQ	AX, 0(BX)	// AX = old value; *val = old + delta
	ADDQ	CX, AX		// AX = new value (old + delta)
	MOVQ	AX, ret+16(FP)
	RET

TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-12
	JMP	runtime∕internal∕atomic·Xadd(SB)

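// uint32 Xchg(uint32 volatile *val, uint32 new)
// Atomically:
//	old = *val;
//	*val = new;
//	return old;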
TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	new+4(FP), AX
	XCHGL	AX, 0(BX)	// XCHG with a memory operand is implicitly LOCKed
	MOVL	AX, ret+8(FP)
	RET

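// uint64 Xchg64(uint64 volatile *val, uint64 new)
// Atomically:
//	old = *val;
//	*val = new;
//	return old;
// ptr must be 8-byte aligned; the check below faults deliberately otherwise.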
TEXT runtime∕internal∕atomic·Xchg64(SB), NOSPLIT, $0-24
	MOVL	ptr+0(FP), BX
	MOVQ	new+8(FP), AX
	TESTL	$7, BX
	JZ	2(PC)
	MOVL	0, BX	// crash with nil ptr deref when ptr is not 8-byte aligned
	XCHGQ	AX, 0(BX)
	MOVQ	AX, ret+16(FP)
	RET

TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-12
	JMP	runtime∕internal∕atomic·Xchg(SB)

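// void StorepNoWB(void **ptr, void *val)
// Atomically stores val at *ptr, without a write barrier; callers are
// responsible for any required GC write-barrier work.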
TEXT runtime∕internal∕atomic·StorepNoWB(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), AX
	XCHGL	AX, 0(BX)
	RET

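// void Store(uint32 volatile *ptr, uint32 val)
// XCHG is used rather than a plain MOV so the store also acts as a
// full memory barrier.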
TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), AX
	XCHGL	AX, 0(BX)
	RET

TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-8
	JMP	runtime∕internal∕atomic·Store(SB)

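// void Store64(uint64 volatile *ptr, uint64 val)
// Like Store, the XCHG doubles as a full memory barrier.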
TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
	MOVL	ptr+0(FP), BX
	MOVQ	val+8(FP), AX
	XCHGQ	AX, 0(BX)
	RET

// void runtime∕internal∕atomic·Or8(byte volatile*, byte);
TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-5
	MOVL	ptr+0(FP), BX
	MOVB	val+4(FP), AX
	LOCK
	ORB	AX, 0(BX)
	RET

// void runtime∕internal∕atomic·And8(byte volatile*, byte);
TEXT runtime∕internal∕atomic·And8(SB), NOSPLIT, $0-5
	MOVL	ptr+0(FP), BX
	MOVB	val+4(FP), AX
	LOCK
	ANDB	AX, 0(BX)
	RET
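
// Go-level sketch of Or8/And8 usage (illustrative only, assuming
// flags is a uint8 variable):
//
//	atomic.Or8(&flags, 1<<2)          // atomically set bit 2
//	atomic.And8(&flags, ^uint8(1<<2)) // atomically clear bit 2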