...

Source file src/runtime/internal/atomic/atomic_arm.go

     1	// Copyright 2009 The Go Authors. All rights reserved.
     2	// Use of this source code is governed by a BSD-style
     3	// license that can be found in the LICENSE file.
     4	
     5	// +build arm
     6	
     7	package atomic
     8	
     9	import (
    10		"internal/cpu"
    11		"unsafe"
    12	)
    13	
    14	// Export some functions via linkname to assembly in sync/atomic.
    15	//go:linkname Xchg
    16	//go:linkname Xchguintptr
    17	
// spinlock is a minimal test-and-set mutex built on the 32-bit Cas
// primitive. It guards the Go software emulations of 64-bit atomics
// below. The zero value is an unlocked spinlock.
type spinlock struct {
	v uint32
}
    21	
    22	//go:nosplit
    23	func (l *spinlock) lock() {
    24		for {
    25			if Cas(&l.v, 0, 1) {
    26				return
    27			}
    28		}
    29	}
    30	
// unlock releases the spinlock with an atomic store of 0, so the
// release is visible to the next locker's Cas.
//go:nosplit
func (l *spinlock) unlock() {
	Store(&l.v, 0)
}
    35	
// locktab is a fixed table of spinlocks protecting 64-bit memory words;
// addresses are hashed into it by addrLock. The pad field rounds each
// entry up to a cache line so locks in different slots do not false-share.
var locktab [57]struct {
	l   spinlock
	pad [cpu.CacheLinePadSize - unsafe.Sizeof(spinlock{})]byte
}
    40	
    41	func addrLock(addr *uint64) *spinlock {
    42		return &locktab[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(locktab))].l
    43	}
    44	
    45	// Atomic add and return new value.
    46	//go:nosplit
    47	func Xadd(val *uint32, delta int32) uint32 {
    48		for {
    49			oval := *val
    50			nval := oval + uint32(delta)
    51			if Cas(val, oval, nval) {
    52				return nval
    53			}
    54		}
    55	}
    56	
// Xadduintptr atomically adds delta to *ptr and returns the new value.
// Implemented in assembly.
//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
    59	
    60	//go:nosplit
    61	func Xchg(addr *uint32, v uint32) uint32 {
    62		for {
    63			old := *addr
    64			if Cas(addr, old, v) {
    65				return old
    66			}
    67		}
    68	}
    69	
    70	//go:nosplit
    71	func Xchguintptr(addr *uintptr, v uintptr) uintptr {
    72		return uintptr(Xchg((*uint32)(unsafe.Pointer(addr)), uint32(v)))
    73	}
    74	
// StorepNoWB atomically stores pointer v at addr, without a write barrier.
// Implemented in assembly.
// Not noescape -- it installs a pointer to addr.
func StorepNoWB(addr unsafe.Pointer, v unsafe.Pointer)

// Store atomically stores v into *addr. Implemented in assembly.
//go:noescape
func Store(addr *uint32, v uint32)

// StoreRel stores v into *addr; per its name this is a store with
// release ordering (implemented in assembly — semantics live there).
//go:noescape
func StoreRel(addr *uint32, v uint32)
    83	
    84	//go:nosplit
    85	func goCas64(addr *uint64, old, new uint64) bool {
    86		if uintptr(unsafe.Pointer(addr))&7 != 0 {
    87			*(*int)(nil) = 0 // crash on unaligned uint64
    88		}
    89		_ = *addr // if nil, fault before taking the lock
    90		var ok bool
    91		addrLock(addr).lock()
    92		if *addr == old {
    93			*addr = new
    94			ok = true
    95		}
    96		addrLock(addr).unlock()
    97		return ok
    98	}
    99	
   100	//go:nosplit
   101	func goXadd64(addr *uint64, delta int64) uint64 {
   102		if uintptr(unsafe.Pointer(addr))&7 != 0 {
   103			*(*int)(nil) = 0 // crash on unaligned uint64
   104		}
   105		_ = *addr // if nil, fault before taking the lock
   106		var r uint64
   107		addrLock(addr).lock()
   108		r = *addr + uint64(delta)
   109		*addr = r
   110		addrLock(addr).unlock()
   111		return r
   112	}
   113	
   114	//go:nosplit
   115	func goXchg64(addr *uint64, v uint64) uint64 {
   116		if uintptr(unsafe.Pointer(addr))&7 != 0 {
   117			*(*int)(nil) = 0 // crash on unaligned uint64
   118		}
   119		_ = *addr // if nil, fault before taking the lock
   120		var r uint64
   121		addrLock(addr).lock()
   122		r = *addr
   123		*addr = v
   124		addrLock(addr).unlock()
   125		return r
   126	}
   127	
   128	//go:nosplit
   129	func goLoad64(addr *uint64) uint64 {
   130		if uintptr(unsafe.Pointer(addr))&7 != 0 {
   131			*(*int)(nil) = 0 // crash on unaligned uint64
   132		}
   133		_ = *addr // if nil, fault before taking the lock
   134		var r uint64
   135		addrLock(addr).lock()
   136		r = *addr
   137		addrLock(addr).unlock()
   138		return r
   139	}
   140	
   141	//go:nosplit
   142	func goStore64(addr *uint64, v uint64) {
   143		if uintptr(unsafe.Pointer(addr))&7 != 0 {
   144			*(*int)(nil) = 0 // crash on unaligned uint64
   145		}
   146		_ = *addr // if nil, fault before taking the lock
   147		addrLock(addr).lock()
   148		*addr = v
   149		addrLock(addr).unlock()
   150	}
   151	
   152	//go:nosplit
   153	func Or8(addr *uint8, v uint8) {
   154		// Align down to 4 bytes and use 32-bit CAS.
   155		uaddr := uintptr(unsafe.Pointer(addr))
   156		addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
   157		word := uint32(v) << ((uaddr & 3) * 8) // little endian
   158		for {
   159			old := *addr32
   160			if Cas(addr32, old, old|word) {
   161				return
   162			}
   163		}
   164	}
   165	
   166	//go:nosplit
   167	func And8(addr *uint8, v uint8) {
   168		// Align down to 4 bytes and use 32-bit CAS.
   169		uaddr := uintptr(unsafe.Pointer(addr))
   170		addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
   171		word := uint32(v) << ((uaddr & 3) * 8)    // little endian
   172		mask := uint32(0xFF) << ((uaddr & 3) * 8) // little endian
   173		word |= ^mask
   174		for {
   175			old := *addr32
   176			if Cas(addr32, old, old&word) {
   177				return
   178			}
   179		}
   180	}
   181	
// armcas is the compare-and-swap primitive backing Cas on ARM.
// Implemented in assembly.
//go:nosplit
func armcas(ptr *uint32, old, new uint32) bool

// Load atomically loads *addr. Implemented in assembly.
//go:noescape
func Load(addr *uint32) uint32

// Loadp atomically loads the pointer stored at addr.
// NO go:noescape annotation; *addr escapes if result escapes (#31525)
func Loadp(addr unsafe.Pointer) unsafe.Pointer

// Load8 atomically loads the byte at addr. Implemented in assembly.
//go:noescape
func Load8(addr *uint8) uint8

// LoadAcq loads *addr; per its name this is an acquire-ordered load
// (implemented in assembly — semantics live there).
//go:noescape
func LoadAcq(addr *uint32) uint32

// Cas64 is the 64-bit compare-and-swap. Implemented in assembly;
// presumably dispatches to goCas64 when native 64-bit atomics are absent.
//go:noescape
func Cas64(addr *uint64, old, new uint64) bool

// CasRel is Cas; per its name with release ordering (implemented in
// assembly — semantics live there).
//go:noescape
func CasRel(addr *uint32, old, new uint32) bool

// Xadd64 is the 64-bit atomic add returning the new value.
// Implemented in assembly.
//go:noescape
func Xadd64(addr *uint64, delta int64) uint64

// Xchg64 is the 64-bit atomic exchange returning the old value.
// Implemented in assembly.
//go:noescape
func Xchg64(addr *uint64, v uint64) uint64

// Load64 is the 64-bit atomic load. Implemented in assembly.
//go:noescape
func Load64(addr *uint64) uint64

// Store64 is the 64-bit atomic store. Implemented in assembly.
//go:noescape
func Store64(addr *uint64, v uint64)
   214	

View as plain text