
Source file src/runtime/lock_futex.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build dragonfly freebsd linux

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// This implementation depends on OS-specific implementations of
//
//	futexsleep(addr *uint32, val uint32, ns int64)
//		Atomically,
//			if *addr == val { sleep }
//		Might be woken up spuriously; that's allowed.
//		Don't sleep longer than ns; ns < 0 means forever.
//
//	futexwakeup(addr *uint32, cnt uint32)
//		If any procs are sleeping on addr, wake up at most cnt.
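//
// On Linux, for example, these correspond to the futex(2) system call's
// FUTEX_WAIT and FUTEX_WAKE operations; the other futex-based ports use
// their OS's equivalent primitives.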

const (
	mutex_unlocked = 0
	mutex_locked   = 1
	mutex_sleeping = 2

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)

// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
// mutex_sleeping means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.
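//
// A rough sketch of the transitions (see lock and unlock below):
//
//	unlocked -> locked     fast path: a single atomic Xchg or Cas
//	locked   -> sleeping   a contender gives up spinning and parks
//	sleeping -> unlocked   unlock, followed by futexwakeup of one waiter
//	locked   -> unlocked   unlock with no sleepers to wake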

// We use the uintptr mutex.key and note.key as a uint32.
//go:nosplit
func key32(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}

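// lock acquires l. The fast path is a single Xchg from mutex_unlocked to
// mutex_locked; under contention the caller spins briefly (procyield, then
// osyield) and finally parks in futexsleep with the key set to mutex_sleeping.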
func lock(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	v := atomic.Xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// wait is either mutex_locked or mutex_sleeping
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l.key from
	// mutex_sleeping to some other value, we must be
	// careful to change it back to mutex_sleeping before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait := v

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for active_spin attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Try for lock, spinning.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			procyield(active_spin_cnt)
		}

		// Try for lock, rescheduling.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			osyield()
		}

		// Sleep.
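		// Publish mutex_sleeping and park. If the Xchg happens to find
		// the mutex unlocked we own it and return, leaving the key as
		// mutex_sleeping; at worst that costs one spurious futexwakeup
		// in unlock.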
		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			return
		}
		wait = mutex_sleeping
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}

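// unlock releases l. If the previous state was mutex_sleeping, at least one
// thread may be parked in futexsleep, so wake one of them up.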
func unlock(l *mutex) {
	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}

// One-time notifications.
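// A note is a one-shot event: noteclear resets it, notewakeup fires it
// exactly once, and notesleep/notetsleep block until it has fired.
// n.key is 0 until the note is signaled and 1 afterward.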
func noteclear(n *note) {
	n.key = 0
}

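// notewakeup signals n, waking any thread blocked in notesleep or
// notetsleep. Signaling the same note twice without an intervening
// noteclear is a fatal error.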
func notewakeup(n *note) {
	old := atomic.Xchg(key32(&n.key), 1)
	if old != 0 {
		print("notewakeup - double wakeup (", old, ")\n")
		throw("notewakeup - double wakeup")
	}
	futexwakeup(key32(&n.key), 1)
}

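// notesleep blocks the calling M until n has been signaled by notewakeup.
// It must run on g0 because it parks the whole thread.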
func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	ns := int64(-1)
	if *cgo_yield != nil {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		ns = 10e6
	}
	for atomic.Load(key32(&n.key)) == 0 {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
	}
}

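// notetsleep_internal is the timed form of notesleep: it waits for n to be
// signaled, but gives up after ns nanoseconds (ns < 0 means no timeout) and
// reports whether the note was signaled in time.
//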
// May run with m.p==nil if called from notetsleep, so write barriers
// are not allowed.
//
//go:nosplit
//go:nowritebarrier
func notetsleep_internal(n *note, ns int64) bool {
	gp := getg()

	if ns < 0 {
		if *cgo_yield != nil {
			// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
			ns = 10e6
		}
		for atomic.Load(key32(&n.key)) == 0 {
			gp.m.blocked = true
			futexsleep(key32(&n.key), 0, ns)
			if *cgo_yield != nil {
				asmcgocall(*cgo_yield, nil)
			}
			gp.m.blocked = false
		}
		return true
	}

	if atomic.Load(key32(&n.key)) != 0 {
		return true
	}

	deadline := nanotime() + ns
	for {
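		// Cap each sleep at 10ms when a cgo yield hook is installed so
		// the hook gets called regularly while we wait.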
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		if atomic.Load(key32(&n.key)) != 0 {
			break
		}
		now := nanotime()
		if now >= deadline {
			break
		}
		ns = deadline - now
	}
	return atomic.Load(key32(&n.key)) != 0
}

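// notetsleep is like notesleep but gives up after ns nanoseconds.
// It reports whether the note was signaled within that time.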
func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}

	return notetsleep_internal(n, ns)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}

	entersyscallblock()
	ok := notetsleep_internal(n, ns)
	exitsyscall()
	return ok
}

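// beforeIdle and checkTimeouts are hooks for lock implementations that have
// to drive their own event loop (such as lock_js.go on js/wasm); on futex
// platforms they have nothing to do.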
func beforeIdle() bool {
	return false
}

func checkTimeouts() {}

