...

Source file src/runtime/lock_sema.go

     1	// Copyright 2011 The Go Authors. All rights reserved.
     2	// Use of this source code is governed by a BSD-style
     3	// license that can be found in the LICENSE file.
     4	
     5	// +build aix darwin nacl netbsd openbsd plan9 solaris windows
     6	
     7	package runtime
     8	
     9	import (
    10		"runtime/internal/atomic"
    11		"unsafe"
    12	)
    13	
    14	// This implementation depends on OS-specific implementations of
    15	//
    16	//	func semacreate(mp *m)
    17	//		Create a semaphore for mp, if it does not already have one.
    18	//
    19	//	func semasleep(ns int64) int32
    20	//		If ns < 0, acquire m's semaphore and return 0.
    21	//		If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
    22	//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
    23	//
    24	//	func semawakeup(mp *m)
    25	//		Wake up mp, which is or will soon be sleeping on its semaphore.
    26	//
    27	const (
    28		locked uintptr = 1
    29	
    30		active_spin     = 4
    31		active_spin_cnt = 30
    32		passive_spin    = 1
    33	)
    34	
    35	func lock(l *mutex) {
    36		gp := getg()
    37		if gp.m.locks < 0 {
    38			throw("runtime·lock: lock count")
    39		}
    40		gp.m.locks++
    41	
    42		// Speculative grab for lock.
    43		if atomic.Casuintptr(&l.key, 0, locked) {
    44			return
    45		}
    46		semacreate(gp.m)
    47	
    48		// On uniprocessor's, no point spinning.
    49		// On multiprocessors, spin for ACTIVE_SPIN attempts.
    50		spin := 0
    51		if ncpu > 1 {
    52			spin = active_spin
    53		}
    54	Loop:
    55		for i := 0; ; i++ {
    56			v := atomic.Loaduintptr(&l.key)
    57			if v&locked == 0 {
    58				// Unlocked. Try to lock.
    59				if atomic.Casuintptr(&l.key, v, v|locked) {
    60					return
    61				}
    62				i = 0
    63			}
    64			if i < spin {
    65				procyield(active_spin_cnt)
    66			} else if i < spin+passive_spin {
    67				osyield()
    68			} else {
    69				// Someone else has it.
    70				// l->waitm points to a linked list of M's waiting
    71				// for this lock, chained through m->nextwaitm.
    72				// Queue this M.
    73				for {
    74					gp.m.nextwaitm = muintptr(v &^ locked)
    75					if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
    76						break
    77					}
    78					v = atomic.Loaduintptr(&l.key)
    79					if v&locked == 0 {
    80						continue Loop
    81					}
    82				}
    83				if v&locked != 0 {
    84					// Queued. Wait.
    85					semasleep(-1)
    86					i = 0
    87				}
    88			}
    89		}
    90	}
    91	
    92	//go:nowritebarrier
    93	// We might not be holding a p in this code.
    94	func unlock(l *mutex) {
    95		gp := getg()
    96		var mp *m
    97		for {
    98			v := atomic.Loaduintptr(&l.key)
    99			if v == locked {
   100				if atomic.Casuintptr(&l.key, locked, 0) {
   101					break
   102				}
   103			} else {
   104				// Other M's are waiting for the lock.
   105				// Dequeue an M.
   106				mp = muintptr(v &^ locked).ptr()
   107				if atomic.Casuintptr(&l.key, v, uintptr(mp.nextwaitm)) {
   108					// Dequeued an M.  Wake it.
   109					semawakeup(mp)
   110					break
   111				}
   112			}
   113		}
   114		gp.m.locks--
   115		if gp.m.locks < 0 {
   116			throw("runtime·unlock: lock count")
   117		}
   118		if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
   119			gp.stackguard0 = stackPreempt
   120		}
   121	}
   122	
   123	// One-time notifications.
   124	func noteclear(n *note) {
   125		if GOOS == "aix" {
   126			// On AIX, semaphores might not synchronize the memory in some
   127			// rare cases. See issue #30189.
   128			atomic.Storeuintptr(&n.key, 0)
   129		} else {
   130			n.key = 0
   131		}
   132	}
   133	
   134	func notewakeup(n *note) {
   135		var v uintptr
   136		for {
   137			v = atomic.Loaduintptr(&n.key)
   138			if atomic.Casuintptr(&n.key, v, locked) {
   139				break
   140			}
   141		}
   142	
   143		// Successfully set waitm to locked.
   144		// What was it before?
   145		switch {
   146		case v == 0:
   147			// Nothing was waiting. Done.
   148		case v == locked:
   149			// Two notewakeups! Not allowed.
   150			throw("notewakeup - double wakeup")
   151		default:
   152			// Must be the waiting m. Wake it up.
   153			semawakeup((*m)(unsafe.Pointer(v)))
   154		}
   155	}
   156	
   157	func notesleep(n *note) {
   158		gp := getg()
   159		if gp != gp.m.g0 {
   160			throw("notesleep not on g0")
   161		}
   162		semacreate(gp.m)
   163		if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
   164			// Must be locked (got wakeup).
   165			if n.key != locked {
   166				throw("notesleep - waitm out of sync")
   167			}
   168			return
   169		}
   170		// Queued. Sleep.
   171		gp.m.blocked = true
   172		if *cgo_yield == nil {
   173			semasleep(-1)
   174		} else {
   175			// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
   176			const ns = 10e6
   177			for atomic.Loaduintptr(&n.key) == 0 {
   178				semasleep(ns)
   179				asmcgocall(*cgo_yield, nil)
   180			}
   181		}
   182		gp.m.blocked = false
   183	}
   184	
   185	//go:nosplit
   186	func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
   187		// gp and deadline are logically local variables, but they are written
   188		// as parameters so that the stack space they require is charged
   189		// to the caller.
   190		// This reduces the nosplit footprint of notetsleep_internal.
   191		gp = getg()
   192	
   193		// Register for wakeup on n->waitm.
   194		if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
   195			// Must be locked (got wakeup).
   196			if n.key != locked {
   197				throw("notetsleep - waitm out of sync")
   198			}
   199			return true
   200		}
   201		if ns < 0 {
   202			// Queued. Sleep.
   203			gp.m.blocked = true
   204			if *cgo_yield == nil {
   205				semasleep(-1)
   206			} else {
   207				// Sleep in arbitrary-but-moderate intervals to poll libc interceptors.
   208				const ns = 10e6
   209				for semasleep(ns) < 0 {
   210					asmcgocall(*cgo_yield, nil)
   211				}
   212			}
   213			gp.m.blocked = false
   214			return true
   215		}
   216	
   217		deadline = nanotime() + ns
   218		for {
   219			// Registered. Sleep.
   220			gp.m.blocked = true
   221			if *cgo_yield != nil && ns > 10e6 {
   222				ns = 10e6
   223			}
   224			if semasleep(ns) >= 0 {
   225				gp.m.blocked = false
   226				// Acquired semaphore, semawakeup unregistered us.
   227				// Done.
   228				return true
   229			}
   230			if *cgo_yield != nil {
   231				asmcgocall(*cgo_yield, nil)
   232			}
   233			gp.m.blocked = false
   234			// Interrupted or timed out. Still registered. Semaphore not acquired.
   235			ns = deadline - nanotime()
   236			if ns <= 0 {
   237				break
   238			}
   239			// Deadline hasn't arrived. Keep sleeping.
   240		}
   241	
   242		// Deadline arrived. Still registered. Semaphore not acquired.
   243		// Want to give up and return, but have to unregister first,
   244		// so that any notewakeup racing with the return does not
   245		// try to grant us the semaphore when we don't expect it.
   246		for {
   247			v := atomic.Loaduintptr(&n.key)
   248			switch v {
   249			case uintptr(unsafe.Pointer(gp.m)):
   250				// No wakeup yet; unregister if possible.
   251				if atomic.Casuintptr(&n.key, v, 0) {
   252					return false
   253				}
   254			case locked:
   255				// Wakeup happened so semaphore is available.
   256				// Grab it to avoid getting out of sync.
   257				gp.m.blocked = true
   258				if semasleep(-1) < 0 {
   259					throw("runtime: unable to acquire - semaphore out of sync")
   260				}
   261				gp.m.blocked = false
   262				return true
   263			default:
   264				throw("runtime: unexpected waitm - semaphore out of sync")
   265			}
   266		}
   267	}
   268	
   269	func notetsleep(n *note, ns int64) bool {
   270		gp := getg()
   271		if gp != gp.m.g0 {
   272			throw("notetsleep not on g0")
   273		}
   274		semacreate(gp.m)
   275		return notetsleep_internal(n, ns, nil, 0)
   276	}
   277	
   278	// same as runtime·notetsleep, but called on user g (not g0)
   279	// calls only nosplit functions between entersyscallblock/exitsyscall
   280	func notetsleepg(n *note, ns int64) bool {
   281		gp := getg()
   282		if gp == gp.m.g0 {
   283			throw("notetsleepg on g0")
   284		}
   285		semacreate(gp.m)
   286		entersyscallblock()
   287		ok := notetsleep_internal(n, ns, nil, 0)
   288		exitsyscall()
   289		return ok
   290	}
   291	
   292	func beforeIdle() bool {
   293		return false
   294	}
   295	
   296	func checkTimeouts() {}
   297	

View as plain text