
Source file src/runtime/map_fast64.go

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

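// Note on layout, for reading the pointer arithmetic below: a bmap (see
// map.go) stores its bucketCnt (8) tophash bytes first, then the 8 keys
// packed together, then the 8 elems, then the overflow pointer. dataOffset
// is the offset of the first key. Since every key handled in this file is
// exactly 8 bytes, key i of bucket b is at add(b, dataOffset+i*8) and its
// elem at add(b, dataOffset+bucketCnt*8+i*uintptr(t.elemsize)).

// mapaccess1_fast64 is the lookup fast path the compiler uses for maps
// whose key is an 8-byte type such as uint64 or int64. Roughly,
//
//	m := map[uint64]int{1: 10}
//	v := m[1]
//
// is lowered to a call of this function followed by a load through the
// returned pointer. A miss returns a pointer to zeroVal rather than nil,
// so the result is always safe to dereference.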
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

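// mapaccess2_fast64 is the comma-ok counterpart of mapaccess1_fast64,
// backing lookups of the form
//
//	v, ok := m[k]
//
// on maps with 8-byte keys. The body is identical to mapaccess1_fast64
// except that it also reports whether the key was present.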
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

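// mapassign_fast64 is the assignment fast path for maps with 8-byte keys.
// It does not store the element itself: it finds or creates the slot for
// key and returns a pointer to it, and the compiler-generated caller writes
// the value through that pointer. Roughly, for m of type map[uint64]int,
//
//	m[k] = v
//
// becomes
//
//	*(*int)(mapassign_fast64(typeOfM, m, k)) = v
//
// On the way it may allocate the first bucket, start or help an in-progress
// grow, or chain a new overflow bucket onto a full one.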
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

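// mapassign_fast64ptr is the same assignment fast path for maps whose key
// is an 8-byte pointer type (typically pointer-keyed maps on 64-bit
// platforms). The significant difference from mapassign_fast64 is that the
// key is stored below through *(*unsafe.Pointer), so pointer keys are
// written with a write barrier rather than as raw uint64 bits.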
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

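// mapdelete_fast64 implements delete(m, k) for maps with 8-byte keys. It
// clears the key (if it contains pointers) and the elem, marks the cell
// emptyOne, and, if that cell turns out to end a run of empty cells at the
// tail of the bucket chain, walks backwards turning trailing emptyOne marks
// into emptyRest so later lookups can stop scanning early.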
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.ptrdata != 0 {
				memclrHasPointers(k, t.key.size)
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

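// growWork_fast64 makes incremental progress on an in-progress grow: it
// evacuates the old bucket that the current operation hashes to, plus at
// most one more (h.nevacuate), so the table is moved gradually by ordinary
// assignments and deletes instead of all at once.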
func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}

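// evacuate_fast64 moves the contents of old bucket oldbucket (including its
// overflow chain) into the new bucket array. For a size-doubling grow each
// key is rehashed and sent to one of two destinations: x, the bucket with
// the same index, or y, the bucket at oldbucket+newbit, depending on the
// hash bit newly exposed by the larger bucket mask; a same-size grow only
// uses x. The old tophash slots are overwritten with evacuation markers so
// readers that arrive later know the data has moved.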
func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.key.alg.hash(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.key.ptrdata != 0 && writeBarrier.enabled {
					if sys.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

