// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

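// mapaccess1_fast64 is the lookup fast path for maps with 64-bit scalar keys.
// It returns a pointer to the element for key, or a pointer to the shared
// zero value if the key is not present; it never returns nil.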
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

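// mapaccess2_fast64 is like mapaccess1_fast64 but also reports whether the
// key was present, backing the two-result ("comma ok") form of a map read.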
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

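// mapassign_fast64 returns a pointer to the element slot for key, inserting
// a new slot (and growing the map if needed) when the key is not already
// present. The caller stores the element value through the returned pointer.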
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

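// mapassign_fast64ptr is the variant of mapassign_fast64 used when the
// 8-byte key is itself a pointer, so the key must be stored as a pointer
// value rather than as a raw uint64.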
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

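// mapdelete_fast64 removes the entry for key, if any, clearing the key and
// element slots and collapsing trailing emptyOne tophash marks into
// emptyRest so later lookups can stop scanning early.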
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.ptrdata != 0 {
				if sys.PtrSize == 8 {
					*(*unsafe.Pointer)(k) = nil
				} else {
					// There are three ways to squeeze one or more 32 bit pointers into 64 bits.
					// Just call memclrHasPointers instead of trying to handle all cases here.
					memclrHasPointers(k, 8)
				}
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

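// growWork_fast64 performs a small amount of incremental grow work: it
// evacuates the old bucket that the current operation is about to touch,
// plus at most one more old bucket to keep the overall grow moving forward.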
func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}

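// evacuate_fast64 copies the entries of old bucket oldbucket (and its
// overflow chain) into the new bucket array, splitting them between the
// "x" (same index) and "y" (index+newbit) destinations when the map is
// doubling in size.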
func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.key.ptrdata != 0 && writeBarrier.enabled {
					if sys.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}