Source File: map.go
Belonging Package: runtime

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
const (
	// Maximum number of key/elem pairs a bucket can hold.
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits

	// Maximum average load of a bucket that triggers growth is 6.5.
	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
	loadFactorNum = 13
	loadFactorDen = 2

	// Maximum key or elem size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	maxKeySize  = 128
	maxElemSize = 128

	// dataOffset should be the size of the bmap struct, but needs to be
	// aligned correctly.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. We reserve a few possibilities for special marks.
	emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
	emptyOne       = 1 // this cell is empty
	evacuatedX     = 2 // key/elem is valid. Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
	minTopHash     = 5 // minimum tophash for a normal filled cell.

	// flags
	iterator     = 1 // there may be an iterator using buckets
	oldIterator  = 2 // there may be an iterator using oldbuckets
	hashWriting  = 4 // a goroutine is writing to the map
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*sys.PtrSize) - 1
)
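For orientation, the snippet below is a hedged illustration (not part of runtime/map.go) of how the load-factor constants are used by the growth check defined later in this file; the helper name is made up for the example.

// Illustrative only: the overLoadFactor test written out for B = 5 (32 buckets).
// Growth triggers once count > loadFactorNum*(32/loadFactorDen) = 13*16 = 208,
// provided count > bucketCnt.
func wouldGrowAtB5(count int) bool {
	return count > bucketCnt && uintptr(count) > loadFactorNum*((uintptr(1)<<5)/loadFactorDen)
}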
// A header for a Go map.
type hmap struct {
	count     int // # live cells == size of map. Must be first (used by len() builtin)
	flags     uint8
	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
	hash0     uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	extra *mapextra // optional fields
}

// mapextra holds fields that are not present on all maps.
type mapextra struct {
	// overflow and oldoverflow keep overflow buckets alive for hmap.buckets
	// and hmap.oldbuckets when the bucket type contains no pointers.
	overflow    *[]*bmap
	oldoverflow *[]*bmap

	// nextOverflow holds a pointer to a free overflow bucket.
	nextOverflow *bmap
}

// A bucket for a Go map.
type bmap struct {
	// tophash generally contains the top byte of the hash value for each key
	// in this bucket. If tophash[0] < minTopHash, tophash[0] is a bucket
	// evacuation state instead. Followed by bucketCnt keys, bucketCnt elems,
	// and an overflow pointer.
	tophash [bucketCnt]uint8
}
type hiter struct {
key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/compile/internal/gc/range.go).
elem unsafe.Pointer // Must be in second position (see cmd/compile/internal/gc/range.go).
t *maptype
h *hmap
buckets unsafe.Pointer // bucket ptr at hash_iter initialization time
bptr *bmap // current bucket
overflow *[]*bmap // keeps overflow buckets of hmap.buckets alive
oldoverflow *[]*bmap // keeps overflow buckets of hmap.oldbuckets alive
startBucket uintptr // bucket iteration started at
offset uint8 // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
wrapped bool // already wrapped around from end of bucket array to beginning
B uint8
i uint8
bucket uintptr
checkBucket uintptr
}
// bucketMask returns 1<<b - 1, optimized for code generation.
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}
// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}
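A hedged worked example of the computation above (illustrative only, not part of the file):

// On a 64-bit platform sys.PtrSize*8-8 == 56, so tophash keeps the top byte of
// the hash:
//
//	tophash(0x123456789abcdef0) == 0x12            // already >= minTopHash, kept as-is
//	tophash(0x0300000000000000) == 3 + minTopHash  // raw top byte 3 would clash with evacuatedY
//
// Bumping small values keeps real tophash bytes from colliding with the
// reserved cell states (emptyRest through evacuatedEmpty).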
// evacuated reports whether b has been evacuated (tophash[0] holds one of the
// evacuated* states).
func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > emptyOne && h < minTopHash
}
func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
}

func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
}

func (b *bmap) keys() unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset)
}
func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
	var ovf *bmap
	if h.extra != nil && h.extra.nextOverflow != nil {
		// We have preallocated overflow buckets available.
		// See makeBucketArray for more details.
		ovf = h.extra.nextOverflow
		if ovf.overflow(t) == nil {
			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.bucketsize)))
		} else {
			// This is the last preallocated overflow bucket.
			// Reset the overflow pointer on this bucket,
			// which was set to a non-nil sentinel value.
			ovf.setoverflow(t, nil)
			h.extra.nextOverflow = nil
		}
	} else {
		ovf = (*bmap)(newobject(t.bucket))
	}
	h.incrnoverflow()
	if t.bucket.ptrdata == 0 {
		h.createOverflow()
		*h.extra.overflow = append(*h.extra.overflow, ovf)
	}
	b.setoverflow(t, ovf)
	return ovf
}
func (h *hmap) createOverflow() {
	if h.extra == nil {
		h.extra = new(mapextra)
	}
	if h.extra.overflow == nil {
		h.extra.overflow = new([]*bmap)
	}
}
func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	if int64(int(hint)) != hint {
		hint = 0
	}
	return makemap(t, int(hint), h)
}
	// (fragment of makemap) Find the size parameter B which will hold the
	// requested # of elements. For hint < 0 overLoadFactor returns false
	// since hint < bucketCnt.
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	h.B = B
// makeBucketArray initializes a backing array for map buckets.
// 1<<b is the minimum number of buckets to allocate.
// dirtyalloc should either be nil or a bucket array previously
// allocated by makeBucketArray with the same t and b parameters.
func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
	base := bucketShift(b)
	nbuckets := base
	// For small b, overflow buckets are unlikely.
	// Avoid the overhead of the calculation.
	if b >= 4 {
		// Add on the estimated number of overflow buckets
		// required to insert the median number of elements
		// used with this value of b.
		nbuckets += bucketShift(b - 4)
		sz := t.bucket.size * nbuckets
		up := roundupsize(sz)
		if up != sz {
			nbuckets = up / t.bucket.size
		}
	}

	if dirtyalloc == nil {
		buckets = newarray(t.bucket, int(nbuckets))
	} else {
		// dirtyalloc was previously generated by the above
		// newarray(t.bucket, int(nbuckets)) but may not be empty.
		buckets = dirtyalloc
		size := t.bucket.size * nbuckets
		if t.bucket.ptrdata != 0 {
			memclrHasPointers(buckets, size)
		} else {
			memclrNoHeapPointers(buckets, size)
		}
	}

	if base != nbuckets {
		// We preallocated some overflow buckets.
		// We use the convention that if a preallocated overflow bucket's
		// overflow pointer is nil, there are more available by bumping the pointer.
		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
		last.setoverflow(t, (*bmap)(buckets))
	}
	return buckets, nextOverflow
}
// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the elem type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		if t.hashMightPanic() {
			t.hasher(key, 0) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	hash := t.hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.key.equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				if t.indirectelem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		if t.hashMightPanic() {
			t.hasher(key, 0) // see issue 23734
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	hash := t.hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.key.equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				if t.indirectelem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e, true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
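For context, a hedged sketch of how these accessors are reached from ordinary Go code; the exact lowering is chosen by the compiler (it may pick specialized fast-path variants), so this is illustrative only:

// Illustrative only (not part of runtime/map.go):
//
//	v := m[k]      // roughly: p := mapaccess1(maptype, m, &k); v = *p
//	v, ok := m[k]  // roughly: p, ok := mapaccess2(maptype, m, &k); v = *p
//
// A missing key yields a pointer into the shared zeroVal buffer, never nil,
// which is why the "comma ok" form is needed to distinguish "absent" from
// "present with zero value".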
// mapaccessK returns both key and elem. Used by map iterator.
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	hash := t.hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.key.equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				if t.indirectelem() {
					e = *((*unsafe.Pointer)(e))
				}
				return k, e
			}
		}
	}
	return nil, nil
}
func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero
	}
	return e
}
func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero, false
	}
	return e, true
}
// Like mapaccess, but allocates a slot for the key if it is not present in the map.
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		pc := funcPC(mapassign)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled {
		msanread(key, t.key.size)
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.hasher, since t.hasher may panic,
	// in which case we have not actually done a write.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var inserti *uint8
	var insertk unsafe.Pointer
	var elem unsafe.Pointer
bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey() {
				k = *((*unsafe.Pointer)(k))
			}
			if !t.key.equal(key, k) {
				continue
			}
			// already have a mapping for key. Update it.
			if t.needkeyupdate() {
				typedmemmove(t.key, k, key)
			}
			elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		elem = add(insertk, bucketCnt*uintptr(t.keysize))
	}

	// store new key/elem at insert position
	if t.indirectkey() {
		kmem := newobject(t.key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.indirectelem() {
		vmem := newobject(t.elem)
		*(*unsafe.Pointer)(elem) = vmem
	}
	typedmemmove(t.key, insertk, key)
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	if t.indirectelem() {
		elem = *((*unsafe.Pointer)(elem))
	}
	return elem
}
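As with the accessors, a hedged sketch of how assignment reaches mapassign (illustrative; the compiler may use specialized variants):

// Illustrative only (not part of runtime/map.go):
//
//	m[k] = v   // roughly: p := mapassign(maptype, m, &k); *p = v
//
// mapassign only returns a pointer to the element slot; the compiler emits the
// store of v (via typedmemmove for pointer-containing element types).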
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		if t.hashMightPanic() {
			t.hasher(key, 0) // see issue 23734
		}
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.hasher, since t.hasher may panic,
	// in which case we have not actually done a write (delete).
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break search
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey() {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !t.key.equal(key, k2) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.indirectkey() {
				*(*unsafe.Pointer)(k) = nil
			} else if t.key.ptrdata != 0 {
				memclrHasPointers(k, t.key.size)
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
			if t.indirectelem() {
				*(*unsafe.Pointer)(e) = nil
			} else if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// (The emptyOne -> emptyRest back-fill that walks the bucket chain
			// via bOrig is elided in this listing.)
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}
	// (fragment of mapiterinit; the preceding validation and bucket-state
	// snapshot code is elided in this listing)
	if t.bucket.ptrdata == 0 {
		// Allocate the current slice and remember pointers to both current
		// and old. This preserves all relevant overflow buckets alive even
		// if the table grows while we are iterating.
		h.createOverflow()
		it.overflow = h.extra.overflow
		it.oldoverflow = h.extra.oldoverflow
	}

	// decide where to start
	r := uintptr(fastrand())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand()) << 31
	}
	it.startBucket = r & bucketMask(h.B)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))

	// iterator state
	it.bucket = it.startBucket

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomic.Or8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}
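Because mapiterinit seeds startBucket and offset from fastrand(), iteration order is deliberately randomized. A minimal user-level illustration (not part of the runtime):

// Illustrative only:
//
//	m := map[string]int{"a": 1, "b": 2, "c": 3}
//	for k := range m {
//		fmt.Println(k) // order may differ between runs and between loops
//	}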
func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map iteration and map write")
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.elem = nil
			return
		}
		if h.growing() && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & it.h.oldbucketmask()
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == bucketShift(it.B) {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
			continue
		}
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
		if t.indirectkey() {
			k = *((*unsafe.Pointer)(k))
		}
		e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.elemsize))
		if checkBucket != noCheck && !h.sameSizeGrow() {
			// Special case: iterator was started during a grow to a larger size
			// and the grow is not done yet. Only return entries from the old
			// bucket that will migrate to the bucket we are iterating.
			if t.reflexivekey() || t.key.equal(k, k) {
				hash := t.hasher(k, uintptr(h.hash0))
				if hash&bucketMask(it.B) != checkBucket {
					continue
				}
			} else {
				// Hash isn't repeatable if k != k (NaNs). Use the low bit of
				// tophash to decide which half of the grow the entry goes to.
				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
					continue
				}
			}
		}
		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
			!(t.reflexivekey() || t.key.equal(k, k)) {
			// This is the golden data, we can return it.
			// OR
			// key!=key, so the entry can't be deleted or updated, so we can just return it.
			it.key = k
			if t.indirectelem() {
				e = *((*unsafe.Pointer)(e))
			}
			it.elem = e
		} else {
			// The hash table has grown since the iterator was started.
			// The golden data for this key is now somewhere else.
			// Check the current hash table for the data.
			rk, re := mapaccessK(t, h, k)
			if rk == nil {
				continue // key has been deleted
			}
			it.key = rk
			it.elem = re
		}
		it.bucket = bucket
		if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
			it.bptr = b
		}
		it.i = i + 1
		it.checkBucket = checkBucket
		return
	}
	b = b.overflow(t)
	i = 0
	goto next
}
// mapclear deletes all keys from a map.
func mapclear(t *maptype, h *hmap) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapclear)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
	}

	if h == nil || h.count == 0 {
		return
	}

	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	h.flags ^= hashWriting

	h.flags &^= sameSizeGrow
	h.oldbuckets = nil
	h.nevacuate = 0
	h.noverflow = 0
	h.count = 0

	// makeBucketArray clears the memory pointed to by h.buckets
	// and recovers any overflow buckets by generating them
	// as if h.buckets was newly alloced.
	_, nextOverflow := makeBucketArray(t, h.B, h.buckets)
	if nextOverflow != nil {
		// If overflow buckets are created then h.extra
		// will have been allocated during initial bucket creation.
		h.extra.nextOverflow = nextOverflow
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}
	// (fragment of hashGrow) If we've hit the load factor, get bigger.
	// Otherwise, there are too many overflow buckets, so keep the same
	// number of buckets and "grow" laterally.
	bigger := uint8(1)
	if !overLoadFactor(h.count+1, h.B) {
		bigger = 0
		h.flags |= sameSizeGrow
	}
	oldbuckets := h.buckets
	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)

	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// (the commit of the grow, which installs oldbuckets, newbuckets, flags and
	// nextOverflow into h, is elided in this listing)
}
// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool {
	return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}

// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	// "too many" means (approximately) as many overflow buckets as regular buckets.
	if B > 15 {
		B = 15
	}
	// The compiler doesn't see here that B < 16; mask B to make sure.
	return noverflow >= uint16(1)<<(B&15)
}
// growing reports whether h is growing. The growth may be to the same size or bigger.
func (h *hmap) growing() bool {
	return h.oldbuckets != nil
}

// sameSizeGrow reports whether the current growth is to a map of the same size.
func (h *hmap) sameSizeGrow() bool {
	return h.flags&sameSizeGrow != 0
}

// noldbuckets calculates the number of buckets prior to the current map growth.
func (h *hmap) noldbuckets() uintptr {
	oldB := h.B
	if !h.sameSizeGrow() {
		oldB--
	}
	return bucketShift(oldB)
}

// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
func (h *hmap) oldbucketmask() uintptr {
	return h.noldbuckets() - 1
}
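A hedged worked example of these growth helpers (illustrative only), for a doubling grow from B = 5 to B = 6:

// Illustrative only:
//
//	h.B == 6, sameSizeGrow() == false
//	noldbuckets()   == 1 << 5 == 32   // size of the old bucket array
//	oldbucketmask() == 31             // hash & 31 locates the old bucket to evacuate
//	bucketMask(h.B) == 63             // hash & 63 locates the bucket in the new array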
	// (fragment of growWork) make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use:
	evacuate(t, h, bucket&h.oldbucketmask())
type evacDst struct {
b *bmap // current destination bucket
i int // key/elem index into b
k unsafe.Pointer // pointer to current key storage
e unsafe.Pointer // pointer to current elem storage
}
func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*uintptr(t.keysize))

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*uintptr(t.keysize))
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*uintptr(t.keysize))
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.keysize)), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.indirectkey() {
					k2 = *((*unsafe.Pointer)(k2))
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k2, uintptr(h.hash0))
					if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equal(k2, k2) {
						// If key != key (NaNs), the hash is not reproducible.
						// Use the low bit of tophash to decide the destination
						// consistently with what the iterator expects.
						useY = top & 1
						top = tophash(hash)
					} else {
						if hash&newbit != 0 {
							useY = 1
						}
					}
				}

				if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
					throw("bad evacuatedN")
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*uintptr(t.keysize))
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
				if t.indirectkey() {
					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
				} else {
					typedmemmove(t.key, dst.k, k) // copy key
				}
				if t.indirectelem() {
					*(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
				} else {
					typedmemmove(t.elem, dst.e, e)
				}
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, uintptr(t.keysize))
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.buckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
	h.nevacuate++
	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
	stop := h.nevacuate + 1024
	if stop > newbit {
		stop = newbit
	}
	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
		h.nevacuate++
	}
	if h.nevacuate == newbit { // newbit == # of oldbuckets
		// Growing is all done. Free old main bucket array.
		h.oldbuckets = nil
		// Can discard old overflow buckets as well.
		// If they are still referenced by an iterator,
		// then the iterator holds a pointer to the slice.
		if h.extra != nil {
			h.extra.oldoverflow = nil
		}
		h.flags &^= sameSizeGrow
	}
}
//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype, cap int) *hmap {
	// Check invariants and reflects math.
	if t.key.equal == nil {
		throw("runtime.reflect_makemap: unsupported map key type")
	}
	if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
		t.key.size <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.size)) {
		throw("key size wrong")
	}
	if t.elem.size > maxElemSize && (!t.indirectelem() || t.elemsize != uint8(sys.PtrSize)) ||
		t.elem.size <= maxElemSize && (t.indirectelem() || t.elemsize != uint8(t.elem.size)) {
		throw("elem size wrong")
	}
	if t.key.align > bucketCnt {
		throw("key align too big")
	}
	if t.elem.align > bucketCnt {
		throw("elem align too big")
	}
	if t.key.size%uintptr(t.key.align) != 0 {
		throw("key size not a multiple of key align")
	}
	if t.elem.size%uintptr(t.elem.align) != 0 {
		throw("elem size not a multiple of elem align")
	}
	if bucketCnt < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.key.align) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.elem.align) != 0 {
		throw("need padding in bucket (elem)")
	}

	return makemap(t, cap, nil)
}
//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	elem, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}
//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}
//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}
//go:linkname reflectlite_maplen internal/reflectlite.maplen
func reflectlite_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}
const maxZero = 1024 // must match value in reflect/value.go:maxZero cmd/compile/internal/gc/walk.go:zeroValSize

// zeroVal is the shared zero value returned for missing map entries.
var zeroVal [maxZero]byte