Source File: map_fast64.go
Belonging Package: runtime
package runtime
import (
	"runtime/internal/sys"
	"unsafe"
)
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
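
These two accessors back the one-result and two-result forms of a map read. As a point of reference (not part of this file), on a 64-bit build the compiler is expected to lower reads of maps whose key is a 64-bit scalar, such as map[uint64]V, to these routines; a minimal sketch of user code that would exercise them:

	package main

	import "fmt"

	func main() {
		m := map[uint64]string{7: "seven"}

		v := m[7]     // one-result read: expected to lower to mapaccess1_fast64
		u, ok := m[8] // two-result read: expected to lower to mapaccess2_fast64

		fmt.Println(v, u, ok) // "seven" "" false
	}

A missing key returns a pointer to zeroVal rather than nil, which is why the one-result form can always dereference its result.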
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
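
For context, mapassign_fast64 returns a pointer to the element slot; the compiled caller then stores the value into it. A minimal sketch of user code that reaches it (assuming the usual lowering for map[uint64]V on a 64-bit build):

	package main

	import "fmt"

	func main() {
		m := make(map[uint64]int)
		// Each assignment is expected to lower to a mapassign_fast64 call
		// returning the element slot, followed by a store of the value.
		m[1] = 100
		m[1] = 200 // existing key: the same slot is found and overwritten
		fmt.Println(m[1])
	}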
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64ptr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
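
mapassign_fast64ptr is the variant used when the 8-byte key is itself a pointer, so the key store above goes through *(*unsafe.Pointer) and gets a write barrier. A sketch of user code that would plausibly route here (the lowering choice is the compiler's, assumed here for a 64-bit build):

	package main

	import "fmt"

	func main() {
		a, b := new(int), new(int)
		*a, *b = 1, 2

		// Pointer-typed keys are 8 bytes on 64-bit platforms, but the key
		// copy must be visible to the GC, hence the *_fast64ptr variant.
		m := map[*int]string{}
		m[a] = "a"
		m[b] = "b"
		fmt.Println(m[a], m[b])
	}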
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.ptrdata != 0 {
				if sys.PtrSize == 8 {
					*(*unsafe.Pointer)(k) = nil
				} else {
					// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
					// Just call memclrHasPointers instead of trying to handle all cases here.
					memclrHasPointers(k, 8)
				}
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash flooding. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}
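
A built-in delete on a map with a 64-bit key is what reaches mapdelete_fast64; a minimal sketch (assuming the standard lowering for map[uint64]V):

	package main

	import "fmt"

	func main() {
		m := map[uint64]int{1: 10, 2: 20}
		delete(m, 1) // expected to lower to mapdelete_fast64
		_, ok := m[1]
		fmt.Println(ok, len(m)) // false 1
	}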
func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}
func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.key.ptrdata != 0 && writeBarrier.enabled {
					if sys.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(t, h, newbit)
	}
}
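
Growth is incremental: hashGrow only allocates the new bucket array, and each subsequent assign or delete calls growWork_fast64, which evacuates the old bucket it is about to touch plus one more. None of this is directly observable from user code; a sketch that merely exercises the path by inserting past the load factor:

	package main

	import "fmt"

	func main() {
		m := make(map[uint64]uint64)
		// Passing the load factor triggers hashGrow; each later write then
		// performs a slice of the rehash via growWork_fast64 and
		// evacuate_fast64 until evacuation completes.
		for i := uint64(0); i < 1000; i++ {
			m[i] = i * i
		}
		fmt.Println(len(m)) // 1000
	}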