const runtime.bucketCnt
107 uses
runtime (current package)
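All of the references below are to the Go runtime's hash map implementation, where bucketCnt fixes the number of key/element slots in a single hash bucket. A minimal standalone sketch of the definition and the bucket header it sizes, assuming bucketCntBits = 3 as in the runtime source of this version (the struct here is a simplified stand-in, not the real runtime.bmap):

package main

import "fmt"

// Mirrors the runtime definition: each map bucket has 1<<3 = 8 slots.
const (
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits
)

// bmapSketch mirrors the fixed portion of runtime.bmap (map.go#L153):
// one tophash byte per slot; the packed keys, packed elements, and the
// overflow pointer follow this header in memory.
type bmapSketch struct {
	tophash [bucketCnt]uint8
}

func main() {
	fmt.Println(bucketCnt) // 8
}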
map.go#L66: bucketCnt = 1 << bucketCntBits
map.go#L153: tophash [bucketCnt]uint8
map.go#L429: for i := uintptr(0); i < bucketCnt; i++ {
map.go#L441: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
map.go#L487: for i := uintptr(0); i < bucketCnt; i++ {
map.go#L499: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
map.go#L531: for i := uintptr(0); i < bucketCnt; i++ {
map.go#L543: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
map.go#L610: for i := uintptr(0); i < bucketCnt; i++ {
map.go#L615: elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
map.go#L633: elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
map.go#L657: elem = add(insertk, bucketCnt*uintptr(t.keysize))
map.go#L720: for i := uintptr(0); i < bucketCnt; i++ {
map.go#L741: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
map.go#L754: if i == bucketCnt-1 {
map.go#L773: i = bucketCnt - 1
map.go#L837: it.offset = uint8(r >> h.B & (bucketCnt - 1))
map.go#L898: for ; i < bucketCnt; i++ {
map.go#L899: offi := (i + it.offset) & (bucketCnt - 1)
map.go#L909: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.elemsize))
map.go#L1071: return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
map.go#L1149: x.e = add(x.k, bucketCnt*uintptr(t.keysize))
map.go#L1157: y.e = add(y.k, bucketCnt*uintptr(t.keysize))
map.go#L1162: e := add(k, bucketCnt*uintptr(t.keysize))
map.go#L1163: for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.keysize)), add(e, uintptr(t.elemsize)) {
map.go#L1209: if dst.i == bucketCnt {
map.go#L1213: dst.e = add(dst.k, bucketCnt*uintptr(t.keysize))
map.go#L1215: dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
map.go#L1291: if t.key.align > bucketCnt {
map.go#L1294: if t.elem.align > bucketCnt {
map.go#L1303: if bucketCnt < 8 {
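Most of the map.go references above compute the address of slot i's element: all bucketCnt keys are laid out contiguously after the tophash array (at dataOffset), followed by all bucketCnt elements, so the element offset is dataOffset + bucketCnt*keysize + i*elemsize. The check at map.go#L1071 grows the map once the average bucket holds more than loadFactorNum/loadFactorDen (6.5) entries. A hedged sketch of both calculations; the dataOffset, keysize, and elemsize values in main are made up for illustration:

package main

import "fmt"

const (
	bucketCnt     = 8
	loadFactorNum = 13 // average load of 13/2 = 6.5 entries per bucket
	loadFactorDen = 2
)

// elemOffset mirrors dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.elemsize):
// keys for all 8 slots are packed together, then all 8 elements.
func elemOffset(dataOffset, keysize, elemsize, i uintptr) uintptr {
	return dataOffset + bucketCnt*keysize + i*elemsize
}

// overLoadFactor mirrors the pattern at map.go#L1071: grow when count
// exceeds 6.5 entries per bucket (and the map is bigger than one bucket).
func overLoadFactor(count int, B uint8) bool {
	bucketShift := uintptr(1) << B // number of buckets
	return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift/loadFactorDen)
}

func main() {
	// Hypothetical sizes: 8-byte bucket header, 8-byte keys, 16-byte elements, slot 3.
	fmt.Println(elemOffset(8, 8, 16, 3)) // 8 + 8*8 + 3*16 = 120
	fmt.Println(overLoadFactor(53, 3))   // 8 buckets: 53 > 13*(8/2) = 52 -> true
}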
map_fast32.go#L43: for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
map_fast32.go#L45: return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
map_fast32.go#L83: for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
map_fast32.go#L85: return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize)), true
map_fast32.go#L125: for i := uintptr(0); i < bucketCnt; i++ {
map_fast32.go#L165: insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
map_fast32.go#L174: elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
map_fast32.go#L215: for i := uintptr(0); i < bucketCnt; i++ {
map_fast32.go#L255: insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
map_fast32.go#L264: elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
map_fast32.go#L297: for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
map_fast32.go#L309: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
map_fast32.go#L318: if i == bucketCnt-1 {
map_fast32.go#L337: i = bucketCnt - 1
map_fast32.go#L385: x.e = add(x.k, bucketCnt*4)
map_fast32.go#L393: y.e = add(y.k, bucketCnt*4)
map_fast32.go#L398: e := add(k, bucketCnt*4)
map_fast32.go#L399: for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.elemsize)) {
map_fast32.go#L421: if dst.i == bucketCnt {
map_fast32.go#L425: dst.e = add(dst.k, bucketCnt*4)
map_fast32.go#L427: dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
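The map_fast32.go references above follow the same layout arithmetic, but the key size is hard-coded to 4 bytes (and to 8 bytes in map_fast64.go below), so the element offset simplifies to dataOffset + bucketCnt*4 + i*elemsize. A small illustrative sketch; the sizes passed in main are assumptions, not the runtime's actual values:

package main

import "fmt"

const bucketCnt = 8

// fast32ElemOffset mirrors dataOffset + bucketCnt*4 + i*uintptr(t.elemsize):
// eight 4-byte keys sit after the tophash array, then the elements.
func fast32ElemOffset(dataOffset, elemsize, i uintptr) uintptr {
	return dataOffset + bucketCnt*4 + i*elemsize
}

func main() {
	// Hypothetical: dataOffset 8, 8-byte elements, slot 5.
	fmt.Println(fast32ElemOffset(8, 8, 5)) // 8 + 32 + 40 = 80
}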
map_fast64.go#L43: for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
map_fast64.go#L45: return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
map_fast64.go#L83: for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
map_fast64.go#L85: return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
map_fast64.go#L125: for i := uintptr(0); i < bucketCnt; i++ {
map_fast64.go#L165: insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
map_fast64.go#L174: elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
map_fast64.go#L215: for i := uintptr(0); i < bucketCnt; i++ {
map_fast64.go#L255: insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
map_fast64.go#L264: elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
map_fast64.go#L297: for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
map_fast64.go#L311: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
map_fast64.go#L320: if i == bucketCnt-1 {
map_fast64.go#L339: i = bucketCnt - 1
map_fast64.go#L387: x.e = add(x.k, bucketCnt*8)
map_fast64.go#L395: y.e = add(y.k, bucketCnt*8)
map_fast64.go#L400: e := add(k, bucketCnt*8)
map_fast64.go#L401: for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.elemsize)) {
map_fast64.go#L423: if dst.i == bucketCnt {
map_fast64.go#L427: dst.e = add(dst.k, bucketCnt*8)
map_fast64.go#L429: dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
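The growth code referenced in every file (map.go#L1209-L1215, map_fast64.go#L423-L429, and the corresponding lines in the other variants) fills a destination bucket slot by slot; once the slot index reaches bucketCnt it chains a fresh overflow bucket and resets the cursors, and it indexes tophash with dst.i & (bucketCnt-1), which is a no-op mask that lets the compiler drop the bounds check. A reduced sketch of that fill-and-overflow pattern, with simplified stand-ins for the runtime types:

package main

import "fmt"

const bucketCnt = 8

// bucket is a simplified stand-in for runtime.bmap during evacuation.
type bucket struct {
	tophash  [bucketCnt]uint8
	overflow *bucket
}

// evacDst mirrors the destination cursor used while growing: the current
// bucket and the next free slot index within it.
type evacDst struct {
	b *bucket
	i int
}

// put copies one tophash into the destination, chaining a new overflow
// bucket once the current one is full (dst.i == bucketCnt).
func (dst *evacDst) put(top uint8) {
	if dst.i == bucketCnt {
		dst.b.overflow = &bucket{}
		dst.b = dst.b.overflow
		dst.i = 0
	}
	// Masking with bucketCnt-1 is an identity here (dst.i < bucketCnt),
	// but it proves to the compiler that the index is in range.
	dst.b.tophash[dst.i&(bucketCnt-1)] = top
	dst.i++
}

func main() {
	dst := evacDst{b: &bucket{}}
	for j := 0; j < 10; j++ {
		dst.put(uint8(j + 1))
	}
	fmt.Println(dst.b.tophash[:dst.i]) // [9 10]: the last two entries landed in the overflow bucket
}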
map_faststr.go#L29: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
map_faststr.go#L38: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
map_faststr.go#L44: keymaybe := uintptr(bucketCnt)
map_faststr.go#L45: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
map_faststr.go#L54: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
map_faststr.go#L64: if keymaybe != bucketCnt {
map_faststr.go#L70: if keymaybe != bucketCnt {
map_faststr.go#L73: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize))
map_faststr.go#L94: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
map_faststr.go#L100: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
map_faststr.go#L124: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
map_faststr.go#L133: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
map_faststr.go#L139: keymaybe := uintptr(bucketCnt)
map_faststr.go#L140: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
map_faststr.go#L149: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
map_faststr.go#L159: if keymaybe != bucketCnt {
map_faststr.go#L165: if keymaybe != bucketCnt {
map_faststr.go#L168: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize)), true
map_faststr.go#L189: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
map_faststr.go#L195: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
map_faststr.go#L237: for i := uintptr(0); i < bucketCnt; i++ {
map_faststr.go#L281: insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks
map_faststr.go#L289: elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*sys.PtrSize+inserti*uintptr(t.elemsize))
map_faststr.go#L324: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
map_faststr.go#L334: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
map_faststr.go#L343: if i == bucketCnt-1 {
map_faststr.go#L362: i = bucketCnt - 1
map_faststr.go#L410: x.e = add(x.k, bucketCnt*2*sys.PtrSize)
map_faststr.go#L418: y.e = add(y.k, bucketCnt*2*sys.PtrSize)
map_faststr.go#L423: e := add(k, bucketCnt*2*sys.PtrSize)
map_faststr.go#L424: for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*sys.PtrSize), add(e, uintptr(t.elemsize)) {
map_faststr.go#L446: if dst.i == bucketCnt {
map_faststr.go#L450: dst.e = add(dst.k, bucketCnt*2*sys.PtrSize)
map_faststr.go#L452: dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
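In map_faststr.go the keys are string headers, which occupy 2*sys.PtrSize bytes each (a pointer plus a length), so the element block starts at dataOffset + bucketCnt*2*sys.PtrSize. A short sketch that checks this size assumption and computes a slot offset; the dataOffset and elemsize values are again hypothetical:

package main

import (
	"fmt"
	"unsafe"
)

const bucketCnt = 8

// faststrElemOffset mirrors dataOffset + bucketCnt*2*sys.PtrSize + i*uintptr(t.elemsize):
// eight string headers are packed after the tophash array, then the elements.
func faststrElemOffset(dataOffset, elemsize, i uintptr) uintptr {
	return dataOffset + bucketCnt*2*unsafe.Sizeof(uintptr(0)) + i*elemsize
}

func main() {
	var s string
	// A string header is a pointer plus a length: 2*PtrSize bytes.
	fmt.Println(unsafe.Sizeof(s) == 2*unsafe.Sizeof(uintptr(0))) // true
	// Hypothetical: dataOffset 8, 16-byte elements, slot 2.
	fmt.Println(faststrElemOffset(8, 16, 2)) // on 64-bit: 8 + 8*16 + 2*16 = 168
}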
The pages are generated with Golds v0.3.2-preview. (GOOS=darwin GOARCH=amd64)
Golds is a Go 101 project developed by Tapir Liu.
PR and bug reports are welcome and can be submitted to the issue list.
Please follow @Go100and1 to get the latest news of Golds.