const runtime/internal/sys.PtrSize
259 uses
runtime/internal/sys (current package)
stubs.go#L9: const PtrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ideal const
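
A note on the defining expression: ^uintptr(0) has every bit set, so on a 64-bit target shifting it right by 63 yields 1 (4<<1 == 8), while on a 32-bit target ^uintptr(0) is 0xFFFFFFFF and the constant shift by 63 yields 0 (4<<0 == 4). The constant therefore always equals unsafe.Sizeof(uintptr(0)) but, as the comment says, stays usable as a compile-time constant. A minimal standalone check:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // Same trick as stubs.go#L9: on 64-bit, ^uintptr(0)>>63 == 1, so 4<<1 == 8;
    // on 32-bit, ^uintptr(0) == 0xFFFFFFFF and the shift yields 0, so 4<<0 == 4.
    const ptrSize = 4 << (^uintptr(0) >> 63)

    func main() {
        fmt.Println(ptrSize == unsafe.Sizeof(uintptr(0))) // true on both 32- and 64-bit
    }
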
runtime/internal/math
math.go#L14: if a|b < 1<<(4*sys.PtrSize) || a == 0 {
runtime
alg.go#L14: c0 = uintptr((8-sys.PtrSize)/4*2860486313 + (sys.PtrSize-4)/4*33054211828000289)
alg.go#L15: c1 = uintptr((8-sys.PtrSize)/4*3267000013 + (sys.PtrSize-4)/4*23344194077549503)
alg.go#L320: const hashRandomBytes = sys.PtrSize / 4 * 64
alg.go#L341: getRandomData((*[len(hashkey) * sys.PtrSize]byte)(unsafe.Pointer(&hashkey))[:])
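
The c0/c1 definitions above use a branch-free selection idiom: since sys.PtrSize is either 4 or 8, (8-sys.PtrSize)/4 evaluates to 1 on 32-bit and 0 on 64-bit, while (sys.PtrSize-4)/4 is the opposite, so exactly one hash multiplier survives and the other is multiplied away to zero at compile time, with no build tags. A sketch, with ptrSize as a hypothetical stand-in for sys.PtrSize:

    package main

    import "fmt"

    const ptrSize = 8 // stand-in for sys.PtrSize; set to 4 to see the 32-bit selection

    // Exactly one term survives: (8-8)/4 == 0 and (8-4)/4 == 1 on 64-bit,
    // and vice versa on 32-bit, so c0 picks the platform-appropriate multiplier.
    const c0 = uintptr((8-ptrSize)/4*2860486313 + (ptrSize-4)/4*33054211828000289)

    func main() {
        fmt.Println(c0) // 33054211828000289 when ptrSize == 8
    }
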
cgocall.go#L480: p = *(*unsafe.Pointer)(add(p, sys.PtrSize))
cgocall.go#L560: for i = uintptr(0); i < n; i += sys.PtrSize {
cgocheck.go#L154: for i := uintptr(0); i < off+size; i += sys.PtrSize {
cgocheck.go#L172: skipMask := off / sys.PtrSize / 8
cgocheck.go#L173: skipBytes := skipMask * sys.PtrSize * 8
cgocheck.go#L179: for i := uintptr(0); i < size; i += sys.PtrSize {
cgocheck.go#L180: if i&(sys.PtrSize*8-1) == 0 {
cgocheck.go#L187: off -= sys.PtrSize
heapdump.go#L250: dumpint(uint64(offset + i*sys.PtrSize))
heapdump.go#L301: for off := child.argoff; off < child.argoff+child.arglen; off += sys.PtrSize {
heapdump.go#L310: for off := child.arglen; off < s.varp-s.sp; off += sys.PtrSize {
heapdump.go#L317: for off := s.varp - size - s.sp; off < s.varp-s.sp; off += sys.PtrSize {
heapdump.go#L324: dumpbv(&bv, s.varp-uintptr(bv.n)*sys.PtrSize-s.sp)
heapdump.go#L512: dumpint(sys.PtrSize)
heapdump.go#L727: nptr := size / sys.PtrSize
iface.go#L66: m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr)-1)*sys.PtrSize, 0, &memstats.other_sys))
iface.go#L103: p := (**itab)(add(unsafe.Pointer(&t.entries), h*sys.PtrSize))
iface.go#L136: t2 := (*itabTableType)(mallocgc((2+2*t.size)*sys.PtrSize, nil, true))
iface.go#L164: p := (**itab)(add(unsafe.Pointer(&t.entries), h*sys.PtrSize))
iface.go#L524: m := *(**itab)(add(unsafe.Pointer(&t.entries), i*sys.PtrSize))
malloc.go#L153: _NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9
malloc.go#L253: heapArenaBitmapBytes = heapArenaBytes / (sys.PtrSize * 8 / 2)
malloc.go#L487: if sys.PtrSize == 8 {
malloc.go#L737: l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), sys.PtrSize, nil))
malloc.go#L748: r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
malloc.go#L750: r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
malloc.go#L758: size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize
malloc.go#L762: newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gcMiscSys))
malloc.go#L767: *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)}
malloc.go#L1019: } else if sys.PtrSize == 4 && size == 12 {
malloc.go#L1371: persistent.off = alignUp(sys.PtrSize, align)
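
The heapArenaBitmapBytes expression at malloc.go#L253 encodes the heap bitmap's geometry: the bitmap spends two bits (a pointer bit and a scan bit) on each pointer-sized word, so one bitmap byte covers 8/2 = 4 words, i.e. sys.PtrSize*8/2 heap bytes. A worked calculation with illustrative values for a typical 64-bit, non-Windows target (the real, platform-dependent constants live in malloc.go):

    package main

    import "fmt"

    // Illustrative values; heapArenaBytes varies by platform in the runtime.
    const (
        ptrSize        = 8
        heapArenaBytes = 64 << 20 // assumed 64 MiB arenas
    )

    func main() {
        // Each bitmap byte describes 4 words = ptrSize*8/2 heap bytes.
        fmt.Println(heapArenaBytes / (ptrSize * 8 / 2)) // 2097152: 2 MiB of bitmap per arena
    }
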
map.go#L106: noCheck = 1<<(8*sys.PtrSize) - 1
map.go#L185: return uintptr(1) << (b & (sys.PtrSize*8 - 1))
map.go#L195: top := uint8(hash >> (sys.PtrSize*8 - 8))
map.go#L208: return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
map.go#L212: *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
map.go#L812: if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
map.go#L1283: if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
map.go#L1287: if t.elem.size > maxElemSize && (!t.indirectelem() || t.elemsize != uint8(sys.PtrSize)) ||
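
Two of the map.go lines are worth decoding. noCheck (map.go#L106) is simply the all-ones uintptr, a sentinel bucket ID that can never be a real bucket, and the tophash computation (map.go#L195) keeps the top byte of each key's hash in the bucket header so lookups can reject non-matching slots without comparing full keys. A small demonstration, assuming a 64-bit platform:

    package main

    import "fmt"

    const ptrSize = 8 // assumed: 64-bit

    // Untyped constant arithmetic is exact, so 1<<64 - 1 is computed without
    // overflow and only converted to uintptr at the point of use.
    const noCheck = 1<<(8*ptrSize) - 1

    func main() {
        fmt.Printf("%#x\n", uintptr(noCheck)) // 0xffffffffffffffff: all bits set

        hash := uintptr(0xDEADBEEF12345678)
        top := uint8(hash >> (ptrSize*8 - 8)) // tophash: the hash's top byte
        fmt.Printf("%#x\n", top)              // 0xde
    }
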
map_fast32.go#L304: if sys.PtrSize == 4 && t.key.ptrdata != 0 {
map_fast32.go#L430: if sys.PtrSize == 4 && t.key.ptrdata != 0 && writeBarrier.enabled {
map_fast64.go#L303: if sys.PtrSize == 8 {
map_fast64.go#L433: if sys.PtrSize == 8 {
map_faststr.go#L29: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
map_faststr.go#L38: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
map_faststr.go#L45: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
map_faststr.go#L54: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
map_faststr.go#L71: k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
map_faststr.go#L73: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize))
map_faststr.go#L94: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
map_faststr.go#L100: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
map_faststr.go#L124: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
map_faststr.go#L133: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
map_faststr.go#L140: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
map_faststr.go#L149: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
map_faststr.go#L166: k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
map_faststr.go#L168: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize)), true
map_faststr.go#L189: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
map_faststr.go#L195: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
map_faststr.go#L248: k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
map_faststr.go#L283: insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*sys.PtrSize)
map_faststr.go#L289: elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*sys.PtrSize+inserti*uintptr(t.elemsize))
map_faststr.go#L324: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
map_faststr.go#L334: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
map_faststr.go#L410: x.e = add(x.k, bucketCnt*2*sys.PtrSize)
map_faststr.go#L418: y.e = add(y.k, bucketCnt*2*sys.PtrSize)
map_faststr.go#L423: e := add(k, bucketCnt*2*sys.PtrSize)
map_faststr.go#L424: for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*sys.PtrSize), add(e, uintptr(t.elemsize)) {
map_faststr.go#L450: dst.e = add(dst.k, bucketCnt*2*sys.PtrSize)
map_faststr.go#L463: dst.k = add(dst.k, 2*sys.PtrSize)
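
The recurring 2*sys.PtrSize throughout map_faststr.go is the size of a string header: a data pointer plus a length, two words. That is why the key walks advance kptr by that stride and why the elems begin at dataOffset+bucketCnt*2*sys.PtrSize. Quick confirmation:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        // A string header is two pointer-sized words: data pointer + length.
        var s string
        fmt.Println(unsafe.Sizeof(s) == 2*unsafe.Sizeof(uintptr(0))) // true
    }
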
mbarrier.go#L199: if writeBarrier.needed && typ.ptrdata > off && size >= sys.PtrSize {
mbarrier.go#L200: if off&(sys.PtrSize-1) != 0 {
mbarrier.go#L203: pwsize := alignDown(size, sys.PtrSize)
mbarrier.go#L227: if writeBarrier.needed && typ != nil && typ.ptrdata != 0 && size >= sys.PtrSize {
mbitmap.go#L320: h.bitp = &ha.bitmap[(addr/(sys.PtrSize*4))%heapArenaBitmapBytes]
mbitmap.go#L321: h.shift = uint32((addr / sys.PtrSize) & 3)
mbitmap.go#L553: if (dst|src|size)&(sys.PtrSize-1) != 0 {
mbitmap.go#L588: for i := uintptr(0); i < size; i += sys.PtrSize {
mbitmap.go#L598: for i := uintptr(0); i < size; i += sys.PtrSize {
mbitmap.go#L621: if (dst|src|size)&(sys.PtrSize-1) != 0 {
mbitmap.go#L629: for i := uintptr(0); i < size; i += sys.PtrSize {
mbitmap.go#L649: word := maskOffset / sys.PtrSize
mbitmap.go#L654: for i := uintptr(0); i < size; i += sys.PtrSize {
mbitmap.go#L659: i += 7 * sys.PtrSize
mbitmap.go#L716: for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
mbitmap.go#L717: if i&(sys.PtrSize*8-1) == 0 {
mbitmap.go#L747: nw := (s.npages << _PageShift) / sys.PtrSize
mbitmap.go#L754: isPtrs := sys.PtrSize == 8 && s.elemsize == sys.PtrSize
mbitmap.go#L832: if sys.PtrSize == 8 && size == sys.PtrSize {
mbitmap.go#L858: if size == 2*sys.PtrSize {
mbitmap.go#L859: if typ.size == sys.PtrSize {
mbitmap.go#L868: if sys.PtrSize == 4 && dataSize == sys.PtrSize {
mbitmap.go#L882: if typ.size != 2*sys.PtrSize || typ.kind&kindGCProg != 0 {
mbitmap.go#L889: hb |= bitScanAll & ((bitScan << (typ.ptrdata / sys.PtrSize)) - 1)
mbitmap.go#L895: } else if size == 3*sys.PtrSize {
mbitmap.go#L902: if sys.PtrSize != 8 {
mbitmap.go#L908: if typ.size == 2*sys.PtrSize {
mbitmap.go#L913: if typ.size == sys.PtrSize {
mbitmap.go#L1059: const maxBits = sys.PtrSize*8 - 7
mbitmap.go#L1060: if typ.ptrdata/sys.PtrSize <= maxBits {
mbitmap.go#L1071: nb = typ.ptrdata / sys.PtrSize
mbitmap.go#L1076: nb = typ.size / sys.PtrSize
mbitmap.go#L1087: for endnb <= sys.PtrSize*8 {
mbitmap.go#L1106: n := (typ.ptrdata/sys.PtrSize+7)/8 - 1
mbitmap.go#L1108: endnb = typ.size/sys.PtrSize - n*8
mbitmap.go#L1119: nw = typ.ptrdata / sys.PtrSize
mbitmap.go#L1124: nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / sys.PtrSize
mbitmap.go#L1287: nw = size / sys.PtrSize
mbitmap.go#L1321: cnw := size / sys.PtrSize
mbitmap.go#L1386: end := heapBitsForAddr(x + size - sys.PtrSize)
mbitmap.go#L1413: nptr := typ.ptrdata / sys.PtrSize
mbitmap.go#L1414: ndata := typ.size / sys.PtrSize
mbitmap.go#L1416: totalptr := ((count-1)*typ.size + typ.ptrdata) / sys.PtrSize
mbitmap.go#L1417: for i := uintptr(0); i < size/sys.PtrSize; i++ {
mbitmap.go#L1442: println("at word", i, "offset", i*sys.PtrSize, "have", hex(have), "want", hex(want))
mbitmap.go#L1473: if sys.PtrSize == 8 && allocSize%(4*sys.PtrSize) != 0 {
mbitmap.go#L1480: if totalBits*sys.PtrSize != progSize {
mbitmap.go#L1495: if n := elemSize/sys.PtrSize - progSize/sys.PtrSize; n > 0 {
mbitmap.go#L1517: n := elemSize / sys.PtrSize
mbitmap.go#L1541: totalBits = (elemSize*(count-1) + progSize) / sys.PtrSize
mbitmap.go#L1544: endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/sys.PtrSize/wordsPerBitmapByte))
mbitmap.go#L1552: n := (size/sys.PtrSize + 7) / 8
mbitmap.go#L1687: const maxBits = sys.PtrSize*8 - 7
mbitmap.go#L1740: for nb <= sys.PtrSize*8 {
mbitmap.go#L1868: bitmapBytes := divRoundUp(ptrdata, 8*sys.PtrSize)
mbitmap.go#L1941: nptr := typ.ptrdata / sys.PtrSize
mbitmap.go#L1961: mask = make([]byte, n/sys.PtrSize)
mbitmap.go#L1962: for i := uintptr(0); i < n; i += sys.PtrSize {
mbitmap.go#L1963: off := (uintptr(p) + i - datap.data) / sys.PtrSize
mbitmap.go#L1964: mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
mbitmap.go#L1973: mask = make([]byte, n/sys.PtrSize)
mbitmap.go#L1974: for i := uintptr(0); i < n; i += sys.PtrSize {
mbitmap.go#L1975: off := (uintptr(p) + i - datap.bss) / sys.PtrSize
mbitmap.go#L1976: mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
mbitmap.go#L1986: mask = make([]byte, n/sys.PtrSize)
mbitmap.go#L1987: for i := uintptr(0); i < n; i += sys.PtrSize {
mbitmap.go#L1989: mask[i/sys.PtrSize] = 1
mbitmap.go#L1992: mask = mask[:i/sys.PtrSize]
mbitmap.go#L2011: size := uintptr(locals.n) * sys.PtrSize
mbitmap.go#L2013: mask = make([]byte, n/sys.PtrSize)
mbitmap.go#L2014: for i := uintptr(0); i < n; i += sys.PtrSize {
mbitmap.go#L2015: off := (uintptr(p) + i - frame.varp + size) / sys.PtrSize
mbitmap.go#L2016: mask[i/sys.PtrSize] = locals.ptrbit(off)
mcheckmark.go#L27: type checkmarksMap [heapArenaBytes / sys.PtrSize / 8]uint8
mfinal.go#L28: fin [(_FinBlockSize - 2*sys.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
mfinal.go#L35: var finptrmask [_FinBlockSize / sys.PtrSize / 8]byte
mfinal.go#L97: if (unsafe.Sizeof(finalizer{}) != 5*sys.PtrSize ||
mfinal.go#L99: unsafe.Offsetof(finalizer{}.arg) != sys.PtrSize ||
mfinal.go#L100: unsafe.Offsetof(finalizer{}.nret) != 2*sys.PtrSize ||
mfinal.go#L101: unsafe.Offsetof(finalizer{}.fint) != 3*sys.PtrSize ||
mfinal.go#L102: unsafe.Offsetof(finalizer{}.ot) != 4*sys.PtrSize) {
mfinal.go#L412: nret = alignUp(nret, sys.PtrSize)
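
The checks at mfinal.go#L97-L102 are an init-time layout assertion: the GC scans the finalizer queue with the hand-built finptrmask (mfinal.go#L35), so the code first verifies with unsafe.Sizeof/Offsetof that finalizer really is laid out as five pointer-sized words. A toy version of the idiom, using a simplified stand-in struct rather than the runtime's actual field types:

    package main

    import (
        "fmt"
        "unsafe"
    )

    const ptrSize = unsafe.Sizeof(uintptr(0))

    // Simplified stand-in for runtime.finalizer: five pointer-sized fields.
    type finalizer struct {
        fn   unsafe.Pointer
        arg  unsafe.Pointer
        nret uintptr
        fint unsafe.Pointer
        ot   unsafe.Pointer
    }

    func main() {
        ok := unsafe.Sizeof(finalizer{}) == 5*ptrSize &&
            unsafe.Offsetof(finalizer{}.arg) == ptrSize &&
            unsafe.Offsetof(finalizer{}.nret) == 2*ptrSize &&
            unsafe.Offsetof(finalizer{}.fint) == 3*ptrSize &&
            unsafe.Offsetof(finalizer{}.ot) == 4*ptrSize
        fmt.Println(ok) // true: the layout matches what a pointer mask would assume
    }
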
mgcmark.go#L249: if rootBlockBytes%(8*sys.PtrSize) != 0 {
mgcmark.go#L262: ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*sys.PtrSize))))
mgcmark.go#L376: scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil)
mgcmark.go#L741: scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), sys.PtrSize, &oneptrmask[0], gcw, &state)
mgcmark.go#L762: scanblock(uintptr(unsafe.Pointer(&d.fn)), sys.PtrSize, &oneptrmask[0], gcw, &state)
mgcmark.go#L767: scanblock(uintptr(unsafe.Pointer(&d.link)), sys.PtrSize, &oneptrmask[0], gcw, &state)
mgcmark.go#L773: scanblock(uintptr(unsafe.Pointer(&d)), sys.PtrSize, &oneptrmask[0], gcw, &state)
mgcmark.go#L917: size := uintptr(locals.n) * sys.PtrSize
mgcmark.go#L923: scanblock(frame.argp, uintptr(args.n)*sys.PtrSize, args.bytedata, gcw, state)
mgcmark.go#L1176: bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
mgcmark.go#L1178: i += sys.PtrSize * 8
mgcmark.go#L1194: i += sys.PtrSize
mgcmark.go#L1255: for i = 0; i < n; i += sys.PtrSize {
mgcmark.go#L1309: word := (p - b) / sys.PtrSize
mgcmark.go#L1334: for i := uintptr(0); i < n; i += sys.PtrSize {
mgcmark.go#L1336: word := i / sys.PtrSize
mgcmark.go#L1345: if i%(sys.PtrSize*8) != 0 {
mgcmark.go#L1348: i += sys.PtrSize*8 - sys.PtrSize
mgcmark.go#L1410: if obj&(sys.PtrSize-1) != 0 {
mgcmark.go#L1482: size = off + sys.PtrSize
mgcmark.go#L1484: for i := uintptr(0); i < size; i += sys.PtrSize {
mgcmark.go#L1488: if !(i < 128*sys.PtrSize || off-16*sys.PtrSize < i && i < off+16*sys.PtrSize) {
mgcstack.go#L110: obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / sys.PtrSize]uintptr
mgcwork.go#L325: obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / sys.PtrSize]uintptr
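
Both buffer types above size their trailing obj array with the same idiom: subtract the header size from a fixed block size and divide by sys.PtrSize, so header plus array tile the block exactly, with no slack. A self-contained sketch with an illustrative block size (the runtime uses _WorkbufSize):

    package main

    import (
        "fmt"
        "unsafe"
    )

    const blockSize = 2048 // illustrative

    type workbufhdr struct {
        next uintptr
        n    int
    }

    type workbuf struct {
        workbufhdr
        // Trailing array sized so the whole struct fills the block exactly.
        obj [(blockSize - unsafe.Sizeof(workbufhdr{})) / unsafe.Sizeof(uintptr(0))]uintptr
    }

    func main() {
        fmt.Println(unsafe.Sizeof(workbuf{}) == blockSize) // true on 32- and 64-bit alike
    }
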
mheap.go#L501: n := 64 * 1024 / sys.PtrSize
mheap.go#L507: sp.array = sysAlloc(uintptr(n)*sys.PtrSize, &memstats.other_sys)
mheap.go#L1804: scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil)
mranges.go#L170: ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, sysStat))
mranges.go#L297: ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, a.sysStat))
mranges.go#L367: ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, b.sysStat))
mspanset.go#L85: blockp := add(spine, sys.PtrSize*top)
mspanset.go#L105: newSpine := persistentalloc(newCap*sys.PtrSize, cpu.CacheLineSize, &memstats.gcMiscSys)
mspanset.go#L109: memmove(newSpine, b.spine, b.spineCap*sys.PtrSize)
mspanset.go#L127: blockp := add(b.spine, sys.PtrSize*top)
mspanset.go#L184: blockp := add(spine, sys.PtrSize*uintptr(top))
mspanset.go#L244: blockp := (**spanSetBlock)(add(b.spine, sys.PtrSize*uintptr(top)))
mwbbuf.go#L148: b.next += 2 * sys.PtrSize
panic.go#L251: case sys.PtrSize:
panic.go#L553: case sys.PtrSize:
preempt.go#L323: asyncPreemptStack = uintptr(total) + 8*sys.PtrSize
print.go#L269: var buf [2 * sys.PtrSize]byte
print.go#L284: for i := uintptr(0); p+i < end; i += sys.PtrSize {
proc.go#L125: if sys.PtrSize == 8 {
proc.go#L541: return *(**g)(add(unsafe.Pointer(ptr), i*sys.PtrSize))
proc.go#L3975: argp := add(unsafe.Pointer(&fn), sys.PtrSize)
proc.go#L4057: bulkBarrierBitmap(spArg, spArg, uintptr(bv.n)*sys.PtrSize, 0, bv.bytedata)
proc.go#L6240: p := add(unsafe.Pointer(t), (3+i)*sys.PtrSize)
proc.go#L6261: firstFunc := add(unsafe.Pointer(t), (3+t.ndeps)*sys.PtrSize)
proc.go#L6263: p := add(firstFunc, i*sys.PtrSize)
runtime1.go#L58: return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
runtime1.go#L193: if unsafe.Sizeof(k) != sys.PtrSize {
runtime1.go#L196: if unsafe.Sizeof(l) != sys.PtrSize {
runtime2.go#L891: for i := 0; i < sys.PtrSize && n < len(r); i++ {
signal_amd64.go#L79: sp -= sys.PtrSize
slice.go#L178: case et.size == sys.PtrSize:
slice.go#L179: lenmem = uintptr(old.len) * sys.PtrSize
slice.go#L180: newlenmem = uintptr(cap) * sys.PtrSize
slice.go#L181: capmem = roundupsize(uintptr(newcap) * sys.PtrSize)
slice.go#L182: overflow = uintptr(newcap) > maxAlloc/sys.PtrSize
slice.go#L183: newcap = int(capmem / sys.PtrSize)
slice.go#L186: if sys.PtrSize == 8 {
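
The growslice excerpt shows two PtrSize habits: pointer-sized elements get a dedicated case so the multiplications and divisions compile down to shifts, and the overflow guard at slice.go#L182 compares newcap against maxAlloc/sys.PtrSize instead of computing newcap*PtrSize, so the check itself cannot overflow. A sketch of that guard with illustrative stand-in constants:

    package main

    import "fmt"

    // Illustrative stand-ins; the runtime's maxAlloc is platform-specific.
    const (
        ptrSize  = 8
        maxAlloc = ^uintptr(0) >> 1
    )

    // Dividing the limit instead of multiplying the operand keeps the
    // comparison itself free of overflow.
    func overflows(newcap uintptr) bool {
        return newcap > maxAlloc/ptrSize
    }

    func main() {
        fmt.Println(overflows(1 << 20))     // false: fits comfortably
        fmt.Println(overflows(^uintptr(0))) // true: newcap*ptrSize would overflow
    }
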
stack.go#L69: _StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosIos*sys.GoarchArm64*1024
stack.go#L123: uintptrMask = 1<<(8*sys.PtrSize) - 1
stack.go#L592: print(" ", add(scanp, (i+j)*sys.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*sys.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
stack.go#L599: pp := (*uintptr)(add(scanp, (i+j)*sys.PtrSize))
stack.go#L648: size := uintptr(locals.n) * sys.PtrSize
stack.go#L703: for i := uintptr(0); i < t.ptrdata; i += sys.PtrSize {
stack.go#L704: if *addb(gcdata, i/(8*sys.PtrSize))>>(i/sys.PtrSize&7)&1 != 0 {
stack.go#L1012: sp -= sys.PtrSize
stack.go#L1282: n := int32(frame.arglen / sys.PtrSize)
stack.go#L1307: p = add(p, sys.PtrSize)
symtab.go#L538: if hdr.magic != 0xfffffffa || hdr.pad1 != 0 || hdr.pad2 != 0 || hdr.minLC != sys.PCQuantum || hdr.ptrSize != sys.PtrSize {
symtab.go#L736: return (targetpc / sys.PtrSize) % uintptr(len(pcvalueCache{}.entries))
symtab.go#L905: if x&(sys.PtrSize-1) != 0 {
symtab.go#L964: if sys.PtrSize == 8 && uintptr(p)&4 != 0 {
symtab.go#L970: return *(*unsafe.Pointer)(add(p, uintptr(i)*sys.PtrSize))
sys_x86.go#L18: if sys.RegSize > sys.PtrSize {
sys_x86.go#L19: sp -= sys.PtrSize
sys_x86.go#L22: sp -= sys.PtrSize
trace.go#L831: return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
trace.go#L931: data [64<<10 - sys.PtrSize]byte
trace.go#L942: n = alignUp(n, sys.PtrSize)
traceback.go#L436: for i := uintptr(0); i < frame.arglen/sys.PtrSize; i++ {
traceback.go#L496: frame.sp += sys.PtrSize
traceback.go#L612: retValid = *(*bool)(unsafe.Pointer(arg0 + 3*sys.PtrSize))
traceback.go#L619: arglen = uintptr(bv.n * sys.PtrSize)
traceback.go#L621: arglen = uintptr(mv.argLen) &^ (sys.PtrSize - 1)
traceback.go#L960: const expand = 32 * sys.PtrSize
traceback.go#L961: const maxExpand = 256 * sys.PtrSize
The pages are generated with Golds v0.3.2-preview. (GOOS=darwin GOARCH=amd64)
Golds is a Go 101 project developed by Tapir Liu.