func unsafe.Sizeof
156 uses
unsafe (current package)
unsafe.go#L188: func Sizeof(x ArbitraryType) uintptr
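Sizeof reports the number of bytes a variable of x's type occupies; the operand itself is not evaluated, and for the ordinary (non-generic) types on this page the call is a compile-time constant of type uintptr. A minimal sketch (the point type is illustrative, and the printed values assume GOARCH=amd64, matching this page):

    package main

    import (
        "fmt"
        "unsafe"
    )

    // point is illustrative; its size reflects field alignment,
    // not just the sum of the field sizes.
    type point struct {
        x int32
        y int64
    }

    func main() {
        var p point
        var s []byte
        fmt.Println(unsafe.Sizeof(p))       // 16: 4 bytes of x, 4 of padding, 8 of y
        fmt.Println(unsafe.Sizeof(s))       // 24: the slice header only, never the backing array
        fmt.Println(unsafe.Sizeof(int8(0))) // 1
    }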
go.opentelemetry.io/otel/label
value.go#L123: if unsafe.Sizeof(v) == 4 {
value.go#L132: if unsafe.Sizeof(v) == 4 {
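The value.go lines above appear to branch on the in-memory width of int, which is 4 bytes on 32-bit platforms and 8 bytes on 64-bit platforms. A minimal sketch of that idiom (intKind is a hypothetical name):

    package main

    import (
        "fmt"
        "unsafe"
    )

    // intKind reports the platform's int width by measuring a value of
    // type int rather than consulting build tags.
    func intKind(v int) string {
        if unsafe.Sizeof(v) == 4 {
            return "32-bit int"
        }
        return "64-bit int"
    }

    func main() {
        fmt.Println(intKind(0)) // "64-bit int" on GOARCH=amd64
    }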
golang.org/x/sys/unix
syscall_bsd.go#L552: n := uintptr(unsafe.Sizeof(tv))
syscall_bsd.go#L556: if n != unsafe.Sizeof(tv) {
syscall_darwin.go#L71: const siz = unsafe.Sizeof(mib[0])
syscall_darwin.go#L98: return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
syscall_darwin.go#L102: return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
syscall_darwin.go#L106: return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
syscall_darwin.go#L137: bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
syscall_darwin.go#L268: unsafe.Sizeof(attributes),
syscall_darwin.go#L324: n := unsafe.Sizeof(uname.Sysname)
syscall_darwin.go#L330: n = unsafe.Sizeof(uname.Nodename)
syscall_darwin.go#L336: n = unsafe.Sizeof(uname.Release)
syscall_darwin.go#L342: n = unsafe.Sizeof(uname.Version)
syscall_darwin.go#L360: n = unsafe.Sizeof(uname.Machine)
syscall_unix.go#L291: vallen := _Socklen(unsafe.Sizeof(tv))
syscall_unix.go#L361: return setsockopt(fd, level, opt, unsafe.Pointer(tv), unsafe.Sizeof(*tv))
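Several of the darwin call sites above pass unsafe.Offsetof and unsafe.Sizeof of a Dirent field to a readInt helper so the field can be pulled out of a raw kernel buffer without hard-coding its layout. A sketch of that pattern under simplifying assumptions: record and readUint are hypothetical stand-ins, and little-endian byte order is assumed (true for GOARCH=amd64):

    package main

    import (
        "encoding/binary"
        "fmt"
        "unsafe"
    )

    // record stands in for a fixed-layout kernel struct such as Dirent.
    type record struct {
        Ino    uint64
        Reclen uint16
        _      [6]byte // explicit trailing padding; total size is 16 bytes
    }

    // readUint pulls size bytes starting at off out of a raw buffer,
    // in the spirit of the readInt helper referenced above.
    func readUint(buf []byte, off, size uintptr) uint64 {
        b := buf[off : off+size]
        switch size {
        case 2:
            return uint64(binary.LittleEndian.Uint16(b))
        case 8:
            return binary.LittleEndian.Uint64(b)
        }
        return 0
    }

    func main() {
        r := record{Ino: 42, Reclen: 24}
        buf := (*[unsafe.Sizeof(record{})]byte)(unsafe.Pointer(&r))[:]
        // Offsetof/Sizeof locate each field without hard-coding its layout.
        fmt.Println(readUint(buf, unsafe.Offsetof(record{}.Ino), unsafe.Sizeof(record{}.Ino)))       // 42
        fmt.Println(readUint(buf, unsafe.Offsetof(record{}.Reclen), unsafe.Sizeof(record{}.Reclen))) // 24
    }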
google.golang.org/protobuf/internal/impl
pointer_unsafe.go#L140: const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{}))
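The pointer_unsafe.go line above appears to be a compile-time size assertion: if MessageState ever grows larger than a pointer, the constant expression becomes negative, no longer fits in a uintptr, and the package stops compiling. A sketch of the same idiom with an illustrative header type and a 16-byte budget:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // header is an illustrative struct we want to keep at or under
    // 16 bytes so it stays cheap to copy.
    type header struct {
        id   uint64
        next unsafe.Pointer
    }

    // Compile-time assertion: if header grows beyond 16 bytes the
    // constant below becomes negative, which does not fit in a
    // uintptr, and the build fails.
    const _ = 16 - unsafe.Sizeof(header{})

    func main() {
        fmt.Println(unsafe.Sizeof(header{})) // 16 on 64-bit platforms
    }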
internal/reflectlite
type.go#L664: uadd := unsafe.Sizeof(*t)
type.go#L666: uadd += unsafe.Sizeof(uncommonType{})
type.go#L675: uadd := unsafe.Sizeof(*t)
type.go#L677: uadd += unsafe.Sizeof(uncommonType{})
reflect
type.go#L1007: uadd := unsafe.Sizeof(*t)
type.go#L1009: uadd += unsafe.Sizeof(uncommonType{})
type.go#L1018: uadd := unsafe.Sizeof(*t)
type.go#L1020: uadd += unsafe.Sizeof(uncommonType{})
type.go#L2622: ut.moff = uint32(unsafe.Sizeof(uncommonType{}))
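In the type.go lines above, unsafe.Sizeof(*t) (plus, when present, unsafe.Sizeof(uncommonType{})) serves as a byte offset to reach data laid out in memory immediately after a header structure; the runtime's mprof.go entries below use the same add(ptr, unsafe.Sizeof(*b)) pattern. A sketch under simplifying assumptions: hdr and block are illustrative, and the payload's placement is guaranteed here by declaring it in the same struct rather than by a custom allocator:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // hdr plays the role of the header: metadata that is immediately
    // followed in memory by variable data.
    type hdr struct {
        n uintptr
    }

    // block fixes a concrete, correctly aligned layout: a hdr followed
    // by its payload.
    type block struct {
        h    hdr
        data [4]uintptr
    }

    func main() {
        b := &block{h: hdr{n: 4}, data: [4]uintptr{10, 20, 30, 40}}
        h := &b.h
        // Advance past the header by its own size to reach the payload,
        // mirroring add(unsafe.Pointer(b), unsafe.Sizeof(*b)) above.
        p := unsafe.Pointer(uintptr(unsafe.Pointer(h)) + unsafe.Sizeof(*h))
        payload := (*[4]uintptr)(p)
        fmt.Println(payload[0], payload[3]) // 10 40
    }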
runtime
alg.go#L37: size := *(*uintptr)(unsafe.Pointer(ptr + unsafe.Sizeof(h)))
chan.go#L28: hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1))
debugcall.go#L117: newg := newproc1(*(**funcval)(unsafe.Pointer(&fn)), unsafe.Pointer(&args), int32(unsafe.Sizeof(args)), gp, callerpc)
debuglog.go#L75: l = (*dlogger)(sysAlloc(unsafe.Sizeof(dlogger{}), nil))
debuglog.go#L717: state1 := sysAlloc(unsafe.Sizeof(readState{})*uintptr(n), nil)
heapdump.go#L677: memclrNoHeapPointers(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
iface.go#L66: m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.mhdr)-1)*sys.PtrSize, 0, &memstats.other_sys))
iface.go#L374: x = mallocgc(unsafe.Sizeof(val), stringType, true)
iface.go#L385: x = mallocgc(unsafe.Sizeof(val), sliceType, true)
malloc.go#L568: const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
malloc.go#L737: l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), sys.PtrSize, nil))
malloc.go#L748: r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
malloc.go#L750: r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
malloc.go#L1094: dataSize = unsafe.Sizeof(_defer{})
map.go#L812: if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
mcheckmark.go#L46: bitmap = (*checkmarksMap)(persistentalloc(unsafe.Sizeof(*bitmap), 0, &memstats.gcMiscSys))
mfinal.go#L28: fin [(_FinBlockSize - 2*sys.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
mfinal.go#L97: if (unsafe.Sizeof(finalizer{}) != 5*sys.PtrSize ||
mfinal.go#L186: framesz := unsafe.Sizeof((interface{})(nil)) + f.nret
mgc.go#L173: if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
mgcmark.go#L176: scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
mgcstack.go#L110: obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / sys.PtrSize]uintptr
mgcstack.go#L130: obj [(_WorkbufSize - unsafe.Sizeof(stackObjectBufHdr{})) / unsafe.Sizeof(stackObject{})]stackObject
mgcstack.go#L140: if unsafe.Sizeof(stackWorkBuf{}) > unsafe.Sizeof(workbuf{}) {
mgcstack.go#L143: if unsafe.Sizeof(stackObjectBuf{}) > unsafe.Sizeof(workbuf{}) {
mgcwork.go#L325: obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / sys.PtrSize]uintptr
mgcwork.go#L436: memmove(unsafe.Pointer(&b1.obj[0]), unsafe.Pointer(&b.obj[b.nobj]), uintptr(n)*unsafe.Sizeof(b1.obj[0]))
mheap.go#L208: pad [cpu.CacheLinePadSize - unsafe.Sizeof(mcentral{})%cpu.CacheLinePadSize]byte
mheap.go#L519: sysFree(unsafe.Pointer(&oldAllspans[0]), uintptr(cap(oldAllspans))*unsafe.Sizeof(oldAllspans[0]), &memstats.other_sys)
mheap.go#L705: h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
mheap.go#L706: h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
mheap.go#L707: h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
mheap.go#L708: h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
mheap.go#L709: h.arenaHintAlloc.init(unsafe.Sizeof(arenaHint{}), nil, nil, &memstats.other_sys)
mheap.go#L1887: const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})
mpagealloc.go#L397: r := sysAlloc(unsafe.Sizeof(*p.chunks[0]), p.sysStat)
mpagealloc.go#L902: pallocSumBytes = unsafe.Sizeof(pallocSum(0))
mpagecache.go#L12: const pageCachePages = 8 * unsafe.Sizeof(pageCache{}.cache)
mprof.go#L163: size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
mprof.go#L168: size += unsafe.Sizeof(memRecord{})
mprof.go#L170: size += unsafe.Sizeof(blockRecord{})
mprof.go#L182: stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
mprof.go#L191: data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
mprof.go#L200: data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
mprof.go#L207: buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
mprof.go#L599: racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), funcPC(MemProfile))
mprof.go#L602: msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
mprof.go#L647: racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), funcPC(BlockProfile))
mprof.go#L650: msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
mranges.go#L170: ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, sysStat))
mranges.go#L297: ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, a.sysStat))
mranges.go#L367: ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), sys.PtrSize, b.sysStat))
mspanset.go#L286: return (*spanSetBlock)(persistentalloc(unsafe.Sizeof(spanSetBlock{}), cpu.CacheLineSize, &memstats.gcMiscSys))
mstats.go#L460: if size := unsafe.Sizeof(heapStatsDelta{}); size%8 != 0 {
mwbbuf.go#L94: b.end = start + uintptr(len(b.buf))*unsafe.Sizeof(b.buf[0])
mwbbuf.go#L97: if (b.end-b.next)%(wbBufEntryPointers*unsafe.Sizeof(b.buf[0])) != 0 {
mwbbuf.go#L218: n := (_p_.wbBuf.next - start) / unsafe.Sizeof(_p_.wbBuf.buf[0])
netpoll.go#L532: const pdSize = unsafe.Sizeof(pollDesc{})
os_darwin.go#L135: nout := unsafe.Sizeof(out)
os_darwin.go#L155: nout := unsafe.Sizeof(out)
os_darwin.go#L167: nout := unsafe.Sizeof(out)
panic.go#L236: argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
panic.go#L318: deferHeaderSize = unsafe.Sizeof(_defer{})
panic.go#L373: return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
panic.go#L794: fd = add(fd, unsafe.Sizeof(b))
proc.go#L767: s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:]
proc.go#L2116: msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
proc.go#L4062: memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
runtime1.go#L163: if unsafe.Sizeof(a) != 1 {
runtime1.go#L166: if unsafe.Sizeof(b) != 1 {
runtime1.go#L169: if unsafe.Sizeof(c) != 2 {
runtime1.go#L172: if unsafe.Sizeof(d) != 2 {
runtime1.go#L175: if unsafe.Sizeof(e) != 4 {
runtime1.go#L178: if unsafe.Sizeof(f) != 4 {
runtime1.go#L181: if unsafe.Sizeof(g) != 8 {
runtime1.go#L184: if unsafe.Sizeof(h) != 8 {
runtime1.go#L187: if unsafe.Sizeof(i) != 4 {
runtime1.go#L190: if unsafe.Sizeof(j) != 8 {
runtime1.go#L193: if unsafe.Sizeof(k) != sys.PtrSize {
runtime1.go#L196: if unsafe.Sizeof(l) != sys.PtrSize {
runtime1.go#L199: if unsafe.Sizeof(x1) != 1 {
runtime1.go#L205: if unsafe.Sizeof(y1) != 2 {
sema.go#L51: pad [cpu.CacheLinePadSize - unsafe.Sizeof(semaRoot{})]byte
sema.go#L608: if sz != unsafe.Sizeof(notifyList{}) {
sema.go#L609: print("runtime: bad notifyList size - sync=", sz, " runtime=", unsafe.Sizeof(notifyList{}), "\n")
stack.go#L143: _ [cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize]byte
string.go#L204: uintptr(len(a))*unsafe.Sizeof(a[0]),
string.go#L209: msanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
symtab.go#L682: ffb := (*findfuncbucket)(add(unsafe.Pointer(datap.findfunctab), b*unsafe.Sizeof(findfuncbucket{})))
symtab.go#L931: return *(*uint32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4))
symtab.go#L963: p := add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(f.npcdata)*4)
trace.go#L160: arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
trace.go#L364: sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
trace.go#L643: buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
trace.go#L784: hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
trace.go#L831: return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
trace.go#L947: block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
trace.go#L965: sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
traceback.go#L1321: msanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
traceback.go#L1343: msanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
type.go#L321: uadd := uintptr(unsafe.Sizeof(functype{}))
type.go#L323: uadd += unsafe.Sizeof(uncommontype{})
type.go#L330: uadd := uintptr(unsafe.Sizeof(functype{}))
type.go#L332: uadd += unsafe.Sizeof(uncommontype{})
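Two patterns recur in the runtime entries above: uintptr(len(s))*unsafe.Sizeof(s[0]) computes the byte length of a slice's backing array (string.go, mgcmark.go, mwbbuf.go, trace.go), and chan.go rounds a struct size up to an alignment boundary. A sketch of both (payload and maxAlign are illustrative):

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        pcs := []uintptr{1, 2, 3, 4, 5}
        // Byte length of the backing array: element count times element size.
        n := uintptr(len(pcs)) * unsafe.Sizeof(pcs[0])
        fmt.Println(n) // 40 on 64-bit platforms

        // Rounding a struct size up to an alignment boundary, in the
        // style of the hchanSize computation in chan.go.
        const maxAlign = 8
        type payload struct{ a uint32 }
        size := unsafe.Sizeof(payload{})
        rounded := size + uintptr(-int(size)&(maxAlign-1))
        fmt.Println(size, rounded) // 4 8
    }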
runtime/pprof
map.go#L32: h = h<<8 | (h >> (8 * (unsafe.Sizeof(h) - 1)))
map.go#L35: h = h<<8 | (h >> (8 * (unsafe.Sizeof(h) - 1)))
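The map.go lines above rotate a uintptr hash left by one byte, using unsafe.Sizeof(h) so the shift width tracks the platform word size. A sketch (rotl8 is a hypothetical name; the example constant assumes a 64-bit uintptr):

    package main

    import (
        "fmt"
        "unsafe"
    )

    // rotl8 rotates a uintptr left by one byte: unsafe.Sizeof(h) gives
    // the word size in bytes, so the shifts stay correct on both
    // 32- and 64-bit platforms.
    func rotl8(h uintptr) uintptr {
        return h<<8 | h>>(8*(unsafe.Sizeof(h)-1))
    }

    func main() {
        fmt.Printf("%#x\n", rotl8(0xab00000000000001)) // 0x1ab
    }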
sync
pool.go#L70: pad [128 - unsafe.Sizeof(poolLocalInternal{})%128]byte
pool.go#L277: lp := unsafe.Pointer(uintptr(l) + uintptr(i)*unsafe.Sizeof(poolLocal{}))
runtime.go#L47: runtime_notifyListCheck(unsafe.Sizeof(n))
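pool.go pads each per-P slot out to a 128-byte boundary so neighbouring slots do not share a cache line; the mheap.go, sema.go and stack.go entries above do the same with cpu.CacheLinePadSize. A sketch of the idiom (counter and paddedCounter are illustrative):

    package main

    import (
        "fmt"
        "unsafe"
    )

    // counter is an illustrative per-CPU structure.
    type counter struct {
        n uint64
    }

    // paddedCounter pads counter out to a 128-byte boundary so adjacent
    // array elements do not suffer false sharing.
    type paddedCounter struct {
        counter
        _ [128 - unsafe.Sizeof(counter{})%128]byte
    }

    func main() {
        fmt.Println(unsafe.Sizeof(paddedCounter{})) // 128
    }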
syscall
exec_libc2.go#L257: rawSyscall(funcPC(libc_write_trampoline), uintptr(pipe), uintptr(unsafe.Pointer(&err1)), unsafe.Sizeof(err1))
exec_unix.go#L221: n, err = readlen(p[0], (*byte)(unsafe.Pointer(&err1)), int(unsafe.Sizeof(err1)))
exec_unix.go#L228: if n == int(unsafe.Sizeof(err1)) {
route_bsd.go#L205: const anyMessageLen = int(unsafe.Sizeof(anyMessage{}))
syscall_darwin.go#L31: const siz = unsafe.Sizeof(mib[0])
syscall_darwin.go#L58: return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
syscall_darwin.go#L62: return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
syscall_darwin.go#L66: return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
syscall_darwin.go#L106: bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
syscall_darwin.go#L139: uintptr(unsafe.Sizeof(attributes)),
syscall_unix.go#L339: return setsockopt(fd, level, opt, unsafe.Pointer(tv), unsafe.Sizeof(*tv))
The pages are generated with Golds v0.3.2-preview. (GOOS=darwin GOARCH=amd64)