Source File: mstats.go
Belonging Package: runtime

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)
// Statistics.
// For detailed descriptions see the documentation for MemStats.
type mstats struct {
	// General statistics.
	alloc       uint64 // bytes allocated and not yet freed
	total_alloc uint64 // bytes allocated (even if freed)
	sys         uint64 // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
	nlookup     uint64 // number of pointer lookups (unused)
	nmalloc     uint64 // number of mallocs
	nfree       uint64 // number of frees

	// Statistics about malloc heap.
	heap_sys      sysMemStat // virtual address space obtained from system for GC'd heap
	heap_inuse    uint64     // bytes in mSpanInUse spans
	heap_released uint64     // bytes released to the os
	heap_objects  uint64     // total number of allocated objects

	// Statistics about stacks.
	stacks_inuse uint64     // bytes in manually-managed stack spans; computed by updatememstats
	stacks_sys   sysMemStat // only counts newosproc0 stack in mstats; differs from MemStats.StackSys

	// Statistics about allocation of low-level fixed-size structures.
	mspan_inuse  uint64 // mspan structures
	mspan_sys    sysMemStat
	mcache_inuse uint64 // mcache structures
	mcache_sys   sysMemStat
	buckhash_sys sysMemStat // profiling bucket hash table

	// Statistics about GC overhead.
	gcWorkBufInUse           uint64     // computed by updatememstats
	gcProgPtrScalarBitsInUse uint64     // computed by updatememstats
	gcMiscSys                sysMemStat // updated atomically or during STW

	// Miscellaneous statistics.
	other_sys sysMemStat // updated atomically or during STW

	// Statistics about the garbage collector.

	// next_gc is the goal heap_live for when next GC ends.
	// Set to ^uint64(0) if disabled.
	next_gc uint64

	// Protected by mheap or stopping the world during GC.
	last_gc_unix    uint64 // last gc (in unix time)
	pause_total_ns  uint64
	pause_ns        [256]uint64 // circular buffer of recent gc pause lengths
	pause_end       [256]uint64 // circular buffer of recent gc end times (nanoseconds since 1970)
	numgc           uint32
	numforcedgc     uint32  // number of user-forced GCs
	gc_cpu_fraction float64 // fraction of CPU time used by GC
	enablegc        bool
	debuggc         bool

	// Statistics about allocation size classes.
	by_size [_NumSizeClasses]struct {
		size    uint32
		nmalloc uint64
		nfree   uint64
	}

	// Add an uint32 for even number of size classes to align below fields
	// to 64 bits for atomic operations on 32 bit platforms.
	_ [1 - _NumSizeClasses%2]uint32

	last_gc_nanotime uint64 // last gc (monotonic time)
	tinyallocs       uint64 // number of tiny allocations that didn't cause actual allocation; not exported to go directly

	last_next_gc    uint64 // next_gc for the previous GC
	last_heap_inuse uint64 // heap_inuse at mark termination of the previous GC

	// heap_live is the number of bytes considered live by the GC.
	// That is: retained by the most recent GC plus allocated since then.
	heap_live uint64

	// heapStats is a set of statistics that are updated atomically
	// and kept consistent with one another.
	heapStats consistentHeapStats

	// gcPauseDist represents the distribution of all GC-related
	// application pauses in the runtime.
	gcPauseDist timeHistogram
}

// A MemStats records statistics about the memory allocator.
type MemStats struct {
	// ...

	// BySize reports per-size class allocation statistics.
	BySize [61]struct {
		Size    uint32 // maximum byte size of an object in this size class
		Mallocs uint64 // cumulative count of heap objects allocated in this class
		Frees   uint64 // cumulative count of heap objects freed in this class
	}
}
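For illustration (not part of mstats.go): the exported mirror of these counters is read through runtime.ReadMemStats, including the per-size-class table that corresponds to by_size. A minimal sketch using only the public API; the printing format is arbitrary:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms) // briefly stops the world

	fmt.Printf("heap alloc: %d B, sys: %d B, objects: %d\n",
		ms.HeapAlloc, ms.Sys, ms.HeapObjects)

	// Walk the per-size-class table (by_size in mstats above),
	// skipping classes that were never used.
	for _, c := range ms.BySize {
		if c.Mallocs == 0 {
			continue
		}
		fmt.Printf("class <=%4d B: %d mallocs, %d frees\n",
			c.Size, c.Mallocs, c.Frees)
	}
}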
// Ensure fields that are accessed atomically as 64-bit values are
// 8-byte aligned, and that heapStatsDelta packs into 8-byte slots.
func init() {
	if offset := unsafe.Offsetof(memstats.heap_live); offset%8 != 0 {
		println(offset)
		throw("memstats.heap_live not aligned to 8 bytes")
	}
	if offset := unsafe.Offsetof(memstats.heapStats); offset%8 != 0 {
		println(offset)
		throw("memstats.heapStats not aligned to 8 bytes")
	}
	if offset := unsafe.Offsetof(memstats.gcPauseDist); offset%8 != 0 {
		println(offset)
		throw("memstats.gcPauseDist not aligned to 8 bytes")
	}
	// Ensure the size of heapStatsDelta causes adjacent fields/slots
	// (e.g. [3]heapStatsDelta) to be 8-byte aligned.
	if size := unsafe.Sizeof(heapStatsDelta{}); size%8 != 0 {
		println(size)
		throw("heapStatsDelta not a multiple of 8 bytes in size")
	}
}
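The same offset and size checks can be reproduced outside the runtime with package unsafe. A sketch with a hypothetical counters struct (not a runtime type), showing why the padding matters: on 32-bit platforms a uint64 used with sync/atomic must live at an 8-byte aligned offset, which is exactly what the init above enforces.

package main

import (
	"fmt"
	"unsafe"
)

// counters is a hypothetical stand-in for mstats.
type counters struct {
	enabled  bool
	_        [7]byte // manual padding keeps heapLive 8-byte aligned
	heapLive uint64  // would be accessed with atomic.LoadUint64/AddUint64
}

func main() {
	var c counters
	off := unsafe.Offsetof(c.heapLive)
	fmt.Printf("offset=%d size=%d\n", off, unsafe.Sizeof(c))
	if off%8 != 0 {
		panic("heapLive not aligned to 8 bytes")
	}
}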
// ReadMemStats populates m with memory allocator statistics.
//
// The returned memory allocator statistics are up to date as of the
// call to ReadMemStats. This is in contrast with a heap profile,
// which is a snapshot as of the most recently completed garbage
// collection cycle.
func ReadMemStats(m *MemStats) {
	stopTheWorld("read mem stats")

	systemstack(func() {
		readmemstats_m(m)
	})

	startTheWorld()
}
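Because ReadMemStats stops the world, callers usually sample it on a timer rather than per operation. A minimal sketch of that pattern:

package main

import (
	"log"
	"runtime"
	"time"
)

func main() {
	var ms runtime.MemStats
	for range time.Tick(10 * time.Second) {
		runtime.ReadMemStats(&ms) // one brief stop-the-world per sample
		log.Printf("heap=%dB numGC=%d pauseTotal=%v",
			ms.HeapAlloc, ms.NumGC, time.Duration(ms.PauseTotalNs))
	}
}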
// readmemstats_m populates stats for the caller.
//
// The world must be stopped.
func readmemstats_m(stats *MemStats) {
	updatememstats()

	stats.Alloc = memstats.alloc
	stats.TotalAlloc = memstats.total_alloc
	stats.Sys = memstats.sys
	stats.Mallocs = memstats.nmalloc
	stats.Frees = memstats.nfree
	stats.HeapAlloc = memstats.alloc
	stats.GCSys = memstats.gcMiscSys.load() + memstats.gcWorkBufInUse + memstats.gcProgPtrScalarBitsInUse
	stats.OtherSys = memstats.other_sys.load()
	stats.NextGC = memstats.next_gc
	stats.LastGC = memstats.last_gc_unix
	stats.PauseTotalNs = memstats.pause_total_ns
	stats.PauseNs = memstats.pause_ns
	stats.PauseEnd = memstats.pause_end
	stats.NumGC = memstats.numgc
	stats.NumForcedGC = memstats.numforcedgc
	stats.GCCPUFraction = memstats.gc_cpu_fraction
	stats.EnableGC = true
}
//go:linkname readGCStats runtime/debug.readGCStats
func readGCStats(pauses *[]uint64) {
	systemstack(func() {
		readGCStats_m(pauses)
	})
}
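readGCStats is linknamed as the implementation behind runtime/debug.ReadGCStats. A usage sketch of that public entry point:

package main

import (
	"fmt"
	"runtime"
	"runtime/debug"
)

func main() {
	// Create some garbage and force a collection so there is at
	// least one pause to report.
	_ = make([]byte, 1<<20)
	runtime.GC()

	var gs debug.GCStats
	debug.ReadGCStats(&gs)
	fmt.Printf("numGC=%d lastGC=%v pauseTotal=%v\n",
		gs.NumGC, gs.LastGC, gs.PauseTotal)
	for i, p := range gs.Pause {
		fmt.Printf("pause[%d] = %v\n", i, p)
	}
}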
// readGCStats_m must be called on the system stack because it acquires
// the heap lock. See mheap for details.
func readGCStats_m(pauses *[]uint64) {
	// ...
}
// Updates the memstats structure.
//
// The world must be stopped.
func updatememstats() {
	assertWorldStopped()

	// Flush mcaches to mcentral before doing anything else.
	systemstack(flushallmcaches)

	memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
	memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
	memstats.sys = memstats.heap_sys.load() + memstats.stacks_sys.load() + memstats.mspan_sys.load() +
		memstats.mcache_sys.load() + memstats.buckhash_sys.load() + memstats.gcMiscSys.load() +
		memstats.other_sys.load()

	// Reset the accumulators; they are recomputed below.
	memstats.nmalloc = 0
	memstats.nfree = 0
	for i := 0; i < len(memstats.by_size); i++ {
		memstats.by_size[i].nmalloc = 0
		memstats.by_size[i].nfree = 0
	}

	// Collect consistent stats, which are the source-of-truth in some cases.
	var consStats heapStatsDelta
	memstats.heapStats.unsafeRead(&consStats)

	// Collect large allocation stats.
	totalAlloc := uint64(consStats.largeAlloc)
	memstats.nmalloc += uint64(consStats.largeAllocCount)
	totalFree := uint64(consStats.largeFree)
	memstats.nfree += uint64(consStats.largeFreeCount)

	// Collect per-sizeclass stats.
	for i := 0; i < _NumSizeClasses; i++ {
		// Malloc stats.
		a := uint64(consStats.smallAllocCount[i])
		totalAlloc += a * uint64(class_to_size[i])
		memstats.nmalloc += a
		memstats.by_size[i].nmalloc = a

		// Free stats.
		f := uint64(consStats.smallFreeCount[i])
		totalFree += f * uint64(class_to_size[i])
		memstats.nfree += f
		memstats.by_size[i].nfree = f
	}

	// Account for tiny allocations.
	memstats.nfree += memstats.tinyallocs
	memstats.nmalloc += memstats.tinyallocs

	// Calculate derived stats.
	memstats.total_alloc = totalAlloc
	memstats.alloc = totalAlloc - totalFree
	memstats.heap_objects = memstats.nmalloc - memstats.nfree

	// Verify the cached stats against the consistent set.
	if memstats.heap_inuse != uint64(consStats.inHeap) {
		print("runtime: heap_inuse=", memstats.heap_inuse, "\n")
		print("runtime: consistent value=", consStats.inHeap, "\n")
		throw("heap_inuse and consistent stats are not equal")
	}
	if memstats.heap_released != uint64(consStats.released) {
		print("runtime: heap_released=", memstats.heap_released, "\n")
		print("runtime: consistent value=", consStats.released, "\n")
		throw("heap_released and consistent stats are not equal")
	}
	globalRetained := memstats.heap_sys.load() - memstats.heap_released
	consRetained := uint64(consStats.committed - consStats.inStacks - consStats.inWorkBufs - consStats.inPtrScalarBits)
	if globalRetained != consRetained {
		print("runtime: global value=", globalRetained, "\n")
		print("runtime: consistent value=", consRetained, "\n")
		throw("measures of the retained heap are not equal")
	}
}
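The retained-heap identity verified above (globalRetained) has a public-API analog: MemStats.HeapSys minus MemStats.HeapReleased, i.e. heap address space the runtime has not yet returned to the OS. A minimal sketch using only exported API; the interpretation follows the check above:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)

	// Mirrors updatememstats' globalRetained:
	// heap_sys - heap_released.
	retained := ms.HeapSys - ms.HeapReleased
	fmt.Printf("heap retained from OS: %d bytes\n", retained)
}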
// flushmcache flushes the mcache of allp[i].
//
// The world must be stopped.
func flushmcache(i int) {
	assertWorldStopped()

	p := allp[i]
	c := p.mcache
	if c == nil {
		return
	}
	c.releaseAll()
	stackcache_clear(c)
}
// flushallmcaches flushes the mcaches of all Ps.
//
// The world must be stopped.
func flushallmcaches() {
	assertWorldStopped()

	for i := 0; i < int(gomaxprocs); i++ {
		flushmcache(i)
	}
}
// sysMemStat represents a global system statistic that is managed atomically.
//
// This type must structurally be a uint64 so that mstats aligns with MemStats.
type sysMemStat uint64

// load atomically reads the value of the stat.
func (s *sysMemStat) load() uint64 {
	return atomic.Load64((*uint64)(s))
}
// heapStatsDelta contains deltas of various runtime memory statistics
// that need to be updated together in order for them to be kept
// consistent with one another.
type heapStatsDelta struct {
	// Memory stats.
	committed       int64 // byte delta of memory committed
	released        int64 // byte delta of released memory generated
	inHeap          int64 // byte delta of memory placed in the heap
	inStacks        int64 // byte delta of memory reserved for stacks
	inWorkBufs      int64 // byte delta of memory reserved for work bufs
	inPtrScalarBits int64 // byte delta of memory reserved for unrolled GC prog bits

	// Allocator stats.
	largeAlloc      uintptr                  // bytes allocated for large objects
	largeAllocCount uintptr                  // number of large object allocations
	smallAllocCount [_NumSizeClasses]uintptr // number of allocs for small objects
	largeFree       uintptr                  // bytes freed for large objects (>maxSmallSize)
	largeFreeCount  uintptr                  // number of frees for large objects (>maxSmallSize)
	smallFreeCount  [_NumSizeClasses]uintptr // number of frees for small objects (<=maxSmallSize)
}
// merge adds in the deltas from b into a.
func (a *heapStatsDelta) merge(b *heapStatsDelta) {
	a.committed += b.committed
	a.released += b.released
	a.inHeap += b.inHeap
	a.inStacks += b.inStacks
	a.inWorkBufs += b.inWorkBufs
	a.inPtrScalarBits += b.inPtrScalarBits

	a.largeAlloc += b.largeAlloc
	a.largeAllocCount += b.largeAllocCount
	for i := range b.smallAllocCount {
		a.smallAllocCount[i] += b.smallAllocCount[i]
	}
	a.largeFree += b.largeFree
	a.largeFreeCount += b.largeFreeCount
	for i := range b.smallFreeCount {
		a.smallFreeCount[i] += b.smallFreeCount[i]
	}
}
// acquire returns a heapStatsDelta to be updated. In effect,
// it acquires the shard for writing.
func (m *consistentHeapStats) acquire() *heapStatsDelta {
	if pp := getg().m.p.ptr(); pp != nil {
		// statsSeq is odd while this P has the shard acquired.
		seq := atomic.Xadd(&pp.statsSeq, 1)
		if seq%2 == 0 {
			throw("bad sequence number")
		}
	}
	// ...
	gen := atomic.Load(&m.gen) % 3
	return &m.stats[gen]
}
// unsafeRead aggregates the deltas for all shards into out.
//
// Unsafe because it does so without any synchronization. The
// world must be stopped.
func (m *consistentHeapStats) unsafeRead(out *heapStatsDelta) {
	assertWorldStopped()

	for i := range m.stats {
		out.merge(&m.stats[i])
	}
}
// unsafeClear clears the shards.
//
// Unsafe because the world must be stopped and values should
// be donated elsewhere before clearing.
func (m *consistentHeapStats) unsafeClear() {
	assertWorldStopped()

	for i := range m.stats {
		m.stats[i] = heapStatsDelta{}
	}
}
// read takes a globally consistent snapshot of m and puts the
// aggregated value in out.
func (m *consistentHeapStats) read(out *heapStatsDelta) {
	// We read allp below; hold the M so a STW can't move it under us.
	mp := acquirem()

	currGen := atomic.Load(&m.gen)
	prevGen := currGen - 1
	if currGen == 0 {
		prevGen = 2
	}
	// ... (rotate m.gen and spin until every p.statsSeq is even,
	// so no writer still holds the previous generation)

	// Fold the quiescent previous generation into the current one.
	m.stats[currGen].merge(&m.stats[prevGen])
	m.stats[prevGen] = heapStatsDelta{}

	// ...
	releasem(mp)
}
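The acquire/read pair is a lock-free "consistent stats" scheme: writers bump a sequence number to odd while mutating the current generation, and the reader rotates the generation, then spins until the sequence number is even again before summing. A self-contained sketch of the same idea with a single shard and a plain uint64 counter in place of heapStatsDelta; all names here are hypothetical, not the runtime's:

package main

import (
	"fmt"
	"sync/atomic"
)

// shard is a hypothetical single-shard analog of consistentHeapStats.
type shard struct {
	seq   uint32    // odd while a writer holds the shard
	gen   uint32    // generation writers currently target
	stats [3]uint64 // one counter per generation
}

// acquire marks the shard busy and returns the slot to update.
func (s *shard) acquire() *uint64 {
	if atomic.AddUint32(&s.seq, 1)%2 == 0 {
		panic("bad sequence number") // should have become odd
	}
	return &s.stats[atomic.LoadUint32(&s.gen)%3]
}

// release marks the shard quiescent again.
func (s *shard) release() {
	if atomic.AddUint32(&s.seq, 1)%2 != 0 {
		panic("bad sequence number") // should have become even
	}
}

// read rotates the generation, drains any in-flight writer, then
// folds the previous generation into the current one.
func (s *shard) read() uint64 {
	curr := atomic.LoadUint32(&s.gen) % 3
	prev := (curr + 2) % 3
	atomic.StoreUint32(&s.gen, (curr+1)%3) // move new writers along
	for atomic.LoadUint32(&s.seq)%2 != 0 {
		// spin: a writer is still inside the old generation
	}
	s.stats[curr] += s.stats[prev]
	s.stats[prev] = 0
	return s.stats[curr]
}

func main() {
	var s shard
	*s.acquire() += 100
	s.release()
	fmt.Println(s.read()) // 100
	*s.acquire() += 50
	s.release()
	fmt.Println(s.read()) // 150
}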