Source File: mheap.go
Belonging Package: runtime
package runtime
import (
	"internal/cpu"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	minPhysPageSize        = 4096
	maxPhysPageSize        = 512 << 10
	pagesPerReclaimerChunk = 512
	physPageAlignedStacks  = GOOS == "openbsd"
)
type mheap struct {
	lock mutex
	pagesInUse uint64 // pages of spans in stats mSpanInUse; updated atomically
pagesSwept uint64 // pages swept this cycle; updated atomically
pagesSweptBasis uint64 // pagesSwept to use as the origin of the sweep ratio; updated atomically
sweepHeapLiveBasis uint64 // value of heap_live to use as the origin of sweep ratio; written with lock, read without
arenas [1 << arenaL1Bits]*[1 << arenaL2Bits]*heapArena
central [numSpanClasses]struct {
mcentral mcentral
pad [cpu.CacheLinePadSize - unsafe.Sizeof(mcentral{})%cpu.CacheLinePadSize]byte
}
spanalloc fixalloc // allocator for span*
cachealloc fixalloc // allocator for mcache*
specialfinalizeralloc fixalloc // allocator for specialfinalizer*
specialprofilealloc fixalloc // allocator for specialprofile*
speciallock mutex // lock for special record allocators.
arenaHintAlloc fixalloc // allocator for arenaHints
unused *specialfinalizer // never set, just here to force the specialfinalizer type into DWARF
}
var mheap_ mheap
type heapArena struct {
	pageInUse [pagesPerArena / 8]uint8 // bitmap of spans in state mSpanInUse, indexed by the span's first page
	pageMarks [pagesPerArena / 8]uint8 // bitmap of spans with any marked objects on them
}
type mSpanState uint8
const (
mSpanDead mSpanState = iota
mSpanInUse // allocated for garbage collected heap
mSpanManual // allocated for manual management (e.g., stack allocator)
)
var mSpanStateNames = []string{
"mSpanDead",
"mSpanInUse",
"mSpanManual",
"mSpanFree",
}
type mSpanStateBox struct {
s mSpanState
}
// set atomically stores the span state into the box.
func (b *mSpanStateBox) set(s mSpanState) {
	atomic.Store8((*uint8)(&b.s), uint8(s))
}

// get atomically loads the span state from the box.
func (b *mSpanStateBox) get() mSpanState {
	return mSpanState(atomic.Load8((*uint8)(&b.s)))
}
type mspan struct {
next *mspan // next span in list, or nil if none
prev *mspan // previous span in list, or nil if none
list *mSpanList // For debugging. TODO: Remove.
startAddr uintptr // address of first byte of span aka s.base()
npages uintptr // number of pages in span
manualFreeList gclinkptr // list of free objects in mSpanManual spans
sweepgen uint32
divMul uint16 // for divide by elemsize - divMagic.mul
baseMask uint16 // if non-0, elemsize is a power of 2, & this will get object allocation base
allocCount uint16 // number of allocated objects
spanclass spanClass // size class and noscan (uint8)
state mSpanStateBox // mSpanInUse etc; accessed atomically (get/set methods)
needzero uint8 // needs to be zeroed before allocation
divShift uint8 // for divide by elemsize - divMagic.shift
divShift2 uint8 // for divide by elemsize - divMagic.shift2
elemsize uintptr // computed from sizeclass or from npages
limit uintptr // end of data in span
speciallock mutex // guards specials list
specials *special // linked list of special records sorted by offset.
}
// base returns the address of the first byte of the span.
func (s *mspan) base() uintptr {
	return s.startAddr
}

// layout returns the span's element size, element count, and total byte size.
func (s *mspan) layout() (size, n, total uintptr) {
	total = s.npages << _PageShift
	size = s.elemsize
	if size > 0 {
		n = total / size
	}
	return
}
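layout derives the span's total bytes from its page count and the object count from total/elemsize. A small self-contained sketch of the same arithmetic, assuming the 8 KiB page size the runtime currently uses (demoLayout is illustrative only):

package main

import "fmt"

const demoPageShift = 13 // 8 KiB pages, as used by the Go runtime

// demoLayout mirrors mspan.layout: total bytes come from the page count,
// and the object count is total/elemsize when elemsize is non-zero.
func demoLayout(npages, elemsize uintptr) (size, n, total uintptr) {
	total = npages << demoPageShift
	size = elemsize
	if size > 0 {
		n = total / size
	}
	return
}

func main() {
	size, n, total := demoLayout(1, 48)
	fmt.Println(size, n, total) // 48 170 8192
}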
// recordspan adds a newly allocated span to h.allspans.
func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
	h := (*mheap)(vh)
	s := (*mspan)(p)

	assertLockHeld(&h.lock)

	if len(h.allspans) >= cap(h.allspans) {
		n := 64 * 1024 / sys.PtrSize
		if n < cap(h.allspans)*3/2 {
			n = cap(h.allspans) * 3 / 2
		}
		var new []*mspan
		sp := (*slice)(unsafe.Pointer(&new))
		sp.array = sysAlloc(uintptr(n)*sys.PtrSize, &memstats.other_sys)
		if sp.array == nil {
			throw("runtime: cannot allocate memory")
		}
		sp.len = len(h.allspans)
		sp.cap = n
		if len(h.allspans) > 0 {
			copy(new, h.allspans)
		}
		oldAllspans := h.allspans
		*(*notInHeapSlice)(unsafe.Pointer(&h.allspans)) = *(*notInHeapSlice)(unsafe.Pointer(&new))
		if len(oldAllspans) != 0 {
			sysFree(unsafe.Pointer(&oldAllspans[0]), uintptr(cap(oldAllspans))*unsafe.Sizeof(oldAllspans[0]), &memstats.other_sys)
		}
	}
	h.allspans = h.allspans[:len(h.allspans)+1]
	h.allspans[len(h.allspans)-1] = s
}
type spanClass uint8
const (
numSpanClasses = _NumSizeClasses << 1
tinySpanClass = spanClass(tinySizeClass<<1 | 1)
)
func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
	return spanClass(sizeclass<<1) | spanClass(bool2int(noscan))
}

func (sc spanClass) sizeclass() int8 {
	return int8(sc >> 1)
}

func (sc spanClass) noscan() bool {
	return sc&1 != 0
}
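The helpers above pack a size class and a noscan flag into one byte: the size class lives in the upper bits and noscan in the low bit. A standalone sketch of that encoding (demoSpanClass is illustrative, not the runtime type):

package main

import "fmt"

type demoSpanClass uint8

func makeDemoSpanClass(sizeclass uint8, noscan bool) demoSpanClass {
	b := demoSpanClass(0)
	if noscan {
		b = 1
	}
	// Size class in the upper bits, noscan flag in bit 0.
	return demoSpanClass(sizeclass<<1) | b
}

func (sc demoSpanClass) sizeclass() int8 { return int8(sc >> 1) }
func (sc demoSpanClass) noscan() bool    { return sc&1 != 0 }

func main() {
	sc := makeDemoSpanClass(5, true)
	fmt.Println(sc.sizeclass(), sc.noscan()) // 5 true
}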
// arenaIndex returns the index into mheap_.arenas of the arena containing
// metadata for p.
func arenaIndex(p uintptr) arenaIdx {
	return arenaIdx((p - arenaBaseOffset) / heapArenaBytes)
}

// arenaBase returns the low address of the region covered by heap arena i.
func arenaBase(i arenaIdx) uintptr {
	return uintptr(i)*heapArenaBytes + arenaBaseOffset
}

type arenaIdx uint
func (i arenaIdx) l1() uint {
	if arenaL1Bits == 0 {
		// Let the compiler optimize this away if there's no L1 map.
		return 0
	} else {
		return uint(i) >> arenaL1Shift
	}
}

func (i arenaIdx) l2() uint {
	if arenaL1Bits == 0 {
		return uint(i)
	} else {
		return uint(i) & (1<<arenaL2Bits - 1)
	}
}
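arenaIndex maps an address to a (possibly two-level) arena index, and l1/l2 split that index into the two map levels. A self-contained sketch of the same split, using small illustrative constants rather than the platform-dependent runtime values (e.g. 64 MiB arenas and a single-level map on linux/amd64):

package main

import "fmt"

// Illustrative constants for the demo only.
const (
	demoArenaBytes = 1 << 22 // 4 MiB arenas
	demoL2Bits     = 8
	demoL1Shift    = demoL2Bits
)

type demoArenaIdx uint

func demoArenaIndex(p, base uintptr) demoArenaIdx {
	return demoArenaIdx((p - base) / demoArenaBytes)
}

func (i demoArenaIdx) l1() uint { return uint(i) >> demoL1Shift }
func (i demoArenaIdx) l2() uint { return uint(i) & (1<<demoL2Bits - 1) }

func main() {
	const base = 0x10000000
	ai := demoArenaIndex(base+10*demoArenaBytes+123, base)
	fmt.Println(uint(ai), ai.l1(), ai.l2()) // 10 0 10
}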
// inheap reports whether b is a pointer into a (potentially dead) heap object.
func inheap(b uintptr) bool {
	return spanOfHeap(b) != nil
}

// spanOfUnchecked is equivalent to spanOf, but the caller must ensure
// that p points into an allocated heap arena.
func spanOfUnchecked(p uintptr) *mspan {
	ai := arenaIndex(p)
	return mheap_.arenas[ai.l1()][ai.l2()].spans[(p/pageSize)%pagesPerArena]
}
// Initialize the heap.
func (h *mheap) init() {
	lockInit(&h.lock, lockRankMheap)
	lockInit(&h.speciallock, lockRankMheapSpecial)

	h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
	h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
	h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
	h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
	h.arenaHintAlloc.init(unsafe.Sizeof(arenaHint{}), nil, nil, &memstats.other_sys)

	// Don't zero mspan allocations: a span's sweepgen must survive
	// across freeing and re-allocating the span.
	h.spanalloc.zero = false

	for i := range h.central {
		h.central[i].mcentral.init(spanClass(i))
	}

	h.pages.init(&h.lock, &memstats.gcMiscSys)
}

// reclaim sweeps and reclaims at least npage pages into the heap.
// It is called before allocating npage pages to keep growth in check.
//
// h.lock must NOT be held.
func (h *mheap) reclaim(npage uintptr) {
	// Bail early if there's no more reclaim work.
	if atomic.Load64(&h.reclaimIndex) >= 1<<63 {
		return
	}

	// Disable preemption so the GC can't start while we're
	// sweeping, so we can read h.sweepArenas, and so
	// traceGCSweepStart/Done pair on the P.
	mp := acquirem()

	if trace.enabled {
		traceGCSweepStart()
	}

	arenas := h.sweepArenas
	locked := false
	for npage > 0 {
		// Pull from accumulated credit first.
		if credit := atomic.Loaduintptr(&h.reclaimCredit); credit > 0 {
			take := credit
			if take > npage {
				// Take only what we need.
				take = npage
			}
			if atomic.Casuintptr(&h.reclaimCredit, credit, credit-take) {
				npage -= take
			}
			continue
		}

		// Claim a chunk of work.
		idx := uintptr(atomic.Xadd64(&h.reclaimIndex, pagesPerReclaimerChunk) - pagesPerReclaimerChunk)
		if idx/pagesPerArena >= uintptr(len(arenas)) {
			// Page reclaiming is done.
			atomic.Store64(&h.reclaimIndex, 1<<63)
			break
		}

		if !locked {
			// Lock the heap for reclaimChunk.
			lock(&h.lock)
			locked = true
		}

		// Scan this chunk.
		nfound := h.reclaimChunk(arenas, idx, pagesPerReclaimerChunk)
		if nfound <= npage {
			npage -= nfound
		} else {
			// Put spare pages toward global credit.
			atomic.Xadduintptr(&h.reclaimCredit, nfound-npage)
			npage = 0
		}
	}
	if locked {
		unlock(&h.lock)
	}

	if trace.enabled {
		traceGCSweepDone()
	}
	releasem(mp)
}
// reclaimChunk sweeps unmarked spans that start at page indexes
// [pageIdx, pageIdx+n) and returns the number of pages returned to the heap.
//
// h.lock must be held and the caller must be non-preemptible.
func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
	assertLockHeld(&h.lock)

	n0 := n
	var nFreed uintptr
	sg := h.sweepgen
	for n > 0 {
		ai := arenas[pageIdx/pagesPerArena]
		ha := h.arenas[ai.l1()][ai.l2()]
		// ... scan the in-use, unmarked bits of ha.pageInUse and ha.pageMarks,
		// sweep matching spans (dropping and retaking h.lock around each sweep),
		// add freed pages to nFreed, then advance pageIdx and decrement n ...
	}
	if trace.enabled {
		unlock(&h.lock)
		// Account for pages scanned but not reclaimed.
		traceGCSweepSpan((n0 - nFreed) * pageSize)
		lock(&h.lock)
	}

	assertLockHeld(&h.lock) // Must be locked on return.
	return nFreed
}
type spanAllocType uint8
const (
spanAllocHeap spanAllocType = iota // heap span
spanAllocStack // stack span
spanAllocPtrScalarBits // unrolled GC prog bitmap span
spanAllocWorkBuf // work buf span
)
// manual reports whether the span allocation is manually managed.
func (typ spanAllocType) manual() bool {
	return typ != spanAllocHeap
}
// alloc allocates a new span of npage pages from the GC'd heap.
// If needzero is true, the memory for the returned span will be zeroed.
func (h *mheap) alloc(npages uintptr, spanclass spanClass, needzero bool) *mspan {
	// Don't do any operations that lock the heap on the G stack.
	// It might trigger stack growth, and the stack growth code needs
	// to be able to allocate heap.
	var s *mspan
	systemstack(func() {
		// To prevent excessive heap growth, before allocating n pages
		// we need to sweep and reclaim at least n pages.
		if h.sweepdone == 0 {
			h.reclaim(npages)
		}
		s = h.allocSpan(npages, spanAllocHeap, spanclass)
	})

	if s != nil {
		if needzero && s.needzero != 0 {
			memclrNoHeapPointers(unsafe.Pointer(s.base()), s.npages<<_PageShift)
		}
		s.needzero = 0
	}
	return s
}
// allocNeedsZero reports whether the region [base, base+npage*pageSize),
// assumed to be allocated, needs to be zeroed, and updates heap arena
// metadata for future allocations.
func (h *mheap) allocNeedsZero(base, npage uintptr) (needZero bool) {
	for npage > 0 {
		ai := arenaIndex(base)
		ha := h.arenas[ai.l1()][ai.l2()]

		zeroedBase := atomic.Loaduintptr(&ha.zeroedBase)
		arenaBase := base % heapArenaBytes
		if arenaBase < zeroedBase {
			// We extended into the non-zeroed part of the arena, so this
			// region needs to be zeroed before use.
			needZero = true
		}

		// Compute how far into the arena we extend, capped at heapArenaBytes.
		arenaLimit := arenaBase + npage*pageSize
		if arenaLimit > heapArenaBytes {
			arenaLimit = heapArenaBytes
		}
		// Increase ha.zeroedBase so it's >= arenaLimit.
		// We may be racing with other updates.
		for arenaLimit > zeroedBase {
			if atomic.Casuintptr(&ha.zeroedBase, zeroedBase, arenaLimit) {
				break
			}
			zeroedBase = atomic.Loaduintptr(&ha.zeroedBase)
			// Sanity check zeroedBase.
			if zeroedBase <= arenaLimit && zeroedBase > arenaBase {
				throw("potentially overlapping in-use allocations detected")
			}
		}

		// Move base forward and subtract from npage to move into
		// the next arena, or finish.
		base += arenaLimit - arenaBase
		npage -= (arenaLimit - arenaBase) / pageSize
	}
	return
}
// tryAllocMSpan attempts to allocate an mspan object from the P-local
// cache, but may fail.
//
//go:systemstack
func (h *mheap) tryAllocMSpan() *mspan {
	pp := getg().m.p.ptr()
	// If we don't have a p or the cache is empty, we can't do anything here.
	if pp == nil || pp.mspancache.len == 0 {
		return nil
	}
	// Pull off the last entry in the cache.
	s := pp.mspancache.buf[pp.mspancache.len-1]
	pp.mspancache.len--
	return s
}

// allocMSpanLocked allocates an mspan object.
//
// h.lock must be held.
//
//go:systemstack
func (h *mheap) allocMSpanLocked() *mspan {
	assertLockHeld(&h.lock)

	pp := getg().m.p.ptr()
	if pp == nil {
		// We don't have a p so just do the normal thing.
		return (*mspan)(h.spanalloc.alloc())
	}
	// Refill the cache if necessary.
	if pp.mspancache.len == 0 {
		const refillCount = len(pp.mspancache.buf) / 2
		for i := 0; i < refillCount; i++ {
			pp.mspancache.buf[i] = (*mspan)(h.spanalloc.alloc())
		}
		pp.mspancache.len = refillCount
	}
	// Pull off the last entry in the cache.
	s := pp.mspancache.buf[pp.mspancache.len-1]
	pp.mspancache.len--
	return s
}
// freeMSpanLocked frees an mspan object.
//
// h.lock must be held.
//
//go:systemstack
func (h *mheap) freeMSpanLocked(s *mspan) {
	assertLockHeld(&h.lock)

	pp := getg().m.p.ptr()
	// First try to free the mspan directly to the cache.
	if pp != nil && pp.mspancache.len < len(pp.mspancache.buf) {
		pp.mspancache.buf[pp.mspancache.len] = s
		pp.mspancache.len++
		return
	}
	// Failing that (or if we don't have a p), free it to the heap.
	h.spanalloc.free(unsafe.Pointer(s))
}
// allocSpan allocates an mspan which owns npages worth of memory.
//
//go:systemstack
func (h *mheap) allocSpan(npages uintptr, typ spanAllocType, spanclass spanClass) (s *mspan) {
	gp := getg()
	base, scav := uintptr(0), uintptr(0)

	// On some platforms we need to provide physical page aligned stack allocations.
	needPhysPageAlign := physPageAlignedStacks && typ == spanAllocStack && pageSize < physPageSize

	// If the allocation is small enough, try the P-local page cache first.
	pp := gp.m.p.ptr()
	if !needPhysPageAlign && pp != nil && npages < pageCachePages/4 {
		c := &pp.pcache
		// If the cache is empty, refill it.
		if c.empty() {
			lock(&h.lock)
			*c = h.pages.allocToCache()
			unlock(&h.lock)
		}
		// Try to allocate from the cache.
		base, scav = c.alloc(npages)
		if base != 0 {
			s = h.tryAllocMSpan()
			if s != nil {
				goto HaveSpan
			}
			// We have a base but no mspan, so we need to lock the heap.
		}
	}

	// For one reason or another, we couldn't get the whole job done
	// without the heap lock.
	lock(&h.lock)

	if needPhysPageAlign {
		// Overallocate by a physical page to allow for later alignment.
		npages += physPageSize / pageSize
	}

	if base == 0 {
		// ... acquire a base address from h.pages, growing the heap if necessary ...
	}
	if s == nil {
		// We failed to get an mspan earlier, so grab one now that we have the heap lock.
		s = h.allocMSpanLocked()
	}

	if needPhysPageAlign {
		allocBase, allocPages := base, npages
		base = alignUp(allocBase, physPageSize)
		npages -= physPageSize / pageSize
		// ... return the unused pages around the aligned allocation to h.pages ...
	}

	unlock(&h.lock)

HaveSpan:
	// At this point, both s != nil and base != 0, and the heap lock
	// is no longer held. Initialize the span.
	s.init(base, npages)
	if h.allocNeedsZero(base, npages) {
		s.needzero = 1
	}
	nbytes := npages * pageSize
	if typ.manual() {
		s.manualFreeList = 0
		s.nelems = 0
		s.limit = s.base() + s.npages*pageSize
		s.state.set(mSpanManual)
	} else {
		// ... size-class-dependent fields (spanclass, elemsize, nelems,
		// and the divmagic fields) are initialized here ...
		s.freeindex = 0
		s.allocCache = ^uint64(0) // all 1s indicating all free.
		s.gcmarkBits = newMarkBits(s.nelems)
		s.allocBits = newAllocBits(s.nelems)
		s.state.set(mSpanInUse)
	}

	// Update stats.
	if typ == spanAllocHeap {
		atomic.Xadd64(&memstats.heap_inuse, int64(nbytes))
	}
	// Update consistent stats.
	stats := memstats.heapStats.acquire()
	atomic.Xaddint64(&stats.committed, int64(scav))
	atomic.Xaddint64(&stats.released, -int64(scav))
	switch typ {
	case spanAllocHeap:
		atomic.Xaddint64(&stats.inHeap, int64(nbytes))
	case spanAllocStack:
		atomic.Xaddint64(&stats.inStacks, int64(nbytes))
	case spanAllocPtrScalarBits:
		atomic.Xaddint64(&stats.inPtrScalarBits, int64(nbytes))
	case spanAllocWorkBuf:
		atomic.Xaddint64(&stats.inWorkBufs, int64(nbytes))
	}
	memstats.heapStats.release()

	// Publish the span in various locations.
	h.setSpans(s.base(), npages, s)
	if !typ.manual() {
		// Mark in-use span in arena page bitmap.
		arena, pageIdx, pageMask := pageIndexOf(s.base())
		atomic.Or8(&arena.pageInUse[pageIdx], pageMask)

		// Update related page sweeper stats.
		atomic.Xadd64(&h.pagesInUse, int64(npages))
	}

	// Make sure the newly allocated span will be observed
	// by the GC before pointers into the span are published.
	publicationBarrier()

	return s
}
// grow allocates npage worth of memory onto the heap, returning false on failure.
//
// h.lock must be held.
func (h *mheap) grow(npage uintptr) bool {
	assertLockHeld(&h.lock)

	// We must grow the heap in whole palloc chunks.
	ask := alignUp(npage, pallocChunkPages) * pageSize

	totalGrowth := uintptr(0)
	// This may overflow because ask could be very large.
	end := h.curArena.base + ask
	nBase := alignUp(end, physPageSize)
	if nBase > h.curArena.end || /* overflow */ end < h.curArena.base {
		// Not enough room in the current arena: map more arena space with
		// h.sysAlloc(ask), update h.curArena, and recompute the new base.
		// ... (arena switch elided in this listing) ...
		nBase = alignUp(h.curArena.base+ask, physPageSize)
	}

	// ... grow into the current arena: advance h.curArena.base to nBase,
	// tell the page allocator about the new range, and add it to totalGrowth ...

	// We just caused a heap growth, so scavenge down what will soon be used.
	if retained := heapRetained(); retained+uint64(totalGrowth) > h.scavengeGoal {
		todo := totalGrowth
		if overage := uintptr(retained + uint64(totalGrowth) - h.scavengeGoal); todo > overage {
			todo = overage
		}
		h.pages.scavenge(todo, false)
	}
	return true
}
// freeSpan returns the span back to the heap.
func (h *mheap) freeSpan(s *mspan) {
	systemstack(func() {
		lock(&h.lock)
		if msanenabled {
			// Tell msan that this entire span is no longer in use.
			base := unsafe.Pointer(s.base())
			bytes := s.npages << _PageShift
			msanfree(base, bytes)
		}
		h.freeSpanLocked(s, spanAllocHeap)
		unlock(&h.lock)
	})
}
// freeManual frees a manually-managed span.
//
//go:systemstack
func (h *mheap) freeManual(s *mspan, typ spanAllocType) {
	s.needzero = 1
	lock(&h.lock)
	h.freeSpanLocked(s, typ)
	unlock(&h.lock)
}
func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
	assertLockHeld(&h.lock)

	switch s.state.get() {
	case mSpanManual:
		if s.allocCount != 0 {
			throw("mheap.freeSpanLocked - invalid stack free")
		}
	case mSpanInUse:
		if s.allocCount != 0 || s.sweepgen != h.sweepgen {
			print("mheap.freeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
			throw("mheap.freeSpanLocked - invalid free")
		}
		atomic.Xadd64(&h.pagesInUse, -int64(s.npages))

		// Clear in-use bit in arena page bitmap.
		arena, pageIdx, pageMask := pageIndexOf(s.base())
		atomic.And8(&arena.pageInUse[pageIdx], ^pageMask)
	default:
		throw("mheap.freeSpanLocked - invalid span state")
	}

	// Update stats (mirrors the accounting in allocSpan).
	nbytes := s.npages * pageSize
	if typ == spanAllocHeap {
		atomic.Xadd64(&memstats.heap_inuse, -int64(nbytes))
	}
	stats := memstats.heapStats.acquire()
	switch typ {
	case spanAllocHeap:
		atomic.Xaddint64(&stats.inHeap, -int64(nbytes))
	case spanAllocStack:
		atomic.Xaddint64(&stats.inStacks, -int64(nbytes))
	case spanAllocPtrScalarBits:
		atomic.Xaddint64(&stats.inPtrScalarBits, -int64(nbytes))
	case spanAllocWorkBuf:
		atomic.Xaddint64(&stats.inWorkBufs, -int64(nbytes))
	}
	memstats.heapStats.release()

	// Mark the space as free.
	h.pages.free(s.base(), s.npages)

	// Free the span structure. We no longer have a use for it.
	s.state.set(mSpanDead)
	h.freeMSpanLocked(s)
}
//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
func runtime_debug_freeOSMemory() {
	GC()
	systemstack(func() { mheap_.scavengeAll() })
}
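runtime_debug_freeOSMemory is the runtime side of runtime/debug.FreeOSMemory: it forces a garbage collection and then asks the heap to scavenge, returning as much memory as possible to the operating system. From user code it is reached through the public wrapper, for example:

package main

import (
	"fmt"
	"runtime"
	"runtime/debug"
)

func main() {
	// Allocate and drop some garbage, then force a collection and
	// hand freed pages back to the operating system.
	_ = make([]byte, 64<<20)
	debug.FreeOSMemory() // runs GC() and the heap scavenger

	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	fmt.Println("heap released (bytes):", ms.HeapReleased)
}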
// Initialize a new span with the given start and npages.
func (span *mspan) init(base uintptr, npages uintptr) {
	// span is *not* zeroed.
	span.next = nil
	span.prev = nil
	span.list = nil
	span.startAddr = base
	span.npages = npages
	span.allocCount = 0
	span.spanclass = 0
	span.elemsize = 0
	span.speciallock.key = 0
	span.specials = nil
	span.needzero = 0
	span.freeindex = 0
	span.allocBits = nil
	span.gcmarkBits = nil
	span.state.set(mSpanDead)
	lockInit(&span.speciallock, lockRankMspanSpecial)
}
func (span *mspan) inList() bool {
	return span.list != nil
}
// Initialize an empty doubly-linked list.
func (list *mSpanList) init() {
	list.first = nil
	list.last = nil
}
func (list *mSpanList) remove(span *mspan) {
	if span.list != list {
		print("runtime: failed mSpanList.remove span.npages=", span.npages,
			" span=", span, " prev=", span.prev, " span.list=", span.list, " list=", list, "\n")
		throw("mSpanList.remove")
	}
	if list.first == span {
		list.first = span.next
	} else {
		span.prev.next = span.next
	}
	if list.last == span {
		list.last = span.prev
	} else {
		span.next.prev = span.prev
	}
	span.next = nil
	span.prev = nil
	span.list = nil
}
func (list *mSpanList) isEmpty() bool {
	return list.first == nil
}
func (list *mSpanList) insert(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("runtime: failed mSpanList.insert", span, span.next, span.prev, span.list)
		throw("mSpanList.insert")
	}
	span.next = list.first
	if list.first != nil {
		// The list contains at least one span; link it in.
		list.first.prev = span
	} else {
		// The list contains no spans, so this is also the last span.
		list.last = span
	}
	list.first = span
	span.list = list
}

const (
	_KindSpecialFinalizer = 1
	_KindSpecialProfile   = 2
)
// spanHasSpecials marks a span as having specials in the arena bitmap.
func spanHasSpecials(s *mspan) {
	arenaPage := (s.base() / pageSize) % pagesPerArena
	ai := arenaIndex(s.base())
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	atomic.Or8(&ha.pageSpecials[arenaPage/8], uint8(1)<<(arenaPage%8))
}

// spanHasNoSpecials marks a span as having no specials in the arena bitmap.
func spanHasNoSpecials(s *mspan) {
	arenaPage := (s.base() / pageSize) % pagesPerArena
	ai := arenaIndex(s.base())
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	atomic.And8(&ha.pageSpecials[arenaPage/8], ^(uint8(1) << (arenaPage % 8)))
}
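Both helpers above locate a span's bit in the per-arena pageSpecials bitmap: one byte covers eight pages, so the page index is split into a byte index (page/8) and a bit mask (1<<(page%8)). A small sketch of the same set/clear arithmetic on a plain byte slice (without the atomics the runtime uses):

package main

import "fmt"

// setPageBit and clearPageBit mirror the index/mask arithmetic used by
// spanHasSpecials and spanHasNoSpecials.
func setPageBit(bitmap []uint8, page uint) {
	bitmap[page/8] |= 1 << (page % 8)
}

func clearPageBit(bitmap []uint8, page uint) {
	bitmap[page/8] &^= 1 << (page % 8)
}

func main() {
	bitmap := make([]uint8, 4) // covers 32 pages
	setPageBit(bitmap, 11)
	fmt.Printf("%08b\n", bitmap[1]) // 00001000 (bit 3 of byte 1)
	clearPageBit(bitmap, 11)
	fmt.Printf("%08b\n", bitmap[1]) // 00000000
}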
// addspecial adds the special record s to the list of special records for
// the object p. It returns false if a record of the same kind already exists.
func addspecial(p unsafe.Pointer, s *special) bool {
	span := spanOfHeap(uintptr(p))
	if span == nil {
		throw("addspecial on invalid pointer")
	}

	// Ensure that the span is swept. Sweeping accesses the specials
	// list without locks, so we have to synchronize with it.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - span.base()
	kind := s.kind

	lock(&span.speciallock)

	// Find the splice point, checking for an existing record.
	t := &span.specials
	// ... walk the sorted specials list; return false if a record with the
	// same offset and kind already exists, otherwise leave t at the splice point ...

	// Splice in record, fill in offset.
	s.offset = uint16(offset)
	s.next = *t
	*t = s
	spanHasSpecials(span)
	unlock(&span.speciallock)
	releasem(mp)

	return true
}
// removespecial removes the special record of the given kind for the object p
// and returns it, or nil if no such record exists.
func removespecial(p unsafe.Pointer, kind uint8) *special {
	span := spanOfHeap(uintptr(p))
	if span == nil {
		throw("removespecial on invalid pointer")
	}

	// Ensure that the span is swept.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - span.base()

	var result *special
	lock(&span.speciallock)
	t := &span.specials
	for {
		s := *t
		if s == nil {
			break
		}
		if offset == uintptr(s.offset) && kind == s.kind {
			*t = s.next
			result = s
			break
		}
		t = &s.next
	}
	if span.specials == nil {
		spanHasNoSpecials(span)
	}
	unlock(&span.speciallock)
	releasem(mp)
	return result
}
// addfinalizer adds a finalizer to the object p. Returns true if it succeeded.
func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
	lock(&mheap_.speciallock)
	s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialFinalizer
	s.fn = f
	s.nret = nret
	s.fint = fint
	s.ot = ot
	if addspecial(p, &s.special) {
		if gcphase != _GCoff {
			base, _, _ := findObject(uintptr(p), 0, 0)
			mp := acquirem()
			gcw := &mp.p.ptr().gcw
			// Mark everything reachable from the object so it's retained
			// for the finalizer, and mark the finalizer itself.
			scanobject(base, gcw)
			scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil)
			releasem(mp)
		}
		return true
	}

	// There was an old finalizer.
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
	return false
}
// removefinalizer removes the finalizer (if any) from the object p.
func removefinalizer(p unsafe.Pointer) {
	s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
	if s == nil {
		return // there wasn't a finalizer to remove
	}
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
}
type specialprofile struct {
special special
b *bucket
}
// setprofilebucket sets the heap profile bucket associated with addr p.
func setprofilebucket(p unsafe.Pointer, b *bucket) {
	lock(&mheap_.speciallock)
	s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialProfile
	s.b = b
	if !addspecial(p, &s.special) {
		throw("setprofilebucket: profile already set")
	}
}
// freespecial performs whatever cleanup is needed to deallocate s. It has
// already been unlinked from the mspan specials list.
func freespecial(s *special, p unsafe.Pointer, size uintptr) {
	switch s.kind {
	case _KindSpecialFinalizer:
		sf := (*specialfinalizer)(unsafe.Pointer(s))
		queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
		lock(&mheap_.speciallock)
		mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
		unlock(&mheap_.speciallock)
	case _KindSpecialProfile:
		sp := (*specialprofile)(unsafe.Pointer(s))
		mProf_Free(sp.b, size)
		lock(&mheap_.speciallock)
		mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
		unlock(&mheap_.speciallock)
	default:
		throw("bad special kind")
		panic("not reached")
	}
}
// bitp returns a pointer to the byte containing bit n and a mask for
// selecting that bit from *bytep.
func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8) {
	return b.bytep(n / 8), 1 << (n % 8)
}
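bitp turns a bit index into the address of the containing byte plus a single-bit mask. A standalone sketch of that lookup over a plain byte slice (demoBits is illustrative; the runtime's gcBits indexes raw arena memory instead):

package main

import "fmt"

type demoBits []uint8

// bitp mirrors gcBits.bitp: byte n/8 holds bit n, selected by mask 1<<(n%8).
func (b demoBits) bitp(n uintptr) (bytep *uint8, mask uint8) {
	return &b[n/8], 1 << (n % 8)
}

func main() {
	bits := make(demoBits, 8) // room for 64 mark bits
	bytep, mask := bits.bitp(13)
	*bytep |= mask                         // mark object 13
	fmt.Printf("byte 1 = %08b\n", bits[1]) // 00100000
}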
const gcBitsChunkBytes = uintptr(64 << 10)
const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})
type gcBitsHeader struct {
free uintptr // free is the index into bits of the next free byte.
next uintptr // *gcBits triggers recursive type bug. (issue 14620)
}
type gcBitsArena struct {
	free uintptr // free is the index into bits of the next free byte; read/write atomically
next *gcBitsArena
bits [gcBitsChunkBytes - gcBitsHeaderBytes]gcBits
}
var gcBitsArenas struct {
lock mutex
free *gcBitsArena
next *gcBitsArena // Read atomically. Write atomically under lock.
current *gcBitsArena
previous *gcBitsArena
}
// tryAlloc allocates from b or returns nil if b does not have enough room.
// This is safe to call concurrently.
func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits {
	if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) {
		return nil
	}
	// Try to allocate from this block.
	end := atomic.Xadduintptr(&b.free, bytes)
	if end > uintptr(len(b.bits)) {
		return nil
	}
	// There was enough room.
	start := end - bytes
	return &b.bits[start]
}

// newMarkBits returns a pointer to 8 byte aligned bytes
// to be used for a span's mark bits.
func newMarkBits(nelems uintptr) *gcBits {
	blocksNeeded := uintptr((nelems + 63) / 64)
	bytesNeeded := blocksNeeded * 8

	// Try directly allocating from the current head arena.
	head := (*gcBitsArena)(atomic.Loadp(unsafe.Pointer(&gcBitsArenas.next)))
	if p := head.tryAlloc(bytesNeeded); p != nil {
		return p
	}

	// There's not enough room in the head arena. We may need to allocate
	// a new arena.
	lock(&gcBitsArenas.lock)
	// Try the head arena again, since it may have changed.
	if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
		unlock(&gcBitsArenas.lock)
		return p
	}

	// Allocate a new arena. This may temporarily drop the lock.
	fresh := newArenaMayUnlock()
	// If newArenaMayUnlock dropped the lock, another thread may have put a
	// fresh arena on the "next" list. Try allocating from next again.
	if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
		// Put fresh back on the free list.
		fresh.next = gcBitsArenas.free
		gcBitsArenas.free = fresh
		unlock(&gcBitsArenas.lock)
		return p
	}

	// Allocate from the fresh arena. We haven't linked it in yet, so
	// this cannot race and is guaranteed to succeed.
	p := fresh.tryAlloc(bytesNeeded)
	if p == nil {
		throw("markBits overflow")
	}

	// Add the fresh arena to the "next" list.
	fresh.next = gcBitsArenas.next
	atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), unsafe.Pointer(fresh))

	unlock(&gcBitsArenas.lock)
	return p
}
// newAllocBits returns a pointer to 8 byte aligned bytes to be used for
// this span's alloc bits.
func newAllocBits(nelems uintptr) *gcBits {
	return newMarkBits(nelems)
}
// nextMarkBitArenaEpoch establishes a new epoch for the arenas holding the
// mark bits: next becomes current, current becomes previous, and the old
// previous arenas are released to the free list.
func nextMarkBitArenaEpoch() {
	lock(&gcBitsArenas.lock)
	if gcBitsArenas.previous != nil {
		if gcBitsArenas.free == nil {
			gcBitsArenas.free = gcBitsArenas.previous
		} else {
			// Find end of previous arenas.
			last := gcBitsArenas.previous
			for last = gcBitsArenas.previous; last.next != nil; last = last.next {
			}
			last.next = gcBitsArenas.free
			gcBitsArenas.free = gcBitsArenas.previous
		}
	}
	gcBitsArenas.previous = gcBitsArenas.current
	gcBitsArenas.current = gcBitsArenas.next
	atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), nil) // newMarkBits calls newArena when needed
	unlock(&gcBitsArenas.lock)
}
// newArenaMayUnlock allocates and zeroes a gcBits arena.
// The caller must hold gcBitsArenas.lock. This may temporarily release it.
func newArenaMayUnlock() *gcBitsArena {
	var result *gcBitsArena
	if gcBitsArenas.free == nil {
		unlock(&gcBitsArenas.lock)
		result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gcMiscSys))
		if result == nil {
			throw("runtime: cannot allocate memory")
		}
		lock(&gcBitsArenas.lock)
	} else {
		result = gcBitsArenas.free
		gcBitsArenas.free = gcBitsArenas.free.next
		memclrNoHeapPointers(unsafe.Pointer(result), gcBitsChunkBytes)
	}
	result.next = nil
	// If result.bits is not 8 byte aligned adjust index so
	// that &result.bits[result.free] is 8 byte aligned.
	if uintptr(unsafe.Offsetof(gcBitsArena{}.bits))&7 == 0 {
		result.free = 0
	} else {
		result.free = 8 - (uintptr(unsafe.Pointer(&result.bits[0])) & 7)
	}
	return result
}
This page was generated with Golds v0.3.2-preview (GOOS=darwin GOARCH=amd64), a Go 101 project developed by Tapir Liu.