Source File: mgcsweep.go
Belonging Package: runtime
// sweepClass is a spanClass and one bit to represent whether we're currently
// sweeping partial or full spans.
type sweepClass uint32

const (
	numSweepClasses            = numSpanClasses * 2
	sweepClassDone  sweepClass = sweepClass(^uint32(0))
)
func (s *sweepClass) load() sweepClass {
	return sweepClass(atomic.Load((*uint32)(s)))
}

// update raises *s to sNew if it is currently smaller; the index only increases.
func (s *sweepClass) update(sNew sweepClass) {
	sOld := s.load()
	for sOld < sNew && !atomic.Cas((*uint32)(s), uint32(sOld), uint32(sNew)) {
		sOld = s.load()
	}
}

func (s *sweepClass) clear() {
	atomic.Store((*uint32)(s), 0)
}

// split returns the underlying span class as well as whether the full
// (true) or partial (false) unswept list is meant.
func (s sweepClass) split() (spc spanClass, full bool) {
	return spanClass(s >> 1), s&1 == 0
}

// nextSpanForSweep finds and pops the next span for sweeping from the
// central sweep buffers. It returns ownership of the span to the caller.
// Returns nil if no such span exists.
func (h *mheap) nextSpanForSweep() *mspan {
	sg := h.sweepgen
	for sc := sweep.centralIndex.load(); sc < numSweepClasses; sc++ {
		spc, full := sc.split()
		c := &h.central[spc].mcentral
		var s *mspan
		if full {
			s = c.fullUnswept(sg).pop()
		} else {
			s = c.partialUnswept(sg).pop()
		}
		if s != nil {
			// Write down that we found something so future sweepers
			// can start from here.
			sweep.centralIndex.update(sc)
			return s
		}
	}
	// Write down that we found nothing.
	sweep.centralIndex.update(sweepClassDone)
	return nil
}

// finishsweep_m ensures that all spans are swept.
// The world must be stopped, so no sweeps can be in progress.
func finishsweep_m() {
	assertWorldStopped()

	// Sweeping must be complete before marking commences, so sweep any
	// unswept spans. If this is a concurrent GC there should be none left.
	for sweepone() != ^uintptr(0) {
		sweep.npausesweep++
	}

	// Sweeping is done, so wake the scavenger and set up the mark bit
	// arenas for the next cycle.
	wakeScavenger()
	nextMarkBitArenaEpoch()
}
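
The loop above treats ^uintptr(0) as a sentinel: sweepone reports the pages it returned to the heap, and an all-ones value means there was nothing left to sweep. A trivial, illustrative check of what that constant is:

package main

import "fmt"

func main() {
	// ^uintptr(0) is zero with every bit flipped, i.e. the largest uintptr,
	// which cannot be confused with any real page count.
	const noMoreWork = ^uintptr(0)
	fmt.Printf("%#x\n", noMoreWork) // 0xffffffffffffffff on 64-bit platforms
}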

// bgsweep is the background sweeper goroutine. It sweeps spans and frees
// workbufs until there is nothing left to do, then parks until the next
// sweep phase wakes it.
func bgsweep(c chan int) {
	sweep.g = getg()

	lockInit(&sweep.lock, lockRankSweep)
	lock(&sweep.lock)
	sweep.parked = true
	c <- 1
	goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)

	for {
		for sweepone() != ^uintptr(0) {
			sweep.nbgsweep++
			Gosched()
		}
		for freeSomeWbufs(true) {
			Gosched()
		}
		lock(&sweep.lock)
		if !isSweepDone() {
			// This can happen if a GC runs between sweepone returning
			// ^0 above and the lock being acquired.
			unlock(&sweep.lock)
			continue
		}
		sweep.parked = true
		goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
	}
}
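
bgsweep drains all available work, then re-acquires sweep.lock and re-checks that sweeping is really done before parking, so a GC that starts in between cannot leave it asleep with work pending. Outside the runtime the same shape is usually written with a mutex and condition variable; the sketch below is illustrative only (pool, takeOne, and shutdown are invented names, and sync.Cond stands in for goparkunlock/ready):

package main

import (
	"fmt"
	"sync"
)

// pool is a toy work source; pending counts queued work units.
type pool struct {
	mu      sync.Mutex
	cond    *sync.Cond
	pending int
	stop    bool
}

// run mirrors bgsweep's shape: drain everything, then re-check the
// condition under the lock before sleeping, so no wakeup is lost.
func (p *pool) run(done chan<- struct{}) {
	defer close(done)
	for {
		// Phase 1: consume all available work (cf. the sweepone loop).
		for p.takeOne() {
		}
		// Phase 2: park only if there is still nothing to do
		// (cf. the isSweepDone re-check before goparkunlock).
		p.mu.Lock()
		for p.pending == 0 && !p.stop {
			p.cond.Wait()
		}
		quit := p.stop && p.pending == 0
		p.mu.Unlock()
		if quit {
			return
		}
	}
}

func (p *pool) takeOne() bool {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.pending == 0 {
		return false
	}
	p.pending--
	return true
}

func (p *pool) add(n int) {
	p.mu.Lock()
	p.pending += n
	p.mu.Unlock()
	p.cond.Broadcast()
}

func (p *pool) shutdown() {
	p.mu.Lock()
	p.stop = true
	p.mu.Unlock()
	p.cond.Broadcast()
}

func main() {
	p := &pool{}
	p.cond = sync.NewCond(&p.mu)
	done := make(chan struct{})
	go p.run(done)
	p.add(3)
	p.shutdown()
	<-done
	fmt.Println("background worker exited")
}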

// sweepone sweeps some unswept heap span and returns the number of pages
// returned to the heap, or ^uintptr(0) if there was nothing to sweep.
func sweepone() uintptr {
	_g_ := getg()
	sweepRatio := mheap_.sweepPagesPerByte // For debugging

	// Increment locks to ensure that the goroutine is not preempted in the
	// middle of sweep, which would leave the span in an inconsistent state.
	_g_.m.locks++

	npages := ^uintptr(0)
	// ... register in mheap_.sweepers, pick an unswept span via
	// mheap_.nextSpanForSweep and sweep it, setting npages to the number
	// of pages returned to the heap (0 if the span stays in use) ...

	// If this was the last active sweeper and sweeping is done, start a
	// new scavenge generation and ask sysmon to wake the scavenger (we
	// may be on an allocation path, so we cannot call wakeScavenger here).
	if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepdone) != 0 {
		systemstack(func() {
			lock(&mheap_.lock)
			mheap_.pages.scavengeStartGen()
			unlock(&mheap_.lock)
		})
		readyForScavenger()

		if debug.gcpacertrace > 0 {
			print("pacer: sweep done at heap size ", memstats.heap_live>>20, "MB; allocated ", (memstats.heap_live-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept, " pages at ", sweepRatio, " pages/byte\n")
		}
	}
	_g_.m.locks--
	return npages
}
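
The tail of sweepone shows a common shape: each sweeper decrements an active-sweepers counter, and only the one that drops it to zero after sweeping is marked done performs the one-time follow-up (starting a scavenge generation and readying the scavenger). A generic, self-contained version of that idiom, with invented names and sync/atomic standing in for the runtime's atomics:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var (
		remaining int32 = 10 // units of work, e.g. unswept spans
		active    int32      // workers currently busy
		done      uint32     // set once a worker finds no work left
		wg        sync.WaitGroup
	)
	wrapUp := func() { fmt.Println("wrap-up ran exactly once") }

	for i := 0; i < 4; i++ {
		wg.Add(1)
		atomic.AddInt32(&active, 1)
		go func() {
			defer wg.Done()
			for atomic.AddInt32(&remaining, -1) >= 0 {
				// ... sweep one unit of work ...
			}
			// No work left: mark completion, then deregister. Exactly one
			// goroutine sees the active count hit zero, and by then done is
			// set, so the wrap-up runs once.
			atomic.StoreUint32(&done, 1)
			if atomic.AddInt32(&active, -1) == 0 && atomic.LoadUint32(&done) != 0 {
				wrapUp()
			}
		}()
	}
	wg.Wait()
}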

// sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to the heap.
// If preserve=true, don't return it to the heap nor relink it in mcentral
// lists; the caller takes care of it.
func (s *mspan) sweep(preserve bool) bool {
	// It's critical that we enter this function with preemption disabled,
	// GC must not start while we are in the middle of this function.
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("mspan.sweep: m is not locked")
	}
	sweepgen := mheap_.sweepgen
	if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("mspan.sweep: bad span state")
	}

	if trace.enabled {
		traceGCSweepSpan(s.npages * _PageSize)
	}

	atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))

	spc := s.spanclass
	size := s.elemsize

	// Unlink & free special records for any objects we're about to free.
	hadSpecials := s.specials != nil
	specialp := &s.specials
	special := *specialp
	for special != nil {
		// A finalizer can be set for an inner byte of an object, find object beginning.
		objIndex := uintptr(special.offset) / size
		p := s.base() + objIndex*size
		mbits := s.markBitsForIndex(objIndex)
		if !mbits.isMarked() {
			// Pass 1: see if the unmarked object has at least one finalizer.
			// If so, keep the object alive by marking it.
			hasFin := false
			endOffset := p - s.base() + size
			for tmp := special; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
				if tmp.kind == _KindSpecialFinalizer {
					mbits.setMarkedNonAtomic()
					hasFin = true
					break
				}
			}
			// Pass 2: queue all finalizers _or_ handle profile record.
			for special != nil && uintptr(special.offset) < endOffset {
				p := s.base() + uintptr(special.offset)
				if special.kind == _KindSpecialFinalizer || !hasFin {
					// Splice out special record.
					y := special
					special = special.next
					*specialp = special
					freespecial(y, unsafe.Pointer(p), size)
				} else {
					// Keep the profile record; the object stays alive for its finalizer.
					specialp = &special.next
					special = *specialp
				}
			}
		} else {
			// Object is still live: keep the special record.
			specialp = &special.next
			special = *specialp
		}
	}
	if hadSpecials && s.specials == nil {
		spanHasNoSpecials(s)
	}

	if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled {
		// Find all newly freed objects. This doesn't have to be efficient;
		// allocfreetrace has massive overhead.
		mbits := s.markBitsForBase()
		abits := s.allocBitsForIndex(0)
		for i := uintptr(0); i < s.nelems; i++ {
			if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
				x := s.base() + i*s.elemsize
				if debug.allocfreetrace != 0 {
					tracefree(unsafe.Pointer(x), size)
				}
				if debug.clobberfree != 0 {
					clobberfree(unsafe.Pointer(x), size)
				}
				if raceenabled {
					racefree(unsafe.Pointer(x), size)
				}
				if msanenabled {
					msanfree(unsafe.Pointer(x), size)
				}
			}
			mbits.advance()
			abits.advance()
		}
	}

	// Check for zombie objects: a marked bit on a free object indicates a
	// bad pointer or unsafe.Pointer misuse, so detect and report it now.
	if s.freeindex < s.nelems {
		// Everything < freeindex is allocated and hence cannot be a zombie.
		// Check the first bitmap byte, where we have to be careful with freeindex.
		obj := s.freeindex
		if (*s.gcmarkBits.bytep(obj / 8)&^*s.allocBits.bytep(obj / 8))>>(obj%8) != 0 {
			s.reportZombies()
		}
		// Check the remaining bytes.
		for i := obj/8 + 1; i < divRoundUp(s.nelems, 8); i++ {
			if *s.gcmarkBits.bytep(i)&^*s.allocBits.bytep(i) != 0 {
				s.reportZombies()
			}
		}
	}

	// Count the number of free objects in this span.
	nalloc := uint16(s.countAlloc())
	nfreed := s.allocCount - nalloc
	if nalloc > s.allocCount {
		// The zombie check above should have caught this in more detail.
		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
		throw("sweep increased allocation count")
	}

	s.allocCount = nalloc
	s.freeindex = 0 // reset allocation index to start of span.
	if trace.enabled {
		getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
	}

	// gcmarkBits becomes the allocBits.
	// Get a fresh cleared gcmarkBits in preparation for the next GC.
	s.allocBits = s.gcmarkBits
	s.gcmarkBits = newMarkBits(s.nelems)

	// Initialize alloc bits cache.
	s.refillAllocCache(0)
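
The specials loop in sweep above removes records from a singly linked list in place by carrying a pointer to the link being examined (specialp) rather than a pointer to the node. A self-contained illustration of that pointer-to-pointer idiom (node and removeOdd are invented for the example):

package main

import "fmt"

// node is an illustrative singly linked record, standing in for the
// runtime's special records that sweep unlinks in place.
type node struct {
	val  int
	next *node
}

// removeOdd splices out nodes with odd values: pp always points at the
// link that refers to the current node, so removal is "*pp = cur.next"
// and keeping is "pp = &cur.next".
func removeOdd(head **node) {
	pp := head
	cur := *pp
	for cur != nil {
		if cur.val%2 != 0 {
			// Unlink cur: the link that pointed at it now skips it.
			*pp = cur.next
		} else {
			// Keep cur: advance the link pointer past it.
			pp = &cur.next
		}
		cur = *pp
	}
}

func main() {
	// Build 1 -> 2 -> 3 -> 4.
	var head *node
	for v := 4; v >= 1; v-- {
		head = &node{val: v, next: head}
	}
	removeOdd(&head)
	for n := head; n != nil; n = n.next {
		fmt.Print(n.val, " ")
	}
	fmt.Println() // 2 4
}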

// reportZombies reports any marked but free objects in s and throws.
// This generally means user code converted a pointer to a uintptr and
// back unsafely, or constructed a bad pointer into a free slot.
func (s *mspan) reportZombies() {
	printlock()
	print("runtime: marked free object in span ", s, ", elemsize=", s.elemsize, " freeindex=", s.freeindex, " (bad use of unsafe.Pointer? try -d=checkptr)\n")
	mbits := s.markBitsForBase()
	abits := s.allocBitsForIndex(0)
	for i := uintptr(0); i < s.nelems; i++ {
		addr := s.base() + i*s.elemsize
		print(hex(addr))
		alloc := i < s.freeindex || abits.isMarked()
		if alloc {
			print(" alloc")
		} else {
			print(" free ")
		}
		if mbits.isMarked() {
			print(" marked ")
		} else {
			print(" unmarked")
		}
		zombie := mbits.isMarked() && !alloc
		if zombie {
			print(" zombie")
		}
		print("\n")
		if zombie {
			size := s.elemsize
			if size > 1024 {
				size = 1024
			}
			hexdumpWords(addr, addr+size, nil)
		}
		mbits.advance()
		abits.advance()
	}
	throw("found pointer to free object")
}
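
Both the zombie check in sweep and reportZombies above boil down to one bit operation: a bit set in gcmarkBits but clear in allocBits is a marked free object. A tiny illustration of that AND NOT test on a single hypothetical bitmap byte:

package main

import "fmt"

func main() {
	// Hypothetical bitmap bytes covering eight objects: mark bits from the
	// last GC cycle and alloc bits describing which slots are allocated.
	var gcmarkByte byte = 0b1010_0110
	var allocByte byte = 0b1000_0111

	// AND NOT (&^) keeps exactly the bits that are set in gcmarkByte and
	// clear in allocByte; any nonzero result means a zombie object.
	zombies := gcmarkByte &^ allocByte
	fmt.Printf("zombie bits: %08b\n", zombies) // 00100000 -> object #5 is a zombie
}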

// deductSweepCredit deducts sweep credit for allocating a span of size
// spanBytes. This must be performed *before* the span is allocated to
// ensure the system has enough credit.
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Proportional sweep is done or disabled.
		return
	}

	if trace.enabled {
		traceGCSweepStart()
	}

retry:
	sweptBasis := atomic.Load64(&mheap_.pagesSweptBasis)

	// Fix debt if necessary.
	newHeapLive := uintptr(atomic.Load64(&memstats.heap_live)-mheap_.sweepHeapLiveBasis) + spanBytes
	pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
	for pagesTarget > int64(atomic.Load64(&mheap_.pagesSwept)-sweptBasis) {
		if sweepone() == ^uintptr(0) {
			mheap_.sweepPagesPerByte = 0
			break
		}
		if atomic.Load64(&mheap_.pagesSweptBasis) != sweptBasis {
			// Sweep pacing changed. Recompute debt.
			goto retry
		}
	}

	if trace.enabled {
		traceGCSweepDone()
	}
}
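
deductSweepCredit turns the pacing ratio into a concrete debt: the number of pages that must have been swept since the basis before this allocation may proceed. A toy version of that arithmetic with invented numbers, involving no runtime state:

package main

import "fmt"

func main() {
	// Invented values standing in for the runtime's pacing state.
	const sweepPagesPerByte = 0.0005    // pages of sweep debt per allocated byte
	var heapLiveBasis uint64 = 64 << 20 // heap_live when the ratio was set
	var heapLive uint64 = 66 << 20      // heap_live at this allocation
	var spanBytes uint64 = 8192         // bytes about to be allocated
	var callerSweepPages int64 = 1      // pages the caller already swept

	newHeapLive := heapLive - heapLiveBasis + spanBytes
	pagesTarget := int64(sweepPagesPerByte*float64(newHeapLive)) - callerSweepPages
	fmt.Printf("allocating %d bytes requires %d pages swept since the basis\n",
		spanBytes, pagesTarget)
}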
Page generated with Golds v0.3.2-preview (GOOS=darwin GOARCH=amd64), a Go 101 project developed by Tapir Liu.