Source File: mgc.go
Belonging Package: runtime
package runtime
import (
    "internal/cpu"
    "runtime/internal/atomic"
    "unsafe"
)
const (
    _DebugGC         = 0
    _ConcurrentSweep = true
    _FinBlockSize    = 4 * 1024

    sweepMinHeapDistance = 1024 * 1024
)
const defaultHeapMinimum = 4 << 20
func gcinit() {
    // Set a reasonable initial GC trigger.
    memstats.triggerRatio = 7 / 8.0

    // Fake a heap_marked value so it looks like a trigger at heapminimum
    // is the appropriate growth from heap_marked.
    memstats.heap_marked = uint64(float64(heapminimum) / (1 + memstats.triggerRatio))

    // Set gcpercent from the environment. This will also compute and set
    // the GC trigger and goal.
    _ = setGCPercent(readgogc())

    work.startSema = 1
    work.markDoneSema = 1

    lockInit(&work.sweepWaiters.lock, lockRankSweepWaiters)
    lockInit(&work.assistQueue.lock, lockRankAssistQueue)
    lockInit(&work.wbufSpans.lock, lockRankWbufSpans)
}
func readgogc() int32 {
    p := gogetenv("GOGC")
    if p == "off" {
        return -1
    }
    if n, ok := atoi32(p); ok {
        return n
    }
    return 100
}
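// For example, GOGC=off yields -1 (collection disabled), an unset or
// unparsable GOGC falls back to 100, and GOGC=200 makes each cycle's heap
// goal roughly three times the live heap marked by the previous cycle.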
// (tail of setGCPercent(in int32) (out int32); the preceding body, which
// updates gcpercent and heapminimum under the heap lock, is elided in this
// listing)
    // If we just disabled GC, wait for any concurrent GC mark to finish
    // so we always return with no GC running.
    if in < 0 {
        gcWaitOnMark(atomic.Load(&work.cycles))
    }

    return out
}
var writeBarrier struct {
    enabled bool    // compiler emits a check of this before calling write barrier
    pad     [3]byte // compiler uses 32-bit load for "enabled" field
    needed  bool    // whether we need a write barrier for current GC phase
    cgo     bool    // whether we need a write barrier for a cgo check
    alignme uint64  // guarantee alignment so that compiler can use a 32 or 64-bit load
}

var gcBlackenEnabled uint32

const (
    _GCoff             = iota // GC not running; sweeping in background, write barrier disabled
    _GCmark                   // GC marking roots and workbufs: allocate black, write barrier ENABLED
    _GCmarktermination        // GC mark termination: allocate black, P's help GC, write barrier ENABLED
)
func setGCPhase(x uint32) {
    atomic.Store(&gcphase, x)
    writeBarrier.needed = gcphase == _GCmark || gcphase == _GCmarktermination
    writeBarrier.enabled = writeBarrier.needed || writeBarrier.cgo
}
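// In other words, setGCPhase(_GCmark) and setGCPhase(_GCmarktermination)
// turn the write barrier on, and setGCPhase(_GCoff) turns it back off
// unless writeBarrier.cgo (the cgocheck write barrier) still requires it.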
type gcMarkWorkerMode int

const (
    gcMarkWorkerNotWorker gcMarkWorkerMode = iota
    gcMarkWorkerDedicatedMode
    gcMarkWorkerFractionalMode
    gcMarkWorkerIdleMode
)

var gcMarkWorkerModeStrings = [...]string{
    "Not worker",
    "GC (dedicated)",
    "GC (fractional)",
    "GC (idle)",
}
func (c *gcControllerState) startCycle() {
    c.scanWork = 0
    c.bgScanCredit = 0
    c.assistTime = 0
    c.dedicatedMarkTime = 0
    c.fractionalMarkTime = 0
    c.idleMarkTime = 0

    // Compute the background mark utilization goal. We round the number
    // of dedicated workers so that the utilization is closest to 25%.
    // For small GOMAXPROCS this introduces too much error, so we add
    // fractional workers in that case.
    totalUtilizationGoal := float64(gomaxprocs) * gcBackgroundUtilization
    c.dedicatedMarkWorkersNeeded = int64(totalUtilizationGoal + 0.5)
    utilError := float64(c.dedicatedMarkWorkersNeeded)/totalUtilizationGoal - 1
    const maxUtilError = 0.3
    if utilError < -maxUtilError || utilError > maxUtilError {
        // Rounding put us more than 30% off our goal. Enable fractional
        // workers instead.
        if float64(c.dedicatedMarkWorkersNeeded) > totalUtilizationGoal {
            // Too many dedicated workers.
            c.dedicatedMarkWorkersNeeded--
        }
        c.fractionalUtilizationGoal = (totalUtilizationGoal - float64(c.dedicatedMarkWorkersNeeded)) / float64(gomaxprocs)
    } else {
        c.fractionalUtilizationGoal = 0
    }

    // In STW mode, we just want dedicated workers.
    if debug.gcstoptheworld > 0 {
        c.dedicatedMarkWorkersNeeded = int64(gomaxprocs)
        c.fractionalUtilizationGoal = 0
    }

    // Clear per-P state.
    for _, p := range allp {
        p.gcAssistTime = 0
        p.gcFractionalMarkTime = 0
    }

    // Compute initial values for controls that are updated throughout
    // the cycle.
    c.revise()

    if debug.gcpacertrace > 0 {
        assistRatio := float64frombits(atomic.Load64(&c.assistWorkPerByte))
        print("pacer: assist ratio=", assistRatio,
            " (scan ", memstats.heap_scan>>20, " MB in ",
            work.initialHeapLive>>20, "->",
            memstats.next_gc>>20, " MB)",
            " workers=", c.dedicatedMarkWorkersNeeded,
            "+", c.fractionalUtilizationGoal, "\n")
    }
}
// revise updates the assist ratio during the GC cycle to account for
// improved estimates. (loads of the live/scanned heap, the heap goal, and
// the expected scan work computation are elided in this listing)
func (c *gcControllerState) revise() {
    gcpercent := gcpercent
        scanWorkExpected = int64(scan)
    }

    // Compute the remaining scan work estimate.
    scanWorkRemaining := scanWorkExpected - work
    if scanWorkRemaining < 1000 {
        // Enforce a lower bound so a small miss doesn't produce a
        // degenerate assist ratio.
        scanWorkRemaining = 1000
    }

    // Compute the heap distance remaining.
    heapRemaining := heapGoal - int64(live)
    if heapRemaining <= 0 {
        // Avoid dividing by zero or setting the assist negative.
        heapRemaining = 1
    }

    // Compute the mutator assist ratio: by the time the mutator has
    // allocated the remaining heap bytes up to next_gc, it should have
    // done (or stolen) the remaining scan work.
    assistWorkPerByte := float64(scanWorkRemaining) / float64(heapRemaining)
    assistBytesPerWork := float64(heapRemaining) / float64(scanWorkRemaining)
    atomic.Store64(&c.assistWorkPerByte, float64bits(assistWorkPerByte))
    atomic.Store64(&c.assistBytesPerWork, float64bits(assistBytesPerWork))
}
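// For instance, if roughly 10 MB of scan work is still expected and the heap
// may grow by another 20 MB before reaching the goal, assistWorkPerByte comes
// out near 0.5: an allocating goroutine must perform (or steal credit for)
// about half a unit of scan work for every byte it allocates.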
// endCycle computes the trigger ratio for the next cycle.
func (c *gcControllerState) endCycle() float64 {
    if work.userForced {
        // Forced GC means this cycle didn't start at the trigger, so
        // where it finished isn't good information about how to adjust
        // the trigger. Just leave it where it is.
        return memstats.triggerRatio
    }

    // Proportional response gain for the trigger controller. Must be in
    // [0, 1].
    const triggerGain = 0.5

    // Compute next cycle trigger ratio. First, this computes the "error"
    // between GC growth and the trigger ratio.
    goalGrowthRatio := gcEffectiveGrowthRatio()
    actualGrowthRatio := float64(memstats.heap_live)/float64(memstats.heap_marked) - 1
    assistDuration := nanotime() - c.markStartTime

    // Assume background mark hit its utilization goal.
    utilization := gcBackgroundUtilization
    // Add assist utilization; avoid divide by zero.
    if assistDuration > 0 {
        utilization += float64(c.assistTime) / float64(assistDuration*int64(gomaxprocs))
    }

    triggerError := goalGrowthRatio - memstats.triggerRatio - utilization/gcGoalUtilization*(actualGrowthRatio-memstats.triggerRatio)

    // Finally, we adjust the trigger for next time by this error, damped
    // by the proportional gain.
    triggerRatio := memstats.triggerRatio + triggerGain*triggerError

    if debug.gcpacertrace > 0 {
        // Print controller state in terms of the design document.
        H_m_prev := memstats.heap_marked
        h_t := memstats.triggerRatio
        H_T := memstats.gc_trigger
        h_a := actualGrowthRatio
        H_a := memstats.heap_live
        h_g := goalGrowthRatio
        H_g := int64(float64(H_m_prev) * (1 + h_g))
        u_a := utilization
        u_g := gcGoalUtilization
        W_a := c.scanWork
        print("pacer: H_m_prev=", H_m_prev,
            " h_t=", h_t, " H_T=", H_T,
            " h_a=", h_a, " H_a=", H_a,
            " h_g=", h_g, " H_g=", H_g,
            " u_a=", u_a, " u_g=", u_g,
            " W_a=", W_a,
            " goalΔ=", goalGrowthRatio-h_t,
            " actualΔ=", h_a-h_t,
            " u_a/u_g=", u_a/u_g,
            "\n")
    }

    return triggerRatio
}

// (fragment of (*gcControllerState).enlistWorker; surrounding lines are
// elided in this listing)
    if c.dedicatedMarkWorkersNeeded <= 0 {
        return
    }
func (c *gcControllerState) findRunnableGCWorker(_p_ *p) *g {
    if gcBlackenEnabled == 0 {
        throw("gcControllerState.findRunnable: blackening not enabled")
    }

    if !gcMarkWorkAvailable(_p_) {
        // No work to be done right now; don't bother running a worker.
        return nil
    }

    // Grab a worker before we commit to running below.
    node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
    if node == nil {
        // No worker is available; it is safe to simply do nothing here.
        return nil
    }

    decIfPositive := func(ptr *int64) bool {
        for {
            v := atomic.Loadint64(ptr)
            if v <= 0 {
                return false
            }
            if atomic.Casint64(ptr, v, v-1) {
                return true
            }
        }
    }

    if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
        // This P is now dedicated to marking until the end of the
        // concurrent mark phase.
        _p_.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
    } else if c.fractionalUtilizationGoal == 0 {
        // No need for fractional workers.
        gcBgMarkWorkerPool.push(&node.node)
        return nil
    } else {
        // Is this P behind on the fractional utilization goal?
        delta := nanotime() - gcController.markStartTime
        if delta > 0 && float64(_p_.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal {
            // No need to run a fractional worker.
            gcBgMarkWorkerPool.push(&node.node)
            return nil
        }
        // Run a fractional worker.
        _p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode
    }

    // Run the background mark worker.
    gp := node.gp.ptr()
    casgstatus(gp, _Gwaiting, _Grunnable)
    if trace.enabled {
        traceGoUnpark(gp, 0)
    }
    return gp
}

// pollFractionalWorkerExit reports whether a fractional mark worker should
// self-preempt.
func pollFractionalWorkerExit() bool {
    now := nanotime()
    delta := now - gcController.markStartTime
    if delta <= 0 {
        return true
    }
    p := getg().m.p.ptr()
    selfTime := p.gcFractionalMarkTime + (now - p.gcMarkWorkerStartTime)
    // Add some slack to the utilization goal so the fractional worker
    // isn't behind again the instant it exits.
    return float64(selfTime)/float64(delta) > 1.2*gcController.fractionalUtilizationGoal
}
// gcSetTriggerRatio sets the trigger ratio and updates everything derived
// from it: the absolute trigger, the heap goal, mark pacing, and sweep
// pacing.
func gcSetTriggerRatio(triggerRatio float64) {
    assertWorldStoppedOrLockHeld(&mheap_.lock)

    // Compute the next GC goal, which is when the allocated heap has
    // grown by GOGC/100 over the heap marked by the last cycle.
    goal := ^uint64(0)
    if gcpercent >= 0 {
        goal = memstats.heap_marked + memstats.heap_marked*uint64(gcpercent)/100
    }

    // Set the trigger ratio, capped to reasonable bounds.
    if gcpercent >= 0 {
        scalingFactor := float64(gcpercent) / 100
        // Ensure there's always a little margin so that the mutator
        // assist ratio isn't infinity.
        maxTriggerRatio := 0.95 * scalingFactor
        if triggerRatio > maxTriggerRatio {
            triggerRatio = maxTriggerRatio
        }
        // If we let the trigger ratio go too low, a rapidly allocating
        // application can end up in a nearly always-on GC with a
        // growing heap.
        minTriggerRatio := 0.6 * scalingFactor
        if triggerRatio < minTriggerRatio {
            triggerRatio = minTriggerRatio
        }
    } else if triggerRatio < 0 {
        // gcpercent < 0, so just make sure we're not getting a negative
        // triggerRatio.
        triggerRatio = 0
    }
    memstats.triggerRatio = triggerRatio

    // Compute the absolute GC trigger from the trigger ratio.
    //
    // We trigger the next GC cycle when the allocated heap has grown by
    // the trigger ratio over the marked heap size.
    trigger := ^uint64(0)
    if gcpercent >= 0 {
        trigger = uint64(float64(memstats.heap_marked) * (1 + triggerRatio))
        // Don't trigger below the minimum heap size.
        minTrigger := heapminimum
        if !isSweepDone() {
            // Concurrent sweep happens in the heap growth from
            // heap_live to gc_trigger, so ensure that concurrent sweep
            // has some heap growth in which to perform sweeping before
            // we start the next GC cycle.
            sweepMin := atomic.Load64(&memstats.heap_live) + sweepMinHeapDistance
            if sweepMin > minTrigger {
                minTrigger = sweepMin
            }
        }
        if trigger < minTrigger {
            trigger = minTrigger
        }
        if int64(trigger) < 0 {
            print("runtime: next_gc=", memstats.next_gc, " heap_marked=", memstats.heap_marked, " heap_live=", memstats.heap_live, " initialHeapLive=", work.initialHeapLive, " triggerRatio=", triggerRatio, " minTrigger=", minTrigger, "\n")
            throw("gc_trigger underflow")
        }
        if trigger > goal {
            // The trigger ratio is always less than GOGC/100, but other
            // bounds on the trigger may have raised it. Push up the
            // goal, too.
            goal = trigger
        }
    }

    // Commit to the trigger and goal.
    memstats.gc_trigger = trigger
    atomic.Store64(&memstats.next_gc, goal)
    if trace.enabled {
        traceNextGC()
    }

    // Update mark pacing.
    if gcphase != _GCoff {
        gcController.revise()
    }

    // Update sweep pacing.
    if isSweepDone() {
        mheap_.sweepPagesPerByte = 0
    } else {
        // Concurrent sweep needs to sweep all of the in-use pages by
        // the time the allocated heap reaches the GC trigger.
        heapLiveBasis := atomic.Load64(&memstats.heap_live)
        heapDistance := int64(trigger) - int64(heapLiveBasis)
        // Add a little margin so rounding errors and concurrent sweep
        // are less likely to leave pages unswept when GC starts.
        heapDistance -= 1024 * 1024
        if heapDistance < _PageSize {
            // Avoid setting the sweep ratio extremely high.
            heapDistance = _PageSize
        }
        pagesSwept := atomic.Load64(&mheap_.pagesSwept)
        pagesInUse := atomic.Load64(&mheap_.pagesInUse)
        sweepDistancePages := int64(pagesInUse) - int64(pagesSwept)
        if sweepDistancePages <= 0 {
            mheap_.sweepPagesPerByte = 0
        } else {
            mheap_.sweepPagesPerByte = float64(sweepDistancePages) / float64(heapDistance)
            // Write pagesSweptBasis last, since this signals concurrent
            // sweeps to recompute their debt.
            atomic.Store64(&mheap_.pagesSweptBasis, pagesSwept)
        }
    }

    gcPaceScavenger()
}
// gcEffectiveGrowthRatio returns the current effective heap growth ratio
// (GOGC/100), which may differ from gcpercent/100 because of bounds on
// gcpercent.
func gcEffectiveGrowthRatio() float64 {
    assertWorldStoppedOrLockHeld(&mheap_.lock)

    egogc := float64(atomic.Load64(&memstats.next_gc)-memstats.heap_marked) / float64(memstats.heap_marked)
    if egogc < 0 {
        // Shouldn't happen, but just in case.
        egogc = 0
    }
    return egogc
}
const gcGoalUtilization = 0.30
const gcBackgroundUtilization = 0.25
const gcCreditSlack = 2000
const gcAssistTimeSlack = 5000
const gcOverAssistWork = 64 << 10
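// As a rough worked example of the pacing arithmetic: with GOGC=100 and 64 MB
// of heap marked live by the previous cycle, gcSetTriggerRatio (above) sets
// the next heap goal to 64 + 64*100/100 = 128 MB, and with the initial
// trigger ratio of 7/8 the next cycle is triggered once the live heap grows
// to roughly 64 * (1 + 7/8) = 120 MB, leaving the last ~8 MB of growth for
// concurrent mark to finish in.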
var work struct {
    full  lfstack          // lock-free list of full blocks workbuf
    empty lfstack          // lock-free list of empty blocks workbuf
    pad0  cpu.CacheLinePad // prevents false-sharing between full/empty and nproc/nwait

    wbufSpans struct {
        lock mutex
        free mSpanList // spans dedicated to workbufs but currently empty
        busy mSpanList // spans containing workbufs on one of the workbuf lists
    }

    // (many fields, including the worker counts, timing fields, and the
    // sweep waiters, are elided in this listing)
    _ uint32 // align next field to 8 bytes

    mode gcMode

    userForced bool

    totaltime int64

    initialHeapLive uint64

    cycles uint32

    heap0, heap1, heap2, heapGoal uint64
}
// (fragment of GC(), which forces a new collection cycle and blocks the
// caller until the cycle's mark phase completes)
    n := atomic.Load(&work.cycles)
    // Wait for the current mark to finish, trigger cycle N+1, then wait
    // for its mark termination.
    gcWaitOnMark(n)
    gcStart(gcTrigger{kind: gcTriggerCycle, n: n + 1})
    gcWaitOnMark(n + 1)

// gcWaitOnMark blocks until GC finishes the Nth mark phase. If GC has
// already completed this mark phase, it returns immediately.
func gcWaitOnMark(n uint32) {
    for {
        lock(&work.sweepWaiters.lock)
        nMarks := atomic.Load(&work.cycles)
        if gcphase != _GCmark {
            // We've already completed this cycle's mark.
            nMarks++
        }
        if nMarks > n {
            // We're done.
            unlock(&work.sweepWaiters.lock)
            return
        }
        // Otherwise park until woken by mark termination of cycle N.
        work.sweepWaiters.list.push(getg())
        goparkunlock(&work.sweepWaiters.lock, waitReasonWaitForGCCycle, traceEvGoBlock, 1)
    }
}
type gcMode int
const (
    gcBackgroundMode gcMode = iota // concurrent GC and sweep
    gcForceMode                    // stop-the-world GC now, concurrent sweep
    gcForceBlockMode               // stop-the-world GC now and STW sweep (forced by user)
)
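// For reference, gcStart (below) picks the mode from GODEBUG: gcstoptheworld=1
// selects gcForceMode, gcstoptheworld=2 selects gcForceBlockMode, and
// otherwise collection runs in gcBackgroundMode.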
type gcTrigger struct {
    kind gcTriggerKind
    now  int64  // gcTriggerTime: current time
    n    uint32 // gcTriggerCycle: cycle number to start
}

type gcTriggerKind int

const (
    gcTriggerHeap gcTriggerKind = iota // heap_live reached gc_trigger
    gcTriggerTime                      // more than forcegcperiod since the last cycle
    gcTriggerCycle                     // cycle gcTrigger.n has not started yet
)
// test reports whether the trigger condition is satisfied, meaning that the
// exit condition for the _GCoff phase has been met. (the initial enablegc,
// panicking, and gcphase checks are elided in this listing)
func (t gcTrigger) test() bool {
    switch t.kind {
    case gcTriggerHeap:
        return memstats.heap_live >= memstats.gc_trigger
    case gcTriggerTime:
        if gcpercent < 0 {
            return false
        }
        lastgc := int64(atomic.Load64(&memstats.last_gc_nanotime))
        return lastgc != 0 && t.now-lastgc > forcegcperiod
    case gcTriggerCycle:
        // t.n > work.cycles, but accounting for wraparound.
        return int32(t.n-work.cycles) > 0
    }
    return true
}
// gcStart starts the GC. It transitions from _GCoff to _GCmark (if
// debug.gcstoptheworld == 0) or performs all of GC (otherwise).
// (several stretches of this function are elided in this listing)
func gcStart(trigger gcTrigger) {
    // Re-check transition condition under transition lock.
    if !trigger.test() {
        semrelease(&work.startSema)
        return
    }

    // For stats, check if this GC was forced by the user.
    work.userForced = trigger.kind == gcTriggerCycle

    // In gcstoptheworld debug mode, upgrade the mode accordingly.
    mode := gcBackgroundMode
    if debug.gcstoptheworld == 1 {
        mode = gcForceMode
    } else if debug.gcstoptheworld == 2 {
        mode = gcForceBlockMode
    }

    // Ok, we're doing it! Stop everybody else.
    semacquire(&gcsema)
    semacquire(&worldsema)

    if trace.enabled {
        traceGCStart()
    }

    // Check that all Ps have finished deferred mcache flushes.
    for _, p := range allp {
        if fg := atomic.Load(&p.mcache.flushGen); fg != mheap_.sweepgen {
            println("runtime: p", p.id, "flushGen", fg, "!= sweepgen", mheap_.sweepgen)
            throw("p mcache not flushed")
        }
    }

    gcBgMarkStartWorkers()

    systemstack(gcResetMarkState)

    work.stwprocs, work.maxprocs = gomaxprocs, gomaxprocs
    // (stopping the world and recording the sweep-termination timestamps
    // are elided in this listing)

    // Finish sweep before we start concurrent scan.
    systemstack(func() {
        finishsweep_m()
    })
    // (clearpools, cycle accounting, and gcController.startCycle are
    // elided in this listing)

    // In gcstoptheworld debug mode, disable user Gs. This should happen
    // before semrelease.
    if mode != gcBackgroundMode {
        schedEnableUser(false)
    }

    // Enter concurrent mark phase and enable write barriers.
    setGCPhase(_GCmark)

    gcBgMarkPrepare() // Must happen before assist enable.
    gcMarkRootPrepare()

    // At this point all Ps have enabled the write barrier, maintaining
    // the no-white-to-black invariant. Enable mutator assists to put
    // back-pressure on fast-allocating mutators.
    atomic.Store(&gcBlackenEnabled, 1)

    // In STW mode, we could block the instant systemstack returns, so
    // make sure we're not preemptible.
    mp = acquirem()

    // Concurrent mark.
    systemstack(func() {
        now = startTheWorldWithSema(trace.enabled)
        work.pauseNS += now - work.pauseStart
        work.tMark = now
        memstats.gcPauseDist.record(now - work.pauseStart)
    })

    // Release worldsema before Gosched() in STW mode because we will need
    // to reacquire it later, and we could self-deadlock otherwise.
    semrelease(&worldsema)
    releasem(mp)

    // Make sure we block instead of returning to user code in STW mode.
    if mode != gcBackgroundMode {
        Gosched()
    }

    semrelease(&work.startSema)
}
// gcMarkDone transitions the GC from mark to mark termination if all
// reachable objects have been marked. (acquisition of work.markDoneSema and
// worldsema, and parts of the ragged-barrier loop, are elided in this listing)
top:
    // Re-check the transition condition under the transition lock. It's
    // critical that this checks the global work queues are empty before
    // performing the ragged barrier.
    if !(gcphase == _GCmark && work.nwait == work.nproc && !gcMarkWorkAvailable(nil)) {
        semrelease(&work.markDoneSema)
        return
    }

    // Flush all local buffers and collect flushedWork flags.
    gcMarkDoneFlushed = 0
    systemstack(func() {
        gp := getg().m.curg
        // Mark the user stack as preemptible so that it may be scanned.
        casgstatus(gp, _Grunning, _Gwaiting)
        forEachP(func(_p_ *p) {
            // Flush the write barrier buffer and the gcWork, then
            // collect the flushedWork flag.
            wbBufFlush1(_p_)
            _p_.gcw.dispose()
            if _p_.gcw.flushedWork {
                atomic.Xadd(&gcMarkDoneFlushed, 1)
                _p_.gcw.flushedWork = false
            }
        })
        casgstatus(gp, _Gwaiting, _Grunning)
    })

    if gcMarkDoneFlushed != 0 {
        // More grey objects were discovered since the ragged barrier;
        // rerun it.
        semrelease(&worldsema)
        goto top
    }
    // There was no global work, no local work, and no Ps communicated
    // work since we took markDoneSema. Therefore there are no grey
    // objects and no more objects can be shaded. Transition to mark
    // termination.
    now := nanotime()
    work.tMarkTerm = now
    work.pauseStart = now
    getg().m.preemptoff = "gcing"
    if trace.enabled {
        traceGCSTWStart(0)
    }
    systemstack(stopTheWorldWithSema)

    // Check whether the write barrier buffers or per-P work caches still
    // contain any work; if so, we must restart concurrent mark.
    restart := false
    systemstack(func() {
        for _, p := range allp {
            wbBufFlush1(p)
            if !p.gcw.empty() {
                restart = true
                break
            }
        }
    })
    if restart {
        getg().m.preemptoff = ""
        systemstack(func() {
            now := startTheWorldWithSema(true)
            work.pauseNS += now - work.pauseStart
            memstats.gcPauseDist.record(now - work.pauseStart)
        })
        semrelease(&worldsema)
        goto top
    }
    // Disable assists and background workers. We must do this before
    // waking blocked assists.
    atomic.Store(&gcBlackenEnabled, 0)

    // endCycle depends on all gcWork cache stats being flushed. The
    // termination algorithm above ensured that up to allocations since
    // the ragged barrier.
    nextTriggerRatio := gcController.endCycle()

    // Perform mark termination. This will restart the world.
    // (the declaration of gcMarkTermination, whose body follows, is
    // elided in this listing)

    // World is stopped. Start marktermination (the write barrier remains
    // enabled for now).
    setGCPhase(_GCmarktermination)

    work.heap1 = memstats.heap_live
    startTime := nanotime()

    mp := acquirem()
    mp.preemptoff = "gcing"
    _g_ := getg()
    _g_.m.traceback = 2
    gp := _g_.m.curg
    casgstatus(gp, _Grunning, _Gwaiting)
    gp.waitreason = waitReasonGarbageCollection

    // Run gc on the g0 stack.
    systemstack(func() {
        gcMark(startTime)
    })

    systemstack(func() {
        work.heap2 = work.bytesMarked
        if debug.gccheckmark > 0 {
            // Run a full non-parallel, stop-the-world mark using
            // checkmark bits to check that we didn't forget to mark
            // anything during the concurrent mark process.
            startCheckmarks()
            gcResetMarkState()
            gcw := &getg().m.p.ptr().gcw
            gcDrain(gcw, 0)
            wbBufFlush1(getg().m.p.ptr())
            gcw.dispose()
            endCheckmarks()
        }

        // Marking is complete, so we can turn the write barrier off.
        setGCPhase(_GCoff)
        gcSweep(work.mode)
    })
    _g_.m.traceback = 0
    casgstatus(gp, _Gwaiting, _Grunning)

    if trace.enabled {
        traceGCDone()
    }

    // all done
    mp.preemptoff = ""

    if gcphase != _GCoff {
        throw("gc done but gcphase != _GCoff")
    }

    // Update GC trigger and pacing for the next cycle.
    gcSetTriggerRatio(nextTriggerRatio)

    // Update timing memstats.
    now := nanotime()
    sec, nsec, _ := time_now()
    unixNow := sec*1e9 + int64(nsec)
    work.pauseNS += now - work.pauseStart
    work.tEnd = now
    memstats.gcPauseDist.record(now - work.pauseStart)
    atomic.Store64(&memstats.last_gc_unix, uint64(unixNow)) // must be Unix time to make sense to user
    atomic.Store64(&memstats.last_gc_nanotime, uint64(now)) // monotonic time for us
    memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS)
    memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(unixNow)
    memstats.pause_total_ns += uint64(work.pauseNS)

    // Update work.totaltime.
    sweepTermCpu := int64(work.stwprocs) * (work.tMark - work.tSweepTerm)
    // We report idle marking time below, but omit it from the overall
    // utilization here since it's "free".
    markCpu := gcController.assistTime + gcController.dedicatedMarkTime + gcController.fractionalMarkTime
    markTermCpu := int64(work.stwprocs) * (work.tEnd - work.tMarkTerm)
    cycleCpu := sweepTermCpu + markCpu + markTermCpu
    work.totaltime += cycleCpu

    // Compute overall GC CPU utilization.
    totalCpu := sched.totaltime + (now-sched.procresizetime)*int64(gomaxprocs)
    memstats.gc_cpu_fraction = float64(work.totaltime) / float64(totalCpu)

    // Reset sweep state.
    sweep.nbgsweep = 0
    sweep.npausesweep = 0

    if work.userForced {
        memstats.numforcedgc++
    }

    // Finish the current heap profiling cycle and start a new one. We do
    // this before starting the world so events don't leak into the wrong
    // cycle.
    mProf_NextCycle()

    systemstack(func() { startTheWorldWithSema(true) })

    // Ensure all mcaches are flushed. Each P will flush its own mcache
    // before allocating, but idle Ps may not, and this is necessary to
    // sweep all spans before the next GC cycle.
    systemstack(func() {
        forEachP(func(_p_ *p) {
            _p_.mcache.prepareForSweep()
        })
    })
    // Print gctrace before dropping worldsema.
    if debug.gctrace > 0 {
        util := int(memstats.gc_cpu_fraction * 100)

        var sbuf [24]byte
        printlock()
        print("gc ", memstats.numgc,
            " @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ",
            util, "%: ")
        prev := work.tSweepTerm
        for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} {
            if i != 0 {
                print("+")
            }
            print(string(fmtNSAsMS(sbuf[:], uint64(ns-prev))))
            prev = ns
        }
        print(" ms clock, ")
        for i, ns := range []int64{sweepTermCpu, gcController.assistTime, gcController.dedicatedMarkTime + gcController.fractionalMarkTime, gcController.idleMarkTime, markTermCpu} {
            if i == 2 || i == 3 {
                // Separate mark time components with /.
                print("/")
            } else if i != 0 {
                print("+")
            }
            print(string(fmtNSAsMS(sbuf[:], uint64(ns))))
        }
        print(" ms cpu, ",
            work.heap0>>20, "->", work.heap1>>20, "->", work.heap2>>20, " MB, ",
            work.heapGoal>>20, " MB goal, ",
            work.maxprocs, " P")
        if work.userForced {
            print(" (forced)")
        }
        print("\n")
        printunlock()
    }

    semrelease(&worldsema)
    semrelease(&gcsema)
    releasem(mp)

    // Now that GC is done, kick off the finalizer thread if needed.
    if !_ConcurrentSweep {
        // Give the queued finalizers, if any, a chance to run.
        Gosched()
    }
}
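// A gctrace line produced by the block above looks roughly like
//
//	gc 4 @1.204s 2%: 0.11+1.9+0.085 ms clock, 0.91+0.42/1.6/2.5+0.68 ms cpu, 4->5->2 MB, 6 MB goal, 8 P
//
// that is: cycle number, seconds since program start, overall GC CPU
// fraction, then sweep-termination/mark/mark-termination wall-clock times,
// the CPU times for sweep termination, mark (split into assist,
// dedicated+fractional, and idle), and mark termination, the heap size at GC
// start, at mark termination, and the marked (live) heap, the heap goal, and
// the number of Ps.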
// gcBgMarkStartWorkers prepares background mark worker goroutines. These
// goroutines will not run until the mark phase, but they must be started
// while the work is not stopped and from a regular G stack.
func gcBgMarkStartWorkers() {
    for gcBgMarkWorkerCount < gomaxprocs {
        go gcBgMarkWorker()

        notetsleepg(&work.bgMarkReady, -1)
        noteclear(&work.bgMarkReady)
        // The worker is now guaranteed to be added to the pool before
        // its P's next findRunnableGCWorker.

        gcBgMarkWorkerCount++
    }
}

// (body of gcBgMarkWorker; its declaration and several stretches of the
// worker loop are elided in this listing)
    gp := getg()

    // We pass node to a gopark unlock function, so it can't be on the
    // stack (see gopark). Prevent deadlock from recursively starting GC
    // by disabling preemption.
    gp.m.preemptoff = "GC worker init"
    node := new(gcBgMarkWorkerNode)
    gp.m.preemptoff = ""

    node.gp.set(gp)

    node.m.set(acquirem())
    notewakeup(&work.bgMarkReady)

    for {
        // Go to sleep until woken by gcController.findRunnableGCWorker.
        gopark(func(g *g, nodep unsafe.Pointer) bool {
            node := (*gcBgMarkWorkerNode)(nodep)

            if mp := node.m.ptr(); mp != nil {
                // The worker G is no longer running; release the M.
                releasem(mp)
            }

            // Release this G to the pool.
            gcBgMarkWorkerPool.push(&node.node)
            // Note that at this point, the G may immediately be
            // rescheduled and may be running.
            return true
        }, unsafe.Pointer(node), waitReasonGCWorkerIdle, traceEvGoBlock, 0)

        // Disable preemption so we can use the gcw. If the scheduler
        // wants to preempt us, we'll stop draining, dispose the gcw, and
        // then preempt.
        node.m.set(acquirem())
        pp := gp.m.p.ptr() // P can't change with preemption disabled.

        if gcBlackenEnabled == 0 {
            println("worker mode", pp.gcMarkWorkerMode)
            throw("gcBgMarkWorker: blackening not enabled")
        }
        if pp.gcMarkWorkerMode == gcMarkWorkerNotWorker {
            throw("gcBgMarkWorker: mode not set")
        }

        startTime := nanotime()
        pp.gcMarkWorkerStartTime = startTime

        decnwait := atomic.Xadd(&work.nwait, -1)
        if decnwait == work.nproc {
            println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
            throw("work.nwait was > work.nproc")
        }

        systemstack(func() {
            // Mark our goroutine preemptible so its stack can be
            // scanned, letting two mark workers scan each other.
            casgstatus(gp, _Grunning, _Gwaiting)
            switch pp.gcMarkWorkerMode {
            default:
                throw("gcBgMarkWorker: unexpected gcMarkWorkerMode")
            case gcMarkWorkerDedicatedMode:
                gcDrain(&pp.gcw, gcDrainUntilPreempt|gcDrainFlushBgCredit)
                // (handling of preemption and a second, non-preemptible
                // drain are elided in this listing)
            case gcMarkWorkerFractionalMode:
                gcDrain(&pp.gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit)
            case gcMarkWorkerIdleMode:
                gcDrain(&pp.gcw, gcDrainIdle|gcDrainUntilPreempt|gcDrainFlushBgCredit)
            }
            casgstatus(gp, _Gwaiting, _Grunning)
        })

        // Account for time.
        duration := nanotime() - startTime
        switch pp.gcMarkWorkerMode {
        case gcMarkWorkerDedicatedMode:
            atomic.Xaddint64(&gcController.dedicatedMarkTime, duration)
            atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 1)
        case gcMarkWorkerFractionalMode:
            atomic.Xaddint64(&gcController.fractionalMarkTime, duration)
            atomic.Xaddint64(&pp.gcFractionalMarkTime, duration)
        case gcMarkWorkerIdleMode:
            atomic.Xaddint64(&gcController.idleMarkTime, duration)
        }
// gcMark runs the mark (or, for concurrent GC, mark termination).
// All gcWork caches must be empty. STW is in effect at this point.
func gcMark(startTime int64) {
    if debug.allocfreetrace > 0 {
        tracegc()
    }

    if gcphase != _GCmarktermination {
        throw("in gcMark expecting to see gcphase as _GCmarktermination")
    }
    work.tstart = startTime

    // Check that there's no marking work remaining.
    if work.full != 0 || work.markrootNext < work.markrootJobs {
        print("runtime: full=", hex(work.full), " next=", work.markrootNext, " jobs=", work.markrootJobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n")
        panic("non-empty mark queue after concurrent mark")
    }

    if debug.gccheckmark > 0 {
        // This is expensive when there's a large number of goroutines,
        // so only do it if checkmark is also enabled.
        gcMarkRootCheck()
    }
    if work.full != 0 {
        throw("work.full != 0")
    }

    // Clear out buffers and double-check that all gcWork caches are
    // empty. This should be ensured by gcMarkDone before we enter mark
    // termination.
    for _, p := range allp {
        // Since the gcMarkDone barrier ensured all reachable objects
        // were marked, any pointers still buffered in the write barrier
        // buffer must point to black objects, so the buffer can simply
        // be discarded.
        if debug.gccheckmark > 0 {
            // For debugging, flush the buffer and make sure it really
            // was all marked.
            wbBufFlush1(p)
        } else {
            p.wbBuf.reset()
        }

        gcw := &p.gcw
        if !gcw.empty() {
            printlock()
            print("runtime: P ", p.id, " flushedWork ", gcw.flushedWork)
            if gcw.wbuf1 == nil {
                print(" wbuf1=<nil>")
            } else {
                print(" wbuf1.n=", gcw.wbuf1.nobj)
            }
            if gcw.wbuf2 == nil {
                print(" wbuf2=<nil>")
            } else {
                print(" wbuf2.n=", gcw.wbuf2.nobj)
            }
            print("\n")
            throw("P has cached GC work at end of mark termination")
        }
        // There may still be cached empty buffers, which we need to
        // flush since we're going to free them.
        gcw.dispose()
    }

    // Update heap size stats for the next cycle.
    memstats.heap_live = work.bytesMarked
    memstats.heap_scan = uint64(gcController.scanWork)

    if trace.enabled {
        traceHeapAlloc()
    }
}
// gcSweep must be called on the system stack because it acquires the heap
// lock. The world must be stopped.
func gcSweep(mode gcMode) {
    assertWorldStopped()

    if gcphase != _GCoff {
        throw("gcSweep being done but phase is not GCoff")
    }

    lock(&mheap_.lock)
    mheap_.sweepgen += 2
    mheap_.sweepdone = 0
    mheap_.pagesSwept = 0
    mheap_.sweepArenas = mheap_.allArenas
    mheap_.reclaimIndex = 0
    mheap_.reclaimCredit = 0
    unlock(&mheap_.lock)

    sweep.centralIndex.clear()

    if !_ConcurrentSweep || mode == gcForceBlockMode {
        // Special case synchronous sweep. Record that no proportional
        // sweeping has to happen.
        lock(&mheap_.lock)
        mheap_.sweepPagesPerByte = 0
        unlock(&mheap_.lock)
        // Sweep all spans eagerly.
        for sweepone() != ^uintptr(0) {
            sweep.npausesweep++
        }
        // Free workbufs eagerly.
        prepareFreeWorkbufs()
        for freeSomeWbufs(false) {
        }
        // All "free" events for this mark/sweep cycle have now happened,
        // so we can make this profile cycle available immediately.
        mProf_NextCycle()
        mProf_Flush()
        return
    }

    // Background sweep.
    lock(&sweep.lock)
    if sweep.parked {
        sweep.parked = false
        ready(sweep.g, 0, true)
    }
    unlock(&sweep.lock)
}
// (body of gcResetMarkState, which resets per-G mark state before marking;
// its declaration and the page-mark clearing that follows are elided in
// this listing)
    lock(&allglock)
    for _, gp := range allgs {
        gp.gcscandone = false // set to true in gcphasework
        gp.gcAssistBytes = 0
    }
    unlock(&allglock)
// Hooks for other packages

var poolcleanup func()

//go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup
func sync_runtime_registerPoolCleanup(f func()) {
    poolcleanup = f
}

// (fragment of clearpools, which runs before each GC cycle)
    // clear sync.Pools
    if poolcleanup != nil {
        poolcleanup()
    }
// fmtNSAsMS nicely formats ns nanoseconds as milliseconds.
func fmtNSAsMS(buf []byte, ns uint64) []byte {
    if ns >= 10e6 {
        // Format as whole milliseconds.
        return itoaDiv(buf, ns/1e6, 0)
    }
    // Format two digits of precision, with at most three decimal places.
    x := ns / 1e3
    if x == 0 {
        buf[0] = '0'
        return buf[:1]
    }
    dec := 3
    for x >= 100 {
        x /= 10
        dec--
    }
    return itoaDiv(buf, x, dec)
}
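// For example, fmtNSAsMS renders 85_000 ns as "0.085" and 1_900_000 ns as
// "1.9", while anything at or above 10 ms is printed as whole milliseconds;
// these are the per-phase numbers that appear in the gctrace line above.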