Source File: mgcmark.go
Belonging Package: runtime
package runtime
import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
const (
	fixedRootFinalizers = iota
	fixedRootFreeGStacks
	fixedRootCount

	// rootBlockBytes is the number of bytes to scan per data or
	// BSS root.
	rootBlockBytes = 256 << 10

	// maxObletBytes is the maximum bytes of an object to scan at
	// once. Larger objects are split into "oblets" of at most this size.
	maxObletBytes = 128 << 10

	// drainCheckThreshold specifies how many units of scan work to do
	// between self-preemption checks in gcDrain.
	drainCheckThreshold = 100000

	// pagesPerSpanRoot indicates how many pages to scan from a span
	// root at a time. Used by special root marking.
	pagesPerSpanRoot = 512
)
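The constants above drive the sharding of global roots: a data or BSS segment of nbytes is split into ceil(nbytes/rootBlockBytes) root jobs, and each job scans at most rootBlockBytes bytes. The following standalone sketch (not part of mgcmark.go; the segment size is a made-up value) illustrates that arithmetic, mirroring the runtime's divRoundUp helper.

// Illustrative sketch only; not runtime code.
package main

import "fmt"

const rootBlockBytes = 256 << 10

// divRoundUp computes ceil(n/a), as the runtime helper of the same name does.
func divRoundUp(n, a uintptr) uintptr {
	return (n + a - 1) / a
}

func main() {
	segBytes := uintptr(1<<20 + 123) // hypothetical data segment size
	nBlocks := divRoundUp(segBytes, rootBlockBytes)
	fmt.Println("root jobs:", nBlocks)
	for shard := uintptr(0); shard < nBlocks; shard++ {
		// Each shard covers at most rootBlockBytes bytes of the segment.
		off := shard * rootBlockBytes
		n := uintptr(rootBlockBytes)
		if off+n > segBytes {
			n = segBytes - off
		}
		fmt.Printf("shard %d scans [%d, %d)\n", shard, off, off+n)
	}
}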
// gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
// some miscellany) and initializes scanning-related state.
//
// The world must be stopped.
func gcMarkRootPrepare() {
	assertWorldStopped()

	work.nFlushCacheRoots = 0

	// Compute how many data and BSS root blocks there are.
	nBlocks := func(bytes uintptr) int {
		return int(divRoundUp(bytes, rootBlockBytes))
	}

	work.nDataRoots = 0
	work.nBSSRoots = 0

	// Scan globals.
	for _, datap := range activeModules() {
		nDataRoots := nBlocks(datap.edata - datap.data)
		if nDataRoots > work.nDataRoots {
			work.nDataRoots = nDataRoots
		}
	}

	for _, datap := range activeModules() {
		nBSSRoots := nBlocks(datap.ebss - datap.bss)
		if nBSSRoots > work.nBSSRoots {
			work.nBSSRoots = nBSSRoots
		}
	}

	// Scan span roots for finalizer specials. Snapshot allArenas as
	// markArenas; this is safe because allArenas is append-only.
	mheap_.markArenas = mheap_.allArenas[:len(mheap_.allArenas):len(mheap_.allArenas)]
	work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)

	// Scan stacks. Gs created after this point begin life without
	// roots, so there is nothing to scan for them yet.
	work.nStackRoots = int(atomic.Loaduintptr(&allglen))

	work.markrootNext = 0
	work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
}
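gcMarkRootPrepare lays the job counts out in a fixed order (fixed roots, flush-cache, data blocks, BSS blocks, span shards, stacks), and markroot below recovers a job's category from its index by computing running base offsets. Here is a standalone sketch (not runtime code; the counts are invented) of that index-to-category mapping.

// Illustrative sketch only; not runtime code.
package main

import "fmt"

// rootCounts holds per-category job counts. The values used in main
// are made up; the runtime fills the real ones in gcMarkRootPrepare.
type rootCounts struct {
	fixed, flushCaches, data, bss, spans, stacks int
}

// classify maps a markroot job index to its category, mirroring the
// base offsets that markroot computes.
func classify(c rootCounts, i int) string {
	baseFlushCache := c.fixed
	baseData := baseFlushCache + c.flushCaches
	baseBSS := baseData + c.data
	baseSpans := baseBSS + c.bss
	baseStacks := baseSpans + c.spans
	end := baseStacks + c.stacks
	switch {
	case i < baseFlushCache:
		return "fixed root (finalizers or free G stacks)"
	case i < baseData:
		return "flush mcache"
	case i < baseBSS:
		return "data segment block"
	case i < baseSpans:
		return "BSS segment block"
	case i < baseStacks:
		return "span (finalizer specials) shard"
	case i < end:
		return "goroutine stack"
	default:
		return "out of range"
	}
}

func main() {
	c := rootCounts{fixed: 2, flushCaches: 0, data: 3, bss: 1, spans: 4, stacks: 5}
	for i := 0; i < 16; i++ {
		fmt.Printf("job %2d: %s\n", i, classify(c, i))
	}
}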
// gcMarkRootCheck checks that all roots were scanned. It is purely
// for debugging.
func gcMarkRootCheck() {
	if work.markrootNext < work.markrootJobs {
		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
		throw("left over markroot jobs")
	}

	lock(&allglock)
	// Check that stacks have been scanned.
	var gp *g
	for i := 0; i < work.nStackRoots; i++ {
		gp = allgs[i]
		if !gp.gcscandone {
			goto fail
		}
	}
	unlock(&allglock)
	return

fail:
	println("gp", gp, "goid", gp.goid,
		"status", readgstatus(gp),
		"gcscandone", gp.gcscandone)
	throw("scan missed a g")
}
var oneptrmask = [...]uint8{1}
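oneptrmask (and finptrmask, used for the finalizer blocks above) are 1-bit-per-word pointer bytemasks consumed by scanblock: word i of a block is treated as a pointer slot iff bit i%8 of mask byte i/8 is set. A standalone sketch of that decoding (not runtime code; the example masks are invented):

// Illustrative sketch only; not runtime code.
package main

import "fmt"

// pointerWords returns the indexes of the words that a bytemask marks
// as pointer slots, using the bit layout scanblock expects.
func pointerWords(mask []uint8, nwords int) []int {
	var idx []int
	for i := 0; i < nwords; i++ {
		if (mask[i/8]>>(uint(i)%8))&1 != 0 {
			idx = append(idx, i)
		}
	}
	return idx
}

func main() {
	oneptrmask := []uint8{1}       // only word 0 is a pointer
	finmask := []uint8{0b00101101} // hypothetical mask for an 8-word block
	fmt.Println(pointerWords(oneptrmask, 1)) // [0]
	fmt.Println(pointerWords(finmask, 8))    // [0 2 3 5]
}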
// markroot scans the i'th root.
//
// Preemption must be disabled (because this uses a gcWork).
//
//go:nowritebarrier
func markroot(gcw *gcWork, i uint32) {
	// Note: if you add a case here, please also update heapdump.go:dumproots.
	baseFlushCache := uint32(fixedRootCount)
	baseData := baseFlushCache + uint32(work.nFlushCacheRoots)
	baseBSS := baseData + uint32(work.nDataRoots)
	baseSpans := baseBSS + uint32(work.nBSSRoots)
	baseStacks := baseSpans + uint32(work.nSpanRoots)
	end := baseStacks + uint32(work.nStackRoots)

	switch {
	case baseFlushCache <= i && i < baseData:
		flushmcache(int(i - baseFlushCache))

	case baseData <= i && i < baseBSS:
		for _, datap := range activeModules() {
			markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-baseData))
		}

	case baseBSS <= i && i < baseSpans:
		for _, datap := range activeModules() {
			markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-baseBSS))
		}

	case i == fixedRootFinalizers:
		for fb := allfin; fb != nil; fb = fb.alllink {
			cnt := uintptr(atomic.Load(&fb.cnt))
			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
		}

	case i == fixedRootFreeGStacks:
		// Switch to the system stack so we can call stackfree.
		systemstack(markrootFreeGStacks)

	case baseSpans <= i && i < baseStacks:
		// Mark mspan.specials.
		markrootSpans(gcw, int(i-baseSpans))

	default:
		// The rest is scanning goroutine stacks.
		var gp *g
		if baseStacks <= i && i < end {
			gp = allgs[i-baseStacks]
		} else {
			throw("markroot: bad index")
		}

		// Remember when we first observed the G blocked; needed
		// only for traceback output.
		status := readgstatus(gp) // We are not in a scan state.
		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
			gp.waitsince = work.tstart
		}

		// scanstack must be done on the system stack in case we're
		// trying to scan our own stack.
		systemstack(func() {
			// If this is a self-scan, put the user G in _Gwaiting
			// to prevent self-deadlock. It may already be in
			// _Gwaiting if this is a mark worker or we're in mark
			// termination.
			userG := getg().m.curg
			selfScan := gp == userG && readgstatus(userG) == _Grunning
			if selfScan {
				casgstatus(userG, _Grunning, _Gwaiting)
				userG.waitreason = waitReasonGarbageCollectionScan
			}

			// suspendG blocks until gp stops running.
			stopped := suspendG(gp)
			if stopped.dead {
				gp.gcscandone = true
				return
			}
			if gp.gcscandone {
				throw("g already scanned")
			}
			scanstack(gp, gcw)
			gp.gcscandone = true
			resumeG(stopped)

			if selfScan {
				casgstatus(userG, _Gwaiting, _Grunning)
			}
		})
	}
}
throw("rootBlockBytes must be a multiple of 8*ptrSize")
}
:= uintptr() * rootBlockBytes
if >= {
return
}
:= +
:= (*uint8)(add(unsafe.Pointer(), uintptr()*(rootBlockBytes/(8*sys.PtrSize))))
:= uintptr(rootBlockBytes)
if + > {
= -
}
// markrootSpans marks roots for one shard of markArenas: the finalizer
// specials of in-use spans in that shard.
//
//go:nowritebarrier
func markrootSpans(gcw *gcWork, shard int) {
	// Find the arena and the page range within that arena covered by
	// this shard.
	ai := mheap_.markArenas[shard/(pagesPerArena/pagesPerSpanRoot)]
	ha := mheap_.arenas[ai.l1()][ai.l2()]
	arenaPage := uint(uintptr(shard) * pagesPerSpanRoot % pagesPerArena)

	// Construct the slice of the specials bitmap to iterate over. Each
	// set bit corresponds to a span with at least one special.
	specialsbits := ha.pageSpecials[arenaPage/8:]
	specialsbits = specialsbits[:pagesPerSpanRoot/8]
	for i := range specialsbits {
		// Find set bits, which correspond to spans with specials.
		specials := atomic.Load8(&specialsbits[i])
		if specials == 0 {
			continue
		}
		for j := uint(0); j < 8; j++ {
			if specials&(1<<j) == 0 {
				continue
			}
			// Find the span for this bit. It is guaranteed to be
			// non-nil because the bit is only set for in-use spans.
			s := ha.spans[arenaPage+uint(i)*8+j]
			if state := s.state.get(); state != mSpanInUse {
				print("s.state = ", state, "\n")
				throw("non in-use span found with specials bit set")
			}

			// Lock the specials to prevent a special from being
			// removed from the list while we're traversing it.
			lock(&s.speciallock)
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				// Don't mark the finalized object itself, but scan it
				// so everything it points to is retained. A finalizer
				// can be set for an inner byte of an object; find the
				// object's beginning.
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize
				scanobject(p, gcw)

				// The special itself is a root.
				scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw, nil)
			}
			unlock(&s.speciallock)
		}
	}
}
// gcAssistAlloc performs GC work to make gp's assist debt positive.
// gp must be the calling user goroutine.
//
// This must be called with preemption enabled.
func gcAssistAlloc(gp *g) {
	// Don't assist in non-preemptible contexts. These are generally
	// fragile and won't allow the assist to block.
	if getg() == gp.m.g0 {
		return
	}
	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
		return
	}

	traced := false
retry:
	// Compute the amount of scan work we need to do to make the
	// balance positive. When the required amount of work is low, we
	// over-assist to build up credit for future allocations and
	// amortize the cost of assisting.
	assistWorkPerByte := float64frombits(atomic.Load64(&gcController.assistWorkPerByte))
	assistBytesPerWork := float64frombits(atomic.Load64(&gcController.assistBytesPerWork))
	debtBytes := -gp.gcAssistBytes
	scanWork := int64(assistWorkPerByte * float64(debtBytes))
	if scanWork < gcOverAssistWork {
		scanWork = gcOverAssistWork
		debtBytes = int64(assistBytesPerWork * float64(scanWork))
	}

	// Steal as much credit as we can from the background GC's scan
	// credit. This is racy and may drop the background credit below 0
	// if two mutators steal at the same time, but in the long run
	// that doesn't really matter.
	bgScanCredit := atomic.Loadint64(&gcController.bgScanCredit)
	stolen := int64(0)
	if bgScanCredit > 0 {
		if bgScanCredit < scanWork {
			stolen = bgScanCredit
			gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(stolen))
		} else {
			stolen = scanWork
			gp.gcAssistBytes += debtBytes
		}
		atomic.Xaddint64(&gcController.bgScanCredit, -stolen)

		scanWork -= stolen

		if scanWork == 0 {
			// We were able to steal all of the credit we needed.
			if traced {
				traceGCMarkAssistDone()
			}
			return
		}
	}

	if trace.enabled && !traced {
		traced = true
		traceGCMarkAssistStart()
	}

	// Perform assist work.
	systemstack(func() {
		gcAssistAlloc1(gp, scanWork)
		// The user stack may have moved, so this can't touch
		// anything on it until it returns from systemstack.
	})

	completed := gp.param != nil
	gp.param = nil
	if completed {
		gcMarkDone()
	}

	if gp.gcAssistBytes < 0 {
		// We were unable to steal enough credit or perform enough
		// work to pay off the assist debt. If this is because we
		// were preempted, reschedule and try again.
		if gp.preempt {
			Gosched()
			goto retry
		}

		// Add this G to an assist queue and park. When the GC has
		// more background credit, it will satisfy queued assists
		// before flushing to the global credit pool.
		if !gcParkAssist() {
			goto retry
		}

		// At this point either background GC has satisfied this G's
		// assist debt, or the GC cycle is over.
	}
	if traced {
		traceGCMarkAssistDone()
	}
}
// gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
// stack. On return, gp.param is non-nil if the assist reached a
// background completion point.
//
//go:systemstack
func gcAssistAlloc1(gp *g, scanWork int64) {
	// Clear the flag indicating that this assist completed the mark phase.
	gp.param = nil

	if atomic.Load(&gcBlackenEnabled) == 0 {
		// GC is done, so ignore any remaining debt.
		gp.gcAssistBytes = 0
		return
	}
	// Track time spent in this assist. Since we're on the system
	// stack, this is non-preemptible, so we can just measure start
	// and end time.
	startTime := nanotime()

	decnwait := atomic.Xadd(&work.nwait, -1)
	if decnwait == work.nproc {
		println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
		throw("nwait > work.nprocs")
	}

	// gcDrainN requires the caller to be preemptible.
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = waitReasonGCAssistMarking

	// Drain our own cached work first in the hopes that it will be
	// more cache friendly.
	gcw := &getg().m.p.ptr().gcw
	workDone := gcDrainN(gcw, scanWork)

	casgstatus(gp, _Gwaiting, _Grunning)

	// Convert the scan work performed back into assist credit. The
	// "1+" ensures credit is added even when assistBytesPerWork is
	// very low.
	assistBytesPerWork := float64frombits(atomic.Load64(&gcController.assistBytesPerWork))
	gp.gcAssistBytes += 1 + int64(assistBytesPerWork*float64(workDone))

	// If this is the last worker and we ran out of work, signal a
	// completion point by setting gp.param to a non-nil value.
	incnwait := atomic.Xadd(&work.nwait, +1)
	if incnwait > work.nproc {
		println("runtime: work.nwait=", incnwait, "work.nproc=", work.nproc)
		throw("work.nwait > work.nproc")
	}
	if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
		gp.param = unsafe.Pointer(gp)
	}

	// Accumulate assist time on the P and flush it to the controller
	// once it exceeds the slack.
	duration := nanotime() - startTime
	_p_ := gp.m.p.ptr()
	_p_.gcAssistTime += duration
	if _p_.gcAssistTime > gcAssistTimeSlack {
		atomic.Xaddint64(&gcController.assistTime, _p_.gcAssistTime)
		_p_.gcAssistTime = 0
	}
}
// gcWakeAllAssists wakes all currently blocked assists. This is used
// at the end of a GC cycle.
func gcWakeAllAssists() {
	lock(&work.assistQueue.lock)
	list := work.assistQueue.q.popList()
	injectglist(&list)
	unlock(&work.assistQueue.lock)
}
// gcParkAssist puts the current goroutine on the assist queue and parks.
//
// gcParkAssist reports whether the assist is now satisfied. If it
// returns false, the caller must retry the assist.
//
//go:nowritebarrier
func gcParkAssist() bool {
	lock(&work.assistQueue.lock)
	// If the GC cycle finished while we were getting the lock, exit
	// the assist. The cycle can't finish while we hold the lock.
	if atomic.Load(&gcBlackenEnabled) == 0 {
		unlock(&work.assistQueue.lock)
		return true
	}

	gp := getg()
	oldList := work.assistQueue.q
	work.assistQueue.q.pushBack(gp)

	// Recheck for background credit now that this G is in the queue
	// but can still back out. This avoids a race in case background
	// marking has flushed more credit since we checked above.
	if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
		work.assistQueue.q = oldList
		if oldList.tail != 0 {
			oldList.tail.ptr().schedlink.set(nil)
		}
		unlock(&work.assistQueue.lock)
		return false
	}
	// Park.
	goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceEvGoBlockGC, 2)
	return true
}
// gcFlushBgCredit flushes scanWork units of background scan work
// credit. This first satisfies blocked assists on work.assistQueue
// and then flushes any remaining credit to gcController.bgScanCredit.
//
//go:nowritebarrierrec
func gcFlushBgCredit(scanWork int64) {
	if work.assistQueue.q.empty() {
		// Fast path; there are no blocked assists. There's a small
		// window here where an assist may add itself to the blocked
		// queue and park. If that happens, we'll just get it on the
		// next flush.
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
		return
	}

	assistBytesPerWork := float64frombits(atomic.Load64(&gcController.assistBytesPerWork))
	scanBytes := int64(float64(scanWork) * assistBytesPerWork)

	lock(&work.assistQueue.lock)
	for !work.assistQueue.q.empty() && scanBytes > 0 {
		gp := work.assistQueue.q.pop()
		// Note that gp.gcAssistBytes is negative because gp is in
		// debt. Think carefully about the signs below.
		if scanBytes+gp.gcAssistBytes >= 0 {
			// Satisfy this entire assist debt.
			scanBytes += gp.gcAssistBytes
			gp.gcAssistBytes = 0
			// It's important that we *not* put gp in runnext;
			// otherwise, user code could exploit the GC worker's
			// high scheduler priority.
			ready(gp, 0, false)
		} else {
			// Partially satisfy this assist.
			gp.gcAssistBytes += scanBytes
			scanBytes = 0
			// As a heuristic, move this assist to the back of the
			// queue so that large assists can't clog up the queue
			// and substantially delay small assists.
			work.assistQueue.q.pushBack(gp)
			break
		}
	}

	if scanBytes > 0 {
		// Convert from scan bytes back to work.
		assistWorkPerByte := float64frombits(atomic.Load64(&gcController.assistWorkPerByte))
		scanWork = int64(float64(scanBytes) * assistWorkPerByte)
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
	}
	unlock(&work.assistQueue.lock)
}
// scanstack scans gp's stack, greying all pointers found on the stack.
//
// scanstack will also shrink the stack if it is safe to do so. If it
// is not, it schedules a stack shrink for the next synchronous safe
// point.
//
// scanstack is marked go:systemstack because it must not be preempted
// while using a workbuf.
//
//go:nowritebarrier
//go:systemstack
func scanstack(gp *g, gcw *gcWork) {
	if readgstatus(gp)&_Gscan == 0 {
		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
		throw("scanstack - bad status")
	}

	switch readgstatus(gp) &^ _Gscan {
	default:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("mark - bad status")
	case _Gdead:
		return
	case _Grunning:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("scanstack: goroutine not stopped")
	case _Grunnable, _Gsyscall, _Gwaiting:
		// ok
	}

	if gp == getg() {
		throw("can't scan our own stack")
	}

	if isShrinkStackSafe(gp) {
		// Shrink the stack if not much of it is being used.
		shrinkstack(gp)
	} else {
		// Otherwise, shrink the stack at the next sync safe point.
		gp.preemptShrink = true
	}

	var state stackScanState
	state.stack = gp.stack

	if stackTraceDebug {
		println("stack trace goroutine", gp.goid)
	}

	if debugScanConservative && gp.asyncSafePoint {
		print("scanning async preempted goroutine ", gp.goid, " stack [", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
	}

	// Scan the saved context register. This is effectively a live
	// register that gets moved back and forth between the register
	// and sched.ctxt without a write barrier.
	if gp.sched.ctxt != nil {
		scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), sys.PtrSize, &oneptrmask[0], gcw, &state)
	}

	// Scan the stack, accumulating a list of stack objects.
	scanframe := func(frame *stkframe, unused unsafe.Pointer) bool {
		scanframeworker(frame, &state, gcw)
		return true
	}
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)

	// Find and trace all defer arguments.
	tracebackdefers(gp, scanframe, nil)

	// Find and scan all reachable stack objects.
	//
	// The state's pointer queue prioritizes precise pointers over
	// conservative pointers so that we'll prefer scanning stack
	// objects precisely.
	state.buildIndex()
	for {
		p, conservative := state.getPtr()
		if p == 0 {
			break
		}
		obj := state.findObject(p)
		if obj == nil {
			continue
		}
		t := obj.typ
		if t == nil {
			// We've already scanned this object.
			continue
		}
		obj.setType(nil) // Don't scan the same object twice.

		// Find the GC data for the object's type, materializing a GC
		// program into a plain bitmap if necessary.
		gcdata := t.gcdata
		var s *mspan
		if t.kind&kindGCProg != 0 {
			s = materializeGCProg(t.ptrdata, gcdata)
			gcdata = (*byte)(unsafe.Pointer(s.startAddr))
		}

		b := state.stack.lo + uintptr(obj.off)
		if conservative {
			scanConservative(b, t.ptrdata, gcdata, gcw, &state)
		} else {
			scanblock(b, t.ptrdata, gcdata, gcw, &state)
		}

		if s != nil {
			dematerializeGCProg(s)
		}
	}
}
// Scan a stack frame: local variables and function arguments/results.
//
//go:nowritebarrier
func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
	if _DebugGC > 1 && frame.continpc != 0 {
		print("scanframe ", funcname(frame.fn), "\n")
	}

	isAsyncPreempt := frame.fn.valid() && frame.fn.funcID == funcID_asyncPreempt
	isDebugCall := frame.fn.valid() && frame.fn.funcID == funcID_debugCallV1
	if state.conservative || isAsyncPreempt || isDebugCall {
		if debugScanConservative {
			println("conservatively scanning function", funcname(frame.fn), "at PC", hex(frame.continpc))
		}

		// Conservatively scan the frame. Unlike the precise case,
		// this includes the outgoing argument space, since we may
		// have stopped while this function was setting up a call.

		// Scan the frame's locals region.
		if frame.varp != 0 {
			size := frame.varp - frame.sp
			if size > 0 {
				scanConservative(frame.sp, size, nil, gcw, state)
			}
		}

		// Scan arguments to this frame.
		if frame.arglen != 0 {
			scanConservative(frame.argp, frame.arglen, nil, gcw, state)
		}

		if isAsyncPreempt || isDebugCall {
			// This function's frame contained the registers for the
			// asynchronously stopped parent frame. Scan the parent
			// conservatively, too.
			state.conservative = true
		} else {
			// We only wanted to scan those two frames
			// conservatively. Clear the flag for future frames.
			state.conservative = false
		}
		return
	}

	locals, args, objs := getStackMap(frame, &state.cache, false)

	// Scan local variables if the stack frame has been allocated.
	if locals.n > 0 {
		size := uintptr(locals.n) * sys.PtrSize
		scanblock(frame.varp-size, size, locals.bytedata, gcw, state)
	}

	// Scan arguments.
	if args.n > 0 {
		scanblock(frame.argp, uintptr(args.n)*sys.PtrSize, args.bytedata, gcw, state)
	}

	// Add all stack objects to the stack object list.
	if frame.varp != 0 {
		// varp is 0 for defers, where there are no locals.
		for _, obj := range objs {
			off := obj.off
			base := frame.varp // locals base pointer
			if off >= 0 {
				base = frame.argp // arguments and return values base pointer
			}
			ptr := base + uintptr(off)
			if ptr < frame.sp {
				// Object hasn't been allocated in the frame yet.
				continue
			}
			if stackTraceDebug {
				println("stkobj at", hex(ptr), "of type", obj.typ.string())
			}
			state.addObject(ptr, obj.typ)
		}
	}
}
// gcDrainFlags control the behavior of gcDrain.
type gcDrainFlags int

const (
	gcDrainUntilPreempt gcDrainFlags = 1 << iota
	gcDrainFlushBgCredit
	gcDrainIdle
	gcDrainFractional
)
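gcDrainFlags is a bit set: callers combine modes with | and gcDrain tests them with expressions like flags&gcDrainUntilPreempt != 0. A standalone sketch of that pattern (not runtime code; the combinations shown are just examples):

// Illustrative sketch only; not runtime code.
package main

import "fmt"

type gcDrainFlags int

const (
	gcDrainUntilPreempt gcDrainFlags = 1 << iota
	gcDrainFlushBgCredit
	gcDrainIdle
	gcDrainFractional
)

// describe prints which drain modes a flag combination enables.
func describe(flags gcDrainFlags) {
	fmt.Println("until preempt:", flags&gcDrainUntilPreempt != 0,
		"flush bg credit:", flags&gcDrainFlushBgCredit != 0,
		"idle:", flags&gcDrainIdle != 0,
		"fractional:", flags&gcDrainFractional != 0)
}

func main() {
	// Two example combinations of drain modes.
	describe(gcDrainUntilPreempt | gcDrainFlushBgCredit)
	describe(gcDrainUntilPreempt | gcDrainFlushBgCredit | gcDrainIdle)
}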
// gcDrain scans roots and objects in work buffers, blackening grey
// objects until it is unable to get more work. It may return before
// GC is done; it's the caller's responsibility to balance work from
// other Ps.
//
// gcDrainUntilPreempt makes it return when g.preempt is set;
// gcDrainIdle and gcDrainFractional make it self-preempt periodically;
// gcDrainFlushBgCredit makes it flush scan work credit to
// gcController.bgScanCredit every gcCreditSlack units of scan work.
//
//go:nowritebarrier
func gcDrain(gcw *gcWork, flags gcDrainFlags) {
	if !writeBarrier.needed {
		throw("gcDrain phase incorrect")
	}

	gp := getg().m.curg
	preemptible := flags&gcDrainUntilPreempt != 0
	flushBgCredit := flags&gcDrainFlushBgCredit != 0
	idle := flags&gcDrainIdle != 0

	initScanWork := gcw.scanWork

	// checkWork is the scan work before performing the next
	// self-preempt check.
	checkWork := int64(1<<63 - 1)
	var check func() bool
	if flags&(gcDrainIdle|gcDrainFractional) != 0 {
		checkWork = initScanWork + drainCheckThreshold
		if idle {
			check = pollWork
		} else if flags&gcDrainFractional != 0 {
			check = pollFractionalWorkerExit
		}
	}

	// Drain root marking jobs.
	if work.markrootNext < work.markrootJobs {
		// Stop if we're preempted and preemption is allowed.
		for !(preemptible && gp.preempt) {
			job := atomic.Xadd(&work.markrootNext, +1) - 1
			if job >= work.markrootJobs {
				break
			}
			markroot(gcw, job)
			if check != nil && check() {
				goto done
			}
		}
	}

	// Drain heap marking jobs.
	for !(preemptible && gp.preempt) {
		// Try to keep work available on the global queue.
		if work.full == 0 {
			gcw.balance()
		}

		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
			if b == 0 {
				// Flush the write barrier buffer; this may create
				// more work.
				wbBufFlush(nil, 0)
				b = gcw.tryGet()
			}
		}
		if b == 0 {
			// Unable to get work.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit to the global account if
		// we've accumulated enough locally so mutator assists can
		// draw on it.
		if gcw.scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
			if flushBgCredit {
				gcFlushBgCredit(gcw.scanWork - initScanWork)
				initScanWork = 0
			}
			checkWork -= gcw.scanWork
			gcw.scanWork = 0

			if checkWork <= 0 {
				checkWork += drainCheckThreshold
				if check != nil && check() {
					break
				}
			}
		}
	}

done:
	// Flush remaining scan work credit.
	if gcw.scanWork > 0 {
		atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
		if flushBgCredit {
			gcFlushBgCredit(gcw.scanWork - initScanWork)
		}
		gcw.scanWork = 0
	}
}
// gcDrainN blackens grey objects until it has performed roughly
// scanWork units of scan work or the G is preempted. This is
// best-effort, so it may perform less work if it fails to get a work
// buffer. It returns the amount of scan work performed.
//
// The caller goroutine must be in a preemptible state (e.g.,
// _Gwaiting) to prevent deadlocks during stack scanning. As a
// consequence, this must be called on the system stack.
//
//go:nowritebarrier
//go:systemstack
func gcDrainN(gcw *gcWork, scanWork int64) int64 {
	if !writeBarrier.needed {
		throw("gcDrainN phase incorrect")
	}

	// There may already be scan work on the gcw, which we don't want
	// to claim was done by this call.
	workFlushed := -gcw.scanWork

	gp := getg().m.curg
	for !gp.preempt && workFlushed+gcw.scanWork < scanWork {
		// See gcDrain comment.
		if work.full == 0 {
			gcw.balance()
		}

		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
			if b == 0 {
				// Flush the write barrier buffer; this may create
				// more work.
				wbBufFlush(nil, 0)
				b = gcw.tryGet()
			}
		}

		if b == 0 {
			// Try to do a root job.
			if work.markrootNext < work.markrootJobs {
				job := atomic.Xadd(&work.markrootNext, +1) - 1
				if job < work.markrootJobs {
					markroot(gcw, job)
					continue
				}
			}
			// No heap or root jobs.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit.
		if gcw.scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
			workFlushed += gcw.scanWork
			gcw.scanWork = 0
		}
	}

	// Unlike gcDrain, there's no need to flush remaining work here
	// because this never flushes to bgScanCredit and gcw.scanWork
	// will be flushed on the next mark completion.
	return workFlushed + gcw.scanWork
}
// scanblock scans b as scanobject would, but using an explicit
// pointer bitmap instead of the heap bitmap. It is used to scan
// non-heap roots. If stk != nil, possible stack pointers are also
// reported to stk.putPtr.
//
//go:nowritebarrier
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
	// Use local copies of original parameters, so that a stack trace
	// due to one of the throws below shows the original block base
	// and extent.
	b := b0
	n := n0

	for i := uintptr(0); i < n; {
		// Find bits for the next word.
		bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
		if bits == 0 {
			i += sys.PtrSize * 8
			continue
		}
		for j := 0; j < 8 && i < n; j++ {
			if bits&1 != 0 {
				// Same work as in scanobject; see comments there.
				p := *(*uintptr)(unsafe.Pointer(b + i))
				if p != 0 {
					if obj, span, objIndex := findObject(p, b, i); obj != 0 {
						greyobject(obj, b, i, span, gcw, objIndex)
					} else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
						stk.putPtr(p, false)
					}
				}
			}
			bits >>= 1
			i += sys.PtrSize
		}
	}
}
// scanobject scans the object starting at b, adding pointers to gcw.
// b must point to the beginning of a heap object or an oblet.
// scanobject consults the GC bitmap for the pointer mask and the
// spans for the size of the object.
//
//go:nowritebarrier
func scanobject(b uintptr, gcw *gcWork) {
	// Find the bits for b and the size of the object at b.
	//
	// b is either the beginning of an object, in which case this is
	// the size of the object to scan, or it points to an oblet, in
	// which case we compute the size to scan below.
	hbits := heapBitsForAddr(b)
	s := spanOfUnchecked(b)
	n := s.elemsize
	if n == 0 {
		throw("scanobject n == 0")
	}

	if n > maxObletBytes {
		// Large object. Break into oblets for better parallelism and
		// lower latency.
		if b == s.base() {
			// It's possible this is a noscan object (not from
			// greyobject, but from other code paths), in which case
			// we must *not* enqueue oblets since their bitmaps will
			// be uninitialized.
			if s.spanclass.noscan() {
				// Bypass the whole scan.
				gcw.bytesMarked += uint64(n)
				return
			}

			// Enqueue the other oblets to scan later. Some oblets
			// may be in b's scalar tail, but these will be marked as
			// "no more pointers", so we'll drop out immediately when
			// we go to scan those.
			for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
				if !gcw.putFast(oblet) {
					gcw.put(oblet)
				}
			}
		}

		// Compute the size of the oblet. Since this object must be a
		// large object, s.base() is the beginning of the object.
		n = s.base() + s.elemsize - b
		if n > maxObletBytes {
			n = maxObletBytes
		}
	}

	var i uintptr
	for i = 0; i < n; i += sys.PtrSize {
		// Find bits for this word.
		if i != 0 {
			// Avoid needless hbits.next() on last iteration.
			hbits = hbits.next()
		}
		// Load bits once. See CL 22712 and issue 16973 for discussion.
		bits := hbits.bits()
		if bits&bitScan == 0 {
			break // no more pointers in this object
		}
		if bits&bitPointer == 0 {
			continue // not a pointer
		}

		// Work here is duplicated in scanblock; if you make changes
		// here, make changes there too.
		obj := *(*uintptr)(unsafe.Pointer(b + i))

		// At this point we have extracted the next potential pointer.
		// Quickly filter out nil and pointers back to the current object.
		if obj != 0 && obj-b >= n {
			// Test if obj points into the Go heap and, if so, mark
			// the object.
			if obj, span, objIndex := findObject(obj, b, i); obj != 0 {
				greyobject(obj, b, i, span, gcw, objIndex)
			}
		}
	}
	gcw.bytesMarked += uint64(n)
	gcw.scanWork += int64(i)
}
// scanConservative scans block [b, b+n) conservatively, treating any
// pointer-like value in the block as a pointer.
//
// If ptrmask != nil, only words that are marked in ptrmask are
// considered as potential pointers.
//
// If state != nil, it's assumed that [b, b+n) is a block in the stack
// and may contain pointers to stack objects.
func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState) {
	if debugScanConservative {
		printlock()
		print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n")
		hexdumpWords(b, b+n, func(p uintptr) byte {
			if ptrmask != nil {
				word := (p - b) / sys.PtrSize
				bits := *addb(ptrmask, word/8)
				if (bits>>(word%8))&1 == 0 {
					return '$'
				}
			}

			val := *(*uintptr)(unsafe.Pointer(p))
			if state != nil && state.stack.lo <= val && val < state.stack.hi {
				return '@'
			}

			span := spanOfHeap(val)
			if span == nil {
				return ' '
			}
			idx := span.objIndex(val)
			if span.isFree(idx) {
				return ' '
			}
			return '*'
		})
		printunlock()
	}

	for i := uintptr(0); i < n; i += sys.PtrSize {
		if ptrmask != nil {
			word := i / sys.PtrSize
			bits := *addb(ptrmask, word/8)
			if bits == 0 {
				// Skip 8 words (the loop increment will do the 8th).
				//
				// This must be the first time we've seen this word
				// of ptrmask, so i must be 8-word-aligned, but check
				// our reasoning just in case.
				if i%(sys.PtrSize*8) != 0 {
					throw("misaligned mask")
				}
				i += sys.PtrSize*8 - sys.PtrSize
				continue
			}
			if (bits>>(word%8))&1 == 0 {
				continue
			}
		}

		val := *(*uintptr)(unsafe.Pointer(b + i))

		// Check if val points into the stack.
		if state != nil && state.stack.lo <= val && val < state.stack.hi {
			// val may point to a stack object. This object may be
			// dead from last cycle, but unlike heap objects we can't
			// tell, so it must be scanned defensively, too.
			state.putPtr(val, true)
			continue
		}

		// Check if val points to a heap span.
		span := spanOfHeap(val)
		if span == nil {
			continue
		}

		// Check if val points to an allocated object.
		idx := span.objIndex(val)
		if span.isFree(idx) {
			continue
		}

		// val points to an allocated object. Mark it.
		obj := span.base() + idx*span.elemsize
		greyobject(obj, b, i, span, gcw, idx)
	}
}
// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
// Preemption must be disabled.
//
//go:nowritebarrier
func shade(b uintptr) {
	if obj, span, objIndex := findObject(b, 0, 0); obj != 0 {
		gcw := &getg().m.p.ptr().gcw
		greyobject(obj, 0, 0, span, gcw, objIndex)
	}
}
// obj is the start of an object with mark mbits.
// If it isn't already marked, mark it and enqueue into gcw.
// base and off are for debugging only and could be removed.
//
// See also wbBufFlush1, which partially duplicates this logic.
//
//go:nowritebarrierrec
func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
	// obj should be the start of an allocation, and so must be at
	// least pointer-aligned.
	if obj&(sys.PtrSize-1) != 0 {
		throw("greyobject: obj not pointer-aligned")
	}
	mbits := span.markBitsForIndex(objIndex)

	if useCheckmark {
		if setCheckmark(obj, base, off, mbits) {
			// Already marked.
			return
		}
	} else {
		if debug.gccheckmark > 0 && span.isFree(objIndex) {
			print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
			gcDumpObject("base", base, off)
			gcDumpObject("obj", obj, ^uintptr(0))
			getg().m.traceback = 2
			throw("marking free object")
		}

		// If marked we have nothing to do.
		if mbits.isMarked() {
			return
		}
		mbits.setMarked()

		// Mark span.
		arena, pageIdx, pageMask := pageIndexOf(span.base())
		if arena.pageMarks[pageIdx]&pageMask == 0 {
			atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
		}

		// If this is a noscan object, fast-track it to black instead
		// of greying it.
		if span.spanclass.noscan() {
			gcw.bytesMarked += uint64(span.elemsize)
			return
		}
	}

	// Queue the obj for scanning.
	if !gcw.putFast(obj) {
		gcw.put(obj)
	}
}
// gcDumpObject dumps the contents of obj for debugging and marks the
// field at byte offset off in obj.
func gcDumpObject(label string, obj, off uintptr) {
	s := spanOf(obj)
	print(label, "=", hex(obj))
	if s == nil {
		print(" s=nil\n")
		return
	}
	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
	if state := s.state.get(); 0 <= state && int(state) < len(mSpanStateNames) {
		print(mSpanStateNames[state], "\n")
	} else {
		print("unknown(", state, ")\n")
	}

	skipped := false
	size := s.elemsize
	if s.state.get() == mSpanManual && size == 0 {
		// We're printing something from a stack frame. We don't know
		// how big it is, so just show up to and including off.
		size = off + sys.PtrSize
	}
	for i := uintptr(0); i < size; i += sys.PtrSize {
		// For big objects, just print the beginning (because that
		// usually hints at the object's type) and the fields around off.
		if !(i < 128*sys.PtrSize || off-16*sys.PtrSize < i && i < off+16*sys.PtrSize) {
			skipped = true
			continue
		}
		if skipped {
			print(" ...\n")
			skipped = false
		}
		print(" *(", label, "+", hex(i), ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
		if i == off {
			print(" <==")
		}
		print("\n")
	}
	if skipped {
		print(" ...\n")
	}
}
// gcmarknewobject marks a newly allocated object black. obj must not
// contain any non-nil pointers.
//
// This is nosplit so it can manipulate a gcWork without preemption.
//
//go:nowritebarrier
//go:nosplit
func gcmarknewobject(span *mspan, obj, size, scanSize uintptr) {
	if useCheckmark { // The world should be stopped so this should not happen.
		throw("gcmarknewobject called while doing checkmark")
	}

	// Mark object.
	objIndex := span.objIndex(obj)
	span.markBitsForIndex(objIndex).setMarked()

	// Mark span.
	arena, pageIdx, pageMask := pageIndexOf(span.base())
	if arena.pageMarks[pageIdx]&pageMask == 0 {
		atomic.Or8(&arena.pageMarks[pageIdx], pageMask)
	}

	gcw := &getg().m.p.ptr().gcw
	gcw.bytesMarked += uint64(size)
	gcw.scanWork += int64(scanSize)
}
// gcMarkTinyAllocs greys all active tiny alloc blocks.
//
// The world must be stopped.
func gcMarkTinyAllocs() {
	assertWorldStopped()

	for _, p := range allp {
		c := p.mcache
		if c == nil || c.tiny == 0 {
			continue
		}
		_, span, objIndex := findObject(c.tiny, 0, 0)
		gcw := &p.gcw
		greyobject(c.tiny, 0, 0, span, gcw, objIndex)
	}
}