Copyright 2009 The Go Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
Garbage collector: marking and scanning

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	fixedRootFinalizers = iota
	fixedRootFreeGStacks
	fixedRootCount
rootBlockBytes is the number of bytes to scan per data or BSS root.
	rootBlockBytes = 256 << 10
maxObletBytes is the maximum bytes of an object to scan at once. Larger objects will be split up into "oblets" of at most this size. Since we can scan 1–2 MB/ms, 128 KB bounds scan preemption at ~100 µs. This must be > _MaxSmallSize so that the object base is the span base.
	maxObletBytes = 128 << 10
drainCheckThreshold specifies how many units of work to do between self-preemption checks in gcDrain. Assuming a scan rate of 1 MB/ms, this is ~100 µs. Lower values have higher overhead in the scan loop (the scheduler check may perform a syscall, so its overhead is nontrivial). Higher values make the system less responsive to incoming work.
	drainCheckThreshold = 100000
pagesPerSpanRoot indicates how many pages to scan from a span root at a time. Used by special root marking. Higher values improve throughput by increasing locality, but increase the minimum latency of a marking operation. Must be a multiple of the pageInUse bitmap element size and must also evenly divide pagesPerArena.
	pagesPerSpanRoot = 512
)
gcMarkRootPrepare queues root scanning jobs (stacks, globals, and some miscellany) and initializes scanning-related state. The world must be stopped.
Compute how many data and BSS root blocks there are.
	nBlocks := func(bytes uintptr) int {
		return int(divRoundUp(bytes, rootBlockBytes))
	}

	work.nDataRoots = 0
	work.nBSSRoots = 0
Scan globals.
	for _, datap := range activeModules() {
		nDataRoots := nBlocks(datap.edata - datap.data)
		if nDataRoots > work.nDataRoots {
			work.nDataRoots = nDataRoots
		}
	}

	for _, datap := range activeModules() {
		nBSSRoots := nBlocks(datap.ebss - datap.bss)
		if nBSSRoots > work.nBSSRoots {
			work.nBSSRoots = nBSSRoots
		}
	}
Scan span roots for finalizer specials. We depend on addfinalizer to mark objects that get finalizers after root marking. We're going to scan the whole heap (that was available at the time the mark phase started, i.e. markArenas) for in-use spans which have specials. Break up the work into arenas, and further into chunks. Snapshot allArenas as markArenas. This snapshot is safe because allArenas is append-only.
Scan stacks. Gs may be created after this point, but it's okay that we ignore them because they begin life without any roots, so there's nothing to scan, and any roots they create during the concurrent phase will be caught by the write barrier.
gcMarkRootCheck checks that all roots have been scanned. It is purely for debugging.
func gcMarkRootCheck() {
	if work.markrootNext < work.markrootJobs {
		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
		throw("left over markroot jobs")
	}

Check that stacks have been scanned.
	lock(&allglock)
	var gp *g
	for i := 0; i < work.nStackRoots; i++ {
		gp = allgs[i]
		if !gp.gcscandone {
			goto fail
		}
	}
	unlock(&allglock)
	return

fail:
	println("gp", gp, "goid", gp.goid,
		"status", readgstatus(gp),
		"gcscandone", gp.gcscandone)
	throw("scan missed a g")
}
ptrmask for an allocation containing a single pointer.
var oneptrmask = [...]uint8{1}
markroot scans the i'th root. Preemption must be disabled (because this uses a gcWork). nowritebarrier is only advisory here.
go:nowritebarrier
func markroot(gcw *gcWork, i uint32) {
TODO(austin): This is a bit ridiculous. Compute and store the bases in gcMarkRootPrepare instead of the counts.
	baseFlushCache := uint32(fixedRootCount)
	baseData := baseFlushCache + uint32(work.nFlushCacheRoots)
	baseBSS := baseData + uint32(work.nDataRoots)
	baseSpans := baseBSS + uint32(work.nBSSRoots)
	baseStacks := baseSpans + uint32(work.nSpanRoots)
	end := baseStacks + uint32(work.nStackRoots)
Note: if you add a case here, please also update heapdump.go:dumproots.
	switch {
	case baseFlushCache <= i && i < baseData:
		flushmcache(int(i - baseFlushCache))

	case baseData <= i && i < baseBSS:
		for _, datap := range activeModules() {
			markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-baseData))
		}

	case baseBSS <= i && i < baseSpans:
		for _, datap := range activeModules() {
			markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-baseBSS))
		}

	case i == fixedRootFinalizers:
		for fb := allfin; fb != nil; fb = fb.alllink {
			cnt := uintptr(atomic.Load(&fb.cnt))
			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
		}

	case i == fixedRootFreeGStacks:
Switch to the system stack so we can call stackfree.
		systemstack(markrootFreeGStacks)

	case baseSpans <= i && i < baseStacks:
mark mspan.specials
		markrootSpans(gcw, int(i-baseSpans))

	default:
the rest is scanning goroutine stacks
		var gp *g
		if baseStacks <= i && i < end {
			gp = allgs[i-baseStacks]
		} else {
			throw("markroot: bad index")
		}
Remember when we first observed the G blocked. This is needed only for traceback output.
		status := readgstatus(gp) // We are not in a scan state
		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
			gp.waitsince = work.tstart
		}
scanstack must be done on the system stack in case we're trying to scan our own stack.
		systemstack(func() {
If this is a self-scan, put the user G in _Gwaiting to prevent self-deadlock. It may already be in _Gwaiting if this is a mark worker or we're in mark termination.
			userG := getg().m.curg
			selfScan := gp == userG && readgstatus(userG) == _Grunning
			if selfScan {
				casgstatus(userG, _Grunning, _Gwaiting)
				userG.waitreason = waitReasonGarbageCollectionScan
			}
TODO: suspendG blocks (and spins) until gp stops, which may take a while for running goroutines. Consider doing this in two phases where the first is non-blocking: we scan the stacks we can and ask running goroutines to scan themselves; and the second blocks.
			stopped := suspendG(gp)
			if stopped.dead {
				gp.gcscandone = true
				return
			}
			if gp.gcscandone {
				throw("g already scanned")
			}
			scanstack(gp, gcw)
			gp.gcscandone = true
			resumeG(stopped)

			if selfScan {
				casgstatus(userG, _Gwaiting, _Grunning)
			}
		})
	}
}
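Illustrative sketch, not part of the runtime: how a flat root index i maps onto the contiguous ranges computed by the base* variables above. The parameter names are invented stand-ins for the work.n*Roots counts.
func describeRootJob(i, nFlushCache, nData, nBSS, nSpan, nStack uint32) string {
	const fixedRoots = 2 // finalizers + free G stacks, as in fixedRootCount
	baseFlush := uint32(fixedRoots)
	baseData := baseFlush + nFlushCache
	baseBSS := baseData + nData
	baseSpans := baseBSS + nBSS
	baseStacks := baseSpans + nSpan
	end := baseStacks + nStack
	switch {
	case i < baseFlush:
		return "fixed root (finalizers or free G stacks)"
	case i < baseData:
		return "flush an mcache"
	case i < baseBSS:
		return "scan a data segment block"
	case i < baseSpans:
		return "scan a BSS segment block"
	case i < baseStacks:
		return "scan a shard of span specials"
	case i < end:
		return "scan a goroutine stack"
	default:
		return "out of range"
	}
}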
markrootBlock scans the shard'th shard of the block of memory [b0, b0+n0), with the given pointer mask.
go:nowritebarrier
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
	if rootBlockBytes%(8*sys.PtrSize) != 0 {
This is necessary to pick byte offsets in ptrmask0.
		throw("rootBlockBytes must be a multiple of 8*ptrSize")
	}
Note that if b0 is toward the end of the address space, then b0 + rootBlockBytes might wrap around. These tests are written to avoid any possible overflow.
	off := uintptr(shard) * rootBlockBytes
	if off >= n0 {
		return
	}
	b := b0 + off
	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*sys.PtrSize))))
	n := uintptr(rootBlockBytes)
	if off+n > n0 {
		n = n0 - off
	}
Scan this shard.
	scanblock(b, n, ptrmask, gcw, nil)
}
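Illustrative, self-contained version of the shard arithmetic above; shardSize stands in for rootBlockBytes and the function name is invented.
func rootShard(base, n, shardSize uintptr, shard int) (b, length uintptr, ok bool) {
	off := uintptr(shard) * shardSize
	if off >= n {
		return 0, 0, false // shard is past the end of the region
	}
	b = base + off
	length = shardSize
	if off+length > n {
		length = n - off // the last shard may be short
	}
	return b, length, true
}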
markrootFreeGStacks frees stacks of dead Gs. This does not free stacks of dead Gs cached on Ps, but having a few cached stacks around isn't a problem.
func markrootFreeGStacks() {
Take list of dead Gs with stacks.
	lock(&sched.gFree.lock)
	list := sched.gFree.stack
	sched.gFree.stack = gList{}
	unlock(&sched.gFree.lock)
	if list.empty() {
		return
	}
Free stacks.
	q := gQueue{list.head, list.head}
	for gp := list.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
		stackfree(gp.stack)
		gp.stack.lo = 0
Manipulate the queue directly since the Gs are already all linked the right way.
		q.tail.set(gp)
	}
Put Gs back on the free list.
	lock(&sched.gFree.lock)
	sched.gFree.noStack.pushAll(q)
	unlock(&sched.gFree.lock)
}
markrootSpans marks roots for one shard of markArenas.
go:nowritebarrier
func markrootSpans(gcw *gcWork, shard int) {
Objects with finalizers have two GC-related invariants: 1) Everything reachable from the object must be marked. This ensures that when we pass the object to its finalizer, everything the finalizer can reach will be retained. 2) Finalizer specials (which are not in the garbage collected heap) are roots. In practice, this means the fn field must be scanned.
Find the arena and page index into that arena for this shard.
	 := mheap_.markArenas[/(pagesPerArena/pagesPerSpanRoot)]
	 := mheap_.arenas[.l1()][.l2()]
	 := uint(uintptr() * pagesPerSpanRoot % pagesPerArena)
Construct slice of bitmap which we'll iterate over.
	 := .pageSpecials[/8:]
	 = [:pagesPerSpanRoot/8]
Find set bits, which correspond to spans with specials.
		 := atomic.Load8(&[])
		if  == 0 {
			continue
		}
		for  := uint(0);  < 8; ++ {
			if &(1<<) == 0 {
				continue
Find the span for this bit. This value is guaranteed to be non-nil because having specials implies that the span is in-use, and since we're currently marking we can be sure that we don't have to worry about the span being freed and re-used.
			 := .spans[+uint()*8+]
The state must be mSpanInUse if the specials bit is set, so sanity check that.
			if  := .state.get();  != mSpanInUse {
				print("s.state = ", , "\n")
				throw("non in-use span found with specials bit set")
Check that this span was swept (it may be cached or uncached).
sweepgen was updated (+2) during non-checkmark GC pass
				print("sweep ", .sweepgen, " ", , "\n")
				throw("gc: unswept span")
			}
Lock the specials to prevent a special from being removed from the list while we're traversing it.
			lock(&.speciallock)
			for  := .specials;  != nil;  = .next {
				if .kind != _KindSpecialFinalizer {
					continue
Don't mark the finalized object, but scan it so we retain everything it points to.
A finalizer can be set for an inner byte of an object; find the object's beginning.
				 := .base() + uintptr(.special.offset)/.elemsize*.elemsize
Mark everything that can be reached from the object (but *not* the object itself or we'll never collect it).
				scanobject(, )
The special itself is a root.
gcAssistAlloc performs GC work to make gp's assist debt positive. gp must be the calling user goroutine. This must be called with preemption enabled.
func gcAssistAlloc(gp *g) {
Don't assist in non-preemptible contexts. These are generally fragile and won't allow the assist to block.
	if getg() == .m.g0 {
		return
	}
	if  := getg().m; .locks > 0 || .preemptoff != "" {
		return
	}

	 := false
Compute the amount of scan work we need to do to make the balance positive. When the required amount of work is low, we over-assist to build up credit for future allocations and amortize the cost of assisting.
	 := float64frombits(atomic.Load64(&gcController.assistWorkPerByte))
	 := float64frombits(atomic.Load64(&gcController.assistBytesPerWork))
	 := -.gcAssistBytes
	 := int64( * float64())
	if  < gcOverAssistWork {
		 = gcOverAssistWork
		 = int64( * float64())
	}
Steal as much credit as we can from the background GC's scan credit. This is racy and may drop the background credit below 0 if two mutators steal at the same time. This will just cause steals to fail until credit is accumulated again, so in the long run it doesn't really matter, but we do have to handle the negative credit case.
	 := atomic.Loadint64(&gcController.bgScanCredit)
	 := int64(0)
	if  > 0 {
		if  <  {
			 = 
			.gcAssistBytes += 1 + int64(*float64())
		} else {
			 = 
			.gcAssistBytes += 
		}
		atomic.Xaddint64(&gcController.bgScanCredit, -)

		 -= 

We were able to steal all of the credit we needed.
			if  {
				traceGCMarkAssistDone()
			}
			return
		}
	}

	if trace.enabled && ! {
		 = true
		traceGCMarkAssistStart()
	}
Perform assist work
	systemstack(func() {
The user stack may have moved, so this can't touch anything on it until it returns from systemstack.
	})

	 := .param != nil
	.param = nil
	if  {
		gcMarkDone()
	}

We were unable to steal enough credit or perform enough work to pay off the assist debt. We need to do one of these before letting the mutator allocate more to prevent over-allocation. If this is because we were preempted, reschedule and try some more.
		if .preempt {
			Gosched()
			goto 
		}
Add this G to an assist queue and park. When the GC has more background credit, it will satisfy queued assists before flushing to the global credit pool. Note that this does *not* get woken up when more work is added to the work list. The theory is that there wasn't enough work to do anyway, so we might as well let background marking take care of the work that is available.
		if !gcParkAssist() {
			goto 
		}
At this point either background GC has satisfied this G's assist debt, or the GC cycle is over.
	}
	if  {
		traceGCMarkAssistDone()
	}
}
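Illustrative sketch, not the runtime's code, of the debt arithmetic described above: convert an assist debt in bytes to scan work, over-assist, and steal what we can from a background credit pool. gcOverAssistWork and the controller's conversion factors are passed in as plain parameters here; the name planAssist is invented.
func planAssist(debtBytes int64, workPerByte, bytesPerWork float64, overAssist int64, bgCredit *int64) (scanWork, stolenBytes int64) {
	scanWork = int64(workPerByte * float64(debtBytes))
	if scanWork < overAssist {
		scanWork = overAssist // build up credit for future allocations
		debtBytes = int64(bytesPerWork * float64(scanWork))
	}
	// Steal up to scanWork units from the background credit pool.
	// (The runtime does this with a racy atomic; a plain variable is used here for clarity.)
	if *bgCredit > 0 {
		stolen := scanWork
		if *bgCredit < stolen {
			stolen = *bgCredit
		}
		*bgCredit -= stolen
		scanWork -= stolen
		stolenBytes = int64(bytesPerWork * float64(stolen))
	}
	return scanWork, stolenBytes
}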
gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system stack. This is a separate function to make it easier to see that we're not capturing anything from the user stack, since the user stack may move while we're in this function. gcAssistAlloc1 indicates whether this assist completed the mark phase by setting gp.param to non-nil. This can't be communicated on the stack since it may move.
go:systemstack
func gcAssistAlloc1(gp *g, scanWork int64) {
Clear the flag indicating that this assist completed the mark phase.
	.param = nil

The gcBlackenEnabled check in malloc races with the store that clears it, but an atomic check in every malloc would be a performance hit. Instead we recheck it here on the non-preemptible system stack to determine if we should perform an assist.
GC is done, so ignore any remaining debt.
		.gcAssistBytes = 0
		return
Track time spent in this assist. Since we're on the system stack, this is non-preemptible, so we can just measure start and end time.
	 := nanotime()

	 := atomic.Xadd(&work.nwait, -1)
	if  == work.nproc {
		println("runtime: work.nwait =", , "work.nproc=", work.nproc)
		throw("nwait > work.nprocs")
	}
gcDrainN requires the caller to be preemptible.
drain own cached work first in the hopes that it will be more cache friendly.
	 := &getg().m.p.ptr().gcw
	 := gcDrainN(, )

	casgstatus(, _Gwaiting, _Grunning)
Record that we did this much scan work. Back out the number of bytes of assist credit that this scan work counts for. The "1+" is a poor man's round-up, to ensure this adds credit even if assistBytesPerWork is very low.
	 := float64frombits(atomic.Load64(&gcController.assistBytesPerWork))
	.gcAssistBytes += 1 + int64(*float64())
If this is the last worker and we ran out of work, signal a completion point.
	 := atomic.Xadd(&work.nwait, +1)
	if  > work.nproc {
		println("runtime: work.nwait=", ,
			"work.nproc=", work.nproc)
		throw("work.nwait > work.nproc")
	}

This has reached a background completion point. Set gp.param to a non-nil value to indicate this. It doesn't matter what we set it to (it just has to be a valid pointer).
		.param = unsafe.Pointer()
	}
	 := nanotime() - 
	 := .m.p.ptr()
	.gcAssistTime += 
	if .gcAssistTime > gcAssistTimeSlack {
		atomic.Xaddint64(&gcController.assistTime, .gcAssistTime)
		.gcAssistTime = 0
	}
}
gcWakeAllAssists wakes all currently blocked assists. This is used at the end of a GC cycle. gcBlackenEnabled must be false to prevent new assists from going to sleep after this point.
gcParkAssist puts the current goroutine on the assist queue and parks. gcParkAssist reports whether the assist is now satisfied. If it returns false, the caller must retry the assist.
go:nowritebarrier
func gcParkAssist() bool {
If the GC cycle finished while we were getting the lock, exit the assist. The cycle can't finish while we hold the lock.
Recheck for background credit now that this G is in the queue, but can still back out. This avoids a race in case background marking has flushed more credit since we checked above.
	if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
		work.assistQueue.q = 
		if .tail != 0 {
			.tail.ptr().schedlink.set(nil)
		}
		unlock(&work.assistQueue.lock)
		return false
gcFlushBgCredit flushes scanWork units of background scan work credit. This first satisfies blocked assists on the work.assistQueue and then flushes any remaining credit to gcController.bgScanCredit. Write barriers are disallowed because this is used by gcDrain after it has ensured that all work is drained and this must preserve that condition.
go:nowritebarrierrec
func gcFlushBgCredit(scanWork int64) {
Fast path; there are no blocked assists. There's a small window here where an assist may add itself to the blocked queue and park. If that happens, we'll just get it on the next flush.
		atomic.Xaddint64(&gcController.bgScanCredit, )
		return
	}

	 := float64frombits(atomic.Load64(&gcController.assistBytesPerWork))
	 := int64(float64() * )

	lock(&work.assistQueue.lock)
	for !work.assistQueue.q.empty() &&  > 0 {
Note that gp.gcAssistBytes is negative because gp is in debt. Think carefully about the signs below.
Satisfy this entire assist debt.
			 += .gcAssistBytes
It's important that we *not* put gp in runnext. Otherwise, it's possible for user code to exploit the GC worker's high scheduler priority to get itself always run before other goroutines and always in the fresh quantum started by GC.
			ready(, 0, false)
Partially satisfy this assist.
			.gcAssistBytes += 
As a heuristic, we move this assist to the back of the queue so that large assists can't clog up the assist queue and substantially delay small assists.
			work.assistQueue.q.pushBack()
			break
		}
	}

Convert from scan bytes back to work.
		 := float64frombits(atomic.Load64(&gcController.assistWorkPerByte))
		 = int64(float64() * )
		atomic.Xaddint64(&gcController.bgScanCredit, )
	}
	unlock(&work.assistQueue.lock)
}
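Illustrative sketch of the sign convention used above: a goroutine's assist balance is negative while it is in debt, and a credit flush satisfies queued debts in order until it runs out. The slice of int64 debts is an invented stand-in for the assist queue.
func flushCredit(creditBytes int64, debts []int64) (remaining int64) {
	for i := range debts {
		if creditBytes <= 0 {
			break
		}
		if creditBytes+debts[i] >= 0 {
			// Satisfy this entire debt and move on.
			creditBytes += debts[i]
			debts[i] = 0
		} else {
			// Partially satisfy the debt and stop.
			debts[i] += creditBytes
			creditBytes = 0
		}
	}
	return creditBytes
}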
scanstack scans gp's stack, greying all pointers found on the stack. scanstack will also shrink the stack if it is safe to do so. If it is not, it schedules a stack shrink for the next synchronous safe point. scanstack is marked go:systemstack because it must not be preempted while using a workbuf.
go:nowritebarrier
go:systemstack
func scanstack(gp *g, gcw *gcWork) {
	if readgstatus()&_Gscan == 0 {
		print("runtime:scanstack: gp=", , ", goid=", .goid, ", gp->atomicstatus=", hex(readgstatus()), "\n")
		throw("scanstack - bad status")
	}

	switch readgstatus() &^ _Gscan {
	default:
		print("runtime: gp=", , ", goid=", .goid, ", gp->atomicstatus=", readgstatus(), "\n")
		throw("mark - bad status")
	case _Gdead:
		return
	case _Grunning:
		print("runtime: gp=", , ", goid=", .goid, ", gp->atomicstatus=", readgstatus(), "\n")
		throw("scanstack: goroutine not stopped")
ok
	}

	if  == getg() {
		throw("can't scan our own stack")
	}

Shrink the stack if not much of it is being used.
Otherwise, shrink the stack at the next sync safe point.
		.preemptShrink = true
	}

	var  stackScanState
	.stack = .stack

	if stackTraceDebug {
		println("stack trace goroutine", .goid)
	}

	if debugScanConservative && .asyncSafePoint {
		print("scanning async preempted goroutine ", .goid, " stack [", hex(.stack.lo), ",", hex(.stack.hi), ")\n")
	}
Scan the saved context register. This is effectively a live register that gets moved back and forth between the register and sched.ctxt without a write barrier.
	if .sched.ctxt != nil {
		scanblock(uintptr(unsafe.Pointer(&.sched.ctxt)), sys.PtrSize, &oneptrmask[0], , &)
	}
Scan the stack. Accumulate a list of stack objects.
	 := func( *stkframe,  unsafe.Pointer) bool {
		scanframeworker(, &, )
		return true
	}
	gentraceback(^uintptr(0), ^uintptr(0), 0, , 0, nil, 0x7fffffff, , nil, 0)
Find additional pointers that point into the stack from the heap. Currently this includes defers and panics. See also function copystack.
Find and trace all defer arguments.
	tracebackdefers(, , nil)
Find and trace other pointers in defer records.
	for  := ._defer;  != nil;  = .link {
tracebackdefers above does not scan the func value, which could be a stack allocated closure. See issue 30453.
			scanblock(uintptr(unsafe.Pointer(&.fn)), sys.PtrSize, &oneptrmask[0], , &)
		}
The link field of a stack-allocated defer record might point to a heap-allocated defer record. Keep that heap record live.
Retain defer records themselves. Defer records might not be reachable from the G through regular heap tracing because the defer linked list might weave between the stack and the heap.
		if .heap {
			scanblock(uintptr(unsafe.Pointer(&)), sys.PtrSize, &oneptrmask[0], , &)
		}
	}
Panics are always stack allocated.
Find and scan all reachable stack objects. The state's pointer queue prioritizes precise pointers over conservative pointers so that we'll prefer scanning stack objects precisely.
	.buildIndex()
	for {
		,  := .getPtr()
		if  == 0 {
			break
		}
		 := .findObject()
		if  == nil {
			continue
		}
		 := .typ
We've already scanned this object.
			continue
		}
		.setType(nil) // Don't scan it again.
		if stackTraceDebug {
			printlock()
			print("  live stkobj at", hex(.stack.lo+uintptr(.off)), "of type", .string())
			if  {
				print(" (conservative)")
			}
			println()
			printunlock()
		}
		 := .gcdata
		var  *mspan
This path is pretty unlikely, an object large enough to have a GC program allocated on the stack. We need some space to unpack the program into a straight bitmask, which we allocate/free here. TODO: it would be nice if there were a way to run a GC program without having to store all its bits. We'd have to change from a Lempel-Ziv style program to something else. Or we can forbid putting objects on stacks if they require a gc program (see issue 27447).
			 = materializeGCProg(.ptrdata, )
			 = (*byte)(unsafe.Pointer(.startAddr))
		}

		 := .stack.lo + uintptr(.off)
		if  {
			scanConservative(, .ptrdata, , , &)
		} else {
			scanblock(, .ptrdata, , , &)
		}

		if  != nil {
			dematerializeGCProg()
		}
	}
Deallocate object buffers. (Pointer buffers were all deallocated in the loop above.)
	for .head != nil {
		 := .head
		.head = .next
		if stackTraceDebug {
			for  := 0;  < .nobj; ++ {
				 := &.obj[]
				if .typ == nil { // reachable
					continue
				}
Note: not necessarily really dead - only reachable-from-ptr dead.
			}
		}
		.nobj = 0
		putempty((*workbuf)(unsafe.Pointer()))
	}
	if .buf != nil || .cbuf != nil || .freeBuf != nil {
		throw("remaining pointer buffers")
	}
}
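Illustrative sketch, not the runtime's type, of the precise-before-conservative ordering that the stackScanState pointer queue provides to the loop above: precise pointers are always drained first.
type ptrQueues struct {
	precise, conservative []uintptr
}

func (q *ptrQueues) next() (p uintptr, conservative, ok bool) {
	if n := len(q.precise); n > 0 {
		p, q.precise = q.precise[0], q.precise[1:]
		return p, false, true
	}
	if n := len(q.conservative); n > 0 {
		p, q.conservative = q.conservative[0], q.conservative[1:]
		return p, true, true
	}
	return 0, false, false
}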
Scan a stack frame: local variables and function arguments/results.
go:nowritebarrier
func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork) {
	if _DebugGC > 1 && .continpc != 0 {
		print("scanframe ", funcname(.fn), "\n")
	}

	 := .fn.valid() && .fn.funcID == funcID_asyncPreempt
	 := .fn.valid() && .fn.funcID == funcID_debugCallV1
	if .conservative ||  ||  {
		if debugScanConservative {
			println("conservatively scanning function", funcname(.fn), "at PC", hex(.continpc))
		}
Conservatively scan the frame. Unlike the precise case, this includes the outgoing argument space since we may have stopped while this function was setting up a call. TODO: We could narrow this down if the compiler produced a single map per function of stack slots and registers that ever contain a pointer.
		if .varp != 0 {
			 := .varp - .sp
			if  > 0 {
				scanConservative(.sp, , nil, , )
			}
		}
Scan arguments to this frame.
TODO: We could pass the entry argument map to narrow this down further.
			scanConservative(.argp, .arglen, nil, , )
		}

This function's frame contained the registers for the asynchronously stopped parent frame. Scan the parent conservatively.
			.conservative = true
We only wanted to scan those two frames conservatively. Clear the flag for future frames.
			.conservative = false
		}
		return
	}

	, ,  := getStackMap(, &.cache, false)
Scan local variables if stack frame has been allocated.
	if .n > 0 {
		 := uintptr(.n) * sys.PtrSize
		scanblock(.varp-, , .bytedata, , )
	}
Scan arguments.
	if .n > 0 {
		scanblock(.argp, uintptr(.n)*sys.PtrSize, .bytedata, , )
	}
Add all stack objects to the stack object list.
varp is 0 for defers, where there are no locals. In that case, there can't be a pointer to its args, either. (And all args would be scanned above anyway.)
		for ,  := range  {
			 := .off
			 := .varp // locals base pointer
			if  >= 0 {
				 = .argp // arguments and return values base pointer
			}
			 :=  + uintptr()
object hasn't been allocated in the frame yet.
				continue
			}
			if stackTraceDebug {
				println("stkobj at", hex(), "of type", .typ.string())
			}
			.addObject(, .typ)
		}
	}
}

type gcDrainFlags int

const (
	gcDrainUntilPreempt gcDrainFlags = 1 << iota
	gcDrainFlushBgCredit
	gcDrainIdle
	gcDrainFractional
)
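A rough, illustrative mapping from worker modes to flag combinations. The real call sites live in mgc.go and may combine the flags differently; this is only to show that the flags are a bitmask meant to be OR'd together.
func drainFlagsFor(mode string) gcDrainFlags {
	switch mode {
	case "dedicated":
		return gcDrainUntilPreempt | gcDrainFlushBgCredit
	case "fractional":
		return gcDrainFractional | gcDrainUntilPreempt | gcDrainFlushBgCredit
	case "idle":
		return gcDrainIdle | gcDrainUntilPreempt | gcDrainFlushBgCredit
	default:
		return 0
	}
}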
gcDrain scans roots and objects in work buffers, blackening grey objects until it is unable to get more work. It may return before GC is done; it's the caller's responsibility to balance work from other Ps. If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt is set. If flags&gcDrainIdle != 0, gcDrain returns when there is other work to do. If flags&gcDrainFractional != 0, gcDrain self-preempts when pollFractionalWorkerExit() returns true. If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work credit to gcController.bgScanCredit every gcCreditSlack units of scan work. gcDrain will always return if there is a pending STW.
go:nowritebarrier
func gcDrain(gcw *gcWork, flags gcDrainFlags) {
	if !writeBarrier.needed {
		throw("gcDrain phase incorrect")
	}

	 := getg().m.curg
	 := &gcDrainUntilPreempt != 0
	 := &gcDrainFlushBgCredit != 0
	 := &gcDrainIdle != 0

	 := .scanWork
checkWork is the scan work before performing the next self-preempt check.
	 := int64(1<<63 - 1)
	var  func() bool
	if &(gcDrainIdle|gcDrainFractional) != 0 {
		 =  + drainCheckThreshold
		if  {
			 = pollWork
		} else if &gcDrainFractional != 0 {
			 = pollFractionalWorkerExit
		}
	}
Drain root marking jobs.
Stop if we're preemptible or if someone wants to STW.
		for !(.preempt && ( || atomic.Load(&sched.gcwaiting) != 0)) {
			 := atomic.Xadd(&work.markrootNext, +1) - 1
			if  >= work.markrootJobs {
				break
			}
			markroot(, )
			if  != nil && () {
				goto 
			}
		}
	}
Drain heap marking jobs. Stop if we're preemptible or if someone wants to STW.
Try to keep work available on the global queue. We used to check if there were waiting workers, but it's better to just keep work available than to make workers wait. In the worst case, we'll do O(log(_WorkbufSize)) unnecessary balances.
		if work.full == 0 {
			.balance()
		}

		 := .tryGetFast()
		if  == 0 {
			 = .tryGet()
Flush the write barrier buffer; this may create more work.
				wbBufFlush(nil, 0)
				 = .tryGet()
			}
		}
Unable to get work.
			break
		}
		scanobject(, )
Flush background scan work credit to the global account if we've accumulated enough locally so mutator assists can draw on it.
		if .scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, .scanWork)
			if  {
				gcFlushBgCredit(.scanWork - )
				 = 0
			}
			 -= .scanWork
			.scanWork = 0

			if  <= 0 {
				 += drainCheckThreshold
				if  != nil && () {
					break
				}
			}
		}
	}

Flush remaining scan work credit.
	if .scanWork > 0 {
		atomic.Xaddint64(&gcController.scanWork, .scanWork)
		if  {
			gcFlushBgCredit(.scanWork - )
		}
		.scanWork = 0
	}
}
gcDrainN blackens grey objects until it has performed roughly scanWork units of scan work or the G is preempted. This is best-effort, so it may perform less work if it fails to get a work buffer. Otherwise, it will perform at least n units of work, but may perform more because scanning is always done in whole object increments. It returns the amount of scan work performed. The caller goroutine must be in a preemptible state (e.g., _Gwaiting) to prevent deadlocks during stack scanning. As a consequence, this must be called on the system stack.
go:nowritebarrier
go:systemstack
func gcDrainN(gcw *gcWork, scanWork int64) int64 {
	if !writeBarrier.needed {
		throw("gcDrainN phase incorrect")
	}
There may already be scan work on the gcw, which we don't want to claim was done by this call.
	 := -.scanWork

	 := getg().m.curg
See gcDrain comment.
		if work.full == 0 {
			.balance()
		}
This might be a good place to add prefetch code... if(wbuf.nobj > 4) { PREFETCH(wbuf->obj[wbuf.nobj - 3]; }
		 := .tryGetFast()
		if  == 0 {
			 = .tryGet()
Flush the write barrier buffer; this may create more work.
				wbBufFlush(nil, 0)
				 = .tryGet()
			}
		}

Try to do a root job. TODO: Assists should get credit for this work.
			if work.markrootNext < work.markrootJobs {
				 := atomic.Xadd(&work.markrootNext, +1) - 1
				if  < work.markrootJobs {
					markroot(, )
					continue
				}
No heap or root jobs.
			break
		}
		scanobject(, )
Flush background scan work credit.
		if .scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, .scanWork)
			 += .scanWork
			.scanWork = 0
		}
	}
Unlike gcDrain, there's no need to flush remaining work here because this never flushes to bgScanCredit and gcw.dispose will flush any remaining work to scanWork.

	return  + .scanWork
}
scanblock scans b as scanobject would, but using an explicit pointer bitmap instead of the heap bitmap. This is used to scan non-heap roots, so it does not update gcw.bytesMarked or gcw.scanWork. If stk != nil, possible stack pointers are also reported to stk.putPtr.
go:nowritebarrier
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
Use local copies of original parameters, so that a stack trace due to one of the throws below shows the original block base and extent.
	b := b0
	n := n0

	for i := uintptr(0); i < n; {
Find bits for the next word.
		bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
		if bits == 0 {
			i += sys.PtrSize * 8
			continue
		}
		for j := 0; j < 8 && i < n; j++ {
			if bits&1 != 0 {
Same work as in scanobject; see comments there.
				p := *(*uintptr)(unsafe.Pointer(b + i))
				if p != 0 {
					if obj, span, objIndex := findObject(p, b, i); obj != 0 {
						greyobject(obj, b, i, span, gcw, objIndex)
					} else if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
						stk.putPtr(p, false)
					}
				}
			}
			bits >>= 1
			i += sys.PtrSize
		}
	}
}
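Illustrative, self-contained version of the same 1-bit-per-word mask walk, using a byte slice instead of unsafe pointer arithmetic. wordSize stands in for sys.PtrSize and the function name is invented; it only reports which word offsets hold pointers.
func pointerWords(n uintptr, ptrmask []byte, wordSize uintptr) []uintptr {
	var offs []uintptr
	for i := uintptr(0); i < n; {
		bits := uint32(ptrmask[i/(wordSize*8)])
		if bits == 0 {
			i += wordSize * 8 // a whole mask byte of scalar words
			continue
		}
		for j := 0; j < 8 && i < n; j++ {
			if bits&1 != 0 {
				offs = append(offs, i) // word at offset i holds a pointer
			}
			bits >>= 1
			i += wordSize
		}
	}
	return offs
}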
scanobject scans the object starting at b, adding pointers to gcw. b must point to the beginning of a heap object or an oblet. scanobject consults the GC bitmap for the pointer mask and the spans for the size of the object.
go:nowritebarrier
func scanobject(b uintptr, gcw *gcWork) {
Find the bits for b and the size of the object at b. b is either the beginning of an object, in which case this is the size of the object to scan, or it points to an oblet, in which case we compute the size to scan below.
	 := heapBitsForAddr()
	 := spanOfUnchecked()
	 := .elemsize
	if  == 0 {
		throw("scanobject n == 0")
	}

Large object. Break into oblets for better parallelism and lower latency.
It's possible this is a noscan object (not from greyobject, but from other code paths), in which case we must *not* enqueue oblets since their bitmaps will be uninitialized.
Bypass the whole scan.
				.bytesMarked += uint64()
				return
			}
Enqueue the other oblets to scan later. Some oblets may be in b's scalar tail, but these will be marked as "no more pointers", so we'll drop out immediately when we go to scan those.
			for  :=  + maxObletBytes;  < .base()+.elemsize;  += maxObletBytes {
				if !.putFast() {
					.put()
				}
			}
		}
Compute the size of the oblet. Since this object must be a large object, s.base() is the beginning of the object.
		 = .base() + .elemsize - 
		if  > maxObletBytes {
			 = maxObletBytes
		}
	}

	var  uintptr
Find bits for this word.
Avoid needless hbits.next() on last iteration.
			 = .next()
Load bits once. See CL 22712 and issue 16973 for discussion.
		 := .bits()
		if &bitScan == 0 {
			break // no more pointers in this object
		}
		if &bitPointer == 0 {
			continue // not a pointer
		}
Work here is duplicated in scanblock and above. If you make changes here, make changes there too.
		 := *(*uintptr)(unsafe.Pointer( + ))
At this point we have extracted the next potential pointer. Quickly filter out nil and pointers back to the current object.
Test if obj points into the Go heap and, if so, mark the object. Note that it's possible for findObject to fail if obj points to a just-allocated heap object because of a race with growing the heap. In this case, we know the object was just allocated and hence will be marked by allocation itself.
			if , ,  := findObject(, , );  != 0 {
				greyobject(, , , , , )
			}
		}
	}
	.bytesMarked += uint64()
	.scanWork += int64()
}
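Illustrative sketch of the oblet split performed above; obletBytes stands in for maxObletBytes and obletStarts is an invented helper. The amount to scan for an oblet starting at b is then min(obletBytes, base+size-b), which is what the size computation above does.
func obletStarts(base, size, obletBytes uintptr) []uintptr {
	starts := []uintptr{base}
	for oblet := base + obletBytes; oblet < base+size; oblet += obletBytes {
		starts = append(starts, oblet)
	}
	return starts
}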
scanConservative scans block [b, b+n) conservatively, treating any pointer-like value in the block as a pointer. If ptrmask != nil, only words that are marked in ptrmask are considered as potential pointers. If state != nil, it's assumed that [b, b+n) is a block in the stack and may contain pointers to stack objects.
func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState) {
	if debugScanConservative {
		printlock()
		print("conservatively scanning [", hex(), ",", hex(+), ")\n")
		hexdumpWords(, +, func( uintptr) byte {
			if  != nil {
				 := ( - ) / sys.PtrSize
				 := *addb(, /8)
				if (>>(%8))&1 == 0 {
					return '$'
				}
			}

			 := *(*uintptr)(unsafe.Pointer())
			if  != nil && .stack.lo <=  &&  < .stack.hi {
				return '@'
			}

			 := spanOfHeap()
			if  == nil {
				return ' '
			}
			 := .objIndex()
			if .isFree() {
				return ' '
			}
			return '*'
		})
		printunlock()
	}

	for  := uintptr(0);  < ;  += sys.PtrSize {
		if  != nil {
			 :=  / sys.PtrSize
			 := *addb(, /8)
Skip 8 words (the loop increment will do the 8th). This must be the first time we've seen this word of ptrmask, so i must be 8-word-aligned, but check our reasoning just in case.
				if %(sys.PtrSize*8) != 0 {
					throw("misaligned mask")
				}
				 += sys.PtrSize*8 - sys.PtrSize
				continue
			}
			if (>>(%8))&1 == 0 {
				continue
			}
		}

		 := *(*uintptr)(unsafe.Pointer( + ))
Check if val points into the stack.
val may point to a stack object. This object may be dead from last cycle and hence may contain pointers to unallocated objects, but unlike heap objects we can't tell if it's already dead. Hence, if all pointers to this object are from conservative scanning, we have to scan it defensively, too.
			.putPtr(, true)
			continue
		}
Check if val points to a heap span.
		 := spanOfHeap()
		if  == nil {
			continue
		}
Check if val points to an allocated object.
		 := .objIndex()
		if .isFree() {
			continue
		}
val points to an allocated object. Mark it.
		 := .base() + *.elemsize
		greyobject(, , , , , )
	}
}
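Illustrative sketch of the per-word filter conservative scanning applies. inHeapSpan and isAllocated are hypothetical stand-ins for spanOfHeap and the free-slot check; the runtime itself queues or greys the pointer rather than returning a string.
func classifyWord(val, stackLo, stackHi uintptr,
	inHeapSpan func(uintptr) bool, isAllocated func(uintptr) bool) string {
	switch {
	case stackLo <= val && val < stackHi:
		return "possible stack object pointer" // queued via putPtr(val, true)
	case !inHeapSpan(val):
		return "not a heap pointer" // ignored
	case !isAllocated(val):
		return "points at a free object slot" // ignored
	default:
		return "heap object pointer" // greyed
	}
}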
Shade the object if it isn't already. The object is not nil and known to be in the heap. Preemption must be disabled.
go:nowritebarrier
func shade(b uintptr) {
	if obj, span, objIndex := findObject(b, 0, 0); obj != 0 {
		gcw := &getg().m.p.ptr().gcw
		greyobject(obj, 0, 0, span, gcw, objIndex)
	}
}
obj is the start of an object with mark mbits. If it isn't already marked, mark it and enqueue it into gcw. base and off are for debugging only and could be removed. See also wbBufFlush1, which partially duplicates this logic.
go:nowritebarrierrec
func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr) {
obj should be start of allocation, and so must be at least pointer-aligned.
	if &(sys.PtrSize-1) != 0 {
		throw("greyobject: obj not pointer-aligned")
	}
	 := .markBitsForIndex()

	if useCheckmark {
Already marked.
			return
		}
	} else {
		if debug.gccheckmark > 0 && .isFree() {
			print("runtime: marking free object ", hex(), " found at *(", hex(), "+", hex(), ")\n")
			gcDumpObject("base", , )
			gcDumpObject("obj", , ^uintptr(0))
			getg().m.traceback = 2
			throw("marking free object")
		}
If marked we have nothing to do.
		if .isMarked() {
			return
		}
		.setMarked()
Mark span.
		, ,  := pageIndexOf(.base())
		if .pageMarks[]& == 0 {
			atomic.Or8(&.pageMarks[], )
		}
If this is a noscan object, fast-track it to black instead of greying it.
		if .spanclass.noscan() {
			.bytesMarked += uint64(.elemsize)
			return
		}
	}
Queue the obj for scanning. The PREFETCH(obj) logic has been removed but seems like a nice optimization that can be added back in. There needs to be time between the PREFETCH and the use. Previously we put the obj in an 8 element buffer that is drained at a rate to give the PREFETCH time to do its work. Use of PREFETCHNTA might be more appropriate than PREFETCH.
	if !.putFast() {
		.put()
	}
}
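Illustrative sketch of the mark-or-skip decision above, with a plain byte slice standing in for the span's mark bits and an invented queue callback in place of the gcWork put.
func markAndMaybeQueue(markBits []uint8, objIndex uintptr, noscan bool, queue func(uintptr)) {
	byteIdx, mask := objIndex/8, uint8(1)<<(objIndex%8)
	if markBits[byteIdx]&mask != 0 {
		return // already marked (grey or black)
	}
	markBits[byteIdx] |= mask
	if noscan {
		return // no pointers inside: effectively black, nothing to scan
	}
	queue(objIndex) // grey: queue for scanning
}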
gcDumpObject dumps the contents of obj for debugging and marks the field at byte offset off in obj.
func gcDumpObject(label string, obj, off uintptr) {
	 := spanOf()
	print(, "=", hex())
	if  == nil {
		print(" s=nil\n")
		return
	}
	print(" s.base()=", hex(.base()), " s.limit=", hex(.limit), " s.spanclass=", .spanclass, " s.elemsize=", .elemsize, " s.state=")
	if  := .state.get(); 0 <=  && int() < len(mSpanStateNames) {
		print(mSpanStateNames[], "\n")
	} else {
		print("unknown(", , ")\n")
	}

	 := false
	 := .elemsize
We're printing something from a stack frame. We don't know how big it is, so just show up to and including off.
		 =  + sys.PtrSize
	}
For big objects, just print the beginning (because that usually hints at the object's type) and the fields around off.
		if !( < 128*sys.PtrSize || -16*sys.PtrSize <  &&  < +16*sys.PtrSize) {
			 = true
			continue
		}
		if  {
			print(" ...\n")
			 = false
		}
		print(" *(", , "+", , ") = ", hex(*(*uintptr)(unsafe.Pointer( + ))))
		if  ==  {
			print(" <==")
		}
		print("\n")
	}
	if  {
		print(" ...\n")
	}
}
gcmarknewobject marks a newly allocated object black. obj must not contain any non-nil pointers. This is nosplit so it can manipulate a gcWork without preemption.
go:nowritebarrier
go:nosplit
func gcmarknewobject(span *mspan, obj, size, scanSize uintptr) {
	if useCheckmark { // The world should be stopped so this should not happen.
		throw("gcmarknewobject called while doing checkmark")
	}
Mark object.
	 := .objIndex()
	.markBitsForIndex().setMarked()
Mark span.
	, ,  := pageIndexOf(.base())
	if .pageMarks[]& == 0 {
		atomic.Or8(&.pageMarks[], )
	}

	 := &getg().m.p.ptr().gcw
	.bytesMarked += uint64()
	.scanWork += int64()
}
gcMarkTinyAllocs greys all active tiny alloc blocks. The world must be stopped.
func gcMarkTinyAllocs() {
	assertWorldStopped()

	for _, p := range allp {
		c := p.mcache
		if c == nil || c.tiny == 0 {
			continue
		}
		_, span, objIndex := findObject(c.tiny, 0, 0)
		gcw := &p.gcw
		greyobject(c.tiny, 0, 0, span, gcw, objIndex)
	}
}