var runtime.work
203 uses
runtime (current package)
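Every entry below is an access of the package-level work struct declared at mgc.go#L1009, which holds the garbage collector's per-cycle coordination state. As an aid to reading the list, here is a rough, partial sketch of that struct reconstructed only from the field accesses shown on this page; the field types, the grouping, and the omitted fields are approximations, not the runtime's actual declaration.

package main

import "sync"

// workState is an illustrative stand-in for runtime.work. Field names mirror
// the uses listed on this page; types are approximations of the real ones.
type workState struct {
	full, empty uint64 // lock-free lists of full/empty work buffers (real type: lfstack)
	bytesMarked uint64 // bytes of heap marked during the current cycle

	markrootNext uint32 // next root-scanning job to claim
	markrootJobs uint32 // total number of root-scanning jobs

	nproc, nwait uint32 // number of mark workers / workers currently idle

	nFlushCacheRoots, nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int

	startSema, markDoneSema uint32 // semaphores guarding gcStart / gcMarkDone

	userForced bool   // cycle was triggered by a user call to runtime.GC
	cycles     uint32 // count of completed GC cycles

	// Timing and heap statistics for the current cycle.
	stwprocs, maxprocs                         int32
	tstart, tSweepTerm, tMark, tMarkTerm, tEnd int64
	pauseNS, pauseStart                        int64
	heap0, heap1, heap2                        uint64
	heapGoal, initialHeapLive                  uint64
	totaltime                                  int64

	// Goroutines blocked in runtime.GC waiting for a mark phase to finish.
	sweepWaiters struct {
		lock sync.Mutex // real type: runtime-internal mutex
		// list of parked goroutines omitted
	}

	// Goroutines blocked in mark assists waiting for scan credit.
	assistQueue struct {
		lock sync.Mutex
		// queue of parked goroutines omitted
	}

	// Spans backing work buffers, split into free and busy lists.
	wbufSpans struct {
		lock       sync.Mutex
		free, busy struct{} // real type: mSpanList
	}
	// bgMarkReady note and other fields omitted.
}

func main() { _ = workState{} }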
mgc.go#L192: work.startSema = 1
mgc.go#L193: work.markDoneSema = 1
mgc.go#L194: lockInit(&work.sweepWaiters.lock, lockRankSweepWaiters)
mgc.go#L195: lockInit(&work.assistQueue.lock, lockRankAssistQueue)
mgc.go#L196: lockInit(&work.wbufSpans.lock, lockRankWbufSpans)
mgc.go#L243: gcWaitOnMark(atomic.Load(&work.cycles))
mgc.go#L495: work.initialHeapLive>>20, "->",
mgc.go#L604: if work.userForced {
mgc.go#L895: print("runtime: next_gc=", memstats.next_gc, " heap_marked=", memstats.heap_marked, " heap_live=", memstats.heap_live, " initialHeapLive=", work.initialHeapLive, "triggerRatio=", triggerRatio, " minTrigger=", minTrigger, "\n")
mgc.go#L1009: var work struct {
mgc.go#L1153: n := atomic.Load(&work.cycles)
mgc.go#L1168: for atomic.Load(&work.cycles) == n+1 && sweepone() != ^uintptr(0) {
mgc.go#L1184: for atomic.Load(&work.cycles) == n+1 && atomic.Load(&mheap_.sweepers) != 0 {
mgc.go#L1192: cycle := atomic.Load(&work.cycles)
mgc.go#L1204: lock(&work.sweepWaiters.lock)
mgc.go#L1205: nMarks := atomic.Load(&work.cycles)
mgc.go#L1212: unlock(&work.sweepWaiters.lock)
mgc.go#L1218: work.sweepWaiters.list.push(getg())
mgc.go#L1219: goparkunlock(&work.sweepWaiters.lock, waitReasonWaitForGCCycle, traceEvGoBlock, 1)
mgc.go#L1281: return int32(t.n-work.cycles) > 0
mgc.go#L1321: semacquire(&work.startSema)
mgc.go#L1324: semrelease(&work.startSema)
mgc.go#L1329: work.userForced = trigger.kind == gcTriggerCycle
mgc.go#L1362: work.stwprocs, work.maxprocs = gomaxprocs, gomaxprocs
mgc.go#L1363: if work.stwprocs > ncpu {
mgc.go#L1366: work.stwprocs = ncpu
mgc.go#L1368: work.heap0 = atomic.Load64(&memstats.heap_live)
mgc.go#L1369: work.pauseNS = 0
mgc.go#L1370: work.mode = mode
mgc.go#L1373: work.tSweepTerm = now
mgc.go#L1374: work.pauseStart = now
mgc.go#L1388: work.cycles++
mgc.go#L1391: work.heapGoal = memstats.next_gc
mgc.go#L1444: work.pauseNS += now - work.pauseStart
mgc.go#L1445: work.tMark = now
mgc.go#L1446: memstats.gcPauseDist.record(now - work.pauseStart)
mgc.go#L1462: semrelease(&work.startSema)
mgc.go#L1497: semacquire(&work.markDoneSema)
mgc.go#L1506: if !(gcphase == _GCmark && work.nwait == work.nproc && !gcMarkWorkAvailable(nil)) {
mgc.go#L1507: semrelease(&work.markDoneSema)
mgc.go#L1559: work.tMarkTerm = now
mgc.go#L1560: work.pauseStart = now
mgc.go#L1593: work.pauseNS += now - work.pauseStart
mgc.go#L1594: memstats.gcPauseDist.record(now - work.pauseStart)
mgc.go#L1611: semrelease(&work.markDoneSema)
mgc.go#L1632: work.heap1 = memstats.heap_live
mgc.go#L1660: work.heap2 = work.bytesMarked
mgc.go#L1677: gcSweep(work.mode)
mgc.go#L1705: work.pauseNS += now - work.pauseStart
mgc.go#L1706: work.tEnd = now
mgc.go#L1707: memstats.gcPauseDist.record(now - work.pauseStart)
mgc.go#L1710: memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS)
mgc.go#L1712: memstats.pause_total_ns += uint64(work.pauseNS)
mgc.go#L1715: sweepTermCpu := int64(work.stwprocs) * (work.tMark - work.tSweepTerm)
mgc.go#L1719: markTermCpu := int64(work.stwprocs) * (work.tEnd - work.tMarkTerm)
mgc.go#L1721: work.totaltime += cycleCpu
mgc.go#L1725: memstats.gc_cpu_fraction = float64(work.totaltime) / float64(totalCpu)
mgc.go#L1731: if work.userForced {
mgc.go#L1736: lock(&work.sweepWaiters.lock)
mgc.go#L1738: injectglist(&work.sweepWaiters.list)
mgc.go#L1739: unlock(&work.sweepWaiters.lock)
mgc.go#L1779: " @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ",
mgc.go#L1781: prev := work.tSweepTerm
mgc.go#L1782: for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} {
mgc.go#L1800: work.heap0>>20, "->", work.heap1>>20, "->", work.heap2>>20, " MB, ",
mgc.go#L1801: work.heapGoal>>20, " MB goal, ",
mgc.go#L1802: work.maxprocs, " P")
mgc.go#L1803: if work.userForced {
mgc.go#L1837: notetsleepg(&work.bgMarkReady, -1)
mgc.go#L1838: noteclear(&work.bgMarkReady)
mgc.go#L1858: work.nproc = ^uint32(0)
mgc.go#L1859: work.nwait = ^uint32(0)
mgc.go#L1890: notewakeup(&work.bgMarkReady)
mgc.go#L1961: decnwait := atomic.Xadd(&work.nwait, -1)
mgc.go#L1962: if decnwait == work.nproc {
mgc.go#L1963: println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
mgc.go#L2023: incnwait := atomic.Xadd(&work.nwait, +1)
mgc.go#L2024: if incnwait > work.nproc {
mgc.go#L2026: "work.nwait=", incnwait, "work.nproc=", work.nproc)
mgc.go#L2038: if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
mgc.go#L2057: if !work.full.empty() {
mgc.go#L2060: if work.markrootNext < work.markrootJobs {
mgc.go#L2077: work.tstart = start_time
mgc.go#L2080: if work.full != 0 || work.markrootNext < work.markrootJobs {
mgc.go#L2081: print("runtime: full=", hex(work.full), " next=", work.markrootNext, " jobs=", work.markrootJobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n")
mgc.go#L2090: if work.full != 0 {
mgc.go#L2139: memstats.heap_marked = work.bytesMarked
mgc.go#L2156: memstats.heap_live = work.bytesMarked
mgc.go#L2251: work.bytesMarked = 0
mgc.go#L2252: work.initialHeapLive = atomic.Load64(&memstats.heap_live)
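Among the mgc.go uses above, work.nwait and work.nproc implement the check that decides when marking may be complete (mgc.go#L1506, #L1961-1963 and #L2023-2038 above): a worker decrements nwait before scanning and increments it when it runs out of work, and the worker whose increment brings nwait back up to nproc while no mark work remains is the one that attempts mark completion. The following is a minimal standalone analog of that counting pattern using sync/atomic; it is a sketch, not the runtime's implementation, and the pending counter stands in for gcMarkWorkAvailable.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// markCoordinator mimics work.nproc / work.nwait: nwait starts equal to
// nproc, drops by one while a worker is actively scanning, and rises by one
// when it goes idle.
type markCoordinator struct {
	nproc, nwait uint32
	pending      int64 // illustrative stand-in for gcMarkWorkAvailable
}

func (c *markCoordinator) worker(done *sync.WaitGroup, completed *uint32) {
	defer done.Done()
	for {
		// nwait--: sanity check mirrors the runtime's decnwait == nproc throw.
		if atomic.AddUint32(&c.nwait, ^uint32(0)) == c.nproc {
			panic("nwait was already above nproc")
		}
		gotWork := atomic.AddInt64(&c.pending, -1) >= 0

		// nwait++: sanity check mirrors the runtime's incnwait > nproc throw.
		incnwait := atomic.AddUint32(&c.nwait, 1)
		if incnwait > c.nproc {
			panic("nwait > nproc")
		}
		if !gotWork {
			if incnwait == c.nproc && atomic.LoadInt64(&c.pending) <= 0 {
				// Last worker to go idle with nothing left: signal completion,
				// analogous to calling gcMarkDone.
				atomic.StoreUint32(completed, 1)
			}
			return
		}
	}
}

func main() {
	const workers = 4
	c := &markCoordinator{nproc: workers, nwait: workers, pending: 100}
	var completed uint32
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go c.worker(&wg, &completed)
	}
	wg.Wait()
	fmt.Println("mark completion signalled:", atomic.LoadUint32(&completed) == 1)
}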
mgcmark.go#L59: work.nFlushCacheRoots = 0
mgcmark.go#L66: work.nDataRoots = 0
mgcmark.go#L67: work.nBSSRoots = 0
mgcmark.go#L72: if nDataRoots > work.nDataRoots {
mgcmark.go#L73: work.nDataRoots = nDataRoots
mgcmark.go#L79: if nBSSRoots > work.nBSSRoots {
mgcmark.go#L80: work.nBSSRoots = nBSSRoots
mgcmark.go#L97: work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)
mgcmark.go#L105: work.nStackRoots = int(atomic.Loaduintptr(&allglen))
mgcmark.go#L107: work.markrootNext = 0
mgcmark.go#L108: work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
mgcmark.go#L114: if work.markrootNext < work.markrootJobs {
mgcmark.go#L115: print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
mgcmark.go#L122: for i := 0; i < work.nStackRoots; i++ {
mgcmark.go#L152: baseData := baseFlushCache + uint32(work.nFlushCacheRoots)
mgcmark.go#L153: baseBSS := baseData + uint32(work.nDataRoots)
mgcmark.go#L154: baseSpans := baseBSS + uint32(work.nBSSRoots)
mgcmark.go#L155: baseStacks := baseSpans + uint32(work.nSpanRoots)
mgcmark.go#L156: end := baseStacks + uint32(work.nStackRoots)
mgcmark.go#L201: gp.waitsince = work.tstart
mgcmark.go#L525: decnwait := atomic.Xadd(&work.nwait, -1)
mgcmark.go#L526: if decnwait == work.nproc {
mgcmark.go#L527: println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
mgcmark.go#L553: incnwait := atomic.Xadd(&work.nwait, +1)
mgcmark.go#L554: if incnwait > work.nproc {
mgcmark.go#L556: "work.nproc=", work.nproc)
mgcmark.go#L560: if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
mgcmark.go#L580: lock(&work.assistQueue.lock)
mgcmark.go#L581: list := work.assistQueue.q.popList()
mgcmark.go#L583: unlock(&work.assistQueue.lock)
mgcmark.go#L593: lock(&work.assistQueue.lock)
mgcmark.go#L598: unlock(&work.assistQueue.lock)
mgcmark.go#L603: oldList := work.assistQueue.q
mgcmark.go#L604: work.assistQueue.q.pushBack(gp)
mgcmark.go#L611: work.assistQueue.q = oldList
mgcmark.go#L615: unlock(&work.assistQueue.lock)
mgcmark.go#L619: goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceEvGoBlockGC, 2)
mgcmark.go#L634: if work.assistQueue.q.empty() {
mgcmark.go#L646: lock(&work.assistQueue.lock)
mgcmark.go#L647: for !work.assistQueue.q.empty() && scanBytes > 0 {
mgcmark.go#L648: gp := work.assistQueue.q.pop()
mgcmark.go#L670: work.assistQueue.q.pushBack(gp)
mgcmark.go#L681: unlock(&work.assistQueue.lock)
mgcmark.go#L1007: if work.markrootNext < work.markrootJobs {
mgcmark.go#L1010: job := atomic.Xadd(&work.markrootNext, +1) - 1
mgcmark.go#L1011: if job >= work.markrootJobs {
mgcmark.go#L1029: if work.full == 0 {
mgcmark.go#L1107: if work.full == 0 {
mgcmark.go#L1132: if work.markrootNext < work.markrootJobs {
mgcmark.go#L1133: job := atomic.Xadd(&work.markrootNext, +1) - 1
mgcmark.go#L1134: if job < work.markrootJobs {
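The mgcmark.go uses above show how root scanning is expressed through work: gcMarkRootPrepare counts each root class and stores the total in work.markrootJobs, markroot maps a job index back to a root class via cumulative base offsets (mgcmark.go#L152-156 above), and workers claim jobs with an atomic increment of work.markrootNext (mgcmark.go#L1010 and #L1133). Below is a small, self-contained sketch of that claim-and-dispatch pattern; the root counts are made-up illustrative numbers and the flush-cache root class is omitted for brevity.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// Illustrative root-class sizes; in the runtime these come from
// work.nDataRoots, work.nBSSRoots, work.nSpanRoots and work.nStackRoots.
const (
	fixedRootCount = 2 // illustrative; the runtime has its own fixed roots
	nDataRoots     = 3
	nBSSRoots      = 2
	nSpanRoots     = 4
	nStackRoots    = 5
)

func main() {
	// Cumulative bases, mirroring baseData/baseBSS/baseSpans/baseStacks.
	baseData := uint32(fixedRootCount)
	baseBSS := baseData + nDataRoots
	baseSpans := baseBSS + nBSSRoots
	baseStacks := baseSpans + nSpanRoots
	markrootJobs := baseStacks + nStackRoots // analog of work.markrootJobs

	var markrootNext uint32 // analog of work.markrootNext
	var wg sync.WaitGroup
	for w := 0; w < 3; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				// Claim a job exactly once: Xadd-style increment, then bounds check.
				job := atomic.AddUint32(&markrootNext, 1) - 1
				if job >= markrootJobs {
					return // all root jobs have been handed out
				}
				switch {
				case job < baseData:
					fmt.Println("job", job, "-> fixed root")
				case job < baseBSS:
					fmt.Println("job", job, "-> data segment root", job-baseData)
				case job < baseSpans:
					fmt.Println("job", job, "-> BSS root", job-baseBSS)
				case job < baseStacks:
					fmt.Println("job", job, "-> span root", job-baseSpans)
				default:
					fmt.Println("job", job, "-> goroutine stack", job-baseStacks)
				}
			}
		}()
	}
	wg.Wait()
}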
mgcwork.go#L115: lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
mgcwork.go#L274: atomic.Xadd64(&work.bytesMarked, int64(w.bytesMarked))
mgcwork.go#L350: if work.empty != 0 {
mgcwork.go#L351: b = (*workbuf)(work.empty.pop())
mgcwork.go#L358: lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
mgcwork.go#L363: if work.wbufSpans.free.first != nil {
mgcwork.go#L364: lock(&work.wbufSpans.lock)
mgcwork.go#L365: s = work.wbufSpans.free.first
mgcwork.go#L367: work.wbufSpans.free.remove(s)
mgcwork.go#L368: work.wbufSpans.busy.insert(s)
mgcwork.go#L370: unlock(&work.wbufSpans.lock)
mgcwork.go#L380: lock(&work.wbufSpans.lock)
mgcwork.go#L381: work.wbufSpans.busy.insert(s)
mgcwork.go#L382: unlock(&work.wbufSpans.lock)
mgcwork.go#L405: work.empty.push(&b.node)
mgcwork.go#L414: work.full.push(&b.node)
mgcwork.go#L421: b := (*workbuf)(work.full.pop())
mgcwork.go#L447: lock(&work.wbufSpans.lock)
mgcwork.go#L448: if work.full != 0 {
mgcwork.go#L454: work.empty = 0
mgcwork.go#L455: work.wbufSpans.free.takeAll(&work.wbufSpans.busy)
mgcwork.go#L456: unlock(&work.wbufSpans.lock)
mgcwork.go#L463: lock(&work.wbufSpans.lock)
mgcwork.go#L464: if gcphase != _GCoff || work.wbufSpans.free.isEmpty() {
mgcwork.go#L465: unlock(&work.wbufSpans.lock)
mgcwork.go#L471: span := work.wbufSpans.free.first
mgcwork.go#L475: work.wbufSpans.free.remove(span)
mgcwork.go#L479: more := !work.wbufSpans.free.isEmpty()
mgcwork.go#L480: unlock(&work.wbufSpans.lock)
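The mgcwork.go uses above manage the memory behind mark work buffers: spans are taken from work.wbufSpans.free (or newly allocated) and moved onto the busy list while their buffers are in use, the busy list is folded back into free when marking ends (mgcwork.go#L455), and free spans are later released back to the heap (mgcwork.go#L463-480). The following is a minimal sketch of that two-list recycling pattern, using container/list and integer span IDs in place of the runtime's mSpanList and mspan.

package main

import (
	"container/list"
	"fmt"
	"sync"
)

// wbufSpans mimics work.wbufSpans: spans whose buffers are not in use sit on
// free; spans currently backing work buffers sit on busy.
type wbufSpans struct {
	lock       sync.Mutex
	free, busy *list.List
}

// acquire takes a span from the free list (pretending to allocate a new one
// if the list is empty) and records it as busy, much like getempty does when
// it needs a span for a fresh workbuf.
func (w *wbufSpans) acquire() int {
	w.lock.Lock()
	defer w.lock.Unlock()
	var span int
	if e := w.free.Front(); e != nil {
		span = e.Value.(int)
		w.free.Remove(e)
	} else {
		span = w.busy.Len() + w.free.Len() // pretend to allocate a new span ID
	}
	w.busy.PushBack(span)
	return span
}

// recycleAll moves every busy span back to the free list, mirroring
// work.wbufSpans.free.takeAll(&work.wbufSpans.busy) at the end of marking.
func (w *wbufSpans) recycleAll() {
	w.lock.Lock()
	defer w.lock.Unlock()
	w.free.PushBackList(w.busy)
	w.busy.Init()
}

func main() {
	w := &wbufSpans{free: list.New(), busy: list.New()}
	for i := 0; i < 3; i++ {
		fmt.Println("acquired span", w.acquire())
	}
	w.recycleAll()
	fmt.Println("free spans after recycle:", w.free.Len(), "busy:", w.busy.Len())
}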