Source File: proc.go
Belonging Package: runtime
package runtime
import (
    "internal/bytealg"
    "internal/cpu"
    "runtime/internal/atomic"
    "runtime/internal/sys"
    "unsafe"
)
var buildVersion = sys.TheVersion
var main_init_done chan bool
//go:linkname main_main main.main
func main_main()
var mainStarted bool
var runtimeInitTime int64
var initSigmask sigset
// The main goroutine.
func main() {
    g := getg()

    // Racectx of m0->g0 is used only as the parent of the main goroutine.
    // It must not be used for anything else.
    g.m.g0.racectx = 0

    // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
    // Using decimal instead of binary GB and MB because
    // they look nicer in the stack overflow failure message.
    if sys.PtrSize == 8 {
        maxstacksize = 1000000000
    } else {
        maxstacksize = 250000000
    }

    // An upper limit for max stack size.
    maxstackceiling = 2 * maxstacksize

    // Allow newproc to start new Ms.
    mainStarted = true

    if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
        atomic.Store(&sched.sysmonStarting, 1)
        systemstack(func() {
            newm(sysmon, nil, -1)
        })
    }

    // Lock the main goroutine onto this, the main OS thread,
    // during initialization.
    lockOSThread()

    // Record when the world started.
    // Must be before doInit for tracing init.
    runtimeInitTime = nanotime()
    if runtimeInitTime == 0 {
        throw("nanotime returning zero")
    }

    if debug.inittrace != 0 {
        inittrace.id = getg().goid
        inittrace.active = true
    }

    doInit(&runtime_inittask) // Must be before defer.

    // Defer unlock so that runtime.Goexit during init does the unlock too.
    needUnlock := true
    defer func() {
        if needUnlock {
            unlockOSThread()
        }
    }()

    gcenable()

    main_init_done = make(chan bool)
    if iscgo {
        if _cgo_thread_start == nil {
            throw("_cgo_thread_start missing")
        }
        if GOOS != "windows" {
            if _cgo_setenv == nil {
                throw("_cgo_setenv missing")
            }
            if _cgo_unsetenv == nil {
                throw("_cgo_unsetenv missing")
            }
        }
        if _cgo_notify_runtime_init_done == nil {
            throw("_cgo_notify_runtime_init_done missing")
        }
        // Start the template thread in case we enter Go from
        // a C-created thread and need to create a new thread.
        startTemplateThread()
        cgocall(_cgo_notify_runtime_init_done, nil)
    }

    doInit(&main_inittask)

    // Disable init tracing after main init done to avoid overhead
    // of collecting statistics in malloc and newproc.
    inittrace.active = false

    close(main_init_done)

    needUnlock = false
    unlockOSThread()

    fn := main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
    fn()
    if raceenabled {
        racefini()
    }

    // Make racy client program work: if panicking on
    // another goroutine at the same time as main returns,
    // let the other goroutine finish printing the panic trace.
    if atomic.Load(&runningPanicDefers) != 0 {
        // Running deferred functions should not take long.
        for c := 0; c < 1000; c++ {
            if atomic.Load(&runningPanicDefers) == 0 {
                break
            }
            Gosched()
        }
    }
    if atomic.Load(&panicking) != 0 {
        gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
    }

    exit(0)
    for {
        var x *int32
        *x = 0
    }
}
// os_beforeExit is called from os.Exit(0).
//go:linkname os_beforeExit os.runtime_beforeExit
func os_beforeExit() {
    if raceenabled {
        racefini()
    }
}
// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
    checkTimeouts()
    mcall(gosched_m)
}

// goschedguarded yields the processor like gosched, but also checks
// for forbidden states and opts out of the yield in those cases.
//go:nosplit
func goschedguarded() {
    mcall(goschedguarded_m)
}
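Example (not part of proc.go): a minimal, self-contained sketch of the public runtime.Gosched API implemented above; the yielding goroutine is not suspended and resumes automatically after other runnable goroutines get a chance to run.

package main

import (
    "fmt"
    "runtime"
)

func main() {
    done := make(chan struct{})
    go func() {
        fmt.Println("worker runs")
        close(done)
    }()
    // Yield the processor so the worker goroutine can be scheduled
    // before main continues; main resumes automatically afterwards.
    runtime.Gosched()
    <-done
    fmt.Println("main resumes")
}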
// Puts the current goroutine into a waiting state and calls unlockf on the
// system stack. If unlockf returns false, the goroutine is resumed.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
    if reason != waitReasonSleep {
        checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
    }
    mp := acquirem()
    gp := mp.curg
    status := readgstatus(gp)
    if status != _Grunning && status != _Gscanrunning {
        throw("gopark: bad g status")
    }
    mp.waitlock = lock
    mp.waitunlockf = unlockf
    gp.waitreason = reason
    mp.waittraceev = traceEv
    mp.waittraceskip = traceskip
    releasem(mp)
    // can't do anything that might move the G between Ms here.
    mcall(park_m)
}

// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
    gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
}

func goready(gp *g, traceskip int) {
    systemstack(func() {
        ready(gp, traceskip, true)
    })
}
//go:nosplit
func acquireSudog() *sudog {
    // Delicate dance: the semaphore implementation calls acquireSudog,
    // acquireSudog calls new(sudog), new calls malloc, malloc can call
    // the garbage collector, and the garbage collector calls the
    // semaphore implementation in stopTheWorld.
    // Break the cycle by doing acquirem/releasem around new(sudog).
    mp := acquirem()
    pp := mp.p.ptr()
    if len(pp.sudogcache) == 0 {
        lock(&sched.sudoglock)
        // First, try to grab a batch from central cache.
        for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
            s := sched.sudogcache
            sched.sudogcache = s.next
            s.next = nil
            pp.sudogcache = append(pp.sudogcache, s)
        }
        unlock(&sched.sudoglock)
        // If the central cache is empty, allocate a new one.
        if len(pp.sudogcache) == 0 {
            pp.sudogcache = append(pp.sudogcache, new(sudog))
        }
    }
    n := len(pp.sudogcache)
    s := pp.sudogcache[n-1]
    pp.sudogcache[n-1] = nil
    pp.sudogcache = pp.sudogcache[:n-1]
    if s.elem != nil {
        throw("acquireSudog: found s.elem != nil in cache")
    }
    releasem(mp)
    return s
}
//go:nosplit
func releaseSudog(s *sudog) {
    if s.elem != nil {
        throw("runtime: sudog with non-nil elem")
    }
    if s.isSelect {
        throw("runtime: sudog with non-false isSelect")
    }
    if s.next != nil {
        throw("runtime: sudog with non-nil next")
    }
    if s.prev != nil {
        throw("runtime: sudog with non-nil prev")
    }
    if s.waitlink != nil {
        throw("runtime: sudog with non-nil waitlink")
    }
    if s.c != nil {
        throw("runtime: sudog with non-nil c")
    }
    gp := getg()
    if gp.param != nil {
        throw("runtime: releaseSudog with non-nil gp.param")
    }
    mp := acquirem() // avoid rescheduling to another P
    pp := mp.p.ptr()
    if len(pp.sudogcache) == cap(pp.sudogcache) {
        // Transfer half of local cache to the central cache.
        var first, last *sudog
        for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
            n := len(pp.sudogcache)
            p := pp.sudogcache[n-1]
            pp.sudogcache[n-1] = nil
            pp.sudogcache = pp.sudogcache[:n-1]
            if first == nil {
                first = p
            } else {
                last.next = p
            }
            last = p
        }
        lock(&sched.sudoglock)
        last.next = sched.sudogcache
        sched.sudogcache = first
        unlock(&sched.sudoglock)
    }
    pp.sudogcache = append(pp.sudogcache, s)
    releasem(mp)
}
// called from assembly
func badmcall(fn func(*g)) {
    throw("runtime: mcall called on m->g0 stack")
}

func badmcall2(fn func(*g)) {
    throw("runtime: mcall function returned")
}

func badreflectcall() {
    panic(plainError("arg size to reflect.call more than 1GB"))
}
var badmorestackg0Msg = "fatal: morestack on g0\n"

//go:nosplit
//go:nowritebarrierrec
func badmorestackg0() {
    sp := stringStructOf(&badmorestackg0Msg)
    write(2, sp.str, int32(sp.len))
}

var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"

//go:nosplit
//go:nowritebarrierrec
func badmorestackgsignal() {
    sp := stringStructOf(&badmorestackgsignalMsg)
    write(2, sp.str, int32(sp.len))
}
var (
    allglock mutex
    allgs    []*g

    // allglen and allgptr are atomic variables that contain len(allgs)
    // and &allgs[0] respectively.
    allglen uintptr
    allgptr **g
)

func allgadd(gp *g) {
    if readgstatus(gp) == _Gidle {
        throw("allgadd: bad status Gidle")
    }

    lock(&allglock)
    allgs = append(allgs, gp)
    if &allgs[0] != allgptr {
        atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
    }
    atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
    unlock(&allglock)
}
const (
    // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
    _GoidCacheBatch = 16
)

// cpuinit extracts the environment variable GODEBUG from the environment on
// Unix-like operating systems and calls internal/cpu.Initialize.
func cpuinit() {
    const prefix = "GODEBUG="
    var env string

    switch GOOS {
    case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
        cpu.DebugOptions = true
// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
func schedinit() {
    lockInit(&sched.lock, lockRankSched)
    lockInit(&sched.sysmonlock, lockRankSysmon)
    lockInit(&sched.deferlock, lockRankDefer)
    lockInit(&sched.sudoglock, lockRankSudog)
    lockInit(&deadlock, lockRankDeadlock)
    lockInit(&paniclk, lockRankPanic)
    lockInit(&allglock, lockRankAllg)
    lockInit(&allpLock, lockRankAllp)
    lockInit(&reflectOffs.lock, lockRankReflectOffs)
    lockInit(&finlock, lockRankFin)
    lockInit(&trace.bufLock, lockRankTraceBuf)
    lockInit(&trace.stringsLock, lockRankTraceStrings)
    lockInit(&trace.lock, lockRankTrace)
    lockInit(&cpuprof.lock, lockRankCpuprof)

    // raceinit must be the first call to race detector.
    // In particular, it must be done before mallocinit below calls racemapshadow.
    _g_ := getg()
    if raceenabled {
        _g_.racectx, raceprocctx0 = raceinit()
    }

    sched.maxmcount = 10000

    // The world starts stopped.
    worldStopped()

    moduledataverify()
    stackinit()
    mallocinit()
    fastrandinit() // must run before mcommoninit
    mcommoninit(_g_.m, -1)
    cpuinit()       // must run before alginit
    alginit()       // maps must not be used before this call
    modulesinit()   // provides activeModules
    typelinksinit() // uses maps, activeModules
    itabsinit()     // uses activeModules

    sigsave(&_g_.m.sigmask)
    initSigmask = _g_.m.sigmask

    goargs()
    goenvs()
    parsedebugvars()
    gcinit()

    lock(&sched.lock)
    sched.lastpoll = uint64(nanotime())
    procs := ncpu
    if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
        procs = n
    }
    if procresize(procs) != nil {
        throw("unknown runnable goroutine during bootstrap")
    }
    unlock(&sched.lock)

    // World is effectively started now, as P's can run.
    worldStarted()

    if buildVersion == "" {
        // Condition should never trigger. This code just serves
        // to ensure runtime·buildVersion is kept in the resulting binary.
        buildVersion = "unknown"
    }
    if len(modinfo) == 1 {
        // Condition should never trigger. This code just serves
        // to ensure runtime·modinfo is kept in the resulting binary.
        modinfo = ""
    }
}
func dumpgstatus(gp *g) {
    _g_ := getg()
    print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
    print("runtime:  g:  g=", _g_, ", goid=", _g_.goid, ",  g->atomicstatus=", readgstatus(_g_), "\n")
}
// Pre-allocated ID may be passed as 'id', or omitted by passing -1.
func mcommoninit(mp *m, id int64) {
    _g_ := getg()

    // g0 stack won't make sense for user (and is not necessary unwindable).
    if _g_ != _g_.m.g0 {
        callers(1, mp.createstack[:])
    }

    lock(&sched.lock)

    if id >= 0 {
        mp.id = id
    } else {
        mp.id = mReserveID()
    }

    mp.fastrand[0] = uint32(int64Hash(uint64(mp.id), fastrandseed))
    mp.fastrand[1] = uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
    if mp.fastrand[0]|mp.fastrand[1] == 0 {
        mp.fastrand[1] = 1
    }

    mpreinit(mp)
    if mp.gsignal != nil {
        mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
    }

    // Add to allm so garbage collector doesn't free g->m
    // when it is just in a register or thread-local storage.
    mp.alllink = allm

    // NumCgoCall() iterates over allm w/o schedlock,
    // so we need to publish it safely.
    atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
    unlock(&sched.lock)

    // Allocate memory to hold a cgo traceback if the cgo call crashes.
    if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
        mp.cgoCallers = new(cgoCallers)
    }
}
var fastrandseed uintptr

func fastrandinit() {
    s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:]
    getRandomData(s)
}
// Mark gp ready to run.
func ready(gp *g, traceskip int, next bool) {
    if trace.enabled {
        traceGoUnpark(gp, traceskip)
    }

    status := readgstatus(gp)

    // Mark runnable.
    _g_ := getg()
    mp := acquirem() // disable preemption because it can be holding p in a local var
    if status&^_Gscan != _Gwaiting {
        dumpgstatus(gp)
        throw("bad g->status in ready")
    }
    casgstatus(gp, _Gwaiting, _Grunnable)
    runqput(_g_.m.p.ptr(), gp, next)
    wakep()
    releasem(mp)
}
const freezeStopWait = 0x7fffffff

// freezing is set to non-zero if the runtime is trying to freeze the world.
var freezing uint32

// Similar to stopTheWorld but best-effort; used while crashing.
func freezetheworld() {
    atomic.Store(&freezing, 1)
    // Preemption requests can be lost due to races with concurrently
    // executing threads, so try several times.
    for i := 0; i < 5; i++ {
        sched.stopwait = freezeStopWait
        atomic.Store(&sched.gcwaiting, 1)
        if !preemptall() {
            break // no running goroutines
        }
        usleep(1000)
    }
    // to be sure
    usleep(1000)
    preemptall()
    usleep(1000)
}
// All reads and writes of g's status go through readgstatus, casgstatus,
// castogscanstatus and casfrom_Gscanstatus.
//go:nosplit
func readgstatus(gp *g) uint32 {
    return atomic.Load(&gp.atomicstatus)
}
// The Gscanstatuses are acting like locks and this releases them.
func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
    success := false

    // Check that transition is valid.
    switch oldval {
    default:
        print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
        dumpgstatus(gp)
        throw("casfrom_Gscanstatus:top gp->status is not in scan state")
    case _Gscanrunnable,
        _Gscanwaiting,
        _Gscanrunning,
        _Gscansyscall,
        _Gscanpreempted:
        if newval == oldval&^_Gscan {
            success = atomic.Cas(&gp.atomicstatus, oldval, newval)
        }
    }
    if !success {
        print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
        dumpgstatus(gp)
        throw("casfrom_Gscanstatus: gp->status is not in scan state")
    }
    releaseLockRank(lockRankGscan)
}
// This will return false if the gp is not in the expected status and the cas fails.
// This acts like a lock acquire while the casfromgstatus acts like a lock release.
func castogscanstatus(gp *g, oldval, newval uint32) bool {
    switch oldval {
    case _Grunnable,
        _Grunning,
        _Gwaiting,
        _Gsyscall:
        if newval == oldval|_Gscan {
            r := atomic.Cas(&gp.atomicstatus, oldval, newval)
            if r {
                acquireLockRank(lockRankGscan)
            }
            return r
        }
    }
    print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
    throw("castogscanstatus")
    panic("not reached")
}
//go:nosplit
func casgstatus(gp *g, oldval, newval uint32) {
    if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
        systemstack(func() {
            print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
            throw("casgstatus: bad incoming values")
        })
    }

    acquireLockRank(lockRankGscan)
    releaseLockRank(lockRankGscan)

    // See https://golang.org/cl/21503 for justification of the yield delay.
    const yieldDelay = 5 * 1000
    var nextYield int64

    // loop if gp->atomicstatus is in a scan state giving
    // GC time to finish and change the state to oldval.
    for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
        if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
            throw("casgstatus: waiting for Gwaiting but is Grunnable")
        }
        if i == 0 {
            nextYield = nanotime() + yieldDelay
        }
        if nanotime() < nextYield {
            for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
                procyield(1)
            }
        } else {
            osyield()
            nextYield = nanotime() + yieldDelay/2
        }
    }
}
// casgcopystack transitions gp from Gwaiting or Grunnable to Gcopystack and
// returns the old status. It cannot call casgstatus directly, because it is
// racing with an async wakeup that might come in from netpoll.
//go:nosplit
func casgcopystack(gp *g) uint32 {
    for {
        oldstatus := readgstatus(gp) &^ _Gscan
        if oldstatus != _Gwaiting && oldstatus != _Grunnable {
            throw("copystack: bad status, not Gwaiting or Grunnable")
        }
        if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
            return oldstatus
        }
    }
}
// casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted.
func casGToPreemptScan(gp *g, old, new uint32) {
    if old != _Grunning || new != _Gscan|_Gpreempted {
        throw("bad g transition")
    }
    acquireLockRank(lockRankGscan)
    for !atomic.Cas(&gp.atomicstatus, _Grunning, _Gscan|_Gpreempted) {
    }
}

// casGFromPreempted attempts to transition gp from _Gpreempted to
// _Gwaiting. If successful, the caller is responsible for
// re-scheduling gp.
func casGFromPreempted(gp *g, old, new uint32) bool {
    if old != _Gpreempted || new != _Gwaiting {
        throw("bad g transition")
    }
    return atomic.Cas(&gp.atomicstatus, _Gpreempted, _Gwaiting)
}
// stopTheWorld stops all P's from executing goroutines, interrupting all
// goroutines at GC safe points. The reason argument is recorded and will be
// displayed in the stack dump of a goroutine blocked on this call.
func stopTheWorld(reason string) {
    semacquire(&worldsema)
    gp := getg()
    gp.m.preemptoff = reason
    systemstack(func() {
        // Mark the goroutine which called stopTheWorld preemptible so its
        // stack may be scanned.
        casgstatus(gp, _Grunning, _Gwaiting)
        stopTheWorldWithSema()
        casgstatus(gp, _Gwaiting, _Grunning)
    })
}
// startTheWorld undoes the effects of stopTheWorld.
func startTheWorld() {
    systemstack(func() { startTheWorldWithSema(false) })

    // worldsema must be held over startTheWorldWithSema to ensure
    // gomaxprocs cannot change while worldsema is held.
    mp := acquirem()
    mp.preemptoff = ""
    semrelease1(&worldsema, true, 0)
    releasem(mp)
}
// stopTheWorldGC has the same effect as stopTheWorld, but blocks
// until the GC is not running. It also blocks a GC from starting
// until startTheWorldGC is called.
func stopTheWorldGC(reason string) {
    semacquire(&gcsema)
    stopTheWorld(reason)
}

// startTheWorldGC undoes the effects of stopTheWorldGC.
func startTheWorldGC() {
    startTheWorld()
    semrelease(&gcsema)
}
func stopTheWorldWithSema() {
    _g_ := getg()

    lock(&sched.lock)
    sched.stopwait = gomaxprocs
    atomic.Store(&sched.gcwaiting, 1)
    preemptall()
    // stop current P
    _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
    sched.stopwait--
    // try to retake all P's in Psyscall status
    for _, p := range allp {
        s := p.status
        if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
            if trace.enabled {
                traceGoSysBlock(p)
                traceProcStop(p)
            }
            p.syscalltick++
            sched.stopwait--
        }
    }
    wait := sched.stopwait > 0
    unlock(&sched.lock)

    // wait for remaining P's to stop voluntarily
    if wait {
        for {
            // wait for 100us, then try to re-preempt in case of any races
            if notetsleep(&sched.stopnote, 100*1000) {
                noteclear(&sched.stopnote)
                break
            }
            preemptall()
        }
    }

    // sanity checks
    bad := ""
    if sched.stopwait != 0 {
        bad = "stopTheWorld: not stopped (stopwait != 0)"
    }
    if atomic.Load(&freezing) != 0 {
        // Some other thread is panicking; halt this thread too.
        lock(&deadlock)
        lock(&deadlock)
    }
    if bad != "" {
        throw(bad)
    }

    worldStopped()
}
func ( bool) int64 {
assertWorldStopped()
:= acquirem() // disable preemption because it can be holding p in a local var
if netpollinited() {
:= netpoll(0) // non-blocking
injectglist(&)
}
lock(&sched.lock)
:= gomaxprocs
if newprocs != 0 {
= newprocs
newprocs = 0
}
:= procresize()
sched.gcwaiting = 0
if sched.sysmonwait != 0 {
sched.sysmonwait = 0
notewakeup(&sched.sysmonnote)
}
unlock(&sched.lock)
worldStarted()
for != nil {
:=
= .link.ptr()
if .m != 0 {
:= .m.ptr()
.m = 0
if .nextp != 0 {
throw("startTheWorld: inconsistent mp->nextp")
}
.nextp.set()
notewakeup(&.park)
:= nanotime()
if {
traceGCSTWDone()
}
.stackguard1 = .stackguard0
mstart1()
save(getcallerpc(), getcallersp())
asminit()
minit()
if (iscgo || GOOS == "windows") && !cgoHasExtraM {
cgoHasExtraM = true
newextram()
}
initsig(false)
}
if atomic.Load(&.signalPending) != 0 {
atomic.Xadd(&pendingPreemptSignals, -1)
}
}
mdestroy()
return
}
exitThread(&.freeWait)
}
func ( func(*p)) {
:= acquirem()
:= getg().m.p.ptr()
lock(&sched.lock)
if sched.safePointWait != 0 {
throw("forEachP: sched.safePointWait != 0")
}
sched.safePointWait = gomaxprocs - 1
sched.safePointFn =
for , := range allp {
if != {
atomic.Store(&.runSafePointFn, 1)
}
}
preemptall()
()
for , := range allp {
:= .status
if == _Psyscall && .runSafePointFn == 1 && atomic.Cas(&.status, , _Pidle) {
if trace.enabled {
traceGoSysBlock()
traceProcStop()
}
.syscalltick++
handoffp()
}
}
if {
if notetsleep(&sched.safePointNote, 100*1000) {
noteclear(&sched.safePointNote)
break
}
preemptall()
}
}
if sched.safePointWait != 0 {
throw("forEachP: not done")
}
for , := range allp {
if .runSafePointFn != 0 {
throw("forEachP: P did not run fn")
}
}
lock(&sched.lock)
sched.safePointFn = nil
unlock(&sched.lock)
releasem()
}
lock(&newmHandoff.lock)
for !newmHandoff.waiting {
unlock(&newmHandoff.lock)
osyield()
lock(&newmHandoff.lock)
}
unlock(&newmHandoff.lock)
}
if netpollinited() {
netpollBreak()
}
sigRecvPrepareForFixup()
:= getg()
lock(&mFixupRace.lock)
mFixupRace.ctx = .racectx
unlock(&mFixupRace.lock)
}
if := (true); {
:= .m.procid
for := allm; != nil; = .alllink {
continue
throw("unsupported runtime environment")
lock(&sched.lock)
if atomic.Load(&sched.sysmonwait) != 0 {
atomic.Store(&sched.sysmonwait, 0)
notewakeup(&sched.sysmonnote)
}
unlock(&sched.lock)
lock(&newmHandoff.lock)
if newmHandoff.waiting {
newmHandoff.waiting = false
notewakeup(&newmHandoff.wake)
}
unlock(&newmHandoff.lock)
osyield()
}
}
if raceenabled {
lock(&mFixupRace.lock)
mFixupRace.ctx = 0
unlock(&mFixupRace.lock)
}
startTheWorldGC()
}
func () {
if !atomic.Cas(&.runSafePointFn, 1, 0) {
return
}
sched.safePointFn()
lock(&sched.lock)
sched.safePointWait--
if sched.safePointWait == 0 {
notewakeup(&sched.safePointNote)
}
unlock(&sched.lock)
}
func () {
write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
exit(1)
}
.needextram = .schedlink == 0
extraMCount--
unlockextra(.schedlink.ptr())
.sigmask =
setg(.g0)
:= getg()
.stack.hi = getcallersp() + 1024
.stack.lo = getcallersp() - 32*1024
.stackguard0 = .stack.lo + _StackGuard
casgstatus(.curg, _Gdead, _Gsyscall)
atomic.Xadd(&sched.ngsys, -1)
}
var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
func () {
:= atomic.Xchg(&extraMWaiters, 0)
if > 0 {
for := uint32(0); < ; ++ {
oneNewExtraM()
}
:= lockextra(true)
unlockextra()
if == nil {
oneNewExtraM()
}
}
}
allgadd()
:= lockextra(true)
.schedlink.set()
extraMCount++
unlockextra()
}
unlockextra()
msigrestore()
}
var newmHandoff struct {
    lock mutex

    // newm points to a list of M structures that need new OS threads,
    // linked through m.schedlink.
    newm muintptr

    waiting bool
    wake    note

    // haveTemplateThread indicates that the templateThread has been
    // started; not protected by lock, set with cas.
    haveTemplateThread uint32
}
lock(&newmHandoff.lock)
if newmHandoff.haveTemplateThread == 0 {
throw("on a locked thread with no template thread")
}
.schedlink = newmHandoff.newm
newmHandoff.newm.set()
if newmHandoff.waiting {
newmHandoff.waiting = false
notewakeup(&newmHandoff.wake)
}
unlock(&newmHandoff.lock)
return
}
newm1()
}
func newm1(mp *m) {
    if iscgo {
        var ts cgothreadstart
        if _cgo_thread_start == nil {
            throw("_cgo_thread_start missing")
        }
        ts.g.set(mp.g0)
        ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
        ts.fn = unsafe.Pointer(funcPC(mstart))
        if msanenabled {
            msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
        }
        execLock.rlock() // Prevent process clone.
        asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
        execLock.runlock()
        return
    }
    execLock.rlock() // Prevent process clone.
    newosproc(mp)
    execLock.runlock()
}
// startTemplateThread starts the template thread if it is not already
// running.
//
// The calling thread must itself be in a known-good state.
func startTemplateThread() {
    if GOARCH == "wasm" { // no threads on wasm yet
        return
    }

    // Disable preemption to guarantee that the template thread will be
    // created before a park once haveTemplateThread is set.
    mp := acquirem()
    if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
        releasem(mp)
        return
    }
    newm(templateThread, nil, -1)
    releasem(mp)
}
var mFixupRace struct {
lock mutex
ctx uintptr
}
// templateThread is a thread in a known-good state that exists solely
// to start new threads in known-good states when the calling thread
// may not be in a good state.
func templateThread() {
    lock(&sched.lock)
    sched.nmsys++
    checkdead()
    unlock(&sched.lock)

    for {
        lock(&newmHandoff.lock)
        for newmHandoff.newm != 0 {
            newm := newmHandoff.newm.ptr()
            newmHandoff.newm = 0
            unlock(&newmHandoff.lock)
            for newm != nil {
                next := newm.schedlink.ptr()
                newm.schedlink = 0
                newm1(newm)
                newm = next
            }
            lock(&newmHandoff.lock)
        }
        newmHandoff.waiting = true
        noteclear(&newmHandoff.wake)
        unlock(&newmHandoff.lock)
        notesleep(&newmHandoff.wake)
        mDoFixup()
    }
}
:= mReserveID()
unlock(&sched.lock)
var func()
= mspinning
}
releasem()
}
if gcBlackenEnabled != 0 && gcMarkWorkAvailable() {
startm(, false)
return
if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
startm(, true)
return
}
lock(&sched.lock)
if sched.gcwaiting != 0 {
.status = _Pgcstop
sched.stopwait--
if sched.stopwait == 0 {
notewakeup(&sched.stopnote)
}
unlock(&sched.lock)
return
}
if .runSafePointFn != 0 && atomic.Cas(&.runSafePointFn, 1, 0) {
sched.safePointFn()
sched.safePointWait--
if sched.safePointWait == 0 {
notewakeup(&sched.safePointNote)
}
}
if sched.runqsize != 0 {
unlock(&sched.lock)
startm(, false)
return
:= nobarrierWakeTime()
pidleput()
unlock(&sched.lock)
if != 0 {
wakeNetPoller()
}
}
incidlelocked(-1)
:= releasep()
.nextp.set()
notewakeup(&.park)
stopm()
}
.m.curg =
.m = .m
casgstatus(, _Grunnable, _Grunning)
.waitsince = 0
.preempt = false
.stackguard0 = .stack.lo + _StackGuard
if ! {
.m.p.ptr().schedtick++
}
:= sched.profilehz
if .m.profilehz != {
setThreadCPUProfiler()
}
if .syscallsp != 0 && .sysblocktraced {
traceGoSysExit(.sysexitticks)
}
traceGoStart()
}
gogo(&.sched)
}
:
:= .m.p.ptr()
if sched.gcwaiting != 0 {
gcstopm()
goto
}
if .runSafePointFn != 0 {
runSafePointFn()
}
, , := checkTimers(, 0)
if fingwait && fingwake {
if := wakefing(); != nil {
ready(, 0, true)
}
}
if *cgo_yield != nil {
asmcgocall(*cgo_yield, nil)
}
if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
if := netpoll(0); !.empty() { // non-blocking
:= .pop()
injectglist(&)
casgstatus(, _Gwaiting, _Grunnable)
if trace.enabled {
traceGoUnpark(, 0)
}
return , false
}
}
:= uint32(gomaxprocs)
if !.m.spinning && 2*atomic.Load(&sched.nmspinning) >= -atomic.Load(&sched.npidle) {
goto
}
if !.m.spinning {
.m.spinning = true
atomic.Xadd(&sched.nmspinning, 1)
}
const = 4
for := 0; < ; ++ {
:= == -1
for := stealOrder.start(fastrand()); !.done(); .next() {
if sched.gcwaiting != 0 {
goto
}
:= allp[.position()]
if == {
continue
}
if && timerpMask.read(.position()) {
, , := checkTimers(, )
=
if != 0 && ( == 0 || < ) {
=
}
goto
}
:
if gcBlackenEnabled != 0 && gcMarkWorkAvailable() {
:= (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
if != nil {
.gcMarkWorkerMode = gcMarkWorkerIdleMode
:= .gp.ptr()
casgstatus(, _Gwaiting, _Grunnable)
if trace.enabled {
traceGoUnpark(, 0)
}
return , false
}
}
:= int64(-1)
= -
}
, := beforeIdle()
if != nil {
casgstatus(, _Gwaiting, _Grunnable)
if trace.enabled {
traceGoUnpark(, 0)
}
return , false
}
if {
goto
}
:= idlepMask
:= timerpMask
for , := range {
if .read(uint32()) {
:= nobarrierWakeTime()
if != 0 && ( == 0 || < ) {
=
}
}
}
if != 0 {
if == 0 {
= nanotime()
}
= -
if < 0 {
= 0
}
}
lock(&sched.lock)
var *gcBgMarkWorkerNode
= pidleget()
.gcMarkWorkerMode = gcMarkWorkerIdleMode
:= .gp.ptr()
casgstatus(, _Gwaiting, _Grunnable)
if trace.enabled {
traceGoUnpark(, 0)
}
return , false
}
}
stopm()
goto
}
lock(&sched.lock)
= pidleget()
unlock(&sched.lock)
if == nil {
injectglist(&)
} else {
acquirep()
if !.empty() {
:= .pop()
injectglist(&)
casgstatus(, _Gwaiting, _Grunnable)
if trace.enabled {
traceGoUnpark(, 0)
}
return , false
}
if {
.m.spinning = true
atomic.Xadd(&sched.nmspinning, 1)
}
goto
}
} else if != 0 && netpollinited() {
:= int64(atomic.Load64(&sched.pollUntil))
if == 0 || > {
netpollBreak()
}
}
stopm()
goto
}
func ( int64) {
wakep()
}
:= .head.ptr()
var *g
:= 0
for := ; != nil; = .schedlink.ptr() {
=
++
casgstatus(, _Gwaiting, _Grunnable)
}
var gQueue
.head.set()
.tail.set()
* = gList{}
:= func( int) {
for ; != 0 && sched.npidle != 0; -- {
startm(nil, false)
}
}
:= getg().m.p.ptr()
if == nil {
lock(&sched.lock)
globrunqputbatch(&, int32())
unlock(&sched.lock)
()
return
}
:= int(atomic.Load(&sched.npidle))
var gQueue
var int
for = 0; < && !.empty(); ++ {
:= .pop()
.pushBack()
}
if > 0 {
lock(&sched.lock)
globrunqputbatch(&, int32())
unlock(&sched.lock)
()
-=
}
if !.empty() {
runqputbatch(, &, )
}
}
:= false
if trace.enabled || trace.shutdown {
= traceReader()
if != nil {
casgstatus(, _Gwaiting, _Grunnable)
traceGoUnpark(, 0)
= true
}
}
if == nil && gcBlackenEnabled != 0 {
= gcController.findRunnableGCWorker(.m.p.ptr())
= || != nil
}
}
if == nil {
, = findrunnable() // blocks until work is available
}
if .m.spinning {
resetspinning()
}
if {
wakep()
}
startlockedm()
goto
}
execute(, )
}
:= int64(atomic.Load64(&.timer0When))
:= int64(atomic.Load64(&.timerModifiedEarliest))
if == 0 || ( != 0 && < ) {
=
}
// park continuation on g0.
func park_m(gp *g) {
    _g_ := getg()

    if trace.enabled {
        traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
    }

    casgstatus(gp, _Grunning, _Gwaiting)
    dropg()

    if fn := _g_.m.waitunlockf; fn != nil {
        ok := fn(gp, _g_.m.waitlock)
        _g_.m.waitunlockf = nil
        _g_.m.waitlock = nil
        if !ok {
            if trace.enabled {
                traceGoUnpark(gp, 2)
            }
            casgstatus(gp, _Gwaiting, _Grunnable)
            execute(gp, true) // Schedule it back, never returns.
        }
    }
    schedule()
}
func goschedImpl(gp *g) {
    status := readgstatus(gp)
    if status&^_Gscan != _Grunning {
        dumpgstatus(gp)
        throw("bad g status")
    }
    casgstatus(gp, _Grunning, _Grunnable)
    dropg()
    lock(&sched.lock)
    globrunqput(gp)
    unlock(&sched.lock)

    schedule()
}
// Gosched continuation on g0.
func gosched_m(gp *g) {
    if trace.enabled {
        traceGoSched()
    }
    goschedImpl(gp)
}
// goschedguarded is a forbidden-states-avoided version of gosched_m.
func goschedguarded_m(gp *g) {
    if !canPreemptM(gp.m) {
        gogo(&gp.sched) // never return
    }

    if trace.enabled {
        traceGoSched()
    }
    goschedImpl(gp)
}
func gopreempt_m(gp *g) {
    if trace.enabled {
        traceGoPreempt()
    }
    goschedImpl(gp)
}
// preemptPark parks gp and puts it in _Gpreempted.
func preemptPark(gp *g) {
    if trace.enabled {
        traceGoPark(traceEvGoBlock, 0)
    }
    status := readgstatus(gp)
    if status&^_Gscan != _Grunning {
        dumpgstatus(gp)
        throw("bad g status")
    }
    gp.waitreason = waitReasonPreempted

    // Lock the G down with the scan bit until dropg, then finish the
    // transition to _Gpreempted and reschedule.
    casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
    dropg()
    casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
    schedule()
}
// goyield is like Gosched, but it:
// - emits a GoPreempt trace event instead of a GoSched trace event
// - puts the current G on the runq of the current P instead of the globrunq
func goyield() {
    checkTimeouts()
    mcall(goyield_m)
}
func goyield_m(gp *g) {
    if trace.enabled {
        traceGoPreempt()
    }
    pp := gp.m.p.ptr()
    casgstatus(gp, _Grunning, _Grunnable)
    dropg()
    runqput(pp, gp, false)
    schedule()
}
// Finishes execution of the current goroutine.
func goexit1() {
    if raceenabled {
        racegoend()
    }
    if trace.enabled {
        traceGoEnd()
    }
    mcall(goexit0)
}
func ( *g) {
:= getg()
casgstatus(, _Grunning, _Gdead)
if isSystemGoroutine(, false) {
atomic.Xadd(&sched.ngsys, -1)
}
.m = nil
:= .lockedm != 0
.lockedm = 0
.m.lockedg = 0
.preemptStop = false
.paniconfault = false
._defer = nil // should be true already but just in case.
._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
.writebuf = nil
.waitreason = 0
.param = nil
.labels = nil
.timer = nil
:= float64frombits(atomic.Load64(&gcController.assistWorkPerByte))
:= int64( * float64(.gcAssistBytes))
atomic.Xaddint64(&gcController.bgScanCredit, )
.gcAssistBytes = 0
}
dropg()
if GOARCH == "wasm" { // no threads yet on wasm
gfput(.m.p.ptr(), )
schedule() // never returns
}
if .m.lockedInt != 0 {
print("invalid m->lockedInt = ", .m.lockedInt, "\n")
throw("internal lockOSThread error")
}
gfput(.m.p.ptr(), )
save(, )
}
if atomic.Load(&sched.sysmonwait) != 0 {
systemstack(entersyscall_sysmon)
save(, )
}
systemstack(runSafePointFn)
save(, )
}
.m.syscalltick = .m.p.ptr().syscalltick
.sysblocktraced = true
:= .m.p.ptr()
.m = 0
.m.oldp.set()
.m.p = 0
atomic.Store(&.status, _Psyscall)
if sched.gcwaiting != 0 {
systemstack(entersyscall_gcwait)
save(, )
}
.m.locks--
}
// Standard syscall entry used by the go syscall library and normal cgo calls.
//go:nosplit
func entersyscall() {
    reentersyscall(getcallerpc(), getcallersp())
}
func entersyscall_sysmon() {
    lock(&sched.lock)
    if atomic.Load(&sched.sysmonwait) != 0 {
        atomic.Store(&sched.sysmonwait, 0)
        notewakeup(&sched.sysmonnote)
    }
    unlock(&sched.lock)
}
func entersyscall_gcwait() {
    _g_ := getg()
    _p_ := _g_.m.oldp.ptr()

    lock(&sched.lock)
    if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
        if trace.enabled {
            traceGoSysBlock(_p_)
            traceProcStop(_p_)
        }
        _p_.syscalltick++
        if sched.stopwait--; sched.stopwait == 0 {
            notewakeup(&sched.stopnote)
        }
    }
    unlock(&sched.lock)
}
// The same as entersyscall(), but with a hint that the syscall is blocking.
//go:nosplit
func entersyscallblock() {
    _g_ := getg()

    _g_.m.locks++ // see comment in entersyscall
    _g_.throwsplit = true
    _g_.stackguard0 = stackPreempt // see comment in entersyscall
    _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
    _g_.sysblocktraced = true
    _g_.m.p.ptr().syscalltick++

    // Leave SP around for GC and traceback.
    pc := getcallerpc()
    sp := getcallersp()
    save(pc, sp)
    _g_.syscallsp = _g_.sched.sp
    _g_.syscallpc = _g_.sched.pc
    if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
        sp1 := sp
        sp2 := _g_.sched.sp
        sp3 := _g_.syscallsp
        systemstack(func() {
            print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
            throw("entersyscallblock")
        })
    }
    casgstatus(_g_, _Grunning, _Gsyscall)
    if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
        systemstack(func() {
            print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
            throw("entersyscallblock")
        })
    }

    systemstack(entersyscallblock_handoff)

    // Resave for traceback during blocked call.
    save(getcallerpc(), getcallersp())

    _g_.m.locks--
}
func entersyscallblock_handoff() {
    if trace.enabled {
        traceGoSysCall()
        traceGoSysBlock(getg().m.p.ptr())
    }
    handoffp(releasep())
}
func () {
:= getg()
.m.locks++ // see comment in entersyscall
if getcallersp() > .syscallsp {
throw("exitsyscall: syscall frame is no longer valid")
}
.waitsince = 0
:= .m.oldp.ptr()
.m.oldp = 0
if exitsyscallfast() {
if trace.enabled {
if != .m.p.ptr() || .m.syscalltick != .m.p.ptr().syscalltick {
systemstack(traceGoStart)
}
.stackguard0 = .stack.lo + _StackGuard
}
.throwsplit = false
Gosched()
}
return
}
.sysexitticks = 0
for != nil && .syscalltick == .m.syscalltick {
osyield()
.sysexitticks = cputicks()
}
.m.locks--
.syscallsp = 0
.m.p.ptr().syscalltick++
.throwsplit = false
}
if sched.stopwait == freezeStopWait {
return false
}
wirep()
exitsyscallfast_reacquired()
return true
}
if sched.pidle != 0 {
var bool
systemstack(func() {
= exitsyscallfast_pidle()
if && trace.enabled {
for .syscalltick == .m.syscalltick {
osyield()
}
}
traceGoSysExit(0)
}
})
if {
return true
}
}
return false
}
func () {
:= getg()
if .m.syscalltick != .m.p.ptr().syscalltick {
traceGoSysExit(0)
})
}
.m.p.ptr().syscalltick++
}
}
func exitsyscallfast_pidle() bool {
    lock(&sched.lock)
    _p_ := pidleget()
    if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
        atomic.Store(&sched.sysmonwait, 0)
        notewakeup(&sched.sysmonnote)
    }
    unlock(&sched.lock)
    if _p_ != nil {
        acquirep(_p_)
        return true
    }
    return false
}
func ( *g) {
:= getg()
casgstatus(, _Gsyscall, _Grunnable)
dropg()
lock(&sched.lock)
var *p
if schedEnabled() {
= pidleget()
}
if == nil {
globrunqput()
} else if atomic.Load(&sched.sysmonwait) != 0 {
atomic.Store(&sched.sysmonwait, 0)
notewakeup(&sched.sysmonnote)
}
unlock(&sched.lock)
if != nil {
acquirep()
execute(, false) // Never returns.
}
.stackguard0 = stackFork
}
// Called from syscall package before fork.
//go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
//go:nosplit
func syscall_runtime_BeforeFork() {
    systemstack(beforefork)
}

func afterfork() {
    gp := getg().m.curg

    // See the comments in beforefork.
    gp.stackguard0 = gp.stack.lo + _StackGuard

    msigrestore(gp.m.sigmask)

    gp.m.locks--
}

// Called from syscall package after fork in parent.
//go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
//go:nosplit
func syscall_runtime_AfterFork() {
    systemstack(afterfork)
}
var inForkedChild bool
msigrestore(getg().m.sigmask)
inForkedChild = false
}
func ( int32) *g {
:= new(g)
if >= 0 {
= round2(_StackSystem + )
systemstack(func() {
.stack = stackalloc(uint32())
})
.stackguard0 = .stack.lo + _StackGuard
if >= _StackMin-4*sys.RegSize-sys.RegSize {
throw("newproc: function arguments too large for new goroutine")
}
:= .m.p.ptr()
:= gfget()
if == nil {
= malg(_StackMin)
casgstatus(, _Gidle, _Gdead)
allgadd() // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
}
if .stack.hi == 0 {
throw("newproc1: newg missing stack")
}
if readgstatus() != _Gdead {
throw("newproc1: new g is not Gdead")
}
:= 4*sys.RegSize + uintptr() + sys.MinFrameSize // extra space in case of reads slightly beyond frame
+= - & (sys.SpAlign - 1) // align to spAlign
:= .stack.hi -
:=
*(*uintptr)(unsafe.Pointer()) = 0
prepGoExitFrame()
+= sys.MinFrameSize
}
if > 0 {
if writeBarrier.needed && !.m.curg.gcscandone {
:= findfunc(.fn)
:= (*stackmap)(funcdata(, _FUNCDATA_ArgsPointerMaps))
:= stackmapdata(, 0)
bulkBarrierBitmap(, , uintptr(.n)*sys.PtrSize, 0, .bytedata)
}
}
}
memclrNoHeapPointers(unsafe.Pointer(&.sched), unsafe.Sizeof(.sched))
.sched.sp =
.stktopsp =
.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function
.sched.g = guintptr(unsafe.Pointer())
gostartcallfn(&.sched, )
.gopc =
.ancestors = saveAncestors()
.startpc = .fn
if .m.curg != nil {
.labels = .m.curg.labels
}
if isSystemGoroutine(, false) {
atomic.Xadd(&sched.ngsys, +1)
}
casgstatus(, _Gdead, _Grunnable)
.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
.goidcache -= _GoidCacheBatch - 1
.goidcacheend = .goidcache + _GoidCacheBatch
}
.goid = int64(.goidcache)
.goidcache++
if raceenabled {
.racectx = racegostart()
}
if trace.enabled {
traceGoCreate(, .startpc)
}
releasem(.m)
return
}
// saveAncestors copies previous ancestors of the given caller g and
// includes info for the current caller into a new set of tracebacks for
// a g being created.
func saveAncestors(callergp *g) *[]ancestorInfo {
    // Copy all prior info, except for the root goroutine (goid 0).
    if debug.tracebackancestors <= 0 || callergp.goid == 0 {
        return nil
    }
    var callerAncestors []ancestorInfo
    if callergp.ancestors != nil {
        callerAncestors = *callergp.ancestors
    }
    n := int32(len(callerAncestors)) + 1
    if n > debug.tracebackancestors {
        n = debug.tracebackancestors
    }
    ancestors := make([]ancestorInfo, n)
    copy(ancestors[1:], callerAncestors)

    var pcs [_TracebackMaxFrames]uintptr
    npcs := gcallers(callergp, 0, pcs[:])
    ipcs := make([]uintptr, npcs)
    copy(ipcs, pcs[:])
    ancestors[0] = ancestorInfo{
        pcs:  ipcs,
        goid: callergp.goid,
        gopc: callergp.gopc,
    }

    ancestorsp := new([]ancestorInfo)
    *ancestorsp = ancestors
    return ancestorsp
}
stackfree(.stack)
.stack.lo = 0
.stack.hi = 0
.stackguard0 = 0
}
.gFree.push()
.gFree.n++
if .gFree.n >= 64 {
lock(&sched.gFree.lock)
for .gFree.n >= 32 {
.gFree.n--
= .gFree.pop()
if .stack.lo == 0 {
sched.gFree.noStack.push()
} else {
sched.gFree.stack.push()
}
sched.gFree.n++
}
unlock(&sched.gFree.lock)
}
}
systemstack(func() {
.stack = stackalloc(_FixedStack)
})
.stackguard0 = .stack.lo + _StackGuard
} else {
if raceenabled {
racemalloc(unsafe.Pointer(.stack.lo), .stack.hi-.stack.lo)
}
if msanenabled {
msanmalloc(unsafe.Pointer(.stack.lo), .stack.hi-.stack.lo)
}
}
return
}
// Breakpoint executes a breakpoint trap.
func Breakpoint() {
    breakpoint()
}

// LockOSThread wires the calling goroutine to its current operating system thread.
// The calling goroutine will always execute in that thread, and no other goroutine
// will execute in it, until the calling goroutine has made as many calls to
// UnlockOSThread as to LockOSThread.
func LockOSThread() {
    if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
        // If we need to start a new thread from the locked
        // thread, we need the template thread. Start it now
        // while we're in a known-good state.
        startTemplateThread()
    }
    _g_ := getg()
    _g_.m.lockedExt++
    if _g_.m.lockedExt == 0 {
        _g_.m.lockedExt--
        panic("LockOSThread nesting overflow")
    }
    dolockOSThread()
}
//go:nosplit
func lockOSThread() {
    getg().m.lockedInt++
    dolockOSThread()
}
//go:nosplit
func unlockOSThread() {
    _g_ := getg()
    if _g_.m.lockedInt == 0 {
        systemstack(badunlockosthread)
    }
    _g_.m.lockedInt--
    dounlockOSThread()
}
func badunlockosthread() {
    throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
}
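Example (not part of proc.go): a minimal sketch of the public LockOSThread/UnlockOSThread pairing implemented above, typically used when a C library or OS API requires all calls to come from the same thread.

package main

import "runtime"

func main() {
    // Wire this goroutine to its current OS thread for the duration of
    // thread-affine work, then release the wiring when done.
    runtime.LockOSThread()
    defer runtime.UnlockOSThread()

    // ... call thread-affine APIs here (e.g. certain C GUI or GL libraries) ...
}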
func gcount() int32 {
    n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
    for _, _p_ := range allp {
        n -= _p_.gFree.n
    }
    if n < 1 {
        n = 1
    }
    return n
}
if atomic.Load(&.cgoCallersUse) == 0 && .cgoCallers != nil && .cgoCallers[0] != 0 {
for < len(.cgoCallers) && .cgoCallers[] != 0 {
++
}
copy([:], .cgoCallers[:])
.cgoCallers[0] = 0
}
= gentraceback(.curg.syscallpc, .curg.syscallsp, 0, .curg, 0, &[], len()-, nil, nil, 0)
if > 0 {
+=
}
} else if {
= gentraceback(, , , , 0, &[0], len(), nil, nil, _TraceTrap|_TraceJumpStack)
}
= 0
= gentraceback(.libcallpc, .libcallsp, 0, .libcallg.ptr(), 0, &[0], len(), nil, nil, 0)
}
if == 0 && != nil && .vdsoSP != 0 {
= gentraceback(.vdsoPC, .vdsoSP, 0, , 0, &[0], len(), nil, nil, _TraceTrap|_TraceJumpStack)
}
= 2
if inVDSOPage() {
= funcPC(_VDSO) + sys.PCQuantum
func () {
if prof.hz != 0 {
:= 0
for < len(sigprofCallers) && sigprofCallers[] != 0 {
++
}
cpuprof.addNonGo(sigprofCallers[:])
}
atomic.Store(&sigprofCallersUse, 0)
}
return true
}
switch .funcID {
case funcID_gogo, funcID_systemstack, funcID_mcall, funcID_morestack:
return true
}
return false
}
if < 0 {
= 0
}
.mcache = mcache0
} else {
.mcache = allocmcache()
}
}
if raceenabled && .raceprocctx == 0 {
if == 0 {
.raceprocctx = raceprocctx0
raceprocctx0 = 0 // bootstrap
} else {
.raceprocctx = raceproccreate()
}
}
lockInit(&.timersLock, lockRankTimers)
func ( *p) () {
assertLockHeld(&sched.lock)
assertWorldStopped()
.runqtail--
globrunqputhead()
}
if .runnext != 0 {
globrunqputhead(.runnext.ptr())
.runnext = 0
}
if len(.timers) > 0 {
lock(&.timersLock)
lock(&.timersLock)
moveTimers(, .timers)
.timers = nil
.numTimers = 0
.adjustTimers = 0
.deletedTimers = 0
atomic.Store64(&.timer0When, 0)
unlock(&.timersLock)
unlock(&.timersLock)
if gcphase != _GCoff {
wbBufFlush1()
.gcw.dispose()
}
for := range .sudogbuf {
.sudogbuf[] = nil
}
.sudogcache = .sudogbuf[:0]
for := range .deferpool {
for := range .deferpoolbuf[] {
.deferpoolbuf[][] = nil
}
.deferpool[] = .deferpoolbuf[][:0]
}
systemstack(func() {
:= getg().m
:= .p.ptr()
.p.set()
racectxend(.timerRaceCtx)
.timerRaceCtx = 0
.p.set()
}
raceprocdestroy(.raceprocctx)
.raceprocctx = 0
}
.gcAssistTime = 0
.status = _Pdead
}
func ( int32) *p {
assertLockHeld(&sched.lock)
assertWorldStopped()
:= gomaxprocs
if < 0 || <= 0 {
throw("procresize: invalid arg")
}
if trace.enabled {
traceGomaxprocs()
}
:= nanotime()
if sched.procresizetime != 0 {
sched.totaltime += int64() * ( - sched.procresizetime)
}
sched.procresizetime =
:= ( + 31) / 32
copy(, idlepMask)
idlepMask =
:= make([]uint32, )
copy(, timerpMask)
timerpMask =
}
unlock(&allpLock)
}
for := ; < ; ++ {
:= allp[]
}
if int32(len(allp)) != {
lock(&allpLock)
allp = allp[:]
idlepMask = idlepMask[:]
timerpMask = timerpMask[:]
unlock(&allpLock)
}
var *p
for := - 1; >= 0; -- {
:= allp[]
if .m.p.ptr() == {
continue
}
.status = _Pidle
if runqempty() {
pidleput()
} else {
.m.set(mget())
.link.set()
=
}
}
stealOrder.reset(uint32())
var *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
atomic.Store((*uint32)(unsafe.Pointer()), uint32())
return
}
wirep()
.mcache.prepareForSweep()
if trace.enabled {
traceProcStart()
}
}
// Disassociate p and the current m.
func releasep() *p {
    _g_ := getg()

    if _g_.m.p == 0 {
        throw("releasep: invalid arg")
    }
    _p_ := _g_.m.p.ptr()
    if _p_.m.ptr() != _g_.m || _p_.status != _Prunning {
        print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " p->status=", _p_.status, "\n")
        throw("releasep: invalid p state")
    }
    if trace.enabled {
        traceProcStop(_g_.m.p.ptr())
    }
    _g_.m.p = 0
    _p_.m = 0
    _p_.status = _Pidle
    return _p_
}
func incidlelocked(v int32) {
    lock(&sched.lock)
    sched.nmidlelocked += v
    if v > 0 {
        checkdead()
    }
    unlock(&sched.lock)
}
func () {
assertLockHeld(&sched.lock)
if panicking > 0 {
return
}
var int32
if !iscgo && cgoHasExtraM {
:= lockextra(true)
:= extraMCount > 0
unlockextra()
if {
= 1
}
}
:= mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
if > {
return
}
if < 0 {
print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
throw("checkdead: inconsistent counts")
}
:= 0
lock(&allglock)
for := 0; < len(allgs); ++ {
:= allgs[]
if isSystemGoroutine(, false) {
continue
}
:= readgstatus()
switch &^ _Gscan {
case _Gwaiting,
_Gpreempted:
++
case _Grunnable,
_Grunning,
_Gsyscall:
print("runtime: checkdead: find g ", .goid, " in status ", , "\n")
throw("checkdead: runnable g")
}
}
unlock(&allglock)
if == 0 { // possible if main goroutine calls runtime·Goexit()
unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang
throw("no goroutines (main called runtime.Goexit) - deadlock!")
}
throw("checkdead: no m for timer")
}
.nextp.set()
notewakeup(&.park)
return
}
}
var forcegcperiod int64 = 2 * 60 * 1e9
atomic.Store(&sched.sysmonStarting, 0)
:= int64(0)
:= 0 // how many cycles in succession we had not wokeup somebody
:= uint32(0)
for {
if == 0 { // start with 20us sleep...
= 20
} else if > 50 { // start doubling the sleep after 1ms...
*= 2
}
if > 10*1000 { // up to 10ms
= 10 * 1000
}
usleep()
mDoFixup()
:= nanotime()
if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
lock(&sched.lock)
if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
:= false
, := timeSleepUntil()
if > {
atomic.Store(&sched.sysmonwait, 1)
:= forcegcperiod / 2
if - < {
= -
}
:= >= osRelaxMinNS
if {
osRelax(true)
}
= notetsleep(&sched.sysmonnote, )
mDoFixup()
if {
osRelax(false)
}
lock(&sched.lock)
atomic.Store(&sched.sysmonwait, 0)
noteclear(&sched.sysmonnote)
}
if {
= 0
= 20
}
}
unlock(&sched.lock)
}
= nanotime()
if *cgo_yield != nil {
asmcgocall(*cgo_yield, nil)
incidlelocked(-1)
injectglist(&)
incidlelocked(1)
}
}
mDoFixup()
if , := timeSleepUntil(); < {
startm(nil, false)
}
}
if retake() != 0 {
= 0
} else {
++
if := (gcTrigger{kind: gcTriggerTime, now: }); .test() && atomic.Load(&forcegc.idle) != 0 {
lock(&forcegc.lock)
forcegc.idle = 0
var gList
.push(forcegc.g)
injectglist(&)
unlock(&forcegc.lock)
}
if debug.schedtrace > 0 && +int64(debug.schedtrace)*1000000 <= {
=
schedtrace(debug.scheddetail > 0)
}
unlock(&sched.sysmonlock)
}
}
type sysmontick struct {
schedtick uint32
schedwhen int64
syscalltick uint32
syscallwhen int64
}
const forcePreemptNS = 10 * 1000 * 1000 // 10ms
func ( int64) uint32 {
continue
}
:= &.sysmontick
:= .status
:= false
= true
}
}
:= int64(.syscalltick)
if ! && int64(.syscalltick) != {
.syscalltick = uint32()
.syscallwhen =
continue
if runqempty() && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && .syscallwhen+10*1000*1000 > {
continue
incidlelocked(-1)
if atomic.Cas(&.status, , _Pidle) {
if trace.enabled {
traceGoSysBlock()
traceProcStop()
}
++
.syscalltick++
handoffp()
}
incidlelocked(1)
lock(&allpLock)
}
}
unlock(&allpLock)
return uint32()
}
if preemptMSupported && debug.asyncpreemptoff == 0 {
.preempt = true
preemptM()
}
return true
}
var starttime int64
func ( bool) {
:= nanotime()
if starttime == 0 {
starttime =
}
lock(&sched.lock)
print("SCHED ", (-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
if {
print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
for , := range allp {
:= .m.ptr()
:= atomic.Load(&.runqhead)
:= atomic.Load(&.runqtail)
if {
:= int64(-1)
if != nil {
= .id
}
print(" P", , ": status=", .status, " schedtick=", .schedtick, " syscalltick=", .syscalltick, " m=", , " runqsize=", -, " gfreecnt=", .gFree.n, " timerslen=", len(.timers), "\n")
print(" ")
if == 0 {
print("[")
}
print( - )
if == len(allp)-1 {
print("]\n")
}
}
}
if ! {
unlock(&sched.lock)
return
}
for := allm; != nil; = .alllink {
:= .p.ptr()
:= .curg
:= .lockedg.ptr()
:= int32(-1)
if != nil {
= .id
}
:= int64(-1)
if != nil {
= .goid
}
:= int64(-1)
if != nil {
= .goid
}
print(" M", .id, ": p=", , " curg=", , " mallocing=", .mallocing, " throwing=", .throwing, " preemptoff=", .preemptoff, ""+" locks=", .locks, " dying=", .dying, " spinning=", .spinning, " blocked=", .blocked, " lockedg=", , "\n")
}
lock(&allglock)
for := 0; < len(allgs); ++ {
:= allgs[]
:= .m
:= .lockedm.ptr()
:= int64(-1)
if != nil {
= .id
}
:= int64(-1)
if != nil {
= .id
}
print(" G", .goid, ": status=", readgstatus(), "(", .waitreason.String(), ") m=", , " lockedm=", , "\n")
}
unlock(&allglock)
unlock(&sched.lock)
}
// schedEnableUser enables or disables the scheduling of user goroutines.
//
// This does not stop already running user goroutines, so the caller
// should first stop the world when disabling user goroutines.
func schedEnableUser(enable bool) {
    lock(&sched.lock)
    if sched.disable.user == !enable {
        unlock(&sched.lock)
        return
    }
    sched.disable.user = !enable
    if enable {
        n := sched.disable.n
        sched.disable.n = 0
        globrunqputbatch(&sched.disable.runnable, n)
        unlock(&sched.lock)
        for ; n != 0 && sched.npidle != 0; n-- {
            startm(nil, false)
        }
    } else {
        unlock(&sched.lock)
    }
}
// Try to get a batch of G's from the global runnable queue.
// sched.lock must be held.
func globrunqget(_p_ *p, max int32) *g {
    assertLockHeld(&sched.lock)

    if sched.runqsize == 0 {
        return nil
    }

    n := sched.runqsize/gomaxprocs + 1
    if n > sched.runqsize {
        n = sched.runqsize
    }
    if max > 0 && n > max {
        n = max
    }
    if n > int32(len(_p_.runq))/2 {
        n = int32(len(_p_.runq)) / 2
    }

    sched.runqsize -= n

    gp := sched.runq.pop()
    n--
    for ; n > 0; n-- {
        gp1 := sched.runq.pop()
        runqput(_p_, gp1, false)
    }
    return gp
}
lock(&.timersLock)
if atomic.Load(&.numTimers) == 0 {
timerpMask.clear(.id)
}
unlock(&.timersLock)
}
const randomizeScheduler = raceenabled
goto
}
:= -
= / 2
if != uint32(len(.runq)/2) {
throw("runqputslow: queue is not full")
}
for := uint32(0); < ; ++ {
[] = .runq[(+)%uint32(len(.runq))].ptr()
}
if !atomic.CasRel(&.runqhead, , +) { // cas-release, commits consume
return false
}
[] =
if randomizeScheduler {
for := uint32(1); <= ; ++ {
:= fastrandn( + 1)
[], [] = [], []
}
}
// runqputbatch tries to put all the G's on q on the local runnable queue.
// If the queue is full, they are put on the global queue; in that case
// this will temporarily acquire the scheduler lock.
// Executed only by the owner P.
func runqputbatch(pp *p, q *gQueue, qsize int) {
    h := atomic.LoadAcq(&pp.runqhead)
    t := pp.runqtail
    n := uint32(0)
    for !q.empty() && t-h < uint32(len(pp.runq)) {
        gp := q.pop()
        pp.runq[t%uint32(len(pp.runq))].set(gp)
        t++
        n++
    }
    qsize -= int(n)

    if randomizeScheduler {
        off := func(o uint32) uint32 {
            return (pp.runqtail + o) % uint32(len(pp.runq))
        }
        for i := uint32(1); i < n; i++ {
            j := fastrandn(i + 1)
            pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
        }
    }

    atomic.StoreRel(&pp.runqtail, t)
    if !q.empty() {
        lock(&sched.lock)
        globrunqputbatch(q, int32(qsize))
        unlock(&sched.lock)
    }
}
// Get g from local runnable queue.
// If inheritTime is true, gp should inherit the remaining time in the
// current time slice. Otherwise, it should start a new time slice.
// Executed only by the owner P.
func runqget(_p_ *p) (gp *g, inheritTime bool) {
    // If there's a runnext, it's the next G to run.
    for {
        next := _p_.runnext
        if next == 0 {
            break
        }
        if _p_.runnext.cas(next, 0) {
            return next.ptr(), true
        }
    }

    for {
        h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers
        t := _p_.runqtail
        if t == h {
            return nil, false
        }
        gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
        if atomic.CasRel(&_p_.runqhead, h, h+1) { // cas-release, commits consume
            return gp, false
        }
    }
}
if := .runnext; != 0 {
osyield()
}
}
if !.runnext.cas(, 0) {
continue
}
[%uint32(len())] =
return 1
}
}
return 0
}
if > uint32(len(.runq)/2) { // read inconsistent h and t
continue
}
for := uint32(0); < ; ++ {
:= .runq[(+)%uint32(len(.runq))]
[(+)%uint32(len())] =
}
if atomic.CasRel(&.runqhead, , +) { // cas-release, commits consume
return
}
}
}
// Steal half of elements from local runnable queue of p2
// and put onto local runnable queue of p.
// Returns one of the stolen elements (or nil if failed).
func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
    t := _p_.runqtail
    n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
    if n == 0 {
        return nil
    }
    n--
    gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
    if n == 0 {
        return gp
    }
    h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers
    if t-h+n >= uint32(len(_p_.runq)) {
        throw("runqsteal: runq overflow")
    }
    atomic.StoreRel(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
    return gp
}
//go:linkname setMaxThreads runtime/debug.setMaxThreads
func setMaxThreads(in int) (out int) {
    lock(&sched.lock)
    out = int(sched.maxmcount)
    if in > 0x7fffffff { // MaxInt32
        sched.maxmcount = 0x7fffffff
    } else {
        sched.maxmcount = int32(in)
    }
    checkmcount()
    unlock(&sched.lock)
    return
}
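Example (not part of proc.go): setMaxThreads backs runtime/debug.SetMaxThreads; a minimal sketch of adjusting and then restoring the thread limit (sched.maxmcount above, 10000 by default).

package main

import (
    "fmt"
    "runtime/debug"
)

func main() {
    // SetMaxThreads installs a new limit and returns the previous one.
    prev := debug.SetMaxThreads(20000)
    fmt.Println("previous max threads:", prev)

    // Restore the original limit.
    debug.SetMaxThreads(prev)
}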
func haveexperiment(name string) bool {
    x := sys.Goexperiment
    for x != "" {
        xname := ""
        i := bytealg.IndexByteString(x, ',')
        if i < 0 {
            xname, x = x, ""
        } else {
            xname, x = x[:i], x[i+1:]
        }
        if xname == name {
            return true
        }
        if len(xname) > 2 && xname[:2] == "no" && xname[2:] == name {
            return false
        }
    }
    return false
}
//go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
//go:nosplit
func sync_runtime_procUnpin() {
    procUnpin()
}

//go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
//go:nosplit
func sync_atomic_runtime_procUnpin() {
    procUnpin()
}

//go:linkname sync_runtime_doSpin sync.runtime_doSpin
//go:nosplit
func sync_runtime_doSpin() {
    procyield(active_spin_cnt)
}
var stealOrder randomOrder

// randomOrder/randomEnum are helper types for randomized work stealing.
// They allow to enumerate all Ps in different pseudo-random orders without repetitions.
// The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
// are coprime, then a sequence of (i + X) % GOMAXPROCS gives the required enumeration.
type randomOrder struct {
    count    uint32
    coprimes []uint32
}

type randomEnum struct {
    i     uint32
    count uint32
    pos   uint32
    inc   uint32
}

func (ord *randomOrder) reset(count uint32) {
    ord.count = count
    ord.coprimes = ord.coprimes[:0]
    for i := uint32(1); i <= count; i++ {
        if gcd(i, count) == 1 {
            ord.coprimes = append(ord.coprimes, i)
        }
    }
}

func (ord *randomOrder) start(i uint32) randomEnum {
    return randomEnum{
        count: ord.count,
        pos:   i % ord.count,
        inc:   ord.coprimes[i%uint32(len(ord.coprimes))],
    }
}

func (enum *randomEnum) done() bool {
    return enum.i == enum.count
}

func (enum *randomEnum) next() {
    enum.i++
    enum.pos = (enum.pos + enum.inc) % enum.count
}

func (enum *randomEnum) position() uint32 {
    return enum.pos
}

func gcd(a, b uint32) uint32 {
    for b != 0 {
        a, b = b, a%b
    }
    return a
}
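Example (not part of proc.go): a standalone re-implementation sketch of the coprime-stride enumeration used by randomOrder above, showing that a stride coprime to the count visits every index exactly once.

package main

import "fmt"

func gcd(a, b uint32) uint32 {
    for b != 0 {
        a, b = b, a%b
    }
    return a
}

func main() {
    const count = 8
    // Any stride coprime to count enumerates 0..count-1 without repetition.
    var coprimes []uint32
    for i := uint32(1); i <= count; i++ {
        if gcd(i, count) == 1 {
            coprimes = append(coprimes, i)
        }
    }
    inc := coprimes[3%len(coprimes)] // pick a stride (stand-in for a random choice)
    pos := uint32(5) % count         // pick a starting position
    for i := uint32(0); i < count; i++ {
        fmt.Println("visit P", pos)
        pos = (pos + inc) % count
    }
}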
// An initTask represents the set of initializations that need to be done for a package.
type initTask struct {
    state uintptr // 0 = uninitialized, 1 = in progress, 2 = done
    ndeps uintptr
    nfns  uintptr
    // followed by ndeps instances of an *initTask, one per package depended on
    // followed by nfns pcs, one per init function to run
}

// inittrace stores statistics for init functions which are
// updated by malloc and newproc when active is true.
var inittrace tracestat

type tracestat struct {
    active bool   // init tracing activation status
    id     int64  // init goroutine id
    allocs uint64 // heap allocations
    bytes  uint64 // heap allocated bytes
}
func doInit(t *initTask) {
    switch t.state {
    case 2: // fully initialized
        return
    case 1: // initialization in progress
        throw("recursive call during initialization - linker skew")
    default: // not initialized yet
        t.state = 1 // initialization in progress

        for i := uintptr(0); i < t.ndeps; i++ {
            p := add(unsafe.Pointer(t), (3+i)*sys.PtrSize)
            t2 := *(**initTask)(p)
            doInit(t2)
        }

        if t.nfns == 0 {
            t.state = 2 // initialization done
            return
        }

        var (
            start  int64
            before tracestat
        )

        if inittrace.active {
            start = nanotime()
            // Load stats non-atomically since inittrace is updated only by this init goroutine.
            before = inittrace
        }

        firstFunc := add(unsafe.Pointer(t), (3+t.ndeps)*sys.PtrSize)
        for i := uintptr(0); i < t.nfns; i++ {
            p := add(firstFunc, i*sys.PtrSize)
            f := *(*func())(unsafe.Pointer(&p))
            f()
        }

        if inittrace.active {
            end := nanotime()
            // Load stats non-atomically since inittrace is updated only by this init goroutine.
            after := inittrace

            pkg := funcpkgpath(findfunc(funcPC(firstFunc)))

            var sbuf [24]byte
            print("init ", pkg, " @")
            print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
            print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
            print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
            print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
            print("\n")
        }

        t.state = 2 // initialization done
    }
}
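Example (not part of proc.go): doInit emits the timing line above when init tracing is enabled via GODEBUG=inittrace=1; a minimal program whose init work shows up as one such line.

package main

// Running any Go program with GODEBUG=inittrace=1 makes doInit print one
// line per package init, in the format produced by the print calls above:
//   init <pkg> @<start> ms, <clock> ms clock, <bytes> bytes, <allocs> allocs

import "fmt"

var table = make(map[string]int)

func init() {
    // Work done here is attributed to this package's init line.
    for i := 0; i < 1000; i++ {
        table[fmt.Sprint(i)] = i
    }
}

func main() {
    fmt.Println(len(table))
}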