// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/bytealg"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
// The code in this file implements stack trace walking for all architectures.
// The most important fact about a given architecture is whether it uses a link register.
// On systems with link registers, the prologue for a non-leaf function stores the
// incoming value of LR at the bottom of the newly allocated stack frame.
// On systems without link registers, the architecture pushes a return PC during
// the call instruction, so the return PC ends up above the stack frame.
// In this file, the return PC is always called LR, no matter how it was found.
//
// To date, the opposite of a link register architecture is an x86 architecture.
// This code may need to change if some other kind of non-link-register
// architecture comes along.
//
// The other important fact is the size of a pointer: on 32-bit systems the LR
// takes up only 4 bytes on the stack, while on 64-bit systems it takes up 8 bytes.
// Typically this is ptrSize.
//
// As an exception, amd64p32 had ptrSize == 4 but the CALL instruction still
// stored an 8-byte return PC onto the stack. To accommodate this, we used
// regSize as the size of the architecture-pushed return PC.
//
// usesLR is defined below in terms of minFrameSize, which is defined in
// arch_$GOARCH.go. ptrSize and regSize are defined in stubs.go.

const usesLR = sys.MinFrameSize > 0
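
// As a minimal illustration (a sketch, not code that runs here), the two
// regimes differ in where the unwinder reads the caller's PC when a frame
// has just been entered and frame.sp is the callee's stack pointer:
//
//	if usesLR {
//		// Link register machine: the prologue stored the incoming
//		// LR at the bottom of the frame, so it is at 0(SP).
//		callerPC := *(*uintptr)(unsafe.Pointer(frame.sp))
//		_ = callerPC
//	} else {
//		// x86: CALL pushed the return PC just above the frame;
//		// read it and pop it to reach the caller's SP.
//		callerPC := uintptr(*(*sys.Uintreg)(unsafe.Pointer(frame.sp)))
//		frame.sp += sys.RegSize
//		_ = callerPC
//	}
//
// gentraceback below performs exactly this read when it starts at a frame
// whose PC is zero (a likely nil function call).
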
// Traceback over the deferred function calls.
// Report them like calls that have been invoked but not started executing yet.
func tracebackdefers(gp *g, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer) {
	var frame stkframe
	for d := gp._defer; d != nil; d = d.link {
		fn := d.fn
		if fn == nil {
			// Defer of nil function. Args don't matter.
			frame.pc = 0
			frame.fn = funcInfo{}
			frame.argp = 0
			frame.arglen = 0
			frame.argmap = nil
		} else {
			frame.pc = fn.fn
			f := findfunc(frame.pc)
			if !f.valid() {
				print("runtime: unknown pc in defer ", hex(frame.pc), "\n")
				throw("unknown pc")
			}
			frame.fn = f
			frame.argp = uintptr(deferArgs(d))
			var ok bool
			frame.arglen, frame.argmap, ok = getArgInfoFast(f, true)
			if !ok {
				frame.arglen, frame.argmap = getArgInfo(&frame, f, true, fn)
			}
		}
		frame.continpc = frame.pc
		if !callback((*stkframe)(noescape(unsafe.Pointer(&frame))), v) {
			return
		}
	}
}
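
// The callback shape, as a sketch (the runtime's real caller is
// adjustdefers in stack.go; the function literal below is hypothetical):
//
//	tracebackdefers(gp, func(frame *stkframe, arg unsafe.Pointer) bool {
//		// Inspect frame.fn, frame.argp, frame.arglen, frame.argmap here.
//		return true // false stops the walk early
//	}, nil)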

const sizeofSkipFunction = 256
// Generic traceback. Handles runtime stack prints (pcbuf == nil),
// the runtime.Callers function (pcbuf != nil), as well as the garbage
// collector (callback != nil). A little clunky to merge these, but avoids
// duplicating the code and all its subtlety.
//
// The skip argument is only valid with pcbuf != nil and counts the number
// of logical frames to skip rather than physical frames (with inlining, a
// PC in pcbuf can represent multiple calls). If a PC is partially skipped
// and max > 1, pcbuf[1] will be runtime.skipPleaseUseCallersFrames+N where
// N indicates the number of logical frames to skip in pcbuf[0].
func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max int, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer, flags uint) int {
	if skip > 0 && callback != nil {
		throw("gentraceback callback cannot be used with non-zero skip")
	}

	// Don't call this "g"; it's too easy to get "g" and "gp" confused.
	if ourg := getg(); ourg == gp && ourg == ourg.m.curg {
		// The starting sp has been passed in as a uintptr, and the caller may
		// have other uintptr-typed stack references as well.
		// If during one of the calls that got us here or during one of the
		// callbacks below the stack must be grown, all these uintptr references
		// to the stack will not be updated, and gentraceback will continue
		// to inspect the old stack memory, which may no longer be valid.
		// Even if all the variables were updated correctly, it is not clear that
		// we want to expose a traceback that begins on one stack and ends
		// on another stack. That could confuse callers quite a bit.
		// Instead, we require that gentraceback and any other function that
		// accepts an sp for the current goroutine (typically obtained by
		// calling getcallersp) must not run on that goroutine's stack but
		// instead on the g0 stack.
		throw("gentraceback cannot trace user goroutine on its own stack")
	}
	level, _, _ := gotraceback()

	var ctxt *funcval // Context pointer for unstarted goroutines. See issue #25897.

	if pc0 == ^uintptr(0) && sp0 == ^uintptr(0) { // Signal to fetch saved values from gp.
		if gp.syscallsp != 0 {
			pc0 = gp.syscallpc
			sp0 = gp.syscallsp
			if usesLR {
				lr0 = 0
			}
		} else {
			pc0 = gp.sched.pc
			sp0 = gp.sched.sp
			if usesLR {
				lr0 = gp.sched.lr
			}
			ctxt = (*funcval)(gp.sched.ctxt)
		}
	}

	nprint := 0
	var frame stkframe
	frame.pc = pc0
	frame.sp = sp0
	if usesLR {
		frame.lr = lr0
	}
	waspanic := false
	cgoCtxt := gp.cgoCtxt
	printing := pcbuf == nil && callback == nil

	// If the PC is zero, it's likely a nil function call.
	// Start in the caller's frame.
	if frame.pc == 0 {
		if usesLR {
			frame.pc = *(*uintptr)(unsafe.Pointer(frame.sp))
			frame.lr = 0
		} else {
			frame.pc = uintptr(*(*sys.Uintreg)(unsafe.Pointer(frame.sp)))
			frame.sp += sys.RegSize
		}
	}

	f := findfunc(frame.pc)
	if !f.valid() {
		if callback != nil || printing {
			print("runtime: unknown pc ", hex(frame.pc), "\n")
			tracebackHexdump(gp.stack, &frame, 0)
		}
		if callback != nil {
			throw("unknown pc")
		}
		return 0
	}
	frame.fn = f

	var cache pcvalueCache

	lastFuncID := funcID_normal
	n := 0
	for n < max {
		// Typically:
		//	pc is the PC of the running function.
		//	sp is the stack pointer at that program counter.
		//	fp is the frame pointer (caller's stack pointer) at that program counter, or nil if unknown.
		//	stk is the stack containing sp.
		//	The caller's program counter is lr, unless lr is zero, in which case it is *(uintptr*)sp.
		f = frame.fn
		if f.pcsp == 0 {
			// No frame information, must be external function, like race support.
			// See golang.org/issue/13568.
			break
		}

		// Found an actual function.
		// Derive frame pointer and link register.
		if frame.fp == 0 {
			// Jump over system stack transitions. If we're on g0 and there's
			// a user goroutine, try to jump. Otherwise this is a regular call.
			if flags&_TraceJumpStack != 0 && gp == gp.m.g0 && gp.m.curg != nil {
				switch f.funcID {
				case funcID_morestack:
					// morestack does not return normally -- newstack()
					// gogo's to curg.sched. Match that.
					// This keeps morestack() from showing up in the backtrace,
					// but that makes some sense since it'll never be returned
					// to.
					frame.pc = gp.m.curg.sched.pc
					frame.fn = findfunc(frame.pc)
					f = frame.fn
					frame.sp = gp.m.curg.sched.sp
					cgoCtxt = gp.m.curg.cgoCtxt
				case funcID_systemstack:
					// systemstack returns normally, so just follow the
					// stack transition.
					frame.sp = gp.m.curg.sched.sp
					cgoCtxt = gp.m.curg.cgoCtxt
				}
			}
			frame.fp = frame.sp + uintptr(funcspdelta(f, frame.pc, &cache))
			if !usesLR {
				// On x86, call instruction pushes return PC before entering new function.
				frame.fp += sys.RegSize
			}
		}
		var flr funcInfo
		if topofstack(f, gp.m != nil && gp == gp.m.g0) {
			frame.lr = 0
			flr = funcInfo{}
		} else if usesLR && f.funcID == funcID_jmpdefer {
			// jmpdefer modifies SP/LR/PC non-atomically.
			// If a profiling interrupt arrives during jmpdefer, the
			// stack unwind may see a mismatched register set and get
			// confused. Stop if we see PC within jmpdefer to avoid
			// that confusion.
			// See golang.org/issue/8153.
			if callback != nil {
				throw("traceback_arm: found jmpdefer when tracing with callback")
			}
			frame.lr = 0
		} else {
			var lrPtr uintptr
			if usesLR {
				if n == 0 && frame.sp < frame.fp || frame.lr == 0 {
					lrPtr = frame.sp
					frame.lr = *(*uintptr)(unsafe.Pointer(lrPtr))
				}
			} else {
				if frame.lr == 0 {
					lrPtr = frame.fp - sys.RegSize
					frame.lr = uintptr(*(*sys.Uintreg)(unsafe.Pointer(lrPtr)))
				}
			}
			flr = findfunc(frame.lr)
			if !flr.valid() {
				// This happens if you get a profiling interrupt at just the wrong time.
				// In that context it is okay to stop early.
				// But if callback is set, we're doing a garbage collection and must
				// get everything, so crash loudly.
				doPrint := printing
				if doPrint && gp.m.incgo && f.funcID == funcID_sigpanic {
					// We can inject sigpanic calls directly into C code,
					// in which case we'll see a C return PC. Don't complain.
					doPrint = false
				}
				if callback != nil || doPrint {
					print("runtime: unexpected return pc for ", funcname(f), " called from ", hex(frame.lr), "\n")
					tracebackHexdump(gp.stack, &frame, lrPtr)
				}
				if callback != nil {
					throw("unknown caller pc")
				}
			}
		}

		frame.varp = frame.fp
		if !usesLR {
			// On x86, call instruction pushes return PC before entering new function.
			frame.varp -= sys.RegSize
		}

		// For architectures with frame pointers, if there's
		// a frame, then there's a saved frame pointer here.
		if frame.varp > frame.sp && (GOARCH == "amd64" || GOARCH == "arm64") {
			frame.varp -= sys.RegSize
		}

		// Derive size of arguments.
		// Most functions have a fixed-size argument block,
		// so we can use metadata about the function f.
		// Not all, though: there are some variadic functions
		// in package runtime and reflect, and for those we use call-specific
		// metadata recorded by f's caller.
		if callback != nil || printing {
			frame.argp = frame.fp + sys.MinFrameSize
			var ok bool
			frame.arglen, frame.argmap, ok = getArgInfoFast(f, callback != nil)
			if !ok {
				frame.arglen, frame.argmap = getArgInfo(&frame, f, callback != nil, ctxt)
			}
		}
		ctxt = nil // ctxt is only needed to get arg maps for the topmost frame

		// Determine frame's 'continuation PC', where it can continue.
		// Normally this is the return address on the stack, but if sigpanic
		// is immediately below this function on the stack, then the frame
		// stopped executing due to a trap, and frame.pc is probably not
		// a safe point for looking up liveness information. In this panicking case,
		// the function either doesn't return at all (if it has no defers or if the
		// defers do not recover) or it returns from one of the calls to
		// deferproc a second time (if the corresponding deferred func recovers).
		// In the latter case, use a deferreturn call site as the continuation pc.
		frame.continpc = frame.pc
		if waspanic {
			if frame.fn.deferreturn != 0 {
				frame.continpc = frame.fn.entry + uintptr(frame.fn.deferreturn) + 1
				// Note: this may perhaps keep return variables alive longer than
				// strictly necessary, as we are using "function has a defer statement"
				// as a proxy for "function actually deferred something". It seems
				// to be a minor drawback. (We used to actually look through the
				// gp._defer for a defer corresponding to this function, but that
				// is hard to do with defer records on the stack during a stack copy.)
				// Note: the +1 is to offset the -1 that stack.go:getStackMap does
				// to back up a return address, to make sure the pc is in the CALL
				// instruction.
			} else {
				frame.continpc = 0
			}
		}

		if callback != nil {
			if !callback((*stkframe)(noescape(unsafe.Pointer(&frame))), v) {
				return n
			}
		}

		if pcbuf != nil {
			pc := frame.pc
			// backup to CALL instruction to read inlining info (same logic as below)
			tracepc := pc
			// Normally, pc is a return address. In that case, we want to look up
			// file/line information using pc-1, because that is the pc of the
			// call instruction (more precisely, the last byte of the call instruction).
			// Callers expect the pc buffer to contain return addresses and do the
			// same -1 themselves, so we keep pc unchanged.
			// When the pc is from a signal (e.g. profiler or segv) then we want
			// to look up file/line information using pc, and we store pc+1 in the
			// pc buffer so callers can unconditionally subtract 1 before looking up.
			// See issue 34123.
			// The pc can be at function entry when the frame is initialized without
			// actually running code, like runtime.mstart.
			if (n == 0 && flags&_TraceTrap != 0) || waspanic || pc == f.entry {
				pc++
			} else {
				tracepc--
			}

			// If there is inlining info, record the inner frames.
			if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil {
				inltree := (*[1 << 20]inlinedCall)(inldata)
				for {
					ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, &cache)
					if ix < 0 {
						break
					}
					if inltree[ix].funcID == funcID_wrapper && elideWrapperCalling(lastFuncID) {
						// ignore wrappers
					} else if skip > 0 {
						skip--
					} else if n < max {
						(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = pc
						n++
					}
					lastFuncID = inltree[ix].funcID
					// Back up to an instruction in the "caller".
					tracepc = frame.fn.entry + uintptr(inltree[ix].parentPc)
					pc = tracepc + 1
				}
			}
			// Record the main frame.
			if f.funcID == funcID_wrapper && elideWrapperCalling(lastFuncID) {
				// Ignore wrapper functions (except when they trigger panics).
			} else if skip > 0 {
				skip--
			} else if n < max {
				(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = pc
				n++
			}
			lastFuncID = f.funcID
			n-- // offset n++ below
		}

		if printing {
			// assume skip=0 for printing.
			//
			// Never elide wrappers if we haven't printed
			// any frames. And don't elide wrappers that
			// called panic rather than the wrapped
			// function. Otherwise, leave them out.

			// backup to CALL instruction to read inlining info (same logic as below)
			tracepc := frame.pc
			if (n > 0 || flags&_TraceTrap == 0) && frame.pc > f.entry && !waspanic {
				tracepc--
			}
			// If there is inlining info, print the inner frames.
			if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil {
				inltree := (*[1 << 20]inlinedCall)(inldata)
				var inlFunc _func
				inlFuncInfo := funcInfo{&inlFunc, f.datap}
				for {
					ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, nil)
					if ix < 0 {
						break
					}

					// Create a fake _func for the
					// inlined function.
					inlFunc.nameoff = inltree[ix].func_
					inlFunc.funcID = inltree[ix].funcID

					if (flags&_TraceRuntimeFrames) != 0 || showframe(inlFuncInfo, gp, nprint == 0, inlFuncInfo.funcID, lastFuncID) {
						name := funcname(inlFuncInfo)
						file, line := funcline(f, tracepc)
						print(name, "(...)\n")
						print("\t", file, ":", line, "\n")
						nprint++
					}
					lastFuncID = inltree[ix].funcID
					// Back up to an instruction in the "caller".
					tracepc = frame.fn.entry + uintptr(inltree[ix].parentPc)
				}
			}
			if (flags&_TraceRuntimeFrames) != 0 || showframe(f, gp, nprint == 0, f.funcID, lastFuncID) {
				// Print during crash.
				//	main(0x1, 0x2, 0x3)
				//		/home/rsc/go/src/runtime/x.go:23 +0xf
				//
				name := funcname(f)
				file, line := funcline(f, tracepc)
				if name == "runtime.gopanic" {
					name = "panic"
				}
				print(name, "(")
				argp := (*[100]uintptr)(unsafe.Pointer(frame.argp))
				for i := uintptr(0); i < frame.arglen/sys.PtrSize; i++ {
					if i >= 10 {
						print(", ...")
						break
					}
					if i != 0 {
						print(", ")
					}
					print(hex(argp[i]))
				}
				print(")\n")
				print("\t", file, ":", line)
				if frame.pc > f.entry {
					print(" +", hex(frame.pc-f.entry))
				}
				if gp.m != nil && gp.m.throwing > 0 && gp == gp.m.curg || level >= 2 {
					print(" fp=", hex(frame.fp), " sp=", hex(frame.sp), " pc=", hex(frame.pc))
				}
				print("\n")
				nprint++
			}
			lastFuncID = f.funcID
		}
		n++

		if f.funcID == funcID_cgocallback && len(cgoCtxt) > 0 {
			ctxt := cgoCtxt[len(cgoCtxt)-1]
			cgoCtxt = cgoCtxt[:len(cgoCtxt)-1]

			// skip only applies to Go frames.
			// callback != nil only used when we only care
			// about Go frames.
			if skip == 0 && callback == nil {
				n = tracebackCgoContext(pcbuf, printing, ctxt, n, max)
			}
		}

		waspanic = f.funcID == funcID_sigpanic
		injectedCall := waspanic || f.funcID == funcID_asyncPreempt

		// Do not unwind past the bottom of the stack.
		if !flr.valid() {
			break
		}

		// Unwind to next frame.
		frame.fn = flr
		frame.pc = frame.lr
		frame.lr = 0
		frame.sp = frame.fp
		frame.fp = 0
		frame.argmap = nil

		// On link register architectures, sighandler saves the LR on stack
		// before faking a call.
		if usesLR && injectedCall {
			x := *(*uintptr)(unsafe.Pointer(frame.sp))
			frame.sp += sys.MinFrameSize
			if GOARCH == "arm64" {
				// arm64 needs 16-byte aligned SP, always
				frame.sp += sys.PtrSize
			}
			f = findfunc(frame.pc)
			frame.fn = f
			if !f.valid() {
				frame.pc = x
			} else if funcspdelta(f, frame.pc, &cache) == 0 {
				frame.lr = x
			}
		}
	}

	if printing {
		n = nprint
	}
	// Note that panic != nil is okay here: there can be leftover panics,
	// because the defers on the panic stack do not nest in frame order as
	// they do on the defer stack. If you have:
	//
	//	frame 1 defers d1
	//	frame 2 defers d2
	//	frame 3 defers d3
	//	frame 4 panics
	//	frame 4's panic starts running defers
	//	frame 5, running d3, defers d4
	//	frame 5 panics
	//	frame 5's panic starts running defers
	//	frame 6, running d4, garbage collects
	//	frame 6, running d2, garbage collects
	//
	// During the execution of d4, the panic stack is d4 -> d3, which
	// is nested properly, and we'll treat frame 3 as resumable, because we
	// can find d3. (And in fact frame 3 is resumable. If d4 recovers
	// and frame 5 continues running d3, d3 can recover and we'll
	// resume execution in (returning from) frame 3.)
	//
	// During the execution of d2, however, the panic stack is d2 -> d3,
	// which is inverted. The scan will match d2 to frame 2 but having
	// d2 on the stack until then means it will not match d3 to frame 3.
	// This is okay: if we're running d2, then all the defers after d2 have
	// completed and their corresponding frames are dead. Not finding d3
	// for frame 3 means we'll set frame 3's continpc == 0, which is correct
	// (frame 3 is dead). At the end of the walk the panic stack can thus
	// contain defers (d3 in this case) for dead frames. The inversion here
	// always indicates a dead frame, and the effect of the inversion on the
	// scan is to hide those dead frames, so the scan is still okay:
	// what's left on the panic stack are exactly (and only) the dead frames.
	//
	// We require callback != nil here because only when callback != nil
	// do we know that gentraceback is being called in a "must be correct"
	// context as opposed to a "best effort" context. The tracebacks with
	// callbacks only happen when everything is stopped nicely.
	// At other times, such as when gathering a stack for a profiling signal
	// or when printing a traceback during a crash, everything may not be
	// stopped nicely, and the stack walk may not be able to complete.
	if callback != nil && n < max && frame.sp != gp.stktopsp {
		print("runtime: g", gp.goid, ": frame.sp=", hex(frame.sp), " top=", hex(gp.stktopsp), "\n")
		print("\tstack=[", hex(gp.stack.lo), "-", hex(gp.stack.hi), "] n=", n, " max=", max, "\n")
		throw("traceback did not unwind completely")
	}

	return n
}
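
// For reference, a sketch of how the skip count surfaces through the public
// API (hypothetical user code, not part of the runtime): runtime.Callers
// fills a buffer with return addresses, and CallersFrames performs the pc-1
// adjustment and inline expansion described above:
//
//	pcs := make([]uintptr, 16)
//	n := runtime.Callers(2, pcs) // 2 skips runtime.Callers and its caller
//	frames := runtime.CallersFrames(pcs[:n])
//	for {
//		frame, more := frames.Next()
//		println(frame.Function, frame.File, frame.Line)
//		if !more {
//			break
//		}
//	}
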
// reflectMethodValue is a partial duplicate of reflect.makeFuncImpl
// and reflect.methodValue.
type reflectMethodValue struct {
	fn     uintptr
	stack  *bitvector // ptrmap for both args and results
	argLen uintptr    // just args
}
// getArgInfoFast returns the argument frame information for a call to f.
// It is short and inlineable. However, it does not handle all functions.
// If ok reports false, you must call getArgInfo instead.
// TODO(josharian): once we do mid-stack inlining,
// call getArgInfo directly from getArgInfoFast and stop returning an ok bool.
func getArgInfoFast(f funcInfo, needArgMap bool) (arglen uintptr, argmap *bitvector, ok bool) {
	return uintptr(f.args), nil, !(needArgMap && f.args == _ArgsSizeUnknown)
}
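
// The intended pairing, as already used in tracebackdefers and gentraceback
// above: try the fast path first, and fall back only when it reports !ok:
//
//	arglen, argmap, ok := getArgInfoFast(f, needArgMap)
//	if !ok {
//		arglen, argmap = getArgInfo(frame, f, needArgMap, ctxt)
//	}
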
// getArgInfo returns the argument frame information for a call to f
// with call frame frame.
//
// This is used for both actual calls with active stack frames and for
// deferred calls or goroutines that are not yet executing. If this is an actual
// call, ctxt must be nil (getArgInfo will retrieve what it needs from
// the active stack frame). If this is a deferred call or unstarted goroutine,
// ctxt must be the function object that was deferred or go'd.
func getArgInfo(frame *stkframe, f funcInfo, needArgMap bool, ctxt *funcval) (arglen uintptr, argmap *bitvector) {
	arglen = uintptr(f.args)
	if needArgMap && f.args == _ArgsSizeUnknown {
		// Extract argument bitmaps for reflect stubs from the calls they made to reflect.
		switch funcname(f) {
		case "reflect.makeFuncStub", "reflect.methodValueCall":
			// These take a *reflect.methodValue as their
			// context register.
			var mv *reflectMethodValue
			var retValid bool
			if ctxt != nil {
				// This is not an actual call, but a
				// deferred call or an unstarted goroutine.
				// The function value is itself the
				// *reflect.methodValue.
				mv = (*reflectMethodValue)(unsafe.Pointer(ctxt))
			} else {
				// This is a real call that took the
				// *reflect.methodValue as its context
				// register and immediately saved it
				// to 0(SP). Get the methodValue from
				// 0(SP).
				arg0 := frame.sp + sys.MinFrameSize
				mv = *(**reflectMethodValue)(unsafe.Pointer(arg0))
				// Figure out whether the return values are
				// valid. Reflect will update this value after
				// it copies in the return values.
				retValid = *(*bool)(unsafe.Pointer(arg0 + 3*sys.PtrSize))
			}
			if mv.fn != f.entry {
				print("runtime: confused by ", funcname(f), "\n")
				throw("reflect mismatch")
			}
			bv := mv.stack
			arglen = uintptr(bv.n * sys.PtrSize)
			if !retValid {
				arglen = uintptr(mv.argLen) &^ (sys.PtrSize - 1)
			}
			argmap = bv
		}
	}
	return
}
// tracebackCgoContext handles tracing back a cgo context value, from
// the context argument to setCgoTraceback, for the gentraceback
// function. It returns the new value of n.
func tracebackCgoContext(pcbuf *uintptr, printing bool, ctxt uintptr, n, max int) int {
	var cgoPCs [32]uintptr
	cgoContextPCs(ctxt, cgoPCs[:])
	var arg cgoSymbolizerArg
	anySymbolized := false
	for _, pc := range cgoPCs {
		if pc == 0 || n >= max {
			break
		}
		if pcbuf != nil {
			(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = pc
		}
		if printing {
			if cgoSymbolizer == nil {
				print("non-Go function at pc=", hex(pc), "\n")
			} else {
				c := printOneCgoTraceback(pc, max-n, &arg)
				n += c - 1 // +1 a few lines down
				anySymbolized = true
			}
		}
		n++
	}
	if anySymbolized {
		arg.pc = 0
		callCgoSymbolizer(&arg)
	}
	return n
}

func printcreatedby(gp *g) {
	// Show what created goroutine, except main goroutine (goid 1).
	pc := gp.gopc
	f := findfunc(pc)
	if f.valid() && showframe(f, gp, false, funcID_normal, funcID_normal) && gp.goid != 1 {
		printcreatedby1(f, pc)
	}
}

func printcreatedby1(f funcInfo, pc uintptr) {
	print("created by ", funcname(f), "\n")
	tracepc := pc // back up to CALL instruction for funcline.
	if pc > f.entry {
		tracepc -= sys.PCQuantum
	}
	file, line := funcline(f, tracepc)
	print("\t", file, ":", line)
	if pc > f.entry {
		print(" +", hex(pc-f.entry))
	}
	print("\n")
}

func traceback(pc, sp, lr uintptr, gp *g) {
	traceback1(pc, sp, lr, gp, 0)
}
// tracebacktrap is like traceback but expects that the PC and SP were obtained
// from a trap, not from gp->sched or gp->syscallpc/gp->syscallsp or getcallerpc/getcallersp.
// Because they are from a trap instead of from a saved pair,
// the initial PC must not be rewound to the previous instruction.
// (All the saved pairs record a PC that is a return address, so we
// rewind it into the CALL instruction.)
// If gp.m.libcall{g,pc,sp} information is available, it uses that information in preference to
// the pc/sp/lr passed in.
func tracebacktrap(pc, sp, lr uintptr, gp *g) {
	if gp.m.libcallsp != 0 {
		// We're in C code somewhere, traceback from the saved position.
		traceback1(gp.m.libcallpc, gp.m.libcallsp, 0, gp.m.libcallg.ptr(), 0)
		return
	}
	traceback1(pc, sp, lr, gp, _TraceTrap)
}

func traceback1(pc, sp, lr uintptr, gp *g, flags uint) {
	// If the goroutine is in cgo, and we have a cgo traceback, print that.
	if iscgo && gp.m != nil && gp.m.ncgo > 0 && gp.syscallsp != 0 && gp.m.cgoCallers != nil && gp.m.cgoCallers[0] != 0 {
		// Lock cgoCallers so that a signal handler won't
		// change it, copy the array, reset it, unlock it.
		// We are locked to the thread and are not running
		// concurrently with a signal handler.
		// We just have to stop a signal handler from interrupting
		// in the middle of our copy.
		atomic.Store(&gp.m.cgoCallersUse, 1)
		cgoCallers := *gp.m.cgoCallers
		gp.m.cgoCallers[0] = 0
		atomic.Store(&gp.m.cgoCallersUse, 0)

		printCgoTraceback(&cgoCallers)
	}

	var n int
	if readgstatus(gp)&^_Gscan == _Gsyscall {
		// Override registers if blocked in system call.
		pc = gp.syscallpc
		sp = gp.syscallsp
		flags &^= _TraceTrap
	}
	// Print traceback. By default, omits runtime frames.
	// If that means we print nothing at all, repeat
	// forcing all frames printed.
	n = gentraceback(pc, sp, lr, gp, 0, nil, _TracebackMaxFrames, nil, nil, flags)
	if n == 0 && (flags&_TraceRuntimeFrames) == 0 {
		n = gentraceback(pc, sp, lr, gp, 0, nil, _TracebackMaxFrames, nil, nil, flags|_TraceRuntimeFrames)
	}
	if n == _TracebackMaxFrames {
		print("...additional frames elided...\n")
	}
	printcreatedby(gp)

	if gp.ancestors == nil {
		return
	}
	for _, ancestor := range *gp.ancestors {
		printAncestorTraceback(ancestor)
	}
}
// printAncestorTraceback prints the traceback of the given ancestor.
// TODO: Unify this with gentraceback and CallersFrames.
func printAncestorTraceback(ancestor ancestorInfo) {
	print("[originating from goroutine ", ancestor.goid, "]:\n")
	for fidx, pc := range ancestor.pcs {
		f := findfunc(pc) // f previously validated
		if showfuncinfo(f, fidx == 0, funcID_normal, funcID_normal) {
			printAncestorTracebackFuncInfo(f, pc)
		}
	}
	if len(ancestor.pcs) == _TracebackMaxFrames {
		print("...additional frames elided...\n")
	}
	// Show what created goroutine, except main goroutine (goid 1).
	f := findfunc(ancestor.gopc)
	if f.valid() && showfuncinfo(f, false, funcID_normal, funcID_normal) && ancestor.goid != 1 {
		printcreatedby1(f, ancestor.gopc)
	}
}
// printAncestorTracebackFuncInfo prints the given function info at a
// given pc within an ancestor traceback. The precision of this info is
// reduced due to only having access to the pcs at the time of the caller
// goroutine being created.
func printAncestorTracebackFuncInfo(f funcInfo, pc uintptr) {
	name := funcname(f)
	if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil {
		inltree := (*[1 << 20]inlinedCall)(inldata)
		ix := pcdatavalue(f, _PCDATA_InlTreeIndex, pc, nil)
		if ix >= 0 {
			name = funcnameFromNameoff(f, inltree[ix].func_)
		}
	}
	file, line := funcline(f, pc)
	if name == "runtime.gopanic" {
		name = "panic"
	}
	print(name, "(...)\n")
	print("\t", file, ":", line)
	if pc > f.entry {
		print(" +", hex(pc-f.entry))
	}
	print("\n")
}

func callers(skip int, pcbuf []uintptr) int {
	sp := getcallersp()
	pc := getcallerpc()
	gp := getg()
	var n int
	systemstack(func() {
		n = gentraceback(pc, sp, 0, gp, skip, &pcbuf[0], len(pcbuf), nil, nil, 0)
	})
	return n
}

func gcallers(gp *g, skip int, pcbuf []uintptr) int {
	return gentraceback(^uintptr(0), ^uintptr(0), 0, gp, skip, &pcbuf[0], len(pcbuf), nil, nil, 0)
}
// showframe reports whether the frame with the given characteristics should
// be printed during a traceback.
func showframe(f funcInfo, gp *g, firstFrame bool, funcID, childID funcID) bool {
	g := getg()
	if g.m.throwing > 0 && gp != nil && (gp == g.m.curg || gp == g.m.caughtsig.ptr()) {
		return true
	}
	return showfuncinfo(f, firstFrame, funcID, childID)
}
// showfuncinfo reports whether a function with the given characteristics should
// be printed during a traceback.
func showfuncinfo(f funcInfo, firstFrame bool, funcID, childID funcID) bool {
	// Note that f may be a synthesized funcInfo for an inlined
	// function, in which case only nameoff and funcID are set.

	level, _, _ := gotraceback()
	if level > 1 {
		// Show all frames.
		return true
	}

	if !f.valid() {
		return false
	}

	if funcID == funcID_wrapper && elideWrapperCalling(childID) {
		return false
	}

	name := funcname(f)

	// Special case: always show runtime.gopanic frame
	// in the middle of a stack trace, so that we can
	// see the boundary between ordinary code and
	// panic-induced deferred code.
	// See golang.org/issue/5832.
	if name == "runtime.gopanic" && !firstFrame {
		return true
	}

	return bytealg.IndexByteString(name, '.') >= 0 && (!hasPrefix(name, "runtime.") || isExportedRuntime(name))
}
// isExportedRuntime reports whether name is an exported runtime function.
// It is only for runtime functions, so ASCII A-Z is fine.
func isExportedRuntime(name string) bool {
	const n = len("runtime.")
	return len(name) > n && name[:n] == "runtime." && 'A' <= name[n] && name[n] <= 'Z'
}
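
// For example, isExportedRuntime("runtime.GC") is true, while
// isExportedRuntime("runtime.gopanic") and isExportedRuntime("sync.Once.Do")
// are false.
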
// elideWrapperCalling reports whether a wrapper function that called
// function id should be elided from stack traces.
func elideWrapperCalling(id funcID) bool {
	// If the wrapper called a panic function instead of the
	// wrapped function, we want to include it in stacks.
	return !(id == funcID_gopanic || id == funcID_sigpanic || id == funcID_panicwrap)
}

var gStatusStrings = [...]string{
	_Gidle:      "idle",
	_Grunnable:  "runnable",
	_Grunning:   "running",
	_Gsyscall:   "syscall",
	_Gwaiting:   "waiting",
	_Gdead:      "dead",
	_Gcopystack: "copystack",
	_Gpreempted: "preempted",
}

func goroutineheader(gp *g) {
	gpstatus := readgstatus(gp)

	isScan := gpstatus&_Gscan != 0
	gpstatus &^= _Gscan // drop the scan bit

	// Basic string status
	var status string
	if 0 <= gpstatus && gpstatus < uint32(len(gStatusStrings)) {
		status = gStatusStrings[gpstatus]
	} else {
		status = "???"
	}

	// Override.
	if gpstatus == _Gwaiting && gp.waitreason != waitReasonZero {
		status = gp.waitreason.String()
	}

	// approx time the G is blocked, in minutes
	var waitfor int64
	if (gpstatus == _Gwaiting || gpstatus == _Gsyscall) && gp.waitsince != 0 {
		waitfor = (nanotime() - gp.waitsince) / 60e9
	}
	print("goroutine ", gp.goid, " [", status)
	if isScan {
		print(" (scan)")
	}
	if waitfor >= 1 {
		print(", ", waitfor, " minutes")
	}
	if gp.lockedm != 0 {
		print(", locked to thread")
	}
	print("]:\n")
}
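
// goroutineheader produces a header of the form (values illustrative):
//
//	goroutine 18 [chan receive, 2 minutes, locked to thread]: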

func tracebackothers(me *g) {
	level, _, _ := gotraceback()

	// Show the current goroutine first, if we haven't already.
	curgp := getg().m.curg
	if curgp != nil && curgp != me {
		print("\n")
		goroutineheader(curgp)
		traceback(^uintptr(0), ^uintptr(0), 0, curgp)
	}

	// We can't take allglock here because this may be during fatal
	// throw/panic, where locking allglock could be out-of-order or a
	// direct deadlock.
	//
	// Instead, use atomic access to allgs which requires no locking. We
	// don't lock against concurrent creation of new Gs, but even with
	// allglock we may miss Gs created after this loop.
	ptr, length := atomicAllG()
	for i := uintptr(0); i < length; i++ {
		gp := atomicAllGIndex(ptr, i)

		if gp == me || gp == curgp || readgstatus(gp) == _Gdead || isSystemGoroutine(gp, false) && level < 2 {
			continue
		}
		print("\n")
		goroutineheader(gp)
		// Note: gp.m == getg().m occurs when tracebackothers is called
		// from a signal handler initiated during a
		// systemstack call. The original G is still in the
		// running state, and we want to print its stack.
		if gp.m != getg().m && readgstatus(gp)&^_Gscan == _Grunning {
			print("\tgoroutine running on other thread; stack unavailable\n")
			printcreatedby(gp)
		} else {
			traceback(^uintptr(0), ^uintptr(0), 0, gp)
		}
	}
}
// tracebackHexdump hexdumps part of stk around frame.sp and frame.fp
// for debugging purposes. If the address bad is included in the
// hexdumped range, it will mark it as well.
func tracebackHexdump(stk stack, frame *stkframe, bad uintptr) {
	const expand = 32 * sys.PtrSize
	const maxExpand = 256 * sys.PtrSize
	// Start around frame.sp.
	lo, hi := frame.sp, frame.sp
	// Expand to include frame.fp.
	if frame.fp != 0 && frame.fp < lo {
		lo = frame.fp
	}
	if frame.fp != 0 && frame.fp > hi {
		hi = frame.fp
	}
	// Expand a bit more.
	lo, hi = lo-expand, hi+expand
	// But don't go too far from frame.sp.
	if lo < frame.sp-maxExpand {
		lo = frame.sp - maxExpand
	}
	if hi > frame.sp+maxExpand {
		hi = frame.sp + maxExpand
	}
	// And don't go outside the stack bounds.
	if lo < stk.lo {
		lo = stk.lo
	}
	if hi > stk.hi {
		hi = stk.hi
	}

	// Print the hex dump.
	print("stack: frame={sp:", hex(frame.sp), ", fp:", hex(frame.fp), "} stack=[", hex(stk.lo), ",", hex(stk.hi), ")\n")
	hexdumpWords(lo, hi, func(p uintptr) byte {
		switch p {
		case frame.fp:
			return '>'
		case frame.sp:
			return '<'
		case bad:
			return '!'
		}
		return 0
	})
}
// Does f mark the top of a goroutine stack?
func topofstack(f funcInfo, g0 bool) bool {
	return f.funcID == funcID_goexit ||
		f.funcID == funcID_mstart ||
		f.funcID == funcID_mcall ||
		f.funcID == funcID_morestack ||
		f.funcID == funcID_rt0_go ||
		f.funcID == funcID_externalthreadhandler ||
		// asmcgocall is TOS on the system stack because it
		// switches to the system stack, but in this case we can
		// come back to the regular stack and still want to be
		// able to unwind through the call that appeared on the
		// regular stack.
		(g0 && f.funcID == funcID_asmcgocall)
}
// isSystemGoroutine reports whether the goroutine g must be omitted
// in stack dumps and deadlock detector. This is any goroutine that
// starts at a runtime.* entry point, except for runtime.main,
// runtime.handleAsyncEvent (wasm only) and sometimes runtime.runfinq.
//
// If fixed is true, any goroutine that can vary between user and
// system (that is, the finalizer goroutine) is considered a user
// goroutine.
func isSystemGoroutine(gp *g, fixed bool) bool {
	// Keep this in sync with cmd/trace/trace.go:isSystemGoroutine.
	f := findfunc(gp.startpc)
	if !f.valid() {
		return false
	}
	if f.funcID == funcID_runtime_main || f.funcID == funcID_handleAsyncEvent {
		return false
	}
	if f.funcID == funcID_runfinq {
		// We include the finalizer goroutine if it's calling
		// back into user code.
		if fixed {
			// This goroutine can vary. In fixed mode,
			// always consider it a user goroutine.
			return false
		}
		return !fingRunning
	}
	return hasPrefix(funcname(f), "runtime.")
}
// SetCgoTraceback records three C functions to use to gather
// traceback information from C code and to convert that traceback
// information into symbolic information. These are used when printing
// stack traces for a program that uses cgo.
//
// The traceback and context functions may be called from a signal
// handler, and must therefore use only async-signal safe functions.
// The symbolizer function may be called while the program is
// crashing, and so must be cautious about using memory. None of the
// functions may call back into Go.
//
// The context function will be called with a single argument, a
// pointer to a struct:
//
//	struct {
//		Context uintptr
//	}
//
// In C syntax, this struct will be
//
//	struct {
//		uintptr_t Context;
//	};
//
// If the Context field is 0, the context function is being called to
// record the current traceback context. It should record in the
// Context field whatever information is needed about the current
// point of execution to later produce a stack trace, probably the
// stack pointer and PC. In this case the context function will be
// called from C code.
//
// If the Context field is not 0, then it is a value returned by a
// previous call to the context function. This case is called when the
// context is no longer needed; that is, when the Go code is returning
// to its C code caller. This permits the context function to release
// any associated resources.
//
// While it would be correct for the context function to record a
// complete stack trace whenever it is called, and simply copy that
// out in the traceback function, in a typical program the context
// function will be called many times without ever recording a
// traceback for that context. Recording a complete stack trace in a
// call to the context function is likely to be inefficient.
//
// The traceback function will be called with a single argument, a
// pointer to a struct:
//
//	struct {
//		Context    uintptr
//		SigContext uintptr
//		Buf        *uintptr
//		Max        uintptr
//	}
//
// In C syntax, this struct will be
//
//	struct {
//		uintptr_t  Context;
//		uintptr_t  SigContext;
//		uintptr_t* Buf;
//		uintptr_t  Max;
//	};
//
// The Context field will be zero to gather a traceback from the
// current program execution point. In this case, the traceback
// function will be called from C code.
//
// Otherwise Context will be a value previously returned by a call to
// the context function. The traceback function should gather a stack
// trace from that saved point in the program execution. The traceback
// function may be called from an execution thread other than the one
// that recorded the context, but only when the context is known to be
// valid and unchanging. The traceback function may also be called
// deeper in the call stack on the same thread that recorded the
// context. The traceback function may be called multiple times with
// the same Context value; it will usually be appropriate to cache the
// result, if possible, the first time this is called for a specific
// context value.
//
// If the traceback function is called from a signal handler on a Unix
// system, SigContext will be the signal context argument passed to
// the signal handler (a C ucontext_t* cast to uintptr_t). This may be
// used to start tracing at the point where the signal occurred. If
// the traceback function is not called from a signal handler,
// SigContext will be zero.
//
// Buf is where the traceback information should be stored. It should
// be PC values, such that Buf[0] is the PC of the caller, Buf[1] is
// the PC of that function's caller, and so on. Max is the maximum
// number of entries to store. The function should store a zero to
// indicate the top of the stack, or that the caller is on a different
// stack, presumably a Go stack.
//
// Unlike runtime.Callers, the PC values returned should, when passed
// to the symbolizer function, return the file/line of the call
// instruction. No additional subtraction is required or appropriate.
//
// On all platforms, the traceback function is invoked when a call from
// Go to C to Go requests a stack trace. On linux/amd64, linux/ppc64le,
// and freebsd/amd64, the traceback function is also invoked when a
// signal is received by a thread that is executing a cgo call. The
// traceback function should not make assumptions about when it is
// called, as future versions of Go may make additional calls.
//
// The symbolizer function will be called with a single argument, a
// pointer to a struct:
//
//	struct {
//		PC      uintptr // program counter to fetch information for
//		File    *byte   // file name (NUL terminated)
//		Lineno  uintptr // line number
//		Func    *byte   // function name (NUL terminated)
//		Entry   uintptr // function entry point
//		More    uintptr // set non-zero if more info for this PC
//		Data    uintptr // unused by runtime, available for function
//	}
//
// In C syntax, this struct will be
//
//	struct {
//		uintptr_t PC;
//		char*     File;
//		uintptr_t Lineno;
//		char*     Func;
//		uintptr_t Entry;
//		uintptr_t More;
//		uintptr_t Data;
//	};
//
// The PC field will be a value returned by a call to the traceback
// function.
//
// The first time the function is called for a particular traceback,
// all the fields except PC will be 0. The function should fill in the
// other fields if possible, setting them to 0/nil if the information
// is not available. The Data field may be used to store any useful
// information across calls. The More field should be set to non-zero
// if there is more information for this PC, zero otherwise. If More
// is set non-zero, the function will be called again with the same
// PC, and may return different information (this is intended for use
// with inlined functions). If More is zero, the function will be
// called with the next PC value in the traceback. When the traceback
// is complete, the function will be called once more with PC set to
// zero; this may be used to free any information. Each call will
// leave the fields of the struct set to the same values they had upon
// return, except for the PC field when the More field is zero. The
// function must not keep a copy of the struct pointer between calls.
//
// When calling SetCgoTraceback, the version argument is the version
// number of the structs that the functions expect to receive.
// Currently this must be zero.
//
// The symbolizer function may be nil, in which case the results of the
// traceback function will be displayed as numbers. If the traceback
// function is nil, the symbolizer function will never be called. The
// context function may be nil, in which case the traceback function
// will only be called with the context field set to zero. If the
// context function is nil, then calls from Go to C to Go will not
// show a traceback for the C portion of the call stack.
//
// SetCgoTraceback should be called only once, ideally from an init function.
func SetCgoTraceback(version int, traceback, context, symbolizer unsafe.Pointer) {
	if version != 0 {
		panic("unsupported version")
	}

	if cgoTraceback != nil && cgoTraceback != traceback ||
		cgoContext != nil && cgoContext != context ||
		cgoSymbolizer != nil && cgoSymbolizer != symbolizer {
		panic("call SetCgoTraceback only once")
	}

	cgoTraceback = traceback
	cgoContext = context
	cgoSymbolizer = symbolizer

	// The context function is called when a C function calls a Go
	// function. As such it is only called by C code in runtime/cgo.
	if _cgo_set_context_function != nil {
		cgocall(_cgo_set_context_function, context)
	}
}

var cgoTraceback unsafe.Pointer
var cgoContext unsafe.Pointer
var cgoSymbolizer unsafe.Pointer

// cgoTracebackArg is the type passed to cgoTraceback.
type cgoTracebackArg struct {
	context    uintptr
	sigContext uintptr
	buf        *uintptr
	max        uintptr
}
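
// A minimal registration sketch (the C function names tracebackFn,
// contextFn, and symbolizerFn are hypothetical and would be defined in the
// program's own C sources):
//
//	// import "C", "runtime", "unsafe"
//	func init() {
//		runtime.SetCgoTraceback(0,
//			unsafe.Pointer(C.tracebackFn),
//			unsafe.Pointer(C.contextFn),
//			unsafe.Pointer(C.symbolizerFn))
//	}
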
// cgoContextArg is the type passed to the context function.
type cgoContextArg struct {
	context uintptr
}
// cgoSymbolizerArg is the type passed to cgoSymbolizer.
type cgoSymbolizerArg struct {
	pc       uintptr
	file     *byte
	lineno   uintptr
	funcName *byte
	entry    uintptr
	more     uintptr
	data     uintptr
}

// printCgoTraceback prints a traceback of callers.
func printCgoTraceback(callers *cgoCallers) {
	if cgoSymbolizer == nil {
		for _, c := range callers {
			if c == 0 {
				break
			}
			print("non-Go function at pc=", hex(c), "\n")
		}
		return
	}

	var arg cgoSymbolizerArg
	for _, c := range callers {
		if c == 0 {
			break
		}
		printOneCgoTraceback(c, 0x7fffffff, &arg)
	}
	arg.pc = 0
	callCgoSymbolizer(&arg)
}
// printOneCgoTraceback prints the traceback of a single cgo caller.
// This can print more than one line because of inlining.
// Returns the number of frames printed.
func printOneCgoTraceback(pc uintptr, max int, arg *cgoSymbolizerArg) int {
	c := 0
	arg.pc = pc
	for c <= max {
		callCgoSymbolizer(arg)
		if arg.funcName != nil {
			// Note that we don't print any argument
			// information here, not even parentheses.
			// The symbolizer must add that if appropriate.
			println(gostringnocopy(arg.funcName))
		} else {
			println("non-Go function")
		}
		print("\t")
		if arg.file != nil {
			print(gostringnocopy(arg.file), ":", arg.lineno, " ")
		}
		print("pc=", hex(pc), "\n")
		c++
		if arg.more == 0 {
			break
		}
	}
	return c
}
// callCgoSymbolizer calls the cgoSymbolizer function.
func callCgoSymbolizer(arg *cgoSymbolizerArg) {
	call := cgocall
	if panicking > 0 || getg().m.curg != getg() {
		// We do not want to call into the scheduler when panicking
		// or when on the system stack.
		call = asmcgocall
	}
	if msanenabled {
		msanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
	}
	call(cgoSymbolizer, noescape(unsafe.Pointer(arg)))
}
// cgoContextPCs gets the PC values from a cgo traceback.
func cgoContextPCs(ctxt uintptr, buf []uintptr) {
	if cgoTraceback == nil {
		return
	}
	call := cgocall
	if panicking > 0 || getg().m.curg != getg() {
		// We do not want to call into the scheduler when panicking
		// or when on the system stack.
		call = asmcgocall
	}
	arg := cgoTracebackArg{
		context: ctxt,
		buf:     (*uintptr)(noescape(unsafe.Pointer(&buf[0]))),
		max:     uintptr(len(buf)),
	}
	if msanenabled {
		msanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
	}
	call(cgoTraceback, noescape(unsafe.Pointer(&arg)))
}