// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/bytealg"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift

var traceback_env uint32

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = _g_.m.throwing > 0 || t&tracebackAll != 0
	if _g_.m.traceback != 0 {
		level = int32(_g_.m.traceback)
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}
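
// For example, with GOTRACEBACK=crash, setTraceback (below) caches
// 2<<tracebackShift | tracebackAll | tracebackCrash, so gotraceback
// reports level 2 with all and crash both true.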

var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}

func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}

func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64,
// but we don't 8-byte align stack variables on 386.
var test_z64, test_x64 uint64

func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}

func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k     unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if i == i {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if i == i1 {
		throw("float32nan3")
	}

	testAtomic64()

	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}

type dbgVar struct {
	name  string
	value *int32
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	cgocheck           int32
	clobberfree        int32
	efence             int32
	gccheckmark        int32
	gcpacertrace       int32
	gcshrinkstackoff   int32
	gcstoptheworld     int32
	gctrace            int32
	invalidptr         int32
	madvdontneed       int32 // for Linux; issue 28466
	scavenge           int32
	scavtrace          int32
	scheddetail        int32
	schedtrace         int32
	tracebackancestors int32
	asyncpreemptoff    int32

	// debug.malloc is used as a combined debug check
	// in the malloc function and should be set
	// if any of the below debug options is != 0.
	malloc         bool
	allocfreetrace int32
	inittrace      int32
	sbrk           int32
}

var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"clobberfree", &debug.clobberfree},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"madvdontneed", &debug.madvdontneed},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scavtrace", &debug.scavtrace},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"tracebackancestors", &debug.tracebackancestors},
	{"asyncpreemptoff", &debug.asyncpreemptoff},
	{"inittrace", &debug.inittrace},
}
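
// For example, GODEBUG=gctrace=1,scavtrace=1 causes parsedebugvars below to
// set debug.gctrace and debug.scavtrace to 1 through this table; a key that
// matches neither an entry here nor "memprofilerate" is ignored.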

func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1
	if GOOS == "linux" {
		// On Linux, MADV_FREE is faster than MADV_DONTNEED,
		// but doesn't affect many of the statistics that MADV_DONTNEED
		// does until the memory is actually reclaimed. This generally
		// leads to poor user experience, like confusing stats in top and
		// other monitoring tools; and bad integration with management
		// systems that respond to memory usage. Hence, default to
		// MADV_DONTNEED.
		debug.madvdontneed = 1
	}

	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := bytealg.IndexByteString(p, ',')
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = bytealg.IndexByteString(field, '=')
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						*v.value = n
					}
				}
			}
		}
	}

	debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}

//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// when C owns the process, simply exit'ing the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}

// Poor mans 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
// This keeps us within no-split stack limits on 32-bit processors.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			// Before this for loop, res was 0, thus all these
			// power of 2 increments are now just bitsets.
			res |= 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}
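
// For example (illustrative): timediv(7123456789, 1000000000, &rem) returns 7
// and sets rem to 123456789, splitting a nanosecond count into whole seconds
// and a remainder without using 64-bit division.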

// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions,
// and must not block.

//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}

//go:nosplit
func releasem(mp *m) {
	_g_ := getg()
	mp.locks--
	if mp.locks == 0 && _g_.preempt {
		// restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}
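
// Typical (illustrative) pairing: mp := acquirem(); ...; releasem(mp).
// While m.locks is non-zero the current goroutine is not preempted, so the
// code between the two calls runs without being descheduled from its M.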

//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{modules[0].typelinks}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, md.typelinks)
	}
	return sections, ret
}

// reflect_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}

// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}

// reflect_resolveTextOff resolves a function pointer offset from a base type.
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return (*_type)(rtype).textOff(textOff(off))
}

// reflectlite_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}

// reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}

// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}