// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Runtime type representation.

package runtime

import "unsafe"

// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	reflect/type.go
//	internal/reflectlite/type.go
type tflag uint8

const (
	tflagUncommon      tflag = 1 << 0
	tflagExtraStar     tflag = 1 << 1
	tflagNamed         tflag = 1 << 2
	tflagRegularMemory tflag = 1 << 3 // equal and hash can treat values of this type as a single region of t.size bytes
)
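
// displayNameSketch is an illustrative sketch, not part of the runtime
// proper: it shows how two of the flags above are consumed. tflagNamed means
// name() below returns a non-empty result, and tflagExtraStar means the
// stored type string carries a leading '*' that string() must strip.
func displayNameSketch(t *_type) string {
	if t.tflag&tflagNamed != 0 {
		return t.name() // e.g. "Buffer" for bytes.Buffer
	}
	return t.string() // full type string, e.g. "[]int"
}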
// Needs to be in sync with ../cmd/link/internal/ld/decodesym.go:/^func.commonsize,
// ../cmd/compile/internal/gc/reflect.go:/^func.dcommontype and
// ../reflect/type.go:/^type.rtype.
// ../internal/reflectlite/type.go:/^type.rtype.
type _type struct {
	size       uintptr
	ptrdata    uintptr // size of memory prefix holding all pointers
	hash       uint32
	tflag      tflag
	align      uint8
	fieldAlign uint8
	kind       uint8
	// function for comparing objects of this type
	// (ptr to object A, ptr to object B) -> ==?
	equal func(unsafe.Pointer, unsafe.Pointer) bool
	// gcdata stores the GC type data for the garbage collector.
	// If the KindGCProg bit is set in kind, gcdata is a GC program.
	// Otherwise it is a ptrmask bitmap. See mbitmap.go for details.
	gcdata    *byte
	str       nameOff
	ptrToThis typeOff
}
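
// sameValueSketch is an illustrative sketch, not part of the runtime proper:
// it shows how the equal field above is meant to be invoked, comparing two
// values of the same type t through raw pointers. The real callers are the
// interface-equality and map code elsewhere in the runtime.
func sameValueSketch(t *_type, a, b unsafe.Pointer) bool {
	if t.equal == nil {
		// Types that are not comparable carry a nil equal function.
		return false
	}
	return t.equal(a, b)
}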

func (t *_type) string() string {
	s := t.nameOff(t.str).name()
	if t.tflag&tflagExtraStar != 0 {
		return s[1:]
	}
	return s
}

func (t *_type) uncommon() *uncommontype {
	if t.tflag&tflagUncommon == 0 {
		return nil
	}
	switch t.kind & kindMask {
	case kindStruct:
		type u struct {
			structtype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindPtr:
		type u struct {
			ptrtype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindFunc:
		type u struct {
			functype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindSlice:
		type u struct {
			slicetype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindArray:
		type u struct {
			arraytype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindChan:
		type u struct {
			chantype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindMap:
		type u struct {
			maptype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	case kindInterface:
		type u struct {
			interfacetype
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	default:
		type u struct {
			_type
			u uncommontype
		}
		return &(*u)(unsafe.Pointer(t)).u
	}
}

func (t *_type) name() string {
	if t.tflag&tflagNamed == 0 {
		return ""
	}
	s := t.string()
	i := len(s) - 1
	for i >= 0 && s[i] != '.' {
		i--
	}
	return s[i+1:]
}

// pkgpath returns the path of the package where t was defined, if
// available. This is not the same as the reflect package's PkgPath
// method, in that it returns the package path for struct and interface
// types, not just named types.
func (t *_type) pkgpath() string {
	if u := t.uncommon(); u != nil {
		return t.nameOff(u.pkgpath).name()
	}
	switch t.kind & kindMask {
	case kindStruct:
		st := (*structtype)(unsafe.Pointer(t))
		return st.pkgPath.name()
	case kindInterface:
		it := (*interfacetype)(unsafe.Pointer(t))
		return it.pkgpath.name()
	}
	return ""
}

// reflectOffs holds type offsets defined at run time by the reflect package.
//
// When a type is defined at run time, its *rtype data lives on the heap.
// There are a wide range of possible addresses the heap may use, that
// may not be representable as a 32-bit offset. Moreover the GC may
// one day start moving heap memory, in which case there is no stable
// offset that can be defined.
//
// To provide stable offsets, we pin *rtype objects in a global map
// and treat the offset as an identifier. We use negative offsets that
// do not overlap with any compile-time module offsets.
//
// Entries are created by reflect.addReflectOff.
var reflectOffs struct {
	lock mutex
	next int32
	m    map[int32]unsafe.Pointer
	minv map[unsafe.Pointer]int32
}

func reflectOffsLock() {
	lock(&reflectOffs.lock)
	if raceenabled {
		raceacquire(unsafe.Pointer(&reflectOffs.lock))
	}
}

func reflectOffsUnlock() {
	if raceenabled {
		racerelease(unsafe.Pointer(&reflectOffs.lock))
	}
	unlock(&reflectOffs.lock)
}
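
// addReflectOffSketch is an illustrative sketch, not part of this file: it
// outlines how an entry of the kind described above could be registered.
// A fresh negative id is handed out under the lock and the heap pointer is
// pinned in both maps; the real entry point is reflect.addReflectOff.
func addReflectOffSketch(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		// Negative ids cannot collide with compile-time module offsets.
		id = reflectOffs.next
		reflectOffs.next--
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}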

func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name {
	if off == 0 {
		return name{}
	}
	base := uintptr(ptrInModule)
	for md := &firstmoduledata; md != nil; md = md.next {
		if base >= md.types && base < md.etypes {
			res := md.types + uintptr(off)
			if res > md.etypes {
				println("runtime: nameOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
				throw("runtime: name offset out of range")
			}
			return name{(*byte)(unsafe.Pointer(res))}
		}
	}

	// No module found. See if it is a run time name.
	reflectOffsLock()
	res, found := reflectOffs.m[int32(off)]
	reflectOffsUnlock()
	if !found {
		println("runtime: nameOff", hex(off), "base", hex(base), "not in ranges:")
		for next := &firstmoduledata; next != nil; next = next.next {
			println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
		}
		throw("runtime: name offset base pointer out of range")
	}
	return name{(*byte)(res)}
}

func (t *_type) nameOff(off nameOff) name {
	return resolveNameOff(unsafe.Pointer(t), off)
}

func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type {
	if off == 0 || off == -1 {
		// -1 is the sentinel value for unreachable code.
		// See cmd/link/internal/ld/data.go:relocsym.
		return nil
	}
	base := uintptr(ptrInModule)
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: typeOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: type offset base pointer out of range")
		}
		return (*_type)(res)
	}
	if t := md.typemap[off]; t != nil {
		return t
	}
	res := md.types + uintptr(off)
	if res > md.etypes {
		println("runtime: typeOff", hex(off), "out of range", hex(md.types), "-", hex(md.etypes))
		throw("runtime: type offset out of range")
	}
	return (*_type)(unsafe.Pointer(res))
}

func (t *_type) typeOff(off typeOff) *_type {
	return resolveTypeOff(unsafe.Pointer(t), off)
}

func (t *_type) textOff(off textOff) unsafe.Pointer {
	if off == -1 {
		// -1 is the sentinel value for unreachable code.
		// See cmd/link/internal/ld/data.go:relocsym.
		return unsafe.Pointer(^uintptr(0))
	}
	base := uintptr(unsafe.Pointer(t))
	var md *moduledata
	for next := &firstmoduledata; next != nil; next = next.next {
		if base >= next.types && base < next.etypes {
			md = next
			break
		}
	}
	if md == nil {
		reflectOffsLock()
		res := reflectOffs.m[int32(off)]
		reflectOffsUnlock()
		if res == nil {
			println("runtime: textOff", hex(off), "base", hex(base), "not in ranges:")
			for next := &firstmoduledata; next != nil; next = next.next {
				println("\ttypes", hex(next.types), "etypes", hex(next.etypes))
			}
			throw("runtime: text offset base pointer out of range")
		}
		return res
	}
	res := uintptr(0)

	// The text, or instruction stream, is generated as one large buffer.
	// The off (offset) for a method is its offset within this buffer.
	// If the total text size gets too large, there can be issues on platforms
	// like ppc64 if the targets of calls are too far for the call instruction.
	// To resolve the large text issue, the text is split into multiple text
	// sections to allow the linker to generate long calls when necessary.
	// When this happens, the vaddr for each text section is set to its offset
	// within the text. Each method's offset is compared against the section
	// vaddrs and lengths to determine the containing section. Then the section
	// relative offset is added to the section's relocated baseaddr to compute
	// the method's address.

	if len(md.textsectmap) > 1 {
		for i := range md.textsectmap {
			sectaddr := md.textsectmap[i].vaddr
			sectlen := md.textsectmap[i].length
			if uintptr(off) >= sectaddr && uintptr(off) < sectaddr+sectlen {
				res = md.textsectmap[i].baseaddr + uintptr(off) - uintptr(md.textsectmap[i].vaddr)
				break
			}
		}
	} else {
		// single text section
		res = md.text + uintptr(off)
	}

	if res > md.etext && GOARCH != "wasm" { // on wasm, functions do not live in the same address space as the linear memory
		println("runtime: textOff", hex(off), "out of range", hex(md.text), "-", hex(md.etext))
		throw("runtime: text offset out of range")
	}
	return unsafe.Pointer(res)
}

func (t *functype) in() []*_type {
	// See funcType in reflect/type.go for details on data layout.
	uadd := uintptr(unsafe.Sizeof(functype{}))
	if t.typ.tflag&tflagUncommon != 0 {
		uadd += unsafe.Sizeof(uncommontype{})
	}
	return (*[1 << 20]*_type)(add(unsafe.Pointer(t), uadd))[:t.inCount]
}

func (t *functype) out() []*_type {
	// See funcType in reflect/type.go for details on data layout.
	uadd := uintptr(unsafe.Sizeof(functype{}))
	if t.typ.tflag&tflagUncommon != 0 {
		uadd += unsafe.Sizeof(uncommontype{})
	}
	outCount := t.outCount & (1<<15 - 1)
	return (*[1 << 20]*_type)(add(unsafe.Pointer(t), uadd))[t.inCount : t.inCount+outCount]
}

func (t *functype) dotdotdot() bool {
	return t.outCount&(1<<15) != 0
}
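
// signatureSketch is an illustrative sketch, not part of the runtime proper:
// a functype is followed in memory by its flattened parameter and result
// type lists, which in() and out() above slice out, and bit 15 of outCount
// is reserved for the dotdotdot (variadic) flag.
func signatureSketch(ft *functype) (params, results []*_type, variadic bool) {
	return ft.in(), ft.out(), ft.dotdotdot()
}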

type nameOff int32
type typeOff int32
type textOff int32

type method struct {
	name nameOff // name of method
	mtyp typeOff // method type (without receiver)
	ifn  textOff // fn used in interface call (one-word receiver)
	tfn  textOff // fn used for normal method call
}

type uncommontype struct {
	pkgpath nameOff
	mcount  uint16 // number of methods
	xcount  uint16 // number of exported methods
	moff    uint32 // offset from this uncommontype to [mcount]method
	_       uint32 // unused
}
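
// methodsSketch is an illustrative sketch, not part of the runtime proper:
// moff is the offset from the uncommontype to an array of mcount method
// structs, so the method list can be recovered with pointer arithmetic, much
// as the reflect package does on its side of the mirror.
func methodsSketch(u *uncommontype) []method {
	if u.mcount == 0 {
		return nil
	}
	return (*[1 << 16]method)(add(unsafe.Pointer(u), uintptr(u.moff)))[:u.mcount:u.mcount]
}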

type imethod struct {
	name nameOff
	ityp typeOff
}

type interfacetype struct {
	typ     _type
	pkgpath name
	mhdr    []imethod
}

type maptype struct {
	typ    _type
	key    *_type
	elem   *_type
	bucket *_type // internal type representing a hash bucket
	// function for hashing keys (ptr to key, seed) -> hash
	hasher     func(unsafe.Pointer, uintptr) uintptr
	keysize    uint8  // size of key slot
	elemsize   uint8  // size of elem slot
	bucketsize uint16 // size of bucket
	flags      uint32
}

// Note: flag values must match those used in the TMAP case
// in ../cmd/compile/internal/gc/reflect.go:dtypesym.
func (mt *maptype) indirectkey() bool { // store ptr to key instead of key itself
	return mt.flags&1 != 0
}
func (mt *maptype) indirectelem() bool { // store ptr to elem instead of elem itself
	return mt.flags&2 != 0
}
func (mt *maptype) reflexivekey() bool { // true if k==k for all keys
	return mt.flags&4 != 0
}
func (mt *maptype) needkeyupdate() bool { // true if we need to update key on an overwrite
	return mt.flags&8 != 0
}
func (mt *maptype) hashMightPanic() bool { // true if hash function might panic
	return mt.flags&16 != 0
}

type arraytype struct {
	typ   _type
	elem  *_type
	slice *_type
	len   uintptr
}

type chantype struct {
	typ  _type
	elem *_type
	dir  uintptr
}

type slicetype struct {
	typ  _type
	elem *_type
}

type functype struct {
	typ      _type
	inCount  uint16
	outCount uint16
}

type ptrtype struct {
	typ  _type
	elem *_type
}

type structfield struct {
	name       name
	typ        *_type
	offsetAnon uintptr
}

func (f *structfield) offset() uintptr {
	return f.offsetAnon >> 1
}
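
// embeddedSketch is an illustrative sketch, not part of the runtime proper:
// offsetAnon packs the field's byte offset in the upper bits and an
// "anonymous (embedded) field" flag in the low bit, which offset above
// discards with its >>1.
func embeddedSketch(f *structfield) bool {
	return f.offsetAnon&1 != 0
}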

type structtype struct {
	typ     _type
	pkgPath name
	fields  []structfield
}

// name is an encoded type name with optional extra data.
// See reflect/type.go for details.
type name struct {
	bytes *byte
}
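
// nameLayoutSketch is an illustrative sketch, not part of the runtime proper,
// restating the encoding the accessors below rely on: byte 0 holds flag bits
// (1<<0 exported, 1<<1 tag data follows the name, 1<<2 a pkgPath nameOff
// follows), bytes 1-2 hold the big-endian name length, and the name data
// starts at byte 3, optionally followed by a 2-byte tag length plus tag data.
func nameLayoutSketch(n name) (exported bool, nm, tag string) {
	return n.isExported(), n.name(), n.tag()
}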

func (n name) data(off int) *byte {
	return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off)))
}

func (n name) isExported() bool {
	return (*n.bytes)&(1<<0) != 0
}

func (n name) nameLen() int {
	return int(uint16(*n.data(1))<<8 | uint16(*n.data(2)))
}

func (n name) tagLen() int {
	if *n.data(0)&(1<<1) == 0 {
		return 0
	}
	off := 3 + n.nameLen()
	return int(uint16(*n.data(off))<<8 | uint16(*n.data(off + 1)))
}

func (n name) name() (s string) {
	if n.bytes == nil {
		return ""
	}
	nl := n.nameLen()
	if nl == 0 {
		return ""
	}
	hdr := (*stringStruct)(unsafe.Pointer(&s))
	hdr.str = unsafe.Pointer(n.data(3))
	hdr.len = nl
	return s
}

func (n name) tag() (s string) {
	tl := n.tagLen()
	if tl == 0 {
		return ""
	}
	nl := n.nameLen()
	hdr := (*stringStruct)(unsafe.Pointer(&s))
	hdr.str = unsafe.Pointer(n.data(3 + nl + 2))
	hdr.len = tl
	return s
}

func (n name) pkgPath() string {
	if n.bytes == nil || *n.data(0)&(1<<2) == 0 {
		return ""
	}
	off := 3 + n.nameLen()
	if tl := n.tagLen(); tl > 0 {
		off += 2 + tl
	}
	var nameOff nameOff
	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off)))[:])
	pkgPathName := resolveNameOff(unsafe.Pointer(n.bytes), nameOff)
	return pkgPathName.name()
}

func (n name) isBlank() bool {
	if n.bytes == nil {
		return false
	}
	if n.nameLen() != 1 {
		return false
	}
	return *n.data(3) == '_'
}

// typelinksinit scans the types from extra modules and builds the
// moduledata typemap used to de-duplicate type pointers.
func typelinksinit() {
	if firstmoduledata.next == nil {
		return
	}
	typehash := make(map[uint32][]*_type, len(firstmoduledata.typelinks))

	modules := activeModules()
	prev := modules[0]
	for _, md := range modules[1:] {
		// Collect types from the previous module into typehash.
	collect:
		for _, tl := range prev.typelinks {
			var t *_type
			if prev.typemap == nil {
				t = (*_type)(unsafe.Pointer(prev.types + uintptr(tl)))
			} else {
				t = prev.typemap[typeOff(tl)]
			}
			// Add to typehash if not seen before.
			tlist := typehash[t.hash]
			for _, tcur := range tlist {
				if tcur == t {
					continue collect
				}
			}
			typehash[t.hash] = append(tlist, t)
		}

		if md.typemap == nil {
			// If any of this module's typelinks match a type from a
			// prior module, prefer that prior type by adding the offset
			// to this module's typemap.
			tm := make(map[typeOff]*_type, len(md.typelinks))
			pinnedTypemaps = append(pinnedTypemaps, tm)
			md.typemap = tm
			for _, tl := range md.typelinks {
				t := (*_type)(unsafe.Pointer(md.types + uintptr(tl)))
				for _, candidate := range typehash[t.hash] {
					seen := map[_typePair]struct{}{}
					if typesEqual(t, candidate, seen) {
						t = candidate
						break
					}
				}
				md.typemap[typeOff(tl)] = t
			}
		}

		prev = md
	}
}

type _typePair struct {
	t1 *_type
	t2 *_type
}

// typesEqual reports whether two types are equal.
//
// Everywhere in the runtime and reflect packages, it is assumed that
// there is exactly one *_type per Go type, so that pointer equality
// can be used to test if types are equal. There is one place that
// breaks this assumption: buildmode=shared. In this case a type can
// appear as two different pieces of memory. This is hidden from the
// runtime and reflect package by the per-module typemap built in
// typelinksinit. It uses typesEqual to map types from later modules
// back into earlier ones.
//
// Only typelinksinit needs this function.
func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
	tp := _typePair{t, v}
	if _, ok := seen[tp]; ok {
		return true
	}

	// Mark these types as seen, and thus equivalent, which prevents an
	// infinite loop if the two types are identical but recursively defined
	// and loaded from different modules.
	seen[tp] = struct{}{}

	if t == v {
		return true
	}
	kind := t.kind & kindMask
	if kind != v.kind&kindMask {
		return false
	}
	if t.string() != v.string() {
		return false
	}
	ut := t.uncommon()
	uv := v.uncommon()
	if ut != nil || uv != nil {
		if ut == nil || uv == nil {
			return false
		}
		pkgpatht := t.nameOff(ut.pkgpath).name()
		pkgpathv := v.nameOff(uv.pkgpath).name()
		if pkgpatht != pkgpathv {
			return false
		}
	}
	if kindBool <= kind && kind <= kindComplex128 {
		return true
	}
	switch kind {
	case kindString, kindUnsafePointer:
		return true
	case kindArray:
		at := (*arraytype)(unsafe.Pointer(t))
		av := (*arraytype)(unsafe.Pointer(v))
		return typesEqual(at.elem, av.elem, seen) && at.len == av.len
	case kindChan:
		ct := (*chantype)(unsafe.Pointer(t))
		cv := (*chantype)(unsafe.Pointer(v))
		return ct.dir == cv.dir && typesEqual(ct.elem, cv.elem, seen)
	case kindFunc:
		ft := (*functype)(unsafe.Pointer(t))
		fv := (*functype)(unsafe.Pointer(v))
		if ft.outCount != fv.outCount || ft.inCount != fv.inCount {
			return false
		}
		tin, vin := ft.in(), fv.in()
		for i := 0; i < len(tin); i++ {
			if !typesEqual(tin[i], vin[i], seen) {
				return false
			}
		}
		tout, vout := ft.out(), fv.out()
		for i := 0; i < len(tout); i++ {
			if !typesEqual(tout[i], vout[i], seen) {
				return false
			}
		}
		return true
	case kindInterface:
		it := (*interfacetype)(unsafe.Pointer(t))
		iv := (*interfacetype)(unsafe.Pointer(v))
		if it.pkgpath.name() != iv.pkgpath.name() {
			return false
		}
		if len(it.mhdr) != len(iv.mhdr) {
			return false
		}
		for i := range it.mhdr {
			tm := &it.mhdr[i]
			vm := &iv.mhdr[i]
			// Note the mhdr array can be relocated from
			// another module. See #17724.
			tname := resolveNameOff(unsafe.Pointer(tm), tm.name)
			vname := resolveNameOff(unsafe.Pointer(vm), vm.name)
			if tname.name() != vname.name() {
				return false
			}
			if tname.pkgPath() != vname.pkgPath() {
				return false
			}
			tityp := resolveTypeOff(unsafe.Pointer(tm), tm.ityp)
			vityp := resolveTypeOff(unsafe.Pointer(vm), vm.ityp)
			if !typesEqual(tityp, vityp, seen) {
				return false
			}
		}
		return true
	case kindMap:
		mt := (*maptype)(unsafe.Pointer(t))
		mv := (*maptype)(unsafe.Pointer(v))
		return typesEqual(mt.key, mv.key, seen) && typesEqual(mt.elem, mv.elem, seen)
	case kindPtr:
		pt := (*ptrtype)(unsafe.Pointer(t))
		pv := (*ptrtype)(unsafe.Pointer(v))
		return typesEqual(pt.elem, pv.elem, seen)
	case kindSlice:
		st := (*slicetype)(unsafe.Pointer(t))
		sv := (*slicetype)(unsafe.Pointer(v))
		return typesEqual(st.elem, sv.elem, seen)
	case kindStruct:
		st := (*structtype)(unsafe.Pointer(t))
		sv := (*structtype)(unsafe.Pointer(v))
		if len(st.fields) != len(sv.fields) {
			return false
		}
		if st.pkgPath.name() != sv.pkgPath.name() {
			return false
		}
		for i := range st.fields {
			tf := &st.fields[i]
			vf := &sv.fields[i]
			if tf.name.name() != vf.name.name() {
				return false
			}
			if !typesEqual(tf.typ, vf.typ, seen) {
				return false
			}
			if tf.name.tag() != vf.name.tag() {
				return false
			}
			if tf.offsetAnon != vf.offsetAnon {
				return false
			}
		}
		return true
	default:
		println("runtime: impossible type kind", kind)
		throw("runtime: impossible type kind")
		return false
	}
}