// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Implementation of runtime/debug.WriteHeapDump. Writes all
// objects in the heap plus additional info (roots, threads,
// finalizers, etc.) to a file.

// The format of the dumped file is described at
// https://golang.org/s/go15heapdump.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)
//go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
func runtime_debug_WriteHeapDump(fd uintptr) {
	stopTheWorld("write heap dump")

	// Keep m on this G's stack instead of the system stack. Both
	// readmemstats_m and writeheapdump_m have pretty large peak stack
	// depths and we risk blowing the system stack. This is safe because
	// the world is stopped, so we don't need to worry about anyone
	// shrinking and therefore moving our stack.
	var m MemStats

	systemstack(func() {
		// Call readmemstats_m here instead of deeper in writeheapdump_m
		// because we might blow the system stack otherwise.
		readmemstats_m(&m)
		writeheapdump_m(fd, &m)
	})

	startTheWorld()
}

var dumpfd uintptr // fd to write the dump to
var tmpbuf []byte

// buffer of pending write data
const (
	bufSize = 4096
)

var buf [bufSize]byte
var nbuf uintptr

func dwrite(data unsafe.Pointer, len uintptr) {
	if len == 0 {
		return
	}
	if nbuf+len <= bufSize {
		copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
		nbuf += len
		return
	}

	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	if len >= bufSize {
		write(dumpfd, data, int32(len))
		nbuf = 0
	} else {
		copy(buf[:], (*[bufSize]byte)(data)[:len])
		nbuf = len
	}
}

func dwritebyte(b byte) {
	dwrite(unsafe.Pointer(&b), 1)
}

func flush() {
	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	nbuf = 0
}

// Cache of types that have been serialized already.
// We use a type's hash field to pick a bucket.
// Inside a bucket, we keep a list of types that
// have been serialized so far, most recently used first.
// Note: when a bucket overflows we may end up
// serializing a type more than once. That's ok.
const (
	typeCacheBuckets = 256
	typeCacheAssoc   = 4
)

type typeCacheBucket struct {
	t [typeCacheAssoc]*_type
}

var typecache [typeCacheBuckets]typeCacheBucket

// dump a uint64 in a varint format parseable by encoding/binary
func dumpint(v uint64) {
	var buf [10]byte
	var n int
	for v >= 0x80 {
		buf[n] = byte(v | 0x80)
		n++
		v >>= 7
	}
	buf[n] = byte(v)
	n++
	dwrite(unsafe.Pointer(&buf), uintptr(n))
}
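
// Illustrative sketch, not part of the original source: because the loop
// above emits the standard base-128 varint layout, an external reader of a
// heap dump can decode each value with encoding/binary, e.g.
//
//	v, n := binary.Uvarint(data) // v is the decoded uint64; n > 0 on success
//
// where data is a hypothetical byte slice read from the dump file.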

func dumpbool(b bool) {
	if b {
		dumpint(1)
	} else {
		dumpint(0)
	}
}
// dump varint uint64 length followed by memory contents
func dumpmemrange(data unsafe.Pointer, len uintptr) {
	dumpint(uint64(len))
	dwrite(data, len)
}

func dumpslice(b []byte) {
	dumpint(uint64(len(b)))
	if len(b) > 0 {
		dwrite(unsafe.Pointer(&b[0]), uintptr(len(b)))
	}
}

func dumpstr(s string) {
	sp := stringStructOf(&s)
	dumpmemrange(sp.str, uintptr(sp.len))
}
// dump information for a type
func dumptype(t *_type) {
	if t == nil {
		return
	}

	// If we've definitely serialized the type before,
	// no need to do it again.
	b := &typecache[t.hash&(typeCacheBuckets-1)]
	if t == b.t[0] {
		return
	}
	for i := 1; i < typeCacheAssoc; i++ {
		if t == b.t[i] {
			// Move-to-front
			for j := i; j > 0; j-- {
				b.t[j] = b.t[j-1]
			}
			b.t[0] = t
			return
		}
	}

	// Might not have been dumped yet. Dump it and
	// remember we did so.
	for j := typeCacheAssoc - 1; j > 0; j-- {
		b.t[j] = b.t[j-1]
	}
	b.t[0] = t

	// dump the type
	dumpint(tagType)
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
	dumpint(uint64(t.size))
	if x := t.uncommon(); x == nil || t.nameOff(x.pkgpath).name() == "" {
		dumpstr(t.string())
	} else {
		pkgpathstr := t.nameOff(x.pkgpath).name()
		pkgpath := stringStructOf(&pkgpathstr)
		namestr := t.name()
		name := stringStructOf(&namestr)
		dumpint(uint64(uintptr(pkgpath.len) + 1 + uintptr(name.len)))
		dwrite(pkgpath.str, uintptr(pkgpath.len))
		dwritebyte('.')
		dwrite(name.str, uintptr(name.len))
	}
	dumpbool(t.kind&kindDirectIface == 0 || t.ptrdata != 0)
}
type childInfo struct {
	// Information passed up from the callee frame about
	// the layout of the outargs region.
	argoff uintptr   // where the arguments start in the frame
	arglen uintptr   // size of args region
	args   bitvector // if args.n >= 0, pointer map of args region
	sp     *uint8    // callee sp
	depth  uintptr   // depth in call stack (0 == most recent)
}
// dump kinds & offsets of interesting fields in bv
func dumpbv(cbv *bitvector, offset uintptr) {
	for i := uintptr(0); i < uintptr(cbv.n); i++ {
		if cbv.ptrbit(i) == 1 {
			dumpint(fieldKindPtr)
			dumpint(uint64(offset + i*sys.PtrSize))
		}
	}
}

func dumpframe(s *stkframe, arg unsafe.Pointer) bool {
	child := (*childInfo)(arg)
	f := s.fn

	// Figure out what we can about our stack map
	pc := s.pc
	pcdata := int32(-1) // Use the entry map at function entry
	if pc != f.entry {
		pc--
		pcdata = pcdatavalue(f, _PCDATA_StackMapIndex, pc, nil)
	}
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}
	stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))

	var bv bitvector
	if stkmap != nil && stkmap.n > 0 {
		bv = stackmapdata(stkmap, pcdata)
	} else {
		bv.n = -1
	}

	// Dump main body of stack frame.
	dumpint(tagStackFrame)
	dumpint(uint64(s.sp))                              // lowest address in frame
	dumpint(uint64(child.depth))                       // # of frames deep on the stack
	dumpint(uint64(uintptr(unsafe.Pointer(child.sp)))) // sp of child, or 0 if bottom of stack
	dumpmemrange(unsafe.Pointer(s.sp), s.fp-s.sp)      // frame contents
	dumpint(uint64(f.entry))
	dumpint(uint64(s.pc))
	dumpint(uint64(s.continpc))
	name := funcname(f)
	if name == "" {
		name = "unknown function"
	}
	dumpstr(name)

	// Dump fields in the outargs section
	if child.args.n >= 0 {
		dumpbv(&child.args, child.argoff)
	} else {
		// conservative - everything might be a pointer
		for off := child.argoff; off < child.argoff+child.arglen; off += sys.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	}

	// Dump fields in the local vars section
	if stkmap == nil {
		// No locals information, dump everything.
		for off := child.arglen; off < s.varp-s.sp; off += sys.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n < 0 {
		// Locals size information, dump just the locals.
		size := uintptr(-stkmap.n)
		for off := s.varp - size - s.sp; off < s.varp-s.sp; off += sys.PtrSize {
			dumpint(fieldKindPtr)
			dumpint(uint64(off))
		}
	} else if stkmap.n > 0 {
		// Locals bitmap information, scan just the pointers in locals.
		dumpbv(&bv, s.varp-uintptr(bv.n)*sys.PtrSize-s.sp)
	}
	dumpint(fieldKindEol)

	// Record arg info for parent.
	child.argoff = s.argp - s.fp
	child.arglen = s.arglen
	child.sp = (*uint8)(unsafe.Pointer(s.sp))
	child.depth++
	stkmap = (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
	if stkmap != nil {
		child.args = stackmapdata(stkmap, pcdata)
	} else {
		child.args.n = -1
	}
	return true
}

func dumpgoroutine(gp *g) {
	var sp, pc, lr uintptr
	if gp.syscallsp != 0 {
		sp = gp.syscallsp
		pc = gp.syscallpc
		lr = 0
	} else {
		sp = gp.sched.sp
		pc = gp.sched.pc
		lr = gp.sched.lr
	}

	dumpint(tagGoroutine)
	dumpint(uint64(uintptr(unsafe.Pointer(gp))))
	dumpint(uint64(sp))
	dumpint(uint64(gp.goid))
	dumpint(uint64(gp.gopc))
	dumpint(uint64(readgstatus(gp)))
	dumpbool(isSystemGoroutine(gp, false))
	dumpbool(false) // isbackground
	dumpint(uint64(gp.waitsince))
	dumpstr(gp.waitreason.String())
	dumpint(uint64(uintptr(gp.sched.ctxt)))
	dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))

	// dump stack
	var child childInfo
	child.args.n = -1
	child.arglen = 0
	child.sp = nil
	child.depth = 0
	gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, noescape(unsafe.Pointer(&child)), 0)

	// dump defer & panic records
	for d := gp._defer; d != nil; d = d.link {
		dumpint(tagDefer)
		dumpint(uint64(uintptr(unsafe.Pointer(d))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		dumpint(uint64(d.sp))
		dumpint(uint64(d.pc))
		dumpint(uint64(uintptr(unsafe.Pointer(d.fn))))
		if d.fn == nil {
			// d.fn can be nil for open-coded defers
			dumpint(uint64(0))
		} else {
			dumpint(uint64(uintptr(unsafe.Pointer(d.fn.fn))))
		}
		dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
	}
	for p := gp._panic; p != nil; p = p.link {
		dumpint(tagPanic)
		dumpint(uint64(uintptr(unsafe.Pointer(p))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		eface := efaceOf(&p.arg)
		dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
		dumpint(uint64(uintptr(unsafe.Pointer(eface.data))))
		dumpint(0) // was p->defer, no longer recorded
		dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
	}
}

func dumpgs() {
	// goroutines & stacks
	for i := 0; uintptr(i) < allglen; i++ {
		gp := allgs[i]
		status := readgstatus(gp) // The world is stopped so gp will not be in a scan state.
		switch status {
		default:
			print("runtime: unexpected G.status ", hex(status), "\n")
			throw("dumpgs in STW - bad status")
		case _Gdead:
			// ok
		case _Grunnable,
			_Gsyscall,
			_Gwaiting:
			dumpgoroutine(gp)
		}
	}
}

func dumproots() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	// mspan.types
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanInUse {
			// Finalizers
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
				dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
			}
		}
	}
	// Finalizer queue
	iterate_finq(finq_callback)
}

// Bit vector of free marks.
// Needs to be as big as the largest number of objects per span.
var freemark [_PageSize / 8]bool

func dumpobjs() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	for _, s := range mheap_.allspans {
		if s.state.get() != mSpanInUse {
			continue
		}
		p := s.base()
		size := s.elemsize
		n := (s.npages << _PageShift) / size
		if n > uintptr(len(freemark)) {
			throw("freemark array doesn't have enough entries")
		}

		for freeIndex := uintptr(0); freeIndex < s.nelems; freeIndex++ {
			if s.isFree(freeIndex) {
				freemark[freeIndex] = true
			}
		}

		for j := uintptr(0); j < n; j, p = j+1, p+size {
			if freemark[j] {
				freemark[j] = false
				continue
			}
			dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
		}
	}
}

func dumpparams() {
	dumpint(tagParams)
	x := uintptr(1)
	if *(*byte)(unsafe.Pointer(&x)) == 1 {
		dumpbool(false) // little-endian ptrs
	} else {
		dumpbool(true) // big-endian ptrs
	}
	dumpint(sys.PtrSize)
	var arenaStart, arenaEnd uintptr
	for i1 := range mheap_.arenas {
		if mheap_.arenas[i1] == nil {
			continue
		}
		for i, ha := range mheap_.arenas[i1] {
			if ha == nil {
				continue
			}
			base := arenaBase(arenaIdx(i1)<<arenaL1Shift | arenaIdx(i))
			if arenaStart == 0 || base < arenaStart {
				arenaStart = base
			}
			if base+heapArenaBytes > arenaEnd {
				arenaEnd = base + heapArenaBytes
			}
		}
	}
	dumpint(uint64(arenaStart))
	dumpint(uint64(arenaEnd))
	dumpstr(sys.GOARCH)
	dumpstr(sys.Goexperiment)
	dumpint(uint64(ncpu))
}

func itab_callback(tab *itab) {
	t := tab._type
	dumptype(t)
	dumpint(tagItab)
	dumpint(uint64(uintptr(unsafe.Pointer(tab))))
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
}

func dumpitabs() {
	iterate_itabs(itab_callback)
}

func dumpms() {
	for mp := allm; mp != nil; mp = mp.alllink {
		dumpint(tagOSThread)
		dumpint(uint64(uintptr(unsafe.Pointer(mp))))
		dumpint(uint64(mp.id))
		dumpint(mp.procid)
	}
}
//go:systemstack
func dumpmemstats(m *MemStats) {
	// These ints should be identical to the exported
	// MemStats structure and should be ordered the same
	// way too.
	dumpint(tagMemStats)
	dumpint(m.Alloc)
	dumpint(m.TotalAlloc)
	dumpint(m.Sys)
	dumpint(m.Lookups)
	dumpint(m.Mallocs)
	dumpint(m.Frees)
	dumpint(m.HeapAlloc)
	dumpint(m.HeapSys)
	dumpint(m.HeapIdle)
	dumpint(m.HeapInuse)
	dumpint(m.HeapReleased)
	dumpint(m.HeapObjects)
	dumpint(m.StackInuse)
	dumpint(m.StackSys)
	dumpint(m.MSpanInuse)
	dumpint(m.MSpanSys)
	dumpint(m.MCacheInuse)
	dumpint(m.MCacheSys)
	dumpint(m.BuckHashSys)
	dumpint(m.GCSys)
	dumpint(m.OtherSys)
	dumpint(m.NextGC)
	dumpint(m.LastGC)
	dumpint(m.PauseTotalNs)
	for i := 0; i < 256; i++ {
		dumpint(m.PauseNs[i])
	}
	dumpint(uint64(m.NumGC))
}

func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
	stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
	dumpint(tagMemProf)
	dumpint(uint64(uintptr(unsafe.Pointer(b))))
	dumpint(uint64(size))
	dumpint(uint64(nstk))
	for i := uintptr(0); i < nstk; i++ {
		pc := stk[i]
		f := findfunc(pc)
		if !f.valid() {
			var buf [64]byte
			n := len(buf)
			n--
			buf[n] = ')'
			if pc == 0 {
				n--
				buf[n] = '0'
			} else {
				for pc > 0 {
					n--
					buf[n] = "0123456789abcdef"[pc&15]
					pc >>= 4
				}
			}
			n--
			buf[n] = 'x'
			n--
			buf[n] = '0'
			n--
			buf[n] = '('
			dumpslice(buf[n:])
			dumpstr("?")
			dumpint(0)
		} else {
			dumpstr(funcname(f))
			if i > 0 && pc > f.entry {
				pc--
			}
			file, line := funcline(f, pc)
			dumpstr(file)
			dumpint(uint64(line))
		}
	}
	dumpint(uint64(allocs))
	dumpint(uint64(frees))
}

func dumpmemprof() {
	// To protect mheap_.allspans.
	assertWorldStopped()

	iterate_memprof(dumpmemprof_callback)
	for _, s := range mheap_.allspans {
		if s.state.get() != mSpanInUse {
			continue
		}
		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialProfile {
				continue
			}
			spp := (*specialprofile)(unsafe.Pointer(sp))
			p := s.base() + uintptr(spp.special.offset)
			dumpint(tagAllocSample)
			dumpint(uint64(p))
			dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
		}
	}
}

var dumphdr = []byte("go1.7 heap dump\n")
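
// Illustrative sketch, not part of the original source: an external reader of
// a heap dump might verify this header before decoding any records, e.g.
//
//	hdr := make([]byte, len("go1.7 heap dump\n"))
//	if _, err := io.ReadFull(f, hdr); err != nil || string(hdr) != "go1.7 heap dump\n" {
//		// not a dump in this format
//	}
//
// where f is a hypothetical os.File opened on the dump.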

func writeheapdump_m(fd uintptr, m *MemStats) {
	assertWorldStopped()

	// Update stats so we can dump them. As a side effect, flushes all the
	// mcaches so the mspan.freelist lists contain all the free objects.
	updatememstats()

	// Set dump file.
	dumpfd = fd

	// Call dump routine.
	mdump(m)
}

// dumpint() the kind & offset of each field in an object.
func dumpfields(bv bitvector) {
	dumpbv(&bv, 0)
	dumpint(fieldKindEol)
}

func makeheapobjbv(p uintptr, size uintptr) bitvector {
	// Extend the temp buffer if necessary.
	nptr := size / sys.PtrSize
	if uintptr(len(tmpbuf)) < nptr/8+1 {
		if tmpbuf != nil {
			sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		}
		n := nptr/8 + 1
		p := sysAlloc(n, &memstats.other_sys)
		if p == nil {
			throw("heapdump: out of memory")
		}
		tmpbuf = (*[1 << 30]byte)(p)[:n]
	}
	// Convert heap bitmap to pointer bitmap.
	for i := uintptr(0); i < nptr/8+1; i++ {
		tmpbuf[i] = 0
	}
	i := uintptr(0)
	hbits := heapBitsForAddr(p)
	for ; i < nptr; i++ {
		if !hbits.morePointers() {
			break // end of object
		}
		if hbits.isPointer() {
			tmpbuf[i/8] |= 1 << (i % 8)
		}
		hbits = hbits.next()
	}
	return bitvector{int32(i), &tmpbuf[0]}
}
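
// Illustrative note, not part of the original source: in the bitvector
// returned above, bit i describes the word at offset i*sys.PtrSize from the
// start of the object, which is how dumpbv turns a set bit back into the
// byte offset it records:
//
//	offset := uintptr(i) * sys.PtrSize // offset emitted for a set bit i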