var runtime.mheap_

188 uses

	runtime (current package)
		heapdump.go#L451: 	for _, s := range mheap_.allspans {
		heapdump.go#L477: 	for _, s := range mheap_.allspans {
		heapdump.go#L514: 	for i1 := range mheap_.arenas {
		heapdump.go#L515: 		if mheap_.arenas[i1] == nil {
		heapdump.go#L518: 		for i, ha := range mheap_.arenas[i1] {
		heapdump.go#L649: 	for _, s := range mheap_.allspans {
		heapdump.go#L672: 	for _, s := range mheap_.allspans {
		malloc.go#L480: 	mheap_.init()
		malloc.go#L546: 			hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
		malloc.go#L548: 			hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
		malloc.go#L571: 			mheap_.heapArenaAlloc.init(meta, arenaMetaSize)
		malloc.go#L594: 		if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
		malloc.go#L595: 			p = mheap_.heapArenaAlloc.end
		malloc.go#L608: 				mheap_.arena.init(uintptr(a), size)
		malloc.go#L609: 				p = mheap_.arena.end // For hint below
		malloc.go#L613: 		hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
		malloc.go#L615: 		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
		malloc.go#L699: 		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
		malloc.go#L702: 		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
		mbitmap.go#L311: 	ha := mheap_.arenas[arena.l1()][arena.l2()]
		mbitmap.go#L442: 	l2 := mheap_.arenas[ai.l1()]
		mbitmap.go#L477: 	if l2 := mheap_.arenas[ai.l1()]; l2 != nil && l2[ai.l2()] != nil {
		mbitmap.go#L1871: 	s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
		mbitmap.go#L1876: 	mheap_.freeManual(s, spanAllocPtrScalarBits)
		mcache.go#L87: 		lock(&mheap_.lock)
		mcache.go#L88: 		c = (*mcache)(mheap_.cachealloc.alloc())
		mcache.go#L89: 		c.flushGen = mheap_.sweepgen
		mcache.go#L90: 		unlock(&mheap_.lock)
		mcache.go#L115: 		lock(&mheap_.lock)
		mcache.go#L116: 		mheap_.cachealloc.free(unsafe.Pointer(c))
		mcache.go#L117: 		unlock(&mheap_.lock)
		mcache.go#L155: 		if s.sweepgen != mheap_.sweepgen+3 {
		mcache.go#L158: 		mheap_.central[spc].mcentral.uncacheSpan(s)
		mcache.go#L162: 	s = mheap_.central[spc].mcentral.cacheSpan()
		mcache.go#L173: 	s.sweepgen = mheap_.sweepgen + 3
		mcache.go#L224: 	s := mheap_.alloc(npages, spc, needzero)
		mcache.go#L245: 	mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)
		mcache.go#L256: 	sg := mheap_.sweepgen
		mcache.go#L276: 			mheap_.central[i].mcentral.uncacheSpan(s)
		mcache.go#L303: 	sg := mheap_.sweepgen
		mcache.go#L312: 	atomic.Store(&c.flushGen, mheap_.sweepgen) // Synchronizes with gcStart
		mcentral.go#L84: 	sg := mheap_.sweepgen
		mcentral.go#L193: 	sg := mheap_.sweepgen
		mcentral.go#L232: 	s := mheap_.alloc(npages, c.spanclass, true)
		mcheckmark.go#L40: 	for _, ai := range mheap_.allArenas {
		mcheckmark.go#L41: 		arena := mheap_.arenas[ai.l1()][ai.l2()]
		mcheckmark.go#L90: 	arena := mheap_.arenas[ai.l1()][ai.l2()]
		metrics.go#L372: 		lock(&mheap_.lock)
		metrics.go#L374: 		a.mSpanInUse = uint64(mheap_.spanalloc.inuse)
		metrics.go#L376: 		a.mCacheInUse = uint64(mheap_.cachealloc.inuse)
		metrics.go#L377: 		unlock(&mheap_.lock)
		mgc.go#L178: 	mheap_.sweepdone = 1
		mgc.go#L228: 		lock(&mheap_.lock)
		mgc.go#L237: 		unlock(&mheap_.lock)
		mgc.go#L824: 	assertWorldStoppedOrLockHeld(&mheap_.lock)
		mgc.go#L920: 		mheap_.sweepPagesPerByte = 0
		mgc.go#L937: 		pagesSwept := atomic.Load64(&mheap_.pagesSwept)
		mgc.go#L938: 		pagesInUse := atomic.Load64(&mheap_.pagesInUse)
		mgc.go#L941: 			mheap_.sweepPagesPerByte = 0
		mgc.go#L943: 			mheap_.sweepPagesPerByte = float64(sweepDistancePages) / float64(heapDistance)
		mgc.go#L944: 			mheap_.sweepHeapLiveBasis = heapLiveBasis
		mgc.go#L948: 			atomic.Store64(&mheap_.pagesSweptBasis, pagesSwept)
		mgc.go#L965: 	assertWorldStoppedOrLockHeld(&mheap_.lock)
		mgc.go#L1184: 	for atomic.Load(&work.cycles) == n+1 && atomic.Load(&mheap_.sweepers) != 0 {
		mgc.go#L1352: 		if fg := atomic.Load(&p.mcache.flushGen); fg != mheap_.sweepgen {
		mgc.go#L1353: 			println("runtime: p", p.id, "flushGen", fg, "!= sweepgen", mheap_.sweepgen)
		mgc.go#L2177: 	lock(&mheap_.lock)
		mgc.go#L2178: 	mheap_.sweepgen += 2
		mgc.go#L2179: 	mheap_.sweepdone = 0
		mgc.go#L2180: 	mheap_.pagesSwept = 0
		mgc.go#L2181: 	mheap_.sweepArenas = mheap_.allArenas
		mgc.go#L2182: 	mheap_.reclaimIndex = 0
		mgc.go#L2183: 	mheap_.reclaimCredit = 0
		mgc.go#L2184: 	unlock(&mheap_.lock)
		mgc.go#L2191: 		lock(&mheap_.lock)
		mgc.go#L2192: 		mheap_.sweepPagesPerByte = 0
		mgc.go#L2193: 		unlock(&mheap_.lock)
		mgc.go#L2241: 	lock(&mheap_.lock)
		mgc.go#L2242: 	arenas := mheap_.allArenas
		mgc.go#L2243: 	unlock(&mheap_.lock)
		mgc.go#L2245: 		ha := mheap_.arenas[ai.l1()][ai.l2()]
		mgcmark.go#L96: 	mheap_.markArenas = mheap_.allArenas[:len(mheap_.allArenas):len(mheap_.allArenas)]
		mgcmark.go#L97: 	work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)
		mgcmark.go#L316: 	sg := mheap_.sweepgen
		mgcmark.go#L319: 	ai := mheap_.markArenas[shard/(pagesPerArena/pagesPerSpanRoot)]
		mgcmark.go#L320: 	ha := mheap_.arenas[ai.l1()][ai.l2()]
		mgcscavenge.go#L122: 		mheap_.scavengeGoal = ^uint64(0)
		mgcscavenge.go#L154: 		mheap_.scavengeGoal = ^uint64(0)
		mgcscavenge.go#L157: 	mheap_.scavengeGoal = retainedGoal
		mgcscavenge.go#L293: 			lock(&mheap_.lock)
		mgcscavenge.go#L296: 			retained, goal := heapRetained(), mheap_.scavengeGoal
		mgcscavenge.go#L298: 				unlock(&mheap_.lock)
		mgcscavenge.go#L304: 			released = mheap_.pages.scavenge(physPageSize, true)
		mgcscavenge.go#L305: 			mheap_.pages.scav.released += released
		mgcscavenge.go#L308: 			unlock(&mheap_.lock)
		mgcsweep.go#L141: 	sg := mheap_.sweepgen
		mgcsweep.go#L142: 	for i := range mheap_.central {
		mgcsweep.go#L143: 		c := &mheap_.central[i].mcentral
		mgcsweep.go#L190: 	sweepRatio := mheap_.sweepPagesPerByte // For debugging
		mgcsweep.go#L195: 	if atomic.Load(&mheap_.sweepdone) != 0 {
		mgcsweep.go#L199: 	atomic.Xadd(&mheap_.sweepers, +1)
		mgcsweep.go#L203: 	sg := mheap_.sweepgen
		mgcsweep.go#L205: 		s = mheap_.nextSpanForSweep()
		mgcsweep.go#L207: 			atomic.Store(&mheap_.sweepdone, 1)
		mgcsweep.go#L233: 			atomic.Xadduintptr(&mheap_.reclaimCredit, npages)
		mgcsweep.go#L244: 	if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepdone) != 0 {
		mgcsweep.go#L257: 			lock(&mheap_.lock)
		mgcsweep.go#L258: 			mheap_.pages.scavengeStartGen()
		mgcsweep.go#L259: 			unlock(&mheap_.lock)
		mgcsweep.go#L267: 			print("pacer: sweep done at heap size ", memstats.heap_live>>20, "MB; allocated ", (memstats.heap_live-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept, " pages at ", sweepRatio, " pages/byte\n")
		mgcsweep.go#L281: 	return mheap_.sweepdone != 0
		mgcsweep.go#L295: 	sg := mheap_.sweepgen
		mgcsweep.go#L327: 	sweepgen := mheap_.sweepgen
		mgcsweep.go#L337: 	atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))
		mgcsweep.go#L518: 				mheap_.freeSpan(s)
		mgcsweep.go#L523: 				mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
		mgcsweep.go#L525: 				mheap_.central[spc].mcentral.partialSwept(sweepgen).push(s)
		mgcsweep.go#L551: 				mheap_.freeSpan(s)
		mgcsweep.go#L561: 		mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
		mgcsweep.go#L635: 	if mheap_.sweepPagesPerByte == 0 {
		mgcsweep.go#L645: 	sweptBasis := atomic.Load64(&mheap_.pagesSweptBasis)
		mgcsweep.go#L648: 	newHeapLive := uintptr(atomic.Load64(&memstats.heap_live)-mheap_.sweepHeapLiveBasis) + spanBytes
		mgcsweep.go#L649: 	pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
		mgcsweep.go#L650: 	for pagesTarget > int64(atomic.Load64(&mheap_.pagesSwept)-sweptBasis) {
		mgcsweep.go#L652: 			mheap_.sweepPagesPerByte = 0
		mgcsweep.go#L655: 		if atomic.Load64(&mheap_.pagesSweptBasis) != sweptBasis {
		mgcwork.go#L116: 	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
		mgcwork.go#L359: 	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
		mgcwork.go#L374: 				s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf)
		mgcwork.go#L476: 			mheap_.freeManual(span, spanAllocWorkBuf)
		mheap.go#L221: var mheap_ mheap
		mheap.go#L640: 		if ri.l2() >= uint(len(mheap_.arenas[0])) {
		mheap.go#L645: 		if ri.l1() >= uint(len(mheap_.arenas)) {
		mheap.go#L649: 	l2 := mheap_.arenas[ri.l1()]
		mheap.go#L668: 	return mheap_.arenas[ai.l1()][ai.l2()].spans[(p/pageSize)%pagesPerArena]
		mheap.go#L694: 	arena = mheap_.arenas[ai.l1()][ai.l2()]
		mheap.go#L1521: 	systemstack(func() { mheap_.scavengeAll() })
		mheap.go#L1660: 	ha := mheap_.arenas[ai.l1()][ai.l2()]
		mheap.go#L1668: 	ha := mheap_.arenas[ai.l1()][ai.l2()]
		mheap.go#L1782: 	lock(&mheap_.speciallock)
		mheap.go#L1783: 	s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
		mheap.go#L1784: 	unlock(&mheap_.speciallock)
		mheap.go#L1811: 	lock(&mheap_.speciallock)
		mheap.go#L1812: 	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
		mheap.go#L1813: 	unlock(&mheap_.speciallock)
		mheap.go#L1823: 	lock(&mheap_.speciallock)
		mheap.go#L1824: 	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
		mheap.go#L1825: 	unlock(&mheap_.speciallock)
		mheap.go#L1838: 	lock(&mheap_.speciallock)
		mheap.go#L1839: 	s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
		mheap.go#L1840: 	unlock(&mheap_.speciallock)
		mheap.go#L1855: 		lock(&mheap_.speciallock)
		mheap.go#L1856: 		mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
		mheap.go#L1857: 		unlock(&mheap_.speciallock)
		mheap.go#L1861: 		lock(&mheap_.speciallock)
		mheap.go#L1862: 		mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
		mheap.go#L1863: 		unlock(&mheap_.speciallock)
		mpagealloc.go#L548: 	if p.test || mheap_.arenas[ai.l1()] == nil || mheap_.arenas[ai.l1()][ai.l2()] == nil {
		mstats.go#L573: 	lock(&mheap_.lock)
		mstats.go#L594: 	unlock(&mheap_.lock)
		mstats.go#L612: 	memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
		mstats.go#L613: 	memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
		panic.go#L1241: 	if mheap_.cachealloc.size == 0 { // very early
		proc.go#L4734: 			mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
		proc.go#L4737: 		lock(&mheap_.lock)
		proc.go#L4738: 		pp.pcache.flush(&mheap_.pages)
		proc.go#L4739: 		unlock(&mheap_.lock)
		stack.go#L187: 	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
		stack.go#L190: 		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
		stack.go#L254: 		mheap_.freeManual(s, spanAllocStack)
		stack.go#L395: 		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
		stack.go#L399: 			s = mheap_.allocManual(npage, spanAllocStack)
		stack.go#L483: 			mheap_.freeManual(s, spanAllocStack)
		stack.go#L1196: 				mheap_.freeManual(s, spanAllocStack)
		stack.go#L1210: 			mheap_.freeManual(s, spanAllocStack)
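
	A minimal sketch of the pattern the listing makes visible: mheap_ is the runtime's single package-level heap instance (declared at mheap.go#L221 above), and its mutable state is touched either under mheap_.lock (e.g. mcache.go#L87-L90, mgc.go#L2177-L2184) or via atomics (e.g. mgcsweep.go#L195-L207). The types and names below are hypothetical stand-ins, since the real mheap struct is internal to package runtime and not importable.

	// sketch.go: a package-level singleton guarded by a lock, mirroring how
	// runtime code only reads or writes mheap_ between lock(&mheap_.lock)
	// and unlock(&mheap_.lock). "heap" and "heap_" are stand-ins, not runtime API.
	package main

	import "sync"

	type heap struct {
		mu       sync.Mutex
		sweepgen uint32 // cf. mheap_.sweepgen in the listing
	}

	// heap_ plays the role of the global runtime.mheap_.
	var heap_ heap

	func finishSweepCycle() {
		heap_.mu.Lock()
		heap_.sweepgen += 2 // cf. mgc.go#L2178: mheap_.sweepgen += 2
		heap_.mu.Unlock()
	}

	func main() {
		finishSweepCycle()
	}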