// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime
// Metrics implementation exported to runtime/metrics.

import (
	"runtime/internal/atomic"
	"unsafe"
)

metrics is a map of runtime/metrics keys to data used by the runtime to sample each metric's value.
deps is the set of runtime statistics that this metric depends on. Before compute is called, the statAggregate which will be passed must ensure() these dependencies.
compute is a function that populates a metricValue given a populated statAggregate structure.
	compute func(in *statAggregate, out *metricValue)
}
initMetrics initializes the metrics map if it hasn't been yet. metricsSema must be held.
func () {
	if metricsInit {
		return
	}

Skip size class 0 which is a stand-in for large objects, but large objects are tracked separately (and they actually get placed in the last bucket, not the first).
	sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size.
Size classes have an inclusive upper-bound and exclusive lower bound (e.g. 48-byte size class is (32, 48]) whereas we want and inclusive lower-bound and exclusive upper-bound (e.g. 48-byte size class is [33, 49). We can achieve this by shifting all bucket boundaries up by 1. Also, a float64 can precisely represent integers with value up to 2^53 and size classes are relatively small (nowhere near 2^48 even) so this will give us exact boundaries.
		sizeClassBuckets[] = float64(class_to_size[] + 1)
	}
	sizeClassBuckets = append(sizeClassBuckets, float64Inf())

	timeHistBuckets = timeHistogramMetricsBuckets()
	metrics = map[string]metricData{
		"/gc/cycles/automatic:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.gcCyclesDone - .sysStats.gcCyclesForced
			},
		},
		"/gc/cycles/forced:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.gcCyclesForced
			},
		},
		"/gc/cycles/total:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.gcCyclesDone
			},
		},
		"/gc/heap/allocs-by-size:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				 := .float64HistOrInit(sizeClassBuckets)
Cut off the first index which is ostensibly for size class 0, but large objects are tracked separately so it's actually unused.
				for ,  := range .heapStats.smallAllocCount[1:] {
					.counts[] = uint64()
				}
			},
		},
		"/gc/heap/frees-by-size:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				 := .float64HistOrInit(sizeClassBuckets)
Cut off the first index which is ostensibly for size class 0, but large objects are tracked separately so it's actually unused.
				for ,  := range .heapStats.smallFreeCount[1:] {
					.counts[] = uint64()
				}
			},
		},
		"/gc/heap/goal:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.heapGoal
			},
		},
		"/gc/heap/objects:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .heapStats.numObjects
			},
		},
		"/gc/pauses:seconds": {
			compute: func( *statAggregate,  *metricValue) {
The bottom-most bucket, containing negative values, is tracked as a separately as underflow, so fill that in manually and then iterate over the rest.
				.counts[0] = atomic.Load64(&memstats.gcPauseDist.underflow)
				for  := range memstats.gcPauseDist.counts {
					.counts[+1] = atomic.Load64(&memstats.gcPauseDist.counts[])
				}
			},
		},
		"/memory/classes/heap/free:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(.heapStats.committed - .heapStats.inHeap -
					.heapStats.inStacks - .heapStats.inWorkBufs -
					.heapStats.inPtrScalarBits)
			},
		},
		"/memory/classes/heap/objects:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .heapStats.inObjects
			},
		},
		"/memory/classes/heap/released:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(.heapStats.released)
			},
		},
		"/memory/classes/heap/stacks:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(.heapStats.inStacks)
			},
		},
		"/memory/classes/heap/unused:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(.heapStats.inHeap) - .heapStats.inObjects
			},
		},
		"/memory/classes/metadata/mcache/free:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.mCacheSys - .sysStats.mCacheInUse
			},
		},
		"/memory/classes/metadata/mcache/inuse:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.mCacheInUse
			},
		},
		"/memory/classes/metadata/mspan/free:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.mSpanSys - .sysStats.mSpanInUse
			},
		},
		"/memory/classes/metadata/mspan/inuse:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.mSpanInUse
			},
		},
		"/memory/classes/metadata/other:bytes": {
			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(.heapStats.inWorkBufs+.heapStats.inPtrScalarBits) + .sysStats.gcMiscSys
			},
		},
		"/memory/classes/os-stacks:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.stacksSys
			},
		},
		"/memory/classes/other:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.otherSys
			},
		},
		"/memory/classes/profiling/buckets:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.buckHashSys
			},
		},
		"/memory/classes/total:bytes": {
			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(.heapStats.committed+.heapStats.released) +
					.sysStats.stacksSys + .sysStats.mSpanSys +
					.sysStats.mCacheSys + .sysStats.buckHashSys +
					.sysStats.gcMiscSys + .sysStats.otherSys
			},
		},
		"/sched/goroutines:goroutines": {
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(gcount())
			},
		},
	}
	metricsInit = true
}
// statDep is a dependency on a group of statistics
// that a metric might have.
type statDep uint

const (
	heapStatsDep statDep = iota // corresponds to heapStatsAggregate
	sysStatsDep                 // corresponds to sysStatsAggregate
	numStatsDeps
)

// statDepSet represents a set of statDeps.
//
// Under the hood, it's a bitmap.
type statDepSet [(numStatsDeps + 63) / 64]uint64

// makeStatDepSet creates a new statDepSet from a list of statDeps.
func makeStatDepSet(deps ...statDep) statDepSet {
	var s statDepSet
	for _, d := range deps {
		// Set the bit for each dependency; d/64 selects the word,
		// d%64 the bit within it.
		s[d/64] |= 1 << (d % 64)
	}
	return s
}
differennce returns set difference of s from b as a new set.
func ( statDepSet) ( statDepSet) statDepSet {
	var  statDepSet
	for  := range  {
		[] = [] &^ []
	}
	return 
}
union returns the union of the two sets as a new set.
func ( statDepSet) ( statDepSet) statDepSet {
	var  statDepSet
	for  := range  {
		[] = [] | []
	}
	return 
}
empty returns true if there are no dependencies in the set.
func ( *statDepSet) () bool {
	for ,  := range  {
		if  != 0 {
			return false
		}
	}
	return true
}
has returns true if the set contains a given statDep.
func ( *statDepSet) ( statDep) bool {
	return [/64]&(1<<(%64)) != 0
}
heapStatsAggregate represents memory stats obtained from the runtime. This set of stats is grouped together because they depend on each other in some way to make sense of the runtime's current heap memory use. They're also sharded across Ps, so it makes sense to grab them all at once.
Derived from values in heapStatsDelta.
inObjects is the bytes of memory occupied by objects,
numObjects is the number of live objects in the heap.
compute populates the heapStatsAggregate with values from the runtime.
Calculate derived stats.
sysStatsAggregate represents system memory stats obtained from the runtime. This set of stats is grouped together because they're all relatively cheap to acquire and generally independent of one another and other runtime memory stats. The fact that they may be acquired at different times, especially with respect to heapStatsAggregate, means there could be some skew, but because of these stats are independent, there's no real consistency issue here.
statAggregate is the main driver of the metrics implementation. It contains multiple aggregates of runtime statistics, as well as a set of these aggregates that it has populated. The aggergates are populated lazily by its ensure method.
ensure populates statistics aggregates determined by deps if they haven't yet been populated.
func ( *statAggregate) ( *statDepSet) {
	 := .difference(.ensured)
	if .empty() {
		return
	}
	for  := statDep(0);  < numStatsDeps; ++ {
		if !.has() {
			continue
		}
		switch  {
		case heapStatsDep:
			.heapStats.compute()
		case sysStatsDep:
			.sysStats.compute()
		}
	}
	.ensured = .ensured.union()
}
// metricKind is a runtime copy of runtime/metrics.ValueKind and
// must be kept structurally identical to that type.
type metricKind int

const (
	// These values must be kept identical to their corresponding Kind* values
	// in the runtime/metrics package.
	metricKindBad metricKind = iota
	metricKindUint64
	metricKindFloat64
	metricKindFloat64Histogram
)

// metricSample is a runtime copy of runtime/metrics.Sample and
// must be kept structurally identical to that type.
type metricSample struct {
	name  string
	value metricValue
}

// metricValue is a runtime copy of runtime/metrics.Value and
// must be kept structurally identical to that type.
type metricValue struct {
	kind    metricKind
	scalar  uint64         // contains scalar values for scalar Kinds.
	pointer unsafe.Pointer // contains non-scalar values.
}
float64HistOrInit tries to pull out an existing float64Histogram from the value, but if none exists, then it allocates one with the given buckets.
func ( *metricValue) ( []float64) *metricFloat64Histogram {
	var  *metricFloat64Histogram
	if .kind == metricKindFloat64Histogram && .pointer != nil {
		 = (*metricFloat64Histogram)(.pointer)
	} else {
		.kind = metricKindFloat64Histogram
		 = new(metricFloat64Histogram)
		.pointer = unsafe.Pointer()
	}
	.buckets = 
	if len(.counts) != len(.buckets)-1 {
		.counts = make([]uint64, len()-1)
	}
	return 
}
metricFloat64Histogram is a runtime copy of runtime/metrics.Float64Histogram and must be kept structurally identical to that type.
agg is used by readMetrics, and is protected by metricsSema. Managed as a global variable because its pointer will be an argument to a dynamically-defined function, and we'd like to avoid it escaping to the heap.
readMetrics is the implementation of runtime/metrics.Read.go:linkname readMetrics runtime/metrics.runtime_readMetrics
Construct a slice from the args.
	 := slice{, , }
	 := *(*[]metricSample)(unsafe.Pointer(&))
Acquire the metricsSema but with handoff. This operation is expensive enough that queueing up goroutines and handing off between them will be noticably better-behaved.
Ensure the map is initialized.
Clear agg defensively.
Sample.
	for  := range  {
		 := &[]
		,  := metrics[.name]
		if ! {
			.value.kind = metricKindBad
			continue
Ensure we have all the stats we need. agg is populated lazily.
		agg.ensure(&.deps)
Compute the value based on the stats we have.
		.compute(&agg, &.value)
	}

	semrelease(&metricsSema)