Source File: mprof.go
Package: runtime
package runtime
import (
    "runtime/internal/atomic"
    "unsafe"
)
const (
    memProfile bucketType = 1 + iota
    blockProfile
    mutexProfile

    buckHashSize = 179999 // size of bucket hash table
    maxStack     = 32     // max depth of stack to record in bucket
)

type bucketType int

// memRecord is the bucket data for a bucket of type memProfile.
type memRecord struct {
    active memRecordCycle
    future [3]memRecordCycle
}
// memRecordCycle holds the allocation counts for a single heap profile cycle.
type memRecordCycle struct {
    allocs, frees           uintptr
    alloc_bytes, free_bytes uintptr
}
// add accumulates b into a. It does not zero b.
func (a *memRecordCycle) add(b *memRecordCycle) {
    a.allocs += b.allocs
    a.frees += b.frees
    a.alloc_bytes += b.alloc_bytes
    a.free_bytes += b.free_bytes
}
// newBucket allocates a bucket with the given type and number of stack entries.
func newBucket(typ bucketType, nstk int) *bucket {
    size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
    switch typ {
    default:
        throw("invalid profile bucket type")
    case memProfile:
        size += unsafe.Sizeof(memRecord{})
    case blockProfile, mutexProfile:
        size += unsafe.Sizeof(blockRecord{})
    }

    b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
    bucketmem += size
    b.typ = typ
    b.nstk = uintptr(nstk)
    return b
}
// bp returns the blockRecord associated with the blockProfile or mutexProfile bucket b.
func (b *bucket) bp() *blockRecord {
    if b.typ != blockProfile && b.typ != mutexProfile {
        throw("bad use of bucket.bp")
    }
    data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
    return (*blockRecord)(data)
}
// stkbucket returns the bucket for stk, allocating a new bucket if needed.
func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
    if buckhash == nil {
        buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
        if buckhash == nil {
            throw("runtime: cannot allocate memory")
        }
    }

    // Hash stack.
    var h uintptr
    for _, pc := range stk {
        h += pc
        h += h << 10
        h ^= h >> 6
    }
    // hash in size
    h += size
    h += h << 10
    h ^= h >> 6
    // finalize
    h += h << 3
    h ^= h >> 11

    i := int(h % buckHashSize)
    for b := buckhash[i]; b != nil; b = b.next {
        if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
            return b
        }
    }

    if !alloc {
        return nil
    }

    // Create new bucket.
    b := newBucket(typ, len(stk))
    copy(b.stk(), stk)
    b.hash = h
    b.size = size
    b.next = buckhash[i]
    buckhash[i] = b
    if typ == memProfile {
        b.allnext = mbuckets
        mbuckets = b
    } else if typ == mutexProfile {
        b.allnext = xbuckets
        xbuckets = b
    } else {
        b.allnext = bbuckets
        bbuckets = b
    }
    return b
}
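The lookup above keys buckets on a simple shift-and-add hash of the stack's program counters mixed with the record size. As a rough illustration only (the helper name stackHash and the sample PC values are invented, and this is not part of the runtime), the same mixing can be reproduced in ordinary user code:

package main

import "fmt"

// stackHash mirrors the mixing used by stkbucket: fold in each PC,
// then the size, then finalize. Illustrative only.
func stackHash(stk []uintptr, size uintptr) uintptr {
    var h uintptr
    for _, pc := range stk {
        h += pc
        h += h << 10
        h ^= h >> 6
    }
    h += size
    h += h << 10
    h ^= h >> 6
    h += h << 3
    h ^= h >> 11
    return h
}

func main() {
    stk := []uintptr{0x401000, 0x402a40, 0x403f10} // made-up PCs
    fmt.Printf("bucket index: %d\n", stackHash(stk, 64)%179999)
}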
func eqslice(x, y []uintptr) bool {
    if len(x) != len(y) {
        return false
    }
    for i, xi := range x {
        if xi != y[i] {
            return false
        }
    }
    return true
}
// mProf_Malloc is called by malloc to record a profiled block.
func mProf_Malloc(p unsafe.Pointer, size uintptr) {
    var stk [maxStack]uintptr
    nstk := callers(4, stk[:])
    lock(&proflock)
    b := stkbucket(memProfile, size, stk[:nstk], true)
    c := mProf.cycle
    mp := b.mp()
    mpc := &mp.future[(c+2)%uint32(len(mp.future))]
    mpc.allocs++
    mpc.alloc_bytes += size
    unlock(&proflock)
    // setprofilebucket locks other mutexes, so call it outside of proflock.
    systemstack(func() {
        setprofilebucket(p, b)
    })
}

// SetBlockProfileRate controls the fraction of goroutine blocking events
// reported in the blocking profile.
func SetBlockProfileRate(rate int) {
    var r int64
    if rate <= 0 {
        r = 0 // disable profiling
    } else if rate == 1 {
        r = 1 // profile everything
    } else {
        // convert ns to cycles, use float64 to prevent overflow during multiplication
        r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
        if r == 0 {
            r = 1
        }
    }
    atomic.Store64(&blockprofilerate, uint64(r))
}
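A minimal usage sketch, not part of mprof.go: a client program enables block profiling via runtime.SetBlockProfileRate and dumps the accumulated profile with runtime/pprof. The rate of 1 and the artificial contention are example choices only.

package main

import (
    "os"
    "runtime"
    "runtime/pprof"
    "sync"
    "time"
)

func main() {
    // Report every blocking event; 0 would disable block profiling again.
    runtime.SetBlockProfileRate(1)
    defer runtime.SetBlockProfileRate(0)

    // Generate some contention so the profile has data.
    var mu sync.Mutex
    mu.Lock()
    go func() {
        time.Sleep(50 * time.Millisecond)
        mu.Unlock()
    }()
    mu.Lock() // blocks until the goroutine unlocks
    mu.Unlock()

    // Write the accumulated block profile to stdout.
    pprof.Lookup("block").WriteTo(os.Stdout, 1)
}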
// blockevent records a blocking event lasting the given number of cycles,
// attributing it to the frame skip calls up the stack.
func blockevent(cycles int64, skip int) {
    if cycles <= 0 {
        cycles = 1
    }
    if blocksampled(cycles) {
        saveblockevent(cycles, skip+1, blockProfile)
    }
}
func blocksampled(cycles int64) bool {
    rate := int64(atomic.Load64(&blockprofilerate))
    if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
        return false
    }
    return true
}
func saveblockevent(cycles int64, skip int, which bucketType) {
    gp := getg()
    var nstk int
    var stk [maxStack]uintptr
    if gp.m.curg == nil || gp.m.curg == gp {
        nstk = callers(skip, stk[:])
    } else {
        nstk = gcallers(gp.m.curg, skip, stk[:])
    }
    lock(&proflock)
    b := stkbucket(which, 0, stk[:nstk], true)
    b.bp().count++
    b.bp().cycles += cycles
    unlock(&proflock)
}
var mutexprofilerate uint64 // fraction sampled

// SetMutexProfileFraction controls the fraction of mutex contention events
// that are reported in the mutex profile. On average 1/rate events are
// reported. The previous rate is returned.
//
// To turn off profiling entirely, pass rate 0.
// To just read the current rate, pass rate < 0.
func SetMutexProfileFraction(rate int) int {
    if rate < 0 {
        return int(mutexprofilerate)
    }
    old := mutexprofilerate
    atomic.Store64(&mutexprofilerate, uint64(rate))
    return int(old)
}
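A minimal usage sketch, not part of mprof.go: a client turns on mutex profiling with runtime.SetMutexProfileFraction, restores the previous rate when done, and writes the profile with runtime/pprof. The fraction of 5 and the contention loop are arbitrary example values.

package main

import (
    "os"
    "runtime"
    "runtime/pprof"
    "sync"
)

func main() {
    // Sample roughly 1 in 5 mutex contention events; remember the old rate.
    old := runtime.SetMutexProfileFraction(5)
    defer runtime.SetMutexProfileFraction(old)

    // Create a little contention so the profile is non-empty.
    var mu sync.Mutex
    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for j := 0; j < 1000; j++ {
                mu.Lock()
                mu.Unlock()
            }
        }()
    }
    wg.Wait()

    pprof.Lookup("mutex").WriteTo(os.Stdout, 1)
}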
func mutexevent(cycles int64, skip int) {
    rate := int64(atomic.Load64(&mutexprofilerate))
    if rate > 0 && int64(fastrand())%rate == 0 {
        saveblockevent(cycles, skip+1, mutexProfile)
    }
}
// A StackRecord describes a single execution stack.
type StackRecord struct {
    Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
}
// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *StackRecord) Stack() []uintptr {
    for i, v := range r.Stack0 {
        if v == 0 {
            return r.Stack0[0:i]
        }
    }
    return r.Stack0[0:]
}
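A small sketch of how a StackRecord's PC prefix is typically consumed, assuming the PCs were captured with runtime.Callers; the record is resolved to symbolic frames with runtime.CallersFrames. This is illustrative client code, not part of the runtime.

package main

import (
    "fmt"
    "runtime"
)

func main() {
    var rec runtime.StackRecord
    // Fill Stack0 with the caller's PCs; unused tail entries stay 0,
    // which is what Stack() uses to find the end of the trace.
    runtime.Callers(1, rec.Stack0[:])

    // Resolve the PC prefix returned by Stack() into readable frames.
    frames := runtime.CallersFrames(rec.Stack())
    for {
        frame, more := frames.Next()
        fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
        if !more {
            break
        }
    }
}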
var MemProfileRate int = 512 * 1024
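MemProfileRate controls heap-profile sampling: on average one allocation per MemProfileRate bytes is recorded, 512 KiB by default, and 0 disables the profile. A sketch of the usual pattern of adjusting it once, as early as possible (the chosen 64 KiB value is only an example):

package main

import "runtime"

func init() {
    // Sample roughly one allocation per 64 KiB allocated instead of the
    // default 512 KiB. Set this once, before allocations of interest occur.
    runtime.MemProfileRate = 64 * 1024
}

func main() {
    // Heap profiles written via runtime/pprof now use the finer sampling rate.
}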
// A MemProfileRecord describes the live objects allocated
// by a particular call sequence (stack trace).
type MemProfileRecord struct {
    AllocBytes, FreeBytes     int64       // number of bytes allocated, freed
    AllocObjects, FreeObjects int64       // number of objects allocated, freed
    Stack0                    [32]uintptr // stack trace for this record; ends at first 0 entry
}
// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }

// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
func (r *MemProfileRecord) InUseObjects() int64 {
    return r.AllocObjects - r.FreeObjects
}
// Stack returns the stack trace associated with the record,
// a prefix of r.Stack0.
func (r *MemProfileRecord) Stack() []uintptr {
    for i, v := range r.Stack0 {
        if v == 0 {
            return r.Stack0[0:i]
        }
    }
    return r.Stack0[0:]
}
// MemProfile returns a profile of memory allocated and freed per allocation site.
// It returns n, the number of records in the current memory profile; if
// len(p) >= n, the profile is copied into p and ok is true. If inuseZero is
// true, records where all allocated memory has been freed are also included.
// Most clients should use the runtime/pprof package instead of calling
// MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
    lock(&proflock)
    // If we're between mProf_NextCycle and mProf_Flush, take care of
    // flushing to the active profile so we only have to look at the
    // active profile below.
    mProf_FlushLocked()
    clear := true
    for b := mbuckets; b != nil; b = b.allnext {
        mp := b.mp()
        if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
            n++
        }
        if mp.active.allocs != 0 || mp.active.frees != 0 {
            clear = false
        }
    }
    if clear {
        // Absolutely no data, suggesting that a garbage collection
        // has not yet happened. In order to allow profiling when
        // garbage collection is disabled from the beginning of execution,
        // accumulate all of the cycles, and recount buckets.
        n = 0
        for b := mbuckets; b != nil; b = b.allnext {
            mp := b.mp()
            for c := range mp.future {
                mp.active.add(&mp.future[c])
                mp.future[c] = memRecordCycle{}
            }
            if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
                n++
            }
        }
    }
    if n <= len(p) {
        ok = true
        idx := 0
        for b := mbuckets; b != nil; b = b.allnext {
            mp := b.mp()
            if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
                record(&p[idx], b)
                idx++
            }
        }
    }
    unlock(&proflock)
    return
}
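MemProfile follows the same two-call convention as the other profile readers: size the slice, then copy, retrying if new records appeared in between. A sketch of that loop in client code (the helper name readMemProfile is invented; runtime/pprof performs an equivalent loop internally):

package main

import (
    "fmt"
    "runtime"
)

func readMemProfile() []runtime.MemProfileRecord {
    // Ask for the record count first; loop because new records can appear
    // between the sizing call and the copying call.
    n, _ := runtime.MemProfile(nil, true)
    for {
        p := make([]runtime.MemProfileRecord, n+50)
        var ok bool
        n, ok = runtime.MemProfile(p, true)
        if ok {
            return p[:n]
        }
    }
}

func main() {
    records := readMemProfile()
    var inUse int64
    for i := range records {
        inUse += records[i].InUseBytes()
    }
    fmt.Printf("%d records, %d bytes in use (sampled)\n", len(records), inUse)
}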
// record writes bucket b's data into the MemProfileRecord r.
func record(r *MemProfileRecord, b *bucket) {
    mp := b.mp()
    r.AllocBytes = int64(mp.active.alloc_bytes)
    r.FreeBytes = int64(mp.active.free_bytes)
    r.AllocObjects = int64(mp.active.allocs)
    r.FreeObjects = int64(mp.active.frees)
    if raceenabled {
        racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), funcPC(MemProfile))
    }
    if msanenabled {
        msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
    }
    copy(r.Stack0[:], b.stk())
    for i := int(b.nstk); i < len(r.Stack0); i++ {
        r.Stack0[i] = 0
    }
}
func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
    lock(&proflock)
    for b := mbuckets; b != nil; b = b.allnext {
        mp := b.mp()
        fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
    }
    unlock(&proflock)
}
// BlockProfileRecord describes blocking events originated
// at a particular call sequence (stack trace).
type BlockProfileRecord struct {
    Count  int64
    Cycles int64
    StackRecord
}
// BlockProfile returns n, the number of records in the current blocking profile.
// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
// If len(p) < n, BlockProfile does not change p and returns n, false.
// Most clients should use the runtime/pprof package instead.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
    lock(&proflock)
    for b := bbuckets; b != nil; b = b.allnext {
        n++
    }
    if n <= len(p) {
        ok = true
        for b := bbuckets; b != nil; b = b.allnext {
            bp := b.bp()
            r := &p[0]
            r.Count = bp.count
            r.Cycles = bp.cycles
            if raceenabled {
                racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), funcPC(BlockProfile))
            }
            if msanenabled {
                msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
            }
            i := copy(r.Stack0[:], b.stk())
            for ; i < len(r.Stack0); i++ {
                r.Stack0[i] = 0
            }
            p = p[1:]
        }
    }
    unlock(&proflock)
    return
}
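A usage sketch for the raw BlockProfile API, assuming block profiling has been enabled with SetBlockProfileRate earlier in the program; real callers would normally retry with a larger slice instead of giving up when ok is false.

package main

import (
    "fmt"
    "runtime"
)

func main() {
    runtime.SetBlockProfileRate(1) // profiling must be on for records to accumulate

    // Size, then copy; pad the slice in case records appeared in between.
    n, _ := runtime.BlockProfile(nil)
    p := make([]runtime.BlockProfileRecord, n+10)
    n, ok := runtime.BlockProfile(p)
    if !ok {
        return // still too small; a real caller would retry
    }
    for _, r := range p[:n] {
        fmt.Printf("count=%d cycles=%d frames=%d\n", r.Count, r.Cycles, len(r.Stack()))
    }
}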
// MutexProfile returns n, the number of records in the current mutex profile.
// If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
// Otherwise, MutexProfile does not change p, and returns n, false.
// Most clients should use the runtime/pprof package instead.
func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
    lock(&proflock)
    for b := xbuckets; b != nil; b = b.allnext {
        n++
    }
    if n <= len(p) {
        ok = true
        for b := xbuckets; b != nil; b = b.allnext {
            bp := b.bp()
            r := &p[0]
            r.Count = int64(bp.count)
            r.Cycles = bp.cycles
            i := copy(r.Stack0[:], b.stk())
            for ; i < len(r.Stack0); i++ {
                r.Stack0[i] = 0
            }
            p = p[1:]
        }
    }
    unlock(&proflock)
    return
}
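MutexProfile reuses BlockProfileRecord, with Count holding the number of contention events and Cycles the total delay. A brief, illustrative sketch (not part of the runtime):

package main

import (
    "fmt"
    "runtime"
)

func main() {
    runtime.SetMutexProfileFraction(1) // record every contention event

    n, _ := runtime.MutexProfile(nil)
    p := make([]runtime.BlockProfileRecord, n+10)
    if n, ok := runtime.MutexProfile(p); ok {
        for _, r := range p[:n] {
            fmt.Printf("contentions=%d delay(cycles)=%d\n", r.Count, r.Cycles)
        }
    }
}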
//go:linkname runtime_goroutineProfileWithLabels runtime/pprof.runtime_goroutineProfileWithLabels
func runtime_goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
    return goroutineProfileWithLabels(p, labels)
}

// labels may be nil. If labels is non-nil, it must have the same length as p.
func goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
    if labels != nil && len(labels) != len(p) {
        labels = nil
    }
    gp := getg()

    isOK := func(gp1 *g) bool {
        // Checking isSystemGoroutine here makes GoroutineProfile
        // consistent with both NumGoroutine and Stack.
        return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false)
    }

    stopTheWorld("profile")

    n = 1
    for _, gp1 := range allgs {
        if isOK(gp1) {
            n++
        }
    }

    if n <= len(p) {
        ok = true
        r, lbl := p, labels

        // Save current goroutine.
        sp := getcallersp()
        pc := getcallerpc()
        systemstack(func() {
            saveg(pc, sp, gp, &r[0])
        })
        r = r[1:]

        // If we have a place to put our goroutine labelmap, insert it there.
        if labels != nil {
            lbl[0] = gp.labels
            lbl = lbl[1:]
        }

        // Save other goroutines.
        for _, gp1 := range allgs {
            if isOK(gp1) {
                if len(r) == 0 {
                    break // should be impossible; return a truncated profile rather than crash
                }
                saveg(^uintptr(0), ^uintptr(0), gp1, &r[0])
                if labels != nil {
                    lbl[0] = gp1.labels
                    lbl = lbl[1:]
                }
                r = r[1:]
            }
        }
    }

    startTheWorld()
    return n, ok
}
// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
// If len(p) < n, GoroutineProfile does not change p and returns n, false.
func GoroutineProfile(p []StackRecord) (n int, ok bool) {
    return goroutineProfileWithLabels(p, nil)
}
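A usage sketch for GoroutineProfile with the two-call sizing pattern; the extra goroutines exist only so the example has more than one record, and are not part of the runtime.

package main

import (
    "fmt"
    "runtime"
)

func main() {
    // Spin up a few extra goroutines so the profile has several records.
    done := make(chan struct{})
    for i := 0; i < 3; i++ {
        go func() { <-done }()
    }

    n, _ := runtime.GoroutineProfile(nil)
    p := make([]runtime.StackRecord, n+10)
    if n, ok := runtime.GoroutineProfile(p); ok {
        fmt.Printf("%d goroutines captured\n", n)
        for _, r := range p[:n] {
            fmt.Printf("  stack depth %d\n", len(r.Stack()))
        }
    }
    close(done)
}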
// saveg records gp's stack, starting at pc/sp, into r.
func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
    n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], len(r.Stack0), nil, nil, 0)
    if n < len(r.Stack0) {
        r.Stack0[n] = 0
    }
}
// Stack formats a stack trace of the calling goroutine into buf
// and returns the number of bytes written to buf.
// If all is true, Stack formats stack traces of all other goroutines
// into buf after the trace for the current goroutine.
func Stack(buf []byte, all bool) int {
    if all {
        stopTheWorld("stack trace")
    }

    n := 0
    if len(buf) > 0 {
        gp := getg()
        sp := getcallersp()
        pc := getcallerpc()
        systemstack(func() {
            g0 := getg()
            // Force traceback=1 to override GOTRACEBACK setting,
            // so that Stack's results are consistent.
            // GOTRACEBACK is only about crash dumps.
            g0.m.traceback = 1
            g0.writebuf = buf[0:0:len(buf)]
            goroutineheader(gp)
            traceback(pc, sp, 0, gp)
            if all {
                tracebackothers(gp)
            }
            g0.m.traceback = 0
            n = len(g0.writebuf)
            g0.writebuf = nil
        })
    }

    if all {
        startTheWorld()
    }
    return n
}
var tracelock mutex
// tracealloc prints a trace line and stack for an allocation of size bytes at p.
func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
    lock(&tracelock)
    gp := getg()
    gp.m.traceback = 2
    if typ == nil {
        print("tracealloc(", p, ", ", hex(size), ")\n")
    } else {
        print("tracealloc(", p, ", ", hex(size), ", ", typ.string(), ")\n")
    }
    if gp.m.curg == nil || gp == gp.m.curg {
        goroutineheader(gp)
        pc := getcallerpc()
        sp := getcallersp()
        systemstack(func() {
            traceback(pc, sp, 0, gp)
        })
    } else {
        goroutineheader(gp.m.curg)
        traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
    }
    print("\n")
    gp.m.traceback = 0
    unlock(&tracelock)
}
// tracefree prints a trace line and stack for a free of size bytes at p.
func tracefree(p unsafe.Pointer, size uintptr) {
    lock(&tracelock)
    gp := getg()
    gp.m.traceback = 2
    print("tracefree(", p, ", ", hex(size), ")\n")
    goroutineheader(gp)
    pc := getcallerpc()
    sp := getcallersp()
    systemstack(func() {
        traceback(pc, sp, 0, gp)
    })
    print("\n")
    gp.m.traceback = 0
    unlock(&tracelock)
}
func tracegc() {
    lock(&tracelock)
    gp := getg()
    gp.m.traceback = 2
    print("tracegc()\n")
    // running on m->g0 stack; show all non-g0 goroutines
    tracebackothers(gp)
    print("\n")
    gp.m.traceback = 0
    unlock(&tracelock)
}