Source File: malloc.go
Belonging Package: runtime
package runtime
import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
const (
	debugMalloc = false

	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift
	pageSize  = _PageSize

	maxObjsPerSpan = pageSize / 8

	concurrentSweep = _ConcurrentSweep

	_PageSize = 1 << _PageShift
	_PageMask = _PageSize - 1

	// _TinySize and _TinySizeClass describe the tiny allocator's
	// 16-byte blocks (see the tiny allocator in mallocgc).
	_TinySize      = 16
	_TinySizeClass = int8(2)

	_FixAllocChunk = 16 << 10 // Chunk size for FixAlloc

	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	_NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9

	// heapAddrBits is the number of bits in a heap address: 48 on
	// most 64-bit platforms, 33 on ios/arm64, and 32 or less on
	// 32-bit and wasm platforms.
	heapAddrBits = (_64bit*(1-sys.GoarchWasm)*(1-sys.GoosIos*sys.GoarchArm64))*48 + (1-_64bit+sys.GoarchWasm)*(32-(sys.GoarchMips+sys.GoarchMipsle)) + 33*sys.GoosIos*sys.GoarchArm64

	// maxAlloc is the maximum size of an allocation. On 64-bit it is
	// theoretically possible to allocate 1<<heapAddrBits bytes; on
	// 32-bit the limit is one byte less so that it fits in a uintptr.
	maxAlloc = (1 << heapAddrBits) - (1-_64bit)*1

	// heapArenaBytes is the size of a heap arena: 64 MiB on most
	// 64-bit platforms, 4 MiB on Windows, 32-bit, and wasm.
	heapArenaBytes = 1 << logHeapArenaBytes

	// logHeapArenaBytes is log_2 of heapArenaBytes.
	logHeapArenaBytes = (6+20)*(_64bit*(1-sys.GoosWindows)*(1-sys.GoarchWasm)) + (2+20)*(_64bit*sys.GoosWindows) + (2+20)*(1-_64bit) + (2+20)*sys.GoarchWasm

	// heapArenaBitmapBytes is the size of each heap arena's bitmap:
	// two bits per pointer-sized word.
	heapArenaBitmapBytes = heapArenaBytes / (sys.PtrSize * 8 / 2)

	pagesPerArena = heapArenaBytes / pageSize

	// arenaL1Bits is the number of bits of the arena number covered
	// by the first level arena map: 6 on 64-bit Windows, 0 elsewhere
	// (a single large second-level map).
	arenaL1Bits = 6 * (_64bit * sys.GoosWindows)

	_MaxGcproc = 32

	// minLegalPointer is the smallest possible legal pointer: the
	// smallest possible architectural page size, since the first
	// page is assumed to never be mapped.
	minLegalPointer uintptr = 4096
)
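To get a feel for the heap geometry these constants describe, here is a standalone sketch (not part of malloc.go; it hard-codes the 64-bit Linux values and the example* names are invented) showing how many pages one arena holds and how an address maps to an arena slot:

// Illustrative sketch only, assuming 64-bit Linux values of the constants
// above; exampleArenaIndex is invented and ignores the arenaBaseOffset
// that the real arenaIndex applies before dividing.
package main

import "fmt"

const (
	examplePageSize       = 8192    // pageSize on 64-bit Linux
	exampleHeapArenaBytes = 1 << 26 // heapArenaBytes: 64 MiB on 64-bit Linux
	examplePagesPerArena  = exampleHeapArenaBytes / examplePageSize
)

// exampleArenaIndex mirrors the idea behind arenaIndex: every
// heapArenaBytes-sized window of the address space gets one metadata slot.
func exampleArenaIndex(addr uint64) uint64 {
	return addr / exampleHeapArenaBytes
}

func main() {
	fmt.Println("pages per arena:", examplePagesPerArena) // 8192
	addr := uint64(0x00c000180000)                        // a typical Go heap address
	fmt.Printf("arena index: %#x\n", exampleArenaIndex(addr))
}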
// physPageSize is the size in bytes of the OS's physical pages.
// The mapping of the heap must be a multiple of this size.
var physPageSize uintptr

// physHugePageSize is the size in bytes of the OS's default physical huge
// page size, or zero if it is unknown or unusable; physHugePageShift is
// log2(physHugePageSize) when it is non-zero.
var (
	physHugePageSize  uintptr
	physHugePageShift uint
)
func mallocinit() {
	if class_to_size[_TinySizeClass] != _TinySize {
		throw("bad TinySizeClass")
	}

	testdefersizes()

	if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 {
		// heapBits expects modular arithmetic on bitmap
		// addresses to work.
		throw("heapArenaBitmapBytes not a power of 2")
	}

	// Copy class sizes out for statistics table.
	for i := range class_to_size {
		memstats.by_size[i].size = uint32(class_to_size[i])
	}

	// Check physPageSize.
	if physPageSize == 0 {
		// The OS init code failed to fetch the physical page size.
		throw("failed to get system page size")
	}
	if physPageSize > maxPhysPageSize {
		print("system page size (", physPageSize, ") is larger than maximum page size (", maxPhysPageSize, ")\n")
		throw("bad system page size")
	}
	if physPageSize < minPhysPageSize {
		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
		throw("bad system page size")
	}
	if physPageSize&(physPageSize-1) != 0 {
		print("system page size (", physPageSize, ") must be a power of 2\n")
		throw("bad system page size")
	}
	if physHugePageSize&(physHugePageSize-1) != 0 {
		print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
		throw("bad system huge page size")
	}
	if physHugePageSize > maxPhysHugePageSize {
		// The runtime has no code to handle huge pages this large,
		// so silently pretend huge pages are unavailable.
		physHugePageSize = 0
	}
	if physHugePageSize != 0 {
		// Since physHugePageSize is a power of 2, it suffices to increase
		// physHugePageShift until 1<<physHugePageShift == physHugePageSize.
		for 1<<physHugePageShift != physHugePageSize {
			physHugePageShift++
		}
	}
	if pagesPerArena%pagesPerSpanRoot != 0 {
		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerSpanRoot (", pagesPerSpanRoot, ")\n")
		throw("bad pagesPerSpanRoot")
	}
	if pagesPerArena%pagesPerReclaimerChunk != 0 {
		print("pagesPerArena (", pagesPerArena, ") is not divisible by pagesPerReclaimerChunk (", pagesPerReclaimerChunk, ")\n")
		throw("bad pagesPerReclaimerChunk")
	}
	// Create initial arena growth hints.
	if sys.PtrSize == 8 {
		// On a 64-bit machine, pick hint addresses that are spread
		// out and unlikely to collide with other mappings.
		for i := 0x7f; i >= 0; i-- {
			var p uintptr
			switch {
			case raceenabled:
				// The TSAN runtime requires the heap to be in the
				// range [0x00c000000000, 0x00e000000000).
				p = uintptr(i)<<32 | uintptrMask&(0x00c0<<32)
				if p >= uintptrMask&0x00e000000000 {
					continue
				}
			case GOARCH == "arm64" && GOOS == "ios":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			case GOOS == "aix":
				if i == 0 {
					// Don't use addresses directly after 0x0A00000000000000
					// to avoid collisions with mmaps done by non-Go code.
					continue
				}
				p = uintptr(i)<<40 | uintptrMask&(0xa0<<52)
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
			hint.addr = p
			hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
		}
	} else {
		// On a 32-bit machine, we're much more concerned about
		// keeping the usable heap contiguous.

		// Reserve space for the heap arena metadata first.
		const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
		meta := uintptr(sysReserve(nil, arenaMetaSize))
		if meta != 0 {
			mheap_.heapArenaAlloc.init(meta, arenaMetaSize)
		}

		// We want to start the arena low, but if we're linked
		// against C code, it's possible global constructors
		// have called malloc and adjusted the process' brk.
		// Query the brk so we can avoid trying to map the
		// region over it (which will cause the kernel to put
		// the region somewhere else, likely at a high address).
		procBrk := sbrk0()

		p := firstmoduledata.end
		if p < procBrk {
			p = procBrk
		}
		if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
			p = mheap_.heapArenaAlloc.end
		}
		p = alignUp(p+(256<<10), heapArenaBytes)

		// Because we're worried about fragmentation on 32-bit,
		// we try to make a large initial reservation.
		arenaSizes := []uintptr{
			512 << 20,
			256 << 20,
			128 << 20,
		}
		for _, arenaSize := range arenaSizes {
			a, size := sysReserveAligned(unsafe.Pointer(p), arenaSize, heapArenaBytes)
			if a != nil {
				mheap_.arena.init(uintptr(a), size)
				p = mheap_.arena.end // For hint below
				break
			}
		}
		hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
		hint.addr = p
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
	}
}
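The 64-bit branch above seeds mheap_.arenaHints with addresses of the form i<<40 | 0x00c0<<32. A standalone sketch (not runtime code) of the resulting address pattern:

// Illustrative sketch only: the address pattern produced by the default
// case of the 64-bit hint loop in mallocinit (the race, arm64, and aix
// special cases are ignored).
package main

import "fmt"

func main() {
	var hints []uint64
	for i := 0x7f; i >= 0; i-- {
		hints = append(hints, uint64(i)<<40|0x00c0<<32)
	}
	// The runtime prepends each hint, so the first address tried is the
	// last one generated here: 0x00c000000000, then 0x01c000000000, and
	// so on up to 0x7fc000000000.
	fmt.Printf("%#x %#x %#x\n", hints[len(hints)-1], hints[len(hints)-2], hints[0])
}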
// sysAlloc allocates heap arena space for at least n bytes. The returned
// pointer is always heapArenaBytes-aligned and the returned size is always
// a multiple of heapArenaBytes. sysAlloc returns nil on failure.
// h must be locked.
func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
	assertLockHeld(&h.lock)

	n = alignUp(n, heapArenaBytes)

	// Try to grow the heap at a hint address.
	for h.arenaHints != nil {
		hint := h.arenaHints
		p := hint.addr
		if hint.down {
			p -= n
		}
		if p+n < p {
			// We can't use this, so don't ask.
			v = nil
		} else if arenaIndex(p+n-1) >= 1<<arenaBits {
			// Outside addressable heap. Can't use.
			v = nil
		} else {
			v = sysReserve(unsafe.Pointer(p), n)
		}
		if p == uintptr(v) {
			// Success. Update the hint.
			if !hint.down {
				p += n
			}
			hint.addr = p
			size = n
			break
		}
		// Failed. Discard this hint and try the next.
		if v != nil {
			sysFree(v, n, nil)
		}
		h.arenaHints = hint.next
		h.arenaHintAlloc.free(unsafe.Pointer(hint))
	}
	if size == 0 {
		if raceenabled {
			// The race detector assumes the heap lives in
			// [0x00c000000000, 0x00e000000000), but we just
			// ran out of hints in this region.
			throw("too many address space collisions for -race mode")
		}

		// All of the hints failed, so we'll take any
		// (sufficiently aligned) address the kernel will give us.
		v, size = sysReserveAligned(nil, n, heapArenaBytes)
		if v == nil {
			return nil, 0
		}

		// Create new hints for extending this region.
		hint := (*arenaHint)(h.arenaHintAlloc.alloc())
		hint.addr, hint.down = uintptr(v), true
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
		hint = (*arenaHint)(h.arenaHintAlloc.alloc())
		hint.addr = uintptr(v) + size
		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
	}
	// Check for bad pointers or pointers we can't use.
	{
		var bad string
		p := uintptr(v)
		if p+size < p {
			bad = "region exceeds uintptr range"
		} else if arenaIndex(p) >= 1<<arenaBits {
			bad = "base outside usable address space"
		} else if arenaIndex(p+size-1) >= 1<<arenaBits {
			bad = "end outside usable address space"
		}
		if bad != "" {
			// This should be impossible on most architectures,
			// but it would be really confusing to debug.
			print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
			throw("memory reservation exceeds address space limit")
		}
	}

	// Create arena metadata.
	for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
		l2 := h.arenas[ri.l1()]
		if l2 == nil {
			// Allocate an L2 arena map.
			l2 = (*[1 << arenaL2Bits]*heapArena)(persistentalloc(unsafe.Sizeof(*l2), sys.PtrSize, nil))
			if l2 == nil {
				throw("out of memory allocating heap arena map")
			}
			atomic.StorepNoWB(unsafe.Pointer(&h.arenas[ri.l1()]), unsafe.Pointer(l2))
		}
		if l2[ri.l2()] != nil {
			throw("arena already initialized")
		}
		var r *heapArena
		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
		if r == nil {
			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
			if r == nil {
				throw("out of memory allocating heap arena metadata")
			}
		}

		// Add the arena to the arenas list.
		if len(h.allArenas) == cap(h.allArenas) {
			size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize
			if size == 0 {
				size = physPageSize
			}
			newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gcMiscSys))
			if newArray == nil {
				throw("out of memory allocating allArenas")
			}
			oldSlice := h.allArenas
			*(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)}
			copy(h.allArenas, oldSlice)
			// Do not free the old backing array because there may be
			// concurrent readers; doubling bounds the waste to 2x.
		}
		h.allArenas = h.allArenas[:len(h.allArenas)+1]
		h.allArenas[len(h.allArenas)-1] = ri

		// Store atomically in case an object from the new heap arena
		// becomes visible before the heap lock is released.
		atomic.StorepNoWB(unsafe.Pointer(&l2[ri.l2()]), unsafe.Pointer(r))
	}

	// Tell the race detector about the new heap memory.
	if raceenabled {
		racemapshadow(v, size)
	}

	return
}
// sysReserveAligned is like sysReserve, but the returned pointer is
// aligned to align bytes. It may reserve either size or size+align
// bytes, so it returns the size that was reserved.
func sysReserveAligned(v unsafe.Pointer, size, align uintptr) (unsafe.Pointer, uintptr) {
	// Since the alignment is rather large in uses of this function, we're
	// not likely to get it by chance, so ask for a larger region and
	// trim off the parts we don't need.
	retries := 0
retry:
	p := uintptr(sysReserve(v, size+align))
	switch {
	case p == 0:
		return nil, 0
	case p&(align-1) == 0:
		// We got lucky and got an aligned region, so we can
		// use the whole thing.
		return unsafe.Pointer(p), size + align
// nextFreeFast returns the next free object in the span's allocCache if
// one is quickly available. Otherwise it returns 0.
func nextFreeFast(s *mspan) gclinkptr {
	theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache?
	if theBit < 64 {
		result := s.freeindex + uintptr(theBit)
		if result < s.nelems {
			freeidx := result + 1
			if freeidx%64 == 0 && freeidx != s.nelems {
				return 0
			}
			s.allocCache >>= uint(theBit + 1)
			s.freeindex = freeidx
			s.allocCount++
			return gclinkptr(result*s.elemsize + s.base())
		}
	}
	return 0
}
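nextFreeFast scans s.allocCache, a 64-bit bitmap of free slots, with a count-trailing-zeros instruction. A standalone sketch of that bitmap trick (math/bits stands in for sys.Ctz64; the names are invented):

// Illustrative sketch only: finding the next free slot in a 64-bit
// free-slot bitmap with count-trailing-zeros, the same trick
// nextFreeFast applies to s.allocCache.
package main

import (
	"fmt"
	"math/bits"
)

// nextFree returns the offset of the lowest set bit (a free slot) and the
// bitmap with that slot consumed, or ok=false if the bitmap is empty.
func nextFree(cache uint64) (idx int, rest uint64, ok bool) {
	idx = bits.TrailingZeros64(cache) // 64 means no free slot is cached
	if idx == 64 {
		return 0, 0, false
	}
	return idx, cache >> uint(idx+1), true
}

func main() {
	cache := uint64(0b10110000) // bits 4, 5, and 7 set: those slots are free
	freeIndex := 0
	for {
		idx, rest, ok := nextFree(cache)
		if !ok {
			break
		}
		freeIndex += idx // absolute slot number, like s.freeindex
		fmt.Println("allocate slot", freeIndex)
		freeIndex++ // the slot is now used
		cache = rest
	}
	// Output: allocate slot 4, 5, 7
}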
// nextFree returns the next free object from the cached span if one is
// available; otherwise it refills the cache and returns an object from the
// new span, reporting the refill through shouldhelpgc.
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
	s = c.alloc[spc]
	shouldhelpgc = false
	freeIndex := s.nextFreeIndex()
	if freeIndex == s.nelems {
		// The span is full.
		if uintptr(s.allocCount) != s.nelems {
			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
		}
		c.refill(spc)
		shouldhelpgc = true
		s = c.alloc[spc]

		freeIndex = s.nextFreeIndex()
	}

	if freeIndex >= s.nelems {
		throw("freeIndex is not valid")
	}

	v = gclinkptr(freeIndex*s.elemsize + s.base())
	s.allocCount++
	if uintptr(s.allocCount) > s.nelems {
		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
		throw("s.allocCount > s.nelems")
	}
	return
}
// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if debug.malloc && debug.sbrk != 0 {
		align := uintptr(16)
		if typ != nil {
			// Use stricter alignment than typ.align so that 64-bit
			// fields stay 8-byte aligned even on 32-bit platforms.
			if size&7 == 0 {
				align = 8
			} else if size&3 == 0 {
				align = 4
			} else if size&1 == 0 {
				align = 2
			} else {
				align = 1
			}
		}
		return persistentalloc(size, align, &memstats.other_sys)
	}
	// assistG is the G to charge for this allocation, or nil if
	// GC is not currently active.
	var assistG *g
	if gcBlackenEnabled != 0 {
		// Charge the current user G for this allocation.
		assistG = getg()
		if assistG.m.curg != nil {
			assistG = assistG.m.curg
		}
		// Charge the allocation against the G. We'll account
		// for internal fragmentation at the end of mallocgc.
		assistG.gcAssistBytes -= int64(size)

		if assistG.gcAssistBytes < 0 {
			// This G is in debt. Assist the GC to correct
			// this before allocating.
			gcAssistAlloc(assistG)
		}
	}
	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	shouldhelpgc := false
	dataSize := size
	c := getMCache()
	if c == nil {
		throw("mallocgc called without a P or outside bootstrapping")
	}
	var span *mspan
	var x unsafe.Pointer
	noscan := typ == nil || typ.ptrdata == 0
	if size <= maxSmallSize {
		if noscan && size < maxTinySize {
			// Tiny allocator: combine several tiny allocation
			// requests into a single 16-byte memory block.
			off := c.tinyoffset
			// Align tiny pointer for required (conservative) alignment.
			if size&7 == 0 {
				off = alignUp(off, 8)
			} else if size&3 == 0 {
				off = alignUp(off, 4)
			} else if size&1 == 0 {
				off = alignUp(off, 2)
			}
			if off+size <= maxTinySize && c.tiny != 0 {
				// The object fits into existing tiny block.
				x = unsafe.Pointer(c.tiny + off)
				c.tinyoffset = off + size
				c.tinyAllocs++
				mp.mallocing = 0
				releasem(mp)
				return x
			}
			// Allocate a new maxTinySize block.
			span = c.alloc[tinySpanClass]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(tinySpanClass)
			}
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the
			// new one based on the amount of remaining free space.
			if size < c.tinyoffset || c.tiny == 0 {
				c.tiny = uintptr(x)
				c.tinyoffset = size
			}
			size = maxTinySize
		} else {
			var sizeclass uint8
			if size <= smallSizeMax-8 {
				sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
			} else {
				sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
			}
			size = uintptr(class_to_size[sizeclass])
			spc := makeSpanClass(sizeclass, noscan)
			span = c.alloc[spc]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(spc)
			}
			x = unsafe.Pointer(v)
			if needzero && span.needzero != 0 {
				memclrNoHeapPointers(unsafe.Pointer(v), size)
			}
		}
	} else {
		shouldhelpgc = true
		// Large allocations (> maxSmallSize) go straight to the heap.
		span = c.allocLarge(size, needzero, noscan)
		span.freeindex = 1
		span.allocCount = 1
		x = unsafe.Pointer(span.base())
		size = span.elemsize
	}
	var scanSize uintptr

	// Allocate black during GC.
	// All slots hold nil so no scanning is needed.
	if gcphase != _GCoff {
		gcmarknewobject(span, uintptr(x), size, scanSize)
	}

	if raceenabled {
		racemalloc(x, size)
	}

	if msanenabled {
		msanmalloc(x, size)
	}

	mp.mallocing = 0
	releasem(mp)

	if debug.malloc {
		if debug.allocfreetrace != 0 {
			tracealloc(x, size, typ)
		}

		if inittrace.active && inittrace.id == getg().goid {
			// Init functions are executed sequentially in a single goroutine.
			inittrace.bytes += uint64(size)
		}
	}
	if rate := MemProfileRate; rate > 0 {
		if rate != 1 && size < c.nextSample {
			c.nextSample -= size
		} else {
			mp := acquirem()
			profilealloc(mp, x, size)
			releasem(mp)
		}
	}

	if assistG != nil {
		// Account for internal fragmentation in the assist
		// debt now that we know it.
		assistG.gcAssistBytes -= int64(size - dataSize)
	}

	if shouldhelpgc {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}

	return x
}
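The tiny allocator above packs several small noscan objects into one 16-byte block by bumping c.tinyoffset. A standalone sketch of that bump-packing logic (simplified; tinyBlock, place, and alignUp here are invented for the example and omit the runtime's 32-bit special case):

// Illustrative sketch only: bump-packing tiny allocations into 16-byte
// blocks, in the spirit of mallocgc's tiny allocator.
package main

import "fmt"

const maxTiny = 16

type tinyBlock struct {
	buf [maxTiny]byte
	off uintptr
}

func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

// place returns the offset for a size-byte object inside the block,
// or false if it does not fit and a fresh block is needed.
func (b *tinyBlock) place(size uintptr) (uintptr, bool) {
	off := b.off
	switch {
	case size&7 == 0:
		off = alignUp(off, 8)
	case size&3 == 0:
		off = alignUp(off, 4)
	case size&1 == 0:
		off = alignUp(off, 2)
	}
	if off+size > maxTiny {
		return 0, false
	}
	b.off = off + size
	return off, true
}

func main() {
	var b tinyBlock
	for _, size := range []uintptr{3, 4, 8, 8} {
		if off, ok := b.place(size); ok {
			fmt.Printf("size %d at offset %d\n", size, off)
		} else {
			fmt.Printf("size %d needs a new block\n", size)
		}
	}
}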
//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}
func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	c := getMCache()
	if c == nil {
		throw("profilealloc called without a P or outside bootstrapping")
	}
	c.nextSample = nextSample()
	mProf_Malloc(x, size)
}
// nextSample returns the next sampling point for heap profiling. The goal is
// to sample allocations on average every MemProfileRate bytes, with a
// completely random distribution over the allocation timeline; this
// corresponds to a Poisson process with parameter MemProfileRate.
func nextSample() uintptr {
	if MemProfileRate == 1 {
		// Callers assign our return value to mcache.nextSample, but
		// nextSample is not used when the rate is 1, so skip the math.
		return 0
	}
	if GOOS == "plan9" {
		// Plan 9 doesn't support floating point in note handler.
		if g := getg(); g == g.m.gsignal {
			return nextSampleNoFP()
		}
	}

	return uintptr(fastexprand(MemProfileRate))
}
// fastexprand returns a random number from an exponential distribution
// with the given mean.
func fastexprand(mean int) int32 {
	// Avoid overflow.
	switch {
	case mean > 0x7000000:
		mean = 0x7000000
	case mean == 0:
		return 0
	}

	// Take a random sample of the exponential distribution exp(-mean*x),
	// computed in fixed point via fastlog2.
	const randomBitCount = 26
	q := fastrand()%(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(mean))) + 1
}

// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() uintptr {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow
		rate = 0x3fffffff
	}
	if rate != 0 {
		return uintptr(fastrand() % uint32(2*rate))
	}
	return 0
}
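nextSample draws the distance to the next profiled allocation from an exponential distribution with mean MemProfileRate, which makes sampling a Poisson process over allocated bytes. A standalone sketch of the same idea using math/rand instead of the runtime's fastexprand:

// Illustrative sketch only: choosing heap-profiling sample points from an
// exponential distribution, as nextSample does, but with math/rand.
package main

import (
	"fmt"
	"math/rand"
)

func nextSamplePoint(mean float64) uintptr {
	// ExpFloat64 has mean 1; scale it so samples land on average
	// every `mean` allocated bytes.
	return uintptr(rand.ExpFloat64() * mean)
}

func main() {
	const rate = 512 * 1024 // the default MemProfileRate: 512 KiB
	var sum uintptr
	const n = 100000
	for i := 0; i < n; i++ {
		sum += nextSamplePoint(rate)
	}
	fmt.Println("average gap between samples:", sum/n) // close to 512 KiB
}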
// persistentAlloc is a simple bump allocator for off-heap memory that is
// never freed.
type persistentAlloc struct {
	base *notInHeap
	off  uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}

// persistentChunks is a list of persistent memory chunks we have
// allocated. The list is maintained through the first word in the
// persistent memory chunk. This is updated atomically.
var persistentChunks *notInHeap

// persistentChunkSize is the number of bytes we allocate when we grow
// a persistentAlloc.
const persistentChunkSize = 256 << 10
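persistentalloc hands out pieces of persistentChunkSize-byte chunks with a simple bump pointer and never frees them. A standalone sketch of that bump-allocation pattern over a plain byte slice (bumpAlloc and chunkSize are invented; the real allocator works on off-heap memory from sysAlloc and handles large requests separately):

// Illustrative sketch only: a never-freeing bump allocator over a byte
// slice, mirroring how persistentalloc carves up its chunks.
package main

import "fmt"

const chunkSize = 256 << 10 // like persistentChunkSize

func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

type bumpAlloc struct {
	chunk []byte
	off   uintptr
}

// alloc returns size bytes at an align-aligned offset, grabbing a new chunk
// when the request does not fit. Assumes size <= chunkSize; no free exists.
func (b *bumpAlloc) alloc(size, align uintptr) []byte {
	b.off = alignUp(b.off, align)
	if b.chunk == nil || b.off+size > chunkSize {
		b.chunk = make([]byte, chunkSize)
		b.off = 0
	}
	p := b.chunk[b.off : b.off+size]
	b.off += size
	return p
}

func main() {
	var b bumpAlloc
	b.alloc(24, 8)
	b.alloc(10, 8)
	fmt.Println(b.off, len(b.chunk)) // 34 262144
}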
// persistentalloc wraps sysAlloc so it can hand out small chunks.
// There is no associated free operation. If align is 0, the default
// alignment (currently 8) is used. The returned memory is zeroed.
func persistentalloc(size, align uintptr, sysStat *sysMemStat) unsafe.Pointer {
	var p *notInHeap
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return unsafe.Pointer(p)
}
// Must run on the system stack because stack growth can (re)invoke it.
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
	const (
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return (*notInHeap)(sysAlloc(size, sysStat))
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = alignUp(persistent.off, align)
	if persistent.off+size > persistentChunkSize || persistent.base == nil {
		persistent.base = (*notInHeap)(sysAlloc(persistentChunkSize, &memstats.other_sys))
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}

		// Add the new chunk to the persistentChunks list.
		for {
			chunks := uintptr(unsafe.Pointer(persistentChunks))
			*(*uintptr)(unsafe.Pointer(persistent.base)) = chunks
			if atomic.Casuintptr((*uintptr)(unsafe.Pointer(&persistentChunks)), chunks, uintptr(unsafe.Pointer(persistent.base))) {
				break
			}
		}
		persistent.off = alignUp(sys.PtrSize, align)
	}
	p := persistent.base.add(persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		sysStat.add(int64(size))
		memstats.other_sys.add(-int64(size))
	}
	return p
}
// inPersistentAlloc reports whether p points to memory allocated by
// persistentalloc. This must be nosplit because it is called by the
// cgo checker code, which is called by the write barrier code.
//go:nosplit
func inPersistentAlloc(p uintptr) bool {
	chunk := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&persistentChunks)))
	for chunk != 0 {
		if p >= chunk && p < chunk+persistentChunkSize {
			return true
		}
		chunk = *(*uintptr)(unsafe.Pointer(chunk))
	}
	return false
}
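persistentChunks threads its list through the first word of each chunk, which is exactly what inPersistentAlloc walks above. A standalone sketch of that intrusive-list trick (chunkA, chunkB, addChunk, and inChunks are invented; package-level arrays stand in for the off-heap chunks so their addresses are stable):

// Illustrative sketch only: an intrusive chunk list kept in the first
// word of each chunk, walked the same way inPersistentAlloc walks
// persistentChunks.
package main

import (
	"fmt"
	"unsafe"
)

const chunkSize = 1 << 12 // chunk size in bytes

// uint64-backed arrays keep the first word aligned for the pointer store.
var chunkA, chunkB [chunkSize / 8]uint64

// chunkList is the head of the list; 0 means empty.
var chunkList uintptr

func addChunk(c *[chunkSize / 8]uint64) {
	// The first word of the chunk points at the previous head.
	*(*uintptr)(unsafe.Pointer(c)) = chunkList
	chunkList = uintptr(unsafe.Pointer(c))
}

func inChunks(addr uintptr) bool {
	for c := chunkList; c != 0; c = *(*uintptr)(unsafe.Pointer(c)) {
		if addr >= c && addr < c+chunkSize {
			return true
		}
	}
	return false
}

func main() {
	addChunk(&chunkA)
	addChunk(&chunkB)
	fmt.Println(inChunks(uintptr(unsafe.Pointer(&chunkB[100])))) // true
	fmt.Println(inChunks(uintptr(unsafe.Pointer(&chunkList))))   // false
}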
// linearAlloc is a simple linear allocator that pre-reserves a region
// of memory and then maps that region as needed.
type linearAlloc struct {
	next   uintptr // next free byte
	mapped uintptr // one byte past end of mapped space
	end    uintptr // end of reserved space
}
func (l *linearAlloc) init(base, size uintptr) {
	l.next, l.mapped = base, base
	l.end = base + size
}
The pages are generated with Golds v0.3.2-preview (GOOS=darwin GOARCH=amd64). Golds is a Go 101 project developed by Tapir Liu.