// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: finalizers and block profiling.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
// finblock is an array of finalizers to be executed. finblocks are
// arranged in a linked list for the finalizer queue.
//
// finblock is allocated from non-GC'd memory, so any heap pointers
// must be specially handled. GC currently assumes that the finalizer
// queue does not grow during marking (but it can shrink).
//
//go:notinheap
type finblock struct {
	alllink *finblock
	next    *finblock
	cnt     uint32
	_       int32
	fin     [(_FinBlockSize - 2*sys.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
}
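
// Illustrative sketch, not part of the original source: the same
// chunked-queue idea in ordinary user code, using a hypothetical item type
// and chunk size. New entries fill the head chunk until it is full, then a
// fresh chunk is pushed on the front, mirroring how queuefinalizer below
// grows finq one finblock at a time.
//
//	type chunk struct {
//		next  *chunk
//		cnt   int
//		items [128]int
//	}
//
//	func push(head *chunk, v int) *chunk {
//		if head == nil || head.cnt == len(head.items) {
//			head = &chunk{next: head}
//		}
//		head.items[head.cnt] = v
//		head.cnt++
//		return head
//	}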

var finlock mutex  // protects the following variables
var fing *g        // goroutine that runs finalizers
var finq *finblock // list of finalizers that are to be executed
var finc *finblock // cache of free blocks
var finptrmask [_FinBlockSize / sys.PtrSize / 8]byte
var fingwait bool
var fingwake bool
var allfin *finblock // list of all blocks
// NOTE: Layout known to queuefinalizer.
type finalizer struct {
	fn   *funcval       // function to call (may be a heap pointer)
	arg  unsafe.Pointer // ptr to object (may be a heap pointer)
	nret uintptr        // bytes of return values from fn
	fint *_type         // type of first argument of fn
	ot   *ptrtype       // type of ptr to object (may be a heap pointer)
}

var finalizer1 = [...]byte{
	// Each Finalizer is 5 words, ptr ptr INT ptr ptr (INT = uintptr here)
	// Each byte describes 8 words.
	// Need 8 Finalizers described by 5 bytes before pattern repeats:
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	//	ptr ptr INT ptr ptr
	// aka
	//
	//	ptr ptr INT ptr ptr ptr ptr INT
	//	ptr ptr ptr ptr INT ptr ptr ptr
	//	ptr INT ptr ptr ptr ptr INT ptr
	//	ptr ptr ptr INT ptr ptr ptr ptr
	//	INT ptr ptr ptr ptr INT ptr ptr
	//
	// Assumptions about Finalizer layout checked below.
	1<<0 | 1<<1 | 0<<2 | 1<<3 | 1<<4 | 1<<5 | 1<<6 | 0<<7,
	1<<0 | 1<<1 | 1<<2 | 1<<3 | 0<<4 | 1<<5 | 1<<6 | 1<<7,
	1<<0 | 0<<1 | 1<<2 | 1<<3 | 1<<4 | 1<<5 | 0<<6 | 1<<7,
	1<<0 | 1<<1 | 1<<2 | 0<<3 | 1<<4 | 1<<5 | 1<<6 | 1<<7,
	0<<0 | 1<<1 | 1<<2 | 1<<3 | 1<<4 | 0<<5 | 1<<6 | 1<<7,
}
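
// Illustrative sketch, not part of the original source: the arithmetic
// behind the five bytes above. Each finalizer covers 5 words and each mask
// byte covers 8 words, so the pattern repeats every lcm(5, 8) = 40 words,
// i.e. every 8 finalizers / 5 bytes. The hypothetical helper below rebuilds
// the mask from the per-word layout (fn, arg, nret, fint, ot = ptr, ptr,
// scalar, ptr, ptr) and should reproduce finalizer1.
//
//	func buildFinalizerMask() [5]byte {
//		word := [5]byte{1, 1, 0, 1, 1} // 1 = pointer word, 0 = scalar word
//		var mask [5]byte
//		for i := 0; i < 40; i++ {
//			mask[i/8] |= word[i%5] << (i % 8)
//		}
//		return mask
//	}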

func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) {
	if gcphase != _GCoff {
		// Currently we assume that the finalizer queue won't
		// grow during marking so we don't have to rescan it
		// during mark termination. If we ever need to lift
		// this assumption, we can do it by adding the
		// necessary barriers to queuefinalizer (which it may
		// have automatically).
		throw("queuefinalizer during GC")
	}

	lock(&finlock)
	if finq == nil || finq.cnt == uint32(len(finq.fin)) {
		if finc == nil {
			finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gcMiscSys))
			finc.alllink = allfin
			allfin = finc
			if finptrmask[0] == 0 {
				// Build pointer mask for Finalizer array in block.
				// Check assumptions made in finalizer1 array above.
				if (unsafe.Sizeof(finalizer{}) != 5*sys.PtrSize ||
					unsafe.Offsetof(finalizer{}.fn) != 0 ||
					unsafe.Offsetof(finalizer{}.arg) != sys.PtrSize ||
					unsafe.Offsetof(finalizer{}.nret) != 2*sys.PtrSize ||
					unsafe.Offsetof(finalizer{}.fint) != 3*sys.PtrSize ||
					unsafe.Offsetof(finalizer{}.ot) != 4*sys.PtrSize) {
					throw("finalizer out of sync")
				}
				for i := range finptrmask {
					finptrmask[i] = finalizer1[i%len(finalizer1)]
				}
			}
		}
		block := finc
		finc = block.next
		block.next = finq
		finq = block
	}
	f := &finq.fin[finq.cnt]
	atomic.Xadd(&finq.cnt, +1) // Sync with markroots
	f.fn = fn
	f.nret = nret
	f.fint = fint
	f.ot = ot
	f.arg = p
	fingwake = true
	unlock(&finlock)
}
//go:nowritebarrier
func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype)) {
	for fb := allfin; fb != nil; fb = fb.alllink {
		for i := uint32(0); i < fb.cnt; i++ {
			f := &fb.fin[i]
			callback(f.fn, f.arg, f.nret, f.fint, f.ot)
		}
	}
}

func wakefing() *g {
	var res *g
	lock(&finlock)
	if fingwait && fingwake {
		fingwait = false
		fingwake = false
		res = fing
	}
	unlock(&finlock)
	return res
}

var (
	fingCreate  uint32
	fingRunning bool
)

func createfing() {
	// start the finalizer goroutine exactly once
	if fingCreate == 0 && atomic.Cas(&fingCreate, 0, 1) {
		go runfinq()
	}
}
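
// Illustrative sketch, not part of the original source: the same
// create-exactly-once pattern written against the public sync/atomic
// package, with a hypothetical startWorker/worker pair standing in for
// createfing/runfinq. In ordinary code sync.Once is the usual tool; the
// CAS form is shown because it mirrors createfing above.
//
//	var workerStarted uint32
//
//	func startWorker() {
//		if atomic.CompareAndSwapUint32(&workerStarted, 0, 1) {
//			go worker()
//		}
//	}
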
// This is the goroutine that runs all of the finalizers
func runfinq() {
	var (
		frame    unsafe.Pointer
		framecap uintptr
	)

	for {
		lock(&finlock)
		fb := finq
		finq = nil
		if fb == nil {
			gp := getg()
			fing = gp
			fingwait = true
			goparkunlock(&finlock, waitReasonFinalizerWait, traceEvGoBlock, 1)
			continue
		}
		unlock(&finlock)
		if raceenabled {
			racefingo()
		}
		for fb != nil {
			for i := fb.cnt; i > 0; i-- {
				f := &fb.fin[i-1]

				framesz := unsafe.Sizeof((interface{})(nil)) + f.nret
				if framecap < framesz {
					// The frame does not contain pointers interesting for GC,
					// all not yet finalized objects are stored in finq.
					// If we do not mark it as FlagNoScan,
					// the last finalized object is not collected.
					frame = mallocgc(framesz, nil, true)
					framecap = framesz
				}

				if f.fint == nil {
					throw("missing type in runfinq")
				}
				// frame is effectively uninitialized
				// memory. That means we have to clear
				// it before writing to it to avoid
				// confusing the write barrier.
				*(*[2]uintptr)(frame) = [2]uintptr{}
				switch f.fint.kind & kindMask {
				case kindPtr:
					// direct use of pointer
					*(*unsafe.Pointer)(frame) = f.arg
				case kindInterface:
					ityp := (*interfacetype)(unsafe.Pointer(f.fint))
					// set up with empty interface
					(*eface)(frame)._type = &f.ot.typ
					(*eface)(frame).data = f.arg
					if len(ityp.mhdr) != 0 {
						// convert to interface with methods
						// this conversion is guaranteed to succeed - we checked in SetFinalizer
						*(*iface)(frame) = assertE2I(ityp, *(*eface)(frame))
					}
				default:
					throw("bad kind in runfinq")
				}
				fingRunning = true
				reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz))
				fingRunning = false
				// Drop finalizer queue heap references
				// before hiding them from markroot.
				// This also ensures these will be
				// clear if we reuse the finalizer.
				f.fn = nil
				f.arg = nil
				f.ot = nil
				atomic.Store(&fb.cnt, i-1)
			}
			next := fb.next
			lock(&finlock)
			fb.next = finc
			finc = fb
			unlock(&finlock)
			fb = next
		}
	}
}
// SetFinalizer sets the finalizer associated with obj to the provided
// finalizer function. When the garbage collector finds an unreachable block
// with an associated finalizer, it clears the association and runs
// finalizer(obj) in a separate goroutine. This makes obj reachable again,
// but now without an associated finalizer. Assuming that SetFinalizer
// is not called again, the next time the garbage collector sees
// that obj is unreachable, it will free obj.
//
// SetFinalizer(obj, nil) clears any finalizer associated with obj.
//
// The argument obj must be a pointer to an object allocated by calling
// new, by taking the address of a composite literal, or by taking the
// address of a local variable.
// The argument finalizer must be a function that takes a single argument
// to which obj's type can be assigned, and can have arbitrary ignored return
// values. If either of these is not true, SetFinalizer may abort the
// program.
//
// Finalizers are run in dependency order: if A points at B, both have
// finalizers, and they are otherwise unreachable, only the finalizer
// for A runs; once A is freed, the finalizer for B can run.
// If a cyclic structure includes a block with a finalizer, that
// cycle is not guaranteed to be garbage collected and the finalizer
// is not guaranteed to run, because there is no ordering that
// respects the dependencies.
//
// The finalizer is scheduled to run at some arbitrary time after the
// program can no longer reach the object to which obj points.
// There is no guarantee that finalizers will run before a program exits,
// so typically they are useful only for releasing non-memory resources
// associated with an object during a long-running program.
// For example, an os.File object could use a finalizer to close the
// associated operating system file descriptor when a program discards
// an os.File without calling Close, but it would be a mistake
// to depend on a finalizer to flush an in-memory I/O buffer such as a
// bufio.Writer, because the buffer would not be flushed at program exit.
//
// It is not guaranteed that a finalizer will run if the size of *obj is
// zero bytes.
//
// It is not guaranteed that a finalizer will run for objects allocated
// in initializers for package-level variables. Such objects may be
// linker-allocated, not heap-allocated.
//
// A finalizer may run as soon as an object becomes unreachable.
// In order to use finalizers correctly, the program must ensure that
// the object is reachable until it is no longer required.
// Objects stored in global variables, or that can be found by
// tracing pointers from a global variable, are reachable. For other
// objects, pass the object to a call of the KeepAlive function to mark
// the last point in the function where the object must be reachable.
//
// For example, if p points to a struct, such as os.File, that contains
// a file descriptor d, and p has a finalizer that closes that file
// descriptor, and if the last use of p in a function is a call to
// syscall.Write(p.d, buf, size), then p may be unreachable as soon as
// the program enters syscall.Write. The finalizer may run at that moment,
// closing p.d, causing syscall.Write to fail because it is writing to
// a closed file descriptor (or, worse, to an entirely different
// file descriptor opened by a different goroutine). To avoid this problem,
// call runtime.KeepAlive(p) after the call to syscall.Write.
//
// A single goroutine runs all finalizers for a program, sequentially.
// If a finalizer must run for a long time, it should do so by starting
// a new goroutine.
func SetFinalizer(obj interface{}, finalizer interface{}) {
	if debug.sbrk != 0 {
		// debug.sbrk never frees memory, so no finalizers run
		// (and we don't have the data structures to record them).
		return
	}
	e := efaceOf(&obj)
	etyp := e._type
	if etyp == nil {
		throw("runtime.SetFinalizer: first argument is nil")
	}
	if etyp.kind&kindMask != kindPtr {
		throw("runtime.SetFinalizer: first argument is " + etyp.string() + ", not pointer")
	}
	ot := (*ptrtype)(unsafe.Pointer(etyp))
	if ot.elem == nil {
		throw("nil elem type!")
	}
	// find the containing object
	base, _, _ := findObject(uintptr(e.data), 0, 0)

	if base == 0 {
		// 0-length objects are okay.
		if e.data == unsafe.Pointer(&zerobase) {
			return
		}
		// Global initializers might be linker-allocated.
		//	var Foo = &Object{}
		//	func main() {
		//		runtime.SetFinalizer(Foo, nil)
		//	}
		// The relevant segments are: noptrdata, data, bss, noptrbss.
		// We cannot assume they are in any order or even contiguous,
		// due to external linking.
		for datap := &firstmoduledata; datap != nil; datap = datap.next {
			if datap.noptrdata <= uintptr(e.data) && uintptr(e.data) < datap.enoptrdata ||
				datap.data <= uintptr(e.data) && uintptr(e.data) < datap.edata ||
				datap.bss <= uintptr(e.data) && uintptr(e.data) < datap.ebss ||
				datap.noptrbss <= uintptr(e.data) && uintptr(e.data) < datap.enoptrbss {
				return
			}
		}
		throw("runtime.SetFinalizer: pointer not in allocated block")
	}

	if uintptr(e.data) != base {
		// As an implementation detail we allow setting finalizers for an inner byte
		// of an object if it could come from tiny alloc (see mallocgc for details).
		if ot.elem == nil || ot.elem.ptrdata != 0 || ot.elem.size >= maxTinySize {
			throw("runtime.SetFinalizer: pointer not at beginning of allocated block")
		}
	}

	f := efaceOf(&finalizer)
	ftyp := f._type
	if ftyp == nil {
		// switch to system stack and remove finalizer
		systemstack(func() {
			removefinalizer(e.data)
		})
		return
	}

	if ftyp.kind&kindMask != kindFunc {
		throw("runtime.SetFinalizer: second argument is " + ftyp.string() + ", not a function")
	}
	ft := (*functype)(unsafe.Pointer(ftyp))
	if ft.dotdotdot() {
		throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string() + " because dotdotdot")
	}
	if ft.inCount != 1 {
		throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string())
	}
	fint := ft.in()[0]
	switch {
	case fint == etyp:
		// ok - same type
		goto okarg
	case fint.kind&kindMask == kindPtr:
		if (fint.uncommon() == nil || etyp.uncommon() == nil) && (*ptrtype)(unsafe.Pointer(fint)).elem == ot.elem {
			// ok - not same type, but both pointers,
			// one or the other is unnamed, and same element type, so assignable.
			goto okarg
		}
	case fint.kind&kindMask == kindInterface:
		ityp := (*interfacetype)(unsafe.Pointer(fint))
		if len(ityp.mhdr) == 0 {
			// ok - satisfies empty interface
			goto okarg
		}
		if _, ok := assertE2I2(ityp, *efaceOf(&obj)); ok {
			goto okarg
		}
	}
	throw("runtime.SetFinalizer: cannot pass " + etyp.string() + " to finalizer " + ftyp.string())
okarg:
	// compute size needed for return parameters
	nret := uintptr(0)
	for _, t := range ft.out() {
		nret = alignUp(nret, uintptr(t.align)) + uintptr(t.size)
	}
	nret = alignUp(nret, sys.PtrSize)
	// make sure we have a finalizer goroutine
	createfing()

	systemstack(func() {
		if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
			throw("runtime.SetFinalizer: finalizer already set")
		}
	})
}
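
// Illustrative sketch, not part of the original source: a typical
// SetFinalizer call site, assuming a hypothetical fileHandle wrapper around
// an OS file descriptor. The finalizer is only a safety net for callers
// that forget Close; explicit Close clears it, and code whose last use of
// the wrapper is a raw syscall on the descriptor still needs
// runtime.KeepAlive, as described above.
//
//	type fileHandle struct{ fd int }
//
//	func open(path string) (*fileHandle, error) {
//		fd, err := syscall.Open(path, syscall.O_RDONLY, 0)
//		if err != nil {
//			return nil, err
//		}
//		h := &fileHandle{fd: fd}
//		runtime.SetFinalizer(h, func(h *fileHandle) { syscall.Close(h.fd) })
//		return h, nil
//	}
//
//	func (h *fileHandle) Close() error {
//		runtime.SetFinalizer(h, nil) // no finalizer needed once closed explicitly
//		return syscall.Close(h.fd)
//	}
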
// Mark KeepAlive as noinline so that it is easily detectable as an intrinsic.
//go:noinline

// KeepAlive marks its argument as currently reachable.
// This ensures that the object is not freed, and its finalizer is not run,
// before the point in the program where KeepAlive is called.
//
// A very simplified example showing where KeepAlive is required:
//	type File struct { d int }
//	d, err := syscall.Open("/file/path", syscall.O_RDONLY, 0)
//	// ... do something if err != nil ...
//	p := &File{d}
//	runtime.SetFinalizer(p, func(p *File) { syscall.Close(p.d) })
//	var buf [10]byte
//	n, err := syscall.Read(p.d, buf[:])
//	// Ensure p is not finalized until Read returns.
//	runtime.KeepAlive(p)
//	// No more uses of p after this point.
//
// Without the KeepAlive call, the finalizer could run at the start of
// syscall.Read, closing the file descriptor before syscall.Read makes
// the actual system call.
func KeepAlive(x interface{}) {
	// Introduce a use of x that the compiler can't eliminate.
	// This makes sure x is alive on entry. We need x to be alive
	// on entry for "defer runtime.KeepAlive(x)"; see issue 21402.