func runtime/internal/atomic.Load64

66 uses

	runtime/internal/atomic (current package)
		atomic_amd64.go#L28: func Load64(ptr *uint64) uint64 {

	runtime
		lfstack.go#L33: 		old := atomic.Load64((*uint64)(head))
		lfstack.go#L43: 		old := atomic.Load64((*uint64)(head))
		lfstack.go#L48: 		next := atomic.Load64(&node.next)
		lfstack.go#L56: 	return atomic.Load64((*uint64)(head)) == 0
		metrics.go#L133: 				hist.counts[0] = atomic.Load64(&memstats.gcPauseDist.underflow)
		metrics.go#L135: 					hist.counts[i+1] = atomic.Load64(&memstats.gcPauseDist.counts[i])
		metrics.go#L367: 	a.heapGoal = atomic.Load64(&memstats.next_gc)
		mgc.go#L492: 		assistRatio := float64frombits(atomic.Load64(&c.assistWorkPerByte))
		mgc.go#L530: 	live := atomic.Load64(&memstats.heap_live)
		mgc.go#L531: 	scan := atomic.Load64(&memstats.heap_scan)
		mgc.go#L536: 	heapGoal := int64(atomic.Load64(&memstats.next_gc))
		mgc.go#L886: 			sweepMin := atomic.Load64(&memstats.heap_live) + sweepMinHeapDistance
		mgc.go#L927: 		heapLiveBasis := atomic.Load64(&memstats.heap_live)
		mgc.go#L937: 		pagesSwept := atomic.Load64(&mheap_.pagesSwept)
		mgc.go#L938: 		pagesInUse := atomic.Load64(&mheap_.pagesInUse)
		mgc.go#L967: 	egogc := float64(atomic.Load64(&memstats.next_gc)-memstats.heap_marked) / float64(memstats.heap_marked)
		mgc.go#L1277: 		lastgc := int64(atomic.Load64(&memstats.last_gc_nanotime))
		mgc.go#L1368: 	work.heap0 = atomic.Load64(&memstats.heap_live)
		mgc.go#L2252: 	work.initialHeapLive = atomic.Load64(&memstats.heap_live)
		mgcmark.go#L403: 	assistWorkPerByte := float64frombits(atomic.Load64(&gcController.assistWorkPerByte))
		mgcmark.go#L404: 	assistBytesPerWork := float64frombits(atomic.Load64(&gcController.assistBytesPerWork))
		mgcmark.go#L548: 	assistBytesPerWork := float64frombits(atomic.Load64(&gcController.assistBytesPerWork))
		mgcmark.go#L643: 	assistBytesPerWork := float64frombits(atomic.Load64(&gcController.assistBytesPerWork))
		mgcmark.go#L677: 		assistWorkPerByte := float64frombits(atomic.Load64(&gcController.assistWorkPerByte))
		mgcscavenge.go#L103: 	return memstats.heap_sys.load() - atomic.Load64(&memstats.heap_released)
		mgcscavenge.go#L126: 	goalRatio := float64(atomic.Load64(&memstats.next_gc)) / float64(memstats.last_next_gc)
		mgcscavenge.go#L432: 		atomic.Load64(&memstats.heap_released)>>10, " KiB total, ",
		mgcscavenge.go#L433: 		(atomic.Load64(&memstats.heap_inuse)*100)/heapRetained(), "% util",
		mgcsweep.go#L645: 	sweptBasis := atomic.Load64(&mheap_.pagesSweptBasis)
		mgcsweep.go#L648: 	newHeapLive := uintptr(atomic.Load64(&memstats.heap_live)-mheap_.sweepHeapLiveBasis) + spanBytes
		mgcsweep.go#L650: 	for pagesTarget > int64(atomic.Load64(&mheap_.pagesSwept)-sweptBasis) {
		mgcsweep.go#L655: 		if atomic.Load64(&mheap_.pagesSweptBasis) != sweptBasis {
		mheap.go#L742: 	if atomic.Load64(&h.reclaimIndex) >= 1<<63 {
		mprof.go#L407: 	rate := int64(atomic.Load64(&blockprofilerate))
		mprof.go#L453: 	rate := int64(atomic.Load64(&mutexprofilerate))
		mspanset.go#L322: 	return headTailIndex(atomic.Load64((*uint64)(h)))
		mstats.go#L745: 	return atomic.Load64((*uint64)(s))
		proc.go#L2406: 	if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
		proc.go#L2604: 	if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
		proc.go#L2911: 		pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
		proc.go#L2945: 	if atomic.Load64(&sched.lastpoll) == 0 {
		proc.go#L2950: 		pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
		proc.go#L3193: 	next := int64(atomic.Load64(&pp.timer0When))
		proc.go#L3194: 	nextAdj := int64(atomic.Load64(&pp.timerModifiedEarliest))
		proc.go#L3401: 		assistWorkPerByte := float64frombits(atomic.Load64(&gcController.assistWorkPerByte))
		proc.go#L5186: 		lastpoll := int64(atomic.Load64(&sched.lastpoll))
		profbuf.go#L118: 	return profIndex(atomic.Load64((*uint64)(x)))
		profbuf.go#L154: 	return uint32(atomic.Load64(&b.overflow)) > 0
		profbuf.go#L161: 	overflow := atomic.Load64(&b.overflow)
		profbuf.go#L162: 	time = atomic.Load64(&b.overflowTime)
		profbuf.go#L173: 		overflow = atomic.Load64(&b.overflow)
		profbuf.go#L174: 		time = atomic.Load64(&b.overflowTime)
		profbuf.go#L183: 		overflow := atomic.Load64(&b.overflow)
		runtime.go#L24: 	r := int64(atomic.Load64(&ticks.val))
		runtime1.go#L116: 	if atomic.Load64(&test_z64) != 1 {
		runtime1.go#L120: 	if atomic.Load64(&test_z64) != (1<<40)+1 {
		runtime1.go#L126: 	if atomic.Load64(&test_z64) != (2<<40)+2 {
		runtime1.go#L132: 	if atomic.Load64(&test_z64) != (3<<40)+3 {
		time.go#L684: 	if first := atomic.Load64(&pp.timerModifiedEarliest); first != 0 {
		time.go#L771: 	next := int64(atomic.Load64(&pp.timer0When))
		time.go#L772: 	nextAdj := int64(atomic.Load64(&pp.timerModifiedEarliest))
		time.go#L1037: 		old := atomic.Load64(&pp.timerModifiedEarliest)
		time.go#L1063: 		w := int64(atomic.Load64(&pp.timer0When))
		time.go#L1069: 		w = int64(atomic.Load64(&pp.timerModifiedEarliest))
		trace.go#L1150: 	if nextGC := atomic.Load64(&memstats.next_gc); nextGC == ^uint64(0) {
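
For reference, a minimal sketch of the pattern these call sites rely on. Note that runtime/internal/atomic is an internal package and cannot be imported by ordinary code; outside the runtime the equivalent operation is sync/atomic.LoadUint64, which has the same signature shape as Load64(ptr *uint64) uint64 shown above. The counter variable and goroutine counts below are illustrative only, not taken from the runtime sources.

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	func main() {
		var ops uint64 // shared 64-bit value, always accessed atomically
		var wg sync.WaitGroup

		for i := 0; i < 4; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for j := 0; j < 1000; j++ {
					atomic.AddUint64(&ops, 1)
				}
			}()
		}
		wg.Wait()

		// LoadUint64 mirrors runtime/internal/atomic.Load64: it reads the
		// full 64-bit value in one atomic operation, so a reader never
		// observes a torn (half-written) value even on 32-bit platforms.
		fmt.Println(atomic.LoadUint64(&ops))
	}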