// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"sync/atomic"
	"unsafe"
)

// Map is like a Go map[interface{}]interface{} but is safe for concurrent use
// by multiple goroutines without additional locking or coordination.
// Loads, stores, and deletes run in amortized constant time.
//
// The Map type is specialized. Most code should use a plain Go map instead,
// with separate locking or coordination, for better type safety and to make it
// easier to maintain other invariants along with the map content.
//
// The Map type is optimized for two common use cases: (1) when the entry for a given
// key is only ever written once but read many times, as in caches that only grow,
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
// sets of keys. In these two cases, use of a Map may significantly reduce lock
// contention compared to a Go map paired with a separate Mutex or RWMutex.
//
// The zero Map is empty and ready for use. A Map must not be copied after first use.
type Map struct {
	mu Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read atomic.Value // readOnly

	// dirty contains the portion of the map's contents that require mu to be held.
	//
	// To ensure that the dirty map can be promoted to the read map quickly, it also
	// includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[interface{}]*entry

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
	//
	// Once enough misses have occurred to cover the cost of copying the dirty
	// map, the dirty map will be promoted to the read map (in the unamended
	// state) and the next store to the map will make a new dirty copy.
	misses int
}
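
// The sketch below is illustrative only and not part of the original file: it
// shows the grow-only cache pattern described in the Map documentation above,
// with one writer storing a key once and many goroutines loading it. The
// function name cacheSketch and its contents are hypothetical.
func cacheSketch() {
	var m Map // the zero Map is empty and ready for use

	// One writer populates the cache once per key.
	m.Store("config", "value")

	// Many readers then hit the lock-free read map on the fast path.
	var wg WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if v, ok := m.Load("config"); ok {
				_ = v // use the cached value
			}
		}()
	}
	wg.Wait()
}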

// readOnly is an immutable struct stored atomically in the Map.read field.
type readOnly struct {
	m       map[interface{}]*entry
	amended bool // true if the dirty map contains some key not in m.
}

// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(interface{}))

// An entry is a slot in the map corresponding to a particular key.
type entry struct {
	// p points to the interface{} value stored for the entry.
	//
	// If p == nil, the entry has been deleted and m.dirty == nil.
	//
	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
	// is missing from m.dirty.
	//
	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if
	// m.dirty != nil, in m.dirty[key].
	//
	// An entry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with expunged and leave
	// m.dirty[key] unset.
	//
	// An entry's associated value can be updated by atomic replacement, provided
	// p != expunged. If p == expunged, an entry's associated value can be
	// updated only after first setting m.dirty[key] = e so that lookups using
	// the dirty map find the entry.
	p unsafe.Pointer // *interface{}
}
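
// Illustrative sketch, not part of the original file: how the three states of
// entry.p described above map to observable load results. The function name
// entryStateSketch is hypothetical, and the calls below normally happen under
// the Map's own locking discipline.
func entryStateSketch() {
	e := newEntry("v")
	_, ok := e.load() // ok == true: p points to the stored value.

	e.delete()       // p is atomically replaced with nil: logically deleted.
	_, ok = e.load() // ok == false.

	// When a new dirty map is built, nil entries are swapped to expunged and
	// omitted from the dirty map; load still reports the key as absent.
	e.tryExpungeLocked()
	_, ok = e.load() // ok == false.
	_ = ok
}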

// newEntry returns an entry holding i.
func newEntry(i interface{}) *entry {
	return &entry{p: unsafe.Pointer(&i)}
}

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return nil, false
	}
	return e.load()
}

func (e *entry) load() (value interface{}, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return nil, false
	}
	return *(*interface{})(p), true
}

// Store sets the value for a key.
func (m *Map) Store(key, value interface{}) {
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok && e.tryStore(&value) {
		return
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was previously expunged, which implies that there is a
			// non-nil dirty map and this entry is not in it.
			m.dirty[key] = e
		}
		e.storeLocked(&value)
	} else if e, ok := m.dirty[key]; ok {
		e.storeLocked(&value)
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
	}
	m.mu.Unlock()
}

// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
func (e *entry) tryStore(i *interface{}) bool {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return true
		}
	}
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry) storeLocked(i *interface{}) {
	atomic.StorePointer(&e.p, unsafe.Pointer(i))
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
	// Avoid locking if it's a clean hit.
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		actual, loaded, ok := e.tryLoadOrStore(value)
		if ok {
			return actual, loaded
		}
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			m.dirty[key] = e
		}
		actual, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		actual, loaded = value, false
	}
	m.mu.Unlock()

	return actual, loaded
}
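
// Illustrative sketch, not part of the original file, of the LoadOrStore
// contract documented above: the first call for a key stores the value and
// reports loaded == false; later calls return the existing value with
// loaded == true. The function name loadOrStoreSketch is hypothetical.
func loadOrStoreSketch() {
	var m Map

	actual, loaded := m.LoadOrStore("k", 1)
	_ = actual // 1
	_ = loaded // false: the value was stored.

	actual, loaded = m.LoadOrStore("k", 2)
	_ = actual // still 1
	_ = loaded // true: the existing value was loaded; 2 was not stored.
}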

// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return nil, false, false
	}
	if p != nil {
		return *(*interface{})(p), true, true
	}

	// Copy the interface after the first load to make this method more amenable
	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	// shouldn't bother heap-allocating.
	ic := i
	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return nil, false, false
		}
		if p != nil {
			return *(*interface{})(p), true, true
		}
	}
}

// LoadAndDelete deletes the value for a key, returning the previous value if any.
// The loaded result reports whether the key was present.
func (m *Map) LoadAndDelete(key interface{}) (value interface{}, loaded bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			delete(m.dirty, key)
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if ok {
		return e.delete()
	}
	return nil, false
}

// Delete deletes the value for a key.
func (m *Map) Delete(key interface{}) {
	m.LoadAndDelete(key)
}
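
// Illustrative sketch, not part of the original file, contrasting Delete with
// LoadAndDelete as documented above. The function name deleteSketch is
// hypothetical.
func deleteSketch() {
	var m Map
	m.Store("k", 1)

	// LoadAndDelete removes the key and reports what, if anything, was there.
	if v, loaded := m.LoadAndDelete("k"); loaded {
		_ = v // 1
	}

	// Delete is equivalent but discards the previous value.
	m.Delete("k")
}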

func (e *entry) delete() (value interface{}, ok bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expunged {
			return nil, false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return *(*interface{})(p), true
		}
	}
}

// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently, Range may reflect any mapping for that key
// from any point during the Range call.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map) Range(f func(key, value interface{}) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read, _ := m.read.Load().(readOnly)
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		if read.amended {
			read = readOnly{m: m.dirty}
			m.read.Store(read)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, v) {
			break
		}
	}
}
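
// Illustrative sketch, not part of the original file, of the Range semantics
// documented above: each key is visited at most once, and iteration stops as
// soon as f returns false. The function name rangeSketch is hypothetical.
func rangeSketch() {
	var m Map
	m.Store("a", 1)
	m.Store("b", 2)

	count := 0
	m.Range(func(key, value interface{}) bool {
		count++
		return count < 2 // stop early after visiting two entries
	})
}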

// missLocked records a miss against the read map and, once misses reach the
// size of the dirty map, promotes the dirty map to be the new read map.
// m.mu must be held.
func (m *Map) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(readOnly{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}

// dirtyLocked allocates the dirty map, if necessary, by copying the
// non-expunged entries of the read map. m.mu must be held.
func (m *Map) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read, _ := m.read.Load().(readOnly)
	m.dirty = make(map[interface{}]*entry, len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

// tryExpungeLocked marks a deleted (nil) entry as expunged so that it can be
// omitted from a newly created dirty map, and reports whether the entry is
// expunged on return. m.mu must be held.
func (e *entry) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}