// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/math"
	"runtime/internal/sys"
	"unsafe"
)

// slice is the runtime representation of a Go slice: a pointer to the
// backing array plus the current length and capacity. It must agree with
// the layout the compiler assumes for slice values.
type slice struct {
	array unsafe.Pointer
	len   int
	cap   int
}
A notInHeapSlice is a slice backed by go:notinheap memory.
type notInHeapSlice struct {
	array *notInHeap
	len   int
	cap   int
}

func () {
	panic(errorString("makeslice: len out of range"))
}

func () {
	panic(errorString("makeslice: cap out of range"))
}
makeslicecopy allocates a slice of "tolen" elements of type "et", then copies "fromlen" elements of type "et" into that new allocation from "from".
func ( *_type,  int,  int,  unsafe.Pointer) unsafe.Pointer {
	var ,  uintptr
	if uintptr() > uintptr() {
		var  bool
		,  = math.MulUintptr(.size, uintptr())
		if  ||  > maxAlloc ||  < 0 {
			panicmakeslicelen()
		}
		 = .size * uintptr()
fromlen is a known good length providing and equal or greater than tolen, thereby making tolen a good slice length too as from and to slices have the same element width.
		 = .size * uintptr()
		 = 
	}

	var  unsafe.Pointer
	if .ptrdata == 0 {
		 = mallocgc(, nil, false)
		if  <  {
			memclrNoHeapPointers(add(, ), -)
		}
Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		 = mallocgc(, , true)
Only shade the pointers in old.array since we know the destination slice to only contains nil pointers because it has been cleared during alloc.
			bulkBarrierPreWriteSrcOnly(uintptr(), uintptr(), )
		}
	}

	if raceenabled {
		 := getcallerpc()
		 := funcPC()
		racereadrangepc(, , , )
	}
	if msanenabled {
		msanread(, )
	}

	memmove(, , )

	return 
}

func ( *_type, ,  int) unsafe.Pointer {
	,  := math.MulUintptr(.size, uintptr())
NOTE: Produce a 'len out of range' error instead of a 'cap out of range' error when someone does make([]T, bignumber). 'cap out of range' is true too, but since the cap is only being supplied implicitly, saying len is clearer. See golang.org/issue/4085.
		,  := math.MulUintptr(.size, uintptr())
		if  ||  > maxAlloc ||  < 0 {
			panicmakeslicelen()
		}
		panicmakeslicecap()
	}

	return mallocgc(, , true)
}

func ( *_type, ,  int64) unsafe.Pointer {
	 := int()
	if int64() !=  {
		panicmakeslicelen()
	}

	 := int()
	if int64() !=  {
		panicmakeslicecap()
	}

	return makeslice(, , )
}
growslice handles slice growth during append. It is passed the slice element type, the old slice, and the desired new minimum capacity, and it returns a new slice with at least that capacity, with the old data copied into it. The new slice's length is set to the old slice's length, NOT to the new requested capacity. This is for codegen convenience. The old slice's length is used immediately to calculate where to write new values during an append. TODO: When the old backend is gone, reconsider this decision. The SSA backend might prefer the new length or to return only ptr/cap and save stack space.
func ( *_type,  slice,  int) slice {
	if raceenabled {
		 := getcallerpc()
		racereadrangepc(.array, uintptr(.len*int(.size)), , funcPC())
	}
	if msanenabled {
		msanread(.array, uintptr(.len*int(.size)))
	}

	if  < .cap {
		panic(errorString("growslice: cap out of range"))
	}

append should not create a slice with nil pointer but non-zero len. We assume that append doesn't need to preserve old.array in this case.
		return slice{unsafe.Pointer(&zerobase), .len, }
	}

	 := .cap
	 :=  + 
	if  >  {
		 = 
	} else {
		if .cap < 1024 {
			 = 
Check 0 < newcap to detect overflow and prevent an infinite loop.
			for 0 <  &&  <  {
				 +=  / 4
Set newcap to the requested cap when the newcap calculation overflowed.
			if  <= 0 {
				 = 
			}
		}
	}

	var  bool
Specialize for common values of et.size. For 1 we don't need any division/multiplication. For sys.PtrSize, compiler will optimize division/multiplication into a shift by a constant. For powers of 2, use a variable shift.
	switch {
	case .size == 1:
		 = uintptr(.len)
		 = uintptr()
		 = roundupsize(uintptr())
		 = uintptr() > maxAlloc
		 = int()
	case .size == sys.PtrSize:
		 = uintptr(.len) * sys.PtrSize
		 = uintptr() * sys.PtrSize
		 = roundupsize(uintptr() * sys.PtrSize)
		 = uintptr() > maxAlloc/sys.PtrSize
		 = int( / sys.PtrSize)
	case isPowerOfTwo(.size):
		var  uintptr
Mask shift for better code generation.
			 = uintptr(sys.Ctz64(uint64(.size))) & 63
		} else {
			 = uintptr(sys.Ctz32(uint32(.size))) & 31
		}
		 = uintptr(.len) << 
		 = uintptr() << 
		 = roundupsize(uintptr() << )
		 = uintptr() > (maxAlloc >> )
		 = int( >> )
	default:
		 = uintptr(.len) * .size
		 = uintptr() * .size
		,  = math.MulUintptr(.size, uintptr())
		 = roundupsize()
		 = int( / .size)
	}
The check of overflow in addition to capmem > maxAlloc is needed to prevent an overflow which can be used to trigger a segfault on 32bit architectures with this example program: type T [1<<27 + 1]int64 var d T var s []T func main() { s = append(s, d, d, d, d) print(len(s), "\n") }
	if  ||  > maxAlloc {
		panic(errorString("growslice: cap out of range"))
	}

	var  unsafe.Pointer
	if .ptrdata == 0 {
The append() that calls growslice is going to overwrite from old.len to cap (which will be the new length). Only clear the part that will not be overwritten.
		memclrNoHeapPointers(add(, ), -)
Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		 = mallocgc(, , true)
Only shade the pointers in old.array since we know the destination slice p only contains nil pointers because it has been cleared during alloc.
			bulkBarrierPreWriteSrcOnly(uintptr(), uintptr(.array), -.size+.ptrdata)
		}
	}
	memmove(, .array, )

	return slice{, .len, }
}

// isPowerOfTwo reports whether x is a power of two.
// Note: it also returns true for x == 0, which is fine for the
// element-size callers in this file (size 0 is handled earlier).
func isPowerOfTwo(x uintptr) bool {
	return x&(x-1) == 0
}
slicecopy is used to copy from a string or slice of pointerless elements into a slice.
func ( unsafe.Pointer,  int,  unsafe.Pointer,  int,  uintptr) int {
	if  == 0 ||  == 0 {
		return 0
	}

	 := 
	if  <  {
		 = 
	}

	if  == 0 {
		return 
	}

	 := uintptr() * 
	if raceenabled {
		 := getcallerpc()
		 := funcPC()
		racereadrangepc(, , , )
		racewriterangepc(, , , )
	}
	if msanenabled {
		msanread(, )
		msanwrite(, )
	}

TODO: is this still worth it with new memmove impl?
		*(*byte)() = *(*byte)() // known to be a byte pointer
	} else {
		memmove(, , )
	}
	return