// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
	"errors"
	"fmt"
	"sync"
)
// Buffer chunks are allocated from a pool to reduce pressure on GC.
// The maximum wasted space per dataBuffer is 2x the largest size class,
// which happens when the dataBuffer has multiple chunks and there is
// one unread byte in both the first and last chunks. We use a few size
// classes to minimize overheads for servers that typically receive very
// small request bodies.
//
// TODO: Benchmark to determine if the pools are necessary. The GC may have
// improved enough that we can instead allocate chunks like this:
// make([]byte, max(16<<10, expectedBytesRemaining))
var (
	dataChunkSizeClasses = []int{
		1 << 10,
		2 << 10,
		4 << 10,
		8 << 10,
		16 << 10,
	}
	dataChunkPools = [...]sync.Pool{
		{New: func() interface{} { return make([]byte, 1<<10) }},
		{New: func() interface{} { return make([]byte, 2<<10) }},
		{New: func() interface{} { return make([]byte, 4<<10) }},
		{New: func() interface{} { return make([]byte, 8<<10) }},
		{New: func() interface{} { return make([]byte, 16<<10) }},
	}
)

// getDataBufferChunk returns a chunk from the pool of the smallest size class
// that fits size (or from the largest class if none fits).
func getDataBufferChunk(size int64) []byte {
	i := 0
	for ; i < len(dataChunkSizeClasses)-1; i++ {
		if size <= int64(dataChunkSizeClasses[i]) {
			break
		}
	}
	return dataChunkPools[i].Get().([]byte)
}
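
// Illustrative sketch (an assumption added for exposition, not upstream code):
// a caller asking for 3000 bytes falls through the 1 KiB and 2 KiB classes and
// is served from the 4 KiB pool, while anything larger than 16 KiB comes from
// the largest class:
//
//	chunk := getDataBufferChunk(3000) // len(chunk) == 4096
//	// ... fill and consume the chunk ...
//	putDataBufferChunk(chunk) // recycled into the 4 KiB pool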

// putDataBufferChunk returns a chunk to the pool matching its size class.
func putDataBufferChunk(p []byte) {
	for i, n := range dataChunkSizeClasses {
		if len(p) == n {
			dataChunkPools[i].Put(p)
			return
		}
	}
	panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
}
// dataBuffer is an io.ReadWriter backed by a list of data chunks.
// Each dataBuffer is used to read DATA frames on a single stream.
// The buffer is divided into chunks so the server can limit the
// total memory used by a single connection without limiting the
// request body size on any single stream.
type dataBuffer struct {
	chunks   [][]byte
	r        int   // next byte to read is chunks[0][r]
	w        int   // next byte to write is chunks[len(chunks)-1][w]
	size     int   // total buffered bytes
	expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
}
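
// Illustrative sketch of the invariants above (an assumption for exposition,
// not upstream text): after two Writes of 4 KiB and then 1 KiB with expected
// left at zero, the buffer holds a 4 KiB chunk and a 1 KiB chunk with r=0,
// w=1024, and size=5120. A Read of 4096 bytes consumes the first chunk,
// returns it to the pool, and resets r to 0 for the remaining chunk.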

var errReadEmpty = errors.New("read from empty dataBuffer")
// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *dataBuffer) Read(p []byte) (int, error) {
	if b.size == 0 {
		return 0, errReadEmpty
	}
	var ntotal int
	for len(p) > 0 && b.size > 0 {
		readFrom := b.bytesFromFirstChunk()
		n := copy(p, readFrom)
		p = p[n:]
		ntotal += n
		b.r += n
		b.size -= n
		// If the first chunk has been consumed, advance to the next chunk.
		if b.r == len(b.chunks[0]) {
			putDataBufferChunk(b.chunks[0])
			end := len(b.chunks) - 1
			copy(b.chunks[:end], b.chunks[1:])
			b.chunks[end] = nil
			b.chunks = b.chunks[:end]
			b.r = 0
		}
	}
	return ntotal, nil
}

// bytesFromFirstChunk returns the unread bytes of the first chunk. When the
// buffer holds a single chunk, the write cursor w bounds the readable region.
func (b *dataBuffer) bytesFromFirstChunk() []byte {
	if len(b.chunks) == 1 {
		return b.chunks[0][b.r:b.w]
	}
	return b.chunks[0][b.r:]
}
// Len returns the number of bytes of the unread portion of the buffer.
func (b *dataBuffer) Len() int {
	return b.size
}
// Write appends p to the buffer.
func (b *dataBuffer) Write(p []byte) (int, error) {
	ntotal := len(p)
	for len(p) > 0 {
		// If the last chunk is empty, allocate a new chunk. Try to allocate
		// enough to fully copy p plus any additional bytes we expect to
		// receive. However, this may allocate less than len(p).
		want := int64(len(p))
		if b.expected > want {
			want = b.expected
		}
		chunk := b.lastChunkOrAlloc(want)
		n := copy(chunk[b.w:], p)
		p = p[n:]
		b.w += n
		b.size += n
		b.expected -= int64(n)
	}
	return ntotal, nil
}
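
// dataBufferRoundTrip is a hypothetical helper, added purely as an
// illustrative sketch of the Write/Read contract documented above; it is not
// part of the upstream file. DATA frame payloads are appended with Write and
// the buffered body is then drained with Read.
func dataBufferRoundTrip() ([]byte, error) {
	var buf dataBuffer
	// Both writes land in a single 1 KiB chunk; Write never returns an error.
	buf.Write([]byte("hello, "))
	buf.Write([]byte("world"))
	out := make([]byte, buf.Len())
	if _, err := buf.Read(out); err != nil {
		return nil, err
	}
	return out, nil // out == []byte("hello, world")
}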

// lastChunkOrAlloc returns the last chunk if it still has unwritten space;
// otherwise it allocates a new chunk sized for want and appends it.
func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
	if len(b.chunks) != 0 {
		lastChunk := b.chunks[len(b.chunks)-1]
		if b.w < len(lastChunk) {
			return lastChunk
		}
	}
	chunk := getDataBufferChunk(want)
	b.chunks = append(b.chunks, chunk)
	b.w = 0
	return chunk