// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// The wire protocol for HTTP's "chunked" Transfer-Encoding.

// Package internal contains HTTP internals shared by net/http and
// net/http/httputil.
package internal

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
)

// maxLineLength bounds the length of a chunk-size header line.
const maxLineLength = 4096 // assumed <= bufio.defaultBufSize

// ErrLineTooLong is returned when a chunk header line exceeds maxLineLength.
var ErrLineTooLong = errors.New("header line too long")
NewChunkedReader returns a new chunkedReader that translates the data read from r out of HTTP "chunked" format before returning it. The chunkedReader returns io.EOF when the final 0-length chunk is read. NewChunkedReader is not needed by normal applications. The http package automatically decodes chunking when reading response bodies.
func ( io.Reader) io.Reader {
	,  := .(*bufio.Reader)
	if ! {
		 = bufio.NewReader()
	}
	return &chunkedReader{r: }
}

// chunkedReader decodes an HTTP "chunked" transfer-encoded stream,
// presenting the de-chunked payload through its Read method.
type chunkedReader struct {
	r        *bufio.Reader
	n        uint64 // unread bytes in chunk
	err      error  // sticky error; set to io.EOF after the final 0-length chunk
	buf      [2]byte
	checkEnd bool // whether need to check for \r\n chunk footer
}

chunk-size CRLF
	var  []byte
	, .err = readChunkLine(.r)
	if .err != nil {
		return
	}
	.n, .err = parseHexUint()
	if .err != nil {
		return
	}
	if .n == 0 {
		.err = io.EOF
	}
}

func ( *chunkedReader) () bool {
	 := .r.Buffered()
	if  > 0 {
		,  := .r.Peek()
		return bytes.IndexByte(, '\n') >= 0
	}
	return false
}

func ( *chunkedReader) ( []uint8) ( int,  error) {
	for .err == nil {
		if .checkEnd {
We have some data. Return early (per the io.Reader contract) instead of potentially blocking while reading more.
				break
			}
			if _, .err = io.ReadFull(.r, .buf[:2]); .err == nil {
				if string(.buf[:]) != "\r\n" {
					.err = errors.New("malformed chunked encoding")
					break
				}
			}
			.checkEnd = false
		}
		if .n == 0 {
We've read enough. Don't potentially block reading a new chunk header.
				break
			}
			.beginChunk()
			continue
		}
		if len() == 0 {
			break
		}
		 := 
		if uint64(len()) > .n {
			 = [:.n]
		}
		var  int
		, .err = .r.Read()
		 += 
		 = [:]
If we're at the end of a chunk, read the next two bytes to verify they are "\r\n".
		if .n == 0 && .err == nil {
			.checkEnd = true
		}
	}
	return , .err
}
Read a line of bytes (up to \n) from b. Give up if the line exceeds maxLineLength. The returned bytes are owned by the bufio.Reader so they are only valid until the next bufio read.
func ( *bufio.Reader) ([]byte, error) {
	,  := .ReadSlice('\n')
We always know when EOF is coming. If the caller asked for a line, there should be a line.
		if  == io.EOF {
			 = io.ErrUnexpectedEOF
		} else if  == bufio.ErrBufferFull {
			 = ErrLineTooLong
		}
		return nil, 
	}
	if len() >= maxLineLength {
		return nil, ErrLineTooLong
	}
	 = trimTrailingWhitespace()
	,  = removeChunkExtension()
	if  != nil {
		return nil, 
	}
	return , nil
}

func ( []byte) []byte {
	for len() > 0 && isASCIISpace([len()-1]) {
		 = [:len()-1]
	}
	return 
}

// isASCIISpace reports whether b is an ASCII space, tab, LF, or CR byte.
func isASCIISpace(b byte) bool {
	return b == ' ' || b == '\t' || b == '\n' || b == '\r'
}
// removeChunkExtension removes any chunk-extension from p.
// For example,
//
//	"0" => "0"
//	"0;token" => "0"
//	"0;token=val" => "0"
//	`0;token="quoted string"` => "0"
func removeChunkExtension(p []byte) ([]byte, error) {
	semi := bytes.IndexByte(p, ';')
	if semi == -1 {
		return p, nil
	}
	// TODO: care about exact syntax of chunk extensions? We're
	// ignoring and stripping them anyway. For now just never
	// return an error.
	return p[:semi], nil
}
NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP "chunked" format before writing them to w. Closing the returned chunkedWriter sends the final 0-length chunk that marks the end of the stream but does not send the final CRLF that appears after trailers; trailers and the last CRLF must be written separately. NewChunkedWriter is not needed by normal applications. The http package adds chunking automatically if handlers don't set a Content-Length header. Using newChunkedWriter inside a handler would result in double chunking or chunking with a Content-Length length, both of which are wrong.
Writing to chunkedWriter translates to writing in HTTP chunked Transfer Encoding wire format to the underlying Wire chunkedWriter.
type chunkedWriter struct {
	Wire io.Writer
}
Write the contents of data as one chunk to Wire. NOTE: Note that the corresponding chunk-writing procedure in Conn.Write has a bug since it does not check for success of io.WriteString
func ( *chunkedWriter) ( []byte) ( int,  error) {
Don't send 0-length data. It looks like EOF for chunked encoding.
	if len() == 0 {
		return 0, nil
	}

	if _,  = fmt.Fprintf(.Wire, "%x\r\n", len());  != nil {
		return 0, 
	}
	if ,  = .Wire.Write();  != nil {
		return
	}
	if  != len() {
		 = io.ErrShortWrite
		return
	}
	if _,  = io.WriteString(.Wire, "\r\n");  != nil {
		return
	}
	if ,  := .Wire.(*FlushAfterChunkWriter);  {
		 = .Flush()
	}
	return
}

func ( *chunkedWriter) () error {
	,  := io.WriteString(.Wire, "0\r\n")
	return 
}
FlushAfterChunkWriter signals from the caller of NewChunkedWriter that each chunk should be followed by a flush. It is used by the http.Transport code to keep the buffering behavior for headers and trailers, but flush out chunks aggressively in the middle for request bodies which may be generated slowly. See Issue 6574.
type FlushAfterChunkWriter struct {
	*bufio.Writer
}

// parseHexUint parses v as a hexadecimal chunk length.
// It rejects empty input (which would otherwise silently parse as 0,
// the end-of-stream marker), non-hex bytes, and inputs longer than
// 16 hex digits (which cannot fit in a uint64).
func parseHexUint(v []byte) (n uint64, err error) {
	if len(v) == 0 {
		return 0, errors.New("empty hex number for chunk length")
	}
	for i, b := range v {
		switch {
		case '0' <= b && b <= '9':
			b = b - '0'
		case 'a' <= b && b <= 'f':
			b = b - 'a' + 10
		case 'A' <= b && b <= 'F':
			b = b - 'A' + 10
		default:
			return 0, errors.New("invalid byte in chunk length")
		}
		if i == 16 {
			// A 17th digit would overflow uint64.
			return 0, errors.New("http chunk length too large")
		}
		n <<= 4
		n |= uint64(b)
	}
	return
}