// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Transport code.

package http2

import (
	"bufio"
	"bytes"
	"compress/gzip"
	"context"
	"crypto/rand"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"math"
	mathrand "math/rand"
	"net"
	"net/http"
	"net/http/httptrace"
	"net/textproto"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/net/http/httpguts"
	"golang.org/x/net/http2/hpack"
	"golang.org/x/net/idna"
)

const (
	// transportDefaultConnFlow is how many connection-level flow control
	// tokens we give the server at start-up, past the default 64k.
	transportDefaultConnFlow = 1 << 30

	// transportDefaultStreamFlow is how many stream-level flow control
	// tokens we announce to the peer, and how many bytes we buffer per stream.
	transportDefaultStreamFlow = 4 << 20

	// transportDefaultStreamMinRefresh is the minimum number of bytes we'll
	// send for a stream-level WINDOW_UPDATE at a time.
	transportDefaultStreamMinRefresh = 4 << 10

	defaultUserAgent = "Go-http-client/2.0"
)
// Transport is an HTTP/2 Transport.
//
// A Transport internally caches connections to servers. It is safe
// for concurrent use by multiple goroutines.
type Transport struct {
	// DialTLS specifies an optional dial function for creating TLS
	// connections for requests. If DialTLS is nil, tls.Dial is used.
	// If the returned net.Conn has a ConnectionState method like tls.Conn,
	// it will be used to set http.Response.TLS.
	DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)

	// TLSClientConfig specifies the TLS configuration to use with
	// tls.Client. If nil, the default configuration is used.
	TLSClientConfig *tls.Config

	// ConnPool optionally specifies an alternate connection pool to use.
	// If nil, the default is used.
	ConnPool ClientConnPool

	// DisableCompression, if true, prevents the Transport from requesting
	// compression with an "Accept-Encoding: gzip" request header when the
	// Request contains no existing Accept-Encoding value. If the Transport
	// requests gzip on its own and gets a gzipped response, it's
	// transparently decoded in the Response.Body. However, if the user
	// explicitly requested gzip it is not automatically uncompressed.
	DisableCompression bool

	// AllowHTTP, if true, permits HTTP/2 requests using the insecure,
	// plain-text "http" scheme. Note that this does not enable h2c support.
	AllowHTTP bool

	// MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to send
	// in the initial settings frame. It is how many bytes of response
	// headers are allowed. Unlike the http2 spec, zero here means to use a
	// default limit (currently 10MB). If you actually want to advertise an
	// unlimited value to the peer, Transport interprets the highest
	// possible value here (0xffffffff or 1<<32-1) to mean no limit.
	MaxHeaderListSize uint32

	// StrictMaxConcurrentStreams controls whether the server's
	// SETTINGS_MAX_CONCURRENT_STREAMS should be respected globally. If
	// false, new TCP connections are created to the server as needed to
	// keep each under the per-connection SETTINGS_MAX_CONCURRENT_STREAMS
	// limit. If true, the server's SETTINGS_MAX_CONCURRENT_STREAMS is
	// interpreted as a global limit and callers of RoundTrip block when
	// needed, waiting for their turn.
	StrictMaxConcurrentStreams bool

	// ReadIdleTimeout is the timeout after which a health check using a
	// ping frame will be carried out if no frame is received on the
	// connection. Note that a ping response is considered a received
	// frame, so if there is no other traffic on the connection, the health
	// check will be performed every ReadIdleTimeout interval.
	// If zero, no health check is performed.
	ReadIdleTimeout time.Duration

	// PingTimeout is the timeout after which the connection will be closed
	// if a response to Ping is not received. Defaults to 15s.
	PingTimeout time.Duration

	// t1, if non-nil, is the standard library Transport using this
	// transport. Its settings are used (but not its RoundTrip method, etc).
	t1 *http.Transport

	connPoolOnce  sync.Once
	connPoolOrDef ClientConnPool // non-nil version of ConnPool
}
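
// Illustrative usage sketch (not part of the original file): constructing an
// http2.Transport directly and using it from an http.Client. The field values
// shown are arbitrary examples.
//
//	t2 := &http2.Transport{
//		StrictMaxConcurrentStreams: true,
//		ReadIdleTimeout:            30 * time.Second,
//	}
//	client := &http.Client{Transport: t2}
//	res, err := client.Get("https://example.com/")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer res.Body.Close()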

func (t *Transport) maxHeaderListSize() uint32 {
	if t.MaxHeaderListSize == 0 {
		return 10 << 20
	}
	if t.MaxHeaderListSize == 0xffffffff {
		return 0
	}
	return t.MaxHeaderListSize
}

func (t *Transport) disableCompression() bool {
	return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
}

func (t *Transport) pingTimeout() time.Duration {
	if t.PingTimeout == 0 {
		return 15 * time.Second
	}
	return t.PingTimeout
}

// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
// It returns an error if t1 has already been HTTP/2-enabled.
//
// Use ConfigureTransports instead to configure the HTTP/2 Transport.
func ConfigureTransport(t1 *http.Transport) error {
	_, err := ConfigureTransports(t1)
	return err
}

// ConfigureTransports configures a net/http HTTP/1 Transport to use HTTP/2.
// It returns a new HTTP/2 Transport for further configuration.
// It returns an error if t1 has already been HTTP/2-enabled.
func ConfigureTransports(t1 *http.Transport) (*Transport, error) {
	return configureTransports(t1)
}
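
// Illustrative usage sketch (not part of the original file): upgrading an
// existing net/http Transport with ConfigureTransports and tuning the returned
// HTTP/2 Transport. The specific values are arbitrary examples.
//
//	t1 := &http.Transport{}
//	t2, err := http2.ConfigureTransports(t1)
//	if err != nil {
//		log.Fatal(err)
//	}
//	t2.ReadIdleTimeout = 30 * time.Second // enable PING-based health checks
//	client := &http.Client{Transport: t1} // t1 now negotiates HTTP/2 over TLS
//	_ = client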

func configureTransports(t1 *http.Transport) (*Transport, error) {
	connPool := new(clientConnPool)
	t2 := &Transport{
		ConnPool: noDialClientConnPool{connPool},
		t1:       t1,
	}
	connPool.t = t2
	if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
		return nil, err
	}
	if t1.TLSClientConfig == nil {
		t1.TLSClientConfig = new(tls.Config)
	}
	if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
		t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
	}
	if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
		t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
	}
	upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
		addr := authorityAddr("https", authority)
		if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
			go c.Close()
			return erringRoundTripper{err}
		} else if !used {
			// Turns out we don't need this c.
			// For example, two goroutines made requests to the same host
			// at the same time, both kicking off TCP dials. (since protocol
			// was unknown)
			go c.Close()
		}
		return t2
	}
	if m := t1.TLSNextProto; len(m) == 0 {
		t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
			"h2": upgradeFn,
		}
	} else {
		m["h2"] = upgradeFn
	}
	return t2, nil
}

func (t *Transport) connPool() ClientConnPool {
	t.connPoolOnce.Do(t.initConnPool)
	return t.connPoolOrDef
}

func (t *Transport) initConnPool() {
	if t.ConnPool != nil {
		t.connPoolOrDef = t.ConnPool
	} else {
		t.connPoolOrDef = &clientConnPool{t: t}
	}
}

// ClientConn is the state of a single HTTP/2 client connection to an
// HTTP/2 server.
type ClientConn struct {
	t         *Transport
	tconn     net.Conn             // usually *tls.Conn, except specialized impls
	tlsState  *tls.ConnectionState // nil only for specialized impls
	reused    uint32               // whether conn is being reused; atomic
	singleUse bool                 // whether being used for a single http.Request

	// readLoop goroutine fields:
	readerDone chan struct{} // closed on error
	readerErr  error         // set before readerDone is closed

	idleTimeout time.Duration // or 0 for never
	idleTimer   *time.Timer

	mu              sync.Mutex // guards following
	cond            *sync.Cond // hold mu; broadcast on flow/closed changes
	flow            flow       // our conn-level flow control quota (cs.flow is per stream)
	inflow          flow       // peer's conn-level flow control
	closing         bool
	closed          bool
	wantSettingsAck bool                     // we sent a SETTINGS frame and haven't heard back
	goAway          *GoAwayFrame             // if non-nil, the GoAwayFrame we received
	goAwayDebug     string                   // goAway frame's debug data, retained as a string
	streams         map[uint32]*clientStream // client-initiated
	nextStreamID    uint32
	pendingRequests int                       // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
	pings           map[[8]byte]chan struct{} // in flight ping data to notification channel
	bw              *bufio.Writer
	br              *bufio.Reader
	fr              *Framer
	lastActive      time.Time
	lastIdle        time.Time // time last idle
	// Settings from peer: (also guarded by mu)
	maxFrameSize          uint32
	maxConcurrentStreams  uint32
	peerMaxHeaderListSize uint64
	initialWindowSize     uint32

	hbuf    bytes.Buffer // HPACK encoder writes into this
	henc    *hpack.Encoder
	freeBuf [][]byte

	wmu  sync.Mutex // held while writing; acquire AFTER mu if holding both
	werr error      // first write error that has occurred
}

// clientStream is the state for a single HTTP/2 stream. One of these
// is created for each Transport.RoundTrip call.
type clientStream struct {
	cc            *ClientConn
	req           *http.Request
	trace         *httptrace.ClientTrace // or nil
	ID            uint32
	resc          chan resAndError
	bufPipe       pipe // buffered pipe with the flow-controlled response payload
	startedWrite  bool // started request body write; guarded by cc.mu
	requestedGzip bool
	on100         func() // optional code to run if get a 100 continue response

	flow        flow  // guarded by cc.mu
	inflow      flow  // guarded by cc.mu
	bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
	readErr     error // sticky read error; owned by transportResponseBody.Read
	stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
	didReset    bool  // whether we sent a RST_STREAM to the server; guarded by cc.mu

	peerReset chan struct{} // closed on peer reset
	resetErr  error         // populated before peerReset is closed

	done chan struct{} // closed when stream removed from cc.streams map; close calls guarded by cc.mu

	// owned by clientConnReadLoop:
	firstByte    bool  // got the first response byte
	pastHeaders  bool  // got first MetaHeadersFrame (actual headers)
	pastTrailers bool  // got optional second MetaHeadersFrame (trailers)
	num1xx       uint8 // number of 1xx responses seen

	trailer    http.Header  // accumulated trailers
	resTrailer *http.Header // client's Response.Trailer
}

// awaitRequestCancel waits for the user to cancel a request or for the done
// channel to be signaled. A non-nil error is returned only if the request was
// canceled.
func awaitRequestCancel(req *http.Request, done <-chan struct{}) error {
	ctx := req.Context()
	if req.Cancel == nil && ctx.Done() == nil {
		return nil
	}
	select {
	case <-req.Cancel:
		return errRequestCanceled
	case <-ctx.Done():
		return ctx.Err()
	case <-done:
		return nil
	}
}

var got1xxFuncForTests func(int, textproto.MIMEHeader) error

// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func,
// if any. It returns nil if not set or if the Go version is too old.
func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error {
	if fn := got1xxFuncForTests; fn != nil {
		return fn
	}
	return traceGot1xxResponseFunc(cs.trace)
}

// awaitRequestCancel waits for the user to cancel a request, its context to
// expire, or for the request to be done (any way it might be removed from the
// cc.streams map: peer reset, successful completion, TCP connection breakage,
// etc). If the request is canceled, then cs will be canceled and closed.
func (cs *clientStream) awaitRequestCancel(req *http.Request) {
	if err := awaitRequestCancel(req, cs.done); err != nil {
		cs.cancelStream()
		cs.bufPipe.CloseWithError(err)
	}
}

func (cs *clientStream) cancelStream() {
	cc := cs.cc
	cc.mu.Lock()
	didReset := cs.didReset
	cs.didReset = true
	cc.mu.Unlock()

	if !didReset {
		cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
		cc.forgetStreamID(cs.ID)
	}
}

// checkResetOrDone reports any error sent in a RST_STREAM frame by the
// server, or errStreamClosed if the stream is complete.
func (cs *clientStream) checkResetOrDone() error {
	select {
	case <-cs.peerReset:
		return cs.resetErr
	case <-cs.done:
		return errStreamClosed
	default:
		return nil
	}
}

func (cs *clientStream) getStartedWrite() bool {
	cc := cs.cc
	cc.mu.Lock()
	defer cc.mu.Unlock()
	return cs.startedWrite
}

func (cs *clientStream) abortRequestBodyWrite(err error) {
	if err == nil {
		panic("nil error")
	}
	cc := cs.cc
	cc.mu.Lock()
	cs.stopReqBody = err
	cc.cond.Broadcast()
	cc.mu.Unlock()
}

type stickyErrWriter struct {
	w   io.Writer
	err *error
}

func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
	if *sew.err != nil {
		return 0, *sew.err
	}
	n, err = sew.w.Write(p)
	*sew.err = err
	return
}

// noCachedConnError is the concrete type of ErrNoCachedConn, which needs to be
// detected by net/http regardless of whether it's its bundled version (in
// h2_bundle.go with a rewritten type name) or from a user's x/net/http2. As
// such, as it has a unique method name (IsHTTP2NoCachedConnError) that
// net/http sniffs for via func isNoCachedConnError.
type noCachedConnError struct{}

func (noCachedConnError) IsHTTP2NoCachedConnError() {}
func (noCachedConnError) Error() string             { return "http2: no cached connection was available" }

// isNoCachedConnError reports whether err is of type noCachedConnError or its
// equivalent renamed type in net/http2's h2_bundle.go. Both types may coexist
// in the same running program.
func isNoCachedConnError(err error) bool {
	_, ok := err.(interface{ IsHTTP2NoCachedConnError() })
	return ok
}

var ErrNoCachedConn error = noCachedConnError{}

// RoundTripOpt are options for the Transport.RoundTripOpt method.
type RoundTripOpt struct {
	// OnlyCachedConn controls whether RoundTripOpt may create a new TCP
	// connection. If set true and no cached connection is available,
	// RoundTripOpt will return ErrNoCachedConn.
	OnlyCachedConn bool
}

func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
	return t.RoundTripOpt(req, RoundTripOpt{})
}

// authorityAddr returns a given authority (a host/IP, or host:port / ip:port)
// and returns a host:port. The port 443 is added if needed.
func authorityAddr(scheme string, authority string) (addr string) {
	host, port, err := net.SplitHostPort(authority)
	if err != nil { // authority didn't have a port
		port = "443"
		if scheme == "http" {
			port = "80"
		}
		host = authority
	}
	if a, err := idna.ToASCII(host); err == nil {
		host = a
	}
	// IPv6 address literal, without a port:
	if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
		return host + ":" + port
	}
	return net.JoinHostPort(host, port)
}
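
// For illustration (assumed behavior, following the function above), a few
// example inputs and results:
//
//	authorityAddr("https", "example.com")      // "example.com:443"
//	authorityAddr("http", "example.com")       // "example.com:80"
//	authorityAddr("https", "example.com:8443") // "example.com:8443"
//	authorityAddr("https", "[::1]")            // "[::1]:443"
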
// RoundTripOpt is like RoundTrip, but takes options.
func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
	if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
		return nil, errors.New("http2: unsupported scheme")
	}

	addr := authorityAddr(req.URL.Scheme, req.URL.Host)
	for retry := 0; ; retry++ {
		cc, err := t.connPool().GetClientConn(req, addr)
		if err != nil {
			t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
			return nil, err
		}
		reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1)
		traceGotConn(req, cc, reused)
		res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req)
		if err != nil && retry <= 6 {
			if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil {
				// After the first retry, do exponential backoff with 10% jitter.
				if retry == 0 {
					continue
				}
				backoff := float64(uint(1) << (uint(retry) - 1))
				backoff += backoff * (0.1 * mathrand.Float64())
				select {
				case <-time.After(time.Second * time.Duration(backoff)):
					continue
				case <-req.Context().Done():
					return nil, req.Context().Err()
				}
			}
		}
		if err != nil {
			t.vlogf("RoundTrip failure: %v", err)
			return nil, err
		}
		return res, nil
	}
}
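
// Illustrative sketch (not part of the original file): using RoundTripOpt with
// OnlyCachedConn to avoid dialing when no connection is already cached. The
// request construction is an arbitrary example.
//
//	req, _ := http.NewRequest("GET", "https://example.com/", nil)
//	res, err := t2.RoundTripOpt(req, http2.RoundTripOpt{OnlyCachedConn: true})
//	if err == http2.ErrNoCachedConn {
//		// Fall back to a normal RoundTrip, which may dial.
//		res, err = t2.RoundTrip(req)
//	}
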
// CloseIdleConnections closes any connections which were previously connected
// from previous requests but are now sitting idle. It does not interrupt any
// connections currently in use.
func (t *Transport) CloseIdleConnections() {
	if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok {
		cp.closeIdleConnections()
	}
}

var (
	errClientConnClosed    = errors.New("http2: client conn is closed")
	errClientConnUnusable  = errors.New("http2: client conn not usable")
	errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
)

// shouldRetryRequest is called by RoundTrip when a request fails to get
// response headers. It is always called with a non-nil error. It returns
// either a request to retry (either the same request, or a modified clone),
// or an error if the request can't be replayed.
func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) {
	if !canRetryError(err) {
		return nil, err
	}
	// If the Body is nil (or http.NoBody), it's safe to reuse
	// this request and its Body.
	if req.Body == nil || req.Body == http.NoBody {
		return req, nil
	}

	// If the request body can be reset back to its original
	// state via the optional req.GetBody, do that.
	if req.GetBody != nil {
		// TODO: consider a req.Body.Close here? or audit that all caller paths do?
		body, err := req.GetBody()
		if err != nil {
			return nil, err
		}
		newReq := *req
		newReq.Body = body
		return &newReq, nil
	}

	// The Request.Body can't reset back to the beginning, but we
	// don't seem to have started to read from it yet, so reuse
	// the request directly. The "afterBodyWrite" means the
	// bodyWrite process has started, which becomes true before
	// the first Read.
	if !afterBodyWrite {
		return req, nil
	}

	return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err)
}

func canRetryError(err error) bool {
	if err == errClientConnUnusable || err == errClientConnGotGoAway {
		return true
	}
	if se, ok := err.(StreamError); ok {
		return se.Code == ErrCodeRefusedStream
	}
	return false
}

func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) {
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		return nil, err
	}
	tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host))
	if err != nil {
		return nil, err
	}
	return t.newClientConn(tconn, singleUse)
}

func (t *Transport) newTLSConfig(host string) *tls.Config {
	cfg := new(tls.Config)
	if t.TLSClientConfig != nil {
		*cfg = *t.TLSClientConfig.Clone()
	}
	if !strSliceContains(cfg.NextProtos, NextProtoTLS) {
		cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...)
	}
	if cfg.ServerName == "" {
		cfg.ServerName = host
	}
	return cfg
}

func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) {
	if t.DialTLS != nil {
		return t.DialTLS
	}
	return t.dialTLSDefault
}

func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) {
	cn, err := tls.Dial(network, addr, cfg)
	if err != nil {
		return nil, err
	}
	if err := cn.Handshake(); err != nil {
		return nil, err
	}
	if !cfg.InsecureSkipVerify {
		if err := cn.VerifyHostname(cfg.ServerName); err != nil {
			return nil, err
		}
	}
	state := cn.ConnectionState()
	if p := state.NegotiatedProtocol; p != NextProtoTLS {
		return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS)
	}
	if !state.NegotiatedProtocolIsMutual {
		return nil, errors.New("http2: could not negotiate protocol mutually")
	}
	return cn, nil
}

// disableKeepAlives reports whether connections should be closed as
// soon as possible after handling the first request.
func (t *Transport) disableKeepAlives() bool {
	return t.t1 != nil && t.t1.DisableKeepAlives
}

func (t *Transport) expectContinueTimeout() time.Duration {
	if t.t1 == nil {
		return 0
	}
	return t.t1.ExpectContinueTimeout
}

func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
	return t.newClientConn(c, t.disableKeepAlives())
}

func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
	cc := &ClientConn{
		t:                     t,
		tconn:                 c,
		readerDone:            make(chan struct{}),
		nextStreamID:          1,
		maxFrameSize:          16 << 10,           // spec default
		initialWindowSize:     65535,              // spec default
		maxConcurrentStreams:  1000,               // "infinite", per spec. 1000 seems good enough.
		peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
		streams:               make(map[uint32]*clientStream),
		singleUse:             singleUse,
		wantSettingsAck:       true,
		pings:                 make(map[[8]byte]chan struct{}),
	}
	if d := t.idleConnTimeout(); d != 0 {
		cc.idleTimeout = d
		cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
	}
	if VerboseLogs {
		t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
	}

	cc.cond = sync.NewCond(&cc.mu)
	cc.flow.add(int32(initialWindowSize))

	// TODO: adjust this writer size to account for frame size +
	// MTU + crypto/tls record padding.
	cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr})
	cc.br = bufio.NewReader(c)
	cc.fr = NewFramer(cc.bw, cc.br)
	cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
	cc.fr.MaxHeaderListSize = t.maxHeaderListSize()

	// TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
	// henc in response to SETTINGS frames?
	cc.henc = hpack.NewEncoder(&cc.hbuf)

	if t.AllowHTTP {
		cc.nextStreamID = 3
	}

	if cs, ok := c.(connectionStater); ok {
		state := cs.ConnectionState()
		cc.tlsState = &state
	}

	initialSettings := []Setting{
		{ID: SettingEnablePush, Val: 0},
		{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
	}
	if max := t.maxHeaderListSize(); max != 0 {
		initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
	}

	cc.bw.Write(clientPreface)
	cc.fr.WriteSettings(initialSettings...)
	cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
	cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
	cc.bw.Flush()
	if cc.werr != nil {
		cc.Close()
		return nil, cc.werr
	}

	go cc.readLoop()
	return cc, nil
}

func (cc *ClientConn) healthCheck() {
	pingTimeout := cc.t.pingTimeout()
	// We don't need to periodically ping in the health check, because the
	// readLoop of ClientConn will trigger the healthCheck again if there is
	// no frame received.
	ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
	defer cancel()
	err := cc.Ping(ctx)
	if err != nil {
		cc.closeForLostPing()
		cc.t.connPool().MarkDead(cc)
		return
	}
}

func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
	cc.mu.Lock()
	defer cc.mu.Unlock()

	old := cc.goAway
	cc.goAway = f

	// Merge the previous and current GoAway error frames.
	if cc.goAwayDebug == "" {
		cc.goAwayDebug = string(f.DebugData())
	}
	if old != nil && old.ErrCode != ErrCodeNo {
		cc.goAway.ErrCode = old.ErrCode
	}
	last := f.LastStreamID
	for streamID, cs := range cc.streams {
		if streamID > last {
			select {
			case cs.resc <- resAndError{err: errClientConnGotGoAway}:
			default:
			}
		}
	}
}

// CanTakeNewRequest reports whether the connection can take a new request,
// meaning it has not been closed or received or sent a GOAWAY.
func (cc *ClientConn) CanTakeNewRequest() bool {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	return cc.canTakeNewRequestLocked()
}

// clientConnIdleState describes the suitability of a client
// connection to initiate a new RoundTrip request.
type clientConnIdleState struct {
	canTakeNewRequest bool
	freshConn         bool // whether it's unused by any previous request
}

func (cc *ClientConn) idleState() clientConnIdleState {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	return cc.idleStateLocked()
}

func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
	if cc.singleUse && cc.nextStreamID > 1 {
		return
	}
	var maxConcurrentOkay bool
	if cc.t.StrictMaxConcurrentStreams {
		// We'll tell the caller we can take a new request to
		// prevent the caller from dialing a new TCP
		// connection, but then we'll block later before
		// writing it.
		maxConcurrentOkay = true
	} else {
		maxConcurrentOkay = int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams)
	}

	st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
		int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
		!cc.tooIdleLocked()
	st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
	return
}

func (cc *ClientConn) canTakeNewRequestLocked() bool {
	st := cc.idleStateLocked()
	return st.canTakeNewRequest
}

// tooIdleLocked reports whether this connection has been sitting idle
// for too much wall time.
func (cc *ClientConn) tooIdleLocked() bool {
	// The Round(0) strips the monotonic clock reading so the times are
	// compared based on their wall time. We don't want to reuse a
	// connection that's been sitting idle during VM/laptop suspend if
	// monotonic time was also frozen.
	return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
}

// onIdleTimeout is called from a time.AfterFunc goroutine. It will only be
// called when we're idle, but because we're coming from a new goroutine,
// there could be a new request coming in at the same time, so this simply
// calls the synchronized closeIfIdle to shut down this connection. The timer
// could just call closeIfIdle, but this is more clear.
func (cc *ClientConn) onIdleTimeout() {
	cc.closeIfIdle()
}

func (cc *ClientConn) closeIfIdle() {
	cc.mu.Lock()
	if len(cc.streams) > 0 {
		cc.mu.Unlock()
		return
	}
	cc.closed = true
	nextID := cc.nextStreamID
	// TODO: do clients send GOAWAY too? maybe? Just Close:
	cc.mu.Unlock()

	if VerboseLogs {
		cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
	}
	cc.tconn.Close()
}

var shutdownEnterWaitStateHook = func() {}

// Shutdown gracefully closes the client connection, waiting for running streams to complete.
func (cc *ClientConn) Shutdown(ctx context.Context) error {
	if err := cc.sendGoAway(); err != nil {
		return err
	}
	// Wait for all in-flight streams to complete or connection to close
	done := make(chan error, 1)
	cancelled := false // guarded by cc.mu
	go func() {
		cc.mu.Lock()
		defer cc.mu.Unlock()
		for {
			if len(cc.streams) == 0 || cc.closed {
				cc.closed = true
				done <- cc.tconn.Close()
				break
			}
			if cancelled {
				break
			}
			cc.cond.Wait()
		}
	}()
	shutdownEnterWaitStateHook()
	select {
	case err := <-done:
		return err
	case <-ctx.Done():
		cc.mu.Lock()
		// Free the goroutine above
		cancelled = true
		cc.cond.Broadcast()
		cc.mu.Unlock()
		return ctx.Err()
	}
}
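
// Illustrative sketch (not part of the original file): gracefully shutting down
// a ClientConn obtained from Transport.NewClientConn, bounded by a context
// deadline; the timeout value is an arbitrary example.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	if err := cc.Shutdown(ctx); err != nil {
//		cc.Close() // force-close if the graceful shutdown did not finish in time
//	}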

func (cc *ClientConn) sendGoAway() error {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	cc.wmu.Lock()
	defer cc.wmu.Unlock()
	if cc.closing {
		// GOAWAY sent already
		return nil
	}
	// Send a graceful shutdown frame to server
	maxStreamID := cc.nextStreamID
	if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil {
		return err
	}
	if err := cc.bw.Flush(); err != nil {
		return err
	}
	// Prevent new requests
	cc.closing = true
	return nil
}

// closeForError closes the client connection immediately.
// In-flight requests are interrupted. err is sent to streams.
func (cc *ClientConn) closeForError(err error) error {
	cc.mu.Lock()
	defer cc.cond.Broadcast()
	defer cc.mu.Unlock()
	for id, cs := range cc.streams {
		select {
		case cs.resc <- resAndError{err: err}:
		default:
		}
		cs.bufPipe.CloseWithError(err)
		delete(cc.streams, id)
	}
	cc.closed = true
	return cc.tconn.Close()
}

// Close closes the client connection immediately.
// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
func (cc *ClientConn) Close() error {
	err := errors.New("http2: client connection force closed via ClientConn.Close")
	return cc.closeForError(err)
}

// closeForLostPing closes the client connection immediately.
// In-flight requests are interrupted.
func (cc *ClientConn) closeForLostPing() error {
	err := errors.New("http2: client connection lost")
	return cc.closeForError(err)
}

const maxAllocFrameSize = 512 << 10

// frameBuffer returns a scratch buffer suitable for writing DATA frames.
// They're capped at the min of the peer's max frame size or 512KB (kinda
// arbitrarily), but definitely capped so we don't allocate 4GB buffers.
func (cc *ClientConn) frameScratchBuffer() []byte {
	cc.mu.Lock()
	size := cc.maxFrameSize
	if size > maxAllocFrameSize {
		size = maxAllocFrameSize
	}
	for i, buf := range cc.freeBuf {
		if len(buf) >= int(size) {
			cc.freeBuf[i] = nil
			cc.mu.Unlock()
			return buf[:size]
		}
	}
	cc.mu.Unlock()
	return make([]byte, size)
}

func (cc *ClientConn) putFrameScratchBuffer(buf []byte) {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.
	if len(cc.freeBuf) < maxBufs {
		cc.freeBuf = append(cc.freeBuf, buf)
		return
	}
	for i, old := range cc.freeBuf {
		if old == nil {
			cc.freeBuf[i] = buf
			return
		}
	}
	// forget about it.
}

// errRequestCanceled is a copy of net/http's errRequestCanceled because it's
// not exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests.
var errRequestCanceled = errors.New("net/http: request canceled")

func commaSeparatedTrailers(req *http.Request) (string, error) {
	keys := make([]string, 0, len(req.Trailer))
	for k := range req.Trailer {
		k = http.CanonicalHeaderKey(k)
		switch k {
		case "Transfer-Encoding", "Trailer", "Content-Length":
			return "", fmt.Errorf("invalid Trailer key %q", k)
		}
		keys = append(keys, k)
	}
	if len(keys) > 0 {
		sort.Strings(keys)
		return strings.Join(keys, ","), nil
	}
	return "", nil
}

func (cc *ClientConn) responseHeaderTimeout() time.Duration {
	if cc.t.t1 != nil {
		return cc.t.t1.ResponseHeaderTimeout
	}
	// No way to do this (yet?) with just an http2.Transport. Probably no
	// need. Request.Cancel this is the new way. We only need to support
	// this for compatibility with the old http.Transport fields when we're
	// doing transparent http2.
	return 0
}

// checkConnHeaders checks whether req has any invalid connection-level headers.
// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
// Certain headers are special-cased as okay but not transmitted later.
func checkConnHeaders(req *http.Request) error {
	if v := req.Header.Get("Upgrade"); v != "" {
		return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
	}
	if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
		return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
	}
	if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !strings.EqualFold(vv[0], "close") && !strings.EqualFold(vv[0], "keep-alive")) {
		return fmt.Errorf("http2: invalid Connection request header: %q", vv)
	}
	return nil
}

// actualContentLength returns a sanitized version of req.ContentLength,
// where 0 actually means zero (not unknown) and -1 means unknown.
func actualContentLength(req *http.Request) int64 {
	if req.Body == nil || req.Body == http.NoBody {
		return 0
	}
	if req.ContentLength != 0 {
		return req.ContentLength
	}
	return -1
}

func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, _, err := cc.roundTrip(req)
	return resp, err
}

func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) {
	if err := checkConnHeaders(req); err != nil {
		return nil, false, err
	}
	if cc.idleTimer != nil {
		cc.idleTimer.Stop()
	}

	trailers, err := commaSeparatedTrailers(req)
	if err != nil {
		return nil, false, err
	}
	hasTrailers := trailers != ""

	cc.mu.Lock()
	if err := cc.awaitOpenSlotForRequest(req); err != nil {
		cc.mu.Unlock()
		return nil, false, err
	}

	body := req.Body
	contentLen := actualContentLength(req)
	hasBody := contentLen != 0

	// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
	var requestedGzip bool
	if !cc.t.disableCompression() &&
		req.Header.Get("Accept-Encoding") == "" &&
		req.Header.Get("Range") == "" &&
		req.Method != "HEAD" {
		// Request gzip only, not deflate. Deflate is ambiguous and
		// not as universally supported anyway.
		// See: https://zlib.net/zlib_faq.html#faq39
		//
		// Note that we don't request this for HEAD requests,
		// due to a bug in nginx:
		//   http://trac.nginx.org/nginx/ticket/358
		//   https://golang.org/issue/5522
		//
		// We don't request gzip if the request is for a range, since
		// auto-decoding a portion of a gzipped document will just fail
		// anyway. See https://golang.org/issue/8923
		requestedGzip = true
	}

	// we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
	// sent by writeRequestBody below, along with any Trailers,
	// again in form HEADERS{1}, CONTINUATION{0,})
	hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen)
	if err != nil {
		cc.mu.Unlock()
		return nil, false, err
	}

	cs := cc.newStream()
	cs.req = req
	cs.trace = httptrace.ContextClientTrace(req.Context())
	cs.requestedGzip = requestedGzip
	bodyWriter := cc.t.getBodyWriterState(cs, body)
	cs.on100 = bodyWriter.on100

	defer func() {
		cc.wmu.Lock()
		werr := cc.werr
		cc.wmu.Unlock()
		if werr != nil {
			cc.Close()
		}
	}()

	cc.wmu.Lock()
	endStream := !hasBody && !hasTrailers
	werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
	cc.wmu.Unlock()
	traceWroteHeaders(cs.trace)
	cc.mu.Unlock()

	if werr != nil {
		if hasBody {
			req.Body.Close() // per RoundTripper contract
			bodyWriter.cancel()
		}
		// Don't bother sending a RST_STREAM (our write already failed;
		// no need to keep writing)
		traceWroteRequest(cs.trace, werr)
		return nil, false, werr
	}

	var respHeaderTimer <-chan time.Time
	if hasBody {
		bodyWriter.scheduleBodyWrite()
	} else {
		traceWroteRequest(cs.trace, nil)
		if d := cc.responseHeaderTimeout(); d != 0 {
			timer := time.NewTimer(d)
			defer timer.Stop()
			respHeaderTimer = timer.C
		}
	}

	readLoopResCh := cs.resc
	bodyWritten := false
	ctx := req.Context()

	handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) {
		res := re.res
		if re.err != nil || res.StatusCode > 299 {
			// On error or status code 3xx, 4xx, 5xx, etc abort any
			// ongoing write, assuming that the server doesn't care
			// about our request body. If the server replied with 1xx or
			// 2xx, however, then assume the server DOES potentially
			// want our body (e.g. full-duplex streaming:
			// golang.org/issue/13444). If it turns out the server
			// doesn't, they'll RST_STREAM us soon enough. This is a
			// heuristic to avoid adding knobs to Transport. Hopefully
			// we can keep it.
			bodyWriter.cancel()
			cs.abortRequestBodyWrite(errStopReqBodyWrite)
			if hasBody && !bodyWritten {
				<-bodyWriter.resc
			}
		}
		if re.err != nil {
			cc.forgetStreamID(cs.ID)
			return nil, cs.getStartedWrite(), re.err
		}
		res.Request = req
		res.TLS = cc.tlsState
		return res, false, nil
	}

	for {
		select {
		case re := <-readLoopResCh:
			return handleReadLoopResponse(re)
		case <-respHeaderTimer:
			if !hasBody || bodyWritten {
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
			} else {
				bodyWriter.cancel()
				cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
				<-bodyWriter.resc
			}
			cc.forgetStreamID(cs.ID)
			return nil, cs.getStartedWrite(), errTimeout
		case <-ctx.Done():
			if !hasBody || bodyWritten {
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
			} else {
				bodyWriter.cancel()
				cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
				<-bodyWriter.resc
			}
			cc.forgetStreamID(cs.ID)
			return nil, cs.getStartedWrite(), ctx.Err()
		case <-req.Cancel:
			if !hasBody || bodyWritten {
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
			} else {
				bodyWriter.cancel()
				cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
				<-bodyWriter.resc
			}
			cc.forgetStreamID(cs.ID)
			return nil, cs.getStartedWrite(), errRequestCanceled
		case <-cs.peerReset:
			// processResetStream already removed the stream from the
			// streams map; no need for forgetStreamID.
			return nil, cs.getStartedWrite(), cs.resetErr
		case err := <-bodyWriter.resc:
			bodyWritten = true
			// Prefer the read loop's response, if available. Issue 16102.
			select {
			case re := <-readLoopResCh:
				return handleReadLoopResponse(re)
			default:
			}
			if err != nil {
				cc.forgetStreamID(cs.ID)
				return nil, cs.getStartedWrite(), err
			}
			if d := cc.responseHeaderTimeout(); d != 0 {
				timer := time.NewTimer(d)
				defer timer.Stop()
				respHeaderTimer = timer.C
			}
		}
	}
}

// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams.
// Must hold cc.mu.
func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error {
	var waitingForConn chan struct{}
	var waitingForConnErr error // guarded by cc.mu
	for {
		cc.lastActive = time.Now()
		if cc.closed || !cc.canTakeNewRequestLocked() {
			if waitingForConn != nil {
				close(waitingForConn)
			}
			return errClientConnUnusable
		}
		cc.lastIdle = time.Time{}
		if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) {
			if waitingForConn != nil {
				close(waitingForConn)
			}
			return nil
		}
		// Unfortunately, we cannot wait on a condition variable and channel at
		// the same time, so instead, we spin up a goroutine to check if the
		// request is canceled while we wait for a slot to open in the connection.
		if waitingForConn == nil {
			waitingForConn = make(chan struct{})
			go func() {
				if err := awaitRequestCancel(req, waitingForConn); err != nil {
					cc.mu.Lock()
					waitingForConnErr = err
					cc.cond.Broadcast()
					cc.mu.Unlock()
				}
			}()
		}
		cc.pendingRequests++
		cc.cond.Wait()
		cc.pendingRequests--
		if waitingForConnErr != nil {
			return waitingForConnErr
		}
	}
}

// requires cc.wmu be held
func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error {
	first := true // first frame written (HEADERS is first, then CONTINUATION)
	for len(hdrs) > 0 && cc.werr == nil {
		chunk := hdrs
		if len(chunk) > maxFrameSize {
			chunk = chunk[:maxFrameSize]
		}
		hdrs = hdrs[len(chunk):]
		endHeaders := len(hdrs) == 0
		if first {
			cc.fr.WriteHeaders(HeadersFrameParam{
				StreamID:      streamID,
				BlockFragment: chunk,
				EndStream:     endStream,
				EndHeaders:    endHeaders,
			})
			first = false
		} else {
			cc.fr.WriteContinuation(streamID, endHeaders, chunk)
		}
	}
	// TODO(bradfitz): this Flush could potentially block (as could the
	// WriteHeaders call(s) above), which means they wouldn't respond to
	// Request.Cancel being readable. That's rare, but this should probably
	// be in a goroutine.
	cc.bw.Flush()
	return cc.werr
}

// internal error values; they don't escape to callers
var (
	// abort request body write; don't send cancel
	errStopReqBodyWrite = errors.New("http2: aborting request body write")

	// abort request body write, but send stream reset of cancel.
	errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")

	errReqBodyTooLong = errors.New("http2: request body larger than specified content length")
)

func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
	cc := cs.cc
	sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
	buf := cc.frameScratchBuffer()
	defer cc.putFrameScratchBuffer(buf)

	defer func() {
		// TODO: write h12Compare test showing whether Request.Body is
		// closed by the Transport, and in multiple cases: server replies
		// <=299 and >299 while still writing request body
		cerr := bodyCloser.Close()
		if err == nil {
			err = cerr
		}
	}()

	req := cs.req
	hasTrailers := req.Trailer != nil
	remainLen := actualContentLength(req)
	hasContentLen := remainLen != -1

	var sawEOF bool
	for !sawEOF {
		n, err := body.Read(buf[:len(buf)-1])
		if hasContentLen {
			remainLen -= int64(n)
			if remainLen == 0 && err == nil {
				// The request body's Content-Length was predeclared and
				// we just finished reading it all, but the underlying
				// io.Reader returned the final chunk with a nil error
				// (which is one of the two valid things a Reader can do
				// at EOF). Because we'd prefer to send the END_STREAM bit
				// early, double-check that we're actually at EOF.
				// Subsequent reads should return (0, EOF) at this point.
				// If either value is different, we return an error in one
				// of two ways below.
				var scratch [1]byte
				var n1 int
				n1, err = body.Read(scratch[:])
				remainLen -= int64(n1)
			}
			if remainLen < 0 {
				err = errReqBodyTooLong
				cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
				return err
			}
		}
		if err == io.EOF {
			sawEOF = true
			err = nil
		} else if err != nil {
			cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
			return err
		}

		remain := buf[:n]
		for len(remain) > 0 && err == nil {
			var allowed int32
			allowed, err = cs.awaitFlowControl(len(remain))
			switch {
			case err == errStopReqBodyWrite:
				return err
			case err == errStopReqBodyWriteAndCancel:
				cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
				return err
			case err != nil:
				return err
			}
			cc.wmu.Lock()
			data := remain[:allowed]
			remain = remain[allowed:]
			sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
			err = cc.fr.WriteData(cs.ID, sentEnd, data)
			if err == nil {
				// TODO(bradfitz): this flush is for latency, not bandwidth.
				// Most requests won't need this. Make this opt-in or
				// opt-out? Use some heuristic on the body type? Nagel-like
				// timers? Based on 'n'? Only last chunk of this for loop,
				// unless flow control tokens are low? For now, always.
				// If we change this, see comment below.
				err = cc.bw.Flush()
			}
			cc.wmu.Unlock()
		}
		if err != nil {
			return err
		}
	}

	if sentEnd {
		// Already sent END_STREAM (which implies we have no trailers) and
		// flushed, because currently all WriteData frames above get a
		// flush. So we're done.
		return nil
	}

	var trls []byte
	if hasTrailers {
		cc.mu.Lock()
		trls, err = cc.encodeTrailers(req)
		cc.mu.Unlock()
		if err != nil {
			cc.writeStreamReset(cs.ID, ErrCodeInternal, err)
			cc.forgetStreamID(cs.ID)
			return err
		}
	}

	cc.mu.Lock()
	maxFrameSize := int(cc.maxFrameSize)
	cc.mu.Unlock()

	cc.wmu.Lock()
	defer cc.wmu.Unlock()

	// Two ways to send END_STREAM: either with trailers, or
	// with an empty DATA frame.
	if len(trls) > 0 {
		err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls)
	} else {
		err = cc.fr.WriteData(cs.ID, true, nil)
	}
	if ferr := cc.bw.Flush(); ferr != nil && err == nil {
		err = ferr
	}
	return err
}

// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow
// control tokens from the server. It returns either the non-zero number of
// tokens taken or an error if the stream is dead.
func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
	cc := cs.cc
	cc.mu.Lock()
	defer cc.mu.Unlock()
	for {
		if cc.closed {
			return 0, errClientConnClosed
		}
		if cs.stopReqBody != nil {
			return 0, cs.stopReqBody
		}
		if err := cs.checkResetOrDone(); err != nil {
			return 0, err
		}
		if a := cs.flow.available(); a > 0 {
			take := a
			if int(take) > maxBytes {
				take = int32(maxBytes) // can't truncate int; take is int32
			}
			if take > int32(cc.maxFrameSize) {
				take = int32(cc.maxFrameSize)
			}
			cs.flow.take(take)
			return take, nil
		}
		cc.cond.Wait()
	}
}

// requires cc.mu be held.
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
	cc.hbuf.Reset()

	host := req.Host
	if host == "" {
		host = req.URL.Host
	}
	host, err := httpguts.PunycodeHostPort(host)
	if err != nil {
		return nil, err
	}

	var path string
	if req.Method != "CONNECT" {
		path = req.URL.RequestURI()
		if !validPseudoPath(path) {
			orig := path
			path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
			if !validPseudoPath(path) {
				if req.URL.Opaque != "" {
					return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
				} else {
					return nil, fmt.Errorf("invalid request :path %q", orig)
				}
			}
		}
	}

	// Check for any invalid headers and return an error before we
	// potentially pollute our hpack state. (We want to be able to
	// continue to reuse the hpack encoder for future requests)
	for k, vv := range req.Header {
		if !httpguts.ValidHeaderFieldName(k) {
			return nil, fmt.Errorf("invalid HTTP header name %q", k)
		}
		for _, v := range vv {
			if !httpguts.ValidHeaderFieldValue(v) {
				return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k)
			}
		}
	}

	enumerateHeaders := func(f func(name, value string)) {
		// 8.1.2.3 Request Pseudo-Header Fields
		// The :path pseudo-header field includes the path and query parts of
		// the target URI (the path-absolute production and optionally a '?'
		// character followed by the query production (see Sections 3.3 and 3.4
		// of [RFC3986]).
		f(":authority", host)
		m := req.Method
		if m == "" {
			m = http.MethodGet
		}
		f(":method", m)
		if req.Method != "CONNECT" {
			f(":path", path)
			f(":scheme", req.URL.Scheme)
		}
		if trailers != "" {
			f("trailer", trailers)
		}

		var didUA bool
		for k, vv := range req.Header {
			if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") {
				// Host is :authority, already sent.
				// Content-Length is automatic, set below.
				continue
			} else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") ||
				strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") ||
				strings.EqualFold(k, "keep-alive") {
				// Per 8.1.2.2 Connection-Specific Header Fields, don't send
				// connection-specific fields. We have already checked if any
				// are error-worthy so just ignore the rest.
				continue
			} else if strings.EqualFold(k, "user-agent") {
				// Match Go's http1 behavior: at most one User-Agent. If set
				// to nil or empty string, then omit it. Otherwise if not
				// mentioned, include the default (below).
				didUA = true
				if len(vv) < 1 {
					continue
				}
				vv = vv[:1]
				if vv[0] == "" {
					continue
				}
			} else if strings.EqualFold(k, "cookie") {
				// Per 8.1.2.5 To allow for better compression efficiency, the
				// Cookie header field MAY be split into separate header fields,
				// each with one or more cookie-pairs.
				for _, v := range vv {
					for {
						p := strings.IndexByte(v, ';')
						if p < 0 {
							break
						}
						f("cookie", v[:p])
						p++
						// strip space after semicolon if any.
						for p+1 <= len(v) && v[p] == ' ' {
							p++
						}
						v = v[p:]
					}
					if len(v) > 0 {
						f("cookie", v)
					}
				}
				continue
			}

			for _, v := range vv {
				f(k, v)
			}
		}
		if shouldSendReqContentLength(req.Method, contentLength) {
			f("content-length", strconv.FormatInt(contentLength, 10))
		}
		if addGzipHeader {
			f("accept-encoding", "gzip")
		}
		if !didUA {
			f("user-agent", defaultUserAgent)
		}
	}

	// Do a first pass over the headers counting bytes to ensure we don't
	// exceed cc.peerMaxHeaderListSize. This is done as a separate pass before
	// encoding the headers to prevent modifying the hpack state.
	hlSize := uint64(0)
	enumerateHeaders(func(name, value string) {
		hf := hpack.HeaderField{Name: name, Value: value}
		hlSize += uint64(hf.Size())
	})

	if hlSize > cc.peerMaxHeaderListSize {
		return nil, errRequestHeaderListSize
	}

	trace := httptrace.ContextClientTrace(req.Context())
	traceHeaders := traceHasWroteHeaderField(trace)

	// Header list size is ok. Write the headers.
	enumerateHeaders(func(name, value string) {
		name = strings.ToLower(name)
		cc.writeHeader(name, value)
		if traceHeaders {
			traceWroteHeaderField(trace, name, value)
		}
	})

	return cc.hbuf.Bytes(), nil
}

// shouldSendReqContentLength reports whether the http2.Transport should send
// a "content-length" request header. This logic is basically a copy of the
// net/http transferWriter.shouldSendContentLength. The contentLength is the
// corrected contentLength (so 0 means actually 0, not unknown). -1 means
// unknown.
func shouldSendReqContentLength(method string, contentLength int64) bool {
	if contentLength > 0 {
		return true
	}
	if contentLength < 0 {
		return false
	}
	// For zero bodies, whether we send a content-length depends on the method.
	// It also kinda doesn't matter for http2 either way, with END_STREAM.
	switch method {
	case "POST", "PUT", "PATCH":
		return true
	default:
		return false
	}
}

// requires cc.mu be held.
func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) {
	cc.hbuf.Reset()

	hlSize := uint64(0)
	for k, vv := range req.Trailer {
		for _, v := range vv {
			hf := hpack.HeaderField{Name: k, Value: v}
			hlSize += uint64(hf.Size())
		}
	}
	if hlSize > cc.peerMaxHeaderListSize {
		return nil, errRequestHeaderListSize
	}

	for k, vv := range req.Trailer {
		// Transfer-Encoding, etc.. have already been filtered at the
		// start of RoundTrip
		lowKey := strings.ToLower(k)
		for _, v := range vv {
			cc.writeHeader(lowKey, v)
		}
	}
	return cc.hbuf.Bytes(), nil
}

func (cc *ClientConn) writeHeader(name, value string) {
	if VerboseLogs {
		log.Printf("http2: Transport encoding header %q = %q", name, value)
	}
	cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
}

type resAndError struct {
	_   incomparable
	res *http.Response
	err error
}

// requires cc.mu be held.
func (cc *ClientConn) newStream() *clientStream {
	cs := &clientStream{
		cc:        cc,
		ID:        cc.nextStreamID,
		resc:      make(chan resAndError, 1),
		peerReset: make(chan struct{}),
		done:      make(chan struct{}),
	}
	cs.flow.add(int32(cc.initialWindowSize))
	cs.flow.setConnFlow(&cc.flow)
	cs.inflow.add(transportDefaultStreamFlow)
	cs.inflow.setConnFlow(&cc.inflow)
	cc.nextStreamID += 2
	cc.streams[cs.ID] = cs
	return cs
}

func (cc *ClientConn) forgetStreamID(id uint32) {
	cc.streamByID(id, true)
}

func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
	cc.mu.Lock()
	defer cc.mu.Unlock()
	cs := cc.streams[id]
	if andRemove && cs != nil && !cc.closed {
		cc.lastActive = time.Now()
		delete(cc.streams, id)
		if len(cc.streams) == 0 && cc.idleTimer != nil {
			cc.idleTimer.Reset(cc.idleTimeout)
			cc.lastIdle = time.Now()
		}
		close(cs.done)
		// Wake up checkResetOrDone via clientStream.awaitFlowControl and
		// wake up RoundTrip if there is a pending request.
		cc.cond.Broadcast()
	}
	return cs
}

// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
type clientConnReadLoop struct {
	cc            *ClientConn
	closeWhenIdle bool
}

// readLoop runs in its own goroutine and reads and dispatches frames.
func (cc *ClientConn) readLoop() {
	rl := &clientConnReadLoop{cc: cc}
	defer rl.cleanup()
	cc.readerErr = rl.run()
	if ce, ok := cc.readerErr.(ConnectionError); ok {
		cc.wmu.Lock()
		cc.fr.WriteGoAway(0, ErrCode(ce), nil)
		cc.wmu.Unlock()
	}
}

// GoAwayError is returned by the Transport when the server closes the
// TCP connection after sending a GOAWAY frame.
type GoAwayError struct {
	LastStreamID uint32
	ErrCode      ErrCode
	DebugData    string
}

func (e GoAwayError) Error() string {
	return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q",
		e.LastStreamID, e.ErrCode, e.DebugData)
}

func isEOFOrNetReadError(err error) bool {
	if err == io.EOF {
		return true
	}
	ne, ok := err.(*net.OpError)
	return ok && ne.Op == "read"
}

func (rl *clientConnReadLoop) cleanup() {
	cc := rl.cc
	defer cc.tconn.Close()
	defer cc.t.connPool().MarkDead(cc)
	defer close(cc.readerDone)

	if cc.idleTimer != nil {
		cc.idleTimer.Stop()
	}

	// Close any response bodies if the server closes prematurely.
	// TODO: also do this if we've written the headers but not
	// gotten a response yet.
	err := cc.readerErr
	cc.mu.Lock()
	if cc.goAway != nil && isEOFOrNetReadError(err) {
		err = GoAwayError{
			LastStreamID: cc.goAway.LastStreamID,
			ErrCode:      cc.goAway.ErrCode,
			DebugData:    cc.goAwayDebug,
		}
	} else if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	for _, cs := range cc.streams {
		cs.bufPipe.CloseWithError(err) // no-op if already closed
		select {
		case cs.resc <- resAndError{err: err}:
		default:
		}
		close(cs.done)
	}
	cc.closed = true
	cc.cond.Broadcast()
	cc.mu.Unlock()
}

func (rl *clientConnReadLoop) run() error {
	cc := rl.cc
	rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
	gotReply := false // ever saw a HEADERS reply
	gotSettings := false
	readIdleTimeout := cc.t.ReadIdleTimeout
	var t *time.Timer
	if readIdleTimeout != 0 {
		t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
		defer t.Stop()
	}
	for {
		f, err := cc.fr.ReadFrame()
		if t != nil {
			t.Reset(readIdleTimeout)
		}
		if err != nil {
			cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
		}
		if se, ok := err.(StreamError); ok {
			if cs := cc.streamByID(se.StreamID, false); cs != nil {
				cs.cc.writeStreamReset(cs.ID, se.Code, err)
				cs.cc.forgetStreamID(cs.ID)
				if se.Cause == nil {
					se.Cause = cc.fr.errDetail
				}
				rl.endStreamError(cs, se)
			}
			continue
		} else if err != nil {
			return err
		}
		if VerboseLogs {
			cc.vlogf("http2: Transport received %s", summarizeFrame(f))
		}
		if !gotSettings {
			if _, ok := f.(*SettingsFrame); !ok {
				cc.logf("protocol error: received %T before a SETTINGS frame", f)
				return ConnectionError(ErrCodeProtocol)
			}
			gotSettings = true
		}
		maybeIdle := false // whether frame might transition us to idle

		switch f := f.(type) {
		case *MetaHeadersFrame:
			err = rl.processHeaders(f)
			maybeIdle = true
			gotReply = true
		case *DataFrame:
			err = rl.processData(f)
			maybeIdle = true
		case *GoAwayFrame:
			err = rl.processGoAway(f)
			maybeIdle = true
		case *RSTStreamFrame:
			err = rl.processResetStream(f)
			maybeIdle = true
		case *SettingsFrame:
			err = rl.processSettings(f)
		case *PushPromiseFrame:
			err = rl.processPushPromise(f)
		case *WindowUpdateFrame:
			err = rl.processWindowUpdate(f)
		case *PingFrame:
			err = rl.processPing(f)
		default:
			cc.logf("Transport: unhandled response frame type %T", f)
		}
		if err != nil {
			if VerboseLogs {
				cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
			}
			return err
		}
		if rl.closeWhenIdle && gotReply && maybeIdle {
			cc.closeIfIdle()
		}
	}
}

func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
	cc := rl.cc
	cs := cc.streamByID(f.StreamID, false)
	if cs == nil {
		// We'd get here if we canceled a request while the server had its
		// response still in flight. So if this was just something we
		// canceled, ignore it.
		return nil
	}
	if f.StreamEnded() {
		// Issue 20521: If the stream has ended, streamByID() causes
		// clientStream.done to be closed, which causes the request's
		// bodyWriter to be closed with an errStreamClosed, which may be
		// received by clientConn.RoundTrip before the result of processing
		// these headers. Deferring stream closure allows the header
		// processing to occur first. clientConn.RoundTrip may still receive
		// the bodyWriter error first, but the fix for issue 16102 prioritises
		// any response.
		//
		// Issue 22413: If there is no request body, we should close the
		// stream before writing to cs.resc so that the stream is closed
		// immediately once RoundTrip returns.
		if cs.req.Body != nil {
			defer cc.forgetStreamID(f.StreamID)
		} else {
			cc.forgetStreamID(f.StreamID)
		}
	}
	if !cs.firstByte {
		if cs.trace != nil {
			// TODO(bradfitz): move first response byte earlier, when we
			// first read the 9 byte header, not waiting until all the
			// HEADERS+CONTINUATION frames have been merged. This works
			// for now.
			traceFirstResponseByte(cs.trace)
		}
		cs.firstByte = true
	}
	if !cs.pastHeaders {
		cs.pastHeaders = true
	} else {
		return rl.processTrailers(cs, f)
	}

	res, err := rl.handleResponse(cs, f)
	if err != nil {
		if _, ok := err.(ConnectionError); ok {
			return err
		}
		// Any other error type is a stream error.
		cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err)
		cc.forgetStreamID(cs.ID)
		cs.resc <- resAndError{err: err}
		return nil // return nil from process* funcs to keep conn alive
	}
	if res == nil {
		// (nil, nil) special case. See handleResponse docs.
		return nil
	}
	cs.resTrailer = &res.Trailer
	cs.resc <- resAndError{res: res}
	return nil
}

// may return error types nil, or ConnectionError. Any other error value
// is a StreamError of type ErrCodeProtocol. The returned error in that case
// is the detail.
//
// As a special case, handleResponse may return (nil, nil) to skip the
// frame (currently only used for 1xx responses).
func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) {
	if f.Truncated {
		return nil, errResponseHeaderListSize
	}

	status := f.PseudoValue("status")
	if status == "" {
		return nil, errors.New("malformed response from server: missing status pseudo header")
	}
	statusCode, err := strconv.Atoi(status)
	if err != nil {
		return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header")
	}

	regularFields := f.RegularFields()
	strs := make([]string, len(regularFields))
	header := make(http.Header, len(regularFields))
	res := &http.Response{
		Proto:      "HTTP/2.0",
		ProtoMajor: 2,
		Header:     header,
		StatusCode: statusCode,
		Status:     status + " " + http.StatusText(statusCode),
	}
	for _, hf := range regularFields {
		key := http.CanonicalHeaderKey(hf.Name)
		if key == "Trailer" {
			t := res.Trailer
			if t == nil {
				t = make(http.Header)
				res.Trailer = t
			}
			foreachHeaderElement(hf.Value, func(v string) {
				t[http.CanonicalHeaderKey(v)] = nil
			})
		} else {
			vv := header[key]
			if vv == nil && len(strs) > 0 {
				// More than likely this will be a single-element key.
				// Most headers aren't multi-valued.
				// Set the capacity on strs[0] to 1, so any future append
				// won't extend the slice into the other strings.
				vv, strs = strs[:1:1], strs[1:]
				vv[0] = hf.Value
				header[key] = vv
			} else {
				header[key] = append(vv, hf.Value)
			}
		}
	}

	if statusCode >= 100 && statusCode <= 199 {
		cs.num1xx++
		const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http
		if cs.num1xx > max1xxResponses {
			return nil, errors.New("http2: too many 1xx informational responses")
		}
		if fn := cs.get1xxTraceFunc(); fn != nil {
			if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil {
				return nil, err
			}
		}
		if statusCode == 100 {
			traceGot100Continue(cs.trace)
			if cs.on100 != nil {
				cs.on100() // forces any write delay timer to fire
			}
		}
		cs.pastHeaders = false // do it all again
		return nil, nil
	}

	streamEnded := f.StreamEnded()
	isHead := cs.req.Method == "HEAD"
	if !streamEnded || isHead {
		res.ContentLength = -1
		if clens := res.Header["Content-Length"]; len(clens) == 1 {
			if cl, err := strconv.ParseUint(clens[0], 10, 63); err == nil {
				res.ContentLength = int64(cl)
			} else {
				// TODO: care? unlike http/1, it won't mess up our framing, so
				// it's more safe smuggling-wise to ignore.
			}
		} else if len(clens) > 1 {
			// TODO: care? unlike http/1, it won't mess up our framing, so
			// it's more safe smuggling-wise to ignore.
		}
	}

	if streamEnded || isHead {
		res.Body = noBody
		return res, nil
	}

	cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}}
	cs.bytesRemain = res.ContentLength
	res.Body = transportResponseBody{cs}
	go cs.awaitRequestCancel(cs.req)

	if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" {
		res.Header.Del("Content-Encoding")
		res.Header.Del("Content-Length")
		res.ContentLength = -1
		res.Body = &gzipReader{body: res.Body}
		res.Uncompressed = true
	}
	return res, nil
}

func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error {
	if cs.pastTrailers {
		// Too many HEADERS frames for this stream.
		return ConnectionError(ErrCodeProtocol)
	}
	cs.pastTrailers = true
	if !f.StreamEnded() {
		// We expect that any headers for trailers also has END_STREAM.
		return ConnectionError(ErrCodeProtocol)
	}
	if len(f.PseudoFields()) > 0 {
		// No pseudo header fields are defined for trailers.
		// TODO: ConnectionError might be overly harsh? Check.
		return ConnectionError(ErrCodeProtocol)
	}

	trailer := make(http.Header)
	for _, hf := range f.RegularFields() {
		key := http.CanonicalHeaderKey(hf.Name)
		trailer[key] = append(trailer[key], hf.Value)
	}
	cs.trailer = trailer

	rl.endStream(cs)
	return nil
}

// transportResponseBody is the concrete type of Transport.RoundTrip's
// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body.
// On Close it sends RST_STREAM if EOF wasn't already seen.
type transportResponseBody struct {
	cs *clientStream
}

func (b transportResponseBody) Read(p []byte) (n int, err error) {
	cs := b.cs
	cc := cs.cc

	if cs.readErr != nil {
		return 0, cs.readErr
	}
	n, err = b.cs.bufPipe.Read(p)
	if cs.bytesRemain != -1 {
		if int64(n) > cs.bytesRemain {
			n = int(cs.bytesRemain)
			if err == nil {
				err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
				cc.writeStreamReset(cs.ID, ErrCodeProtocol, err)
			}
			cs.readErr = err
			return int(cs.bytesRemain), err
		}
		cs.bytesRemain -= int64(n)
		if err == io.EOF && cs.bytesRemain > 0 {
			err = io.ErrUnexpectedEOF
			cs.readErr = err
			return n, err
		}
	}
	if n == 0 {
		// No flow control tokens to send back.
		return
	}

	cc.mu.Lock()
	defer cc.mu.Unlock()

	var connAdd, streamAdd int32
	// Check the conn-level first, before the stream-level.
	if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
		connAdd = transportDefaultConnFlow - v
		cc.inflow.add(connAdd)
	}
	if err == nil { // No need to refresh if the stream is over or failed.
		// Consider any buffered body data (read from the conn but not
		// consumed by the client) when computing flow control for this
		// stream.
		v := int(cs.inflow.available()) + cs.bufPipe.Len()
		if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
			streamAdd = int32(transportDefaultStreamFlow - v)
			cs.inflow.add(streamAdd)
		}
	}
	if connAdd != 0 || streamAdd != 0 {
		cc.wmu.Lock()
		defer cc.wmu.Unlock()
		if connAdd != 0 {
			cc.fr.WriteWindowUpdate(0, mustUint31(connAdd))
		}
		if streamAdd != 0 {
			cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd))
		}
		cc.bw.Flush()
	}
	return
}

var errClosedResponseBody = errors.New("http2: response body closed")

func (b transportResponseBody) Close() error {
	cs := b.cs
	cc := cs.cc

	serverSentStreamEnd := cs.bufPipe.Err() == io.EOF
	unread := cs.bufPipe.Len()

	if unread > 0 || !serverSentStreamEnd {
		cc.mu.Lock()
		cc.wmu.Lock()
		if !serverSentStreamEnd {
			cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
			cs.didReset = true
		}
		// Return connection-level flow control.
		if unread > 0 {
			cc.inflow.add(int32(unread))
			cc.fr.WriteWindowUpdate(0, uint32(unread))
		}
		cc.bw.Flush()
		cc.wmu.Unlock()
		cc.mu.Unlock()
	}

	cs.bufPipe.BreakWithError(errClosedResponseBody)
	cc.forgetStreamID(cs.ID)
	return nil
}

func (rl *clientConnReadLoop) processData(f *DataFrame) error {
	cc := rl.cc
	cs := cc.streamByID(f.StreamID, f.StreamEnded())
	data := f.Data()
	if cs == nil {
		cc.mu.Lock()
		neverSent := cc.nextStreamID
		cc.mu.Unlock()
		if f.StreamID >= neverSent {
			// We never asked for this.
			cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
			return ConnectionError(ErrCodeProtocol)
		}
		// We probably did ask for this, but canceled. Just ignore it.
		// TODO: be stricter here? only silently ignore things which we
		// canceled, but not things which were closed normally by the peer?
		// Tough without accumulating too much state.

		// But at least return their flow control:
		if f.Length > 0 {
			cc.mu.Lock()
			cc.inflow.add(int32(f.Length))
			cc.mu.Unlock()

			cc.wmu.Lock()
			cc.fr.WriteWindowUpdate(0, uint32(f.Length))
			cc.bw.Flush()
			cc.wmu.Unlock()
		}
		return nil
	}
	if !cs.firstByte {
		cc.logf("protocol error: received DATA before a HEADERS frame")
		rl.endStreamError(cs, StreamError{
			StreamID: f.StreamID,
			Code:     ErrCodeProtocol,
		})
		return nil
	}
	if f.Length > 0 {
		if cs.req.Method == "HEAD" && len(data) > 0 {
			cc.logf("protocol error: received DATA on a HEAD request")
			rl.endStreamError(cs, StreamError{
				StreamID: f.StreamID,
				Code:     ErrCodeProtocol,
			})
			return nil
		}
		// Check connection-level flow control.
		cc.mu.Lock()
		if cs.inflow.available() >= int32(f.Length) {
			cs.inflow.take(int32(f.Length))
		} else {
			cc.mu.Unlock()
			return ConnectionError(ErrCodeFlowControl)
		}
		// Return any padded flow control now, since we won't
		// refund it later on body reads.
		var refund int
		if pad := int(f.Length) - len(data); pad > 0 {
			refund += pad
		}
		// Return len(data) now if the stream is already closed,
		// since data will never be read.
		didReset := cs.didReset
		if didReset {
			refund += len(data)
		}
		if refund > 0 {
			cc.inflow.add(int32(refund))
			cc.wmu.Lock()
			cc.fr.WriteWindowUpdate(0, uint32(refund))
			if !didReset {
				cs.inflow.add(int32(refund))
				cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
			}
			cc.bw.Flush()
			cc.wmu.Unlock()
		}
		cc.mu.Unlock()

		if len(data) > 0 && !didReset {
			if _, err := cs.bufPipe.Write(data); err != nil {
				rl.endStreamError(cs, err)
				return err
			}
		}
	}

	if f.StreamEnded() {
		rl.endStream(cs)
	}
	return nil
}

func (rl *clientConnReadLoop) endStream(cs *clientStream) {
	// TODO: check that any declared content-length matches, like
	// server.go's (*stream).endStream method.
	rl.endStreamError(cs, nil)
}

func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
	var code func()
	if err == nil {
		err = io.EOF
		code = cs.copyTrailers
	}
	if isConnectionCloseRequest(cs.req) {
		rl.closeWhenIdle = true
	}
	cs.bufPipe.closeWithErrorAndCode(err, code)

	select {
	case cs.resc <- resAndError{err: err}:
	default:
	}
}

func (cs *clientStream) copyTrailers() {
	for k, vv := range cs.trailer {
		t := cs.resTrailer
		if *t == nil {
			*t = make(http.Header)
		}
		(*t)[k] = vv
	}
}

func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
	cc := rl.cc
	cc.t.connPool().MarkDead(cc)
	if f.ErrCode != 0 {
		// TODO: deal with GOAWAY more. particularly the error code
		cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
	}
	cc.setGoAway(f)
	return nil
}

func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
	cc := rl.cc
	cc.mu.Lock()
	defer cc.mu.Unlock()

	if f.IsAck() {
		if cc.wantSettingsAck {
			cc.wantSettingsAck = false
			return nil
		}
		return ConnectionError(ErrCodeProtocol)
	}

	err := f.ForeachSetting(func(s Setting) error {
		switch s.ID {
		case SettingMaxFrameSize:
			cc.maxFrameSize = s.Val
		case SettingMaxConcurrentStreams:
			cc.maxConcurrentStreams = s.Val
		case SettingMaxHeaderListSize:
			cc.peerMaxHeaderListSize = uint64(s.Val)
		case SettingInitialWindowSize:
			// Values above the maximum flow-control window size of 2^31-1 MUST be treated as a connection error (Section 5.4.1) of type FLOW_CONTROL_ERROR.
			if s.Val > math.MaxInt32 {
				return ConnectionError(ErrCodeFlowControl)
			}

			// Adjust flow control of currently-open frames by the difference of the old initial window size and this one.
			delta := int32(s.Val) - int32(cc.initialWindowSize)
			for _, cs := range cc.streams {
				cs.flow.add(delta)
			}
			cc.cond.Broadcast()

			cc.initialWindowSize = s.Val
		default:
			// TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
			cc.vlogf("Unhandled Setting: %v", s)
		}
		return nil
	})
	if err != nil {
		return err
	}

	cc.wmu.Lock()
	defer cc.wmu.Unlock()

	cc.fr.WriteSettingsAck()
	cc.bw.Flush()
	return cc.werr
}
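
// A worked example of the SETTINGS_INITIAL_WINDOW_SIZE adjustment above
// (values are illustrative): if cc.initialWindowSize was 65535 and the server
// now advertises 1048576, delta is +983041 and every currently-open stream's
// send-flow window grows by that amount; a smaller new value yields a
// negative delta and shrinks the windows instead.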

func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
	cc := rl.cc
	cs := cc.streamByID(f.StreamID, false)
	if f.StreamID != 0 && cs == nil {
		return nil
	}

	cc.mu.Lock()
	defer cc.mu.Unlock()

	fl := &cc.flow
	if cs != nil {
		fl = &cs.flow
	}
	if !fl.add(int32(f.Increment)) {
		return ConnectionError(ErrCodeFlowControl)
	}
	cc.cond.Broadcast()
	return nil
}

func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
	cs := rl.cc.streamByID(f.StreamID, true)
	if cs == nil {
		// TODO: return error if server tries to RST_STREAM an idle stream
		return nil
	}
	select {
	case <-cs.peerReset:
		// Already reset. This is the only goroutine which closes this, so there isn't a race.
	default:
		err := streamError(cs.ID, f.ErrCode)
		cs.resetErr = err
		close(cs.peerReset)
		cs.bufPipe.CloseWithError(err)
		cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
	}
	return nil
}
// Ping sends a PING frame to the server and waits for the ack.
func (cc *ClientConn) Ping(ctx context.Context) error {
	c := make(chan struct{})
	// Generate a random payload
	var p [8]byte
	for {
		if _, err := rand.Read(p[:]); err != nil {
			return err
		}
		cc.mu.Lock()
		// check for dup before insert
		if _, found := cc.pings[p]; !found {
			cc.pings[p] = c
			cc.mu.Unlock()
			break
		}
		cc.mu.Unlock()
	}
	cc.wmu.Lock()
	if err := cc.fr.WritePing(false, p); err != nil {
		cc.wmu.Unlock()
		return err
	}
	if err := cc.bw.Flush(); err != nil {
		cc.wmu.Unlock()
		return err
	}
	cc.wmu.Unlock()
	select {
	case <-c:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-cc.readerDone:
		// connection closed
		return cc.readerErr
	}
}
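
// A minimal usage sketch for Ping (the client and connection setup are
// assumed and not shown here): a caller holding a *ClientConn can
// health-check the connection with a bounded context:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	if err := cc.Ping(ctx); err != nil {
//		// No ack before the deadline, or the connection died:
//		// treat cc as unhealthy.
//	}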

func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
	if f.IsAck() {
		cc := rl.cc
		cc.mu.Lock()
		defer cc.mu.Unlock()
		// If ack, notify listener if any
		if c, ok := cc.pings[f.Data]; ok {
			close(c)
			delete(cc.pings, f.Data)
		}
		return nil
	}
	cc := rl.cc
	cc.wmu.Lock()
	defer cc.wmu.Unlock()
	if err := cc.fr.WritePing(true, f.Data); err != nil {
		return err
	}
	return cc.bw.Flush()
}

func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
	// We told the peer we don't want them. Spec says: "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH setting of the peer endpoint is set to 0. An endpoint that has set this setting and has received acknowledgement MUST treat the receipt of a PUSH_PROMISE frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR."
	return ConnectionError(ErrCodeProtocol)
}

func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
	// TODO: map err to more interesting error codes, once the HTTP community comes up with some. But currently for RST_STREAM there's no equivalent to GOAWAY frame's debug data, and the error codes are all pretty vague ("cancel").
	cc.wmu.Lock()
	cc.fr.WriteRSTStream(streamID, code)
	cc.bw.Flush()
	cc.wmu.Unlock()
}

var (
	errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
	errRequestHeaderListSize  = errors.New("http2: request header list larger than peer's advertised limit")
)

func (cc *ClientConn) logf(format string, args ...interface{}) {
	cc.t.logf(format, args...)
}

func (cc *ClientConn) vlogf(format string, args ...interface{}) {
	cc.t.vlogf(format, args...)
}

func (t *Transport) vlogf(format string, args ...interface{}) {
	if VerboseLogs {
		t.logf(format, args...)
	}
}

func (t *Transport) logf(format string, args ...interface{}) {
	log.Printf(format, args...)
}

var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))

func strSliceContains(ss []string, s string) bool {
	for _, v := range ss {
		if v == s {
			return true
		}
	}
	return false
}

type erringRoundTripper struct{ err error }

func (rt erringRoundTripper) RoundTripErr() error                             { return rt.err }
func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
// gzipReader wraps a response body so it can lazily call gzip.NewReader on the first call to Read
type gzipReader struct {
	_    incomparable
	body io.ReadCloser // underlying Response.Body
	zr   *gzip.Reader  // lazily-initialized gzip reader
	zerr error         // sticky error
}

func (gz *gzipReader) Read(p []byte) (n int, err error) {
	if gz.zerr != nil {
		return 0, gz.zerr
	}
	if gz.zr == nil {
		gz.zr, err = gzip.NewReader(gz.body)
		if err != nil {
			gz.zerr = err
			return 0, err
		}
	}
	return gz.zr.Read(p)
}

func (gz *gzipReader) Close() error {
	return gz.body.Close()
}
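
// Sketch of how this type is used by the response-handling code earlier in
// this file (approximate, shown only for context): when the Transport itself
// requested gzip and the response comes back gzip-encoded, the body is
// wrapped so decompression happens lazily on the first Read, roughly:
//
//	res.Header.Del("Content-Encoding")
//	res.Header.Del("Content-Length")
//	res.ContentLength = -1
//	res.Body = &gzipReader{body: res.Body}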

type errorReader struct{ err error }

func (r errorReader) Read(p []byte) (int, error) { return 0, r.err }
// bodyWriterState encapsulates various state around the Transport's writing of the request body, particularly regarding doing delayed writes of the body when the request contains "Expect: 100-continue".
type bodyWriterState struct {
	cs     *clientStream
	timer  *time.Timer   // if non-nil, we're doing a delayed write
	fnonce *sync.Once    // to call fn with
	fn     func()        // the code to run in the goroutine, writing the body
	resc   chan error    // result of fn's execution
	delay  time.Duration // how long we should delay a delayed write for
}

func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) {
	s.cs = cs
	if body == nil {
		return
	}
	resc := make(chan error, 1)
	s.resc = resc
	s.fn = func() {
		cs.cc.mu.Lock()
		cs.startedWrite = true
		cs.cc.mu.Unlock()
		resc <- cs.writeRequestBody(body, cs.req.Body)
	}
	s.delay = t.expectContinueTimeout()
	if s.delay == 0 ||
		!httpguts.HeaderValuesContainsToken(
			cs.req.Header["Expect"],
			"100-continue") {
		return
	}
	s.fnonce = new(sync.Once)

	// Arm the timer with a very large duration, which we'll intentionally lower later. It has to be large now because we need a handle to it before writing the headers, but the s.delay value is defined to not start until after the request headers were written.
	const hugeDuration = 365 * 24 * time.Hour
	s.timer = time.AfterFunc(hugeDuration, func() {
		s.fnonce.Do(s.fn)
	})
	return
}

func (s bodyWriterState) cancel() {
	if s.timer != nil {
		if s.timer.Stop() {
			s.resc <- nil
		}
	}
}

func (s bodyWriterState) on100() {
	if s.timer == nil {
		// If we didn't do a delayed write, ignore the server's bogus 100 continue response.
		return
	}
	s.timer.Stop()
	go func() { s.fnonce.Do(s.fn) }()
}
// scheduleBodyWrite starts writing the body, either immediately (in the common case) or after the delay timeout. It should not be called until after the headers have been written.
func (s bodyWriterState) scheduleBodyWrite() {
	if s.timer == nil {
		// We're not doing a delayed write (see getBodyWriterState), so just start the writing goroutine immediately.
		go s.fn()
		return
	}
	traceWait100Continue(s.cs.trace)
	if s.timer.Stop() {
		s.timer.Reset(s.delay)
	}
}
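
// Rough lifecycle sketch for bodyWriterState on a request carrying
// "Expect: 100-continue", using the names from getBodyWriterState above
// (the caller wiring lives elsewhere in this file):
//
//	s := t.getBodyWriterState(cs, body) // arms a far-future timer
//	// ... request headers are written ...
//	s.scheduleBodyWrite() // re-arms the timer with the real expect-continue delay
//	// then either the server's 100 Continue arrives and s.on100() writes the
//	// body immediately, or the timer fires after s.delay and writes it anyway;
//	// s.cancel() stops a delayed write that never started.
//
// Without the Expect header there is no timer and scheduleBodyWrite starts the
// body-writing goroutine right away.
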
// isConnectionCloseRequest reports whether req should use its own connection for a single request and then close the connection.
func isConnectionCloseRequest(req *http.Request) bool {
	return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close")
}
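
// For example, either of the following (illustrative) makes
// isConnectionCloseRequest report true, so the request gets its own
// connection which is closed once the response is done:
//
//	req.Close = true
//	// or
//	req.Header.Set("Connection", "close")
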
// registerHTTPSProtocol calls Transport.RegisterProtocol, converting panics into errors.
func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("%v", e)
		}
	}()
	t.RegisterProtocol("https", rt)
	return nil
}
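
// A sketch of the intended call site (the real wiring lives elsewhere in this
// package, where an http1 Transport t1 is configured to use an http2
// Transport t2; the names t1 and t2 are assumptions here):
//
//	if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
//		// configuration failed; leave t1 as plain http1
//	}
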
// noDialH2RoundTripper is a RoundTripper which only tries to complete the request if there's already a cached connection to the host. (The field is exported so it can be accessed via reflect from net/http; tested by TestNoDialH2RoundTripperType)
type noDialH2RoundTripper struct{ *Transport }

func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	res, err := rt.Transport.RoundTrip(req)
	if isNoCachedConnError(err) {
		return nil, http.ErrSkipAltProtocol
	}
	return res, err
}

func (t *Transport) idleConnTimeout() time.Duration {
	if t.t1 != nil {
		return t.t1.IdleConnTimeout
	}
	return 0
}

func traceGetConn(req *http.Request, hostPort string) {
	trace := httptrace.ContextClientTrace(req.Context())
	if trace == nil || trace.GetConn == nil {
		return
	}
	trace.GetConn(hostPort)
}

func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
	trace := httptrace.ContextClientTrace(req.Context())
	if trace == nil || trace.GotConn == nil {
		return
	}
	ci := httptrace.GotConnInfo{Conn: cc.tconn}
	ci.Reused = reused
	cc.mu.Lock()
	ci.WasIdle = len(cc.streams) == 0 && reused
	if ci.WasIdle && !cc.lastActive.IsZero() {
		ci.IdleTime = time.Now().Sub(cc.lastActive)
	}
	cc.mu.Unlock()

	trace.GotConn(ci)
}
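
// The trace* helpers in this file fire only when the caller attached an
// httptrace.ClientTrace to the request's context. A minimal sketch of how a
// caller might observe them (illustrative; req and the enclosing client are
// assumed, not part of this package):
//
//	trace := &httptrace.ClientTrace{
//		GotConn:              func(ci httptrace.GotConnInfo) { log.Printf("conn reused=%v", ci.Reused) },
//		GotFirstResponseByte: func() { log.Print("first response byte") },
//	}
//	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))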

func traceWroteHeaders(trace *httptrace.ClientTrace) {
	if trace != nil && trace.WroteHeaders != nil {
		trace.WroteHeaders()
	}
}

func traceGot100Continue(trace *httptrace.ClientTrace) {
	if trace != nil && trace.Got100Continue != nil {
		trace.Got100Continue()
	}
}

func traceWait100Continue(trace *httptrace.ClientTrace) {
	if trace != nil && trace.Wait100Continue != nil {
		trace.Wait100Continue()
	}
}

func traceWroteRequest(trace *httptrace.ClientTrace, err error) {
	if trace != nil && trace.WroteRequest != nil {
		trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
	}
}

func traceFirstResponseByte(trace *httptrace.ClientTrace) {
	if trace != nil && trace.GotFirstResponseByte != nil {
		trace.GotFirstResponseByte()
	}