// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
	"net/url"

	"golang.org/x/net/http/httpguts"
	"golang.org/x/net/http2/hpack"
)

// writeFramer is implemented by any type that is used to write frames.
type writeFramer interface {
	writeFrame(writeContext) error

	// staysWithinBuffer reports whether this writer promises that
	// it will only write less than or equal to size bytes, and it
	// won't Flush the write context.
	staysWithinBuffer(size int) bool
}

// writeContext is the interface needed by the various frame writer
// types below. All the writeFrame methods below are scheduled via the
// frame writing scheduler (see writeScheduler in writesched.go).
//
// This interface is implemented by *serverConn.
//
// TODO: decide whether to a) use this in the client code (which didn't
// end up using this yet, because it has a simpler design, not
// currently implementing priorities), or b) delete this and make the
// server code a bit more concrete.
type writeContext interface {
	Framer() *Framer
	Flush() error
	CloseConn() error

	// HeaderEncoder returns an HPACK encoder that writes to the
	// returned buffer.
	HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
}
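
// Illustrative sketch, not part of the original file: the server's frame
// scheduler queues writeFramer values and the *serverConn acts as the
// writeContext passed to writeFrame. Assuming a payload slice already in
// hand (sc, payload, and bufFree are hypothetical names used only here):
//
//	wr := &writeData{streamID: 1, p: payload, endStream: true}
//	if wr.staysWithinBuffer(bufFree) {
//		// small enough to be written without flushing the buffered writer
//	}
//	err := wr.writeFrame(sc) // sc is a *serverConn implementing writeContext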

// writeEndsStream reports whether w writes a frame that will transition
// the stream to a half-closed local state. This returns false for RST_STREAM,
// which closes the entire stream (not just the local half).
func writeEndsStream(w writeFramer) bool {
	switch v := w.(type) {
	case *writeData:
		return v.endStream
	case *writeResHeaders:
		return v.endStream
	case nil:
		// This can only happen if the caller reuses w after it's
		// been intentionally nil'ed out to prevent use. Keep this
		// here to catch future refactoring breaking it.
		panic("writeEndsStream called on nil writeFramer")
	}
	return false
}
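
// For example (illustrative, not from the original source):
//
//	writeEndsStream(&writeData{streamID: 3, endStream: true})      // true
//	writeEndsStream(&writeData{streamID: 3, endStream: false})     // false
//	writeEndsStream(StreamError{StreamID: 3, Code: ErrCodeCancel}) // false: RST_STREAM closes both halves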

type flushFrameWriter struct{}

func (flushFrameWriter) writeFrame(ctx writeContext) error {
	return ctx.Flush()
}

func (flushFrameWriter) staysWithinBuffer(max int) bool { return false }

type writeSettings []Setting

func (s writeSettings) staysWithinBuffer(max int) bool {
	const settingSize = 6 // uint16 + uint32
	return frameHeaderLen+settingSize*len(s) <= max
}

func (s writeSettings) writeFrame(ctx writeContext) error {
	return ctx.Framer().WriteSettings([]Setting(s)...)
}
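
// Worked example (not from the original source): a SETTINGS frame carrying
// three settings occupies frameHeaderLen (9 bytes) + 3*6 = 27 bytes on the
// wire, so a writeSettings of length 3 stays within any buffer with at
// least 27 bytes free.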

type writeGoAway struct {
	maxStreamID uint32
	code        ErrCode
}

func (p *writeGoAway) writeFrame(ctx writeContext) error {
	err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
	ctx.Flush() // ignore error: we're hanging up on them anyway
	return err
}

func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes

type writeData struct {
	streamID  uint32
	p         []byte
	endStream bool
}

func (w *writeData) String() string {
	return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
}

func (w *writeData) writeFrame(ctx writeContext) error {
	return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
}

func (w *writeData) staysWithinBuffer(max int) bool {
	return frameHeaderLen+len(w.p) <= max
}

// handlerPanicRST is the message sent from handler goroutines when
// the handler panics.
type handlerPanicRST struct {
	StreamID uint32
}

func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
	return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
}

func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }

func (se StreamError) writeFrame(ctx writeContext) error {
	return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
}

func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }

type writePingAck struct{ pf *PingFrame }

func (w writePingAck) writeFrame(ctx writeContext) error {
	return ctx.Framer().WritePing(true, w.pf.Data)
}

func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max }

type writeSettingsAck struct{}

func (writeSettingsAck) writeFrame(ctx writeContext) error {
	return ctx.Framer().WriteSettingsAck()
}

func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max }

// splitHeaderBlock splits headerBlock into fragments so that each fragment
// fits in a single frame, then calls fn for each fragment. firstFrag/lastFrag
// are true for the first/last fragment, respectively.
func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error {
	// For now we're lazy and just pick the minimum MAX_FRAME_SIZE
	// that all peers must support (16KB). Later we could care
	// more and send larger frames if the peer advertised it, but
	// there's little point. Most headers are small anyway (so we
	// generally won't have CONTINUATION frames), and extra frames
	// only waste 9 bytes anyway.
	const maxFrameSize = 16384

	first := true
	for len(headerBlock) > 0 {
		frag := headerBlock
		if len(frag) > maxFrameSize {
			frag = frag[:maxFrameSize]
		}
		headerBlock = headerBlock[len(frag):]
		if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
			return err
		}
		first = false
	}
	return nil
}
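
// Worked example (not from the original source): a 40,000-byte HPACK block
// is split into fragments of 16384, 16384, and 7232 bytes, with fn called
// with (firstFrag, lastFrag) = (true, false), (false, false), (false, true).
// A caller-side sketch, where ctx, block, and logFrag are hypothetical names
// used only for illustration:
//
//	logFrag := func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
//		log.Printf("fragment: %d bytes, first=%v, last=%v", len(frag), firstFrag, lastFrag)
//		return nil
//	}
//	_ = splitHeaderBlock(ctx, block, logFrag)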

// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
// for HTTP response headers or trailers from a server handler.
type writeResHeaders struct {
	streamID    uint32
	httpResCode int         // 0 means no ":status" line
	h           http.Header // may be nil
	trailers    []string    // if non-nil, which keys of h to write. nil means all.
	endStream   bool

	date          string
	contentType   string
	contentLength string
}

func encKV(enc *hpack.Encoder, k, v string) {
	if VerboseLogs {
		log.Printf("http2: server encoding header %q = %q", k, v)
	}
	enc.WriteField(hpack.HeaderField{Name: k, Value: v})
}

func (w *writeResHeaders) staysWithinBuffer(max int) bool {
	// TODO: this is a common one. It'd be nice to return true
	// here and get into the fast path if we could be clever and
	// calculate the size fast enough, or at least a conservative
	// upper bound that usually fires. (Maybe if w.h and
	// w.trailers are nil, so we don't need to enumerate it.)
	// Otherwise I'm afraid that just calculating the length to
	// answer this question would be slower than the ~2µs benefit.
	return false
}

func (w *writeResHeaders) writeFrame(ctx writeContext) error {
	enc, buf := ctx.HeaderEncoder()
	buf.Reset()

	if w.httpResCode != 0 {
		encKV(enc, ":status", httpCodeString(w.httpResCode))
	}

	encodeHeaders(enc, w.h, w.trailers)

	if w.contentType != "" {
		encKV(enc, "content-type", w.contentType)
	}
	if w.contentLength != "" {
		encKV(enc, "content-length", w.contentLength)
	}
	if w.date != "" {
		encKV(enc, "date", w.date)
	}

	headerBlock := buf.Bytes()
	if len(headerBlock) == 0 && w.trailers == nil {
		panic("unexpected empty hpack")
	}

	return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
}

func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
	if firstFrag {
		return ctx.Framer().WriteHeaders(HeadersFrameParam{
			StreamID:      w.streamID,
			BlockFragment: frag,
			EndStream:     w.endStream,
			EndHeaders:    lastFrag,
		})
	} else {
		return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
	}
}

// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
type writePushPromise struct {
	streamID uint32   // pusher stream
	method   string   // for :method
	url      *url.URL // for :scheme, :authority, :path
	h        http.Header

	// Creates an ID for a pushed stream. This runs on serveG just before
	// the frame is written. The returned ID is copied to promisedID.
	allocatePromisedID func() (uint32, error)
	promisedID         uint32
}

func (w *writePushPromise) staysWithinBuffer(max int) bool {
	// TODO: see writeResHeaders.staysWithinBuffer
	return false
}

func (w *writePushPromise) writeFrame(ctx writeContext) error {
	enc, buf := ctx.HeaderEncoder()
	buf.Reset()

	encKV(enc, ":method", w.method)
	encKV(enc, ":scheme", w.url.Scheme)
	encKV(enc, ":authority", w.url.Host)
	encKV(enc, ":path", w.url.RequestURI())
	encodeHeaders(enc, w.h, nil)

	headerBlock := buf.Bytes()
	if len(headerBlock) == 0 {
		panic("unexpected empty hpack")
	}

	return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
}

func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
	if firstFrag {
		return ctx.Framer().WritePushPromise(PushPromiseParam{
			StreamID:      w.streamID,
			PromiseID:     w.promisedID,
			BlockFragment: frag,
			EndHeaders:    lastFrag,
		})
	} else {
		return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
	}
}

type write100ContinueHeadersFrame struct {
	streamID uint32
}

func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
	enc, buf := ctx.HeaderEncoder()
	buf.Reset()
	encKV(enc, ":status", "100")
	return ctx.Framer().WriteHeaders(HeadersFrameParam{
		StreamID:      w.streamID,
		BlockFragment: buf.Bytes(),
		EndStream:     false,
		EndHeaders:    true,
	})
}

func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
	// Sloppy but conservative:
	return 9+2*(len(":status")+len("100")) <= max
}

type writeWindowUpdate struct {
	streamID uint32 // or 0 for conn-level
	n        uint32
}

func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }

func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
	return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
}

// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
// is encoded only if k is in keys.
func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
	if keys == nil {
		sorter := sorterPool.Get().(*sorter)
		// Using defer here, since the returned keys from the
		// sorter.Keys method is only valid until the sorter
		// is returned:
		defer sorterPool.Put(sorter)
		keys = sorter.Keys(h)
	}
	for _, k := range keys {
		vv := h[k]
		k = lowerHeader(k)
		if !validWireHeaderFieldName(k) {
			// Skip it as backup paranoia. Per
			// golang.org/issue/14048, these should
			// already be rejected at a higher level.
			continue
		}
		isTE := k == "transfer-encoding"
		for _, v := range vv {
			if !httpguts.ValidHeaderFieldValue(v) {
				// TODO: return an error? golang.org/issue/14048
				// For now just omit it.
				continue
			}
			// TODO: more of "8.1.2.2 Connection-Specific Header Fields"
			if isTE && v != "trailers" {
				continue
			}
			encKV(enc, k, v)
		}
	}
}
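
// Illustrative example (not from the original source): given a hypothetical
// encoder enc and
//
//	h := http.Header{
//		"Content-Type":      {"text/plain"},
//		"Transfer-Encoding": {"chunked"},
//	}
//	encodeHeaders(enc, h, nil)
//
// the header names are lowercased before encoding, "content-type: text/plain"
// is written via encKV, and the "chunked" value is dropped because only
// "trailers" is allowed for transfer-encoding in HTTP/2.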