// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"context"
	"errors"
	"fmt"
	"hash/crc32"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"reflect"
	"strconv"
	"strings"
	"time"

	"cloud.google.com/go/internal/trace"
	"google.golang.org/api/googleapi"
)

// crc32cTable is the CRC32C (Castagnoli) table used to accumulate the running
// checksum of downloaded content, compared against the server-reported value.
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
// ReaderObjectAttrs are attributes about the object being read. These are populated
// during the New call. This struct only holds a subset of object attributes: to
// get the full set of attributes, use ObjectHandle.Attrs.
//
// Each field is read-only.
type ReaderObjectAttrs struct {
	// Size is the length of the object's content.
	Size int64

	// StartOffset is the byte offset within the object
	// from which reading begins. This value is only non-zero for range
	// requests.
	StartOffset int64

	// ContentType is the MIME type of the object's content.
	ContentType string

	// ContentEncoding is the encoding of the object's content.
	ContentEncoding string

	// CacheControl specifies whether and for how long browser and Internet
	// caches are allowed to cache your objects.
	CacheControl string

	// LastModified is the time that the object was last modified.
	LastModified time.Time

	// Generation is the generation number of the object's content.
	Generation int64

	// Metageneration is the version of the metadata for this object at
	// this generation. This field is used for preconditions and for
	// detecting changes in metadata. A metageneration number is only
	// meaningful in the context of a particular generation of a
	// particular object.
	Metageneration int64
}
NewReader creates a new Reader to read the contents of the object. ErrObjectNotExist will be returned if the object is not found. The caller must call Close on the returned Reader when done reading.
func ( *ObjectHandle) ( context.Context) (*Reader, error) {
	return .NewRangeReader(, 0, -1)
}
NewRangeReader reads part of an object, reading at most length bytes starting at the given offset. If length is negative, the object is read until the end. If offset is negative, the object is read abs(offset) bytes from the end, and length must also be negative to indicate all remaining bytes will be read. If the object's metadata property "Content-Encoding" is set to "gzip" or satisfies decompressive transcoding per https://cloud.google.com/storage/docs/transcoding that file will be served back whole, regardless of the requested range as Google Cloud Storage dictates.
func ( *ObjectHandle) ( context.Context, ,  int64) ( *Reader,  error) {
	 = trace.StartSpan(, "cloud.google.com/go/storage.Object.NewRangeReader")
	defer func() { trace.EndSpan(, ) }()

	if  := .validate();  != nil {
		return nil, 
	}
	if  < 0 &&  >= 0 {
		return nil, fmt.Errorf("storage: invalid offset %d < 0 requires negative length", )
	}
	if .conds != nil {
		if  := .conds.validate("NewRangeReader");  != nil {
			return nil, 
		}
	}
	 := &url.URL{
		Scheme: .c.scheme,
		Host:   .c.readHost,
		Path:   fmt.Sprintf("/%s/%s", .bucket, .object),
	}
	 := "GET"
	if  == 0 {
		 = "HEAD"
	}
	,  := http.NewRequest(, .String(), nil)
	if  != nil {
		return nil, 
	}
	 = .WithContext()
	if .userProject != "" {
		.Header.Set("X-Goog-User-Project", .userProject)
	}
	if .readCompressed {
		.Header.Set("Accept-Encoding", "gzip")
	}
	if  := setEncryptionHeaders(.Header, .encryptionKey, false);  != nil {
		return nil, 
	}

	 := .gen
Define a function that initiates a Read with offset and length, assuming we have already read seen bytes.
	 := func( int64) (*http.Response, error) {
		 :=  + 
		if  < 0 &&  < 0 {
			.Header.Set("Range", fmt.Sprintf("bytes=%d", ))
		} else if  < 0 &&  > 0 {
			.Header.Set("Range", fmt.Sprintf("bytes=%d-", ))
The end character isn't affected by how many bytes we've seen.
			.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", , +-1))
We wait to assign conditions here because the generation number can change in between reopen() runs.
		.URL.RawQuery = conditionsQuery(, .conds)
		var  *http.Response
		 = runWithRetry(, func() error {
			,  = .c.hc.Do()
			if  != nil {
				return 
			}
			if .StatusCode == http.StatusNotFound {
				.Body.Close()
				return ErrObjectNotExist
			}
			if .StatusCode < 200 || .StatusCode > 299 {
				,  := ioutil.ReadAll(.Body)
				.Body.Close()
				return &googleapi.Error{
					Code:   .StatusCode,
					Header: .Header,
					Body:   string(),
				}
			}

			 :=
				!decompressiveTranscoding() &&
					 > 0 &&  != 0 &&
					.StatusCode != http.StatusPartialContent

			if  {
				.Body.Close()
				return errors.New("storage: partial request not satisfied")
			}
With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves back the whole file regardless of the range count passed in as per: https://cloud.google.com/storage/docs/transcoding#range, thus we have to manually move the body forward by seen bytes.
			if decompressiveTranscoding() &&  > 0 {
				_, _ = io.CopyN(ioutil.Discard, .Body, )
			}
If a generation hasn't been specified, and this is the first response we get, let's record the generation. In future requests we'll use this generation as a precondition to avoid data races.
			if  < 0 && .Header.Get("X-Goog-Generation") != "" {
				,  := strconv.ParseInt(.Header.Get("X-Goog-Generation"), 10, 64)
				if  != nil {
					return 
				}
				 = 
			}
			return nil
		})
		if  != nil {
			return nil, 
		}
		return , nil
	}

	,  := (0)
	if  != nil {
		return nil, 
	}
	var (
		        int64 // total size of object, even if a range was requested.
		    bool
		         uint32
		 int64 // non-zero if range request.
	)
	if .StatusCode == http.StatusPartialContent {
		 := strings.TrimSpace(.Header.Get("Content-Range"))
		if !strings.HasPrefix(, "bytes ") || !strings.Contains(, "/") {
			return nil, fmt.Errorf("storage: invalid Content-Range %q", )
		}
		,  = strconv.ParseInt([strings.LastIndex(, "/")+1:], 10, 64)
		if  != nil {
			return nil, fmt.Errorf("storage: invalid Content-Range %q", )
		}

		 := strings.Index(, "-")
		if  >= 0 {
			,  = strconv.ParseInt([len("bytes="):], 10, 64)
			if  != nil {
				return nil, fmt.Errorf("storage: invalid Content-Range %q: %v", , )
			}
		}
	} else {
Check the CRC iff all of the following hold: - We asked for content (length != 0). - We got all the content (status != PartialContent). - The server sent a CRC header. - The Go http stack did not uncompress the file. - We were not served compressed data that was uncompressed on download. The problem with the last two cases is that the CRC will not match -- GCS computes it on the compressed contents, but we compute it on the uncompressed contents.
		if  != 0 && !.Uncompressed && !uncompressedByServer() {
			,  = parseCRC32c()
		}
	}

	 := .ContentLength
	 := .Body
	if  == 0 {
		 = 0
		.Close()
		 = emptyBody
	}
	var  int64
	if .Header.Get("X-Goog-Metageneration") != "" {
		,  = strconv.ParseInt(.Header.Get("X-Goog-Metageneration"), 10, 64)
		if  != nil {
			return nil, 
		}
	}

	var  time.Time
	if .Header.Get("Last-Modified") != "" {
		,  = http.ParseTime(.Header.Get("Last-Modified"))
		if  != nil {
			return nil, 
		}
	}

	 := ReaderObjectAttrs{
		Size:            ,
		ContentType:     .Header.Get("Content-Type"),
		ContentEncoding: .Header.Get("Content-Encoding"),
		CacheControl:    .Header.Get("Cache-Control"),
		LastModified:    ,
		StartOffset:     ,
		Generation:      ,
		Metageneration:  ,
	}
	return &Reader{
		Attrs:    ,
		body:     ,
		size:     ,
		remain:   ,
		wantCRC:  ,
		checkCRC: ,
		reopen:   ,
	}, nil
}
decompressiveTranscoding returns true if the request was served decompressed and different than its original storage form. This happens when the "Content-Encoding" header is "gzip". See: * https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip * https://github.com/googleapis/google-cloud-go/issues/1800
Decompressive Transcoding.
	return .Header.Get("Content-Encoding") == "gzip" ||
		.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip"
}

If the data is stored as gzip but is not encoded as gzip, then it was uncompressed by the server.
	return .Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" &&
		.Header.Get("Content-Encoding") != "gzip"
}

func ( *http.Response) (uint32, bool) {
	const  = "crc32c="
	for ,  := range .Header["X-Goog-Hash"] {
		if strings.HasPrefix(, ) {
			,  := decodeUint32([len():])
			if  == nil {
				return , true
			}
		}
	}
	return 0, false
}

// emptyBody is substituted for the response body on zero-length (HEAD) reads
// so that Reader.Read immediately reports EOF and Close is always safe.
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// Reader reads a Cloud Storage object.
// It implements io.Reader.
//
// Typically, a Reader computes the CRC of the downloaded content and compares it to
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
type Reader struct {
	Attrs              ReaderObjectAttrs
	body               io.ReadCloser
	seen, remain, size int64
	checkCRC           bool   // should we check the CRC?
	wantCRC            uint32 // the CRC32c value the server sent in the header
	gotCRC             uint32 // running crc
	reopen             func(seen int64) (*http.Response, error)
}
Close closes the Reader. It must be called when done reading.
func ( *Reader) () error {
	return .body.Close()
}

func ( *Reader) ( []byte) (int, error) {
	,  := .readWithRetry()
	if .remain != -1 {
		.remain -= int64()
	}
	if .checkCRC {
Check CRC here. It would be natural to check it in Close, but everybody defers Close on the assumption that it doesn't return anything worth looking at.
		if  == io.EOF {
			if .gotCRC != .wantCRC {
				return , fmt.Errorf("storage: bad CRC on read: got %d, want %d",
					.gotCRC, .wantCRC)
			}
		}
	}
	return , 
}

func ( *Reader) ( []byte) (int, error) {
	 := 0
	for len([:]) > 0 {
		,  := .body.Read([:])
		 += 
		.seen += int64()
		if !shouldRetryRead() {
			return , 
Read failed, but we will try again. Send a ranged read request that takes into account the number of bytes we've already seen.
		,  := .reopen(.seen)
reopen already retries
			return , 
		}
		.body.Close()
		.body = .Body
	}
	return , nil
}

// shouldRetryRead reports whether a body-read error is a retryable http2
// stream failure (an INTERNAL_ERROR surfaced by the http2 transport).
func shouldRetryRead(err error) bool {
	if err == nil {
		return false
	}
	// Match on the message suffix and the concrete http2 error type, since the
	// http2 package does not export a sentinel for this condition.
	return strings.HasSuffix(err.Error(), "INTERNAL_ERROR") && strings.Contains(reflect.TypeOf(err).String(), "http2")
}
Size returns the size of the object in bytes. The returned value is always the same and is not affected by calls to Read or Close. Deprecated: use Reader.Attrs.Size.
func ( *Reader) () int64 {
	return .Attrs.Size
}
Remain returns the number of bytes left to read, or -1 if unknown.
func ( *Reader) () int64 {
	return .remain
}
ContentType returns the content type of the object. Deprecated: use Reader.Attrs.ContentType.
func ( *Reader) () string {
	return .Attrs.ContentType
}
ContentEncoding returns the content encoding of the object. Deprecated: use Reader.Attrs.ContentEncoding.
CacheControl returns the cache control of the object. Deprecated: use Reader.Attrs.CacheControl.
func ( *Reader) () string {
	return .Attrs.CacheControl
}
LastModified returns the value of the Last-Modified header. Deprecated: use Reader.Attrs.LastModified.
func ( *Reader) () (time.Time, error) {
	return .Attrs.LastModified, nil