// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package zip

import (
	"bufio"
	"encoding/binary"
	"errors"
	"hash"
	"hash/crc32"
	"io"
	"io/fs"
	"os"
	"path"
	"sort"
	"strings"
	"sync"
	"time"
)

var (
	// ErrFormat is returned when an archive does not have a valid ZIP structure.
	ErrFormat    = errors.New("zip: not a valid zip file")
	// ErrAlgorithm is returned when an entry uses a compression method with no
	// registered Decompressor.
	ErrAlgorithm = errors.New("zip: unsupported compression algorithm")
	// ErrChecksum is returned when an entry's contents fail CRC-32 verification.
	ErrChecksum  = errors.New("zip: checksum error")
)
A Reader serves content from a ZIP archive.
fileList is a list of files sorted by ename, for use by the Open method.
A ReadCloser is a Reader that must be closed when no longer needed.
type ReadCloser struct {
	f *os.File
	Reader
}
A File is a single file in a ZIP archive. The file information is in the embedded FileHeader. The file content can be accessed by calling Open.
type File struct {
	FileHeader
	zip          *Reader
	zipr         io.ReaderAt
	zipsize      int64
	headerOffset int64
}

func ( *File) () bool {
	return .Flags&0x8 != 0
}
OpenReader will open the Zip file specified by name and return a ReadCloser.
func ( string) (*ReadCloser, error) {
	,  := os.Open()
	if  != nil {
		return nil, 
	}
	,  := .Stat()
	if  != nil {
		.Close()
		return nil, 
	}
	 := new(ReadCloser)
	if  := .init(, .Size());  != nil {
		.Close()
		return nil, 
	}
	.f = 
	return , nil
}
NewReader returns a new Reader reading from r, which is assumed to have the given size in bytes.
func ( io.ReaderAt,  int64) (*Reader, error) {
	if  < 0 {
		return nil, errors.New("zip: size cannot be negative")
	}
	 := new(Reader)
	if  := .init(, );  != nil {
		return nil, 
	}
	return , nil
}

func ( *Reader) ( io.ReaderAt,  int64) error {
	,  := readDirectoryEnd(, )
	if  != nil {
		return 
	}
	.r = 
	.File = make([]*File, 0, .directoryRecords)
	.Comment = .comment
	 := io.NewSectionReader(, 0, )
	if _,  = .Seek(int64(.directoryOffset), io.SeekStart);  != nil {
		return 
	}
	 := bufio.NewReader()
The count of files inside a zip is truncated to fit in a uint16. Gloss over this by reading headers until we encounter a bad one, and then only report an ErrFormat or UnexpectedEOF if the file count modulo 65536 is incorrect.
	for {
		 := &File{zip: , zipr: , zipsize: }
		 = readDirectoryHeader(, )
		if  == ErrFormat ||  == io.ErrUnexpectedEOF {
			break
		}
		if  != nil {
			return 
		}
		.File = append(.File, )
	}
Return the readDirectoryHeader error if we read the wrong number of directory entries.
		return 
	}
	return nil
}
RegisterDecompressor registers or overrides a custom decompressor for a specific method ID. If a decompressor for a given method is not found, Reader will default to looking up the decompressor at the package level.
func ( *Reader) ( uint16,  Decompressor) {
	if .decompressors == nil {
		.decompressors = make(map[uint16]Decompressor)
	}
	.decompressors[] = 
}

func ( *Reader) ( uint16) Decompressor {
	 := .decompressors[]
	if  == nil {
		 = decompressor()
	}
	return 
}
Close closes the Zip file, rendering it unusable for I/O.
func ( *ReadCloser) () error {
	return .f.Close()
}
DataOffset returns the offset of the file's possibly-compressed data, relative to the beginning of the zip file. Most callers should instead use Open, which transparently decompresses data and verifies checksums.
func ( *File) () ( int64,  error) {
	,  := .findBodyOffset()
	if  != nil {
		return
	}
	return .headerOffset + , nil
}
Open returns a ReadCloser that provides access to the File's contents. Multiple files may be read concurrently.
func ( *File) () (io.ReadCloser, error) {
	,  := .findBodyOffset()
	if  != nil {
		return nil, 
	}
	 := int64(.CompressedSize64)
	 := io.NewSectionReader(.zipr, .headerOffset+, )
	 := .zip.decompressor(.Method)
	if  == nil {
		return nil, ErrAlgorithm
	}
	var  io.ReadCloser = ()
	var  io.Reader
	if .hasDataDescriptor() {
		 = io.NewSectionReader(.zipr, .headerOffset++, dataDescriptorLen)
	}
	 = &checksumReader{
		rc:   ,
		hash: crc32.NewIEEE(),
		f:    ,
		desr: ,
	}
	return , nil
}

// A checksumReader wraps a File's decompressed stream, hashing everything
// read and verifying the CRC-32 (and optional trailing data descriptor)
// once EOF is reached.
type checksumReader struct {
	rc    io.ReadCloser
	hash  hash.Hash32
	nread uint64 // number of bytes read so far
	f     *File
	desr  io.Reader // if non-nil, where to read the data descriptor
	err   error     // sticky error
}

func ( *checksumReader) () (fs.FileInfo, error) {
	return headerFileInfo{&.f.FileHeader}, nil
}

func ( *checksumReader) ( []byte) ( int,  error) {
	if .err != nil {
		return 0, .err
	}
	,  = .rc.Read()
	.hash.Write([:])
	.nread += uint64()
	if  == nil {
		return
	}
	if  == io.EOF {
		if .nread != .f.UncompressedSize64 {
			return 0, io.ErrUnexpectedEOF
		}
		if .desr != nil {
			if  := readDataDescriptor(.desr, .f);  != nil {
				if  == io.EOF {
					 = io.ErrUnexpectedEOF
				} else {
					 = 
				}
			} else if .hash.Sum32() != .f.CRC32 {
				 = ErrChecksum
			}
If there's not a data descriptor, we still compare the CRC32 of what we've read against the file header or TOC's CRC32, if it seems like it was set.
			if .f.CRC32 != 0 && .hash.Sum32() != .f.CRC32 {
				 = ErrChecksum
			}
		}
	}
	.err = 
	return
}

func ( *checksumReader) () error { return .rc.Close() }
findBodyOffset does the minimum work to verify the file has a header and returns the file body offset.
func ( *File) () (int64, error) {
	var  [fileHeaderLen]byte
	if ,  := .zipr.ReadAt([:], .headerOffset);  != nil {
		return 0, 
	}
	 := readBuf([:])
	if  := .uint32();  != fileHeaderSignature {
		return 0, ErrFormat
	}
	 = [22:] // skip over most of the header
	 := int(.uint16())
	 := int(.uint16())
	return int64(fileHeaderLen +  + ), nil
}
readDirectoryHeader attempts to read a directory header from r. It returns io.ErrUnexpectedEOF if it cannot read a complete header, and ErrFormat if it doesn't find a valid header signature.
func ( *File,  io.Reader) error {
	var  [directoryHeaderLen]byte
	if ,  := io.ReadFull(, [:]);  != nil {
		return 
	}
	 := readBuf([:])
	if  := .uint32();  != directoryHeaderSignature {
		return ErrFormat
	}
	.CreatorVersion = .uint16()
	.ReaderVersion = .uint16()
	.Flags = .uint16()
	.Method = .uint16()
	.ModifiedTime = .uint16()
	.ModifiedDate = .uint16()
	.CRC32 = .uint32()
	.CompressedSize = .uint32()
	.UncompressedSize = .uint32()
	.CompressedSize64 = uint64(.CompressedSize)
	.UncompressedSize64 = uint64(.UncompressedSize)
	 := int(.uint16())
	 := int(.uint16())
	 := int(.uint16())
	 = [4:] // skipped start disk number and internal attributes (2x uint16)
	.ExternalAttrs = .uint32()
	.headerOffset = int64(.uint32())
	 := make([]byte, ++)
	if ,  := io.ReadFull(, );  != nil {
		return 
	}
	.Name = string([:])
	.Extra = [ : +]
	.Comment = string([+:])
Determine the character encoding.
	,  := detectUTF8(.Name)
	,  := detectUTF8(.Comment)
	switch {
Name and Comment definitely not UTF-8.
Name and Comment use only single-byte runes that overlap with UTF-8.
Might be UTF-8, might be some other encoding; preserve existing flag. Some ZIP writers use UTF-8 encoding without setting the UTF-8 flag. Since it is impossible to always distinguish valid UTF-8 from some other encoding (e.g., GBK or Shift-JIS), we trust the flag.
		.NonUTF8 = .Flags&0x800 == 0
	}

	 := .UncompressedSize == ^uint32(0)
	 := .CompressedSize == ^uint32(0)
	 := .headerOffset == int64(^uint32(0))
Best effort to find what we need. Other zip authors might not even follow the basic format, and we'll just ignore the Extra content in that case.
	var  time.Time
:
	for  := readBuf(.Extra); len() >= 4; { // need at least tag and size
		 := .uint16()
		 := int(.uint16())
		if len() <  {
			break
		}
		 := .sub()

		switch  {
update directory values from the zip64 extra block. They should only be consulted if the sizes read earlier are maxed out. See golang.org/issue/13367.
			if  {
				 = false
				if len() < 8 {
					return ErrFormat
				}
				.UncompressedSize64 = .uint64()
			}
			if  {
				 = false
				if len() < 8 {
					return ErrFormat
				}
				.CompressedSize64 = .uint64()
			}
			if  {
				 = false
				if len() < 8 {
					return ErrFormat
				}
				.headerOffset = int64(.uint64())
			}
		case ntfsExtraID:
			if len() < 4 {
				continue 
			}
			.uint32()        // reserved (ignored)
			for len() >= 4 { // need at least tag and size
				 := .uint16()
				 := int(.uint16())
				if len() <  {
					continue 
				}
				 := .sub()
				if  != 1 ||  != 24 {
					continue // Ignore irrelevant attributes
				}

				const  = 1e7    // Windows timestamp resolution
				 := int64(.uint64()) // ModTime since Windows epoch
				 := int64( / )
				 := (1e9 / ) * int64(%)
				 := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
				 = time.Unix(.Unix()+, )
			}
		case unixExtraID, infoZipUnixExtraID:
			if len() < 8 {
				continue 
			}
			.uint32()              // AcTime (ignored)
			 := int64(.uint32()) // ModTime since Unix epoch
			 = time.Unix(, 0)
		case extTimeExtraID:
			if len() < 5 || .uint8()&1 == 0 {
				continue 
			}
			 := int64(.uint32()) // ModTime since Unix epoch
			 = time.Unix(, 0)
		}
	}

	 := msDosTimeToTime(.ModifiedDate, .ModifiedTime)
	.Modified = 
	if !.IsZero() {
		.Modified = .UTC()
If legacy MS-DOS timestamps are set, we can use the delta between the legacy and extended versions to estimate timezone offset. A non-UTC timezone is always used (even if offset is zero). Thus, FileHeader.Modified.Location() == time.UTC is useful for determining whether extended timestamps are present. This is necessary for users that need to do additional time calculations when dealing with legacy ZIP formats.
		if .ModifiedTime != 0 || .ModifiedDate != 0 {
			.Modified = .In(timeZone(.Sub()))
		}
	}
Assume that uncompressed size 2³²-1 could plausibly happen in an old zip32 file that was sharding inputs into the largest chunks possible (or is just malicious; search the web for 42.zip). If needUSize is true still, it means we didn't see a zip64 extension. As long as the compressed size is not also 2³²-1 (implausible) and the header is not also 2³²-1 (equally implausible), accept the uncompressed size 2³²-1 as valid. If nothing else, this keeps archive/zip working with 42.zip.
	_ = 

	if  ||  {
		return ErrFormat
	}

	return nil
}

func ( io.Reader,  *File) error {
	var  [dataDescriptorLen]byte
The spec says: "Although not originally assigned a signature, the value 0x08074b50 has commonly been adopted as a signature value for the data descriptor record. Implementers should be aware that ZIP files may be encountered with or without this signature marking data descriptors and should account for either case when reading ZIP files to ensure compatibility." dataDescriptorLen includes the size of the signature but first read just those 4 bytes to see if it exists.
	if ,  := io.ReadFull(, [:4]);  != nil {
		return 
	}
	 := 0
	 := readBuf([:4])
No data descriptor signature. Keep these four bytes.
		 += 4
	}
	if ,  := io.ReadFull(, [:12]);  != nil {
		return 
	}
	 := readBuf([:12])
	if .uint32() != .CRC32 {
		return ErrChecksum
	}
The two sizes that follow here can be either 32 bits or 64 bits but the spec is not very clear on this and different interpretations has been made causing incompatibilities. We already have the sizes from the central directory so we can just ignore these.

	return nil
}

look for directoryEndSignature in the last 1k, then in the last 65k
	var  []byte
	var  int64
	for ,  := range []int64{1024, 65 * 1024} {
		if  >  {
			 = 
		}
		 = make([]byte, int())
		if ,  := .ReadAt(, -);  != nil &&  != io.EOF {
			return nil, 
		}
		if  := findSignatureInBlock();  >= 0 {
			 = [:]
			 =  -  + int64()
			break
		}
		if  == 1 ||  ==  {
			return nil, ErrFormat
		}
	}
read header into struct
	 := readBuf([4:]) // skip signature
	 := &directoryEnd{
		diskNbr:            uint32(.uint16()),
		dirDiskNbr:         uint32(.uint16()),
		dirRecordsThisDisk: uint64(.uint16()),
		directoryRecords:   uint64(.uint16()),
		directorySize:      uint64(.uint32()),
		directoryOffset:    uint64(.uint32()),
		commentLen:         .uint16(),
	}
	 := int(.commentLen)
	if  > len() {
		return nil, errors.New("zip: invalid comment length")
	}
	.comment = string([:])
These values mean that the file can be a zip64 file
	if .directoryRecords == 0xffff || .directorySize == 0xffff || .directoryOffset == 0xffffffff {
		,  := findDirectory64End(, )
		if  == nil &&  >= 0 {
			 = readDirectory64End(, , )
		}
		if  != nil {
			return nil, 
		}
Make sure directoryOffset points to somewhere in our file.
	if  := int64(.directoryOffset);  < 0 ||  >=  {
		return nil, ErrFormat
	}
	return , nil
}
findDirectory64End tries to read the zip64 locator just before the directory end and returns the offset of the zip64 directory end if found.
func ( io.ReaderAt,  int64) (int64, error) {
	 :=  - directory64LocLen
	if  < 0 {
		return -1, nil // no need to look for a header outside the file
	}
	 := make([]byte, directory64LocLen)
	if ,  := .ReadAt(, );  != nil {
		return -1, 
	}
	 := readBuf()
	if  := .uint32();  != directory64LocSignature {
		return -1, nil
	}
	if .uint32() != 0 { // number of the disk with the start of the zip64 end of central directory
		return -1, nil // the file is not a valid zip64-file
	}
	 := .uint64()      // relative offset of the zip64 end of central directory record
	if .uint32() != 1 { // total number of disks
		return -1, nil // the file is not a valid zip64-file
	}
	return int64(), nil
}
readDirectory64End reads the zip64 directory end and updates the directory end with the zip64 directory end values.
func ( io.ReaderAt,  int64,  *directoryEnd) ( error) {
	 := make([]byte, directory64EndLen)
	if ,  := .ReadAt(, );  != nil {
		return 
	}

	 := readBuf()
	if  := .uint32();  != directory64EndSignature {
		return ErrFormat
	}

	 = [12:]                        // skip dir size, version and version needed (uint64 + 2x uint16)
	.diskNbr = .uint32()            // number of this disk
	.dirDiskNbr = .uint32()         // number of the disk with the start of the central directory
	.dirRecordsThisDisk = .uint64() // total number of entries in the central directory on this disk
	.directoryRecords = .uint64()   // total number of entries in the central directory
	.directorySize = .uint64()      // size of the central directory
	.directoryOffset = .uint64()    // offset of start of central directory with respect to the starting disk number

	return nil
}

func ( []byte) int {
defined from directoryEndSignature in struct.go
n is length of comment
			 := int([+directoryEndLen-2]) | int([+directoryEndLen-1])<<8
			if +directoryEndLen+ <= len() {
				return 
			}
		}
	}
	return -1
}

// readBuf is a little-endian decoding cursor over a byte slice: each
// uintN call consumes N/8 bytes from the front.
type readBuf []byte

// uint8 consumes and returns the next byte.
func (b *readBuf) uint8() uint8 {
	v := (*b)[0]
	*b = (*b)[1:]
	return v
}

// uint16 consumes and returns the next 2 bytes, little-endian.
func (b *readBuf) uint16() uint16 {
	v := binary.LittleEndian.Uint16(*b)
	*b = (*b)[2:]
	return v
}

// uint32 consumes and returns the next 4 bytes, little-endian.
func (b *readBuf) uint32() uint32 {
	v := binary.LittleEndian.Uint32(*b)
	*b = (*b)[4:]
	return v
}

// uint64 consumes and returns the next 8 bytes, little-endian.
func (b *readBuf) uint64() uint64 {
	v := binary.LittleEndian.Uint64(*b)
	*b = (*b)[8:]
	return v
}

// sub consumes the next n bytes and returns them as a new readBuf.
func (b *readBuf) sub(n int) readBuf {
	b2 := (*b)[:n]
	*b = (*b)[n:]
	return b2
}
A fileListEntry is a File and its ename. If file == nil, the fileListEntry describes a directory, without metadata.
type fileListEntry struct {
	name string
	file *File // nil for directories
}

type fileInfoDirEntry interface {
	fs.FileInfo
	fs.DirEntry
}

func ( *fileListEntry) () fileInfoDirEntry {
	if .file != nil {
		return headerFileInfo{&.file.FileHeader}
	}
	return 
}
Only used for directories.
func ( *fileListEntry) () string       { , ,  := split(.name); return  }
func ( *fileListEntry) () int64        { return 0 }
func ( *fileListEntry) () time.Time { return time.Time{} }
func ( *fileListEntry) () fs.FileMode  { return fs.ModeDir | 0555 }
func ( *fileListEntry) () fs.FileMode  { return fs.ModeDir }
func ( *fileListEntry) () bool        { return true }
func ( *fileListEntry) () interface{}   { return nil }

func ( *fileListEntry) () (fs.FileInfo, error) { return , nil }
toValidName coerces name to be a valid name for fs.FS.Open.
func ( string) string {
	 = strings.ReplaceAll(, `\`, `/`)
	 := path.Clean()
	if strings.HasPrefix(, "/") {
		 = [len("/"):]
	}
	for strings.HasPrefix(, "../") {
		 = [len("../"):]
	}
	return 
}

func ( *Reader) () {
	.fileListOnce.Do(func() {
		 := make(map[string]bool)
		for ,  := range .File {
			 := toValidName(.Name)
			for  := path.Dir();  != ".";  = path.Dir() {
				[] = true
			}
			.fileList = append(.fileList, fileListEntry{, })
		}
		for  := range  {
			.fileList = append(.fileList, fileListEntry{ + "/", nil})
		}

		sort.Slice(.fileList, func(,  int) bool { return fileEntryLess(.fileList[].name, .fileList[].name) })
	})
}

func (,  string) bool {
	, ,  := split()
	, ,  := split()
	return  <  ||  ==  &&  < 
}
Open opens the named file in the ZIP archive, using the semantics of fs.FS.Open: paths are always slash separated, with no leading / or ../ elements.
func ( *Reader) ( string) (fs.File, error) {
	.initFileList()

	 := .openLookup()
	if  == nil || !fs.ValidPath() {
		return nil, &fs.PathError{Op: "open", Path: , Err: fs.ErrNotExist}
	}
	if .file == nil || strings.HasSuffix(.file.Name, "/") {
		return &openDir{, .openReadDir(), 0}, nil
	}
	,  := .file.Open()
	if  != nil {
		return nil, 
	}
	return .(fs.File), nil
}

// split separates name into its parent directory and final element,
// reporting whether name denoted a directory (had a trailing slash).
// A name with no slash gets the directory ".".
func split(name string) (dir, elem string, isDir bool) {
	if name[len(name)-1] == '/' {
		isDir = true
		name = name[:len(name)-1]
	}
	i := len(name) - 1
	for i >= 0 && name[i] != '/' {
		i--
	}
	if i < 0 {
		return ".", name, isDir
	}
	return name[:i], name[i+1:], isDir
}

// dotFile is the synthetic root-directory entry returned by openLookup(".").
var dotFile = &fileListEntry{name: "./"}

func ( *Reader) ( string) *fileListEntry {
	if  == "." {
		return dotFile
	}

	, ,  := split()
	 := .fileList
	 := sort.Search(len(), func( int) bool {
		, ,  := split([].name)
		return  >  ||  ==  &&  >= 
	})
	if  < len() {
		 := [].name
		if  ==  || len() == len()+1 && [len()] == '/' && [:len()] ==  {
			return &[]
		}
	}
	return nil
}

func ( *Reader) ( string) []fileListEntry {
	 := .fileList
	 := sort.Search(len(), func( int) bool {
		, ,  := split([].name)
		return  >= 
	})
	 := sort.Search(len(), func( int) bool {
		, ,  := split([].name)
		return  > 
	})
	return [:]
}

// An openDir is the handle returned by Reader.Open for a directory entry;
// it serves the directory's children from the precomputed files slice.
type openDir struct {
	e      *fileListEntry  // the directory's own entry
	files  []fileListEntry // children, in sorted order
	offset int             // progress cursor for ReadDir
}

func ( *openDir) () error               { return nil }
func ( *openDir) () (fs.FileInfo, error) { return .e.stat(), nil }

func ( *openDir) ([]byte) (int, error) {
	return 0, &fs.PathError{Op: "read", Path: .e.name, Err: errors.New("is a directory")}
}

func ( *openDir) ( int) ([]fs.DirEntry, error) {
	 := len(.files) - .offset
	if  > 0 &&  >  {
		 = 
	}
	if  == 0 {
		if  <= 0 {
			return nil, nil
		}
		return nil, io.EOF
	}
	 := make([]fs.DirEntry, )
	for  := range  {
		[] = .files[.offset+].stat()
	}
	.offset += 
	return , nil