// Source file: deflate.go
// Package: compress/flate
package flate
import (
	"fmt"
	"io"
	"math"
)
// Compression levels accepted by NewWriter and NewWriterDict.
const (
	NoCompression = 0
	BestSpeed     = 1

	BestCompression    = 9
	DefaultCompression = -1

	// HuffmanOnly disables Lempel-Ziv match searching and only performs
	// Huffman entropy encoding.
	HuffmanOnly = -2
)
const (
	logWindowSize = 15
	windowSize    = 1 << logWindowSize
	windowMask    = windowSize - 1

	// The LZ77 step produces a sequence of literal tokens and <length, offset>
	// pair tokens. The bounds below come from RFC 1951 except where noted.
	baseMatchLength = 3   // The smallest match length per the RFC section 3.2.5
	minMatchLength  = 4   // The smallest match length that the compressor actually emits
	maxMatchLength  = 258 // The largest match length
	baseMatchOffset = 1   // The smallest match offset
	maxMatchOffset  = 1 << 15 // The largest match offset

	// Token buffer is flushed into a block when it reaches this size.
	maxFlateBlockTokens = 1 << 14
	maxStoreBlockSize   = 65535
	hashBits            = 17 // After 17 performance degrades
	hashSize            = 1 << hashBits
	hashMask            = (1 << hashBits) - 1
	maxHashOffset       = 1 << 24

	// skipNever disables the "fast skip hashing" heuristic entirely.
	skipNever = math.MaxInt32
)
// compressionLevel holds the match-search tuning parameters for one
// compression level: how good a match must be to stop searching early
// (good/nice), how lazily to defer emitting a match (lazy), how long a
// hash chain to walk (chain), and when to skip hashing entirely
// (fastSkipHashing).
type compressionLevel struct {
	level, good, lazy, nice, chain, fastSkipHashing int
}
// levels is the per-level tuning table for the deflate match-finder.
// It is indexed directly by compression level, so every level 0-9 must
// be present; the original listing dropped the rows for levels 1 and 3,
// which would have shifted every lookup for level >= 2.
var levels = []compressionLevel{
	{0, 0, 0, 0, 0, 0}, // NoCompression.
	{1, 0, 0, 0, 0, 0}, // BestSpeed uses a custom algorithm; see deflatefast.go.
	// For levels 2-3 we use this algorithm but with a simpler tuning.
	{2, 4, 0, 16, 8, 5},
	{3, 4, 0, 32, 32, 6},
	// Levels 4-9 use advanced search with increasingly stringent
	// conditions for "good enough".
	{4, 4, 4, 16, 16, skipNever},
	{5, 8, 16, 32, 32, skipNever},
	{6, 8, 16, 128, 128, skipNever},
	{7, 8, 32, 128, 256, skipNever},
	{8, 32, 128, 258, 1024, skipNever},
	{9, 32, 258, 258, 4096, skipNever},
}
// compressor is the state for one deflate stream. The listing dropped the
// hash-chain, token-buffer, and deflate-state fields, all of which are
// referenced by the methods below; they are restored here.
type compressor struct {
	compressionLevel

	w          *huffmanBitWriter
	bulkHasher func([]byte, []uint32)

	// compression algorithm
	fill      func(*compressor, []byte) int // copy data to window
	step      func(*compressor)             // process window
	sync      bool                          // requesting flush
	bestSpeed *deflateFast                  // Encoder for BestSpeed

	// Input hash chains
	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
	// If hashHead[hashValue] is within the current window, then
	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
	// with the same hash value.
	chainHead  int
	hashHead   [hashSize]uint32
	hashPrev   [windowSize]uint32
	hashOffset int

	// input window: unprocessed data is window[index:windowEnd]
	index         int
	window        []byte
	windowEnd     int
	blockStart    int  // window index where current tokens start
	byteAvailable bool // if true, still need to process window[index-1].

	// queued output tokens
	tokens []token

	// deflate state
	length         int
	offset         int
	hash           uint32
	maxInsertIndex int
	err            error

	// hashMatch must be able to contain hashes for the maximum match length.
	hashMatch [maxMatchLength - 1]uint32
}
func ( *compressor) ( []byte) int {
copy(.window, .window[windowSize:2*windowSize])
.index -= windowSize
.windowEnd -= windowSize
if .blockStart >= windowSize {
.blockStart -= windowSize
} else {
.blockStart = math.MaxInt32
}
.hashOffset += windowSize
if .hashOffset > maxHashOffset {
:= .hashOffset - 1
.hashOffset -=
.chainHead -=
for , := range .hashPrev[:] {
if int() > {
.hashPrev[] = uint32(int() - )
} else {
.hashPrev[] = 0
}
}
for , := range .hashHead[:] {
if int() > {
.hashHead[] = uint32(int() - )
} else {
.hashHead[] = 0
}
}
}
}
:= copy(.window[.windowEnd:], )
.windowEnd +=
return
}
func ( *compressor) ( []token, int) error {
if > 0 {
var []byte
if .blockStart <= {
= .window[.blockStart:]
}
.blockStart =
.w.writeBlock(, false, )
return .w.err
}
return nil
}
if .compressionLevel.level < 2 {
return
}
if .index != 0 || .windowEnd != 0 {
panic("internal error: fillWindow called with stale data")
}
if len() > windowSize {
= [len()-windowSize:]
:= ( + 256 - minMatchLength) / 256
for := 0; < ; ++ {
:= * 256
:= + 256 + minMatchLength - 1
if > {
=
}
:= .window[:]
:= len() - minMatchLength + 1
if <= 0 {
continue
}
:= .hashMatch[:]
.bulkHasher(, )
var uint32
for , := range {
:= +
=
* = uint32( + .hashOffset)
}
.hash =
func ( *compressor) ( int, int, int, int) (, int, bool) {
:= maxMatchLength
if < {
=
}
:= .window[0 : +]
:= .chain
=
if >= .good {
>>= 2
}
:= [+]
:= [:]
:= - windowSize
for := ; > 0; -- {
if == [+] {
:= matchLen([:], , )
if > && ( > minMatchLength || - <= 4096) {
=
= -
= true
break
}
= [+]
}
}
break
}
= int(.hashPrev[&windowMask]) - .hashOffset
if < || < 0 {
break
}
}
return
}
func ( *compressor) ( []byte) error {
if .w.writeStoredHeader(len(), false); .w.err != nil {
return .w.err
}
.w.writeBytes()
return .w.err
}
// hashmul is a multiplicative-hashing constant (the same one used by
// zlib and snappy); presumably consumed by the package's 4-byte hash
// function, which is not visible in this listing — confirm against the
// full file.
const hashmul = 0x1e35a7bd
if .windowEnd < maxStoreBlockSize {
if !.sync {
return
}
if len(.tokens) > .windowEnd-(.windowEnd>>4) {
.w.writeBlockHuff(false, .window[:.windowEnd])
} else {
.w.writeBlockDynamic(.tokens, false, .window[:.windowEnd])
}
.err = .w.err
.windowEnd = 0
}
func ( *compressor) () {
.window = make([]byte, 2*windowSize)
.hashOffset = 1
.tokens = make([]token, 0, maxFlateBlockTokens+1)
.length = minMatchLength - 1
.offset = 0
.byteAvailable = false
.index = 0
.hash = 0
.chainHead = -1
.bulkHasher = bulkHash4
}
func ( *compressor) () {
if .windowEnd-.index < minMatchLength+maxMatchLength && !.sync {
return
}
.maxInsertIndex = .windowEnd - (minMatchLength - 1)
if .index < .maxInsertIndex {
.hash = hash4(.window[.index : .index+minMatchLength])
}
:
for {
if .index > .windowEnd {
panic("index > windowEnd")
}
:= .windowEnd - .index
if < minMatchLength+maxMatchLength {
if !.sync {
break
}
if .index > .windowEnd {
panic("index > windowEnd")
}
.hash = hash4(.window[.index : .index+minMatchLength])
:= &.hashHead[.hash&hashMask]
.chainHead = int(*)
.hashPrev[.index&windowMask] = uint32(.chainHead)
* = uint32(.index + .hashOffset)
}
:= .length
:= .offset
.length = minMatchLength - 1
.offset = 0
:= .index - windowSize
if < 0 {
= 0
}
if .chainHead-.hashOffset >= &&
(.fastSkipHashing != skipNever && > minMatchLength-1 ||
.fastSkipHashing == skipNever && > && < .lazy) {
if , , := .findMatch(.index, .chainHead-.hashOffset, minMatchLength-1, ); {
.length =
.offset =
}
}
if .fastSkipHashing != skipNever && .length >= minMatchLength ||
if .fastSkipHashing != skipNever {
.tokens = append(.tokens, matchToken(uint32(.length-baseMatchLength), uint32(.offset-baseMatchOffset)))
} else {
.tokens = append(.tokens, matchToken(uint32(-baseMatchLength), uint32(-baseMatchOffset)))
if .length <= .fastSkipHashing {
var int
if .fastSkipHashing != skipNever {
= .index + .length
} else {
= .index + - 1
}
:= .index
for ++; < ; ++ {
if < .maxInsertIndex {
* = uint32( + .hashOffset)
}
}
.index =
if .fastSkipHashing == skipNever {
.byteAvailable = false
.length = minMatchLength - 1
}
.index += .length
if .index < .maxInsertIndex {
.hash = hash4(.window[.index : .index+minMatchLength])
}
}
if .err = .writeBlock(.tokens, .index); .err != nil {
return
}
.tokens = .tokens[:0]
}
} else {
if .fastSkipHashing != skipNever || .byteAvailable {
:= .index - 1
if .fastSkipHashing != skipNever {
= .index
}
.tokens = append(.tokens, literalToken(uint32(.window[])))
if len(.tokens) == maxFlateBlockTokens {
if .err = .writeBlock(.tokens, +1); .err != nil {
return
}
.tokens = .tokens[:0]
}
}
.index++
if .fastSkipHashing == skipNever {
.byteAvailable = true
}
}
}
}
func ( *compressor) ( []byte) int {
:= copy(.window[.windowEnd:], )
.windowEnd +=
return
}
func ( *compressor) () {
if .windowEnd > 0 && (.windowEnd == maxStoreBlockSize || .sync) {
.err = .writeStoredBlock(.window[:.windowEnd])
.windowEnd = 0
}
}
func ( *compressor) () {
if .windowEnd < len(.window) && !.sync || .windowEnd == 0 {
return
}
.w.writeBlockHuff(false, .window[:.windowEnd])
.err = .w.err
.windowEnd = 0
}
func ( *compressor) ( []byte) ( int, error) {
if .err != nil {
return 0, .err
}
= len()
for len() > 0 {
.step()
= [.fill(, ):]
if .err != nil {
return 0, .err
}
}
return , nil
}
func ( *compressor) () error {
if .err != nil {
return .err
}
.sync = true
.step()
if .err == nil {
.w.writeStoredHeader(0, false)
.w.flush()
.err = .w.err
}
.sync = false
return .err
}
func ( *compressor) ( io.Writer, int) ( error) {
.w = newHuffmanBitWriter()
switch {
case == NoCompression:
.window = make([]byte, maxStoreBlockSize)
.fill = (*compressor).fillStore
.step = (*compressor).store
case == HuffmanOnly:
.window = make([]byte, maxStoreBlockSize)
.fill = (*compressor).fillStore
.step = (*compressor).storeHuff
case == BestSpeed:
.compressionLevel = levels[]
.window = make([]byte, maxStoreBlockSize)
.fill = (*compressor).fillStore
.step = (*compressor).encSpeed
.bestSpeed = newDeflateFast()
.tokens = make([]token, maxStoreBlockSize)
case == DefaultCompression:
= 6
fallthrough
case 2 <= && <= 9:
.compressionLevel = levels[]
.initDeflate()
.fill = (*compressor).fillDeflate
.step = (*compressor).deflate
default:
return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", )
}
return nil
}
func ( *compressor) ( io.Writer) {
.w.reset()
.sync = false
.err = nil
switch .compressionLevel.level {
case NoCompression:
.windowEnd = 0
case BestSpeed:
.windowEnd = 0
.tokens = .tokens[:0]
.bestSpeed.reset()
default:
.chainHead = -1
for := range .hashHead {
.hashHead[] = 0
}
for := range .hashPrev {
.hashPrev[] = 0
}
.hashOffset = 1
.index, .windowEnd = 0, 0
.blockStart, .byteAvailable = 0, false
.tokens = .tokens[:0]
.length = minMatchLength - 1
.offset = 0
.hash = 0
.maxInsertIndex = 0
}
}
func ( *compressor) () error {
if .err != nil {
return .err
}
.sync = true
.step()
if .err != nil {
return .err
}
if .w.writeStoredHeader(0, true); .w.err != nil {
return .w.err
}
.w.flush()
return .w.err
}
func ( io.Writer, int, []byte) (*Writer, error) {
:= &dictWriter{}
, := NewWriter(, )
if != nil {
return nil,
}
.d.fillWindow()
.dict = append(.dict, ...) // duplicate dictionary for Reset method.
return ,
}
// dictWriter is a pass-through io.Writer wrapper. Its presence marks a
// Writer as having been created by NewWriterDict, so Reset can detect and
// re-apply the dictionary.
type dictWriter struct {
	w io.Writer
}

// Write forwards b unchanged to the wrapped writer.
func (w *dictWriter) Write(b []byte) (n int, err error) {
	return w.w.Write(b)
}
// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see NewWriter).
type Writer struct {
	d    compressor
	dict []byte // preset dictionary, retained so Reset can re-apply it
}
// (Page footer from the documentation generator, kept as a comment:
// this listing was produced by Golds v0.3.2-preview, a Go 101 project
// by Tapir Liu; GOOS=darwin GOARCH=amd64.)