From 8411e1d1cc0d8619eb8207d6342fe4878470f7aa Mon Sep 17 00:00:00 2001 From: Klaus Post Date: Wed, 5 Jun 2024 03:34:26 -0700 Subject: [PATCH] zip: Upgrade to 1.22.4 upstream (#970) * Upgrade zip to 1.22.4 upstream This removes `CreateHeaderRaw` --- internal/godebug/godebug.go | 44 +++++ zip/reader.go | 274 +++++++++++++++++++++------ zip/reader_test.go | 290 ++++++++++++++++++++++++++++- zip/register.go | 6 +- zip/struct.go | 97 ++++++---- zip/testdata/comment-truncated.zip | Bin 0 -> 216 bytes zip/testdata/test-badbase.zip | Bin 0 -> 1170 bytes zip/writer.go | 84 ++++++--- zip/writer_test.go | 73 +++++++- zip/zip_test.go | 6 +- 10 files changed, 740 insertions(+), 134 deletions(-) create mode 100644 internal/godebug/godebug.go create mode 100644 zip/testdata/comment-truncated.zip create mode 100644 zip/testdata/test-badbase.zip diff --git a/internal/godebug/godebug.go b/internal/godebug/godebug.go new file mode 100644 index 0000000000..ff13f2a020 --- /dev/null +++ b/internal/godebug/godebug.go @@ -0,0 +1,44 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package godebug makes the simplified settings in the $GODEBUG environment variable +// available to packages. +// Needed since internal/godebug is not available here. +package godebug + +import "os" + +func Get(key string) string { + s := os.Getenv("GODEBUG") + if s == "" { + return "" + } + // Scan the string backward so that later settings are used + // and earlier settings are ignored. + // Note that a forward scan would cause cached values + // to temporarily use the ignored value before being + // updated to the "correct" one. + end := len(s) + eq := -1 + for i := end - 1; i >= -1; i-- { + if i == -1 || s[i] == ',' { + if eq >= 0 { + name, arg := s[i+1:eq], s[eq+1:end] + if name == key { + for j := 0; j < len(arg); j++ { + if arg[j] == '#' { + return arg[:j] + } + } + return arg + } + } + eq = -1 + end = i + } else if s[i] == '=' { + eq = i + } + } + return "" +} diff --git a/zip/reader.go b/zip/reader.go index 460394ca1f..c3bcc88383 100644 --- a/zip/reader.go +++ b/zip/reader.go @@ -14,16 +14,20 @@ import ( "io/fs" "os" "path" + "path/filepath" "sort" "strings" "sync" "time" + + "github.com/klauspost/compress/internal/godebug" ) var ( - ErrFormat = errors.New("zip: not a valid zip file") - ErrAlgorithm = errors.New("zip: unsupported compression algorithm") - ErrChecksum = errors.New("zip: checksum error") + ErrFormat = errors.New("zip: not a valid zip file") + ErrAlgorithm = errors.New("zip: unsupported compression algorithm") + ErrChecksum = errors.New("zip: checksum error") + ErrInsecurePath = errors.New("zip: insecure file path") ) // A Reader serves content from a ZIP archive. @@ -43,15 +47,15 @@ type Reader struct { fileList []fileListEntry } -// A ReadCloser is a Reader that must be closed when no longer needed. +// A ReadCloser is a [Reader] that must be closed when no longer needed. type ReadCloser struct { f *os.File Reader } // A File is a single file in a ZIP archive. -// The file information is in the embedded FileHeader. -// The file content can be accessed by calling Open. +// The file information is in the embedded [FileHeader]. +// The file content can be accessed by calling [File.Open]. type File struct { FileHeader zip *Reader @@ -61,6 +65,14 @@ type File struct { } // OpenReader will open the Zip file specified by name and return a ReadCloser. 
+// +// If any file inside the archive uses a non-local name +// (as defined by [filepath.IsLocal]) or a name containing backslashes +// and the GODEBUG environment variable contains `zipinsecurepath=0`, +// OpenReader returns the reader with an ErrInsecurePath error. +// A future version of Go may introduce this behavior by default. +// Programs that want to accept non-local names can ignore +// the ErrInsecurePath error and use the returned reader. func OpenReader(name string) (*ReadCloser, error) { f, err := os.Open(name) if err != nil { @@ -72,100 +84,111 @@ func OpenReader(name string) (*ReadCloser, error) { return nil, err } r := new(ReadCloser) - if err := r.init(f, fi.Size()); err != nil { + if err = r.init(f, fi.Size()); err != nil && err != ErrInsecurePath { f.Close() return nil, err } r.f = f - return r, nil + return r, err } -// NewReader returns a new Reader reading from r, which is assumed to +// NewReader returns a new [Reader] reading from r, which is assumed to // have the given size in bytes. +// +// If any file inside the archive uses a non-local name +// (as defined by [filepath.IsLocal]) or a name containing backslashes +// and the GODEBUG environment variable contains `zipinsecurepath=0`, +// NewReader returns the reader with an [ErrInsecurePath] error. +// A future version of Go may introduce this behavior by default. +// Programs that want to accept non-local names can ignore +// the [ErrInsecurePath] error and use the returned reader. func NewReader(r io.ReaderAt, size int64) (*Reader, error) { if size < 0 { return nil, errors.New("zip: size cannot be negative") } zr := new(Reader) - if err := zr.init(r, size); err != nil { + var err error + if err = zr.init(r, size); err != nil && err != ErrInsecurePath { return nil, err } - return zr, nil + return zr, err } -func (z *Reader) init(r io.ReaderAt, size int64) error { - end, baseOffset, err := readDirectoryEnd(r, size) +func (r *Reader) init(rdr io.ReaderAt, size int64) error { + end, baseOffset, err := readDirectoryEnd(rdr, size) if err != nil { return err } - z.r = r - z.baseOffset = baseOffset + r.r = rdr + r.baseOffset = baseOffset // Since the number of directory records is not validated, it is not - // safe to preallocate z.File without first checking that the specified + // safe to preallocate r.File without first checking that the specified // number of files is reasonable, since a malformed archive may // indicate it contains up to 1 << 128 - 1 files. Since each file has a // header which will be _at least_ 30 bytes we can safely preallocate // if (data size / 30) >= end.directoryRecords. if end.directorySize < uint64(size) && (uint64(size)-end.directorySize)/30 >= end.directoryRecords { - z.File = make([]*File, 0, end.directoryRecords) + r.File = make([]*File, 0, end.directoryRecords) } - z.Comment = end.comment - rs := io.NewSectionReader(r, 0, size) - if _, err = rs.Seek(z.baseOffset+int64(end.directoryOffset), io.SeekStart); err != nil { + r.Comment = end.comment + rs := io.NewSectionReader(rdr, 0, size) + if _, err = rs.Seek(r.baseOffset+int64(end.directoryOffset), io.SeekStart); err != nil { return err } buf := bufio.NewReader(rs) + // Get once + zipinsecurepath := godebug.Get("zipinsecurepath") == "0" + // The count of files inside a zip is truncated to fit in a uint16. // Gloss over this by reading headers until we encounter // a bad one, and then only report an ErrFormat or UnexpectedEOF if // the file count modulo 65536 is incorrect. 
for { - f := &File{zip: z, zipr: r} + f := &File{zip: r, zipr: rdr} err = readDirectoryHeader(f, buf) - - // For compatibility with other zip programs, - // if we have a non-zero base offset and can't read - // the first directory header, try again with a zero - // base offset. - if err == ErrFormat && z.baseOffset != 0 && len(z.File) == 0 { - z.baseOffset = 0 - if _, err = rs.Seek(int64(end.directoryOffset), io.SeekStart); err != nil { - return err - } - buf.Reset(rs) - continue - } - if err == ErrFormat || err == io.ErrUnexpectedEOF { break } if err != nil { return err } - f.headerOffset += z.baseOffset - z.File = append(z.File, f) + f.headerOffset += r.baseOffset + r.File = append(r.File, f) } - if uint16(len(z.File)) != uint16(end.directoryRecords) { // only compare 16 bits here + if uint16(len(r.File)) != uint16(end.directoryRecords) { // only compare 16 bits here // Return the readDirectoryHeader error if we read // the wrong number of directory entries. return err } + if zipinsecurepath { + for _, f := range r.File { + if f.Name == "" { + // Zip permits an empty file name field. + continue + } + // The zip specification states that names must use forward slashes, + // so consider any backslashes in the name insecure. + if !filepath.IsLocal(f.Name) || strings.Contains(f.Name, `\`) { + return ErrInsecurePath + } + } + } return nil } // RegisterDecompressor registers or overrides a custom decompressor for a // specific method ID. If a decompressor for a given method is not found, -// Reader will default to looking up the decompressor at the package level. -func (z *Reader) RegisterDecompressor(method uint16, dcomp Decompressor) { - if z.decompressors == nil { - z.decompressors = make(map[uint16]Decompressor) +// [Reader] will default to looking up the decompressor at the package level. +func (r *Reader) RegisterDecompressor(method uint16, dcomp Decompressor) { + if r.decompressors == nil { + r.decompressors = make(map[uint16]Decompressor) } - z.decompressors[method] = dcomp + r.decompressors[method] = dcomp } -func (z *Reader) decompressor(method uint16) Decompressor { - dcomp := z.decompressors[method] +func (r *Reader) decompressor(method uint16) Decompressor { + dcomp := r.decompressors[method] if dcomp == nil { dcomp = decompressor(method) } @@ -180,7 +203,7 @@ func (rc *ReadCloser) Close() error { // DataOffset returns the offset of the file's possibly-compressed // data, relative to the beginning of the zip file. // -// Most callers should instead use Open, which transparently +// Most callers should instead use [File.Open], which transparently // decompresses data and verifies checksums. func (f *File) DataOffset() (offset int64, err error) { bodyOffset, err := f.findBodyOffset() @@ -190,13 +213,29 @@ func (f *File) DataOffset() (offset int64, err error) { return f.headerOffset + bodyOffset, nil } -// Open returns a ReadCloser that provides access to the File's contents. +// Open returns a [ReadCloser] that provides access to the [File]'s contents. // Multiple files may be read concurrently. func (f *File) Open() (io.ReadCloser, error) { bodyOffset, err := f.findBodyOffset() if err != nil { return nil, err } + if strings.HasSuffix(f.Name, "/") { + // The ZIP specification (APPNOTE.TXT) specifies that directories, which + // are technically zero-byte files, must not have any associated file + // data. 
We previously tried failing here if f.CompressedSize64 != 0, + // but it turns out that a number of implementations (namely, the Java + // jar tool) don't properly set the storage method on directories + // resulting in a file with compressed size > 0 but uncompressed size == + // 0. We still want to fail when a directory has associated uncompressed + // data, but we are tolerant of cases where the uncompressed size is + // zero but compressed size is not. + if f.UncompressedSize64 != 0 { + return &dirReader{ErrFormat}, nil + } else { + return &dirReader{io.EOF}, nil + } + } size := int64(f.CompressedSize64) r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size) dcomp := f.zip.decompressor(f.Method) @@ -217,7 +256,7 @@ func (f *File) Open() (io.ReadCloser, error) { return rc, nil } -// OpenRaw returns a Reader that provides access to the File's contents without +// OpenRaw returns a [Reader] that provides access to the [File]'s contents without // decompression. func (f *File) OpenRaw() (io.Reader, error) { bodyOffset, err := f.findBodyOffset() @@ -228,6 +267,18 @@ func (f *File) OpenRaw() (io.Reader, error) { return r, nil } +type dirReader struct { + err error +} + +func (r *dirReader) Read([]byte) (int, error) { + return 0, r.err +} + +func (r *dirReader) Close() error { + return nil +} + type checksumReader struct { rc io.ReadCloser hash hash.Hash32 @@ -419,8 +470,8 @@ parseExtras: const ticksPerSecond = 1e7 // Windows timestamp resolution ts := int64(attrBuf.uint64()) // ModTime since Windows epoch - secs := int64(ts / ticksPerSecond) - nsecs := (1e9 / ticksPerSecond) * int64(ts%ticksPerSecond) + secs := ts / ticksPerSecond + nsecs := (1e9 / ticksPerSecond) * (ts % ticksPerSecond) epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC) modified = time.Unix(epoch.Unix()+secs, nsecs) } @@ -565,12 +616,31 @@ func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, baseOffset } } + maxInt64 := uint64(1<<63 - 1) + if d.directorySize > maxInt64 || d.directoryOffset > maxInt64 { + return nil, 0, ErrFormat + } + baseOffset = directoryEndOffset - int64(d.directorySize) - int64(d.directoryOffset) // Make sure directoryOffset points to somewhere in our file. if o := baseOffset + int64(d.directoryOffset); o < 0 || o >= size { return nil, 0, ErrFormat } + + // If the directory end data tells us to use a non-zero baseOffset, + // but we would find a valid directory entry if we assume that the + // baseOffset is 0, then just use a baseOffset of 0. + // We've seen files in which the directory end data gives us + // an incorrect baseOffset. + if baseOffset > 0 { + off := int64(d.directoryOffset) + rs := io.NewSectionReader(r, off, size-off) + if readDirectoryHeader(&File{}, rs) == nil { + baseOffset = 0 + } + } + return d, baseOffset, nil } @@ -630,9 +700,13 @@ func findSignatureInBlock(b []byte) int { if b[i] == 'P' && b[i+1] == 'K' && b[i+2] == 0x05 && b[i+3] == 0x06 { // n is length of comment n := int(b[i+directoryEndLen-2]) | int(b[i+directoryEndLen-1])<<8 - if n+directoryEndLen+i <= len(b) { - return i + if n+directoryEndLen+i > len(b) { + // Truncated comment. + // Some parsers (such as Info-ZIP) ignore the truncated comment + // rather than treating it as a hard error. 
+ return -1 } + return i } } return -1 @@ -684,14 +758,14 @@ type fileInfoDirEntry interface { fs.DirEntry } -func (e *fileListEntry) stat() (fileInfoDirEntry, error) { - if e.isDup { - return nil, errors.New(e.name + ": duplicate entries in zip file") +func (f *fileListEntry) stat() (fileInfoDirEntry, error) { + if f.isDup { + return nil, errors.New(f.name + ": duplicate entries in zip file") } - if !e.isDir { - return headerFileInfo{&e.file.FileHeader}, nil + if !f.isDir { + return headerFileInfo{&f.file.FileHeader}, nil } - return e, nil + return f, nil } // Only used for directories. @@ -700,7 +774,7 @@ func (f *fileListEntry) Size() int64 { return 0 } func (f *fileListEntry) Mode() fs.FileMode { return fs.ModeDir | 0555 } func (f *fileListEntry) Type() fs.FileMode { return fs.ModeDir } func (f *fileListEntry) IsDir() bool { return true } -func (f *fileListEntry) Sys() interface{} { return nil } +func (f *fileListEntry) Sys() any { return nil } func (f *fileListEntry) ModTime() time.Time { if f.file == nil { @@ -711,12 +785,92 @@ func (f *fileListEntry) ModTime() time.Time { func (f *fileListEntry) Info() (fs.FileInfo, error) { return f, nil } +func (f *fileListEntry) String() string { + return formatDirEntry(f) +} + +// formatDirEntry returns a formatted version of dir for human readability. +// Implementations of [DirEntry] can call this from a String method. +// The outputs for a directory named subdir and a file named hello.go are: +// +// d subdir/ +// - hello.go +// +// TODO: Use fs.FormatDirEntry when Go 1.20 is no longer supported +func formatDirEntry(dir fs.DirEntry) string { + name := dir.Name() + b := make([]byte, 0, 5+len(name)) + + // The Type method does not return any permission bits, + // so strip them from the string. + mode := dir.Type().String() + mode = mode[:len(mode)-9] + + b = append(b, mode...) + b = append(b, ' ') + b = append(b, name...) + if dir.IsDir() { + b = append(b, '/') + } + return string(b) +} + +// formatFileInfo returns a formatted version of info for human readability. +// Implementations of [FileInfo] can call this from a String method. +// The output for a file named "hello.go", 100 bytes, mode 0o644, created +// January 1, 1970 at noon is +// +// -rw-r--r-- 100 1970-01-01 12:00:00 hello.go +// +// TODO: Use fs.FormatFileInfo when Go 1.20 is no longer supported +func formatFileInfo(info fs.FileInfo) string { + name := info.Name() + b := make([]byte, 0, 40+len(name)) + b = append(b, info.Mode().String()...) + b = append(b, ' ') + + size := info.Size() + var usize uint64 + if size >= 0 { + usize = uint64(size) + } else { + b = append(b, '-') + usize = uint64(-size) + } + var buf [20]byte + i := len(buf) - 1 + for usize >= 10 { + q := usize / 10 + buf[i] = byte('0' + usize - q*10) + i-- + usize = q + } + buf[i] = byte('0' + usize) + b = append(b, buf[i:]...) + b = append(b, ' ') + + b = append(b, info.ModTime().Format(time.DateTime)...) + b = append(b, ' ') + + b = append(b, name...) + if info.IsDir() { + b = append(b, '/') + } + + return string(b) +} + // toValidName coerces name to be a valid name for fs.FS.Open. 
func toValidName(name string) string { name = strings.ReplaceAll(name, `\`, `/`) p := path.Clean(name) + p = strings.TrimPrefix(p, "/") - p = strings.TrimPrefix(p, "../") + + for strings.HasPrefix(p, "../") { + p = p[len("../"):] + } + return p } diff --git a/zip/reader_test.go b/zip/reader_test.go index a08a2e8a2d..220ba57390 100644 --- a/zip/reader_test.go +++ b/zip/reader_test.go @@ -127,6 +127,24 @@ var tests = []ZipTest{ }, }, }, + { + Name: "test-badbase.zip", + Comment: "This is a zipfile comment.", + File: []ZipTestFile{ + { + Name: "test.txt", + Content: []byte("This is a test text file.\n"), + Modified: time.Date(2010, 9, 5, 12, 12, 1, 0, timeZone(+10*time.Hour)), + Mode: 0644, + }, + { + Name: "gophercolor16x16.png", + File: "gophercolor16x16.png", + Modified: time.Date(2010, 9, 5, 15, 52, 58, 0, timeZone(+10*time.Hour)), + Mode: 0644, + }, + }, + }, { Name: "r.zip", Source: returnRecursiveZip, @@ -553,6 +571,14 @@ var tests = []ZipTest{ }, }, }, + // Issue 66869: Don't skip over an EOCDR with a truncated comment. + // The test file sneakily hides a second EOCDR before the first one; + // previously we would extract one file ("file") from this archive, + // while most other tools would reject the file or extract a different one ("FILE"). + { + Name: "comment-truncated.zip", + Error: ErrFormat, + }, } func TestReader(t *testing.T) { @@ -1169,7 +1195,7 @@ func TestIssue12449(t *testing.T) { 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, } // Read in the archive. - _, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data))) + _, err := NewReader(bytes.NewReader(data), int64(len(data))) if err != nil { t.Errorf("Error reading the archive: %v", err) } @@ -1291,6 +1317,7 @@ func TestFSModTime(t *testing.T) { } func TestCVE202127919(t *testing.T) { + t.Setenv("GODEBUG", "zipinsecurepath=0") // Archive containing only the file "../test.txt" data := []byte{ 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x00, @@ -1315,8 +1342,8 @@ func TestCVE202127919(t *testing.T) { 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x39, 0x00, 0x00, 0x00, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00, } - r, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data))) - if err != nil { + r, err := NewReader(bytes.NewReader(data), int64(len(data))) + if err != ErrInsecurePath { t.Fatalf("Error reading the archive: %v", err) } _, err = r.Open("test.txt") @@ -1334,6 +1361,62 @@ func TestCVE202127919(t *testing.T) { } } +func TestOpenReaderInsecurePath(t *testing.T) { + t.Setenv("GODEBUG", "zipinsecurepath=0") + // Archive containing only the file "../test.txt" + data := []byte{ + 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x00, + 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x2e, 0x2e, + 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74, 0x78, + 0x74, 0x0a, 0xc9, 0xc8, 0x2c, 0x56, 0xc8, 0x2c, + 0x56, 0x48, 0x54, 0x28, 0x49, 0x2d, 0x2e, 0x51, + 0x28, 0x49, 0xad, 0x28, 0x51, 0x48, 0xcb, 0xcc, + 0x49, 0xd5, 0xe3, 0x02, 0x04, 0x00, 0x00, 0xff, + 0xff, 0x50, 0x4b, 0x07, 0x08, 0xc0, 0xd7, 0xed, + 0xc3, 0x20, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, + 0x00, 0x50, 0x4b, 0x01, 0x02, 0x14, 0x00, 0x14, + 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xc0, 0xd7, 0xed, 0xc3, 0x20, 0x00, 0x00, + 0x00, 0x1a, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, + 0x2e, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74, + 0x78, 0x74, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, + 0x00, 0x00, 
0x01, 0x00, 0x01, 0x00, 0x39, 0x00, + 0x00, 0x00, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00, + } + + // Read in the archive with the OpenReader interface + name := filepath.Join(t.TempDir(), "test.zip") + err := os.WriteFile(name, data, 0644) + if err != nil { + t.Fatalf("Unable to write out the bugos zip entry") + } + r, err := OpenReader(name) + if r != nil { + defer r.Close() + } + + if err != ErrInsecurePath { + t.Fatalf("Error reading the archive, we expected ErrInsecurePath but got: %v", err) + } + _, err = r.Open("test.txt") + if err != nil { + t.Errorf("Error reading file: %v", err) + } + if len(r.File) != 1 { + t.Fatalf("No entries in the file list") + } + if r.File[0].Name != "../test.txt" { + t.Errorf("Unexpected entry name: %s", r.File[0].Name) + } + if _, err := r.File[0].Open(); err != nil { + t.Errorf("Error opening file: %v", err) + } +} + func TestCVE202133196(t *testing.T) { // Archive that indicates it has 1 << 128 -1 files, // this would previously cause a panic due to attempting @@ -1412,6 +1495,7 @@ func TestCVE202139293(t *testing.T) { } func TestCVE202141772(t *testing.T) { + t.Setenv("GODEBUG", "zipinsecurepath=0") // Archive contains a file whose name is exclusively made up of '/', '\' // characters, or "../", "..\" paths, which would previously cause a panic. // @@ -1484,8 +1568,8 @@ func TestCVE202141772(t *testing.T) { 0x00, 0x04, 0x00, 0x04, 0x00, 0x31, 0x01, 0x00, 0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, } - r, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data))) - if err != nil { + r, err := NewReader(bytes.NewReader(data), int64(len(data))) + if err != ErrInsecurePath { t.Fatalf("Error reading the archive: %v", err) } entryNames := []string{`/`, `//`, `\`, `/test.txt`} @@ -1555,3 +1639,199 @@ func TestUnderSize(t *testing.T) { }) } } + +func TestIssue54801(t *testing.T) { + for _, input := range []string{"testdata/readme.zip", "testdata/dd.zip"} { + z, err := OpenReader(input) + if err != nil { + t.Fatal(err) + } + defer z.Close() + + for _, f := range z.File { + // Make file a directory + f.Name += "/" + + t.Run(f.Name, func(t *testing.T) { + t.Logf("CompressedSize64: %d, Flags: %#x", f.CompressedSize64, f.Flags) + + rd, err := f.Open() + if err != nil { + t.Fatal(err) + } + defer rd.Close() + + n, got := io.Copy(io.Discard, rd) + if n != 0 || got != ErrFormat { + t.Fatalf("Error mismatch, got: %d, %v, want: %v", n, got, ErrFormat) + } + }) + } + } +} + +func TestInsecurePaths(t *testing.T) { + t.Setenv("GODEBUG", "zipinsecurepath=0") + for _, path := range []string{ + "../foo", + "/foo", + "a/b/../../../c", + `a\b`, + } { + var buf bytes.Buffer + zw := NewWriter(&buf) + _, err := zw.Create(path) + if err != nil { + t.Errorf("zw.Create(%q) = %v", path, err) + continue + } + zw.Close() + + zr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len())) + if err != ErrInsecurePath { + t.Errorf("NewReader for archive with file %q: got err %v, want ErrInsecurePath", path, err) + continue + } + var gotPaths []string + for _, f := range zr.File { + gotPaths = append(gotPaths, f.Name) + } + if !reflect.DeepEqual(gotPaths, []string{path}) { + t.Errorf("NewReader for archive with file %q: got files %q", path, gotPaths) + continue + } + } +} + +func TestDisableInsecurePathCheck(t *testing.T) { + t.Setenv("GODEBUG", "zipinsecurepath=1") + var buf bytes.Buffer + zw := NewWriter(&buf) + const name = "/foo" + _, err := zw.Create(name) + if err != nil { + t.Fatalf("zw.Create(%q) = %v", name, err) + } + zw.Close() + zr, err := 
NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len())) + if err != nil { + t.Fatalf("NewReader with zipinsecurepath=1: got err %v, want nil", err) + } + var gotPaths []string + for _, f := range zr.File { + gotPaths = append(gotPaths, f.Name) + } + if want := []string{name}; !reflect.DeepEqual(gotPaths, want) { + t.Errorf("NewReader with zipinsecurepath=1: got files %q, want %q", gotPaths, want) + } +} + +func TestCompressedDirectory(t *testing.T) { + // Empty Java JAR, with a compressed directory with uncompressed size 0 + // which should not fail. + // + // Length Method Size Cmpr Date Time CRC-32 Name + // -------- ------ ------- ---- ---------- ----- -------- ---- + // 0 Defl:N 2 0% 12-01-2022 16:50 00000000 META-INF/ + // 60 Defl:N 59 2% 12-01-2022 16:50 af937e93 META-INF/MANIFEST.MF + // -------- ------- --- ------- + // 60 61 -2% 2 files + data := []byte{ + 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x08, + 0x08, 0x00, 0x49, 0x86, 0x81, 0x55, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x09, 0x00, 0x04, 0x00, 0x4d, 0x45, + 0x54, 0x41, 0x2d, 0x49, 0x4e, 0x46, 0x2f, 0xfe, + 0xca, 0x00, 0x00, 0x03, 0x00, 0x50, 0x4b, 0x07, + 0x08, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x03, + 0x04, 0x14, 0x00, 0x08, 0x08, 0x08, 0x00, 0x49, + 0x86, 0x81, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, + 0x00, 0x00, 0x00, 0x4d, 0x45, 0x54, 0x41, 0x2d, + 0x49, 0x4e, 0x46, 0x2f, 0x4d, 0x41, 0x4e, 0x49, + 0x46, 0x45, 0x53, 0x54, 0x2e, 0x4d, 0x46, 0xf3, + 0x4d, 0xcc, 0xcb, 0x4c, 0x4b, 0x2d, 0x2e, 0xd1, + 0x0d, 0x4b, 0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0xb3, + 0x52, 0x30, 0xd4, 0x33, 0xe0, 0xe5, 0x72, 0x2e, + 0x4a, 0x4d, 0x2c, 0x49, 0x4d, 0xd1, 0x75, 0xaa, + 0x04, 0x0a, 0x00, 0x45, 0xf4, 0x0c, 0x8d, 0x15, + 0x34, 0xdc, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0x15, + 0x3c, 0xf3, 0x92, 0xf5, 0x34, 0x79, 0xb9, 0x78, + 0xb9, 0x00, 0x50, 0x4b, 0x07, 0x08, 0x93, 0x7e, + 0x93, 0xaf, 0x3b, 0x00, 0x00, 0x00, 0x3c, 0x00, + 0x00, 0x00, 0x50, 0x4b, 0x01, 0x02, 0x14, 0x00, + 0x14, 0x00, 0x08, 0x08, 0x08, 0x00, 0x49, 0x86, + 0x81, 0x55, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x4d, 0x45, 0x54, 0x41, 0x2d, 0x49, 0x4e, 0x46, + 0x2f, 0xfe, 0xca, 0x00, 0x00, 0x50, 0x4b, 0x01, + 0x02, 0x14, 0x00, 0x14, 0x00, 0x08, 0x08, 0x08, + 0x00, 0x49, 0x86, 0x81, 0x55, 0x93, 0x7e, 0x93, + 0xaf, 0x3b, 0x00, 0x00, 0x00, 0x3c, 0x00, 0x00, + 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3d, + 0x00, 0x00, 0x00, 0x4d, 0x45, 0x54, 0x41, 0x2d, + 0x49, 0x4e, 0x46, 0x2f, 0x4d, 0x41, 0x4e, 0x49, + 0x46, 0x45, 0x53, 0x54, 0x2e, 0x4d, 0x46, 0x50, + 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x02, 0x00, 0x7d, 0x00, 0x00, 0x00, 0xba, + 0x00, 0x00, 0x00, 0x00, 0x00, + } + r, err := NewReader(bytes.NewReader(data), int64(len(data))) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + for _, f := range r.File { + r, err := f.Open() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if _, err := io.Copy(io.Discard, r); err != nil { + t.Fatalf("unexpected error: %v", err) + } + } +} + +func TestBaseOffsetPlusOverflow(t *testing.T) { + // directoryOffset > maxInt64 && size-directoryOffset < 0 + data := []byte{ + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 
0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0xff, 0xff, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x50, 0x4b, 0x06, 0x06, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0xff, 0xff, 0x20, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x20, 0x08, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x80, 0x50, 0x4b, 0x06, 0x07, 0x00, + 0x00, 0x00, 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x50, + 0x4b, 0x05, 0x06, 0x20, 0x20, 0x20, 0x20, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0x20, 0x00, + } + defer func() { + if r := recover(); r != nil { + t.Fatalf("NewReader panicked: %s", r) + } + }() + // Previously, this would trigger a panic as we attempt to read from + // an io.SectionReader which would access a slice at a negative offset + // as the section reader offset & size were < 0. + NewReader(bytes.NewReader(data), int64(len(data))+1875) +} diff --git a/zip/register.go b/zip/register.go index ca8c13ce92..8ea8889382 100644 --- a/zip/register.go +++ b/zip/register.go @@ -20,7 +20,7 @@ import ( type Compressor func(w io.Writer) (io.WriteCloser, error) // A Decompressor returns a new decompressing reader, reading from r. -// The ReadCloser's Close method must be used to release associated resources. +// The [io.ReadCloser]'s Close method must be used to release associated resources. // The Decompressor itself must be safe to invoke from multiple goroutines // simultaneously, but each returned reader will be used only by // one goroutine at a time. @@ -116,7 +116,7 @@ func init() { } // RegisterDecompressor allows custom decompressors for a specified method ID. -// The common methods Store and Deflate are built in. +// The common methods [Store] and [Deflate] are built in. func RegisterDecompressor(method uint16, dcomp Decompressor) { if _, dup := decompressors.LoadOrStore(method, dcomp); dup { panic("decompressor already registered") @@ -124,7 +124,7 @@ func RegisterDecompressor(method uint16, dcomp Decompressor) { } // RegisterCompressor registers custom compressors for a specified method ID. -// The common methods Store and Deflate are built in. +// The common methods [Store] and [Deflate] are built in. func RegisterCompressor(method uint16, comp Compressor) { if _, dup := compressors.LoadOrStore(method, comp); dup { panic("compressor already registered") diff --git a/zip/struct.go b/zip/struct.go index 88effedc0f..2637e9c235 100644 --- a/zip/struct.go +++ b/zip/struct.go @@ -5,7 +5,7 @@ /* Package zip provides support for reading and writing ZIP archives. -See: https://www.pkware.com/appnote +See the [ZIP specification] for details. This package does not support disk spanning. @@ -16,6 +16,8 @@ fields. The 64 bit fields will always contain the correct value and for normal archives both fields will be the same. For files requiring the ZIP64 format the 32 bit fields will be 0xffffffff and the 64 bit fields must be used instead. 
+ +[ZIP specification]: https://support.pkware.com/pkzip/appnote */ package zip @@ -65,7 +67,7 @@ const ( // // IDs 0..31 are reserved for official use by PKWARE. // IDs above that range are defined by third-party vendors. - // Since ZIP lacked high precision timestamps (nor a official specification + // Since ZIP lacked high precision timestamps (nor an official specification // of the timezone used for the date fields), many competing extra fields // have been invented. Pervasive use effectively makes them "official". // @@ -77,21 +79,16 @@ const ( infoZipUnixExtraID = 0x5855 // Info-ZIP Unix extension ) -// FileHeader describes a file within a zip file. -// See the zip spec for details. +// FileHeader describes a file within a ZIP file. +// See the [ZIP specification] for details. +// +// [ZIP specification]: https://support.pkware.com/pkzip/appnote type FileHeader struct { // Name is the name of the file. // // It must be a relative path, not start with a drive letter (such as "C:"), // and must use forward slashes instead of back slashes. A trailing slash // indicates that this file is a directory and should have no data. - // - // When reading zip files, the Name field is populated from - // the zip file directly and is not validated for correctness. - // It is the caller's responsibility to sanitize it as - // appropriate, including canonicalizing slash directions, - // validating that paths are relative, and preventing path - // traversal through filenames ("../../../"). Name string // Comment is any arbitrary user-defined string shorter than 64KiB. @@ -124,25 +121,51 @@ type FileHeader struct { // When writing, an extended timestamp (which is timezone-agnostic) is // always emitted. The legacy MS-DOS date field is encoded according to the // location of the Modified time. - Modified time.Time - ModifiedTime uint16 // Deprecated: Legacy MS-DOS date; use Modified instead. - ModifiedDate uint16 // Deprecated: Legacy MS-DOS time; use Modified instead. - - CRC32 uint32 - CompressedSize uint32 // Deprecated: Use CompressedSize64 instead. - UncompressedSize uint32 // Deprecated: Use UncompressedSize64 instead. - CompressedSize64 uint64 + Modified time.Time + + // ModifiedTime is an MS-DOS-encoded time. + // + // Deprecated: Use Modified instead. + ModifiedTime uint16 + + // ModifiedDate is an MS-DOS-encoded date. + // + // Deprecated: Use Modified instead. + ModifiedDate uint16 + + // CRC32 is the CRC32 checksum of the file content. + CRC32 uint32 + + // CompressedSize is the compressed size of the file in bytes. + // If either the uncompressed or compressed size of the file + // does not fit in 32 bits, CompressedSize is set to ^uint32(0). + // + // Deprecated: Use CompressedSize64 instead. + CompressedSize uint32 + + // UncompressedSize is the compressed size of the file in bytes. + // If either the uncompressed or compressed size of the file + // does not fit in 32 bits, CompressedSize is set to ^uint32(0). + // + // Deprecated: Use UncompressedSize64 instead. + UncompressedSize uint32 + + // CompressedSize64 is the compressed size of the file in bytes. + CompressedSize64 uint64 + + // UncompressedSize64 is the uncompressed size of the file in bytes. UncompressedSize64 uint64 - Extra []byte - ExternalAttrs uint32 // Meaning depends on CreatorVersion + + Extra []byte + ExternalAttrs uint32 // Meaning depends on CreatorVersion } -// FileInfo returns an fs.FileInfo for the FileHeader. +// FileInfo returns an fs.FileInfo for the [FileHeader]. 
func (h *FileHeader) FileInfo() fs.FileInfo { return headerFileInfo{h} } -// headerFileInfo implements fs.FileInfo. +// headerFileInfo implements [fs.FileInfo]. type headerFileInfo struct { fh *FileHeader } @@ -163,11 +186,15 @@ func (fi headerFileInfo) ModTime() time.Time { } func (fi headerFileInfo) Mode() fs.FileMode { return fi.fh.Mode() } func (fi headerFileInfo) Type() fs.FileMode { return fi.fh.Mode().Type() } -func (fi headerFileInfo) Sys() interface{} { return fi.fh } +func (fi headerFileInfo) Sys() any { return fi.fh } func (fi headerFileInfo) Info() (fs.FileInfo, error) { return fi, nil } -// FileInfoHeader creates a partially-populated FileHeader from an +func (fi headerFileInfo) String() string { + return formatFileInfo(fi) +} + +// FileInfoHeader creates a partially-populated [FileHeader] from an // fs.FileInfo. // Because fs.FileInfo's Name method returns only the base name of // the file it describes, it may be necessary to modify the Name field @@ -218,7 +245,7 @@ func timeZone(offset time.Duration) *time.Location { // msDosTimeToTime converts an MS-DOS date and time into a time.Time. // The resolution is 2s. -// See: https://msdn.microsoft.com/en-us/library/ms724247(v=VS.85).aspx +// See: https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-dosdatetimetofiletime func msDosTimeToTime(dosDate, dosTime uint16) time.Time { return time.Date( // date bits 0-4: day of month; 5-8: month; 9-15: years since 1980 @@ -238,7 +265,7 @@ func msDosTimeToTime(dosDate, dosTime uint16) time.Time { // timeToMsDosTime converts a time.Time to an MS-DOS date and time. // The resolution is 2s. -// See: https://msdn.microsoft.com/en-us/library/ms724274(v=VS.85).aspx +// See: https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-filetimetodosdatetime func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) { fDate = uint16(t.Day() + int(t.Month())<<5 + (t.Year()-1980)<<9) fTime = uint16(t.Second()/2 + t.Minute()<<5 + t.Hour()<<11) @@ -246,17 +273,17 @@ func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) { } // ModTime returns the modification time in UTC using the legacy -// ModifiedDate and ModifiedTime fields. +// [ModifiedDate] and [ModifiedTime] fields. // -// Deprecated: Use Modified instead. +// Deprecated: Use [Modified] instead. func (h *FileHeader) ModTime() time.Time { return msDosTimeToTime(h.ModifiedDate, h.ModifiedTime) } -// SetModTime sets the Modified, ModifiedTime, and ModifiedDate fields +// SetModTime sets the [Modified], [ModifiedTime], and [ModifiedDate] fields // to the given time in UTC. // -// Deprecated: Use Modified instead. +// Deprecated: Use [Modified] instead. func (h *FileHeader) SetModTime(t time.Time) { t = t.UTC() // Convert to UTC for compatibility h.Modified = t @@ -282,7 +309,7 @@ const ( msdosReadOnly = 0x01 ) -// Mode returns the permission and mode bits for the FileHeader. +// Mode returns the permission and mode bits for the [FileHeader]. func (h *FileHeader) Mode() (mode fs.FileMode) { switch h.CreatorVersion >> 8 { case creatorUnix, creatorMacOSX: @@ -296,7 +323,7 @@ func (h *FileHeader) Mode() (mode fs.FileMode) { return mode } -// SetMode changes the permission and mode bits for the FileHeader. +// SetMode changes the permission and mode bits for the [FileHeader]. 
func (h *FileHeader) SetMode(mode fs.FileMode) { h.CreatorVersion = h.CreatorVersion&0xff | creatorUnix<<8 h.ExternalAttrs = fileModeToUnixMode(mode) << 16 @@ -315,8 +342,8 @@ func (h *FileHeader) isZip64() bool { return h.CompressedSize64 >= uint32max || h.UncompressedSize64 >= uint32max } -func (f *FileHeader) hasDataDescriptor() bool { - return f.Flags&0x8 != 0 +func (h *FileHeader) hasDataDescriptor() bool { + return h.Flags&0x8 != 0 } func msdosModeToFileMode(m uint32) (mode fs.FileMode) { diff --git a/zip/testdata/comment-truncated.zip b/zip/testdata/comment-truncated.zip new file mode 100644 index 0000000000000000000000000000000000000000..1bc19a85575964f378a8a30f198ed6ba5360aa7d GIT binary patch literal 216 zcmWIWW@cf4gUWrGI~jpI5C#dmdHT2ppr}ax{CO=%28Pozb5c_hOA-UT8JU2>aDc83 pE&*nMbOm^`vVk~^KxhP{)xa|7=AgR>tO!m(+=pt;1fVP<0{|c)7RUeq literal 0 HcmV?d00001 diff --git a/zip/testdata/test-badbase.zip b/zip/testdata/test-badbase.zip new file mode 100644 index 0000000000000000000000000000000000000000..245a62cb6d116672b6c2e3201553cd0e047e0218 GIT binary patch literal 1170 zcmWIWW@Zs#U|`^2XiQYKJ#hW)VM!oQ3M?YSP?B0)qE}K;5*otEz+CvJ$)^m6ODnh; z7+JnDGBAL3a-Te*6UMN}rFGJkM?wmLfr}l&aaaM)^pwV1FgBTd*)~VY5GrSri z$jrb1!XgYZ4C(m=8L36d`8oMThGrFpW_ksA>0oQD44Qqcff&u2&Hz7mUM?w+fxMm` zE&U=x?Zy@V2qPe0vcxr_Bsf2F!C6nVlf&(QE?5{rm?pSGb*exy9+#I&RurKk- z+m)0yRCPG=d-HHDZeo&8l5*8w*j(kQ)h25CKV`;*MHBS|=I94cI>i2RTbstM*llm) zO8IWizic_XckQR<_w%av+wcEed*5>UK?&z&H{O(96e0boH{+WCE(bu95f2^3EZ4O=VNZ?3)UvtyJg5QDRcf$^EI~Jki zWtQzJsL_x*GnsW)scd27+nNv&O9`(nP~TLZC!C)&dpmb9Gex+~DL%5m{kC!2{41u2r}!o#L;7=ONQ zE-qq{y7hnokNZ9E^>G%L>tA=N)+ZKnN{AiY63=$`mQ27(iv*7buak|7iv?tIZF@fl zt>|OAAsR5{#f{tFon;*@eKst4U%}L6{!DE$Q;-Gc;z`Q`4>jq0{kyW^?3@=d0sB55 zxoLcI^%PC5#tAeEd>m$^DR^;2N$vDP24)a*LyE?_;6T$0ZaWgUlR!) zO)1Y`?`E^OggslIl9kNpY|u0@YV$J_L(}Dqa{cO(OH|YIR<91PW~;8;CG9SvxT%P1 zf&1?rTi;(l9{BL-LFV7-o3%9b_5H&Yx{{nGExEP)?O%T1_U}*5o%1=o!$7pc+~7{P zVEy`CyQ(f_ZB6^{)1;6Rw%g;&&eO(;vcI48f3W$#VE69d#v5C|RN3w=iCp_vEP-JK z2X8m?^Q2e6G|k}Y>gTe~DWNIAn~_P58CTww04Zev=23UUDy$e_XNHK(DWELv`QKCXsW uint16max { return errors.New("zip: Writer.Comment too long") @@ -125,7 +126,7 @@ func (w *Writer) Close() error { b.uint16(uint16(len(h.Comment))) b = b[4:] // skip disk number start and internal file attr (2x uint16) b.uint32(h.ExternalAttrs) - if h.isZip64() || h.offset > uint32max { + if h.offset > uint32max { b.uint32(uint32max) } else { b.uint32(uint32(h.offset)) @@ -207,14 +208,14 @@ func (w *Writer) Close() error { } // Create adds a file to the zip file using the provided name. -// It returns a Writer to which the file contents should be written. -// The file contents will be compressed using the Deflate method. +// It returns a [Writer] to which the file contents should be written. +// The file contents will be compressed using the [Deflate] method. // The name must be a relative path: it must not start with a drive // letter (e.g. C:) or leading slash, and only forward slashes are // allowed. To create a directory instead of a file, add a trailing // slash to the name. -// The file's contents must be written to the io.Writer before the next -// call to Create, CreateHeader, or Close. +// The file's contents must be written to the [io.Writer] before the next +// call to [Writer.Create], [Writer.CreateHeader], or [Writer.Close]. 
func (w *Writer) Create(name string) (io.Writer, error) { header := &FileHeader{ Name: name, @@ -261,13 +262,13 @@ func (w *Writer) prepare(fh *FileHeader) error { return nil } -// CreateHeader adds a file to the zip archive using the provided FileHeader -// for the file metadata. Writer takes ownership of fh and may mutate -// its fields. The caller must not modify fh after calling CreateHeader. +// CreateHeader adds a file to the zip archive using the provided [FileHeader] +// for the file metadata. [Writer] takes ownership of fh and may mutate +// its fields. The caller must not modify fh after calling [Writer.CreateHeader]. // -// This returns a Writer to which the file contents should be written. +// This returns a [Writer] to which the file contents should be written. // The file's contents must be written to the io.Writer before the next -// call to Create, CreateHeader, CreateRaw, or Close. +// call to [Writer.Create], [Writer.CreateHeader], [Writer.CreateRaw], or [Writer.Close]. func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) { if err := w.prepare(fh); err != nil { return nil, err @@ -433,18 +434,12 @@ func min64(x, y uint64) uint64 { return y } -// CreateHeaderRaw is replaced by CreateRaw. -// Deprecated: CreateHeaderRaw is replaced by CreateRaw (stdlib name). -func (w *Writer) CreateHeaderRaw(fh *FileHeader) (io.Writer, error) { - return w.CreateRaw(fh) -} - -// CreateRaw adds a file to the zip archive using the provided FileHeader and -// returns a Writer to which the file contents should be written. The file's -// contents must be written to the io.Writer before the next call to Create, -// CreateHeader, CreateRaw, or Close. +// CreateRaw adds a file to the zip archive using the provided [FileHeader] and +// returns a [Writer] to which the file contents should be written. The file's +// contents must be written to the io.Writer before the next call to [Writer.Create], +// [Writer.CreateHeader], [Writer.CreateRaw], or [Writer.Close]. // -// In contrast to CreateHeader, the bytes passed to Writer are not compressed. +// In contrast to [Writer.CreateHeader], the bytes passed to Writer are not compressed. func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) { if err := w.prepare(fh); err != nil { return nil, err @@ -476,9 +471,8 @@ func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) { return fw, nil } -// Copy copies the file f (obtained from a Reader) into w. It copies the raw +// Copy copies the file f (obtained from a [Reader]) into w. It copies the raw // form directly bypassing decompression, compression, and validation. -// CHANGE: Optional file name cannot be specified any more due to stdlib api. func (w *Writer) Copy(f *File) error { r, err := f.OpenRaw() if err != nil { @@ -493,7 +487,7 @@ func (w *Writer) Copy(f *File) error { } // RegisterCompressor registers or overrides a custom compressor for a specific -// method ID. If a compressor for a given method is not found, Writer will +// method ID. If a compressor for a given method is not found, [Writer] will // default to looking up the compressor at the package level. func (w *Writer) RegisterCompressor(method uint16, comp Compressor) { if w.compressors == nil { @@ -502,6 +496,44 @@ func (w *Writer) RegisterCompressor(method uint16, comp Compressor) { w.compressors[method] = comp } +// AddFS adds the files from fs.FS to the archive. 
+// It walks the directory tree starting at the root of the filesystem +// adding each file to the zip using deflate while maintaining the directory structure. +func (w *Writer) AddFS(fsys fs.FS) error { + return fs.WalkDir(fsys, ".", func(name string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() { + return nil + } + info, err := d.Info() + if err != nil { + return err + } + if !info.Mode().IsRegular() { + return errors.New("zip: cannot add non-regular file") + } + h, err := FileInfoHeader(info) + if err != nil { + return err + } + h.Name = name + h.Method = Deflate + fw, err := w.CreateHeader(h) + if err != nil { + return err + } + f, err := fsys.Open(name) + if err != nil { + return err + } + defer f.Close() + _, err = io.Copy(fw, f) + return err + }) +} + func (w *Writer) compressor(method uint16) Compressor { comp := w.compressors[method] if comp == nil { diff --git a/zip/writer_test.go b/zip/writer_test.go index eb3109bf3e..0300b14633 100644 --- a/zip/writer_test.go +++ b/zip/writer_test.go @@ -16,6 +16,7 @@ import ( "os" "strings" "testing" + "testing/fstest" "time" ) @@ -367,7 +368,7 @@ func TestWriterDirAttributes(t *testing.T) { } } -func TestWriterCopy(t *testing.T) { +func TestWriterCopyKP(t *testing.T) { want, err := os.ReadFile("testdata/test.zip") if err != nil { t.Fatalf("unexpected ReadFile error: %v", err) @@ -420,7 +421,7 @@ func TestWriterCopy(t *testing.T) { } } -func TestWriterCopy2(t *testing.T) { +func TestWriterCopy(t *testing.T) { // make a zip file buf := new(bytes.Buffer) w := NewWriter(buf) @@ -655,3 +656,71 @@ func BenchmarkCompressedZipGarbage(b *testing.B) { } }) } + +func writeTestsToFS(tests []WriteTest) fs.FS { + fsys := fstest.MapFS{} + for _, wt := range tests { + fsys[wt.Name] = &fstest.MapFile{ + Data: wt.Data, + Mode: wt.Mode, + } + } + return fsys +} + +func TestWriterAddFS(t *testing.T) { + buf := new(bytes.Buffer) + w := NewWriter(buf) + tests := []WriteTest{ + { + Name: "file.go", + Data: []byte("hello"), + Mode: 0644, + }, + { + Name: "subfolder/another.go", + Data: []byte("world"), + Mode: 0644, + }, + } + err := w.AddFS(writeTestsToFS(tests)) + if err != nil { + t.Fatal(err) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + // read it back + r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len())) + if err != nil { + t.Fatal(err) + } + for i, wt := range tests { + testReadFile(t, r.File[i], &wt) + } +} + +func TestIssue61875(t *testing.T) { + buf := new(bytes.Buffer) + w := NewWriter(buf) + tests := []WriteTest{ + { + Name: "symlink", + Data: []byte("../link/target"), + Method: Deflate, + Mode: 0755 | fs.ModeSymlink, + }, + { + Name: "device", + Data: []byte(""), + Method: Deflate, + Mode: 0755 | fs.ModeDevice, + }, + } + err := w.AddFS(writeTestsToFS(tests)) + if err == nil { + t.Errorf("expected error, got nil") + } +} diff --git a/zip/zip_test.go b/zip/zip_test.go index 0b4faa82ed..cf7a73b450 100644 --- a/zip/zip_test.go +++ b/zip/zip_test.go @@ -23,13 +23,13 @@ func TestOver65kFiles(t *testing.T) { if testing.Short() { t.Skip("skipping in short mode") } - buf := new(bytes.Buffer) + buf := new(strings.Builder) w := NewWriter(buf) const nFiles = (1 << 16) + 42 for i := 0; i < nFiles; i++ { _, err := w.CreateHeader(&FileHeader{ Name: fmt.Sprintf("%d.dat", i), - Method: Store, // avoid Issue 6136 and Issue 6138 + Method: Store, // Deflate is too slow when it is compiled with -race flag }) if err != nil { t.Fatalf("creating file %d: %v", i, err) @@ -596,7 +596,7 @@ func testZip64(t 
testing.TB, size int64) *rleBuffer { } // read back zip file and check that we get to the end of it - r, err := NewReader(buf, int64(buf.Size())) + r, err := NewReader(buf, buf.Size()) if err != nil { t.Fatal("reader:", err) }
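
---

For downstream callers, the two visible changes in this upgrade are the removal of `CreateHeaderRaw` (replaced by `CreateRaw`) and the new `ErrInsecurePath` result gated on `GODEBUG=zipinsecurepath=0`. Below is a minimal migration sketch, assuming this repository's import path; the file name `out.zip`, the entry name `payload.bin`, and the payload bytes are illustrative assumptions, not part of the patch.

```go
// Hypothetical downstream code; names and payload are illustrative only.
package main

import (
	"errors"
	"hash/crc32"
	"log"
	"os"

	"github.com/klauspost/compress/zip"
)

func main() {
	// Writing raw (pre-compressed) data: CreateHeaderRaw is removed by this
	// patch; CreateRaw takes the same *FileHeader. With Method Store the raw
	// bytes are the stored bytes, so sizes and CRC-32 are filled in by the caller.
	data := []byte("already in its final stored form")
	out, err := os.Create("out.zip")
	if err != nil {
		log.Fatal(err)
	}
	zw := zip.NewWriter(out)
	hdr := &zip.FileHeader{
		Name:               "payload.bin",
		Method:             zip.Store,
		CRC32:              crc32.ChecksumIEEE(data),
		CompressedSize64:   uint64(len(data)),
		UncompressedSize64: uint64(len(data)),
	}
	fw, err := zw.CreateRaw(hdr) // previously: zw.CreateHeaderRaw(hdr)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := fw.Write(data); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	if err := out.Close(); err != nil {
		log.Fatal(err)
	}

	// Reading: when GODEBUG contains zipinsecurepath=0, OpenReader/NewReader
	// return a usable reader together with ErrInsecurePath if any entry has a
	// non-local name. Callers that accept such names can ignore that error.
	r, err := zip.OpenReader("out.zip")
	if err != nil && !errors.Is(err, zip.ErrInsecurePath) {
		log.Fatal(err)
	}
	defer r.Close()
	for _, f := range r.File {
		log.Println(f.Name)
	}
}
```

Unlike `CreateHeader`, `CreateRaw` does not compress the written bytes, which is why the sketch supplies the CRC-32 and both size fields itself before writing.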